repository_name stringlengths 5 67 | func_path_in_repository stringlengths 4 234 | func_name stringlengths 0 314 | whole_func_string stringlengths 52 3.87M | language stringclasses 6
values | func_code_string stringlengths 52 3.87M | func_code_tokens listlengths 15 672k | func_documentation_string stringlengths 1 47.2k | func_documentation_tokens listlengths 1 3.92k | split_name stringclasses 1
value | func_code_url stringlengths 85 339 |
|---|---|---|---|---|---|---|---|---|---|---|
flo-compbio/genometools | genometools/gcloud/storage.py | get_files | def get_files(client, bucket, prefix=''):
"""Lists files/objects on a bucket.
TODO: docstring"""
bucket = client.get_bucket(bucket)
files = list(bucket.list_blobs(prefix=prefix))
return files | python | def get_files(client, bucket, prefix=''):
"""Lists files/objects on a bucket.
TODO: docstring"""
bucket = client.get_bucket(bucket)
files = list(bucket.list_blobs(prefix=prefix))
return files | [
"def",
"get_files",
"(",
"client",
",",
"bucket",
",",
"prefix",
"=",
"''",
")",
":",
"bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket",
")",
"files",
"=",
"list",
"(",
"bucket",
".",
"list_blobs",
"(",
"prefix",
"=",
"prefix",
")",
")",
"ret... | Lists files/objects on a bucket.
TODO: docstring | [
"Lists",
"files",
"/",
"objects",
"on",
"a",
"bucket",
".",
"TODO",
":",
"docstring"
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/gcloud/storage.py#L64-L70 |
flo-compbio/genometools | genometools/gcloud/storage.py | download_file | def download_file(client, bucket, remote_path, local_path, overwrite=False):
"""Downloads a file from a bucket.
TODO: docstring"""
bucket = client.get_bucket(bucket)
if (not overwrite) and os.path.isfile(local_path):
raise OSError('File already exists!')
with open(local_path, 'wb') as ofh:
bucket.get_blob(remote_path).download_to_file(ofh) | python | def download_file(client, bucket, remote_path, local_path, overwrite=False):
"""Downloads a file from a bucket.
TODO: docstring"""
bucket = client.get_bucket(bucket)
if (not overwrite) and os.path.isfile(local_path):
raise OSError('File already exists!')
with open(local_path, 'wb') as ofh:
bucket.get_blob(remote_path).download_to_file(ofh) | [
"def",
"download_file",
"(",
"client",
",",
"bucket",
",",
"remote_path",
",",
"local_path",
",",
"overwrite",
"=",
"False",
")",
":",
"bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket",
")",
"if",
"(",
"not",
"overwrite",
")",
"and",
"os",
".",
... | Downloads a file from a bucket.
TODO: docstring | [
"Downloads",
"a",
"file",
"from",
"a",
"bucket",
".",
"TODO",
":",
"docstring"
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/gcloud/storage.py#L73-L81 |
flo-compbio/genometools | genometools/gcloud/storage.py | delete_file | def delete_file(client, bucket, remote_path):
"""Deletes a file from a bucket.
TODO: docstring"""
bucket = client.get_bucket(bucket)
bucket.delete_blob(remote_path) | python | def delete_file(client, bucket, remote_path):
"""Deletes a file from a bucket.
TODO: docstring"""
bucket = client.get_bucket(bucket)
bucket.delete_blob(remote_path) | [
"def",
"delete_file",
"(",
"client",
",",
"bucket",
",",
"remote_path",
")",
":",
"bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket",
")",
"bucket",
".",
"delete_blob",
"(",
"remote_path",
")"
] | Deletes a file from a bucket.
TODO: docstring | [
"Deletes",
"a",
"file",
"from",
"a",
"bucket",
".",
"TODO",
":",
"docstring"
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/gcloud/storage.py#L84-L89 |
flo-compbio/genometools | genometools/gcloud/storage.py | upload_file | def upload_file(client, bucket, local_path, remote_path, overwrite=False):
"""Uploads a file to a bucket.
TODO: docstring"""
bucket = client.get_bucket(bucket)
blob = storage.Blob(remote_path, bucket)
if (not overwrite) and blob.exists():
raise Conflict('File/object already exists on the bucket!')
blob.upload_from_filename(local_path) | python | def upload_file(client, bucket, local_path, remote_path, overwrite=False):
"""Uploads a file to a bucket.
TODO: docstring"""
bucket = client.get_bucket(bucket)
blob = storage.Blob(remote_path, bucket)
if (not overwrite) and blob.exists():
raise Conflict('File/object already exists on the bucket!')
blob.upload_from_filename(local_path) | [
"def",
"upload_file",
"(",
"client",
",",
"bucket",
",",
"local_path",
",",
"remote_path",
",",
"overwrite",
"=",
"False",
")",
":",
"bucket",
"=",
"client",
".",
"get_bucket",
"(",
"bucket",
")",
"blob",
"=",
"storage",
".",
"Blob",
"(",
"remote_path",
... | Uploads a file to a bucket.
TODO: docstring | [
"Uploads",
"a",
"file",
"to",
"a",
"bucket",
".",
"TODO",
":",
"docstring"
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/gcloud/storage.py#L92-L100 |
acorg/dark-matter | dark/alignments.py | ReadsAlignmentsFilter.filter | def filter(self, readAlignments):
"""
Filter a read's alignments.
@param readAlignments: A C{ReadAlignments} instance.
@return: A C{ReadAlignments} instance if the passed
C{readAlignments} is not filtered out, else C{False}.
"""
# Implementation notes:
#
# 1. The order in which we carry out the filtering actions can make
# a big difference in the result of this function. The current
# ordering is based on what seems reasonable - it may not be the
# best way to do things. E.g., if maxHspsPerHit is 1 and there
# is a title regex, which should we perform first?
#
# We perform filtering based on alignment before that based on
# HSPs. That's because there's no point filtering all HSPs for
# an alignment that we end up throwing away anyhow.
#
# 2. This function could be made faster if it first looked at its
# arguments and dynamically created an acceptance function
# (taking a readAlignments as an argument). The acceptance
# function would run without examining the desired filtering
# settings on each call the way the current code does.
#
# 3. A better approach with readIdRegex would be to allow the
# passing of a regex object. Then the caller would make the
# regex with whatever flags they liked (e.g., case insensitive).
#
# Alignment-only (i.e., non-HSP based) filtering.
#
if self.limit is not None and self.count == self.limit:
return False
# Does the read have too many alignments?
if (self.maxAlignmentsPerRead is not None and
len(readAlignments) > self.maxAlignmentsPerRead):
return False
# Filter on the read id.
if (self.readIdRegex and
self.readIdRegex.search(readAlignments.read.id) is None):
return False
if self.titleFilter:
# Remove alignments against sequences whose titles are
# unacceptable.
wantedAlignments = []
for alignment in readAlignments:
if (self.titleFilter.accept(alignment.subjectTitle) !=
TitleFilter.REJECT):
wantedAlignments.append(alignment)
if wantedAlignments:
readAlignments[:] = wantedAlignments
else:
return False
# Only return alignments that are against sequences of the
# desired length.
minSequenceLen = self.minSequenceLen
maxSequenceLen = self.maxSequenceLen
if minSequenceLen is not None or maxSequenceLen is not None:
wantedAlignments = []
for alignment in readAlignments:
length = alignment.subjectLength
if not ((minSequenceLen is not None and
length < minSequenceLen) or
(maxSequenceLen is not None and
length > self.maxSequenceLen)):
wantedAlignments.append(alignment)
if wantedAlignments:
readAlignments[:] = wantedAlignments
else:
return False
if self.taxonomy is not None:
wantedAlignments = []
for alignment in readAlignments:
lineage = self.lineageFetcher.lineage(alignment.subjectTitle)
if lineage:
for taxonomyIdAndScientificName in lineage:
if self.taxonomy in taxonomyIdAndScientificName:
wantedAlignments.append(alignment)
else:
# No lineage info was found. Keep the alignment
# since we can't rule it out. We could add another
# option to control this.
wantedAlignments.append(alignment)
if wantedAlignments:
readAlignments[:] = wantedAlignments
else:
return False
if self.oneAlignmentPerRead and readAlignments:
readAlignments[:] = [bestAlignment(readAlignments)]
#
# From here on we do only HSP-based filtering.
#
# Throw out any unwanted HSPs due to maxHspsPerHit.
if self.maxHspsPerHit is not None:
for alignment in readAlignments:
hsps = alignment.hsps
if len(hsps) > self.maxHspsPerHit:
alignment.hsps = hsps[:self.maxHspsPerHit]
# Throw out HSPs whose scores are not good enough.
if self.scoreCutoff is not None:
wantedAlignments = []
for alignment in readAlignments:
hsps = alignment.hsps
wantedHsps = []
for hsp in hsps:
if hsp.betterThan(self.scoreCutoff):
wantedHsps.append(hsp)
if wantedHsps:
alignment.hsps = wantedHsps
wantedAlignments.append(alignment)
if wantedAlignments:
readAlignments[:] = wantedAlignments
else:
return False
# Throw out HSPs that don't match in the desired place on the
# matched sequence.
minStart = self.minStart
maxStop = self.maxStop
if minStart is not None or maxStop is not None:
wantedAlignments = []
for alignment in readAlignments:
hsps = alignment.hsps
wantedHsps = []
for hsp in hsps:
if not ((minStart is not None and
hsp.readStartInSubject < minStart) or
(maxStop is not None and
hsp.readEndInSubject > maxStop)):
wantedHsps.append(hsp)
if wantedHsps:
alignment.hsps = wantedHsps
wantedAlignments.append(alignment)
if wantedAlignments:
readAlignments[:] = wantedAlignments
else:
return False
self.count += 1
return readAlignments | python | def filter(self, readAlignments):
"""
Filter a read's alignments.
@param readAlignments: A C{ReadAlignments} instance.
@return: A C{ReadAlignments} instance if the passed
C{readAlignments} is not filtered out, else C{False}.
"""
# Implementation notes:
#
# 1. The order in which we carry out the filtering actions can make
# a big difference in the result of this function. The current
# ordering is based on what seems reasonable - it may not be the
# best way to do things. E.g., if maxHspsPerHit is 1 and there
# is a title regex, which should we perform first?
#
# We perform filtering based on alignment before that based on
# HSPs. That's because there's no point filtering all HSPs for
# an alignment that we end up throwing away anyhow.
#
# 2. This function could be made faster if it first looked at its
# arguments and dynamically created an acceptance function
# (taking a readAlignments as an argument). The acceptance
# function would run without examining the desired filtering
# settings on each call the way the current code does.
#
# 3. A better approach with readIdRegex would be to allow the
# passing of a regex object. Then the caller would make the
# regex with whatever flags they liked (e.g., case insensitive).
#
# Alignment-only (i.e., non-HSP based) filtering.
#
if self.limit is not None and self.count == self.limit:
return False
# Does the read have too many alignments?
if (self.maxAlignmentsPerRead is not None and
len(readAlignments) > self.maxAlignmentsPerRead):
return False
# Filter on the read id.
if (self.readIdRegex and
self.readIdRegex.search(readAlignments.read.id) is None):
return False
if self.titleFilter:
# Remove alignments against sequences whose titles are
# unacceptable.
wantedAlignments = []
for alignment in readAlignments:
if (self.titleFilter.accept(alignment.subjectTitle) !=
TitleFilter.REJECT):
wantedAlignments.append(alignment)
if wantedAlignments:
readAlignments[:] = wantedAlignments
else:
return False
# Only return alignments that are against sequences of the
# desired length.
minSequenceLen = self.minSequenceLen
maxSequenceLen = self.maxSequenceLen
if minSequenceLen is not None or maxSequenceLen is not None:
wantedAlignments = []
for alignment in readAlignments:
length = alignment.subjectLength
if not ((minSequenceLen is not None and
length < minSequenceLen) or
(maxSequenceLen is not None and
length > self.maxSequenceLen)):
wantedAlignments.append(alignment)
if wantedAlignments:
readAlignments[:] = wantedAlignments
else:
return False
if self.taxonomy is not None:
wantedAlignments = []
for alignment in readAlignments:
lineage = self.lineageFetcher.lineage(alignment.subjectTitle)
if lineage:
for taxonomyIdAndScientificName in lineage:
if self.taxonomy in taxonomyIdAndScientificName:
wantedAlignments.append(alignment)
else:
# No lineage info was found. Keep the alignment
# since we can't rule it out. We could add another
# option to control this.
wantedAlignments.append(alignment)
if wantedAlignments:
readAlignments[:] = wantedAlignments
else:
return False
if self.oneAlignmentPerRead and readAlignments:
readAlignments[:] = [bestAlignment(readAlignments)]
#
# From here on we do only HSP-based filtering.
#
# Throw out any unwanted HSPs due to maxHspsPerHit.
if self.maxHspsPerHit is not None:
for alignment in readAlignments:
hsps = alignment.hsps
if len(hsps) > self.maxHspsPerHit:
alignment.hsps = hsps[:self.maxHspsPerHit]
# Throw out HSPs whose scores are not good enough.
if self.scoreCutoff is not None:
wantedAlignments = []
for alignment in readAlignments:
hsps = alignment.hsps
wantedHsps = []
for hsp in hsps:
if hsp.betterThan(self.scoreCutoff):
wantedHsps.append(hsp)
if wantedHsps:
alignment.hsps = wantedHsps
wantedAlignments.append(alignment)
if wantedAlignments:
readAlignments[:] = wantedAlignments
else:
return False
# Throw out HSPs that don't match in the desired place on the
# matched sequence.
minStart = self.minStart
maxStop = self.maxStop
if minStart is not None or maxStop is not None:
wantedAlignments = []
for alignment in readAlignments:
hsps = alignment.hsps
wantedHsps = []
for hsp in hsps:
if not ((minStart is not None and
hsp.readStartInSubject < minStart) or
(maxStop is not None and
hsp.readEndInSubject > maxStop)):
wantedHsps.append(hsp)
if wantedHsps:
alignment.hsps = wantedHsps
wantedAlignments.append(alignment)
if wantedAlignments:
readAlignments[:] = wantedAlignments
else:
return False
self.count += 1
return readAlignments | [
"def",
"filter",
"(",
"self",
",",
"readAlignments",
")",
":",
"# Implementation notes:",
"#",
"# 1. The order in which we carry out the filtering actions can make",
"# a big difference in the result of this function. The current",
"# ordering is based on what seems reasonable - it may... | Filter a read's alignments.
@param readAlignments: A C{ReadAlignments} instance.
@return: A C{ReadAlignments} instance if the passed
C{readAlignments} is not filtered out, else C{False}. | [
"Filter",
"a",
"read",
"s",
"alignments",
"."
] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/alignments.py#L173-L324 |
acorg/dark-matter | dark/alignments.py | ReadsAlignments.hsps | def hsps(self):
"""
Provide access to all HSPs for all alignments of all reads.
@return: A generator that yields HSPs (or LSPs).
"""
for readAlignments in self:
for alignment in readAlignments:
for hsp in alignment.hsps:
yield hsp | python | def hsps(self):
"""
Provide access to all HSPs for all alignments of all reads.
@return: A generator that yields HSPs (or LSPs).
"""
for readAlignments in self:
for alignment in readAlignments:
for hsp in alignment.hsps:
yield hsp | [
"def",
"hsps",
"(",
"self",
")",
":",
"for",
"readAlignments",
"in",
"self",
":",
"for",
"alignment",
"in",
"readAlignments",
":",
"for",
"hsp",
"in",
"alignment",
".",
"hsps",
":",
"yield",
"hsp"
] | Provide access to all HSPs for all alignments of all reads.
@return: A generator that yields HSPs (or LSPs). | [
"Provide",
"access",
"to",
"all",
"HSPs",
"for",
"all",
"alignments",
"of",
"all",
"reads",
"."
] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/alignments.py#L375-L384 |
acorg/dark-matter | dark/alignments.py | ReadsAlignments.filter | def filter(self, **kwargs):
"""
Add a filter to this C{readsAlignments}.
@param kwargs: Keyword arguments, as accepted by
C{ReadsAlignmentsFilter}.
@return: C{self}
"""
self._filters.append(ReadsAlignmentsFilter(**kwargs).filter)
return self | python | def filter(self, **kwargs):
"""
Add a filter to this C{readsAlignments}.
@param kwargs: Keyword arguments, as accepted by
C{ReadsAlignmentsFilter}.
@return: C{self}
"""
self._filters.append(ReadsAlignmentsFilter(**kwargs).filter)
return self | [
"def",
"filter",
"(",
"self",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_filters",
".",
"append",
"(",
"ReadsAlignmentsFilter",
"(",
"*",
"*",
"kwargs",
")",
".",
"filter",
")",
"return",
"self"
] | Add a filter to this C{readsAlignments}.
@param kwargs: Keyword arguments, as accepted by
C{ReadsAlignmentsFilter}.
@return: C{self} | [
"Add",
"a",
"filter",
"to",
"this",
"C",
"{",
"readsAlignments",
"}",
"."
] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/alignments.py#L416-L425 |
flo-compbio/genometools | genometools/ensembl/annotations.py | get_annotation_urls_and_checksums | def get_annotation_urls_and_checksums(species, release=None, ftp=None):
"""Get FTP URLs and checksums for Ensembl genome annotations.
Parameters
----------
species : str or list of str
The species or list of species for which to get genome annotations
(e.g., "Homo_sapiens").
release : int, optional
The release number to look up. If `None`, use latest release. [None]
ftp : ftplib.FTP, optional
The FTP connection to use. If `None`, the function will open and close
its own connection using user "anonymous".
"""
### type checks
assert isinstance(species, (str, _oldstr)) or isinstance(species, Iterable)
if release is not None:
assert isinstance(release, int)
if ftp is not None:
assert isinstance(ftp, ftplib.FTP)
### open FTP connection if necessary
close_connection = False
ftp_server = 'ftp.ensembl.org'
ftp_user = 'anonymous'
if ftp is None:
ftp = ftplib.FTP(ftp_server)
ftp.login(ftp_user)
close_connection = True
### determine release if necessary
if release is None:
# use latest release
release = util.get_latest_release(ftp=ftp)
species_data = OrderedDict()
if isinstance(species, (str, _oldstr)):
species_list = [species]
else:
species_list = species
for spec in species_list:
# get the GTF file URL
# => since the naming scheme isn't consistent across species,
# we're using a flexible scheme here to find the right file
species_dir = '/pub/release-%d/gtf/%s' % (release, spec.lower())
data = []
ftp.dir(species_dir, data.append)
gtf_file = []
for d in data:
i = d.rindex(' ')
fn = d[(i + 1):]
if fn.endswith('.%d.gtf.gz' % release):
gtf_file.append(fn)
assert len(gtf_file) == 1
gtf_file = gtf_file[0]
_LOGGER.debug('GTF file: %s', gtf_file)
### get the checksum for the GTF file
checksum_url = '/'.join([species_dir, 'CHECKSUMS'])
file_checksums = util.get_file_checksums(checksum_url, ftp=ftp)
gtf_checksum = file_checksums[gtf_file]
_LOGGER.debug('GTF file checksum: %d', gtf_checksum)
gtf_url = 'ftp://%s%s/%s' %(ftp_server, species_dir, gtf_file)
species_data[spec] = (gtf_url, gtf_checksum)
# close FTP connection, if we opened it
if close_connection:
ftp.close()
return species_data | python | def get_annotation_urls_and_checksums(species, release=None, ftp=None):
"""Get FTP URLs and checksums for Ensembl genome annotations.
Parameters
----------
species : str or list of str
The species or list of species for which to get genome annotations
(e.g., "Homo_sapiens").
release : int, optional
The release number to look up. If `None`, use latest release. [None]
ftp : ftplib.FTP, optional
The FTP connection to use. If `None`, the function will open and close
its own connection using user "anonymous".
"""
### type checks
assert isinstance(species, (str, _oldstr)) or isinstance(species, Iterable)
if release is not None:
assert isinstance(release, int)
if ftp is not None:
assert isinstance(ftp, ftplib.FTP)
### open FTP connection if necessary
close_connection = False
ftp_server = 'ftp.ensembl.org'
ftp_user = 'anonymous'
if ftp is None:
ftp = ftplib.FTP(ftp_server)
ftp.login(ftp_user)
close_connection = True
### determine release if necessary
if release is None:
# use latest release
release = util.get_latest_release(ftp=ftp)
species_data = OrderedDict()
if isinstance(species, (str, _oldstr)):
species_list = [species]
else:
species_list = species
for spec in species_list:
# get the GTF file URL
# => since the naming scheme isn't consistent across species,
# we're using a flexible scheme here to find the right file
species_dir = '/pub/release-%d/gtf/%s' % (release, spec.lower())
data = []
ftp.dir(species_dir, data.append)
gtf_file = []
for d in data:
i = d.rindex(' ')
fn = d[(i + 1):]
if fn.endswith('.%d.gtf.gz' % release):
gtf_file.append(fn)
assert len(gtf_file) == 1
gtf_file = gtf_file[0]
_LOGGER.debug('GTF file: %s', gtf_file)
### get the checksum for the GTF file
checksum_url = '/'.join([species_dir, 'CHECKSUMS'])
file_checksums = util.get_file_checksums(checksum_url, ftp=ftp)
gtf_checksum = file_checksums[gtf_file]
_LOGGER.debug('GTF file checksum: %d', gtf_checksum)
gtf_url = 'ftp://%s%s/%s' %(ftp_server, species_dir, gtf_file)
species_data[spec] = (gtf_url, gtf_checksum)
# close FTP connection, if we opened it
if close_connection:
ftp.close()
return species_data | [
"def",
"get_annotation_urls_and_checksums",
"(",
"species",
",",
"release",
"=",
"None",
",",
"ftp",
"=",
"None",
")",
":",
"### type checks",
"assert",
"isinstance",
"(",
"species",
",",
"(",
"str",
",",
"_oldstr",
")",
")",
"or",
"isinstance",
"(",
"specie... | Get FTP URLs and checksums for Ensembl genome annotations.
Parameters
----------
species : str or list of str
The species or list of species for which to get genome annotations
(e.g., "Homo_sapiens").
release : int, optional
The release number to look up. If `None`, use latest release. [None]
ftp : ftplib.FTP, optional
The FTP connection to use. If `None`, the function will open and close
its own connection using user "anonymous". | [
"Get",
"FTP",
"URLs",
"and",
"checksums",
"for",
"Ensembl",
"genome",
"annotations",
".",
"Parameters",
"----------",
"species",
":",
"str",
"or",
"list",
"of",
"str",
"The",
"species",
"or",
"list",
"of",
"species",
"for",
"which",
"to",
"get",
"genome",
... | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ensembl/annotations.py#L38-L110 |
flo-compbio/genometools | genometools/ensembl/annotations.py | get_genes | def get_genes(
path_or_buffer, valid_biotypes,
chunksize=10000,
chromosome_pattern=None,
#chromosome_pattern=r'(?:\d\d?|MT|X|Y)$',
only_manual=False,
remove_duplicates=True,
sort_by='name'):
"""Get all genes of a specific a biotype from an Ensembl GTF file.
Parameters
----------
path_or_buffer : str or buffer
The GTF file (either the file path or a buffer).
valid_biotypes : set of str
The set of biotypes to include (e.g., "protein_coding").
chromosome_pattern : str, optional
Regular expression specifying valid chromosomes. [None]
only_manual : bool, optional
Whether to exclude annotations with source "ensembl", which
are based only on an automatic annotation pipeline. [True]
remove_duplicates : bool, optional
Whether to remove duplicate annotations, i.e. those with different
Ensembl IDs for the same gene (only applies to protein-coding genes).
[True]
sort_by : str, optional
How to sort the genes. One of:
- 'name': Genes are ordered alphabetically by their name
- 'position': Genes are sorted by their position in the genome.abs
Genes are first sorted by chromosome, then by their
starting base pair position on the chromosome.
- 'position_fancy': Like 'position', but attempts to sort the
chromosomes in a more logical order than strictly
alphabetically. This currently works for human
and mouse genomes.
- 'none': The order from the GTF file is retained.
Default: 'name'
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to the genes found.
Notes
-----
Annotation sources and redundant gene annotations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
According to the Ensembl website (1), the Ensembl gene annotation
GTF files for human, mouse, zebrafish, rat and pig essentially
contain two sets of annotations:
One set consists of all annotations with the "ensembl"
source annotation (column 2). These annotations are the product of
the automated Ensembl "genebuild" pipeline.
The other set consists of genes that are manually annotated by
the HAVANA team (source "havana"), some of which have been merged with the
automatic annotations (source "ensembl_havana").
There seems to be no overlap between genes annotated with "havana" and
"ensembl_havana" sources, respectively. However, there are a few genes for
which only annotations with source "ensembl" exist.
Our policy is therefore to prefer annotations with source "ensembl_havana"
and "havana" over those with source "ensembl", and to only keep annotations
with source "ensembl" if there are no manually curated alternative
annotations.
A special case is represented by mitochondrial genes, which always have the
source "insdc".
(1) see http://www.ensembl.org/Help/Faq?id=152
Removal of duplicates
~~~~~~~~~~~~~~~~~~~~~
Unfortunately, the Ensembl gene annotations contain duplicates for a
handful of genes. For example, for MATR3, there are ENSG00000015479 and
ENSG00000280987, both of type
"ensembl_havana". There seems to be no clear criterion by which we could
rationally and automatically choose one ID over the other, at least based
on information contained
in the GTF file.
We therefore remove duplicates according to following policy:
- For genes on '+' strand, keep the gene with the left-most starting
position.
- For genes on '-' strand, keep the gene with the right-most starting
position.
(In case the starting positions are equal, we keep the one that occurs
first in the GTF file.)
We would like to use the pandas.DataFrame.drop_duplicates() function for
this. So we're temporarily reordering genes using their signed position,
and then we're using the original index (position) to restore the original
order.
"""
chrompat = None
if chromosome_pattern is not None:
chrompat = re.compile(chromosome_pattern)
# make sure this is a set
valid_biotypes = set(valid_biotypes)
c = 0
num_lines = 0
num_chunks = 0
t0 = time.time()
reader = pd.read_csv(path_or_buffer, encoding='ascii', sep='\t',
header=None, comment='#', dtype={0: str},
chunksize=chunksize)
# "insdc" is required to catch the mitochondrial protein-coding genes
valid_sources = set(['ensembl_havana', 'havana', 'insdc'])
if not only_manual:
# we also accept annotations with source "ensembl", which are the
# product of an automated annotation pipeline
valid_sources.add('ensembl')
excluded_chromosomes = set()
# parse GTF file and keep specific information
data = []
for j, df in enumerate(reader):
num_chunks += 1
num_lines += (df.shape[0])
# select rows of type "gene"
sel = (df.iloc[:, 2] == 'gene')
for i, row in df.loc[sel].iterrows():
# parse attribute in 9th column
attr = gtf.parse_attributes(row[8].lstrip(' '))
# check if biotype is valid
biotype = attr['gene_biotype']
if biotype not in valid_biotypes:
continue
chrom = str(row[0])
source = row[1]
if chrompat is not None:
match = chrompat.match(chrom)
if match is None:
excluded_chromosomes.add(chrom)
continue
c += 1
# extract gene ID and gene name
ensembl_id = attr['gene_id']
try:
gene_name = attr['gene_name']
except KeyError:
# no gene name, so we'll use the ID as the name
gene_name = ensembl_id
# We define the position to be the index of the 5'-most base of the gene,
# according its orientation on the chromosome (DNA sequences are always represented 5'->3').
# We encode the strand as the sign of the index
# ("+" strand = positive sign, "-" strand = negative sign).
if row[6] == '+':
pos = int(row[3])-1
elif row[6] == '-':
pos = -(int(row[4])-1)
else:
raise ValueError('Invalid strand information: %s'
% str(row[6]))
length = abs(int(row[4]) - int(row[3])) + 1
#data.append([gene_name, ensembl_id, chrom, pos, length,
# source, biotype])
data.append([ensembl_id, gene_name, chrom, pos, length,
biotype, source])
t1 = time.time()
header = ['ensembl_id', 'name',
'chromosome', 'position', 'length',
'type', 'source']
df = pd.DataFrame(columns=header, data=data)
if 'protein_coding' in valid_biotypes:
if only_manual:
# exclude protein-coding genes that are the based on
# automatic annotation (source "ensembl")
sel = (df['type'] == 'protein_coding' & df['source'] == 'ensembl')
df = df.loc[~sel]
else:
# make sure we only keep protein-coding genes with source "ensembl"
# if no manual annotations are available
sel_pc = df['type'] == 'protein_coding'
sel_ensembl = ((df['source'] == 'ensembl') & sel_pc)
sel_manual = ((df['source'] != 'ensembl') & sel_pc)
redundant_ensembl_genes = set(df.loc[sel_ensembl, 'name'].values) \
& set(df.loc[sel_manual, 'name'].values)
sel_redund = sel_ensembl & df['name'].isin(redundant_ensembl_genes)
num_genes_before = df.shape[0]
df = df.loc[~sel_redund]
num_genes_after = df.shape[0]
_LOGGER.info('Removed %d protein-coding genes with source '
'"ensembl" that also had manual annotations.',
num_genes_before - num_genes_after)
if remove_duplicates:
# remove duplicate annotations (two or more Ensembl IDs for the
# same gene)
num_genes_before = df.shape[0]
sel_pc = df['type'] == 'protein_coding'
df_sel = df.loc[sel_pc].copy()
# sort by signed position value,
# in order to make sure we keep the most "upstream" annotation in
# the next step
df_sel.sort_values('position', kind='mergesort', inplace=True)
# remove duplicates by keeping the first occurrence
#df.drop_duplicates(['chromosome', 'name'], inplace=True)
df_sel.drop_duplicates('name', inplace=True)
# combine protein-coding genes and non-protein-coding genes again
df = pd.concat([df_sel, df.loc[~sel_pc]])
# restore original order using the numeric index
df.sort_index(inplace=True)
num_genes_after = df.shape[0]
_LOGGER.info('Removed %d duplicate protein-coding gene entries',
num_genes_before - num_genes_after)
else:
# print names of genes with duplicate IDs
sel = df['type'] == 'protein_coding'
counts = df.loc[sel]['name'].value_counts()
sel = counts > 1
if sel.sum() > 0:
_LOGGER.info('Protein-coding genes with multiple Ensembl IDs:'
'%s', ', '.join(['%s (%d)' % (k, v)
for k, v in counts[sel].items()]))
if sort_by == 'name':
# sort alphabetically by gene name
df.sort_values(['name'], kind='mergesort', inplace=True)
elif sort_by in ['position', 'position_fancy']:
# sort first by chromsome, then by absolute position
df_sort = pd.concat([df['chromosome'], df['position'].abs()], axis=1)
df_sort = df_sort.sort_values(['chromosome', 'position'],
kind='mergesort')
df = df.loc[df_sort.index]
if sort_by == 'position_fancy':
# Perform "fancy" positional sorting. Numbered chromosomes
# are ordered numerically, and followed by the X, Y, and MT
# chromosomes.
def transform_chrom(chrom):
"""Helper function to obtain specific sort order."""
try:
c = int(chrom)
except:
if chrom in ['X', 'Y']:
return chrom
elif chrom == 'MT':
return '_MT' # sort to the end
else:
return '__' + chrom # sort to the very end
else:
# make sure numbered chromosomes are sorted numerically
return '%02d' % c
chrom_for_sorting = df['chromosome'].apply(transform_chrom)
a = chrom_for_sorting.argsort(kind='mergesort')
df = df.iloc[a]
_LOGGER.info('Performed fancy sorting of chromosomes.')
# set index to ensembl ID
df.set_index('ensembl_id', inplace=True)
_LOGGER.info('Read %d lines (in %d chunks).', num_lines, num_chunks)
_LOGGER.info('Found %d valid gene entries.', c)
_LOGGER.info('Final number of unique genes: %d', df.shape[0])
_LOGGER.info('Parsing time: %.1f s', t1-t0)
# additional statistics
all_chromosomes = list(df['chromosome'].unique())
_LOGGER.info('Valid chromosomes (%d): %s',
len(all_chromosomes),
', '.join(all_chromosomes))
_LOGGER.info('Excluded chromosomes (%d): %s',
len(excluded_chromosomes),
', '.join(sorted(excluded_chromosomes)))
_LOGGER.info('Sources:')
for i, c in df['source'].value_counts().iteritems():
_LOGGER.info('- %s: %d', i, c)
_LOGGER.info('Gene types:')
for i, c in df['type'].value_counts().iteritems():
_LOGGER.info('- %s: %d', i, c)
return df | python | def get_genes(
path_or_buffer, valid_biotypes,
chunksize=10000,
chromosome_pattern=None,
#chromosome_pattern=r'(?:\d\d?|MT|X|Y)$',
only_manual=False,
remove_duplicates=True,
sort_by='name'):
"""Get all genes of a specific a biotype from an Ensembl GTF file.
Parameters
----------
path_or_buffer : str or buffer
The GTF file (either the file path or a buffer).
valid_biotypes : set of str
The set of biotypes to include (e.g., "protein_coding").
chromosome_pattern : str, optional
Regular expression specifying valid chromosomes. [None]
only_manual : bool, optional
Whether to exclude annotations with source "ensembl", which
are based only on an automatic annotation pipeline. [True]
remove_duplicates : bool, optional
Whether to remove duplicate annotations, i.e. those with different
Ensembl IDs for the same gene (only applies to protein-coding genes).
[True]
sort_by : str, optional
How to sort the genes. One of:
- 'name': Genes are ordered alphabetically by their name
- 'position': Genes are sorted by their position in the genome.abs
Genes are first sorted by chromosome, then by their
starting base pair position on the chromosome.
- 'position_fancy': Like 'position', but attempts to sort the
chromosomes in a more logical order than strictly
alphabetically. This currently works for human
and mouse genomes.
- 'none': The order from the GTF file is retained.
Default: 'name'
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to the genes found.
Notes
-----
Annotation sources and redundant gene annotations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
According to the Ensembl website (1), the Ensembl gene annotation
GTF files for human, mouse, zebrafish, rat and pig essentially
contain two sets of annotations:
One set consists of all annotations with the "ensembl"
source annotation (column 2). These annotations are the product of
the automated Ensembl "genebuild" pipeline.
The other set consists of genes that are manually annotated by
the HAVANA team (source "havana"), some of which have been merged with the
automatic annotations (source "ensembl_havana").
There seems to be no overlap between genes annotated with "havana" and
"ensembl_havana" sources, respectively. However, there are a few genes for
which only annotations with source "ensembl" exist.
Our policy is therefore to prefer annotations with source "ensembl_havana"
and "havana" over those with source "ensembl", and to only keep annotations
with source "ensembl" if there are no manually curated alternative
annotations.
A special case is represented by mitochondrial genes, which always have the
source "insdc".
(1) see http://www.ensembl.org/Help/Faq?id=152
Removal of duplicates
~~~~~~~~~~~~~~~~~~~~~
Unfortunately, the Ensembl gene annotations contain duplicates for a
handful of genes. For example, for MATR3, there are ENSG00000015479 and
ENSG00000280987, both of type
"ensembl_havana". There seems to be no clear criterion by which we could
rationally and automatically choose one ID over the other, at least based
on information contained
in the GTF file.
We therefore remove duplicates according to following policy:
- For genes on '+' strand, keep the gene with the left-most starting
position.
- For genes on '-' strand, keep the gene with the right-most starting
position.
(In case the starting positions are equal, we keep the one that occurs
first in the GTF file.)
We would like to use the pandas.DataFrame.drop_duplicates() function for
this. So we're temporarily reordering genes using their signed position,
and then we're using the original index (position) to restore the original
order.
"""
chrompat = None
if chromosome_pattern is not None:
chrompat = re.compile(chromosome_pattern)
# make sure this is a set
valid_biotypes = set(valid_biotypes)
c = 0
num_lines = 0
num_chunks = 0
t0 = time.time()
reader = pd.read_csv(path_or_buffer, encoding='ascii', sep='\t',
header=None, comment='#', dtype={0: str},
chunksize=chunksize)
# "insdc" is required to catch the mitochondrial protein-coding genes
valid_sources = set(['ensembl_havana', 'havana', 'insdc'])
if not only_manual:
# we also accept annotations with source "ensembl", which are the
# product of an automated annotation pipeline
valid_sources.add('ensembl')
excluded_chromosomes = set()
# parse GTF file and keep specific information
data = []
for j, df in enumerate(reader):
num_chunks += 1
num_lines += (df.shape[0])
# select rows of type "gene"
sel = (df.iloc[:, 2] == 'gene')
for i, row in df.loc[sel].iterrows():
# parse attribute in 9th column
attr = gtf.parse_attributes(row[8].lstrip(' '))
# check if biotype is valid
biotype = attr['gene_biotype']
if biotype not in valid_biotypes:
continue
chrom = str(row[0])
source = row[1]
if chrompat is not None:
match = chrompat.match(chrom)
if match is None:
excluded_chromosomes.add(chrom)
continue
c += 1
# extract gene ID and gene name
ensembl_id = attr['gene_id']
try:
gene_name = attr['gene_name']
except KeyError:
# no gene name, so we'll use the ID as the name
gene_name = ensembl_id
# We define the position to be the index of the 5'-most base of the gene,
# according its orientation on the chromosome (DNA sequences are always represented 5'->3').
# We encode the strand as the sign of the index
# ("+" strand = positive sign, "-" strand = negative sign).
if row[6] == '+':
pos = int(row[3])-1
elif row[6] == '-':
pos = -(int(row[4])-1)
else:
raise ValueError('Invalid strand information: %s'
% str(row[6]))
length = abs(int(row[4]) - int(row[3])) + 1
#data.append([gene_name, ensembl_id, chrom, pos, length,
# source, biotype])
data.append([ensembl_id, gene_name, chrom, pos, length,
biotype, source])
t1 = time.time()
header = ['ensembl_id', 'name',
'chromosome', 'position', 'length',
'type', 'source']
df = pd.DataFrame(columns=header, data=data)
if 'protein_coding' in valid_biotypes:
if only_manual:
# exclude protein-coding genes that are the based on
# automatic annotation (source "ensembl")
sel = (df['type'] == 'protein_coding' & df['source'] == 'ensembl')
df = df.loc[~sel]
else:
# make sure we only keep protein-coding genes with source "ensembl"
# if no manual annotations are available
sel_pc = df['type'] == 'protein_coding'
sel_ensembl = ((df['source'] == 'ensembl') & sel_pc)
sel_manual = ((df['source'] != 'ensembl') & sel_pc)
redundant_ensembl_genes = set(df.loc[sel_ensembl, 'name'].values) \
& set(df.loc[sel_manual, 'name'].values)
sel_redund = sel_ensembl & df['name'].isin(redundant_ensembl_genes)
num_genes_before = df.shape[0]
df = df.loc[~sel_redund]
num_genes_after = df.shape[0]
_LOGGER.info('Removed %d protein-coding genes with source '
'"ensembl" that also had manual annotations.',
num_genes_before - num_genes_after)
if remove_duplicates:
# remove duplicate annotations (two or more Ensembl IDs for the
# same gene)
num_genes_before = df.shape[0]
sel_pc = df['type'] == 'protein_coding'
df_sel = df.loc[sel_pc].copy()
# sort by signed position value,
# in order to make sure we keep the most "upstream" annotation in
# the next step
df_sel.sort_values('position', kind='mergesort', inplace=True)
# remove duplicates by keeping the first occurrence
#df.drop_duplicates(['chromosome', 'name'], inplace=True)
df_sel.drop_duplicates('name', inplace=True)
# combine protein-coding genes and non-protein-coding genes again
df = pd.concat([df_sel, df.loc[~sel_pc]])
# restore original order using the numeric index
df.sort_index(inplace=True)
num_genes_after = df.shape[0]
_LOGGER.info('Removed %d duplicate protein-coding gene entries',
num_genes_before - num_genes_after)
else:
# print names of genes with duplicate IDs
sel = df['type'] == 'protein_coding'
counts = df.loc[sel]['name'].value_counts()
sel = counts > 1
if sel.sum() > 0:
_LOGGER.info('Protein-coding genes with multiple Ensembl IDs:'
'%s', ', '.join(['%s (%d)' % (k, v)
for k, v in counts[sel].items()]))
if sort_by == 'name':
# sort alphabetically by gene name
df.sort_values(['name'], kind='mergesort', inplace=True)
elif sort_by in ['position', 'position_fancy']:
# sort first by chromsome, then by absolute position
df_sort = pd.concat([df['chromosome'], df['position'].abs()], axis=1)
df_sort = df_sort.sort_values(['chromosome', 'position'],
kind='mergesort')
df = df.loc[df_sort.index]
if sort_by == 'position_fancy':
# Perform "fancy" positional sorting. Numbered chromosomes
# are ordered numerically, and followed by the X, Y, and MT
# chromosomes.
def transform_chrom(chrom):
"""Helper function to obtain specific sort order."""
try:
c = int(chrom)
except:
if chrom in ['X', 'Y']:
return chrom
elif chrom == 'MT':
return '_MT' # sort to the end
else:
return '__' + chrom # sort to the very end
else:
# make sure numbered chromosomes are sorted numerically
return '%02d' % c
chrom_for_sorting = df['chromosome'].apply(transform_chrom)
a = chrom_for_sorting.argsort(kind='mergesort')
df = df.iloc[a]
_LOGGER.info('Performed fancy sorting of chromosomes.')
# set index to ensembl ID
df.set_index('ensembl_id', inplace=True)
_LOGGER.info('Read %d lines (in %d chunks).', num_lines, num_chunks)
_LOGGER.info('Found %d valid gene entries.', c)
_LOGGER.info('Final number of unique genes: %d', df.shape[0])
_LOGGER.info('Parsing time: %.1f s', t1-t0)
# additional statistics
all_chromosomes = list(df['chromosome'].unique())
_LOGGER.info('Valid chromosomes (%d): %s',
len(all_chromosomes),
', '.join(all_chromosomes))
_LOGGER.info('Excluded chromosomes (%d): %s',
len(excluded_chromosomes),
', '.join(sorted(excluded_chromosomes)))
_LOGGER.info('Sources:')
for i, c in df['source'].value_counts().iteritems():
_LOGGER.info('- %s: %d', i, c)
_LOGGER.info('Gene types:')
for i, c in df['type'].value_counts().iteritems():
_LOGGER.info('- %s: %d', i, c)
return df | [
"def",
"get_genes",
"(",
"path_or_buffer",
",",
"valid_biotypes",
",",
"chunksize",
"=",
"10000",
",",
"chromosome_pattern",
"=",
"None",
",",
"#chromosome_pattern=r'(?:\\d\\d?|MT|X|Y)$',",
"only_manual",
"=",
"False",
",",
"remove_duplicates",
"=",
"True",
",",
"sort... | Get all genes of a specific a biotype from an Ensembl GTF file.
Parameters
----------
path_or_buffer : str or buffer
The GTF file (either the file path or a buffer).
valid_biotypes : set of str
The set of biotypes to include (e.g., "protein_coding").
chromosome_pattern : str, optional
Regular expression specifying valid chromosomes. [None]
only_manual : bool, optional
Whether to exclude annotations with source "ensembl", which
are based only on an automatic annotation pipeline. [True]
remove_duplicates : bool, optional
Whether to remove duplicate annotations, i.e. those with different
Ensembl IDs for the same gene (only applies to protein-coding genes).
[True]
sort_by : str, optional
How to sort the genes. One of:
- 'name': Genes are ordered alphabetically by their name
- 'position': Genes are sorted by their position in the genome.abs
Genes are first sorted by chromosome, then by their
starting base pair position on the chromosome.
- 'position_fancy': Like 'position', but attempts to sort the
chromosomes in a more logical order than strictly
alphabetically. This currently works for human
and mouse genomes.
- 'none': The order from the GTF file is retained.
Default: 'name'
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to the genes found.
Notes
-----
Annotation sources and redundant gene annotations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
According to the Ensembl website (1), the Ensembl gene annotation
GTF files for human, mouse, zebrafish, rat and pig essentially
contain two sets of annotations:
One set consists of all annotations with the "ensembl"
source annotation (column 2). These annotations are the product of
the automated Ensembl "genebuild" pipeline.
The other set consists of genes that are manually annotated by
the HAVANA team (source "havana"), some of which have been merged with the
automatic annotations (source "ensembl_havana").
There seems to be no overlap between genes annotated with "havana" and
"ensembl_havana" sources, respectively. However, there are a few genes for
which only annotations with source "ensembl" exist.
Our policy is therefore to prefer annotations with source "ensembl_havana"
and "havana" over those with source "ensembl", and to only keep annotations
with source "ensembl" if there are no manually curated alternative
annotations.
A special case is represented by mitochondrial genes, which always have the
source "insdc".
(1) see http://www.ensembl.org/Help/Faq?id=152
Removal of duplicates
~~~~~~~~~~~~~~~~~~~~~
Unfortunately, the Ensembl gene annotations contain duplicates for a
handful of genes. For example, for MATR3, there are ENSG00000015479 and
ENSG00000280987, both of type
"ensembl_havana". There seems to be no clear criterion by which we could
rationally and automatically choose one ID over the other, at least based
on information contained
in the GTF file.
We therefore remove duplicates according to following policy:
- For genes on '+' strand, keep the gene with the left-most starting
position.
- For genes on '-' strand, keep the gene with the right-most starting
position.
(In case the starting positions are equal, we keep the one that occurs
first in the GTF file.)
We would like to use the pandas.DataFrame.drop_duplicates() function for
this. So we're temporarily reordering genes using their signed position,
and then we're using the original index (position) to restore the original
order. | [
"Get",
"all",
"genes",
"of",
"a",
"specific",
"a",
"biotype",
"from",
"an",
"Ensembl",
"GTF",
"file",
".",
"Parameters",
"----------",
"path_or_buffer",
":",
"str",
"or",
"buffer",
"The",
"GTF",
"file",
"(",
"either",
"the",
"file",
"path",
"or",
"a",
"b... | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ensembl/annotations.py#L113-L422 |
flo-compbio/genometools | genometools/ensembl/annotations.py | get_protein_coding_genes | def get_protein_coding_genes(
path_or_buffer,
include_polymorphic_pseudogenes=True,
remove_duplicates=True,
**kwargs):
r"""Get list of all protein-coding genes based on Ensembl GTF file.
Parameters
----------
See :func:`get_genes` function.
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to protein-coding genes.
"""
valid_biotypes = set(['protein_coding'])
if include_polymorphic_pseudogenes:
valid_biotypes.add('polymorphic_pseudogene')
df = get_genes(path_or_buffer, valid_biotypes,
remove_duplicates=remove_duplicates, **kwargs)
return df | python | def get_protein_coding_genes(
path_or_buffer,
include_polymorphic_pseudogenes=True,
remove_duplicates=True,
**kwargs):
r"""Get list of all protein-coding genes based on Ensembl GTF file.
Parameters
----------
See :func:`get_genes` function.
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to protein-coding genes.
"""
valid_biotypes = set(['protein_coding'])
if include_polymorphic_pseudogenes:
valid_biotypes.add('polymorphic_pseudogene')
df = get_genes(path_or_buffer, valid_biotypes,
remove_duplicates=remove_duplicates, **kwargs)
return df | [
"def",
"get_protein_coding_genes",
"(",
"path_or_buffer",
",",
"include_polymorphic_pseudogenes",
"=",
"True",
",",
"remove_duplicates",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"valid_biotypes",
"=",
"set",
"(",
"[",
"'protein_coding'",
"]",
")",
"if",
"... | r"""Get list of all protein-coding genes based on Ensembl GTF file.
Parameters
----------
See :func:`get_genes` function.
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to protein-coding genes. | [
"r",
"Get",
"list",
"of",
"all",
"protein",
"-",
"coding",
"genes",
"based",
"on",
"Ensembl",
"GTF",
"file",
".",
"Parameters",
"----------",
"See",
":",
"func",
":",
"get_genes",
"function",
"."
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ensembl/annotations.py#L425-L448 |
flo-compbio/genometools | genometools/ensembl/annotations.py | get_linc_rna_genes | def get_linc_rna_genes(
path_or_buffer,
remove_duplicates=True,
**kwargs):
r"""Get list of all protein-coding genes based on Ensembl GTF file.
Parameters
----------
See :func:`get_genes` function.
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to protein-coding genes.
"""
valid_biotypes = set(['lincRNA'])
df = get_genes(path_or_buffer, valid_biotypes,
remove_duplicates=remove_duplicates, **kwargs)
return df | python | def get_linc_rna_genes(
path_or_buffer,
remove_duplicates=True,
**kwargs):
r"""Get list of all protein-coding genes based on Ensembl GTF file.
Parameters
----------
See :func:`get_genes` function.
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to protein-coding genes.
"""
valid_biotypes = set(['lincRNA'])
df = get_genes(path_or_buffer, valid_biotypes,
remove_duplicates=remove_duplicates, **kwargs)
return df | [
"def",
"get_linc_rna_genes",
"(",
"path_or_buffer",
",",
"remove_duplicates",
"=",
"True",
",",
"*",
"*",
"kwargs",
")",
":",
"valid_biotypes",
"=",
"set",
"(",
"[",
"'lincRNA'",
"]",
")",
"df",
"=",
"get_genes",
"(",
"path_or_buffer",
",",
"valid_biotypes",
... | r"""Get list of all protein-coding genes based on Ensembl GTF file.
Parameters
----------
See :func:`get_genes` function.
Returns
-------
`pandas.DataFrame`
Table with rows corresponding to protein-coding genes. | [
"r",
"Get",
"list",
"of",
"all",
"protein",
"-",
"coding",
"genes",
"based",
"on",
"Ensembl",
"GTF",
"file",
".",
"Parameters",
"----------",
"See",
":",
"func",
":",
"get_genes",
"function",
"."
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ensembl/annotations.py#L451-L471 |
nkavaldj/myhdl_lib | myhdl_lib/simulation/payload_generator.py | payload_generator | def payload_generator(levels=0, dimensions=None, sequential=True, string=True, max_int=255, max_pkt_len=150, max_dim_size=3):
""" Generates random data in a form of nested lists, to be used as a test packet payload
levels - nesting levels:
0 - list of integers (single packet payload)
1 - list of lists of integers (list of packets)
2 - list of lists of lists of integers (list of packets, each packet containing list of messages)
etc.
dimensions - specifies the size of the lists; the specification can be partial, as what is not specified will be chosen randomly
if level=0:
dimensions=None - list of integers of random length in the range [1,max_pkt_len]
dimensions=5 - list of integers of length 5
if level=1:
dimensions=None - list of random length in the range [1, max_dim_size], each element is an integer list of random length in the range [1, max_pkt_len]
dimensions=5 - list of length 5, each element is an integer list of random length in the range [1, max_pkt_len]
dimensions=[2, 3] - list of 2 elements, each element is an integer list, the first one of length 2, the second on of length 3
if level=2:
dimensions=None - list of random length in the range [1, max_dim_size], each element is a list of random length in the range [1, max_dim_size],
each element of the second level lists is an integer list of random length in the range [1, max_pkt_len]
dimensions=5 - list of length 5, each element is a list of random length in the range [1, max_dim_size], each element of the second level lists
is an integer list of random length in the range [1, max_pkt_len]
dimensions=[2, 3] - list of 2 elements, each element is a list, the first one of length 2, the second on of length 3, each element of the second
level lists is an integer list of random length in the range [1, max_pkt_len]
dimensions=[[3, 5], [6, 7, 8]] - list of 2 elements, each element is a list, the first one of length 2, the second on of length 3, each element of the second
level lists is an integer list of length respectively 3, 5, 6, 7, 8
if level=3:
etc.
It is possible to specify dimensions partially, e.g dimensions=[[3, None], [6, 7, 8]] or dimensions=[None, [6, 7, 8]]. Dimensions that are not specified are
chosen randomly
sequential - if True, the payload is a cyclic sequence if integers in the range [0, max_int]
if False, the payload is a sequence of random integers in the range [0, max_int]
string - if True, the payload is a byte string, e.g. '\x04\x05\x06\x07' as each byte is in the range [0, min(255, max_int)]
if False, the payload is a list of integers in the range [0, max_int]
max_int - Upper limit for the integer range for the payload
max_pkt_len - Upper limit for the randomly chosen payload length
max_dim_size - Upper limit for the randomly chosen dimension sizes
"""
MAX_INT = max_int
if string:
MAX_INT = min(255, max_int)
MAX_DIM_SIZE = max_dim_size
MAX_PKT_LEN = max_pkt_len
def next_i():
''' Generates next number from a cyclic integer sequence [0..MAX_INT]'''
next_i.i = (next_i.i+1)%(MAX_INT+1)
return next_i.i
next_i.i = 0;
def payload(length):
''' Generates payload of given length '''
if (sequential) :
pld = [next_i() for _ in xrange(length)]
else:
pld = [random.randint(0, MAX_INT) for _ in xrange(length)]
if string:
pld = str(bytearray(pld))
return pld
def next_level(level, pld):
''' Generates the next level of nested lists '''
if level>0:
# Next level of nested lists
if pld==None:
pld = random.randint(1, MAX_DIM_SIZE)
if isinstance(pld, int):
pld = pld*[None]
if isinstance(pld, list):
for i in range(len(pld)):
pld[i] = next_level(level-1, pld[i])
return pld
else:
raise TypeError("Expected None, int or list, got {}: {}".format(type(pld), pld))
elif level==0:
# Generate payload
if pld==None:
pld = random.randint(1, MAX_PKT_LEN)
if isinstance(pld, int):
return payload(pld)
else:
raise TypeError("Expected None or int, got {}: {}".format(type(pld), pld))
else:
raise ValueError("Expected int>=0, got {}".format(level))
pld = copy.deepcopy(dimensions)
pld = next_level(levels, pld)
return pld | python | def payload_generator(levels=0, dimensions=None, sequential=True, string=True, max_int=255, max_pkt_len=150, max_dim_size=3):
""" Generates random data in a form of nested lists, to be used as a test packet payload
levels - nesting levels:
0 - list of integers (single packet payload)
1 - list of lists of integers (list of packets)
2 - list of lists of lists of integers (list of packets, each packet containing list of messages)
etc.
dimensions - specifies the size of the lists; the specification can be partial, as what is not specified will be chosen randomly
if level=0:
dimensions=None - list of integers of random length in the range [1,max_pkt_len]
dimensions=5 - list of integers of length 5
if level=1:
dimensions=None - list of random length in the range [1, max_dim_size], each element is an integer list of random length in the range [1, max_pkt_len]
dimensions=5 - list of length 5, each element is an integer list of random length in the range [1, max_pkt_len]
dimensions=[2, 3] - list of 2 elements, each element is an integer list, the first one of length 2, the second on of length 3
if level=2:
dimensions=None - list of random length in the range [1, max_dim_size], each element is a list of random length in the range [1, max_dim_size],
each element of the second level lists is an integer list of random length in the range [1, max_pkt_len]
dimensions=5 - list of length 5, each element is a list of random length in the range [1, max_dim_size], each element of the second level lists
is an integer list of random length in the range [1, max_pkt_len]
dimensions=[2, 3] - list of 2 elements, each element is a list, the first one of length 2, the second on of length 3, each element of the second
level lists is an integer list of random length in the range [1, max_pkt_len]
dimensions=[[3, 5], [6, 7, 8]] - list of 2 elements, each element is a list, the first one of length 2, the second on of length 3, each element of the second
level lists is an integer list of length respectively 3, 5, 6, 7, 8
if level=3:
etc.
It is possible to specify dimensions partially, e.g dimensions=[[3, None], [6, 7, 8]] or dimensions=[None, [6, 7, 8]]. Dimensions that are not specified are
chosen randomly
sequential - if True, the payload is a cyclic sequence if integers in the range [0, max_int]
if False, the payload is a sequence of random integers in the range [0, max_int]
string - if True, the payload is a byte string, e.g. '\x04\x05\x06\x07' as each byte is in the range [0, min(255, max_int)]
if False, the payload is a list of integers in the range [0, max_int]
max_int - Upper limit for the integer range for the payload
max_pkt_len - Upper limit for the randomly chosen payload length
max_dim_size - Upper limit for the randomly chosen dimension sizes
"""
MAX_INT = max_int
if string:
MAX_INT = min(255, max_int)
MAX_DIM_SIZE = max_dim_size
MAX_PKT_LEN = max_pkt_len
def next_i():
''' Generates next number from a cyclic integer sequence [0..MAX_INT]'''
next_i.i = (next_i.i+1)%(MAX_INT+1)
return next_i.i
next_i.i = 0;
def payload(length):
''' Generates payload of given length '''
if (sequential) :
pld = [next_i() for _ in xrange(length)]
else:
pld = [random.randint(0, MAX_INT) for _ in xrange(length)]
if string:
pld = str(bytearray(pld))
return pld
def next_level(level, pld):
''' Generates the next level of nested lists '''
if level>0:
# Next level of nested lists
if pld==None:
pld = random.randint(1, MAX_DIM_SIZE)
if isinstance(pld, int):
pld = pld*[None]
if isinstance(pld, list):
for i in range(len(pld)):
pld[i] = next_level(level-1, pld[i])
return pld
else:
raise TypeError("Expected None, int or list, got {}: {}".format(type(pld), pld))
elif level==0:
# Generate payload
if pld==None:
pld = random.randint(1, MAX_PKT_LEN)
if isinstance(pld, int):
return payload(pld)
else:
raise TypeError("Expected None or int, got {}: {}".format(type(pld), pld))
else:
raise ValueError("Expected int>=0, got {}".format(level))
pld = copy.deepcopy(dimensions)
pld = next_level(levels, pld)
return pld | [
"def",
"payload_generator",
"(",
"levels",
"=",
"0",
",",
"dimensions",
"=",
"None",
",",
"sequential",
"=",
"True",
",",
"string",
"=",
"True",
",",
"max_int",
"=",
"255",
",",
"max_pkt_len",
"=",
"150",
",",
"max_dim_size",
"=",
"3",
")",
":",
"MAX_I... | Generates random data in a form of nested lists, to be used as a test packet payload
levels - nesting levels:
0 - list of integers (single packet payload)
1 - list of lists of integers (list of packets)
2 - list of lists of lists of integers (list of packets, each packet containing list of messages)
etc.
dimensions - specifies the size of the lists; the specification can be partial, as what is not specified will be chosen randomly
if level=0:
dimensions=None - list of integers of random length in the range [1,max_pkt_len]
dimensions=5 - list of integers of length 5
if level=1:
dimensions=None - list of random length in the range [1, max_dim_size], each element is an integer list of random length in the range [1, max_pkt_len]
dimensions=5 - list of length 5, each element is an integer list of random length in the range [1, max_pkt_len]
dimensions=[2, 3] - list of 2 elements, each element is an integer list, the first one of length 2, the second on of length 3
if level=2:
dimensions=None - list of random length in the range [1, max_dim_size], each element is a list of random length in the range [1, max_dim_size],
each element of the second level lists is an integer list of random length in the range [1, max_pkt_len]
dimensions=5 - list of length 5, each element is a list of random length in the range [1, max_dim_size], each element of the second level lists
is an integer list of random length in the range [1, max_pkt_len]
dimensions=[2, 3] - list of 2 elements, each element is a list, the first one of length 2, the second on of length 3, each element of the second
level lists is an integer list of random length in the range [1, max_pkt_len]
dimensions=[[3, 5], [6, 7, 8]] - list of 2 elements, each element is a list, the first one of length 2, the second on of length 3, each element of the second
level lists is an integer list of length respectively 3, 5, 6, 7, 8
if level=3:
etc.
It is possible to specify dimensions partially, e.g dimensions=[[3, None], [6, 7, 8]] or dimensions=[None, [6, 7, 8]]. Dimensions that are not specified are
chosen randomly
sequential - if True, the payload is a cyclic sequence if integers in the range [0, max_int]
if False, the payload is a sequence of random integers in the range [0, max_int]
string - if True, the payload is a byte string, e.g. '\x04\x05\x06\x07' as each byte is in the range [0, min(255, max_int)]
if False, the payload is a list of integers in the range [0, max_int]
max_int - Upper limit for the integer range for the payload
max_pkt_len - Upper limit for the randomly chosen payload length
max_dim_size - Upper limit for the randomly chosen dimension sizes | [
"Generates",
"random",
"data",
"in",
"a",
"form",
"of",
"nested",
"lists",
"to",
"be",
"used",
"as",
"a",
"test",
"packet",
"payload",
"levels",
"-",
"nesting",
"levels",
":",
"0",
"-",
"list",
"of",
"integers",
"(",
"single",
"packet",
"payload",
")",
... | train | https://github.com/nkavaldj/myhdl_lib/blob/9902afd2031e7847373f692821b2135fd0810aa8/myhdl_lib/simulation/payload_generator.py#L4-L93 |
acorg/dark-matter | dark/entrez.py | getSequence | def getSequence(title, db='nucleotide'):
"""
Get information about a sequence from Genbank.
@param title: A C{str} sequence title from a BLAST hit. Of the form
'gi|63148399|gb|DQ011818.1| Description...'.
@param db: The C{str} name of the Entrez database to consult.
NOTE: this uses the network! Also, there is a 3 requests/second limit
imposed by NCBI on these requests so be careful or your IP will be banned.
"""
titleId = title.split(' ', 1)[0]
try:
gi = titleId.split('|')[1]
except IndexError:
# Assume we have a gi number directly, and make sure it's a string.
gi = str(titleId)
try:
client = Entrez.efetch(db=db, rettype='gb', retmode='text', id=gi)
except URLError:
return None
else:
record = SeqIO.read(client, 'gb')
client.close()
return record | python | def getSequence(title, db='nucleotide'):
"""
Get information about a sequence from Genbank.
@param title: A C{str} sequence title from a BLAST hit. Of the form
'gi|63148399|gb|DQ011818.1| Description...'.
@param db: The C{str} name of the Entrez database to consult.
NOTE: this uses the network! Also, there is a 3 requests/second limit
imposed by NCBI on these requests so be careful or your IP will be banned.
"""
titleId = title.split(' ', 1)[0]
try:
gi = titleId.split('|')[1]
except IndexError:
# Assume we have a gi number directly, and make sure it's a string.
gi = str(titleId)
try:
client = Entrez.efetch(db=db, rettype='gb', retmode='text', id=gi)
except URLError:
return None
else:
record = SeqIO.read(client, 'gb')
client.close()
return record | [
"def",
"getSequence",
"(",
"title",
",",
"db",
"=",
"'nucleotide'",
")",
":",
"titleId",
"=",
"title",
".",
"split",
"(",
"' '",
",",
"1",
")",
"[",
"0",
"]",
"try",
":",
"gi",
"=",
"titleId",
".",
"split",
"(",
"'|'",
")",
"[",
"1",
"]",
"exce... | Get information about a sequence from Genbank.
@param title: A C{str} sequence title from a BLAST hit. Of the form
'gi|63148399|gb|DQ011818.1| Description...'.
@param db: The C{str} name of the Entrez database to consult.
NOTE: this uses the network! Also, there is a 3 requests/second limit
imposed by NCBI on these requests so be careful or your IP will be banned. | [
"Get",
"information",
"about",
"a",
"sequence",
"from",
"Genbank",
"."
] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/entrez.py#L7-L32 |
cloudrave/avwx | avwx/models.py | CloudLayerSet.get_ceiling_cloud_layer | def get_ceiling_cloud_layer(self):
"""
Returns the lowest layer of broken or overcast clouds.
:rtype: CloudLayer|None
"""
lowest_layer = None
for layer in self.cloud_layers:
if layer.coverage not in [CloudLayer.BROKEN, CloudLayer.OVERCAST]:
continue
if lowest_layer is None:
lowest_layer = layer
continue
if layer.height > lowest_layer.height:
continue
if layer.height < lowest_layer.height or \
lowest_layer.get_coverage_percentage() < layer.get_coverage_percentage():
lowest_layer = layer
return lowest_layer | python | def get_ceiling_cloud_layer(self):
"""
Returns the lowest layer of broken or overcast clouds.
:rtype: CloudLayer|None
"""
lowest_layer = None
for layer in self.cloud_layers:
if layer.coverage not in [CloudLayer.BROKEN, CloudLayer.OVERCAST]:
continue
if lowest_layer is None:
lowest_layer = layer
continue
if layer.height > lowest_layer.height:
continue
if layer.height < lowest_layer.height or \
lowest_layer.get_coverage_percentage() < layer.get_coverage_percentage():
lowest_layer = layer
return lowest_layer | [
"def",
"get_ceiling_cloud_layer",
"(",
"self",
")",
":",
"lowest_layer",
"=",
"None",
"for",
"layer",
"in",
"self",
".",
"cloud_layers",
":",
"if",
"layer",
".",
"coverage",
"not",
"in",
"[",
"CloudLayer",
".",
"BROKEN",
",",
"CloudLayer",
".",
"OVERCAST",
... | Returns the lowest layer of broken or overcast clouds.
:rtype: CloudLayer|None | [
"Returns",
"the",
"lowest",
"layer",
"of",
"broken",
"or",
"overcast",
"clouds",
".",
":",
"rtype",
":",
"CloudLayer|None"
] | train | https://github.com/cloudrave/avwx/blob/d5a9a1179e0634e94f40d446b89ceecbd1e61e13/avwx/models.py#L42-L59 |
cloudrave/avwx | avwx/models.py | WeatherReport.parse_xml_data | def parse_xml_data(self):
"""
Parses `xml_data` and loads it into object properties.
"""
self.raw_text = self.xml_data.find('raw_text').text
self.station = WeatherStation(self.xml_data.find('station_id').text)
self.station.latitude = float(self.xml_data.find('latitude').text)
self.station.longitude = float(self.xml_data.find('longitude').text)
self.station.elevation = float(self.xml_data.find('elevation_m').text) * 3.28084 | python | def parse_xml_data(self):
"""
Parses `xml_data` and loads it into object properties.
"""
self.raw_text = self.xml_data.find('raw_text').text
self.station = WeatherStation(self.xml_data.find('station_id').text)
self.station.latitude = float(self.xml_data.find('latitude').text)
self.station.longitude = float(self.xml_data.find('longitude').text)
self.station.elevation = float(self.xml_data.find('elevation_m').text) * 3.28084 | [
"def",
"parse_xml_data",
"(",
"self",
")",
":",
"self",
".",
"raw_text",
"=",
"self",
".",
"xml_data",
".",
"find",
"(",
"'raw_text'",
")",
".",
"text",
"self",
".",
"station",
"=",
"WeatherStation",
"(",
"self",
".",
"xml_data",
".",
"find",
"(",
"'st... | Parses `xml_data` and loads it into object properties. | [
"Parses",
"xml_data",
"and",
"loads",
"it",
"into",
"object",
"properties",
"."
] | train | https://github.com/cloudrave/avwx/blob/d5a9a1179e0634e94f40d446b89ceecbd1e61e13/avwx/models.py#L149-L157 |
cloudrave/avwx | avwx/models.py | WeatherReportSet.download_data | def download_data(self, mock_response=None):
"""
Loads XML data into the `xml_data` attribute.
"""
if mock_response is not None:
body = mock_response
else:
api_url = self.get_api_url()
body = urlopen(api_url).read()
xml_root = ElementTree.fromstring(body)
xml_warnings = xml_root.find('warnings')
if len(xml_warnings.attrib) != 0:
print("Data warnings found: %s" % xml_warnings.attrib)
xml_errors = xml_root.find('errors')
if len(xml_errors.attrib) != 0:
raise Exception("Data errors found: %s" % xml_errors.attrib)
self.xml_data = xml_root.find('data') | python | def download_data(self, mock_response=None):
"""
Loads XML data into the `xml_data` attribute.
"""
if mock_response is not None:
body = mock_response
else:
api_url = self.get_api_url()
body = urlopen(api_url).read()
xml_root = ElementTree.fromstring(body)
xml_warnings = xml_root.find('warnings')
if len(xml_warnings.attrib) != 0:
print("Data warnings found: %s" % xml_warnings.attrib)
xml_errors = xml_root.find('errors')
if len(xml_errors.attrib) != 0:
raise Exception("Data errors found: %s" % xml_errors.attrib)
self.xml_data = xml_root.find('data') | [
"def",
"download_data",
"(",
"self",
",",
"mock_response",
"=",
"None",
")",
":",
"if",
"mock_response",
"is",
"not",
"None",
":",
"body",
"=",
"mock_response",
"else",
":",
"api_url",
"=",
"self",
".",
"get_api_url",
"(",
")",
"body",
"=",
"urlopen",
"(... | Loads XML data into the `xml_data` attribute. | [
"Loads",
"XML",
"data",
"into",
"the",
"xml_data",
"attribute",
"."
] | train | https://github.com/cloudrave/avwx/blob/d5a9a1179e0634e94f40d446b89ceecbd1e61e13/avwx/models.py#L215-L231 |
acorg/dark-matter | dark/summarize.py | summarizeReads | def summarizeReads(file_handle, file_type):
"""
open a fasta or fastq file, prints number of of reads,
average length of read, total number of bases, longest,
shortest and median read, total number and average of
individual base (A, T, G, C, N).
"""
base_counts = defaultdict(int)
read_number = 0
total_length = 0
length_list = []
records = SeqIO.parse(file_handle, file_type)
for record in records:
total_length += len(record)
read_number += 1
length_list.append(len(record))
for base in record:
base_counts[base] += 1
result = {
"read_number": read_number,
"total_length": total_length,
"average_length": total_length / read_number if read_number > 0 else 0,
"max_length": max(length_list) if length_list else 0,
"min_length": min(length_list) if length_list else 0,
"median_length": median(length_list) if length_list else 0,
"base_counts": base_counts
}
return result | python | def summarizeReads(file_handle, file_type):
"""
open a fasta or fastq file, prints number of of reads,
average length of read, total number of bases, longest,
shortest and median read, total number and average of
individual base (A, T, G, C, N).
"""
base_counts = defaultdict(int)
read_number = 0
total_length = 0
length_list = []
records = SeqIO.parse(file_handle, file_type)
for record in records:
total_length += len(record)
read_number += 1
length_list.append(len(record))
for base in record:
base_counts[base] += 1
result = {
"read_number": read_number,
"total_length": total_length,
"average_length": total_length / read_number if read_number > 0 else 0,
"max_length": max(length_list) if length_list else 0,
"min_length": min(length_list) if length_list else 0,
"median_length": median(length_list) if length_list else 0,
"base_counts": base_counts
}
return result | [
"def",
"summarizeReads",
"(",
"file_handle",
",",
"file_type",
")",
":",
"base_counts",
"=",
"defaultdict",
"(",
"int",
")",
"read_number",
"=",
"0",
"total_length",
"=",
"0",
"length_list",
"=",
"[",
"]",
"records",
"=",
"SeqIO",
".",
"parse",
"(",
"file_... | open a fasta or fastq file, prints number of of reads,
average length of read, total number of bases, longest,
shortest and median read, total number and average of
individual base (A, T, G, C, N). | [
"open",
"a",
"fasta",
"or",
"fastq",
"file",
"prints",
"number",
"of",
"of",
"reads",
"average",
"length",
"of",
"read",
"total",
"number",
"of",
"bases",
"longest",
"shortest",
"and",
"median",
"read",
"total",
"number",
"and",
"average",
"of",
"individual"... | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/summarize.py#L7-L38 |
acorg/dark-matter | dark/summarize.py | sequenceCategoryLengths | def sequenceCategoryLengths(read, categories, defaultCategory=None,
suppressedCategory='...', minLength=1):
"""
Summarize the nucleotides or AAs found in a read by assigning each to a
category and reporting the lengths of the contiguous category classes
found along the sequence.
@param read: A C{Read} instance or one of its subclasses.
@param categories: A C{dict} mapping nucleotides or AAs to category.
@param defaultCategory: The category to use if a sequence base is not
in C{categories}.
@param suppressedCategory: The category to use to indicate suppressed
sequence regions (i.e., made up of stretches of bases that are less
than C{minLength} in length).
@param minLength: stretches of the read that are less than this C{int}
length will be summed and reported as being in the
C{suppressedCategory} category.
@raise ValueError: If minLength is less than one.
@return: A C{list} of 2-C{tuples}. Each tuple contains a (category, count).
"""
result = []
append = result.append
get = categories.get
first = True
currentCategory = None
currentCount = 0
suppressing = False
suppressedCount = 0
if minLength < 1:
raise ValueError('minLength must be at least 1')
for base in read.sequence:
thisCategory = get(base, defaultCategory)
if first:
first = False
currentCategory = thisCategory
currentCount += 1
else:
if thisCategory == currentCategory:
# This base is still in the same category as the last base.
# Keep counting.
currentCount += 1
else:
# This is a new category.
if currentCount < minLength:
# The category region that was just seen will not be
# emitted.
if suppressing:
# Already suppressing. Suppress the just-seen
# region too.
suppressedCount += currentCount
else:
# Start suppressing.
suppressedCount = currentCount
suppressing = True
else:
if suppressing:
append((suppressedCategory, suppressedCount))
suppressedCount = 0
suppressing = False
append((currentCategory, currentCount))
currentCategory = thisCategory
currentCount = 1
if suppressing:
append((suppressedCategory, suppressedCount + currentCount))
elif currentCount >= minLength:
append((currentCategory, currentCount))
elif currentCount:
append((suppressedCategory, currentCount))
return result | python | def sequenceCategoryLengths(read, categories, defaultCategory=None,
suppressedCategory='...', minLength=1):
"""
Summarize the nucleotides or AAs found in a read by assigning each to a
category and reporting the lengths of the contiguous category classes
found along the sequence.
@param read: A C{Read} instance or one of its subclasses.
@param categories: A C{dict} mapping nucleotides or AAs to category.
@param defaultCategory: The category to use if a sequence base is not
in C{categories}.
@param suppressedCategory: The category to use to indicate suppressed
sequence regions (i.e., made up of stretches of bases that are less
than C{minLength} in length).
@param minLength: stretches of the read that are less than this C{int}
length will be summed and reported as being in the
C{suppressedCategory} category.
@raise ValueError: If minLength is less than one.
@return: A C{list} of 2-C{tuples}. Each tuple contains a (category, count).
"""
result = []
append = result.append
get = categories.get
first = True
currentCategory = None
currentCount = 0
suppressing = False
suppressedCount = 0
if minLength < 1:
raise ValueError('minLength must be at least 1')
for base in read.sequence:
thisCategory = get(base, defaultCategory)
if first:
first = False
currentCategory = thisCategory
currentCount += 1
else:
if thisCategory == currentCategory:
# This base is still in the same category as the last base.
# Keep counting.
currentCount += 1
else:
# This is a new category.
if currentCount < minLength:
# The category region that was just seen will not be
# emitted.
if suppressing:
# Already suppressing. Suppress the just-seen
# region too.
suppressedCount += currentCount
else:
# Start suppressing.
suppressedCount = currentCount
suppressing = True
else:
if suppressing:
append((suppressedCategory, suppressedCount))
suppressedCount = 0
suppressing = False
append((currentCategory, currentCount))
currentCategory = thisCategory
currentCount = 1
if suppressing:
append((suppressedCategory, suppressedCount + currentCount))
elif currentCount >= minLength:
append((currentCategory, currentCount))
elif currentCount:
append((suppressedCategory, currentCount))
return result | [
"def",
"sequenceCategoryLengths",
"(",
"read",
",",
"categories",
",",
"defaultCategory",
"=",
"None",
",",
"suppressedCategory",
"=",
"'...'",
",",
"minLength",
"=",
"1",
")",
":",
"result",
"=",
"[",
"]",
"append",
"=",
"result",
".",
"append",
"get",
"=... | Summarize the nucleotides or AAs found in a read by assigning each to a
category and reporting the lengths of the contiguous category classes
found along the sequence.
@param read: A C{Read} instance or one of its subclasses.
@param categories: A C{dict} mapping nucleotides or AAs to category.
@param defaultCategory: The category to use if a sequence base is not
in C{categories}.
@param suppressedCategory: The category to use to indicate suppressed
sequence regions (i.e., made up of stretches of bases that are less
than C{minLength} in length).
@param minLength: stretches of the read that are less than this C{int}
length will be summed and reported as being in the
C{suppressedCategory} category.
@raise ValueError: If minLength is less than one.
@return: A C{list} of 2-C{tuples}. Each tuple contains a (category, count). | [
"Summarize",
"the",
"nucleotides",
"or",
"AAs",
"found",
"in",
"a",
"read",
"by",
"assigning",
"each",
"to",
"a",
"category",
"and",
"reporting",
"the",
"lengths",
"of",
"the",
"contiguous",
"category",
"classes",
"found",
"along",
"the",
"sequence",
"."
] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/summarize.py#L41-L113 |
acorg/dark-matter | dark/simplify.py | simplifyTitle | def simplifyTitle(title, target):
"""
Simplify a given sequence title. Given a title, look for the first
occurrence of target anywhere in any of its words. Return a space-separated
string of the words of the title up to and including the occurrence of the
target. Ignore case.
E.g.,
# Suffix
simplifyTitle('Bovine polyomavirus DNA, complete genome', 'virus') ->
'Bovine polyomavirus'
# Prefix
simplifyTitle('California sea lion polyomavirus 1 CSL6994', 'polyoma') ->
'California sea lion polyoma'
# Contained
simplifyTitle('California sea lion polyomavirus 1 CSL6994', 'yoma') ->
'California sea lion polyoma'
title: The string title of the sequence.
target: The word in the title that we should stop at.
"""
targetLen = len(target)
result = []
for word in title.split():
if len(word) >= targetLen:
offset = word.lower().find(target.lower())
if offset > -1:
result.append(word[:offset + targetLen])
break
result.append(word)
return ' '.join(result) | python | def simplifyTitle(title, target):
"""
Simplify a given sequence title. Given a title, look for the first
occurrence of target anywhere in any of its words. Return a space-separated
string of the words of the title up to and including the occurrence of the
target. Ignore case.
E.g.,
# Suffix
simplifyTitle('Bovine polyomavirus DNA, complete genome', 'virus') ->
'Bovine polyomavirus'
# Prefix
simplifyTitle('California sea lion polyomavirus 1 CSL6994', 'polyoma') ->
'California sea lion polyoma'
# Contained
simplifyTitle('California sea lion polyomavirus 1 CSL6994', 'yoma') ->
'California sea lion polyoma'
title: The string title of the sequence.
target: The word in the title that we should stop at.
"""
targetLen = len(target)
result = []
for word in title.split():
if len(word) >= targetLen:
offset = word.lower().find(target.lower())
if offset > -1:
result.append(word[:offset + targetLen])
break
result.append(word)
return ' '.join(result) | [
"def",
"simplifyTitle",
"(",
"title",
",",
"target",
")",
":",
"targetLen",
"=",
"len",
"(",
"target",
")",
"result",
"=",
"[",
"]",
"for",
"word",
"in",
"title",
".",
"split",
"(",
")",
":",
"if",
"len",
"(",
"word",
")",
">=",
"targetLen",
":",
... | Simplify a given sequence title. Given a title, look for the first
occurrence of target anywhere in any of its words. Return a space-separated
string of the words of the title up to and including the occurrence of the
target. Ignore case.
E.g.,
# Suffix
simplifyTitle('Bovine polyomavirus DNA, complete genome', 'virus') ->
'Bovine polyomavirus'
# Prefix
simplifyTitle('California sea lion polyomavirus 1 CSL6994', 'polyoma') ->
'California sea lion polyoma'
# Contained
simplifyTitle('California sea lion polyomavirus 1 CSL6994', 'yoma') ->
'California sea lion polyoma'
title: The string title of the sequence.
target: The word in the title that we should stop at. | [
"Simplify",
"a",
"given",
"sequence",
"title",
".",
"Given",
"a",
"title",
"look",
"for",
"the",
"first",
"occurrence",
"of",
"target",
"anywhere",
"in",
"any",
"of",
"its",
"words",
".",
"Return",
"a",
"space",
"-",
"separated",
"string",
"of",
"the",
"... | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/simplify.py#L1-L36 |
nkavaldj/myhdl_lib | myhdl_lib/simulation/_DUTer.py | DUTer.registerSimulator | def registerSimulator(self, name=None, hdl=None, analyze_cmd=None, elaborate_cmd=None, simulate_cmd=None):
''' Registers an HDL _simulator
name - str, user defined name, used to identify this _simulator record
hdl - str, case insensitive, (verilog, vhdl), the HDL to which the simulated MyHDL code will be converted
analyze_cmd - str, system command that will be run to analyze the generated HDL
elaborate_cmd - str, optional, system command that will be run after the analyze phase
simulate_cmd - str, system command that will be run to simulate the analyzed and elaborated design
Before execution of a command string the following substitutions take place:
{topname} is substituted with the name of the simulated MyHDL function
'''
if not isinstance(name, str) or (name.strip() == ""):
raise ValueError("Invalid _simulator name")
if hdl.lower() not in ("vhdl", "verilog"):
raise ValueError("Invalid hdl {}".format(hdl))
if not isinstance(analyze_cmd, str) or (analyze_cmd.strip() == ""):
raise ValueError("Invalid analyzer command")
if elaborate_cmd is not None:
if not isinstance(elaborate_cmd, str) or (elaborate_cmd.strip() == ""):
raise ValueError("Invalid elaborate_cmd command")
if not isinstance(simulate_cmd, str) or (simulate_cmd.strip() == ""):
raise ValueError("Invalid _simulator command")
self.sim_reg[name] = (hdl.lower(), analyze_cmd, elaborate_cmd, simulate_cmd) | python | def registerSimulator(self, name=None, hdl=None, analyze_cmd=None, elaborate_cmd=None, simulate_cmd=None):
''' Registers an HDL _simulator
name - str, user defined name, used to identify this _simulator record
hdl - str, case insensitive, (verilog, vhdl), the HDL to which the simulated MyHDL code will be converted
analyze_cmd - str, system command that will be run to analyze the generated HDL
elaborate_cmd - str, optional, system command that will be run after the analyze phase
simulate_cmd - str, system command that will be run to simulate the analyzed and elaborated design
Before execution of a command string the following substitutions take place:
{topname} is substituted with the name of the simulated MyHDL function
'''
if not isinstance(name, str) or (name.strip() == ""):
raise ValueError("Invalid _simulator name")
if hdl.lower() not in ("vhdl", "verilog"):
raise ValueError("Invalid hdl {}".format(hdl))
if not isinstance(analyze_cmd, str) or (analyze_cmd.strip() == ""):
raise ValueError("Invalid analyzer command")
if elaborate_cmd is not None:
if not isinstance(elaborate_cmd, str) or (elaborate_cmd.strip() == ""):
raise ValueError("Invalid elaborate_cmd command")
if not isinstance(simulate_cmd, str) or (simulate_cmd.strip() == ""):
raise ValueError("Invalid _simulator command")
self.sim_reg[name] = (hdl.lower(), analyze_cmd, elaborate_cmd, simulate_cmd) | [
"def",
"registerSimulator",
"(",
"self",
",",
"name",
"=",
"None",
",",
"hdl",
"=",
"None",
",",
"analyze_cmd",
"=",
"None",
",",
"elaborate_cmd",
"=",
"None",
",",
"simulate_cmd",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"name",
",",
"st... | Registers an HDL _simulator
name - str, user defined name, used to identify this _simulator record
hdl - str, case insensitive, (verilog, vhdl), the HDL to which the simulated MyHDL code will be converted
analyze_cmd - str, system command that will be run to analyze the generated HDL
elaborate_cmd - str, optional, system command that will be run after the analyze phase
simulate_cmd - str, system command that will be run to simulate the analyzed and elaborated design
Before execution of a command string the following substitutions take place:
{topname} is substituted with the name of the simulated MyHDL function | [
"Registers",
"an",
"HDL",
"_simulator",
"name",
"-",
"str",
"user",
"defined",
"name",
"used",
"to",
"identify",
"this",
"_simulator",
"record",
"hdl",
"-",
"str",
"case",
"insensitive",
"(",
"verilog",
"vhdl",
")",
"the",
"HDL",
"to",
"which",
"the",
"sim... | train | https://github.com/nkavaldj/myhdl_lib/blob/9902afd2031e7847373f692821b2135fd0810aa8/myhdl_lib/simulation/_DUTer.py#L29-L51 |
nkavaldj/myhdl_lib | myhdl_lib/simulation/_DUTer.py | DUTer._getCosimulation | def _getCosimulation(self, func, **kwargs):
''' Returns a co-simulation instance of func.
Uses the _simulator specified by self._simulator.
Enables traces if self._trace is True
func - MyHDL function to be simulated
kwargs - dict of func interface assignments: for signals and parameters
'''
vals = {}
vals['topname'] = func.func_name
vals['unitname'] = func.func_name.lower()
hdlsim = self._simulator
if not hdlsim:
raise ValueError("No _simulator specified")
if not self.sim_reg.has_key(hdlsim):
raise ValueError("Simulator {} is not registered".format(hdlsim))
hdl, analyze_cmd, elaborate_cmd, simulate_cmd = self.sim_reg[hdlsim]
# Convert to HDL
if hdl == "verilog":
toVerilog(func, **kwargs)
if self._trace:
self._enableTracesVerilog("./tb_{topname}.v".format(**vals))
elif hdl == "vhdl":
toVHDL(func, **kwargs)
# Analyze HDL
os.system(analyze_cmd.format(**vals))
# Elaborate
if elaborate_cmd:
os.system(elaborate_cmd.format(**vals))
# Simulate
return Cosimulation(simulate_cmd.format(**vals), **kwargs) | python | def _getCosimulation(self, func, **kwargs):
''' Returns a co-simulation instance of func.
Uses the _simulator specified by self._simulator.
Enables traces if self._trace is True
func - MyHDL function to be simulated
kwargs - dict of func interface assignments: for signals and parameters
'''
vals = {}
vals['topname'] = func.func_name
vals['unitname'] = func.func_name.lower()
hdlsim = self._simulator
if not hdlsim:
raise ValueError("No _simulator specified")
if not self.sim_reg.has_key(hdlsim):
raise ValueError("Simulator {} is not registered".format(hdlsim))
hdl, analyze_cmd, elaborate_cmd, simulate_cmd = self.sim_reg[hdlsim]
# Convert to HDL
if hdl == "verilog":
toVerilog(func, **kwargs)
if self._trace:
self._enableTracesVerilog("./tb_{topname}.v".format(**vals))
elif hdl == "vhdl":
toVHDL(func, **kwargs)
# Analyze HDL
os.system(analyze_cmd.format(**vals))
# Elaborate
if elaborate_cmd:
os.system(elaborate_cmd.format(**vals))
# Simulate
return Cosimulation(simulate_cmd.format(**vals), **kwargs) | [
"def",
"_getCosimulation",
"(",
"self",
",",
"func",
",",
"*",
"*",
"kwargs",
")",
":",
"vals",
"=",
"{",
"}",
"vals",
"[",
"'topname'",
"]",
"=",
"func",
".",
"func_name",
"vals",
"[",
"'unitname'",
"]",
"=",
"func",
".",
"func_name",
".",
"lower",
... | Returns a co-simulation instance of func.
Uses the _simulator specified by self._simulator.
Enables traces if self._trace is True
func - MyHDL function to be simulated
kwargs - dict of func interface assignments: for signals and parameters | [
"Returns",
"a",
"co",
"-",
"simulation",
"instance",
"of",
"func",
".",
"Uses",
"the",
"_simulator",
"specified",
"by",
"self",
".",
"_simulator",
".",
"Enables",
"traces",
"if",
"self",
".",
"_trace",
"is",
"True",
"func",
"-",
"MyHDL",
"function",
"to",
... | train | https://github.com/nkavaldj/myhdl_lib/blob/9902afd2031e7847373f692821b2135fd0810aa8/myhdl_lib/simulation/_DUTer.py#L66-L98 |
nkavaldj/myhdl_lib | myhdl_lib/simulation/_DUTer.py | DUTer._enableTracesVerilog | def _enableTracesVerilog(self, verilogFile):
''' Enables traces in a Verilog file'''
fname, _ = os.path.splitext(verilogFile)
inserted = False
for _, line in enumerate(fileinput.input(verilogFile, inplace = 1)):
sys.stdout.write(line)
if line.startswith("end") and not inserted:
sys.stdout.write('\n\n')
sys.stdout.write('initial begin\n')
sys.stdout.write(' $dumpfile("{}_cosim.vcd");\n'.format(fname))
sys.stdout.write(' $dumpvars(0, dut);\n')
sys.stdout.write('end\n\n')
inserted = True | python | def _enableTracesVerilog(self, verilogFile):
''' Enables traces in a Verilog file'''
fname, _ = os.path.splitext(verilogFile)
inserted = False
for _, line in enumerate(fileinput.input(verilogFile, inplace = 1)):
sys.stdout.write(line)
if line.startswith("end") and not inserted:
sys.stdout.write('\n\n')
sys.stdout.write('initial begin\n')
sys.stdout.write(' $dumpfile("{}_cosim.vcd");\n'.format(fname))
sys.stdout.write(' $dumpvars(0, dut);\n')
sys.stdout.write('end\n\n')
inserted = True | [
"def",
"_enableTracesVerilog",
"(",
"self",
",",
"verilogFile",
")",
":",
"fname",
",",
"_",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"verilogFile",
")",
"inserted",
"=",
"False",
"for",
"_",
",",
"line",
"in",
"enumerate",
"(",
"fileinput",
".",
... | Enables traces in a Verilog file | [
"Enables",
"traces",
"in",
"a",
"Verilog",
"file"
] | train | https://github.com/nkavaldj/myhdl_lib/blob/9902afd2031e7847373f692821b2135fd0810aa8/myhdl_lib/simulation/_DUTer.py#L101-L113 |
nkavaldj/myhdl_lib | myhdl_lib/simulation/_DUTer.py | DUTer._getDut | def _getDut(self, func, **kwargs):
''' Returns a simulation instance of func.
Uses the simulator specified by self._simulator.
Enables traces if self._trace is True
func - MyHDL function to be simulated
kwargs - dict of func interface assignments: for signals and parameters
'''
if self._simulator=="myhdl":
if not self._trace:
sim_dut = func(**kwargs)
else:
sim_dut = traceSignals(func, **kwargs)
else:
sim_dut = self._getCosimulation(func, **kwargs)
return sim_dut | python | def _getDut(self, func, **kwargs):
''' Returns a simulation instance of func.
Uses the simulator specified by self._simulator.
Enables traces if self._trace is True
func - MyHDL function to be simulated
kwargs - dict of func interface assignments: for signals and parameters
'''
if self._simulator=="myhdl":
if not self._trace:
sim_dut = func(**kwargs)
else:
sim_dut = traceSignals(func, **kwargs)
else:
sim_dut = self._getCosimulation(func, **kwargs)
return sim_dut | [
"def",
"_getDut",
"(",
"self",
",",
"func",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_simulator",
"==",
"\"myhdl\"",
":",
"if",
"not",
"self",
".",
"_trace",
":",
"sim_dut",
"=",
"func",
"(",
"*",
"*",
"kwargs",
")",
"else",
":",
"s... | Returns a simulation instance of func.
Uses the simulator specified by self._simulator.
Enables traces if self._trace is True
func - MyHDL function to be simulated
kwargs - dict of func interface assignments: for signals and parameters | [
"Returns",
"a",
"simulation",
"instance",
"of",
"func",
".",
"Uses",
"the",
"simulator",
"specified",
"by",
"self",
".",
"_simulator",
".",
"Enables",
"traces",
"if",
"self",
".",
"_trace",
"is",
"True",
"func",
"-",
"MyHDL",
"function",
"to",
"be",
"simul... | train | https://github.com/nkavaldj/myhdl_lib/blob/9902afd2031e7847373f692821b2135fd0810aa8/myhdl_lib/simulation/_DUTer.py#L116-L131 |
acorg/dark-matter | dark/fastq.py | FastqReads.iter | def iter(self):
"""
Iterate over the sequences in the files in self.files_, yielding each
as an instance of the desired read class.
"""
for _file in self._files:
with asHandle(_file) as fp:
# Use FastqGeneralIterator because it provides access to
# the unconverted quality string (i.e., it doesn't try to
# figure out the numeric quality values, which we don't
# care about at this point).
for sequenceId, sequence, quality in FastqGeneralIterator(fp):
yield self.readClass(sequenceId, sequence, quality) | python | def iter(self):
"""
Iterate over the sequences in the files in self.files_, yielding each
as an instance of the desired read class.
"""
for _file in self._files:
with asHandle(_file) as fp:
# Use FastqGeneralIterator because it provides access to
# the unconverted quality string (i.e., it doesn't try to
# figure out the numeric quality values, which we don't
# care about at this point).
for sequenceId, sequence, quality in FastqGeneralIterator(fp):
yield self.readClass(sequenceId, sequence, quality) | [
"def",
"iter",
"(",
"self",
")",
":",
"for",
"_file",
"in",
"self",
".",
"_files",
":",
"with",
"asHandle",
"(",
"_file",
")",
"as",
"fp",
":",
"# Use FastqGeneralIterator because it provides access to",
"# the unconverted quality string (i.e., it doesn't try to",
"# fi... | Iterate over the sequences in the files in self.files_, yielding each
as an instance of the desired read class. | [
"Iterate",
"over",
"the",
"sequences",
"in",
"the",
"files",
"in",
"self",
".",
"files_",
"yielding",
"each",
"as",
"an",
"instance",
"of",
"the",
"desired",
"read",
"class",
"."
] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/fastq.py#L26-L38 |
acorg/dark-matter | bin/write-htcondor-job-spec.py | splitFASTA | def splitFASTA(params):
"""
Read the FASTA file named params['fastaFile'] and print out its
sequences into files named 0.fasta, 1.fasta, etc. with
params['seqsPerJob'] sequences per file.
"""
assert params['fastaFile'][-1] == 'a', ('You must specify a file in '
'fasta-format that ends in '
'.fasta')
fileCount = count = seqCount = 0
outfp = None
with open(params['fastaFile']) as infp:
for seq in SeqIO.parse(infp, 'fasta'):
seqCount += 1
if count == params['seqsPerJob']:
outfp.close()
count = 0
if count == 0:
outfp = open('%d.fasta' % fileCount, 'w')
fileCount += 1
count += 1
outfp.write('>%s\n%s\n' % (seq.description, str(seq.seq)))
outfp.close()
return fileCount, seqCount | python | def splitFASTA(params):
"""
Read the FASTA file named params['fastaFile'] and print out its
sequences into files named 0.fasta, 1.fasta, etc. with
params['seqsPerJob'] sequences per file.
"""
assert params['fastaFile'][-1] == 'a', ('You must specify a file in '
'fasta-format that ends in '
'.fasta')
fileCount = count = seqCount = 0
outfp = None
with open(params['fastaFile']) as infp:
for seq in SeqIO.parse(infp, 'fasta'):
seqCount += 1
if count == params['seqsPerJob']:
outfp.close()
count = 0
if count == 0:
outfp = open('%d.fasta' % fileCount, 'w')
fileCount += 1
count += 1
outfp.write('>%s\n%s\n' % (seq.description, str(seq.seq)))
outfp.close()
return fileCount, seqCount | [
"def",
"splitFASTA",
"(",
"params",
")",
":",
"assert",
"params",
"[",
"'fastaFile'",
"]",
"[",
"-",
"1",
"]",
"==",
"'a'",
",",
"(",
"'You must specify a file in '",
"'fasta-format that ends in '",
"'.fasta'",
")",
"fileCount",
"=",
"count",
"=",
"seqCount",
... | Read the FASTA file named params['fastaFile'] and print out its
sequences into files named 0.fasta, 1.fasta, etc. with
params['seqsPerJob'] sequences per file. | [
"Read",
"the",
"FASTA",
"file",
"named",
"params",
"[",
"fastaFile",
"]",
"and",
"print",
"out",
"its",
"sequences",
"into",
"files",
"named",
"0",
".",
"fasta",
"1",
".",
"fasta",
"etc",
".",
"with",
"params",
"[",
"seqsPerJob",
"]",
"sequences",
"per",... | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/bin/write-htcondor-job-spec.py#L44-L68 |
invenia/Arbiter | arbiter/scheduler.py | Scheduler.add_task | def add_task(self, task):
"""
Add a task to the scheduler.
task: The task to add.
"""
if not self._valid_name(task.name):
raise ValueError(task.name)
self._tasks[task.name] = task
incomplete_dependencies = set()
for dependency in task.dependencies:
if not self._valid_name(dependency) or dependency in self._failed:
# there may already be tasks dependent on this one.
self._cascade_failure(task.name)
break
if dependency not in self._completed:
incomplete_dependencies.add(dependency)
else: # task hasn't failed
try:
self._graph.add(task.name, incomplete_dependencies)
except ValueError:
self._cascade_failure(task.name) | python | def add_task(self, task):
"""
Add a task to the scheduler.
task: The task to add.
"""
if not self._valid_name(task.name):
raise ValueError(task.name)
self._tasks[task.name] = task
incomplete_dependencies = set()
for dependency in task.dependencies:
if not self._valid_name(dependency) or dependency in self._failed:
# there may already be tasks dependent on this one.
self._cascade_failure(task.name)
break
if dependency not in self._completed:
incomplete_dependencies.add(dependency)
else: # task hasn't failed
try:
self._graph.add(task.name, incomplete_dependencies)
except ValueError:
self._cascade_failure(task.name) | [
"def",
"add_task",
"(",
"self",
",",
"task",
")",
":",
"if",
"not",
"self",
".",
"_valid_name",
"(",
"task",
".",
"name",
")",
":",
"raise",
"ValueError",
"(",
"task",
".",
"name",
")",
"self",
".",
"_tasks",
"[",
"task",
".",
"name",
"]",
"=",
"... | Add a task to the scheduler.
task: The task to add. | [
"Add",
"a",
"task",
"to",
"the",
"scheduler",
"."
] | train | https://github.com/invenia/Arbiter/blob/51008393ae8797da85bcd67807259a157f941dfd/arbiter/scheduler.py#L68-L94 |
invenia/Arbiter | arbiter/scheduler.py | Scheduler.start_task | def start_task(self, name=None):
"""
Start a task.
Returns the task that was started (or None if no task has been
started).
name: (optional, None) The task to start. If a name is given,
Scheduler will attempt to start the task (and raise an
exception if the task doesn't exist or isn't runnable). If
no name is given, a task will be chosen arbitrarily
"""
if name is None:
for possibility in self._graph.roots:
if possibility not in self._running:
name = possibility
break
else: # all tasks blocked/running/completed/failed
return None
else:
if name not in self._graph.roots or name in self._running:
raise ValueError(name)
self._running.add(name)
return self._tasks[name] | python | def start_task(self, name=None):
"""
Start a task.
Returns the task that was started (or None if no task has been
started).
name: (optional, None) The task to start. If a name is given,
Scheduler will attempt to start the task (and raise an
exception if the task doesn't exist or isn't runnable). If
no name is given, a task will be chosen arbitrarily
"""
if name is None:
for possibility in self._graph.roots:
if possibility not in self._running:
name = possibility
break
else: # all tasks blocked/running/completed/failed
return None
else:
if name not in self._graph.roots or name in self._running:
raise ValueError(name)
self._running.add(name)
return self._tasks[name] | [
"def",
"start_task",
"(",
"self",
",",
"name",
"=",
"None",
")",
":",
"if",
"name",
"is",
"None",
":",
"for",
"possibility",
"in",
"self",
".",
"_graph",
".",
"roots",
":",
"if",
"possibility",
"not",
"in",
"self",
".",
"_running",
":",
"name",
"=",
... | Start a task.
Returns the task that was started (or None if no task has been
started).
name: (optional, None) The task to start. If a name is given,
Scheduler will attempt to start the task (and raise an
exception if the task doesn't exist or isn't runnable). If
no name is given, a task will be chosen arbitrarily | [
"Start",
"a",
"task",
"."
] | train | https://github.com/invenia/Arbiter/blob/51008393ae8797da85bcd67807259a157f941dfd/arbiter/scheduler.py#L96-L121 |
invenia/Arbiter | arbiter/scheduler.py | Scheduler.end_task | def end_task(self, name, success=True):
"""
End a running task. Raises an exception if the task isn't
running.
name: The name of the task to complete.
success: (optional, True) Whether the task was successful.
"""
self._running.remove(name)
if success:
self._completed.add(name)
self._graph.remove(name, strategy=Strategy.orphan)
else:
self._cascade_failure(name) | python | def end_task(self, name, success=True):
"""
End a running task. Raises an exception if the task isn't
running.
name: The name of the task to complete.
success: (optional, True) Whether the task was successful.
"""
self._running.remove(name)
if success:
self._completed.add(name)
self._graph.remove(name, strategy=Strategy.orphan)
else:
self._cascade_failure(name) | [
"def",
"end_task",
"(",
"self",
",",
"name",
",",
"success",
"=",
"True",
")",
":",
"self",
".",
"_running",
".",
"remove",
"(",
"name",
")",
"if",
"success",
":",
"self",
".",
"_completed",
".",
"add",
"(",
"name",
")",
"self",
".",
"_graph",
".",... | End a running task. Raises an exception if the task isn't
running.
name: The name of the task to complete.
success: (optional, True) Whether the task was successful. | [
"End",
"a",
"running",
"task",
".",
"Raises",
"an",
"exception",
"if",
"the",
"task",
"isn",
"t",
"running",
"."
] | train | https://github.com/invenia/Arbiter/blob/51008393ae8797da85bcd67807259a157f941dfd/arbiter/scheduler.py#L123-L137 |
invenia/Arbiter | arbiter/scheduler.py | Scheduler.fail_remaining | def fail_remaining(self):
"""
Mark all unfinished tasks (including currently running ones) as
failed.
"""
self._failed.update(self._graph.nodes)
self._graph = Graph()
self._running = set() | python | def fail_remaining(self):
"""
Mark all unfinished tasks (including currently running ones) as
failed.
"""
self._failed.update(self._graph.nodes)
self._graph = Graph()
self._running = set() | [
"def",
"fail_remaining",
"(",
"self",
")",
":",
"self",
".",
"_failed",
".",
"update",
"(",
"self",
".",
"_graph",
".",
"nodes",
")",
"self",
".",
"_graph",
"=",
"Graph",
"(",
")",
"self",
".",
"_running",
"=",
"set",
"(",
")"
] | Mark all unfinished tasks (including currently running ones) as
failed. | [
"Mark",
"all",
"unfinished",
"tasks",
"(",
"including",
"currently",
"running",
"ones",
")",
"as",
"failed",
"."
] | train | https://github.com/invenia/Arbiter/blob/51008393ae8797da85bcd67807259a157f941dfd/arbiter/scheduler.py#L145-L152 |
invenia/Arbiter | arbiter/scheduler.py | Scheduler._cascade_failure | def _cascade_failure(self, name):
"""
Mark a task (and anything that depends on it) as failed.
name: The name of the offending task
"""
if name in self._graph:
self._failed.update(
self._graph.remove(name, strategy=Strategy.remove)
)
else:
self._failed.add(name) | python | def _cascade_failure(self, name):
"""
Mark a task (and anything that depends on it) as failed.
name: The name of the offending task
"""
if name in self._graph:
self._failed.update(
self._graph.remove(name, strategy=Strategy.remove)
)
else:
self._failed.add(name) | [
"def",
"_cascade_failure",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"in",
"self",
".",
"_graph",
":",
"self",
".",
"_failed",
".",
"update",
"(",
"self",
".",
"_graph",
".",
"remove",
"(",
"name",
",",
"strategy",
"=",
"Strategy",
".",
"remo... | Mark a task (and anything that depends on it) as failed.
name: The name of the offending task | [
"Mark",
"a",
"task",
"(",
"and",
"anything",
"that",
"depends",
"on",
"it",
")",
"as",
"failed",
"."
] | train | https://github.com/invenia/Arbiter/blob/51008393ae8797da85bcd67807259a157f941dfd/arbiter/scheduler.py#L154-L165 |
acorg/dark-matter | bin/get-features.py | main | def main(gi, ranges):
"""
Print the features of the genbank entry given by gi. If ranges is
non-emtpy, only print features that include the ranges.
gi: either a hit from a BLAST record, in the form
'gi|63148399|gb|DQ011818.1|' or a gi number (63148399 in this example).
ranges: a possibly empty list of ranges to print information for. Each
range is a non-descending (start, end) pair of integers.
"""
# TODO: Make it so we can pass a 'db' argument to getSequence.
record = getSequence(gi)
if record is None:
print("Looks like you're offline.")
sys.exit(3)
else:
printed = set()
if ranges:
for (start, end) in ranges:
for index, feature in enumerate(record.features):
if (start < int(feature.location.end) and
end > int(feature.location.start) and
index not in printed):
print(feature)
printed.add(index)
else:
# Print all features.
for feature in record.features:
print(feature) | python | def main(gi, ranges):
"""
Print the features of the genbank entry given by gi. If ranges is
non-emtpy, only print features that include the ranges.
gi: either a hit from a BLAST record, in the form
'gi|63148399|gb|DQ011818.1|' or a gi number (63148399 in this example).
ranges: a possibly empty list of ranges to print information for. Each
range is a non-descending (start, end) pair of integers.
"""
# TODO: Make it so we can pass a 'db' argument to getSequence.
record = getSequence(gi)
if record is None:
print("Looks like you're offline.")
sys.exit(3)
else:
printed = set()
if ranges:
for (start, end) in ranges:
for index, feature in enumerate(record.features):
if (start < int(feature.location.end) and
end > int(feature.location.start) and
index not in printed):
print(feature)
printed.add(index)
else:
# Print all features.
for feature in record.features:
print(feature) | [
"def",
"main",
"(",
"gi",
",",
"ranges",
")",
":",
"# TODO: Make it so we can pass a 'db' argument to getSequence.",
"record",
"=",
"getSequence",
"(",
"gi",
")",
"if",
"record",
"is",
"None",
":",
"print",
"(",
"\"Looks like you're offline.\"",
")",
"sys",
".",
"... | Print the features of the genbank entry given by gi. If ranges is
non-emtpy, only print features that include the ranges.
gi: either a hit from a BLAST record, in the form
'gi|63148399|gb|DQ011818.1|' or a gi number (63148399 in this example).
ranges: a possibly empty list of ranges to print information for. Each
range is a non-descending (start, end) pair of integers. | [
"Print",
"the",
"features",
"of",
"the",
"genbank",
"entry",
"given",
"by",
"gi",
".",
"If",
"ranges",
"is",
"non",
"-",
"emtpy",
"only",
"print",
"features",
"that",
"include",
"the",
"ranges",
"."
] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/bin/get-features.py#L10-L39 |
flo-compbio/genometools | genometools/gcloud/compute/instance.py | create_instance | def create_instance(credentials, project, zone, name,
startup_script=None, startup_script_url=None,
metadata=None,
machine_type='f1-micro', tags=None,
disk_size_gb=10, wait_until_done=False):
"""Create instance with startup script.
TODO: docstring"""
if startup_script is not None and startup_script_url is not None:
raise ValueError('Cannot specify a startup script string and URL '
'at the same time!')
access_token = credentials.get_access_token()
if metadata is None:
metadata = {}
meta_items = [{'key': k, 'value': v} for k, v in metadata.items()]
if tags is None:
tags = []
if startup_script is not None:
meta_items.insert(
0, {'key': 'startup-script', 'value': startup_script}
)
elif startup_script_url is not None:
meta_items.insert(
0, {'key': 'startup-script-url', 'value': startup_script_url})
payload = {
"name": name,
"zone": "projects/%s/zones/%s" % (project, zone),
"machineType": "projects/%s/zones/%s/machineTypes/%s"
% (project, zone, machine_type),
"metadata": {
"items": meta_items
},
"tags": {
"items": tags
},
"disks": [
{
"type": "PERSISTENT",
"boot": True,
"mode": "READ_WRITE",
"autoDelete": True,
"deviceName": name,
"initializeParams": {
"sourceImage": "projects/ubuntu-os-cloud/global/images/ubuntu-1604-xenial-v20170815a",
"diskType": "projects/%s/zones/%s/diskTypes/pd-standard" % (project, zone),
"diskSizeGb": str(disk_size_gb)
}
}
],
"canIpForward": False,
"networkInterfaces": [
{
"network": "projects/%s/global/networks/default" % project,
"subnetwork": "projects/%s/regions/%s/subnetworks/default" % (project, zone[:-2]),
"accessConfigs": [
{
"name": "External NAT", "type": "ONE_TO_ONE_NAT"
}
]
}
],
"description": "",
"scheduling": {
"preemptible": False,
"onHostMaintenance": "MIGRATE",
"automaticRestart": True
},
"serviceAccounts": [
{
"email": "default",
"scopes": [
'https://www.googleapis.com/auth/compute',
"https://www.googleapis.com/auth/devstorage.read_write",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring.write",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/trace.append"
]
}
]
}
#header = 'Authorization: Bearer 1/fFBGRNJru1FQd44AzqT3Zg'
headers = {
'Authorization': 'Bearer %s' % access_token.access_token
}
#print('Test:', json.dumps(payload, indent=4, sort_keys=True))
_LOGGER.debug('Access token: %s' % access_token.access_token)
_LOGGER.debug('Payload: %s', json.dumps(payload, sort_keys=True, indent=4))
r = requests.post('https://www.googleapis.com/compute/v1/'
'projects/%s/zones/%s/instances' % (project, zone),
headers=headers, json=payload)
r.raise_for_status()
op_name = r.json()['name']
_LOGGER.info('Submitted request to create intsance '
'(HTTP code: %d).',
r.status_code)
if wait_until_done:
_LOGGER.info('Waiting until operation is done...')
wait_for_zone_op(access_token, project, zone, op_name)
return op_name | python | def create_instance(credentials, project, zone, name,
startup_script=None, startup_script_url=None,
metadata=None,
machine_type='f1-micro', tags=None,
disk_size_gb=10, wait_until_done=False):
"""Create instance with startup script.
TODO: docstring"""
if startup_script is not None and startup_script_url is not None:
raise ValueError('Cannot specify a startup script string and URL '
'at the same time!')
access_token = credentials.get_access_token()
if metadata is None:
metadata = {}
meta_items = [{'key': k, 'value': v} for k, v in metadata.items()]
if tags is None:
tags = []
if startup_script is not None:
meta_items.insert(
0, {'key': 'startup-script', 'value': startup_script}
)
elif startup_script_url is not None:
meta_items.insert(
0, {'key': 'startup-script-url', 'value': startup_script_url})
payload = {
"name": name,
"zone": "projects/%s/zones/%s" % (project, zone),
"machineType": "projects/%s/zones/%s/machineTypes/%s"
% (project, zone, machine_type),
"metadata": {
"items": meta_items
},
"tags": {
"items": tags
},
"disks": [
{
"type": "PERSISTENT",
"boot": True,
"mode": "READ_WRITE",
"autoDelete": True,
"deviceName": name,
"initializeParams": {
"sourceImage": "projects/ubuntu-os-cloud/global/images/ubuntu-1604-xenial-v20170815a",
"diskType": "projects/%s/zones/%s/diskTypes/pd-standard" % (project, zone),
"diskSizeGb": str(disk_size_gb)
}
}
],
"canIpForward": False,
"networkInterfaces": [
{
"network": "projects/%s/global/networks/default" % project,
"subnetwork": "projects/%s/regions/%s/subnetworks/default" % (project, zone[:-2]),
"accessConfigs": [
{
"name": "External NAT", "type": "ONE_TO_ONE_NAT"
}
]
}
],
"description": "",
"scheduling": {
"preemptible": False,
"onHostMaintenance": "MIGRATE",
"automaticRestart": True
},
"serviceAccounts": [
{
"email": "default",
"scopes": [
'https://www.googleapis.com/auth/compute',
"https://www.googleapis.com/auth/devstorage.read_write",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring.write",
"https://www.googleapis.com/auth/servicecontrol",
"https://www.googleapis.com/auth/service.management.readonly",
"https://www.googleapis.com/auth/trace.append"
]
}
]
}
#header = 'Authorization: Bearer 1/fFBGRNJru1FQd44AzqT3Zg'
headers = {
'Authorization': 'Bearer %s' % access_token.access_token
}
#print('Test:', json.dumps(payload, indent=4, sort_keys=True))
_LOGGER.debug('Access token: %s' % access_token.access_token)
_LOGGER.debug('Payload: %s', json.dumps(payload, sort_keys=True, indent=4))
r = requests.post('https://www.googleapis.com/compute/v1/'
'projects/%s/zones/%s/instances' % (project, zone),
headers=headers, json=payload)
r.raise_for_status()
op_name = r.json()['name']
_LOGGER.info('Submitted request to create intsance '
'(HTTP code: %d).',
r.status_code)
if wait_until_done:
_LOGGER.info('Waiting until operation is done...')
wait_for_zone_op(access_token, project, zone, op_name)
return op_name | [
"def",
"create_instance",
"(",
"credentials",
",",
"project",
",",
"zone",
",",
"name",
",",
"startup_script",
"=",
"None",
",",
"startup_script_url",
"=",
"None",
",",
"metadata",
"=",
"None",
",",
"machine_type",
"=",
"'f1-micro'",
",",
"tags",
"=",
"None"... | Create instance with startup script.
TODO: docstring | [
"Create",
"instance",
"with",
"startup",
"script",
"."
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/gcloud/compute/instance.py#L13-L129 |
flo-compbio/genometools | genometools/gcloud/compute/instance.py | delete_instance | def delete_instance(credentials, project, zone, name, wait_until_done=False):
"""Delete an instance.
TODO: docstring
"""
access_token = credentials.get_access_token()
headers = {
'Authorization': 'Bearer %s' % access_token.access_token
}
r = requests.delete('https://www.googleapis.com/compute/v1/'
'projects/%s/zones/%s/instances/%s'
% (project, zone, name),
headers=headers)
r.raise_for_status()
op_name = r.json()['name']
_LOGGER.info('Submitted request to create intsance '
'(HTTP code: %d).',
r.status_code)
if wait_until_done:
_LOGGER.info('Waiting until operation is done...')
wait_for_zone_op(access_token, project, zone, op_name)
return op_name | python | def delete_instance(credentials, project, zone, name, wait_until_done=False):
"""Delete an instance.
TODO: docstring
"""
access_token = credentials.get_access_token()
headers = {
'Authorization': 'Bearer %s' % access_token.access_token
}
r = requests.delete('https://www.googleapis.com/compute/v1/'
'projects/%s/zones/%s/instances/%s'
% (project, zone, name),
headers=headers)
r.raise_for_status()
op_name = r.json()['name']
_LOGGER.info('Submitted request to create intsance '
'(HTTP code: %d).',
r.status_code)
if wait_until_done:
_LOGGER.info('Waiting until operation is done...')
wait_for_zone_op(access_token, project, zone, op_name)
return op_name | [
"def",
"delete_instance",
"(",
"credentials",
",",
"project",
",",
"zone",
",",
"name",
",",
"wait_until_done",
"=",
"False",
")",
":",
"access_token",
"=",
"credentials",
".",
"get_access_token",
"(",
")",
"headers",
"=",
"{",
"'Authorization'",
":",
"'Bearer... | Delete an instance.
TODO: docstring | [
"Delete",
"an",
"instance",
"."
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/gcloud/compute/instance.py#L132-L161 |
flo-compbio/genometools | genometools/gcloud/compute/instance.py | wait_for_instance_deletion | def wait_for_instance_deletion(credentials, project, zone, instance_name,
interval_seconds=5):
"""Wait until an instance is deleted.
We require that initially, the specified instance exists.
TODO: docstring
"""
t0 = time.time()
access_token = credentials.get_access_token()
headers = {
'Authorization': 'Bearer %s' % access_token.access_token
}
r = requests.get('https://www.googleapis.com/compute/v1/'
'projects/%s/zones/%s/instances/%s'
% (project, zone, instance_name),
headers=headers)
if r.status_code == 404:
raise AssertionError('Instance "%s" does not exist!' % instance_name)
r.raise_for_status()
_LOGGER.debug('Instance "%s" exists.', instance_name)
while True:
time.sleep(interval_seconds)
access_token = credentials.get_access_token()
headers = {
'Authorization': 'Bearer %s' % access_token.access_token
}
r = requests.get('https://www.googleapis.com/compute/v1/'
'projects/%s/zones/%s/instances/%s'
% (project, zone, instance_name),
headers=headers)
if r.status_code == 404:
break
r.raise_for_status()
_LOGGER.debug('Instance "%s" still exists.', instance_name)
t1 = time.time()
t = t1-t0
t_min = t/60.0
_LOGGER.info('Instance was deleted after %.1f s (%.1f m).', t, t_min) | python | def wait_for_instance_deletion(credentials, project, zone, instance_name,
interval_seconds=5):
"""Wait until an instance is deleted.
We require that initially, the specified instance exists.
TODO: docstring
"""
t0 = time.time()
access_token = credentials.get_access_token()
headers = {
'Authorization': 'Bearer %s' % access_token.access_token
}
r = requests.get('https://www.googleapis.com/compute/v1/'
'projects/%s/zones/%s/instances/%s'
% (project, zone, instance_name),
headers=headers)
if r.status_code == 404:
raise AssertionError('Instance "%s" does not exist!' % instance_name)
r.raise_for_status()
_LOGGER.debug('Instance "%s" exists.', instance_name)
while True:
time.sleep(interval_seconds)
access_token = credentials.get_access_token()
headers = {
'Authorization': 'Bearer %s' % access_token.access_token
}
r = requests.get('https://www.googleapis.com/compute/v1/'
'projects/%s/zones/%s/instances/%s'
% (project, zone, instance_name),
headers=headers)
if r.status_code == 404:
break
r.raise_for_status()
_LOGGER.debug('Instance "%s" still exists.', instance_name)
t1 = time.time()
t = t1-t0
t_min = t/60.0
_LOGGER.info('Instance was deleted after %.1f s (%.1f m).', t, t_min) | [
"def",
"wait_for_instance_deletion",
"(",
"credentials",
",",
"project",
",",
"zone",
",",
"instance_name",
",",
"interval_seconds",
"=",
"5",
")",
":",
"t0",
"=",
"time",
".",
"time",
"(",
")",
"access_token",
"=",
"credentials",
".",
"get_access_token",
"(",... | Wait until an instance is deleted.
We require that initially, the specified instance exists.
TODO: docstring | [
"Wait",
"until",
"an",
"instance",
"is",
"deleted",
".",
"We",
"require",
"that",
"initially",
"the",
"specified",
"instance",
"exists",
".",
"TODO",
":",
"docstring"
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/gcloud/compute/instance.py#L164-L209 |
acorg/dark-matter | dark/sam.py | samReferencesToStr | def samReferencesToStr(filenameOrSamfile, indent=0):
"""
List SAM/BAM file reference names and lengths.
@param filenameOrSamfile: Either a C{str} SAM/BAM file name or an
instance of C{pysam.AlignmentFile}.
@param indent: An C{int} number of spaces to indent each line.
@return: A C{str} describing known reference names and their lengths.
"""
indent = ' ' * indent
def _references(sam):
result = []
for i in range(sam.nreferences):
result.append('%s%s (length %d)' % (
indent, sam.get_reference_name(i), sam.lengths[i]))
return '\n'.join(result)
if isinstance(filenameOrSamfile, six.string_types):
with samfile(filenameOrSamfile) as sam:
return _references(sam)
else:
return _references(sam) | python | def samReferencesToStr(filenameOrSamfile, indent=0):
"""
List SAM/BAM file reference names and lengths.
@param filenameOrSamfile: Either a C{str} SAM/BAM file name or an
instance of C{pysam.AlignmentFile}.
@param indent: An C{int} number of spaces to indent each line.
@return: A C{str} describing known reference names and their lengths.
"""
indent = ' ' * indent
def _references(sam):
result = []
for i in range(sam.nreferences):
result.append('%s%s (length %d)' % (
indent, sam.get_reference_name(i), sam.lengths[i]))
return '\n'.join(result)
if isinstance(filenameOrSamfile, six.string_types):
with samfile(filenameOrSamfile) as sam:
return _references(sam)
else:
return _references(sam) | [
"def",
"samReferencesToStr",
"(",
"filenameOrSamfile",
",",
"indent",
"=",
"0",
")",
":",
"indent",
"=",
"' '",
"*",
"indent",
"def",
"_references",
"(",
"sam",
")",
":",
"result",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"sam",
".",
"nreferences... | List SAM/BAM file reference names and lengths.
@param filenameOrSamfile: Either a C{str} SAM/BAM file name or an
instance of C{pysam.AlignmentFile}.
@param indent: An C{int} number of spaces to indent each line.
@return: A C{str} describing known reference names and their lengths. | [
"List",
"SAM",
"/",
"BAM",
"file",
"reference",
"names",
"and",
"lengths",
"."
] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/sam.py#L43-L65 |
acorg/dark-matter | dark/sam.py | _hardClip | def _hardClip(sequence, quality, cigartuples):
"""
Hard clip (if necessary) a sequence.
@param sequence: A C{str} nucleotide sequence.
@param quality: A C{str} quality string, or a C{list} of C{int} quality
values as returned by pysam, or C{None} if the SAM file had a '*'
for the quality string (which pysam converts to C{None}).
@param cigartuples: An iterable of (operation, length) tuples, detailing
the alignment, as per the SAM specification.
@return: A 3-tuple consisting of
1) a hard-clipped C{str} sequence if hard-clipping is indicated by
the CIGAR operations.
2) a hard-clipped quality C{str} or C{list} (depending on what
type we were passed) if hard-clipping is indicated by the CIGAR
operations.
3) a Boolean, C{True} if hard clipping was performed by this
function or C{False} if the hard clipping had already been
done.
"""
hardClipCount = cigarLength = 0
for (operation, length) in cigartuples:
hardClipCount += operation == CHARD_CLIP
cigarLength += length if operation in _CONSUMES_QUERY else 0
sequenceLength = len(sequence)
if quality is not None:
assert sequenceLength == len(quality)
clipLeft = clipRight = 0
clippedSequence = sequence
clippedQuality = quality
if sequenceLength > cigarLength:
alreadyClipped = False
else:
assert sequenceLength == cigarLength
alreadyClipped = True
if hardClipCount == 0:
pass
elif hardClipCount == 1:
# Hard clip either at the start or the end.
if cigartuples[0][0] == CHARD_CLIP:
if not alreadyClipped:
clipLeft = cigartuples[0][1]
clippedSequence = sequence[clipLeft:]
if quality is not None:
clippedQuality = quality[clipLeft:]
elif cigartuples[-1][0] == CHARD_CLIP:
if not alreadyClipped:
clipRight = cigartuples[-1][1]
clippedSequence = sequence[:-clipRight]
if quality is not None:
clippedQuality = quality[:-clipRight]
else:
raise ValueError(
'Invalid CIGAR tuples (%s) contains hard-clipping operation '
'that is neither at the start nor the end of the sequence.' %
(cigartuples,))
elif hardClipCount == 2:
# Hard clip at both the start and end.
assert cigartuples[0][0] == cigartuples[-1][0] == CHARD_CLIP
if not alreadyClipped:
clipLeft, clipRight = cigartuples[0][1], cigartuples[-1][1]
clippedSequence = sequence[clipLeft:-clipRight]
if quality is not None:
clippedQuality = quality[clipLeft:-clipRight]
else:
raise ValueError(
'Invalid CIGAR tuples (%s) specifies hard-clipping %d times (2 '
'is the maximum).' % (cigartuples, hardClipCount))
weClipped = bool(clipLeft or clipRight)
if weClipped:
assert not alreadyClipped
if len(clippedSequence) + clipLeft + clipRight != sequenceLength:
raise ValueError(
'Sequence %r (length %d) clipped to %r (length %d), but the '
'difference between these two lengths (%d) is not equal to '
'the sum (%d) of the left and right clip lengths (%d and %d '
'respectively). CIGAR tuples: %s' %
(sequence, len(sequence),
clippedSequence, len(clippedSequence),
abs(len(sequence) - len(clippedSequence)),
clipLeft + clipRight, clipLeft, clipRight, cigartuples))
else:
assert len(clippedSequence) == sequenceLength
if quality is not None:
assert len(clippedQuality) == sequenceLength
return clippedSequence, clippedQuality, weClipped | python | def _hardClip(sequence, quality, cigartuples):
"""
Hard clip (if necessary) a sequence.
@param sequence: A C{str} nucleotide sequence.
@param quality: A C{str} quality string, or a C{list} of C{int} quality
values as returned by pysam, or C{None} if the SAM file had a '*'
for the quality string (which pysam converts to C{None}).
@param cigartuples: An iterable of (operation, length) tuples, detailing
the alignment, as per the SAM specification.
@return: A 3-tuple consisting of
1) a hard-clipped C{str} sequence if hard-clipping is indicated by
the CIGAR operations.
2) a hard-clipped quality C{str} or C{list} (depending on what
type we were passed) if hard-clipping is indicated by the CIGAR
operations.
3) a Boolean, C{True} if hard clipping was performed by this
function or C{False} if the hard clipping had already been
done.
"""
hardClipCount = cigarLength = 0
for (operation, length) in cigartuples:
hardClipCount += operation == CHARD_CLIP
cigarLength += length if operation in _CONSUMES_QUERY else 0
sequenceLength = len(sequence)
if quality is not None:
assert sequenceLength == len(quality)
clipLeft = clipRight = 0
clippedSequence = sequence
clippedQuality = quality
if sequenceLength > cigarLength:
alreadyClipped = False
else:
assert sequenceLength == cigarLength
alreadyClipped = True
if hardClipCount == 0:
pass
elif hardClipCount == 1:
# Hard clip either at the start or the end.
if cigartuples[0][0] == CHARD_CLIP:
if not alreadyClipped:
clipLeft = cigartuples[0][1]
clippedSequence = sequence[clipLeft:]
if quality is not None:
clippedQuality = quality[clipLeft:]
elif cigartuples[-1][0] == CHARD_CLIP:
if not alreadyClipped:
clipRight = cigartuples[-1][1]
clippedSequence = sequence[:-clipRight]
if quality is not None:
clippedQuality = quality[:-clipRight]
else:
raise ValueError(
'Invalid CIGAR tuples (%s) contains hard-clipping operation '
'that is neither at the start nor the end of the sequence.' %
(cigartuples,))
elif hardClipCount == 2:
# Hard clip at both the start and end.
assert cigartuples[0][0] == cigartuples[-1][0] == CHARD_CLIP
if not alreadyClipped:
clipLeft, clipRight = cigartuples[0][1], cigartuples[-1][1]
clippedSequence = sequence[clipLeft:-clipRight]
if quality is not None:
clippedQuality = quality[clipLeft:-clipRight]
else:
raise ValueError(
'Invalid CIGAR tuples (%s) specifies hard-clipping %d times (2 '
'is the maximum).' % (cigartuples, hardClipCount))
weClipped = bool(clipLeft or clipRight)
if weClipped:
assert not alreadyClipped
if len(clippedSequence) + clipLeft + clipRight != sequenceLength:
raise ValueError(
'Sequence %r (length %d) clipped to %r (length %d), but the '
'difference between these two lengths (%d) is not equal to '
'the sum (%d) of the left and right clip lengths (%d and %d '
'respectively). CIGAR tuples: %s' %
(sequence, len(sequence),
clippedSequence, len(clippedSequence),
abs(len(sequence) - len(clippedSequence)),
clipLeft + clipRight, clipLeft, clipRight, cigartuples))
else:
assert len(clippedSequence) == sequenceLength
if quality is not None:
assert len(clippedQuality) == sequenceLength
return clippedSequence, clippedQuality, weClipped | [
"def",
"_hardClip",
"(",
"sequence",
",",
"quality",
",",
"cigartuples",
")",
":",
"hardClipCount",
"=",
"cigarLength",
"=",
"0",
"for",
"(",
"operation",
",",
"length",
")",
"in",
"cigartuples",
":",
"hardClipCount",
"+=",
"operation",
"==",
"CHARD_CLIP",
"... | Hard clip (if necessary) a sequence.
@param sequence: A C{str} nucleotide sequence.
@param quality: A C{str} quality string, or a C{list} of C{int} quality
values as returned by pysam, or C{None} if the SAM file had a '*'
for the quality string (which pysam converts to C{None}).
@param cigartuples: An iterable of (operation, length) tuples, detailing
the alignment, as per the SAM specification.
@return: A 3-tuple consisting of
1) a hard-clipped C{str} sequence if hard-clipping is indicated by
the CIGAR operations.
2) a hard-clipped quality C{str} or C{list} (depending on what
type we were passed) if hard-clipping is indicated by the CIGAR
operations.
3) a Boolean, C{True} if hard clipping was performed by this
function or C{False} if the hard clipping had already been
done. | [
"Hard",
"clip",
"(",
"if",
"necessary",
")",
"a",
"sequence",
"."
] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/sam.py#L68-L159 |
acorg/dark-matter | dark/sam.py | SAMFilter.addFilteringOptions | def addFilteringOptions(parser, samfileIsPositionalArg=False):
"""
Add options to an argument parser for filtering SAM/BAM.
@param samfileIsPositionalArg: If C{True} the SAM/BAM file must
be given as the final argument on the command line (without
being preceded by --samfile).
@param parser: An C{argparse.ArgumentParser} instance.
"""
parser.add_argument(
'%ssamfile' % ('' if samfileIsPositionalArg else '--'),
required=True,
help='The SAM/BAM file to filter.')
parser.add_argument(
'--referenceId', metavar='ID', nargs='+', action='append',
help=('A reference sequence id whose alignments should be kept '
'(alignments against other references will be dropped). '
'If omitted, alignments against all references will be '
'kept. May be repeated.'))
parser.add_argument(
'--dropUnmapped', default=False, action='store_true',
help='If given, unmapped matches will not be output.')
parser.add_argument(
'--dropSecondary', default=False, action='store_true',
help='If given, secondary matches will not be output.')
parser.add_argument(
'--dropSupplementary', default=False, action='store_true',
help='If given, supplementary matches will not be output.')
parser.add_argument(
'--dropDuplicates', default=False, action='store_true',
help=('If given, matches flagged as optical or PCR duplicates '
'will not be output.'))
parser.add_argument(
'--keepQCFailures', default=False, action='store_true',
help=('If given, reads that are considered quality control '
'failures will be included in the output.'))
parser.add_argument(
'--minScore', type=float,
help=('If given, alignments with --scoreTag (default AS) values '
'less than this value will not be output. If given, '
'alignments that do not have a score will not be output.'))
parser.add_argument(
'--maxScore', type=float,
help=('If given, alignments with --scoreTag (default AS) values '
'greater than this value will not be output. If given, '
'alignments that do not have a score will not be output.'))
parser.add_argument(
'--scoreTag', default='AS',
help=('The alignment tag to extract for --minScore and --maxScore '
'comparisons.')) | python | def addFilteringOptions(parser, samfileIsPositionalArg=False):
"""
Add options to an argument parser for filtering SAM/BAM.
@param samfileIsPositionalArg: If C{True} the SAM/BAM file must
be given as the final argument on the command line (without
being preceded by --samfile).
@param parser: An C{argparse.ArgumentParser} instance.
"""
parser.add_argument(
'%ssamfile' % ('' if samfileIsPositionalArg else '--'),
required=True,
help='The SAM/BAM file to filter.')
parser.add_argument(
'--referenceId', metavar='ID', nargs='+', action='append',
help=('A reference sequence id whose alignments should be kept '
'(alignments against other references will be dropped). '
'If omitted, alignments against all references will be '
'kept. May be repeated.'))
parser.add_argument(
'--dropUnmapped', default=False, action='store_true',
help='If given, unmapped matches will not be output.')
parser.add_argument(
'--dropSecondary', default=False, action='store_true',
help='If given, secondary matches will not be output.')
parser.add_argument(
'--dropSupplementary', default=False, action='store_true',
help='If given, supplementary matches will not be output.')
parser.add_argument(
'--dropDuplicates', default=False, action='store_true',
help=('If given, matches flagged as optical or PCR duplicates '
'will not be output.'))
parser.add_argument(
'--keepQCFailures', default=False, action='store_true',
help=('If given, reads that are considered quality control '
'failures will be included in the output.'))
parser.add_argument(
'--minScore', type=float,
help=('If given, alignments with --scoreTag (default AS) values '
'less than this value will not be output. If given, '
'alignments that do not have a score will not be output.'))
parser.add_argument(
'--maxScore', type=float,
help=('If given, alignments with --scoreTag (default AS) values '
'greater than this value will not be output. If given, '
'alignments that do not have a score will not be output.'))
parser.add_argument(
'--scoreTag', default='AS',
help=('The alignment tag to extract for --minScore and --maxScore '
'comparisons.')) | [
"def",
"addFilteringOptions",
"(",
"parser",
",",
"samfileIsPositionalArg",
"=",
"False",
")",
":",
"parser",
".",
"add_argument",
"(",
"'%ssamfile'",
"%",
"(",
"''",
"if",
"samfileIsPositionalArg",
"else",
"'--'",
")",
",",
"required",
"=",
"True",
",",
"help... | Add options to an argument parser for filtering SAM/BAM.
@param samfileIsPositionalArg: If C{True} the SAM/BAM file must
be given as the final argument on the command line (without
being preceded by --samfile).
@param parser: An C{argparse.ArgumentParser} instance. | [
"Add",
"options",
"to",
"an",
"argument",
"parser",
"for",
"filtering",
"SAM",
"/",
"BAM",
"."
] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/sam.py#L211-L269 |
acorg/dark-matter | dark/sam.py | SAMFilter.parseFilteringOptions | def parseFilteringOptions(cls, args, filterRead=None, storeQueryIds=False):
"""
Parse command line options (added in C{addSAMFilteringOptions}.
@param args: The command line arguments, as returned by
C{argparse.parse_args}.
@param filterRead: A one-argument function that accepts a read
and returns C{None} if the read should be omitted in filtering
or else a C{Read} instance.
@param storeQueryIds: If C{True}, query ids will be stored as the
SAM/BAM file is read.
@return: A C{SAMFilter} instance.
"""
referenceIds = (set(chain.from_iterable(args.referenceId))
if args.referenceId else None)
return cls(
args.samfile,
filterRead=filterRead,
referenceIds=referenceIds,
storeQueryIds=storeQueryIds,
dropUnmapped=args.dropUnmapped,
dropSecondary=args.dropSecondary,
dropSupplementary=args.dropSupplementary,
dropDuplicates=args.dropDuplicates,
keepQCFailures=args.keepQCFailures,
minScore=args.minScore,
maxScore=args.maxScore) | python | def parseFilteringOptions(cls, args, filterRead=None, storeQueryIds=False):
"""
Parse command line options (added in C{addSAMFilteringOptions}.
@param args: The command line arguments, as returned by
C{argparse.parse_args}.
@param filterRead: A one-argument function that accepts a read
and returns C{None} if the read should be omitted in filtering
or else a C{Read} instance.
@param storeQueryIds: If C{True}, query ids will be stored as the
SAM/BAM file is read.
@return: A C{SAMFilter} instance.
"""
referenceIds = (set(chain.from_iterable(args.referenceId))
if args.referenceId else None)
return cls(
args.samfile,
filterRead=filterRead,
referenceIds=referenceIds,
storeQueryIds=storeQueryIds,
dropUnmapped=args.dropUnmapped,
dropSecondary=args.dropSecondary,
dropSupplementary=args.dropSupplementary,
dropDuplicates=args.dropDuplicates,
keepQCFailures=args.keepQCFailures,
minScore=args.minScore,
maxScore=args.maxScore) | [
"def",
"parseFilteringOptions",
"(",
"cls",
",",
"args",
",",
"filterRead",
"=",
"None",
",",
"storeQueryIds",
"=",
"False",
")",
":",
"referenceIds",
"=",
"(",
"set",
"(",
"chain",
".",
"from_iterable",
"(",
"args",
".",
"referenceId",
")",
")",
"if",
"... | Parse command line options (added in C{addSAMFilteringOptions}.
@param args: The command line arguments, as returned by
C{argparse.parse_args}.
@param filterRead: A one-argument function that accepts a read
and returns C{None} if the read should be omitted in filtering
or else a C{Read} instance.
@param storeQueryIds: If C{True}, query ids will be stored as the
SAM/BAM file is read.
@return: A C{SAMFilter} instance. | [
"Parse",
"command",
"line",
"options",
"(",
"added",
"in",
"C",
"{",
"addSAMFilteringOptions",
"}",
"."
] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/sam.py#L272-L299 |
acorg/dark-matter | dark/sam.py | SAMFilter.alignments | def alignments(self):
"""
Get alignments from the SAM/BAM file, subject to filtering.
"""
referenceIds = self.referenceIds
dropUnmapped = self.dropUnmapped
dropSecondary = self.dropSecondary
dropSupplementary = self.dropSupplementary
dropDuplicates = self.dropDuplicates
keepQCFailures = self.keepQCFailures
storeQueryIds = self.storeQueryIds
filterRead = self.filterRead
minScore = self.minScore
maxScore = self.maxScore
scoreTag = self.scoreTag
if storeQueryIds:
self.queryIds = queryIds = set()
lastAlignment = None
count = 0
with samfile(self.filename) as samAlignment:
for count, alignment in enumerate(samAlignment.fetch(), start=1):
if storeQueryIds:
queryIds.add(alignment.query_name)
if minScore is not None or maxScore is not None:
try:
score = alignment.get_tag(scoreTag)
except KeyError:
continue
else:
if ((minScore is not None and score < minScore) or
(maxScore is not None and score > maxScore)):
continue
# Secondary and supplementary alignments may have a '*'
# (pysam returns this as None) SEQ field, indicating that
# the previous sequence should be used. This is best
# practice according to section 2.5.2 of
# https://samtools.github.io/hts-specs/SAMv1.pdf So we use
# the last alignment query and quality strings if we get
# None as a query sequence.
if alignment.query_sequence is None:
if lastAlignment is None:
raise InvalidSAM(
'pysam produced an alignment (number %d) with no '
'query sequence without previously giving an '
'alignment with a sequence.' % count)
# Use the previous query sequence and quality. I'm not
# making the call to _hardClip dependent on
# alignment.cigartuples (as in the else clause below)
# because I don't think it's possible for
# alignment.cigartuples to be None in this case. If we
# have a second match on a query, then it must be
# aligned to something (i.e., it cannot be unmapped
# with no CIGAR string). The assertion will tell us if
# this is ever not the case.
assert alignment.cigartuples
(alignment.query_sequence,
alignment.query_qualities, _) = _hardClip(
lastAlignment.query_sequence,
lastAlignment.query_qualities,
alignment.cigartuples)
else:
lastAlignment = alignment
if alignment.cigartuples:
(alignment.query_sequence,
alignment.query_qualities, _) = _hardClip(
alignment.query_sequence,
alignment.query_qualities,
alignment.cigartuples)
if ((filterRead is None or
filterRead(Read(alignment.query_name,
alignment.query_sequence,
alignment.qual))) and
not (
(referenceIds and
alignment.reference_name not in referenceIds) or
(alignment.is_unmapped and dropUnmapped) or
(alignment.is_secondary and dropSecondary) or
(alignment.is_supplementary and dropSupplementary) or
(alignment.is_duplicate and dropDuplicates) or
(alignment.is_qcfail and not keepQCFailures))):
yield alignment
self.alignmentCount = count | python | def alignments(self):
"""
Get alignments from the SAM/BAM file, subject to filtering.
"""
referenceIds = self.referenceIds
dropUnmapped = self.dropUnmapped
dropSecondary = self.dropSecondary
dropSupplementary = self.dropSupplementary
dropDuplicates = self.dropDuplicates
keepQCFailures = self.keepQCFailures
storeQueryIds = self.storeQueryIds
filterRead = self.filterRead
minScore = self.minScore
maxScore = self.maxScore
scoreTag = self.scoreTag
if storeQueryIds:
self.queryIds = queryIds = set()
lastAlignment = None
count = 0
with samfile(self.filename) as samAlignment:
for count, alignment in enumerate(samAlignment.fetch(), start=1):
if storeQueryIds:
queryIds.add(alignment.query_name)
if minScore is not None or maxScore is not None:
try:
score = alignment.get_tag(scoreTag)
except KeyError:
continue
else:
if ((minScore is not None and score < minScore) or
(maxScore is not None and score > maxScore)):
continue
# Secondary and supplementary alignments may have a '*'
# (pysam returns this as None) SEQ field, indicating that
# the previous sequence should be used. This is best
# practice according to section 2.5.2 of
# https://samtools.github.io/hts-specs/SAMv1.pdf So we use
# the last alignment query and quality strings if we get
# None as a query sequence.
if alignment.query_sequence is None:
if lastAlignment is None:
raise InvalidSAM(
'pysam produced an alignment (number %d) with no '
'query sequence without previously giving an '
'alignment with a sequence.' % count)
# Use the previous query sequence and quality. I'm not
# making the call to _hardClip dependent on
# alignment.cigartuples (as in the else clause below)
# because I don't think it's possible for
# alignment.cigartuples to be None in this case. If we
# have a second match on a query, then it must be
# aligned to something (i.e., it cannot be unmapped
# with no CIGAR string). The assertion will tell us if
# this is ever not the case.
assert alignment.cigartuples
(alignment.query_sequence,
alignment.query_qualities, _) = _hardClip(
lastAlignment.query_sequence,
lastAlignment.query_qualities,
alignment.cigartuples)
else:
lastAlignment = alignment
if alignment.cigartuples:
(alignment.query_sequence,
alignment.query_qualities, _) = _hardClip(
alignment.query_sequence,
alignment.query_qualities,
alignment.cigartuples)
if ((filterRead is None or
filterRead(Read(alignment.query_name,
alignment.query_sequence,
alignment.qual))) and
not (
(referenceIds and
alignment.reference_name not in referenceIds) or
(alignment.is_unmapped and dropUnmapped) or
(alignment.is_secondary and dropSecondary) or
(alignment.is_supplementary and dropSupplementary) or
(alignment.is_duplicate and dropDuplicates) or
(alignment.is_qcfail and not keepQCFailures))):
yield alignment
self.alignmentCount = count | [
"def",
"alignments",
"(",
"self",
")",
":",
"referenceIds",
"=",
"self",
".",
"referenceIds",
"dropUnmapped",
"=",
"self",
".",
"dropUnmapped",
"dropSecondary",
"=",
"self",
".",
"dropSecondary",
"dropSupplementary",
"=",
"self",
".",
"dropSupplementary",
"dropDup... | Get alignments from the SAM/BAM file, subject to filtering. | [
"Get",
"alignments",
"from",
"the",
"SAM",
"/",
"BAM",
"file",
"subject",
"to",
"filtering",
"."
] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/sam.py#L301-L388 |
acorg/dark-matter | dark/sam.py | SAMFilter.referenceLengths | def referenceLengths(self):
"""
Get the lengths of wanted references.
@raise UnknownReference: If a reference id is not present in the
SAM/BAM file.
@return: A C{dict} of C{str} reference id to C{int} length with a key
for each reference id in C{self.referenceIds} or for all references
if C{self.referenceIds} is C{None}.
"""
result = {}
with samfile(self.filename) as sam:
if self.referenceIds:
for referenceId in self.referenceIds:
tid = sam.get_tid(referenceId)
if tid == -1:
raise UnknownReference(
'Reference %r is not present in the SAM/BAM file.'
% referenceId)
else:
result[referenceId] = sam.lengths[tid]
else:
result = dict(zip(sam.references, sam.lengths))
return result | python | def referenceLengths(self):
"""
Get the lengths of wanted references.
@raise UnknownReference: If a reference id is not present in the
SAM/BAM file.
@return: A C{dict} of C{str} reference id to C{int} length with a key
for each reference id in C{self.referenceIds} or for all references
if C{self.referenceIds} is C{None}.
"""
result = {}
with samfile(self.filename) as sam:
if self.referenceIds:
for referenceId in self.referenceIds:
tid = sam.get_tid(referenceId)
if tid == -1:
raise UnknownReference(
'Reference %r is not present in the SAM/BAM file.'
% referenceId)
else:
result[referenceId] = sam.lengths[tid]
else:
result = dict(zip(sam.references, sam.lengths))
return result | [
"def",
"referenceLengths",
"(",
"self",
")",
":",
"result",
"=",
"{",
"}",
"with",
"samfile",
"(",
"self",
".",
"filename",
")",
"as",
"sam",
":",
"if",
"self",
".",
"referenceIds",
":",
"for",
"referenceId",
"in",
"self",
".",
"referenceIds",
":",
"ti... | Get the lengths of wanted references.
@raise UnknownReference: If a reference id is not present in the
SAM/BAM file.
@return: A C{dict} of C{str} reference id to C{int} length with a key
for each reference id in C{self.referenceIds} or for all references
if C{self.referenceIds} is C{None}. | [
"Get",
"the",
"lengths",
"of",
"wanted",
"references",
"."
] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/sam.py#L390-L414 |
acorg/dark-matter | dark/sam.py | PaddedSAM.queries | def queries(self, rcSuffix='', rcNeeded=False, padChar='-',
queryInsertionChar='N', unknownQualityChar='!',
allowDuplicateIds=False, addAlignment=False):
"""
Produce padded (with gaps) queries according to the CIGAR string and
reference sequence length for each matching query sequence.
@param rcSuffix: A C{str} to add to the end of query names that are
reverse complemented. This is added before the /1, /2, etc., that
are added for duplicated ids (if there are duplicates and
C{allowDuplicateIds} is C{False}.
@param rcNeeded: If C{True}, queries that are flagged as matching when
reverse complemented should have reverse complementing when
preparing the output sequences. This must be used if the program
that created the SAM/BAM input flags reversed matches but does not
also store the reverse complemented query.
@param padChar: A C{str} of length one to use to pad queries with to
make them the same length as the reference sequence.
@param queryInsertionChar: A C{str} of length one to use to insert
into queries when the CIGAR string indicates that the alignment
of a query would cause a deletion in the reference. This character
is inserted as a 'missing' query character (i.e., a base that can
be assumed to have been lost due to an error) whose existence is
necessary for the match to continue.
@param unknownQualityChar: The character to put into the quality
string when unknown bases are inserted in the query or the query
is padded on the left/right with gaps.
@param allowDuplicateIds: If C{True}, repeated query ids (due to
secondary or supplemental matches) will not have /1, /2, etc.
appended to their ids. So repeated ids may appear in the yielded
FASTA.
@param addAlignment: If C{True} the reads yielded by the returned
generator will also have an C{alignment} attribute, being the
C{pysam.AlignedSegment} for the query.
@raises InvalidSAM: If a query has an empty SEQ field and either there
is no previous alignment or the alignment is not marked as
secondary or supplementary.
@return: A generator that yields C{Read} instances that are padded
with gap characters to align them to the length of the reference
sequence. See C{addAlignment}, above, to yield reads with the
corresponding C{pysam.AlignedSegment}.
"""
referenceLength = self.referenceLength
# Hold the count for each id so we can add /1, /2 etc to duplicate
# ids (unless --allowDuplicateIds was given).
idCount = Counter()
MATCH_OPERATIONS = {CMATCH, CEQUAL, CDIFF}
for lineNumber, alignment in enumerate(
self.samFilter.alignments(), start=1):
query = alignment.query_sequence
quality = ''.join(chr(q + 33) for q in alignment.query_qualities)
if alignment.is_reverse:
if rcNeeded:
query = DNARead('id', query).reverseComplement().sequence
quality = quality[::-1]
if rcSuffix:
alignment.query_name += rcSuffix
# Adjust the query id if it's a duplicate and we're not allowing
# duplicates.
if allowDuplicateIds:
queryId = alignment.query_name
else:
count = idCount[alignment.query_name]
idCount[alignment.query_name] += 1
queryId = alignment.query_name + (
'' if count == 0 else '/%d' % count)
referenceStart = alignment.reference_start
atStart = True
queryIndex = 0
referenceIndex = referenceStart
alignedSequence = ''
alignedQuality = ''
for operation, length in alignment.cigartuples:
# The operations are tested in the order they appear in
# https://samtools.github.io/hts-specs/SAMv1.pdf It would be
# more efficient to test them in order of frequency of
# occurrence.
if operation in MATCH_OPERATIONS:
atStart = False
alignedSequence += query[queryIndex:queryIndex + length]
alignedQuality += quality[queryIndex:queryIndex + length]
elif operation == CINS:
# Insertion to the reference. This consumes query bases but
# we don't output them because the reference cannot be
# changed. I.e., these bases in the query would need to be
# inserted into the reference. Remove these bases from the
# query but record what would have been inserted into the
# reference.
atStart = False
self.referenceInsertions[queryId].append(
(referenceIndex,
query[queryIndex:queryIndex + length]))
elif operation == CDEL:
# Delete from the reference. Some bases from the reference
# would need to be deleted to continue the match. So we put
# an insertion into the query to compensate.
atStart = False
alignedSequence += queryInsertionChar * length
alignedQuality += unknownQualityChar * length
elif operation == CREF_SKIP:
# Skipped reference. Opens a gap in the query. For
# mRNA-to-genome alignment, an N operation represents an
# intron. For other types of alignments, the
# interpretation of N is not defined. So this is unlikely
# to occur.
atStart = False
alignedSequence += queryInsertionChar * length
alignedQuality += unknownQualityChar * length
elif operation == CSOFT_CLIP:
# Bases in the query that are not part of the match. We
# remove these from the query if they protrude before the
# start or after the end of the reference. According to the
# SAM docs, 'S' operations may only have 'H' operations
# between them and the ends of the CIGAR string.
if atStart:
# Don't set atStart=False, in case there's another 'S'
# operation.
unwantedLeft = length - referenceStart
if unwantedLeft > 0:
# The query protrudes left. Copy its right part.
alignedSequence += query[
queryIndex + unwantedLeft:queryIndex + length]
alignedQuality += quality[
queryIndex + unwantedLeft:queryIndex + length]
referenceStart = 0
else:
referenceStart -= length
alignedSequence += query[
queryIndex:queryIndex + length]
alignedQuality += quality[
queryIndex:queryIndex + length]
else:
unwantedRight = (
(referenceStart + len(alignedSequence) + length) -
referenceLength)
if unwantedRight > 0:
# The query protrudes right. Copy its left part.
alignedSequence += query[
queryIndex:queryIndex + length - unwantedRight]
alignedQuality += quality[
queryIndex:queryIndex + length - unwantedRight]
else:
alignedSequence += query[
queryIndex:queryIndex + length]
alignedQuality += quality[
queryIndex:queryIndex + length]
elif operation == CHARD_CLIP:
# Some bases have been completely removed from the query.
# This (H) can only be present as the first and/or last
# operation. There is nothing to do as the bases are simply
# not present in the query string in the SAM/BAM file.
pass
elif operation == CPAD:
# This is "silent deletion from the padded reference",
# which consumes neither query nor reference.
atStart = False
else:
raise ValueError('Unknown CIGAR operation:', operation)
if operation in _CONSUMES_QUERY:
queryIndex += length
if operation in _CONSUMES_REFERENCE:
referenceIndex += length
if queryIndex != len(query):
# Oops, we did not consume the entire query.
raise ValueError(
'Query %r not fully consumed when parsing CIGAR string. '
'Query %r (len %d), final query index %d, CIGAR: %r' %
(alignment.query_name, query, len(query), queryIndex,
alignment.cigartuples))
# We cannot test we consumed the entire reference. The CIGAR
# string applies to (and exhausts) the query but is silent
# about the part of the reference that lies to the right of the
# aligned query.
# Put gap characters before and after the aligned sequence so that
# it is offset properly and matches the length of the reference.
padRightLength = (referenceLength -
(referenceStart + len(alignedSequence)))
paddedSequence = (padChar * referenceStart +
alignedSequence +
padChar * padRightLength)
paddedQuality = (unknownQualityChar * referenceStart +
alignedQuality +
unknownQualityChar * padRightLength)
read = Read(queryId, paddedSequence, paddedQuality)
if addAlignment:
read.alignment = alignment
yield read | python | def queries(self, rcSuffix='', rcNeeded=False, padChar='-',
queryInsertionChar='N', unknownQualityChar='!',
allowDuplicateIds=False, addAlignment=False):
"""
Produce padded (with gaps) queries according to the CIGAR string and
reference sequence length for each matching query sequence.
@param rcSuffix: A C{str} to add to the end of query names that are
reverse complemented. This is added before the /1, /2, etc., that
are added for duplicated ids (if there are duplicates and
C{allowDuplicateIds} is C{False}.
@param rcNeeded: If C{True}, queries that are flagged as matching when
reverse complemented should have reverse complementing when
preparing the output sequences. This must be used if the program
that created the SAM/BAM input flags reversed matches but does not
also store the reverse complemented query.
@param padChar: A C{str} of length one to use to pad queries with to
make them the same length as the reference sequence.
@param queryInsertionChar: A C{str} of length one to use to insert
into queries when the CIGAR string indicates that the alignment
of a query would cause a deletion in the reference. This character
is inserted as a 'missing' query character (i.e., a base that can
be assumed to have been lost due to an error) whose existence is
necessary for the match to continue.
@param unknownQualityChar: The character to put into the quality
string when unknown bases are inserted in the query or the query
is padded on the left/right with gaps.
@param allowDuplicateIds: If C{True}, repeated query ids (due to
secondary or supplemental matches) will not have /1, /2, etc.
appended to their ids. So repeated ids may appear in the yielded
FASTA.
@param addAlignment: If C{True} the reads yielded by the returned
generator will also have an C{alignment} attribute, being the
C{pysam.AlignedSegment} for the query.
@raises InvalidSAM: If a query has an empty SEQ field and either there
is no previous alignment or the alignment is not marked as
secondary or supplementary.
@return: A generator that yields C{Read} instances that are padded
with gap characters to align them to the length of the reference
sequence. See C{addAlignment}, above, to yield reads with the
corresponding C{pysam.AlignedSegment}.
"""
referenceLength = self.referenceLength
# Hold the count for each id so we can add /1, /2 etc to duplicate
# ids (unless --allowDuplicateIds was given).
idCount = Counter()
MATCH_OPERATIONS = {CMATCH, CEQUAL, CDIFF}
for lineNumber, alignment in enumerate(
self.samFilter.alignments(), start=1):
query = alignment.query_sequence
quality = ''.join(chr(q + 33) for q in alignment.query_qualities)
if alignment.is_reverse:
if rcNeeded:
query = DNARead('id', query).reverseComplement().sequence
quality = quality[::-1]
if rcSuffix:
alignment.query_name += rcSuffix
# Adjust the query id if it's a duplicate and we're not allowing
# duplicates.
if allowDuplicateIds:
queryId = alignment.query_name
else:
count = idCount[alignment.query_name]
idCount[alignment.query_name] += 1
queryId = alignment.query_name + (
'' if count == 0 else '/%d' % count)
referenceStart = alignment.reference_start
atStart = True
queryIndex = 0
referenceIndex = referenceStart
alignedSequence = ''
alignedQuality = ''
for operation, length in alignment.cigartuples:
# The operations are tested in the order they appear in
# https://samtools.github.io/hts-specs/SAMv1.pdf It would be
# more efficient to test them in order of frequency of
# occurrence.
if operation in MATCH_OPERATIONS:
atStart = False
alignedSequence += query[queryIndex:queryIndex + length]
alignedQuality += quality[queryIndex:queryIndex + length]
elif operation == CINS:
# Insertion to the reference. This consumes query bases but
# we don't output them because the reference cannot be
# changed. I.e., these bases in the query would need to be
# inserted into the reference. Remove these bases from the
# query but record what would have been inserted into the
# reference.
atStart = False
self.referenceInsertions[queryId].append(
(referenceIndex,
query[queryIndex:queryIndex + length]))
elif operation == CDEL:
# Delete from the reference. Some bases from the reference
# would need to be deleted to continue the match. So we put
# an insertion into the query to compensate.
atStart = False
alignedSequence += queryInsertionChar * length
alignedQuality += unknownQualityChar * length
elif operation == CREF_SKIP:
# Skipped reference. Opens a gap in the query. For
# mRNA-to-genome alignment, an N operation represents an
# intron. For other types of alignments, the
# interpretation of N is not defined. So this is unlikely
# to occur.
atStart = False
alignedSequence += queryInsertionChar * length
alignedQuality += unknownQualityChar * length
elif operation == CSOFT_CLIP:
# Bases in the query that are not part of the match. We
# remove these from the query if they protrude before the
# start or after the end of the reference. According to the
# SAM docs, 'S' operations may only have 'H' operations
# between them and the ends of the CIGAR string.
if atStart:
# Don't set atStart=False, in case there's another 'S'
# operation.
unwantedLeft = length - referenceStart
if unwantedLeft > 0:
# The query protrudes left. Copy its right part.
alignedSequence += query[
queryIndex + unwantedLeft:queryIndex + length]
alignedQuality += quality[
queryIndex + unwantedLeft:queryIndex + length]
referenceStart = 0
else:
referenceStart -= length
alignedSequence += query[
queryIndex:queryIndex + length]
alignedQuality += quality[
queryIndex:queryIndex + length]
else:
unwantedRight = (
(referenceStart + len(alignedSequence) + length) -
referenceLength)
if unwantedRight > 0:
# The query protrudes right. Copy its left part.
alignedSequence += query[
queryIndex:queryIndex + length - unwantedRight]
alignedQuality += quality[
queryIndex:queryIndex + length - unwantedRight]
else:
alignedSequence += query[
queryIndex:queryIndex + length]
alignedQuality += quality[
queryIndex:queryIndex + length]
elif operation == CHARD_CLIP:
# Some bases have been completely removed from the query.
# This (H) can only be present as the first and/or last
# operation. There is nothing to do as the bases are simply
# not present in the query string in the SAM/BAM file.
pass
elif operation == CPAD:
# This is "silent deletion from the padded reference",
# which consumes neither query nor reference.
atStart = False
else:
raise ValueError('Unknown CIGAR operation:', operation)
if operation in _CONSUMES_QUERY:
queryIndex += length
if operation in _CONSUMES_REFERENCE:
referenceIndex += length
if queryIndex != len(query):
# Oops, we did not consume the entire query.
raise ValueError(
'Query %r not fully consumed when parsing CIGAR string. '
'Query %r (len %d), final query index %d, CIGAR: %r' %
(alignment.query_name, query, len(query), queryIndex,
alignment.cigartuples))
# We cannot test we consumed the entire reference. The CIGAR
# string applies to (and exhausts) the query but is silent
# about the part of the reference that lies to the right of the
# aligned query.
# Put gap characters before and after the aligned sequence so that
# it is offset properly and matches the length of the reference.
padRightLength = (referenceLength -
(referenceStart + len(alignedSequence)))
paddedSequence = (padChar * referenceStart +
alignedSequence +
padChar * padRightLength)
paddedQuality = (unknownQualityChar * referenceStart +
alignedQuality +
unknownQualityChar * padRightLength)
read = Read(queryId, paddedSequence, paddedQuality)
if addAlignment:
read.alignment = alignment
yield read | [
"def",
"queries",
"(",
"self",
",",
"rcSuffix",
"=",
"''",
",",
"rcNeeded",
"=",
"False",
",",
"padChar",
"=",
"'-'",
",",
"queryInsertionChar",
"=",
"'N'",
",",
"unknownQualityChar",
"=",
"'!'",
",",
"allowDuplicateIds",
"=",
"False",
",",
"addAlignment",
... | Produce padded (with gaps) queries according to the CIGAR string and
reference sequence length for each matching query sequence.
@param rcSuffix: A C{str} to add to the end of query names that are
reverse complemented. This is added before the /1, /2, etc., that
are added for duplicated ids (if there are duplicates and
C{allowDuplicateIds} is C{False}.
@param rcNeeded: If C{True}, queries that are flagged as matching when
reverse complemented should have reverse complementing when
preparing the output sequences. This must be used if the program
that created the SAM/BAM input flags reversed matches but does not
also store the reverse complemented query.
@param padChar: A C{str} of length one to use to pad queries with to
make them the same length as the reference sequence.
@param queryInsertionChar: A C{str} of length one to use to insert
into queries when the CIGAR string indicates that the alignment
of a query would cause a deletion in the reference. This character
is inserted as a 'missing' query character (i.e., a base that can
be assumed to have been lost due to an error) whose existence is
necessary for the match to continue.
@param unknownQualityChar: The character to put into the quality
string when unknown bases are inserted in the query or the query
is padded on the left/right with gaps.
@param allowDuplicateIds: If C{True}, repeated query ids (due to
secondary or supplemental matches) will not have /1, /2, etc.
appended to their ids. So repeated ids may appear in the yielded
FASTA.
@param addAlignment: If C{True} the reads yielded by the returned
generator will also have an C{alignment} attribute, being the
C{pysam.AlignedSegment} for the query.
@raises InvalidSAM: If a query has an empty SEQ field and either there
is no previous alignment or the alignment is not marked as
secondary or supplementary.
@return: A generator that yields C{Read} instances that are padded
with gap characters to align them to the length of the reference
sequence. See C{addAlignment}, above, to yield reads with the
corresponding C{pysam.AlignedSegment}. | [
"Produce",
"padded",
"(",
"with",
"gaps",
")",
"queries",
"according",
"to",
"the",
"CIGAR",
"string",
"and",
"reference",
"sequence",
"length",
"for",
"each",
"matching",
"query",
"sequence",
"."
] | train | https://github.com/acorg/dark-matter/blob/c78a1bf262667fa5db3548fa7066c4ec14d0551d/dark/sam.py#L449-L653 |
nkavaldj/myhdl_lib | myhdl_lib/mux.py | mux | def mux(sel, ls_di, do):
""" Multiplexes a list of input signals to an output signal
do = sl_di[sel]
sel - select index
ls_di - list of input signals
do - output signals
"""
N = len(ls_di)
@always_comb
def _mux():
do.next = 0
for i in range(N):
if i == sel:
do.next = ls_di[i]
return _mux | python | def mux(sel, ls_di, do):
""" Multiplexes a list of input signals to an output signal
do = sl_di[sel]
sel - select index
ls_di - list of input signals
do - output signals
"""
N = len(ls_di)
@always_comb
def _mux():
do.next = 0
for i in range(N):
if i == sel:
do.next = ls_di[i]
return _mux | [
"def",
"mux",
"(",
"sel",
",",
"ls_di",
",",
"do",
")",
":",
"N",
"=",
"len",
"(",
"ls_di",
")",
"@",
"always_comb",
"def",
"_mux",
"(",
")",
":",
"do",
".",
"next",
"=",
"0",
"for",
"i",
"in",
"range",
"(",
"N",
")",
":",
"if",
"i",
"==",
... | Multiplexes a list of input signals to an output signal
do = sl_di[sel]
sel - select index
ls_di - list of input signals
do - output signals | [
"Multiplexes",
"a",
"list",
"of",
"input",
"signals",
"to",
"an",
"output",
"signal",
"do",
"=",
"sl_di",
"[",
"sel",
"]"
] | train | https://github.com/nkavaldj/myhdl_lib/blob/9902afd2031e7847373f692821b2135fd0810aa8/myhdl_lib/mux.py#L4-L20 |
nkavaldj/myhdl_lib | myhdl_lib/mux.py | demux | def demux(sel, di, ls_do):
""" Demultiplexes an input signal to a list of output signals
ls_do[sel] = di
sel - select index
di - input signal
ls_do - list of output signals
"""
N = len(ls_do)
@always_comb
def _demux():
for i in range(N):
ls_do[i].next = 0
if i == sel:
ls_do[i].next = di
return _demux | python | def demux(sel, di, ls_do):
""" Demultiplexes an input signal to a list of output signals
ls_do[sel] = di
sel - select index
di - input signal
ls_do - list of output signals
"""
N = len(ls_do)
@always_comb
def _demux():
for i in range(N):
ls_do[i].next = 0
if i == sel:
ls_do[i].next = di
return _demux | [
"def",
"demux",
"(",
"sel",
",",
"di",
",",
"ls_do",
")",
":",
"N",
"=",
"len",
"(",
"ls_do",
")",
"@",
"always_comb",
"def",
"_demux",
"(",
")",
":",
"for",
"i",
"in",
"range",
"(",
"N",
")",
":",
"ls_do",
"[",
"i",
"]",
".",
"next",
"=",
... | Demultiplexes an input signal to a list of output signals
ls_do[sel] = di
sel - select index
di - input signal
ls_do - list of output signals | [
"Demultiplexes",
"an",
"input",
"signal",
"to",
"a",
"list",
"of",
"output",
"signals",
"ls_do",
"[",
"sel",
"]",
"=",
"di"
] | train | https://github.com/nkavaldj/myhdl_lib/blob/9902afd2031e7847373f692821b2135fd0810aa8/myhdl_lib/mux.py#L23-L38 |
nkavaldj/myhdl_lib | myhdl_lib/mux.py | ls_mux | def ls_mux(sel, lsls_di, ls_do):
""" Multiplexes a list of input signal structures to an output structure.
A structure is represented by a list of signals: [signal_1, signal_2, ..., signal_n]
ls_do[0] = lsls_di[sel][0]
ls_do[1] = lsls_di[sel][1]
...
ls_do[n] = lsls_di[sel][n]
sel - select index
lsls_di - list of input signal structures: [[sig, sig, ..., sig], [sig, sig, ..., sig], ..., [sig, sig, ..., sig]]
ls_do - output signal structure: [sig, sig, ..., sig]
"""
N = len(ls_do)
lsls_in = [list(x) for x in zip(*lsls_di)]
return [mux(sel, lsls_in[i], ls_do[i]) for i in range(N)] | python | def ls_mux(sel, lsls_di, ls_do):
""" Multiplexes a list of input signal structures to an output structure.
A structure is represented by a list of signals: [signal_1, signal_2, ..., signal_n]
ls_do[0] = lsls_di[sel][0]
ls_do[1] = lsls_di[sel][1]
...
ls_do[n] = lsls_di[sel][n]
sel - select index
lsls_di - list of input signal structures: [[sig, sig, ..., sig], [sig, sig, ..., sig], ..., [sig, sig, ..., sig]]
ls_do - output signal structure: [sig, sig, ..., sig]
"""
N = len(ls_do)
lsls_in = [list(x) for x in zip(*lsls_di)]
return [mux(sel, lsls_in[i], ls_do[i]) for i in range(N)] | [
"def",
"ls_mux",
"(",
"sel",
",",
"lsls_di",
",",
"ls_do",
")",
":",
"N",
"=",
"len",
"(",
"ls_do",
")",
"lsls_in",
"=",
"[",
"list",
"(",
"x",
")",
"for",
"x",
"in",
"zip",
"(",
"*",
"lsls_di",
")",
"]",
"return",
"[",
"mux",
"(",
"sel",
","... | Multiplexes a list of input signal structures to an output structure.
A structure is represented by a list of signals: [signal_1, signal_2, ..., signal_n]
ls_do[0] = lsls_di[sel][0]
ls_do[1] = lsls_di[sel][1]
...
ls_do[n] = lsls_di[sel][n]
sel - select index
lsls_di - list of input signal structures: [[sig, sig, ..., sig], [sig, sig, ..., sig], ..., [sig, sig, ..., sig]]
ls_do - output signal structure: [sig, sig, ..., sig] | [
"Multiplexes",
"a",
"list",
"of",
"input",
"signal",
"structures",
"to",
"an",
"output",
"structure",
".",
"A",
"structure",
"is",
"represented",
"by",
"a",
"list",
"of",
"signals",
":",
"[",
"signal_1",
"signal_2",
"...",
"signal_n",
"]",
"ls_do",
"[",
"0... | train | https://github.com/nkavaldj/myhdl_lib/blob/9902afd2031e7847373f692821b2135fd0810aa8/myhdl_lib/mux.py#L41-L55 |
nkavaldj/myhdl_lib | myhdl_lib/mux.py | ls_demux | def ls_demux(sel, ls_di, lsls_do):
""" Demultiplexes an input signal structure to list of output structures.
A structure is represented by a list of signals: [signal_1, signal_2, ..., signal_n]
lsls_do[sel][0] = ls_di[0]
lsls_do[sel][1] = ls_di[1]
...
lsls_do[sel][n] = ls_di[n]
sel - select index
ls_di - input signal structure: [sig, sig, ..., sig]
lsls_do - list of output signal structures: [[sig, sig, ..., sig], [sig, sig, ..., sig], ..., [sig, sig, ..., sig]]
"""
N = len (ls_di)
lsls_out = [list(x) for x in zip(*lsls_do)]
return [demux(sel, ls_di[i], lsls_out[i])for i in range(N)] | python | def ls_demux(sel, ls_di, lsls_do):
""" Demultiplexes an input signal structure to list of output structures.
A structure is represented by a list of signals: [signal_1, signal_2, ..., signal_n]
lsls_do[sel][0] = ls_di[0]
lsls_do[sel][1] = ls_di[1]
...
lsls_do[sel][n] = ls_di[n]
sel - select index
ls_di - input signal structure: [sig, sig, ..., sig]
lsls_do - list of output signal structures: [[sig, sig, ..., sig], [sig, sig, ..., sig], ..., [sig, sig, ..., sig]]
"""
N = len (ls_di)
lsls_out = [list(x) for x in zip(*lsls_do)]
return [demux(sel, ls_di[i], lsls_out[i])for i in range(N)] | [
"def",
"ls_demux",
"(",
"sel",
",",
"ls_di",
",",
"lsls_do",
")",
":",
"N",
"=",
"len",
"(",
"ls_di",
")",
"lsls_out",
"=",
"[",
"list",
"(",
"x",
")",
"for",
"x",
"in",
"zip",
"(",
"*",
"lsls_do",
")",
"]",
"return",
"[",
"demux",
"(",
"sel",
... | Demultiplexes an input signal structure to list of output structures.
A structure is represented by a list of signals: [signal_1, signal_2, ..., signal_n]
lsls_do[sel][0] = ls_di[0]
lsls_do[sel][1] = ls_di[1]
...
lsls_do[sel][n] = ls_di[n]
sel - select index
ls_di - input signal structure: [sig, sig, ..., sig]
lsls_do - list of output signal structures: [[sig, sig, ..., sig], [sig, sig, ..., sig], ..., [sig, sig, ..., sig]] | [
"Demultiplexes",
"an",
"input",
"signal",
"structure",
"to",
"list",
"of",
"output",
"structures",
".",
"A",
"structure",
"is",
"represented",
"by",
"a",
"list",
"of",
"signals",
":",
"[",
"signal_1",
"signal_2",
"...",
"signal_n",
"]",
"lsls_do",
"[",
"sel"... | train | https://github.com/nkavaldj/myhdl_lib/blob/9902afd2031e7847373f692821b2135fd0810aa8/myhdl_lib/mux.py#L58-L72 |
nkavaldj/myhdl_lib | myhdl_lib/mux.py | bitslice_select | def bitslice_select(offset, bv_di, bv_do):
''' Selects a bit-slice from a bit-vector
offset - (i) bit offset of the slice
bv_di - (i) bit vector where the slice is taken from
bv_do - (o) selected slice; the length of this bit-vector defines the number of bit in the slice
bv_do = bv_di[len(bv_do)+offset:offset]
'''
LEN_I = len(bv_di)
LEN_O = len(bv_do)
assert LEN_I >= LEN_O, "bitslice_select: expects len(bv_di) >= len(bv_do), but len(bv_di)={}, len(bv_do)".format(LEN_I, LEN_O)
OFFSET_MAX = LEN_I - LEN_O + 1
@always_comb
def _slice():
bv_do.next = 0
for i in range(OFFSET_MAX):
if i==offset:
for b in range(LEN_O):
bv_do.next[b] = bv_di[i+b]
return _slice | python | def bitslice_select(offset, bv_di, bv_do):
''' Selects a bit-slice from a bit-vector
offset - (i) bit offset of the slice
bv_di - (i) bit vector where the slice is taken from
bv_do - (o) selected slice; the length of this bit-vector defines the number of bit in the slice
bv_do = bv_di[len(bv_do)+offset:offset]
'''
LEN_I = len(bv_di)
LEN_O = len(bv_do)
assert LEN_I >= LEN_O, "bitslice_select: expects len(bv_di) >= len(bv_do), but len(bv_di)={}, len(bv_do)".format(LEN_I, LEN_O)
OFFSET_MAX = LEN_I - LEN_O + 1
@always_comb
def _slice():
bv_do.next = 0
for i in range(OFFSET_MAX):
if i==offset:
for b in range(LEN_O):
bv_do.next[b] = bv_di[i+b]
return _slice | [
"def",
"bitslice_select",
"(",
"offset",
",",
"bv_di",
",",
"bv_do",
")",
":",
"LEN_I",
"=",
"len",
"(",
"bv_di",
")",
"LEN_O",
"=",
"len",
"(",
"bv_do",
")",
"assert",
"LEN_I",
">=",
"LEN_O",
",",
"\"bitslice_select: expects len(bv_di) >= len(bv_do), but len(bv... | Selects a bit-slice from a bit-vector
offset - (i) bit offset of the slice
bv_di - (i) bit vector where the slice is taken from
bv_do - (o) selected slice; the length of this bit-vector defines the number of bit in the slice
bv_do = bv_di[len(bv_do)+offset:offset] | [
"Selects",
"a",
"bit",
"-",
"slice",
"from",
"a",
"bit",
"-",
"vector",
"offset",
"-",
"(",
"i",
")",
"bit",
"offset",
"of",
"the",
"slice",
"bv_di",
"-",
"(",
"i",
")",
"bit",
"vector",
"where",
"the",
"slice",
"is",
"taken",
"from",
"bv_do",
"-",... | train | https://github.com/nkavaldj/myhdl_lib/blob/9902afd2031e7847373f692821b2135fd0810aa8/myhdl_lib/mux.py#L75-L98 |
nkavaldj/myhdl_lib | myhdl_lib/mux.py | byteslice_select | def byteslice_select(offset, bv_di, bv_do):
''' Selects a slice of length 8*n aligned on a byte from a bit-vector
offset - (i) byte offset of the slice
bv_di - (i) bit vector where the slice is taken from; must len(bv_di) = 8*m
bv_do - (o) selected slice; must len(bv_do) = 8*n, n<=m; len(bv_do) defines the number of bit in the slice
'''
LEN_I = len(bv_di)
LEN_O = len(bv_do)
assert (LEN_I % 8)==0, "byteslice_select: expects len(bv_di)=8*x, but len(bv_di)={} bits".format(LEN_I)
assert (LEN_O % 8)==0, "byteslice_select: expects len(bv_do)=8*x, but len(bv_do)={} bits".format(LEN_O)
bit_offset = Signal(intbv(0)[len(offset)+3:])
@always_comb
def _offset():
bit_offset.next = offset << 3
_slice = bitslice_select(bit_offset, bv_di, bv_do)
return _offset, _slice | python | def byteslice_select(offset, bv_di, bv_do):
''' Selects a slice of length 8*n aligned on a byte from a bit-vector
offset - (i) byte offset of the slice
bv_di - (i) bit vector where the slice is taken from; must len(bv_di) = 8*m
bv_do - (o) selected slice; must len(bv_do) = 8*n, n<=m; len(bv_do) defines the number of bit in the slice
'''
LEN_I = len(bv_di)
LEN_O = len(bv_do)
assert (LEN_I % 8)==0, "byteslice_select: expects len(bv_di)=8*x, but len(bv_di)={} bits".format(LEN_I)
assert (LEN_O % 8)==0, "byteslice_select: expects len(bv_do)=8*x, but len(bv_do)={} bits".format(LEN_O)
bit_offset = Signal(intbv(0)[len(offset)+3:])
@always_comb
def _offset():
bit_offset.next = offset << 3
_slice = bitslice_select(bit_offset, bv_di, bv_do)
return _offset, _slice | [
"def",
"byteslice_select",
"(",
"offset",
",",
"bv_di",
",",
"bv_do",
")",
":",
"LEN_I",
"=",
"len",
"(",
"bv_di",
")",
"LEN_O",
"=",
"len",
"(",
"bv_do",
")",
"assert",
"(",
"LEN_I",
"%",
"8",
")",
"==",
"0",
",",
"\"byteslice_select: expects len(bv_di)... | Selects a slice of length 8*n aligned on a byte from a bit-vector
offset - (i) byte offset of the slice
bv_di - (i) bit vector where the slice is taken from; must len(bv_di) = 8*m
bv_do - (o) selected slice; must len(bv_do) = 8*n, n<=m; len(bv_do) defines the number of bit in the slice | [
"Selects",
"a",
"slice",
"of",
"length",
"8",
"*",
"n",
"aligned",
"on",
"a",
"byte",
"from",
"a",
"bit",
"-",
"vector",
"offset",
"-",
"(",
"i",
")",
"byte",
"offset",
"of",
"the",
"slice",
"bv_di",
"-",
"(",
"i",
")",
"bit",
"vector",
"where",
... | train | https://github.com/nkavaldj/myhdl_lib/blob/9902afd2031e7847373f692821b2135fd0810aa8/myhdl_lib/mux.py#L101-L121 |
flo-compbio/genometools | genometools/ncbi/sra/find_experiment_runs.py | get_argument_parser | def get_argument_parser():
"""Function to obtain the argument parser.
Returns
-------
A fully configured `argparse.ArgumentParser` object.
Notes
-----
This function is used by the `sphinx-argparse` extension for sphinx.
"""
file_mv = cli.file_mv
desc = 'Find all runs (SRR..) associated with an SRA experiment (SRX...).'
parser = cli.get_argument_parser(desc=desc)
parser.add_argument(
'-e', '--experiment-file', type=str, required=True, metavar=file_mv,
help='File with SRA experiment IDs (starting with "SRX").'
)
parser.add_argument(
'-o', '--output-file', type=str, required=True, metavar=file_mv,
help='The output file.'
)
cli.add_reporting_args(parser)
return parser | python | def get_argument_parser():
"""Function to obtain the argument parser.
Returns
-------
A fully configured `argparse.ArgumentParser` object.
Notes
-----
This function is used by the `sphinx-argparse` extension for sphinx.
"""
file_mv = cli.file_mv
desc = 'Find all runs (SRR..) associated with an SRA experiment (SRX...).'
parser = cli.get_argument_parser(desc=desc)
parser.add_argument(
'-e', '--experiment-file', type=str, required=True, metavar=file_mv,
help='File with SRA experiment IDs (starting with "SRX").'
)
parser.add_argument(
'-o', '--output-file', type=str, required=True, metavar=file_mv,
help='The output file.'
)
cli.add_reporting_args(parser)
return parser | [
"def",
"get_argument_parser",
"(",
")",
":",
"file_mv",
"=",
"cli",
".",
"file_mv",
"desc",
"=",
"'Find all runs (SRR..) associated with an SRA experiment (SRX...).'",
"parser",
"=",
"cli",
".",
"get_argument_parser",
"(",
"desc",
"=",
"desc",
")",
"parser",
".",
"a... | Function to obtain the argument parser.
Returns
-------
A fully configured `argparse.ArgumentParser` object.
Notes
-----
This function is used by the `sphinx-argparse` extension for sphinx. | [
"Function",
"to",
"obtain",
"the",
"argument",
"parser",
"."
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ncbi/sra/find_experiment_runs.py#L38-L68 |
flo-compbio/genometools | genometools/ncbi/sra/find_experiment_runs.py | main | def main(args=None):
"""Download all .sra from NCBI SRA for a given experiment ID.
Parameters
----------
args: argparse.Namespace object, optional
The argument values. If not specified, the values will be obtained by
parsing the command line arguments using the `argparse` module.
Returns
-------
int
Exit code (0 if no error occurred).
"""
if args is None:
# parse command-line arguments
parser = get_argument_parser()
args = parser.parse_args()
experiment_file = args.experiment_file
output_file = args.output_file
# log_file = args.log_file
# quiet = args.quiet
# verbose = args.verbose
# logger = misc.get_logger(log_file=log_file, quiet=quiet,
# verbose=verbose)
host = 'ftp-trace.ncbi.nlm.nih.gov'
user = 'anonymous'
password = 'anonymous'
# output_dir = download_dir + experiment_id + '/'
# make sure output directory exists
# misc.make_sure_dir_exists(output_dir)
# logger.info('Created output directory: "%s".', output_dir)
experiments = misc.read_single(experiment_file)
runs = []
with ftputil.FTPHost(host, user, password) as ftp_host:
for exp in experiments:
exp_dir = '/sra/sra-instant/reads/ByExp/sra/SRX/%s/%s/' \
% (exp[:6], exp)
ftp_host.chdir(exp_dir)
run_folders = ftp_host.listdir(ftp_host.curdir)
# logging.info('Found %d run folders.',len(run_folders))
for folder in run_folders:
files = ftp_host.listdir(folder)
assert len(files) == 1
runs.append((exp, folder))
with open(output_file, 'wb') as ofh:
writer = csv.writer(ofh, dialect='excel-tab',
lineterminator=os.linesep,
quoting=csv.QUOTE_NONE)
for r in runs:
writer.writerow(r)
return 0 | python | def main(args=None):
"""Download all .sra from NCBI SRA for a given experiment ID.
Parameters
----------
args: argparse.Namespace object, optional
The argument values. If not specified, the values will be obtained by
parsing the command line arguments using the `argparse` module.
Returns
-------
int
Exit code (0 if no error occurred).
"""
if args is None:
# parse command-line arguments
parser = get_argument_parser()
args = parser.parse_args()
experiment_file = args.experiment_file
output_file = args.output_file
# log_file = args.log_file
# quiet = args.quiet
# verbose = args.verbose
# logger = misc.get_logger(log_file=log_file, quiet=quiet,
# verbose=verbose)
host = 'ftp-trace.ncbi.nlm.nih.gov'
user = 'anonymous'
password = 'anonymous'
# output_dir = download_dir + experiment_id + '/'
# make sure output directory exists
# misc.make_sure_dir_exists(output_dir)
# logger.info('Created output directory: "%s".', output_dir)
experiments = misc.read_single(experiment_file)
runs = []
with ftputil.FTPHost(host, user, password) as ftp_host:
for exp in experiments:
exp_dir = '/sra/sra-instant/reads/ByExp/sra/SRX/%s/%s/' \
% (exp[:6], exp)
ftp_host.chdir(exp_dir)
run_folders = ftp_host.listdir(ftp_host.curdir)
# logging.info('Found %d run folders.',len(run_folders))
for folder in run_folders:
files = ftp_host.listdir(folder)
assert len(files) == 1
runs.append((exp, folder))
with open(output_file, 'wb') as ofh:
writer = csv.writer(ofh, dialect='excel-tab',
lineterminator=os.linesep,
quoting=csv.QUOTE_NONE)
for r in runs:
writer.writerow(r)
return 0 | [
"def",
"main",
"(",
"args",
"=",
"None",
")",
":",
"if",
"args",
"is",
"None",
":",
"# parse command-line arguments",
"parser",
"=",
"get_argument_parser",
"(",
")",
"args",
"=",
"parser",
".",
"parse_args",
"(",
")",
"experiment_file",
"=",
"args",
".",
"... | Download all .sra from NCBI SRA for a given experiment ID.
Parameters
----------
args: argparse.Namespace object, optional
The argument values. If not specified, the values will be obtained by
parsing the command line arguments using the `argparse` module.
Returns
-------
int
Exit code (0 if no error occurred). | [
"Download",
"all",
".",
"sra",
"from",
"NCBI",
"SRA",
"for",
"a",
"given",
"experiment",
"ID",
"."
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ncbi/sra/find_experiment_runs.py#L79-L140 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager._one_to_many_query | def _one_to_many_query(cls, query_obj, search4, model_attrib):
"""extends and returns a SQLAlchemy query object to allow one-to-many queries
:param query_obj: SQL Alchemy query object
:param str search4: search string
:param model_attrib: attribute in model
"""
model = model_attrib.parent.class_
if isinstance(search4, str):
query_obj = query_obj.join(model).filter(model_attrib.like(search4))
elif isinstance(search4, int):
query_obj = query_obj.join(model).filter(model_attrib == search4)
elif isinstance(search4, Iterable):
query_obj = query_obj.join(model).filter(model_attrib.in_(search4))
return query_obj | python | def _one_to_many_query(cls, query_obj, search4, model_attrib):
"""extends and returns a SQLAlchemy query object to allow one-to-many queries
:param query_obj: SQL Alchemy query object
:param str search4: search string
:param model_attrib: attribute in model
"""
model = model_attrib.parent.class_
if isinstance(search4, str):
query_obj = query_obj.join(model).filter(model_attrib.like(search4))
elif isinstance(search4, int):
query_obj = query_obj.join(model).filter(model_attrib == search4)
elif isinstance(search4, Iterable):
query_obj = query_obj.join(model).filter(model_attrib.in_(search4))
return query_obj | [
"def",
"_one_to_many_query",
"(",
"cls",
",",
"query_obj",
",",
"search4",
",",
"model_attrib",
")",
":",
"model",
"=",
"model_attrib",
".",
"parent",
".",
"class_",
"if",
"isinstance",
"(",
"search4",
",",
"str",
")",
":",
"query_obj",
"=",
"query_obj",
"... | extends and returns a SQLAlchemy query object to allow one-to-many queries
:param query_obj: SQL Alchemy query object
:param str search4: search string
:param model_attrib: attribute in model | [
"extends",
"and",
"returns",
"a",
"SQLAlchemy",
"query",
"object",
"to",
"allow",
"one",
"-",
"to",
"-",
"many",
"queries"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L115-L133 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.keyword | def keyword(self, name=None, identifier=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.Keyword` objects in database
:param name: keyword name(s)
:type name: str or tuple(str) or None
:param identifier: keyword identifier(s)
:type identifier: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type identifier: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Keyword`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Keyword`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Keyword)
model_queries_config = (
(name, models.Keyword.name),
(identifier, models.Keyword.identifier)
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_many_to_many_queries(q, ((entry_name, models.Keyword.entries, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def keyword(self, name=None, identifier=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.Keyword` objects in database
:param name: keyword name(s)
:type name: str or tuple(str) or None
:param identifier: keyword identifier(s)
:type identifier: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type identifier: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Keyword`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Keyword`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Keyword)
model_queries_config = (
(name, models.Keyword.name),
(identifier, models.Keyword.identifier)
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_many_to_many_queries(q, ((entry_name, models.Keyword.entries, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"keyword",
"(",
"self",
",",
"name",
"=",
"None",
",",
"identifier",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"as_df",
"=",
"False",
")",
":",
"q",
"=",
"self",
".",
"session",
".",
"query",
"(",
"models",... | Method to query :class:`.models.Keyword` objects in database
:param name: keyword name(s)
:type name: str or tuple(str) or None
:param identifier: keyword identifier(s)
:type identifier: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type identifier: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Keyword`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Keyword`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"Keyword",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L135-L171 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.entry | def entry(self,
name=None,
dataset=None,
recommended_full_name=None,
recommended_short_name=None,
gene_name=None,
taxid=None,
accession=None,
organism_host=None,
feature_type=None,
function_=None,
ec_number=None,
db_reference=None,
alternative_name=None,
disease_comment=None,
disease_name=None,
tissue_specificity=None,
pmid=None,
keyword=None,
subcellular_location=None,
tissue_in_reference=None,
sequence=None,
limit=None,
as_df=False):
"""Method to query :class:`.models.Entry` objects in database
An entry is the root element in UniProt datasets. Everything is linked to entry and can be accessed from
:param name: UniProt entry name(s)
:type name: str or tuple(str) or None
:param dataset: Swiss-Prot or TrEMBL
:type name: str or tuple(str) or None
:param recommended_full_name: recommended full protein name(s)
:type recommended_full_name: str or tuple(str) or None
:param recommended_short_name: recommended short protein name(s)
:type recommended_short_name: str or tuple(str) or None
:param tissue_in_reference: tissue(s) mentioned in reference
:type tissue_in_reference: str or tuple(str) or None
:param subcellular_location: subcellular location(s)
:type subcellular_location: str or tuple(str) or None
:param keyword: keyword(s)
:type keyword: str or tuple(str) or None
:param pmid: PubMed identifier(s)
:type pmid: int or tuple(int) or None
:param tissue_specificity: tissue specificit(y/ies)
:type tissue_specificity: str or tuple(str) or None
:param disease_comment: disease_comment(s)
:type disease_comment: str or tuple(str) or None
:param alternative_name: alternative name(s)
:type alternative_name: str or tuple(str) or None
:param db_reference: cross reference identifier(s)
:type db_reference: str or tuple(str) or None
:param ec_number: enzyme classification number(s), e.g. 1.1.1.1
:type ec_number: str or tuple(str) or None
:param function_: description of protein function(s)
:type function_: str or tuple(str) or None
:param feature_type: feature type(s)
:type feature_type: str or tuple(str) or None
:param organism_host: organism host(s) as taxid(s)
:type organism_host: int or tuple(int) or None
:param accession: UniProt accession number(s)
:type accession: str or tuple(str) or None
:param disease_name: disease name(s)
:type disease_name: str or tuple(str) or None
:param gene_name: gene name(s)
:type gene_name: str or tuple(str) or None
:param taxid: NCBI taxonomy identifier(s)
:type taxid: int or tuple(int) or None
:param sequence: Amino acid sequence(s)
:type sequence: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Entry`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Entry`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Entry)
model_queries_config = (
(dataset, models.Entry.dataset),
(name, models.Entry.name),
(recommended_full_name, models.Entry.recommended_full_name),
(recommended_short_name, models.Entry.recommended_short_name),
(gene_name, models.Entry.gene_name),
(taxid, models.Entry.taxid),
)
q = self.get_model_queries(q, model_queries_config)
one_to_many_queries_config = (
(accession, models.Accession.accession),
(organism_host, models.OrganismHost.taxid),
(feature_type, models.Feature.type_),
(function_, models.Function.text),
(ec_number, models.ECNumber.ec_number),
(db_reference, models.DbReference.identifier),
(alternative_name, models.AlternativeFullName.name),
(disease_comment, models.DiseaseComment.comment),
(tissue_specificity, models.TissueSpecificity.comment),
(sequence, models.Sequence.sequence),
)
q = self.get_one_to_many_queries(q, one_to_many_queries_config)
many_to_many_queries_config = (
(pmid, models.Entry.pmids, models.Pmid.pmid),
(keyword, models.Entry.keywords, models.Keyword.name),
(subcellular_location, models.Entry.subcellular_locations, models.SubcellularLocation.location),
(tissue_in_reference, models.Entry.tissue_in_references, models.TissueInReference.tissue)
)
q = self.get_many_to_many_queries(q, many_to_many_queries_config)
if disease_name:
q = q.join(models.Entry.disease_comments).join(models.DiseaseComment.disease)
if isinstance(disease_name, str):
q = q.filter(models.Disease.name.like(disease_name))
elif isinstance(disease_name, Iterable):
q = q.filter(models.Disease.name.in_(disease_name))
return self._limit_and_df(q, limit, as_df) | python | def entry(self,
name=None,
dataset=None,
recommended_full_name=None,
recommended_short_name=None,
gene_name=None,
taxid=None,
accession=None,
organism_host=None,
feature_type=None,
function_=None,
ec_number=None,
db_reference=None,
alternative_name=None,
disease_comment=None,
disease_name=None,
tissue_specificity=None,
pmid=None,
keyword=None,
subcellular_location=None,
tissue_in_reference=None,
sequence=None,
limit=None,
as_df=False):
"""Method to query :class:`.models.Entry` objects in database
An entry is the root element in UniProt datasets. Everything is linked to entry and can be accessed from
:param name: UniProt entry name(s)
:type name: str or tuple(str) or None
:param dataset: Swiss-Prot or TrEMBL
:type name: str or tuple(str) or None
:param recommended_full_name: recommended full protein name(s)
:type recommended_full_name: str or tuple(str) or None
:param recommended_short_name: recommended short protein name(s)
:type recommended_short_name: str or tuple(str) or None
:param tissue_in_reference: tissue(s) mentioned in reference
:type tissue_in_reference: str or tuple(str) or None
:param subcellular_location: subcellular location(s)
:type subcellular_location: str or tuple(str) or None
:param keyword: keyword(s)
:type keyword: str or tuple(str) or None
:param pmid: PubMed identifier(s)
:type pmid: int or tuple(int) or None
:param tissue_specificity: tissue specificit(y/ies)
:type tissue_specificity: str or tuple(str) or None
:param disease_comment: disease_comment(s)
:type disease_comment: str or tuple(str) or None
:param alternative_name: alternative name(s)
:type alternative_name: str or tuple(str) or None
:param db_reference: cross reference identifier(s)
:type db_reference: str or tuple(str) or None
:param ec_number: enzyme classification number(s), e.g. 1.1.1.1
:type ec_number: str or tuple(str) or None
:param function_: description of protein function(s)
:type function_: str or tuple(str) or None
:param feature_type: feature type(s)
:type feature_type: str or tuple(str) or None
:param organism_host: organism host(s) as taxid(s)
:type organism_host: int or tuple(int) or None
:param accession: UniProt accession number(s)
:type accession: str or tuple(str) or None
:param disease_name: disease name(s)
:type disease_name: str or tuple(str) or None
:param gene_name: gene name(s)
:type gene_name: str or tuple(str) or None
:param taxid: NCBI taxonomy identifier(s)
:type taxid: int or tuple(int) or None
:param sequence: Amino acid sequence(s)
:type sequence: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Entry`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Entry`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Entry)
model_queries_config = (
(dataset, models.Entry.dataset),
(name, models.Entry.name),
(recommended_full_name, models.Entry.recommended_full_name),
(recommended_short_name, models.Entry.recommended_short_name),
(gene_name, models.Entry.gene_name),
(taxid, models.Entry.taxid),
)
q = self.get_model_queries(q, model_queries_config)
one_to_many_queries_config = (
(accession, models.Accession.accession),
(organism_host, models.OrganismHost.taxid),
(feature_type, models.Feature.type_),
(function_, models.Function.text),
(ec_number, models.ECNumber.ec_number),
(db_reference, models.DbReference.identifier),
(alternative_name, models.AlternativeFullName.name),
(disease_comment, models.DiseaseComment.comment),
(tissue_specificity, models.TissueSpecificity.comment),
(sequence, models.Sequence.sequence),
)
q = self.get_one_to_many_queries(q, one_to_many_queries_config)
many_to_many_queries_config = (
(pmid, models.Entry.pmids, models.Pmid.pmid),
(keyword, models.Entry.keywords, models.Keyword.name),
(subcellular_location, models.Entry.subcellular_locations, models.SubcellularLocation.location),
(tissue_in_reference, models.Entry.tissue_in_references, models.TissueInReference.tissue)
)
q = self.get_many_to_many_queries(q, many_to_many_queries_config)
if disease_name:
q = q.join(models.Entry.disease_comments).join(models.DiseaseComment.disease)
if isinstance(disease_name, str):
q = q.filter(models.Disease.name.like(disease_name))
elif isinstance(disease_name, Iterable):
q = q.filter(models.Disease.name.in_(disease_name))
return self._limit_and_df(q, limit, as_df) | [
"def",
"entry",
"(",
"self",
",",
"name",
"=",
"None",
",",
"dataset",
"=",
"None",
",",
"recommended_full_name",
"=",
"None",
",",
"recommended_short_name",
"=",
"None",
",",
"gene_name",
"=",
"None",
",",
"taxid",
"=",
"None",
",",
"accession",
"=",
"N... | Method to query :class:`.models.Entry` objects in database
An entry is the root element in UniProt datasets. Everything is linked to entry and can be accessed from
:param name: UniProt entry name(s)
:type name: str or tuple(str) or None
:param dataset: Swiss-Prot or TrEMBL
:type name: str or tuple(str) or None
:param recommended_full_name: recommended full protein name(s)
:type recommended_full_name: str or tuple(str) or None
:param recommended_short_name: recommended short protein name(s)
:type recommended_short_name: str or tuple(str) or None
:param tissue_in_reference: tissue(s) mentioned in reference
:type tissue_in_reference: str or tuple(str) or None
:param subcellular_location: subcellular location(s)
:type subcellular_location: str or tuple(str) or None
:param keyword: keyword(s)
:type keyword: str or tuple(str) or None
:param pmid: PubMed identifier(s)
:type pmid: int or tuple(int) or None
:param tissue_specificity: tissue specificit(y/ies)
:type tissue_specificity: str or tuple(str) or None
:param disease_comment: disease_comment(s)
:type disease_comment: str or tuple(str) or None
:param alternative_name: alternative name(s)
:type alternative_name: str or tuple(str) or None
:param db_reference: cross reference identifier(s)
:type db_reference: str or tuple(str) or None
:param ec_number: enzyme classification number(s), e.g. 1.1.1.1
:type ec_number: str or tuple(str) or None
:param function_: description of protein function(s)
:type function_: str or tuple(str) or None
:param feature_type: feature type(s)
:type feature_type: str or tuple(str) or None
:param organism_host: organism host(s) as taxid(s)
:type organism_host: int or tuple(int) or None
:param accession: UniProt accession number(s)
:type accession: str or tuple(str) or None
:param disease_name: disease name(s)
:type disease_name: str or tuple(str) or None
:param gene_name: gene name(s)
:type gene_name: str or tuple(str) or None
:param taxid: NCBI taxonomy identifier(s)
:type taxid: int or tuple(int) or None
:param sequence: Amino acid sequence(s)
:type sequence: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Entry`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Entry`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"Entry",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L173-L318 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.disease | def disease(self,
identifier=None,
ref_id=None,
ref_type=None,
name=None,
acronym=None,
description=None,
entry_name=None,
limit=None,
as_df=False
):
"""Method to query :class:`.models.Disease` objects in database
:param identifier: disease UniProt identifier(s)
:type identifier: str or tuple(str) or None
:param ref_id: identifier(s) of referenced database
:type ref_id: str or tuple(str) or None
:param ref_type: database name(s)
:type ref_type: str or tuple(str) or None
:param name: disease name(s)
:type name: str or tuple(str) or None
:param acronym: disease acronym(s)
:type acronym: str or tuple(str) or None
:param description: disease description(s)
:type description: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Disease`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Disease`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Disease)
model_queries_config = (
(identifier, models.Disease.identifier),
(ref_id, models.Disease.ref_id),
(ref_type, models.Disease.ref_type),
(name, models.Disease.name),
(acronym, models.Disease.acronym),
(description, models.Disease.description)
)
q = self.get_model_queries(q, model_queries_config)
if entry_name:
q = q.session.query(models.Disease).join(models.DiseaseComment).join(models.Entry)
if isinstance(entry_name, str):
q = q.filter(models.Entry.name == entry_name)
elif isinstance(entry_name, Iterable):
q = q.filter(models.Entry.name.in_(entry_name))
return self._limit_and_df(q, limit, as_df) | python | def disease(self,
identifier=None,
ref_id=None,
ref_type=None,
name=None,
acronym=None,
description=None,
entry_name=None,
limit=None,
as_df=False
):
"""Method to query :class:`.models.Disease` objects in database
:param identifier: disease UniProt identifier(s)
:type identifier: str or tuple(str) or None
:param ref_id: identifier(s) of referenced database
:type ref_id: str or tuple(str) or None
:param ref_type: database name(s)
:type ref_type: str or tuple(str) or None
:param name: disease name(s)
:type name: str or tuple(str) or None
:param acronym: disease acronym(s)
:type acronym: str or tuple(str) or None
:param description: disease description(s)
:type description: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Disease`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Disease`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Disease)
model_queries_config = (
(identifier, models.Disease.identifier),
(ref_id, models.Disease.ref_id),
(ref_type, models.Disease.ref_type),
(name, models.Disease.name),
(acronym, models.Disease.acronym),
(description, models.Disease.description)
)
q = self.get_model_queries(q, model_queries_config)
if entry_name:
q = q.session.query(models.Disease).join(models.DiseaseComment).join(models.Entry)
if isinstance(entry_name, str):
q = q.filter(models.Entry.name == entry_name)
elif isinstance(entry_name, Iterable):
q = q.filter(models.Entry.name.in_(entry_name))
return self._limit_and_df(q, limit, as_df) | [
"def",
"disease",
"(",
"self",
",",
"identifier",
"=",
"None",
",",
"ref_id",
"=",
"None",
",",
"ref_type",
"=",
"None",
",",
"name",
"=",
"None",
",",
"acronym",
"=",
"None",
",",
"description",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"lim... | Method to query :class:`.models.Disease` objects in database
:param identifier: disease UniProt identifier(s)
:type identifier: str or tuple(str) or None
:param ref_id: identifier(s) of referenced database
:type ref_id: str or tuple(str) or None
:param ref_type: database name(s)
:type ref_type: str or tuple(str) or None
:param name: disease name(s)
:type name: str or tuple(str) or None
:param acronym: disease acronym(s)
:type acronym: str or tuple(str) or None
:param description: disease description(s)
:type description: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Disease`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Disease`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"Disease",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L320-L387 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.disease_comment | def disease_comment(self, comment=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.DiseaseComment` objects in database
:param comment: Comment(s) to disease
:type comment: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.DiseaseComment`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.DiseaseComment`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.DiseaseComment)
q = self.get_model_queries(q, ((comment, models.DiseaseComment.comment),))
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def disease_comment(self, comment=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.DiseaseComment` objects in database
:param comment: Comment(s) to disease
:type comment: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.DiseaseComment`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.DiseaseComment`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.DiseaseComment)
q = self.get_model_queries(q, ((comment, models.DiseaseComment.comment),))
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"disease_comment",
"(",
"self",
",",
"comment",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"as_df",
"=",
"False",
")",
":",
"q",
"=",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"DiseaseComment",
... | Method to query :class:`.models.DiseaseComment` objects in database
:param comment: Comment(s) to disease
:type comment: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.DiseaseComment`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.DiseaseComment`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"DiseaseComment",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L389-L417 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.other_gene_name | def other_gene_name(self, type_=None, name=None, entry_name=None, limit=None, as_df=None):
"""Method to query :class:`.models.OtherGeneName` objects in database
:param type_: type(s) of gene name e.g. *synonym*
:type type_: str or tuple(str) or None
:param name: other gene name(s)
:type name: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.OtherGeneName`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.OtherGeneName`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.OtherGeneName)
model_queries_config = (
(type_, models.OtherGeneName.type_),
(name, models.OtherGeneName.name),
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def other_gene_name(self, type_=None, name=None, entry_name=None, limit=None, as_df=None):
"""Method to query :class:`.models.OtherGeneName` objects in database
:param type_: type(s) of gene name e.g. *synonym*
:type type_: str or tuple(str) or None
:param name: other gene name(s)
:type name: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.OtherGeneName`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.OtherGeneName`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.OtherGeneName)
model_queries_config = (
(type_, models.OtherGeneName.type_),
(name, models.OtherGeneName.name),
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"other_gene_name",
"(",
"self",
",",
"type_",
"=",
"None",
",",
"name",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"as_df",
"=",
"None",
")",
":",
"q",
"=",
"self",
".",
"session",
".",
"query",
"(",
"models... | Method to query :class:`.models.OtherGeneName` objects in database
:param type_: type(s) of gene name e.g. *synonym*
:type type_: str or tuple(str) or None
:param name: other gene name(s)
:type name: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.OtherGeneName`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.OtherGeneName`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"OtherGeneName",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L419-L455 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.alternative_full_name | def alternative_full_name(self, name=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.AlternativeFullName` objects in database
:param name: alternative full name(s)
:type name: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.AlternativeFullName`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.AlternativeFullName`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.AlternativeFullName)
model_queries_config = (
(name, models.AlternativeFullName.name),
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def alternative_full_name(self, name=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.AlternativeFullName` objects in database
:param name: alternative full name(s)
:type name: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.AlternativeFullName`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.AlternativeFullName`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.AlternativeFullName)
model_queries_config = (
(name, models.AlternativeFullName.name),
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"alternative_full_name",
"(",
"self",
",",
"name",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"as_df",
"=",
"False",
")",
":",
"q",
"=",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"AlternativeFull... | Method to query :class:`.models.AlternativeFullName` objects in database
:param name: alternative full name(s)
:type name: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.AlternativeFullName`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.AlternativeFullName`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"AlternativeFullName",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L457-L488 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.alternative_short_name | def alternative_short_name(self, name=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.AlternativeShortlName` objects in database
:param name: alternative short name(s)
:type name: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.AlternativeShortName`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.AlternativeShortName`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.AlternativeShortName)
model_queries_config = (
(name, models.AlternativeShortName.name),
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def alternative_short_name(self, name=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.AlternativeShortlName` objects in database
:param name: alternative short name(s)
:type name: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.AlternativeShortName`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.AlternativeShortName`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.AlternativeShortName)
model_queries_config = (
(name, models.AlternativeShortName.name),
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"alternative_short_name",
"(",
"self",
",",
"name",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"as_df",
"=",
"False",
")",
":",
"q",
"=",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"AlternativeSho... | Method to query :class:`.models.AlternativeShortlName` objects in database
:param name: alternative short name(s)
:type name: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.AlternativeShortName`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.AlternativeShortName`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"AlternativeShortlName",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L490-L521 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.accession | def accession(self, accession=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.Accession` objects in database
:param accession: UniProt Accession number(s)
:type accession: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Accession`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Accession`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Accession)
model_queries_config = (
(accession, models.Accession.accession),
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def accession(self, accession=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.Accession` objects in database
:param accession: UniProt Accession number(s)
:type accession: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Accession`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Accession`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Accession)
model_queries_config = (
(accession, models.Accession.accession),
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"accession",
"(",
"self",
",",
"accession",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"as_df",
"=",
"False",
")",
":",
"q",
"=",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"Accession",
")",
"... | Method to query :class:`.models.Accession` objects in database
:param accession: UniProt Accession number(s)
:type accession: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Accession`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Accession`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"Accession",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L523-L554 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.pmid | def pmid(self,
pmid=None,
entry_name=None,
first=None,
last=None,
volume=None,
name=None,
date=None,
title=None,
limit=None,
as_df=False
):
"""Method to query :class:`.models.Pmid` objects in database
:param pmid: PubMed identifier(s)
:type pmid: int or tuple(int) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param first: first page(s)
:type first: str or tuple(str) or None
:param last: last page(s)
:type last: str or tuple(str) or None
:param volume: volume(s)
:type volume: int or tuple(int) or None
:param name: name(s) of journal
:type name: str or tuple(str) or None
:param date: publication year(s)
:type date: int or tuple(int) or None
:param title: title(s) of publication
:type title: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Pmid`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Pmid`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Pmid)
model_queries_config = (
(pmid, models.Pmid.pmid),
(last, models.Pmid.last),
(first, models.Pmid.first),
(volume, models.Pmid.volume),
(name, models.Pmid.name),
(date, models.Pmid.date),
(title, models.Pmid.title)
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_many_to_many_queries(q, ((entry_name, models.Pmid.entries, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def pmid(self,
pmid=None,
entry_name=None,
first=None,
last=None,
volume=None,
name=None,
date=None,
title=None,
limit=None,
as_df=False
):
"""Method to query :class:`.models.Pmid` objects in database
:param pmid: PubMed identifier(s)
:type pmid: int or tuple(int) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param first: first page(s)
:type first: str or tuple(str) or None
:param last: last page(s)
:type last: str or tuple(str) or None
:param volume: volume(s)
:type volume: int or tuple(int) or None
:param name: name(s) of journal
:type name: str or tuple(str) or None
:param date: publication year(s)
:type date: int or tuple(int) or None
:param title: title(s) of publication
:type title: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Pmid`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Pmid`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Pmid)
model_queries_config = (
(pmid, models.Pmid.pmid),
(last, models.Pmid.last),
(first, models.Pmid.first),
(volume, models.Pmid.volume),
(name, models.Pmid.name),
(date, models.Pmid.date),
(title, models.Pmid.title)
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_many_to_many_queries(q, ((entry_name, models.Pmid.entries, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"pmid",
"(",
"self",
",",
"pmid",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"first",
"=",
"None",
",",
"last",
"=",
"None",
",",
"volume",
"=",
"None",
",",
"name",
"=",
"None",
",",
"date",
"=",
"None",
",",
"title",
"=",
"None",
... | Method to query :class:`.models.Pmid` objects in database
:param pmid: PubMed identifier(s)
:type pmid: int or tuple(int) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param first: first page(s)
:type first: str or tuple(str) or None
:param last: last page(s)
:type last: str or tuple(str) or None
:param volume: volume(s)
:type volume: int or tuple(int) or None
:param name: name(s) of journal
:type name: str or tuple(str) or None
:param date: publication year(s)
:type date: int or tuple(int) or None
:param title: title(s) of publication
:type title: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Pmid`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Pmid`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"Pmid",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L556-L622 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.organism_host | def organism_host(self, taxid=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.OrganismHost` objects in database
:param taxid: NCBI taxonomy identifier(s)
:type taxid: int or tuple(int) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.OrganismHost`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.OrganismHost`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.OrganismHost)
q = self.get_model_queries(q, ((taxid, models.OrganismHost.taxid),))
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def organism_host(self, taxid=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.OrganismHost` objects in database
:param taxid: NCBI taxonomy identifier(s)
:type taxid: int or tuple(int) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.OrganismHost`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.OrganismHost`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.OrganismHost)
q = self.get_model_queries(q, ((taxid, models.OrganismHost.taxid),))
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"organism_host",
"(",
"self",
",",
"taxid",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"as_df",
"=",
"False",
")",
":",
"q",
"=",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"OrganismHost",
")",
... | Method to query :class:`.models.OrganismHost` objects in database
:param taxid: NCBI taxonomy identifier(s)
:type taxid: int or tuple(int) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.OrganismHost`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.OrganismHost`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"OrganismHost",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L624-L652 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.db_reference | def db_reference(self, type_=None, identifier=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.DbReference` objects in database
Check list of available databases with on :py:attr:`.dbreference_types`
:param type_: type(s) (or name(s)) of database
:type type_: str or tuple(str) or None
:param identifier: unique identifier(s) in specific database (type)
:type identifier: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.DbReference`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.DbReference`) or :class:`pandas.DataFrame`
**Links**
- `UniProt dbxref <http://www.uniprot.org/docs/dbxref>`_
"""
q = self.session.query(models.DbReference)
model_queries_config = (
(type_, models.DbReference.type_),
(identifier, models.DbReference.identifier)
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def db_reference(self, type_=None, identifier=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.DbReference` objects in database
Check list of available databases with on :py:attr:`.dbreference_types`
:param type_: type(s) (or name(s)) of database
:type type_: str or tuple(str) or None
:param identifier: unique identifier(s) in specific database (type)
:type identifier: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.DbReference`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.DbReference`) or :class:`pandas.DataFrame`
**Links**
- `UniProt dbxref <http://www.uniprot.org/docs/dbxref>`_
"""
q = self.session.query(models.DbReference)
model_queries_config = (
(type_, models.DbReference.type_),
(identifier, models.DbReference.identifier)
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"db_reference",
"(",
"self",
",",
"type_",
"=",
"None",
",",
"identifier",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"as_df",
"=",
"False",
")",
":",
"q",
"=",
"self",
".",
"session",
".",
"query",
"(",
"mo... | Method to query :class:`.models.DbReference` objects in database
Check list of available databases with on :py:attr:`.dbreference_types`
:param type_: type(s) (or name(s)) of database
:type type_: str or tuple(str) or None
:param identifier: unique identifier(s) in specific database (type)
:type identifier: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.DbReference`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.DbReference`) or :class:`pandas.DataFrame`
**Links**
- `UniProt dbxref <http://www.uniprot.org/docs/dbxref>`_ | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"DbReference",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L654-L695 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.feature | def feature(self, type_=None, identifier=None, description=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.Feature` objects in database
Check available features types with ``pyuniprot.query().feature_types``
:param type_: type(s) of feature
:type type_: str or tuple(str) or None
:param identifier: feature identifier(s)
:type identifier: str or tuple(str) or None
:param description: description(s) of feature(s)
:type description: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Feature`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Feature`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Feature)
model_queries_config = (
(type_, models.Feature.type_),
(identifier, models.Feature.identifier),
(description, models.Feature.description)
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def feature(self, type_=None, identifier=None, description=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.Feature` objects in database
Check available features types with ``pyuniprot.query().feature_types``
:param type_: type(s) of feature
:type type_: str or tuple(str) or None
:param identifier: feature identifier(s)
:type identifier: str or tuple(str) or None
:param description: description(s) of feature(s)
:type description: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Feature`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Feature`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Feature)
model_queries_config = (
(type_, models.Feature.type_),
(identifier, models.Feature.identifier),
(description, models.Feature.description)
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"feature",
"(",
"self",
",",
"type_",
"=",
"None",
",",
"identifier",
"=",
"None",
",",
"description",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"as_df",
"=",
"False",
")",
":",
"q",
"=",
"self",
".",
"sess... | Method to query :class:`.models.Feature` objects in database
Check available features types with ``pyuniprot.query().feature_types``
:param type_: type(s) of feature
:type type_: str or tuple(str) or None
:param identifier: feature identifier(s)
:type identifier: str or tuple(str) or None
:param description: description(s) of feature(s)
:type description: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Feature`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Feature`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"Feature",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L697-L738 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.function | def function(self, text=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.Function` objects in database
:param text: description(s) of function(s)
:type text: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Function`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Function`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Function)
model_queries_config = (
(text, models.Function.text),
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def function(self, text=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.Function` objects in database
:param text: description(s) of function(s)
:type text: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Function`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Function`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.Function)
model_queries_config = (
(text, models.Function.text),
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"function",
"(",
"self",
",",
"text",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"as_df",
"=",
"False",
")",
":",
"q",
"=",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"Function",
")",
"model_q... | Method to query :class:`.models.Function` objects in database
:param text: description(s) of function(s)
:type text: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.Function`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.Function`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"Function",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L740-L771 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.ec_number | def ec_number(self, ec_number=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.ECNumber` objects in database
:param ec_number: Enzyme Commission number(s)
:type ec_number: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.ECNumber`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.ECNumber`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.ECNumber)
q = self.get_model_queries(q, ((ec_number, models.ECNumber.ec_number),))
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def ec_number(self, ec_number=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.ECNumber` objects in database
:param ec_number: Enzyme Commission number(s)
:type ec_number: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.ECNumber`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.ECNumber`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.ECNumber)
q = self.get_model_queries(q, ((ec_number, models.ECNumber.ec_number),))
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"ec_number",
"(",
"self",
",",
"ec_number",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"as_df",
"=",
"False",
")",
":",
"q",
"=",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"ECNumber",
")",
"q... | Method to query :class:`.models.ECNumber` objects in database
:param ec_number: Enzyme Commission number(s)
:type ec_number: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.ECNumber`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.ECNumber`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"ECNumber",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L773-L801 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.subcellular_location | def subcellular_location(self, location=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.SubcellularLocation` objects in database
:param location: subcellular location(s)
:type location: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.SubcellularLocation`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.SubcellularLocation`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.SubcellularLocation)
q = self.get_model_queries(q, ((location, models.SubcellularLocation.location),))
q = self.get_many_to_many_queries(q, ((entry_name, models.SubcellularLocation.entries, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def subcellular_location(self, location=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.SubcellularLocation` objects in database
:param location: subcellular location(s)
:type location: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.SubcellularLocation`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.SubcellularLocation`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.SubcellularLocation)
q = self.get_model_queries(q, ((location, models.SubcellularLocation.location),))
q = self.get_many_to_many_queries(q, ((entry_name, models.SubcellularLocation.entries, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"subcellular_location",
"(",
"self",
",",
"location",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"as_df",
"=",
"False",
")",
":",
"q",
"=",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"SubcellularL... | Method to query :class:`.models.SubcellularLocation` objects in database
:param location: subcellular location(s)
:type location: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.SubcellularLocation`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.SubcellularLocation`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"SubcellularLocation",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L833-L861 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.tissue_specificity | def tissue_specificity(self, comment=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.TissueSpecificity` objects in database
Provides information on the expression of a gene at the mRNA or protein level in cells or in tissues of
multicellular organisms. By default, the information is derived from experiments at the mRNA level, unless
specified ‘at protein level
:param comment: Comment(s) describing tissue specificity
:type comment: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.TissueSpecificity`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.TissueSpecificity`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.TissueSpecificity)
q = self.get_model_queries(q, ((comment, models.TissueSpecificity.comment),))
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def tissue_specificity(self, comment=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.TissueSpecificity` objects in database
Provides information on the expression of a gene at the mRNA or protein level in cells or in tissues of
multicellular organisms. By default, the information is derived from experiments at the mRNA level, unless
specified ‘at protein level
:param comment: Comment(s) describing tissue specificity
:type comment: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.TissueSpecificity`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.TissueSpecificity`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.TissueSpecificity)
q = self.get_model_queries(q, ((comment, models.TissueSpecificity.comment),))
q = self.get_one_to_many_queries(q, ((entry_name, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"tissue_specificity",
"(",
"self",
",",
"comment",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"as_df",
"=",
"False",
")",
":",
"q",
"=",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"TissueSpecifici... | Method to query :class:`.models.TissueSpecificity` objects in database
Provides information on the expression of a gene at the mRNA or protein level in cells or in tissues of
multicellular organisms. By default, the information is derived from experiments at the mRNA level, unless
specified ‘at protein level
:param comment: Comment(s) describing tissue specificity
:type comment: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.TissueSpecificity`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.TissueSpecificity`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"TissueSpecificity",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L863-L895 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.tissue_in_reference | def tissue_in_reference(self, tissue=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.TissueInReference` objects in database
:param tissue: tissue(s) linked to reference
:type tissue: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.TissueInReference`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.TissueInReference`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.TissueInReference)
model_queries_config = (
(tissue, models.TissueInReference.tissue),
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_many_to_many_queries(q, ((entry_name, models.TissueInReference.entries, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | python | def tissue_in_reference(self, tissue=None, entry_name=None, limit=None, as_df=False):
"""Method to query :class:`.models.TissueInReference` objects in database
:param tissue: tissue(s) linked to reference
:type tissue: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.TissueInReference`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.TissueInReference`) or :class:`pandas.DataFrame`
"""
q = self.session.query(models.TissueInReference)
model_queries_config = (
(tissue, models.TissueInReference.tissue),
)
q = self.get_model_queries(q, model_queries_config)
q = self.get_many_to_many_queries(q, ((entry_name, models.TissueInReference.entries, models.Entry.name),))
return self._limit_and_df(q, limit, as_df) | [
"def",
"tissue_in_reference",
"(",
"self",
",",
"tissue",
"=",
"None",
",",
"entry_name",
"=",
"None",
",",
"limit",
"=",
"None",
",",
"as_df",
"=",
"False",
")",
":",
"q",
"=",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"TissueInReferen... | Method to query :class:`.models.TissueInReference` objects in database
:param tissue: tissue(s) linked to reference
:type tissue: str or tuple(str) or None
:param entry_name: name(s) in :class:`.models.Entry`
:type entry_name: str or tuple(str) or None
:param limit:
- if `isinstance(limit,int)==True` -> limit
- if `isinstance(limit,tuple)==True` -> format:= tuple(page_number, results_per_page)
- if limit == None -> all results
:type limit: int or tuple(int) or None
:param bool as_df: if `True` results are returned as :class:`pandas.DataFrame`
:return:
- if `as_df == False` -> list(:class:`.models.TissueInReference`)
- if `as_df == True` -> :class:`pandas.DataFrame`
:rtype: list(:class:`.models.TissueInReference`) or :class:`pandas.DataFrame` | [
"Method",
"to",
"query",
":",
"class",
":",
".",
"models",
".",
"TissueInReference",
"objects",
"in",
"database"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L897-L928 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.dbreference_types | def dbreference_types(self):
"""Distinct database reference types (``type_``) in :class:`.models.DbReference`
:return: List of strings for all available database cross reference types used in model DbReference
:rtype: list[str]
"""
q = self.session.query(distinct(models.DbReference.type_))
return [x[0] for x in q.all()] | python | def dbreference_types(self):
"""Distinct database reference types (``type_``) in :class:`.models.DbReference`
:return: List of strings for all available database cross reference types used in model DbReference
:rtype: list[str]
"""
q = self.session.query(distinct(models.DbReference.type_))
return [x[0] for x in q.all()] | [
"def",
"dbreference_types",
"(",
"self",
")",
":",
"q",
"=",
"self",
".",
"session",
".",
"query",
"(",
"distinct",
"(",
"models",
".",
"DbReference",
".",
"type_",
")",
")",
"return",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"q",
".",
"all",
"... | Distinct database reference types (``type_``) in :class:`.models.DbReference`
:return: List of strings for all available database cross reference types used in model DbReference
:rtype: list[str] | [
"Distinct",
"database",
"reference",
"types",
"(",
"type_",
")",
"in",
":",
"class",
":",
".",
"models",
".",
"DbReference"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L931-L938 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.taxids | def taxids(self):
"""Distinct NCBI taxonomy identifiers (``taxid``) in :class:`.models.Entry`
:return: NCBI taxonomy identifiers
:rtype: list[int]
"""
r = self.session.query(distinct(models.Entry.taxid)).all()
return [x[0] for x in r] | python | def taxids(self):
"""Distinct NCBI taxonomy identifiers (``taxid``) in :class:`.models.Entry`
:return: NCBI taxonomy identifiers
:rtype: list[int]
"""
r = self.session.query(distinct(models.Entry.taxid)).all()
return [x[0] for x in r] | [
"def",
"taxids",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"session",
".",
"query",
"(",
"distinct",
"(",
"models",
".",
"Entry",
".",
"taxid",
")",
")",
".",
"all",
"(",
")",
"return",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"r",
"]"
... | Distinct NCBI taxonomy identifiers (``taxid``) in :class:`.models.Entry`
:return: NCBI taxonomy identifiers
:rtype: list[int] | [
"Distinct",
"NCBI",
"taxonomy",
"identifiers",
"(",
"taxid",
")",
"in",
":",
"class",
":",
".",
"models",
".",
"Entry"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L941-L948 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.datasets | def datasets(self):
"""Distinct datasets (``dataset``) in :class:`.models.Entry`
Distinct datasets are SwissProt or/and TrEMBL
:return: all distinct dataset types
:rtype: list[str]
"""
r = self.session.query(distinct(models.Entry.dataset)).all()
return [x[0] for x in r] | python | def datasets(self):
"""Distinct datasets (``dataset``) in :class:`.models.Entry`
Distinct datasets are SwissProt or/and TrEMBL
:return: all distinct dataset types
:rtype: list[str]
"""
r = self.session.query(distinct(models.Entry.dataset)).all()
return [x[0] for x in r] | [
"def",
"datasets",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"session",
".",
"query",
"(",
"distinct",
"(",
"models",
".",
"Entry",
".",
"dataset",
")",
")",
".",
"all",
"(",
")",
"return",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"r",
... | Distinct datasets (``dataset``) in :class:`.models.Entry`
Distinct datasets are SwissProt or/and TrEMBL
:return: all distinct dataset types
:rtype: list[str] | [
"Distinct",
"datasets",
"(",
"dataset",
")",
"in",
":",
"class",
":",
".",
"models",
".",
"Entry"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L951-L960 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.feature_types | def feature_types(self):
"""Distinct types (``type_``) in :class:`.models.Feature`
:return: all distinct feature types
:rtype: list[str]
"""
r = self.session.query(distinct(models.Feature.type_)).all()
return [x[0] for x in r] | python | def feature_types(self):
"""Distinct types (``type_``) in :class:`.models.Feature`
:return: all distinct feature types
:rtype: list[str]
"""
r = self.session.query(distinct(models.Feature.type_)).all()
return [x[0] for x in r] | [
"def",
"feature_types",
"(",
"self",
")",
":",
"r",
"=",
"self",
".",
"session",
".",
"query",
"(",
"distinct",
"(",
"models",
".",
"Feature",
".",
"type_",
")",
")",
".",
"all",
"(",
")",
"return",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"r... | Distinct types (``type_``) in :class:`.models.Feature`
:return: all distinct feature types
:rtype: list[str] | [
"Distinct",
"types",
"(",
"type_",
")",
"in",
":",
"class",
":",
".",
"models",
".",
"Feature"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L963-L970 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.subcellular_locations | def subcellular_locations(self):
"""Distinct subcellular locations (``location`` in :class:`.models.SubcellularLocation`)
:return: all distinct subcellular locations
:rtype: list[str]
"""
return [x[0] for x in self.session.query(models.SubcellularLocation.location).all()] | python | def subcellular_locations(self):
"""Distinct subcellular locations (``location`` in :class:`.models.SubcellularLocation`)
:return: all distinct subcellular locations
:rtype: list[str]
"""
return [x[0] for x in self.session.query(models.SubcellularLocation.location).all()] | [
"def",
"subcellular_locations",
"(",
"self",
")",
":",
"return",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"SubcellularLocation",
".",
"location",
")",
".",
"all",
"(",
")",
"]"
] | Distinct subcellular locations (``location`` in :class:`.models.SubcellularLocation`)
:return: all distinct subcellular locations
:rtype: list[str] | [
"Distinct",
"subcellular",
"locations",
"(",
"location",
"in",
":",
"class",
":",
".",
"models",
".",
"SubcellularLocation",
")"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L973-L979 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.tissues_in_references | def tissues_in_references(self):
"""Distinct tissues (``tissue`` in :class:`.models.TissueInReference`)
:return: all distinct tissues in references
:rtype: list[str]
"""
return [x[0] for x in self.session.query(models.TissueInReference.tissue).all()] | python | def tissues_in_references(self):
"""Distinct tissues (``tissue`` in :class:`.models.TissueInReference`)
:return: all distinct tissues in references
:rtype: list[str]
"""
return [x[0] for x in self.session.query(models.TissueInReference.tissue).all()] | [
"def",
"tissues_in_references",
"(",
"self",
")",
":",
"return",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"TissueInReference",
".",
"tissue",
")",
".",
"all",
"(",
")",
"]"
] | Distinct tissues (``tissue`` in :class:`.models.TissueInReference`)
:return: all distinct tissues in references
:rtype: list[str] | [
"Distinct",
"tissues",
"(",
"tissue",
"in",
":",
"class",
":",
".",
"models",
".",
"TissueInReference",
")"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L982-L988 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.keywords | def keywords(self):
"""Distinct keywords (``name`` in :class:`.models.Keyword`)
:returns: all distinct keywords
:rtype: list[str]
"""
return [x[0] for x in self.session.query(models.Keyword.name).all()] | python | def keywords(self):
"""Distinct keywords (``name`` in :class:`.models.Keyword`)
:returns: all distinct keywords
:rtype: list[str]
"""
return [x[0] for x in self.session.query(models.Keyword.name).all()] | [
"def",
"keywords",
"(",
"self",
")",
":",
"return",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"Keyword",
".",
"name",
")",
".",
"all",
"(",
")",
"]"
] | Distinct keywords (``name`` in :class:`.models.Keyword`)
:returns: all distinct keywords
:rtype: list[str] | [
"Distinct",
"keywords",
"(",
"name",
"in",
":",
"class",
":",
".",
"models",
".",
"Keyword",
")"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L991-L997 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.diseases | def diseases(self):
"""Distinct diseases (``name`` in :class:`.models.Disease`)
:returns: all distinct disease names
:rtype: list[str]
"""
return [x[0] for x in self.session.query(models.Disease.name).all()] | python | def diseases(self):
"""Distinct diseases (``name`` in :class:`.models.Disease`)
:returns: all distinct disease names
:rtype: list[str]
"""
return [x[0] for x in self.session.query(models.Disease.name).all()] | [
"def",
"diseases",
"(",
"self",
")",
":",
"return",
"[",
"x",
"[",
"0",
"]",
"for",
"x",
"in",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"Disease",
".",
"name",
")",
".",
"all",
"(",
")",
"]"
] | Distinct diseases (``name`` in :class:`.models.Disease`)
:returns: all distinct disease names
:rtype: list[str] | [
"Distinct",
"diseases",
"(",
"name",
"in",
":",
"class",
":",
".",
"models",
".",
"Disease",
")"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L1000-L1006 |
cebel/pyuniprot | src/pyuniprot/manager/query.py | QueryManager.version | def version(self):
"""Version of UniPort knowledgebase
:returns: dictionary with version info
:rtype: dict
"""
return [x for x in self.session.query(models.Version).all()] | python | def version(self):
"""Version of UniPort knowledgebase
:returns: dictionary with version info
:rtype: dict
"""
return [x for x in self.session.query(models.Version).all()] | [
"def",
"version",
"(",
"self",
")",
":",
"return",
"[",
"x",
"for",
"x",
"in",
"self",
".",
"session",
".",
"query",
"(",
"models",
".",
"Version",
")",
".",
"all",
"(",
")",
"]"
] | Version of UniPort knowledgebase
:returns: dictionary with version info
:rtype: dict | [
"Version",
"of",
"UniPort",
"knowledgebase"
] | train | https://github.com/cebel/pyuniprot/blob/9462a6042c7c9295415a5eb589b77b27cb7c142b/src/pyuniprot/manager/query.py#L1009-L1015 |
shmir/PyIxExplorer | ixexplorer/ixe_port.py | IxePort.reserve | def reserve(self, force=False):
""" Reserve port.
:param force: True - take forcefully, False - fail if port is reserved by other user
"""
if not force:
try:
self.api.call_rc('ixPortTakeOwnership {}'.format(self.uri))
except Exception as _:
raise TgnError('Failed to take ownership for port {} current owner is {}'.format(self, self.owner))
else:
self.api.call_rc('ixPortTakeOwnership {} force'.format(self.uri)) | python | def reserve(self, force=False):
""" Reserve port.
:param force: True - take forcefully, False - fail if port is reserved by other user
"""
if not force:
try:
self.api.call_rc('ixPortTakeOwnership {}'.format(self.uri))
except Exception as _:
raise TgnError('Failed to take ownership for port {} current owner is {}'.format(self, self.owner))
else:
self.api.call_rc('ixPortTakeOwnership {} force'.format(self.uri)) | [
"def",
"reserve",
"(",
"self",
",",
"force",
"=",
"False",
")",
":",
"if",
"not",
"force",
":",
"try",
":",
"self",
".",
"api",
".",
"call_rc",
"(",
"'ixPortTakeOwnership {}'",
".",
"format",
"(",
"self",
".",
"uri",
")",
")",
"except",
"Exception",
... | Reserve port.
:param force: True - take forcefully, False - fail if port is reserved by other user | [
"Reserve",
"port",
"."
] | train | https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_port.py#L169-L181 |
shmir/PyIxExplorer | ixexplorer/ixe_port.py | IxePort.write | def write(self):
""" Write configuration to chassis.
Raise StreamWarningsError if configuration warnings found.
"""
self.ix_command('write')
stream_warnings = self.streamRegion.generateWarningList()
warnings_list = (self.api.call('join ' + ' {' + stream_warnings + '} ' + ' LiStSeP').split('LiStSeP')
if self.streamRegion.generateWarningList() else [])
for warning in warnings_list:
if warning:
raise StreamWarningsError(warning) | python | def write(self):
""" Write configuration to chassis.
Raise StreamWarningsError if configuration warnings found.
"""
self.ix_command('write')
stream_warnings = self.streamRegion.generateWarningList()
warnings_list = (self.api.call('join ' + ' {' + stream_warnings + '} ' + ' LiStSeP').split('LiStSeP')
if self.streamRegion.generateWarningList() else [])
for warning in warnings_list:
if warning:
raise StreamWarningsError(warning) | [
"def",
"write",
"(",
"self",
")",
":",
"self",
".",
"ix_command",
"(",
"'write'",
")",
"stream_warnings",
"=",
"self",
".",
"streamRegion",
".",
"generateWarningList",
"(",
")",
"warnings_list",
"=",
"(",
"self",
".",
"api",
".",
"call",
"(",
"'join '",
... | Write configuration to chassis.
Raise StreamWarningsError if configuration warnings found. | [
"Write",
"configuration",
"to",
"chassis",
"."
] | train | https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_port.py#L187-L199 |
shmir/PyIxExplorer | ixexplorer/ixe_port.py | IxePort.load_config | def load_config(self, config_file_name):
""" Load configuration file from prt or str.
Configuration file type is extracted from the file suffix - prt or str.
:param config_file_name: full path to the configuration file.
IxTclServer must have access to the file location. either:
The config file is on shared folder.
IxTclServer run on the client machine.
"""
config_file_name = config_file_name.replace('\\', '/')
ext = path.splitext(config_file_name)[-1].lower()
if ext == '.prt':
self.api.call_rc('port import "{}" {}'.format(config_file_name, self.uri))
elif ext == '.str':
self.reset()
self.api.call_rc('stream import "{}" {}'.format(config_file_name, self.uri))
else:
raise ValueError('Configuration file type {} not supported.'.format(ext))
self.write()
self.discover() | python | def load_config(self, config_file_name):
""" Load configuration file from prt or str.
Configuration file type is extracted from the file suffix - prt or str.
:param config_file_name: full path to the configuration file.
IxTclServer must have access to the file location. either:
The config file is on shared folder.
IxTclServer run on the client machine.
"""
config_file_name = config_file_name.replace('\\', '/')
ext = path.splitext(config_file_name)[-1].lower()
if ext == '.prt':
self.api.call_rc('port import "{}" {}'.format(config_file_name, self.uri))
elif ext == '.str':
self.reset()
self.api.call_rc('stream import "{}" {}'.format(config_file_name, self.uri))
else:
raise ValueError('Configuration file type {} not supported.'.format(ext))
self.write()
self.discover() | [
"def",
"load_config",
"(",
"self",
",",
"config_file_name",
")",
":",
"config_file_name",
"=",
"config_file_name",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"ext",
"=",
"path",
".",
"splitext",
"(",
"config_file_name",
")",
"[",
"-",
"1",
"]",
".",
... | Load configuration file from prt or str.
Configuration file type is extracted from the file suffix - prt or str.
:param config_file_name: full path to the configuration file.
IxTclServer must have access to the file location. either:
The config file is on shared folder.
IxTclServer run on the client machine. | [
"Load",
"configuration",
"file",
"from",
"prt",
"or",
"str",
"."
] | train | https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_port.py#L211-L231 |
shmir/PyIxExplorer | ixexplorer/ixe_port.py | IxePort.save_config | def save_config(self, config_file_name):
""" Save configuration file from prt or str.
Configuration file type is extracted from the file suffix - prt or str.
:param config_file_name: full path to the configuration file.
IxTclServer must have access to the file location. either:
The config file is on shared folder.
IxTclServer run on the client machine.
"""
config_file_name = config_file_name.replace('\\', '/')
ext = path.splitext(config_file_name)[-1].lower()
if ext == '.prt':
self.api.call_rc('port export "{}" {}'.format(config_file_name, self.uri))
elif ext == '.str':
# self.reset()
self.api.call_rc('stream export "{}" {}'.format(config_file_name, self.uri))
else:
raise ValueError('Configuration file type {} not supported.'.format(ext)) | python | def save_config(self, config_file_name):
""" Save configuration file from prt or str.
Configuration file type is extracted from the file suffix - prt or str.
:param config_file_name: full path to the configuration file.
IxTclServer must have access to the file location. either:
The config file is on shared folder.
IxTclServer run on the client machine.
"""
config_file_name = config_file_name.replace('\\', '/')
ext = path.splitext(config_file_name)[-1].lower()
if ext == '.prt':
self.api.call_rc('port export "{}" {}'.format(config_file_name, self.uri))
elif ext == '.str':
# self.reset()
self.api.call_rc('stream export "{}" {}'.format(config_file_name, self.uri))
else:
raise ValueError('Configuration file type {} not supported.'.format(ext)) | [
"def",
"save_config",
"(",
"self",
",",
"config_file_name",
")",
":",
"config_file_name",
"=",
"config_file_name",
".",
"replace",
"(",
"'\\\\'",
",",
"'/'",
")",
"ext",
"=",
"path",
".",
"splitext",
"(",
"config_file_name",
")",
"[",
"-",
"1",
"]",
".",
... | Save configuration file from prt or str.
Configuration file type is extracted from the file suffix - prt or str.
:param config_file_name: full path to the configuration file.
IxTclServer must have access to the file location. either:
The config file is on shared folder.
IxTclServer run on the client machine. | [
"Save",
"configuration",
"file",
"from",
"prt",
"or",
"str",
"."
] | train | https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_port.py#L233-L251 |
shmir/PyIxExplorer | ixexplorer/ixe_port.py | IxePort.start_transmit | def start_transmit(self, blocking=False):
""" Start transmit on port.
:param blocking: True - wait for traffic end, False - return after traffic start.
"""
self.session.start_transmit(blocking, False, self) | python | def start_transmit(self, blocking=False):
""" Start transmit on port.
:param blocking: True - wait for traffic end, False - return after traffic start.
"""
self.session.start_transmit(blocking, False, self) | [
"def",
"start_transmit",
"(",
"self",
",",
"blocking",
"=",
"False",
")",
":",
"self",
".",
"session",
".",
"start_transmit",
"(",
"blocking",
",",
"False",
",",
"self",
")"
] | Start transmit on port.
:param blocking: True - wait for traffic end, False - return after traffic start. | [
"Start",
"transmit",
"on",
"port",
"."
] | train | https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_port.py#L268-L274 |
shmir/PyIxExplorer | ixexplorer/ixe_port.py | IxePort.stop_capture | def stop_capture(self, cap_file_name=None, cap_file_format=IxeCapFileFormat.mem):
""" Stop capture on port.
:param cap_file_name: prefix for the capture file name.
Capture file will be saved as pcap file named 'prefix' + 'URI'.pcap.
:param cap_file_format: exported file format
:return: number of captured frames
"""
return self.session.stop_capture(cap_file_name, cap_file_format, self)[self] | python | def stop_capture(self, cap_file_name=None, cap_file_format=IxeCapFileFormat.mem):
""" Stop capture on port.
:param cap_file_name: prefix for the capture file name.
Capture file will be saved as pcap file named 'prefix' + 'URI'.pcap.
:param cap_file_format: exported file format
:return: number of captured frames
"""
return self.session.stop_capture(cap_file_name, cap_file_format, self)[self] | [
"def",
"stop_capture",
"(",
"self",
",",
"cap_file_name",
"=",
"None",
",",
"cap_file_format",
"=",
"IxeCapFileFormat",
".",
"mem",
")",
":",
"return",
"self",
".",
"session",
".",
"stop_capture",
"(",
"cap_file_name",
",",
"cap_file_format",
",",
"self",
")",... | Stop capture on port.
:param cap_file_name: prefix for the capture file name.
Capture file will be saved as pcap file named 'prefix' + 'URI'.pcap.
:param cap_file_format: exported file format
:return: number of captured frames | [
"Stop",
"capture",
"on",
"port",
"."
] | train | https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_port.py#L286-L295 |
shmir/PyIxExplorer | ixexplorer/ixe_port.py | IxePort.get_cap_frames | def get_cap_frames(self, *frame_nums):
""" Stop capture on ports.
:param frame_nums: list of frame numbers to read.
:return: list of captured frames.
"""
frames = []
for frame_num in frame_nums:
if self.captureBuffer.getframe(frame_num) == '0':
frames.append(self.captureBuffer.frame)
else:
frames.append(None)
return frames | python | def get_cap_frames(self, *frame_nums):
""" Stop capture on ports.
:param frame_nums: list of frame numbers to read.
:return: list of captured frames.
"""
frames = []
for frame_num in frame_nums:
if self.captureBuffer.getframe(frame_num) == '0':
frames.append(self.captureBuffer.frame)
else:
frames.append(None)
return frames | [
"def",
"get_cap_frames",
"(",
"self",
",",
"*",
"frame_nums",
")",
":",
"frames",
"=",
"[",
"]",
"for",
"frame_num",
"in",
"frame_nums",
":",
"if",
"self",
".",
"captureBuffer",
".",
"getframe",
"(",
"frame_num",
")",
"==",
"'0'",
":",
"frames",
".",
"... | Stop capture on ports.
:param frame_nums: list of frame numbers to read.
:return: list of captured frames. | [
"Stop",
"capture",
"on",
"ports",
"."
] | train | https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_port.py#L300-L313 |
shmir/PyIxExplorer | ixexplorer/ixe_port.py | IxePort.clear_port_stats | def clear_port_stats(self):
""" Clear only port stats (leave stream and packet group stats).
Do not use - still working with Ixia to resolve.
"""
stat = IxeStat(self)
stat.ix_set_default()
stat.enableValidStats = True
stat.ix_set()
stat.write() | python | def clear_port_stats(self):
""" Clear only port stats (leave stream and packet group stats).
Do not use - still working with Ixia to resolve.
"""
stat = IxeStat(self)
stat.ix_set_default()
stat.enableValidStats = True
stat.ix_set()
stat.write() | [
"def",
"clear_port_stats",
"(",
"self",
")",
":",
"stat",
"=",
"IxeStat",
"(",
"self",
")",
"stat",
".",
"ix_set_default",
"(",
")",
"stat",
".",
"enableValidStats",
"=",
"True",
"stat",
".",
"ix_set",
"(",
")",
"stat",
".",
"write",
"(",
")"
] | Clear only port stats (leave stream and packet group stats).
Do not use - still working with Ixia to resolve. | [
"Clear",
"only",
"port",
"stats",
"(",
"leave",
"stream",
"and",
"packet",
"group",
"stats",
")",
"."
] | train | https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_port.py#L319-L328 |
shmir/PyIxExplorer | ixexplorer/ixe_port.py | IxePort.set_phy_mode | def set_phy_mode(self, mode=IxePhyMode.ignore):
""" Set phy mode to copper or fiber.
:param mode: requested PHY mode.
"""
if isinstance(mode, IxePhyMode):
if mode.value:
self.api.call_rc('port setPhyMode {} {}'.format(mode.value, self.uri))
else:
self.api.call_rc('port setPhyMode {} {}'.format(mode, self.uri)) | python | def set_phy_mode(self, mode=IxePhyMode.ignore):
""" Set phy mode to copper or fiber.
:param mode: requested PHY mode.
"""
if isinstance(mode, IxePhyMode):
if mode.value:
self.api.call_rc('port setPhyMode {} {}'.format(mode.value, self.uri))
else:
self.api.call_rc('port setPhyMode {} {}'.format(mode, self.uri)) | [
"def",
"set_phy_mode",
"(",
"self",
",",
"mode",
"=",
"IxePhyMode",
".",
"ignore",
")",
":",
"if",
"isinstance",
"(",
"mode",
",",
"IxePhyMode",
")",
":",
"if",
"mode",
".",
"value",
":",
"self",
".",
"api",
".",
"call_rc",
"(",
"'port setPhyMode {} {}'"... | Set phy mode to copper or fiber.
:param mode: requested PHY mode. | [
"Set",
"phy",
"mode",
"to",
"copper",
"or",
"fiber",
".",
":",
"param",
"mode",
":",
"requested",
"PHY",
"mode",
"."
] | train | https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_port.py#L344-L352 |
shmir/PyIxExplorer | ixexplorer/ixe_port.py | IxePort.set_transmit_mode | def set_transmit_mode(self, mode):
""" set port transmit mode
:param mode: request transmit mode
:type mode: ixexplorer.ixe_port.IxeTransmitMode
"""
self.api.call_rc('port setTransmitMode {} {}'.format(mode, self.uri)) | python | def set_transmit_mode(self, mode):
""" set port transmit mode
:param mode: request transmit mode
:type mode: ixexplorer.ixe_port.IxeTransmitMode
"""
self.api.call_rc('port setTransmitMode {} {}'.format(mode, self.uri)) | [
"def",
"set_transmit_mode",
"(",
"self",
",",
"mode",
")",
":",
"self",
".",
"api",
".",
"call_rc",
"(",
"'port setTransmitMode {} {}'",
".",
"format",
"(",
"mode",
",",
"self",
".",
"uri",
")",
")"
] | set port transmit mode
:param mode: request transmit mode
:type mode: ixexplorer.ixe_port.IxeTransmitMode | [
"set",
"port",
"transmit",
"mode"
] | train | https://github.com/shmir/PyIxExplorer/blob/d6946b9ce0e8961507cc912062e10c365d4beee2/ixexplorer/ixe_port.py#L372-L379 |
lewiscollard/django-flexible-images | flexible_images/util.py | get_image_sizes | def get_image_sizes(image):
"""Given an ImageField `image`, returns a list of images sizes in this
form:
[
{
"url": "http://example.com/xxx.jpg",
"width": 1440,
"height": 960
},
[...]
]"""
# It is possible to have the same width appear more than once, if
# THUMBNAIL_UPSCALE is set to False and the image's width is less than the
# largest value in FLEXIBLE_IMAGE_SIZES. So keep track of widths and
# don't output more than one image with the same width (which would result
# in an invalid `srcset` attribute).
sizes = []
seen_widths = []
for size in settings_sizes():
img = get_thumbnail_shim(image, size)
if img.width in seen_widths:
continue
seen_widths.append(img.width)
sizes.append({
"url": img.url,
"width": img.width,
"height": img.height,
})
return sizes | python | def get_image_sizes(image):
"""Given an ImageField `image`, returns a list of images sizes in this
form:
[
{
"url": "http://example.com/xxx.jpg",
"width": 1440,
"height": 960
},
[...]
]"""
# It is possible to have the same width appear more than once, if
# THUMBNAIL_UPSCALE is set to False and the image's width is less than the
# largest value in FLEXIBLE_IMAGE_SIZES. So keep track of widths and
# don't output more than one image with the same width (which would result
# in an invalid `srcset` attribute).
sizes = []
seen_widths = []
for size in settings_sizes():
img = get_thumbnail_shim(image, size)
if img.width in seen_widths:
continue
seen_widths.append(img.width)
sizes.append({
"url": img.url,
"width": img.width,
"height": img.height,
})
return sizes | [
"def",
"get_image_sizes",
"(",
"image",
")",
":",
"# It is possible to have the same width appear more than once, if",
"# THUMBNAIL_UPSCALE is set to False and the image's width is less than the",
"# largest value in FLEXIBLE_IMAGE_SIZES. So keep track of widths and",
"# don't output more than one... | Given an ImageField `image`, returns a list of images sizes in this
form:
[
{
"url": "http://example.com/xxx.jpg",
"width": 1440,
"height": 960
},
[...]
] | [
"Given",
"an",
"ImageField",
"image",
"returns",
"a",
"list",
"of",
"images",
"sizes",
"in",
"this",
"form",
":"
] | train | https://github.com/lewiscollard/django-flexible-images/blob/d9e369586b70067f137b785c0e727d7244c0b17f/flexible_images/util.py#L64-L98 |
lewiscollard/django-flexible-images | flexible_images/util.py | get_template_context | def get_template_context(src, container="div", classes="", inner_classes="", alt="", background_image=False, no_css=False, aria_hidden=False):
"""Returns a template context for a flexible image template
tag implementation."""
context = {
"container": container,
"classes": classes,
"aspect_padding_bottom": aspect_ratio_percent(src),
"alt": alt,
"background_image": background_image,
"no_css": no_css,
"inner_classes": inner_classes,
"aria_hidden": aria_hidden,
}
# We can't do any of the srcset (or JS switching fallback) if we don't
# have a thumbnail library installed.
if not get_thumbnail_engine():
context["image"] = src
return context
sizes = get_image_sizes(src)
context["image_sizes"] = sizes
# Set the first image in the list as the one to be rendered initially
# (pre-JS-fallback). `if sizes` might not be a necessary check...
context["image"] = sizes[0]
context["image_sizes_json"] = json.dumps(sizes)
srcset_items = ["{} {}w".format(size["url"], size["width"]) for size in sizes]
context["image_sizes_srcset"] = ", ".join(srcset_items)
return context | python | def get_template_context(src, container="div", classes="", inner_classes="", alt="", background_image=False, no_css=False, aria_hidden=False):
"""Returns a template context for a flexible image template
tag implementation."""
context = {
"container": container,
"classes": classes,
"aspect_padding_bottom": aspect_ratio_percent(src),
"alt": alt,
"background_image": background_image,
"no_css": no_css,
"inner_classes": inner_classes,
"aria_hidden": aria_hidden,
}
# We can't do any of the srcset (or JS switching fallback) if we don't
# have a thumbnail library installed.
if not get_thumbnail_engine():
context["image"] = src
return context
sizes = get_image_sizes(src)
context["image_sizes"] = sizes
# Set the first image in the list as the one to be rendered initially
# (pre-JS-fallback). `if sizes` might not be a necessary check...
context["image"] = sizes[0]
context["image_sizes_json"] = json.dumps(sizes)
srcset_items = ["{} {}w".format(size["url"], size["width"]) for size in sizes]
context["image_sizes_srcset"] = ", ".join(srcset_items)
return context | [
"def",
"get_template_context",
"(",
"src",
",",
"container",
"=",
"\"div\"",
",",
"classes",
"=",
"\"\"",
",",
"inner_classes",
"=",
"\"\"",
",",
"alt",
"=",
"\"\"",
",",
"background_image",
"=",
"False",
",",
"no_css",
"=",
"False",
",",
"aria_hidden",
"=... | Returns a template context for a flexible image template
tag implementation. | [
"Returns",
"a",
"template",
"context",
"for",
"a",
"flexible",
"image",
"template",
"tag",
"implementation",
"."
] | train | https://github.com/lewiscollard/django-flexible-images/blob/d9e369586b70067f137b785c0e727d7244c0b17f/flexible_images/util.py#L101-L132 |
thunder-project/thunder-registration | registration/algorithms/crosscorr.py | CrossCorr.fit | def fit(self, images, reference=None):
"""
Estimate registration model using cross-correlation.
Use cross correlation to compute displacements between
images or volumes and reference. Displacements will be
2D for images and 3D for volumes.
Parameters
----------
images : array-like or thunder images
The sequence of images / volumes to register.
reference : array-like
A reference image to align to.
"""
images = check_images(images)
reference = check_reference(images, reference)
def func(item):
key, image = item
return asarray([key, self._get(image, reference)])
transformations = images.map(func, with_keys=True).toarray()
if images.shape[0] == 1:
transformations = [transformations]
algorithm = self.__class__.__name__
return RegistrationModel(dict(transformations), algorithm=algorithm) | python | def fit(self, images, reference=None):
"""
Estimate registration model using cross-correlation.
Use cross correlation to compute displacements between
images or volumes and reference. Displacements will be
2D for images and 3D for volumes.
Parameters
----------
images : array-like or thunder images
The sequence of images / volumes to register.
reference : array-like
A reference image to align to.
"""
images = check_images(images)
reference = check_reference(images, reference)
def func(item):
key, image = item
return asarray([key, self._get(image, reference)])
transformations = images.map(func, with_keys=True).toarray()
if images.shape[0] == 1:
transformations = [transformations]
algorithm = self.__class__.__name__
return RegistrationModel(dict(transformations), algorithm=algorithm) | [
"def",
"fit",
"(",
"self",
",",
"images",
",",
"reference",
"=",
"None",
")",
":",
"images",
"=",
"check_images",
"(",
"images",
")",
"reference",
"=",
"check_reference",
"(",
"images",
",",
"reference",
")",
"def",
"func",
"(",
"item",
")",
":",
"key"... | Estimate registration model using cross-correlation.
Use cross correlation to compute displacements between
images or volumes and reference. Displacements will be
2D for images and 3D for volumes.
Parameters
----------
images : array-like or thunder images
The sequence of images / volumes to register.
reference : array-like
A reference image to align to. | [
"Estimate",
"registration",
"model",
"using",
"cross",
"-",
"correlation",
"."
] | train | https://github.com/thunder-project/thunder-registration/blob/286f091e6c402d592e9686d4821e0fd80dbf86ec/registration/algorithms/crosscorr.py#L27-L55 |
thunder-project/thunder-registration | registration/algorithms/crosscorr.py | CrossCorr.fit_and_transform | def fit_and_transform(self, images, reference=None):
"""
Estimate and apply registration model using cross-correlation.
Use cross correlation to compute displacements between
images or volumes and reference, and apply the
estimated model to the data. Displacements will be
2D for images and 3D for volumes.
Parameters
----------
images : array-like or thunder images
The sequence of images / volumes to register.
reference : array-like
A reference image to align to.
"""
images = check_images(images)
check_reference(images, reference)
def func(image):
t = self._get(image, reference)
return t.apply(image)
return images.map(func) | python | def fit_and_transform(self, images, reference=None):
"""
Estimate and apply registration model using cross-correlation.
Use cross correlation to compute displacements between
images or volumes and reference, and apply the
estimated model to the data. Displacements will be
2D for images and 3D for volumes.
Parameters
----------
images : array-like or thunder images
The sequence of images / volumes to register.
reference : array-like
A reference image to align to.
"""
images = check_images(images)
check_reference(images, reference)
def func(image):
t = self._get(image, reference)
return t.apply(image)
return images.map(func) | [
"def",
"fit_and_transform",
"(",
"self",
",",
"images",
",",
"reference",
"=",
"None",
")",
":",
"images",
"=",
"check_images",
"(",
"images",
")",
"check_reference",
"(",
"images",
",",
"reference",
")",
"def",
"func",
"(",
"image",
")",
":",
"t",
"=",
... | Estimate and apply registration model using cross-correlation.
Use cross correlation to compute displacements between
images or volumes and reference, and apply the
estimated model to the data. Displacements will be
2D for images and 3D for volumes.
Parameters
----------
images : array-like or thunder images
The sequence of images / volumes to register.
reference : array-like
A reference image to align to. | [
"Estimate",
"and",
"apply",
"registration",
"model",
"using",
"cross",
"-",
"correlation",
"."
] | train | https://github.com/thunder-project/thunder-registration/blob/286f091e6c402d592e9686d4821e0fd80dbf86ec/registration/algorithms/crosscorr.py#L57-L81 |
grabbles/grabbit | grabbit/extensions/writable.py | build_path | def build_path(entities, path_patterns, strict=False):
"""
Constructs a path given a set of entities and a list of potential
filename patterns to use.
Args:
entities (dict): A dictionary mapping entity names to entity values.
path_patterns (str, list): One or more filename patterns to write
the file to. Entities should be represented by the name
surrounded by curly braces. Optional portions of the patterns
should be denoted by square brackets. Entities that require a
specific value for the pattern to match can pass them inside
carets. Default values can be assigned by specifying a string after
the pipe operator. E.g., (e.g., {type<image>|bold} would only match
the pattern if the entity 'type' was passed and its value is
"image", otherwise the default value "bold" will be used).
Example 1: 'sub-{subject}/[var-{name}/]{id}.csv'
Result 2: 'sub-01/var-SES/1045.csv'
strict (bool): If True, all passed entities must be matched inside a
pattern in order to be a valid match. If False, extra entities will
be ignored so long as all mandatory entities are found.
Returns:
A constructed path for this file based on the provided patterns.
"""
if isinstance(path_patterns, string_types):
path_patterns = [path_patterns]
# Loop over available patherns, return first one that matches all
for pattern in path_patterns:
# If strict, all entities must be contained in the pattern
if strict:
defined = re.findall('\{(.*?)(?:<[^>]+>)?\}', pattern)
if set(entities.keys()) - set(defined):
continue
# Iterate through the provided path patterns
new_path = pattern
optional_patterns = re.findall('\[(.*?)\]', pattern)
# First build from optional patterns if possible
for optional_pattern in optional_patterns:
optional_chunk = replace_entities(entities, optional_pattern) or ''
new_path = new_path.replace('[%s]' % optional_pattern,
optional_chunk)
# Replace remaining entities
new_path = replace_entities(entities, new_path)
if new_path:
return new_path
return None | python | def build_path(entities, path_patterns, strict=False):
"""
Constructs a path given a set of entities and a list of potential
filename patterns to use.
Args:
entities (dict): A dictionary mapping entity names to entity values.
path_patterns (str, list): One or more filename patterns to write
the file to. Entities should be represented by the name
surrounded by curly braces. Optional portions of the patterns
should be denoted by square brackets. Entities that require a
specific value for the pattern to match can pass them inside
carets. Default values can be assigned by specifying a string after
the pipe operator. E.g., (e.g., {type<image>|bold} would only match
the pattern if the entity 'type' was passed and its value is
"image", otherwise the default value "bold" will be used).
Example 1: 'sub-{subject}/[var-{name}/]{id}.csv'
Result 2: 'sub-01/var-SES/1045.csv'
strict (bool): If True, all passed entities must be matched inside a
pattern in order to be a valid match. If False, extra entities will
be ignored so long as all mandatory entities are found.
Returns:
A constructed path for this file based on the provided patterns.
"""
if isinstance(path_patterns, string_types):
path_patterns = [path_patterns]
# Loop over available patherns, return first one that matches all
for pattern in path_patterns:
# If strict, all entities must be contained in the pattern
if strict:
defined = re.findall('\{(.*?)(?:<[^>]+>)?\}', pattern)
if set(entities.keys()) - set(defined):
continue
# Iterate through the provided path patterns
new_path = pattern
optional_patterns = re.findall('\[(.*?)\]', pattern)
# First build from optional patterns if possible
for optional_pattern in optional_patterns:
optional_chunk = replace_entities(entities, optional_pattern) or ''
new_path = new_path.replace('[%s]' % optional_pattern,
optional_chunk)
# Replace remaining entities
new_path = replace_entities(entities, new_path)
if new_path:
return new_path
return None | [
"def",
"build_path",
"(",
"entities",
",",
"path_patterns",
",",
"strict",
"=",
"False",
")",
":",
"if",
"isinstance",
"(",
"path_patterns",
",",
"string_types",
")",
":",
"path_patterns",
"=",
"[",
"path_patterns",
"]",
"# Loop over available patherns, return first... | Constructs a path given a set of entities and a list of potential
filename patterns to use.
Args:
entities (dict): A dictionary mapping entity names to entity values.
path_patterns (str, list): One or more filename patterns to write
the file to. Entities should be represented by the name
surrounded by curly braces. Optional portions of the patterns
should be denoted by square brackets. Entities that require a
specific value for the pattern to match can pass them inside
carets. Default values can be assigned by specifying a string after
the pipe operator. E.g., (e.g., {type<image>|bold} would only match
the pattern if the entity 'type' was passed and its value is
"image", otherwise the default value "bold" will be used).
Example 1: 'sub-{subject}/[var-{name}/]{id}.csv'
Result 2: 'sub-01/var-SES/1045.csv'
strict (bool): If True, all passed entities must be matched inside a
pattern in order to be a valid match. If False, extra entities will
be ignored so long as all mandatory entities are found.
Returns:
A constructed path for this file based on the provided patterns. | [
"Constructs",
"a",
"path",
"given",
"a",
"set",
"of",
"entities",
"and",
"a",
"list",
"of",
"potential",
"filename",
"patterns",
"to",
"use",
"."
] | train | https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/extensions/writable.py#L55-L104 |
mikebryant/django-autoconfig | django_autoconfig/environment_settings/autoconfig.py | get_settings_from_environment | def get_settings_from_environment(environ):
'''Deduce settings from environment variables'''
settings = {}
for name, value in environ.items():
if not name.startswith('DJANGO_'):
continue
name = name.replace('DJANGO_', '', 1)
if _ignore_setting(name):
continue
try:
settings[name] = ast.literal_eval(value)
except (SyntaxError, ValueError) as err:
LOGGER.warn("Unable to parse setting %s=%s (%s)", name, value, err)
return settings | python | def get_settings_from_environment(environ):
'''Deduce settings from environment variables'''
settings = {}
for name, value in environ.items():
if not name.startswith('DJANGO_'):
continue
name = name.replace('DJANGO_', '', 1)
if _ignore_setting(name):
continue
try:
settings[name] = ast.literal_eval(value)
except (SyntaxError, ValueError) as err:
LOGGER.warn("Unable to parse setting %s=%s (%s)", name, value, err)
return settings | [
"def",
"get_settings_from_environment",
"(",
"environ",
")",
":",
"settings",
"=",
"{",
"}",
"for",
"name",
",",
"value",
"in",
"environ",
".",
"items",
"(",
")",
":",
"if",
"not",
"name",
".",
"startswith",
"(",
"'DJANGO_'",
")",
":",
"continue",
"name"... | Deduce settings from environment variables | [
"Deduce",
"settings",
"from",
"environment",
"variables"
] | train | https://github.com/mikebryant/django-autoconfig/blob/1ce793e95b024dccb189de1dc111dc4ae6c2f3a6/django_autoconfig/environment_settings/autoconfig.py#L12-L25 |
biocommons/bioutils | src/bioutils/vmc_digest.py | vmc_digest | def vmc_digest(data, digest_size=DEFAULT_DIGEST_SIZE):
"""Returns the VMC Digest as a Digest object, which has both bytes and str (
URL-safe, Base 64) representations.
>>> d = vmc_digest("")
# I can't figure out how to make this test work on Py 2 and 3 :-(
>>> d # doctest: +SKIP
b'\xcf\x83\xe15~\xef\xb8\xbd\xf1T(P\xd6m\x80'
>>> str(d)
'z4PhNX7vuL3xVChQ1m2AB9Yg5AULVxXc'
>>> len(d), len(str(d))
(24, 32)
>>> str(vmc_digest("", 24))
'z4PhNX7vuL3xVChQ1m2AB9Yg5AULVxXc'
>>> vmc_digest("", 17)
Traceback (most recent call last):
...
ValueError: digest_size must be a multiple of 3
>>> vmc_digest("", 66)
Traceback (most recent call last):
...
ValueError: digest_size must be between 0 and 63 (bytes)
SHA-512 is 2x faster than SHA1 on modern 64-bit platforms.
However, few appliations require 512 bits (64 bytes) of keyspace.
That larger size translates into proportionally larger key size
requirements, with attendant performance implications. By
truncating the SHA-512 digest [1], users may obtain a tunable
level of collision avoidance.
The string returned by this function is Base 64 encoded with
URL-safe characters [2], making it suitable for use with URLs or
filesystem paths. Base 64 encoding results in an output string
that is 4/3 the size of the input. If the length of the input
string is not divisible by 3, the output is right-padded with
equal signs (=), which have no information content. Therefore,
this function requires that digest_size is evenly divisible by 3.
(The resulting vmc_digest will be 4/3*digest_size bytes.)
According to [3], the probability of a collision using b bits with
m messages (sequences) is:
P(b, m) = m^2 / 2^(b+1).
Note that the collision probability depends on the number of
messages, but not their size. Solving for the number of messages:
m(b, P) = sqrt(P * 2^(b+1))
Solving for the number of *bits*:
b(m, P) = log2(m^2/P) - 1
For various values of m and P, the number of *bytes* required
according to b(m,P) rounded to next multiple of 3 is:
+-------+----------+----------+----------+----------+----------+----------+
| #m | P<=1e-24 | P<=1e-21 | P<=1e-18 | P<=1e-15 | P<=1e-12 | P<=1e-09 |
+-------+----------+----------+----------+----------+----------+----------+
| 1e+06 | 15 | 12 | 12 | 9 | 9 | 9 |
| 1e+09 | 15 | 15 | 12 | 12 | 9 | 9 |
| 1e+12 | 15 | 15 | 15 | 12 | 12 | 9 |
| 1e+15 | 18 | 15 | 15 | 15 | 12 | 12 |
| 1e+18 | 18 | 18 | 15 | 15 | 15 | 12 |
| 1e+21 | 21 | 18 | 18 | 15 | 15 | 15 |
| 1e+24 | 21 | 21 | 18 | 18 | 15 | 15 |
+-------+----------+----------+----------+----------+----------+----------+
For example, given 1e+18 expected messages and a desired collision
probability < 1e-15, we use digest_size = 15 (bytes).
[1] http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf
[2] https://tools.ietf.org/html/rfc3548#section-4
[3] http://stackoverflow.com/a/4014407/342839
[4] http://stackoverflow.com/a/22029380/342839
[5] http://preshing.com/20110504/hash-collision-probabilities/
[6] https://en.wikipedia.org/wiki/Birthday_problem
"""
# TODO: Consider relaxing %3 constraint and stripping padding
if digest_size % 3 != 0:
raise ValueError("digest_size must be a multiple of 3")
if not 0 <= digest_size <= 63:
raise ValueError("digest_size must be between 0 and 63 (bytes)")
sha512 = Digest(hashlib.sha512(data.encode(ENC)).digest())
return sha512[:digest_size] | python | def vmc_digest(data, digest_size=DEFAULT_DIGEST_SIZE):
"""Returns the VMC Digest as a Digest object, which has both bytes and str (
URL-safe, Base 64) representations.
>>> d = vmc_digest("")
# I can't figure out how to make this test work on Py 2 and 3 :-(
>>> d # doctest: +SKIP
b'\xcf\x83\xe15~\xef\xb8\xbd\xf1T(P\xd6m\x80'
>>> str(d)
'z4PhNX7vuL3xVChQ1m2AB9Yg5AULVxXc'
>>> len(d), len(str(d))
(24, 32)
>>> str(vmc_digest("", 24))
'z4PhNX7vuL3xVChQ1m2AB9Yg5AULVxXc'
>>> vmc_digest("", 17)
Traceback (most recent call last):
...
ValueError: digest_size must be a multiple of 3
>>> vmc_digest("", 66)
Traceback (most recent call last):
...
ValueError: digest_size must be between 0 and 63 (bytes)
SHA-512 is 2x faster than SHA1 on modern 64-bit platforms.
However, few appliations require 512 bits (64 bytes) of keyspace.
That larger size translates into proportionally larger key size
requirements, with attendant performance implications. By
truncating the SHA-512 digest [1], users may obtain a tunable
level of collision avoidance.
The string returned by this function is Base 64 encoded with
URL-safe characters [2], making it suitable for use with URLs or
filesystem paths. Base 64 encoding results in an output string
that is 4/3 the size of the input. If the length of the input
string is not divisible by 3, the output is right-padded with
equal signs (=), which have no information content. Therefore,
this function requires that digest_size is evenly divisible by 3.
(The resulting vmc_digest will be 4/3*digest_size bytes.)
According to [3], the probability of a collision using b bits with
m messages (sequences) is:
P(b, m) = m^2 / 2^(b+1).
Note that the collision probability depends on the number of
messages, but not their size. Solving for the number of messages:
m(b, P) = sqrt(P * 2^(b+1))
Solving for the number of *bits*:
b(m, P) = log2(m^2/P) - 1
For various values of m and P, the number of *bytes* required
according to b(m,P) rounded to next multiple of 3 is:
+-------+----------+----------+----------+----------+----------+----------+
| #m | P<=1e-24 | P<=1e-21 | P<=1e-18 | P<=1e-15 | P<=1e-12 | P<=1e-09 |
+-------+----------+----------+----------+----------+----------+----------+
| 1e+06 | 15 | 12 | 12 | 9 | 9 | 9 |
| 1e+09 | 15 | 15 | 12 | 12 | 9 | 9 |
| 1e+12 | 15 | 15 | 15 | 12 | 12 | 9 |
| 1e+15 | 18 | 15 | 15 | 15 | 12 | 12 |
| 1e+18 | 18 | 18 | 15 | 15 | 15 | 12 |
| 1e+21 | 21 | 18 | 18 | 15 | 15 | 15 |
| 1e+24 | 21 | 21 | 18 | 18 | 15 | 15 |
+-------+----------+----------+----------+----------+----------+----------+
For example, given 1e+18 expected messages and a desired collision
probability < 1e-15, we use digest_size = 15 (bytes).
[1] http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf
[2] https://tools.ietf.org/html/rfc3548#section-4
[3] http://stackoverflow.com/a/4014407/342839
[4] http://stackoverflow.com/a/22029380/342839
[5] http://preshing.com/20110504/hash-collision-probabilities/
[6] https://en.wikipedia.org/wiki/Birthday_problem
"""
# TODO: Consider relaxing %3 constraint and stripping padding
if digest_size % 3 != 0:
raise ValueError("digest_size must be a multiple of 3")
if not 0 <= digest_size <= 63:
raise ValueError("digest_size must be between 0 and 63 (bytes)")
sha512 = Digest(hashlib.sha512(data.encode(ENC)).digest())
return sha512[:digest_size] | [
"def",
"vmc_digest",
"(",
"data",
",",
"digest_size",
"=",
"DEFAULT_DIGEST_SIZE",
")",
":",
"# TODO: Consider relaxing %3 constraint and stripping padding",
"if",
"digest_size",
"%",
"3",
"!=",
"0",
":",
"raise",
"ValueError",
"(",
"\"digest_size must be a multiple of 3\"",... | Returns the VMC Digest as a Digest object, which has both bytes and str (
URL-safe, Base 64) representations.
>>> d = vmc_digest("")
# I can't figure out how to make this test work on Py 2 and 3 :-(
>>> d # doctest: +SKIP
b'\xcf\x83\xe15~\xef\xb8\xbd\xf1T(P\xd6m\x80'
>>> str(d)
'z4PhNX7vuL3xVChQ1m2AB9Yg5AULVxXc'
>>> len(d), len(str(d))
(24, 32)
>>> str(vmc_digest("", 24))
'z4PhNX7vuL3xVChQ1m2AB9Yg5AULVxXc'
>>> vmc_digest("", 17)
Traceback (most recent call last):
...
ValueError: digest_size must be a multiple of 3
>>> vmc_digest("", 66)
Traceback (most recent call last):
...
ValueError: digest_size must be between 0 and 63 (bytes)
SHA-512 is 2x faster than SHA1 on modern 64-bit platforms.
However, few appliations require 512 bits (64 bytes) of keyspace.
That larger size translates into proportionally larger key size
requirements, with attendant performance implications. By
truncating the SHA-512 digest [1], users may obtain a tunable
level of collision avoidance.
The string returned by this function is Base 64 encoded with
URL-safe characters [2], making it suitable for use with URLs or
filesystem paths. Base 64 encoding results in an output string
that is 4/3 the size of the input. If the length of the input
string is not divisible by 3, the output is right-padded with
equal signs (=), which have no information content. Therefore,
this function requires that digest_size is evenly divisible by 3.
(The resulting vmc_digest will be 4/3*digest_size bytes.)
According to [3], the probability of a collision using b bits with
m messages (sequences) is:
P(b, m) = m^2 / 2^(b+1).
Note that the collision probability depends on the number of
messages, but not their size. Solving for the number of messages:
m(b, P) = sqrt(P * 2^(b+1))
Solving for the number of *bits*:
b(m, P) = log2(m^2/P) - 1
For various values of m and P, the number of *bytes* required
according to b(m,P) rounded to next multiple of 3 is:
+-------+----------+----------+----------+----------+----------+----------+
| #m | P<=1e-24 | P<=1e-21 | P<=1e-18 | P<=1e-15 | P<=1e-12 | P<=1e-09 |
+-------+----------+----------+----------+----------+----------+----------+
| 1e+06 | 15 | 12 | 12 | 9 | 9 | 9 |
| 1e+09 | 15 | 15 | 12 | 12 | 9 | 9 |
| 1e+12 | 15 | 15 | 15 | 12 | 12 | 9 |
| 1e+15 | 18 | 15 | 15 | 15 | 12 | 12 |
| 1e+18 | 18 | 18 | 15 | 15 | 15 | 12 |
| 1e+21 | 21 | 18 | 18 | 15 | 15 | 15 |
| 1e+24 | 21 | 21 | 18 | 18 | 15 | 15 |
+-------+----------+----------+----------+----------+----------+----------+
For example, given 1e+18 expected messages and a desired collision
probability < 1e-15, we use digest_size = 15 (bytes).
[1] http://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.180-4.pdf
[2] https://tools.ietf.org/html/rfc3548#section-4
[3] http://stackoverflow.com/a/4014407/342839
[4] http://stackoverflow.com/a/22029380/342839
[5] http://preshing.com/20110504/hash-collision-probabilities/
[6] https://en.wikipedia.org/wiki/Birthday_problem | [
"Returns",
"the",
"VMC",
"Digest",
"as",
"a",
"Digest",
"object",
"which",
"has",
"both",
"bytes",
"and",
"str",
"(",
"URL",
"-",
"safe",
"Base",
"64",
")",
"representations",
"."
] | train | https://github.com/biocommons/bioutils/blob/88bcbdfa707268fed1110800e91b6d4f8e9475a0/src/bioutils/vmc_digest.py#L13-L107 |
flo-compbio/genometools | genometools/expression/filter.py | filter_variance | def filter_variance(matrix, top):
"""Filter genes in an expression matrix by variance.
Parameters
----------
matrix: ExpMatrix
The expression matrix.
top: int
The number of genes to retain.
Returns
-------
ExpMatrix
The filtered expression matrix.
"""
assert isinstance(matrix, ExpMatrix)
assert isinstance(top, (int, np.integer))
if top >= matrix.p:
logger.warning('Variance filter has no effect '
'("top" parameter is >= number of genes).')
return matrix.copy()
var = np.var(matrix.X, axis=1, ddof=1)
total_var = np.sum(var) # total sum of variance
a = np.argsort(var)
a = a[::-1]
sel = np.zeros(matrix.p, dtype=np.bool_)
sel[a[:top]] = True
lost_p = matrix.p - top
lost_var = total_var - np.sum(var[sel])
logger.info('Selected the %d most variable genes '
'(excluded %.1f%% of genes, representing %.1f%% '
'of total variance).',
top, 100 * (lost_p / float(matrix.p)),
100 * (lost_var / total_var))
matrix = matrix.loc[sel]
return matrix | python | def filter_variance(matrix, top):
"""Filter genes in an expression matrix by variance.
Parameters
----------
matrix: ExpMatrix
The expression matrix.
top: int
The number of genes to retain.
Returns
-------
ExpMatrix
The filtered expression matrix.
"""
assert isinstance(matrix, ExpMatrix)
assert isinstance(top, (int, np.integer))
if top >= matrix.p:
logger.warning('Variance filter has no effect '
'("top" parameter is >= number of genes).')
return matrix.copy()
var = np.var(matrix.X, axis=1, ddof=1)
total_var = np.sum(var) # total sum of variance
a = np.argsort(var)
a = a[::-1]
sel = np.zeros(matrix.p, dtype=np.bool_)
sel[a[:top]] = True
lost_p = matrix.p - top
lost_var = total_var - np.sum(var[sel])
logger.info('Selected the %d most variable genes '
'(excluded %.1f%% of genes, representing %.1f%% '
'of total variance).',
top, 100 * (lost_p / float(matrix.p)),
100 * (lost_var / total_var))
matrix = matrix.loc[sel]
return matrix | [
"def",
"filter_variance",
"(",
"matrix",
",",
"top",
")",
":",
"assert",
"isinstance",
"(",
"matrix",
",",
"ExpMatrix",
")",
"assert",
"isinstance",
"(",
"top",
",",
"(",
"int",
",",
"np",
".",
"integer",
")",
")",
"if",
"top",
">=",
"matrix",
".",
"... | Filter genes in an expression matrix by variance.
Parameters
----------
matrix: ExpMatrix
The expression matrix.
top: int
The number of genes to retain.
Returns
-------
ExpMatrix
The filtered expression matrix. | [
"Filter",
"genes",
"in",
"an",
"expression",
"matrix",
"by",
"variance",
"."
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/expression/filter.py#L32-L71 |
def filter_mean(matrix, top):
    """Filter genes in an expression matrix by mean expression.

    Keeps the `top` genes with the highest mean expression across samples.

    Parameters
    ----------
    matrix: ExpMatrix
        The expression matrix.
    top: int
        The number of genes to retain.

    Returns
    -------
    ExpMatrix
        The filtered expression matrix.
    """
    assert isinstance(matrix, ExpMatrix)
    # accept numpy integer types as well, consistent with `filter_variance`
    assert isinstance(top, (int, np.integer))

    if top >= matrix.p:
        logger.warning('Gene expression filter with `top` parameter that is '
                       '>= the number of genes!')
        top = matrix.p

    # gene indices ordered from highest to lowest mean expression
    a = np.argsort(np.mean(matrix.X, axis=1))[::-1]

    sel = np.zeros(matrix.p, dtype=np.bool_)
    sel[a[:top]] = True

    return matrix.loc[sel]
return matrix | python | def filter_mean(matrix, top):
"""Filter genes in an expression matrix by mean expression.
Parameters
----------
matrix: ExpMatrix
The expression matrix.
top: int
The number of genes to retain.
Returns
-------
ExpMatrix
The filtered expression matrix.
"""
assert isinstance(matrix, ExpMatrix)
assert isinstance(top, int)
if top >= matrix.p:
logger.warning('Gene expression filter with `top` parameter that is '
'>= the number of genes!')
top = matrix.p
a = np.argsort(np.mean(matrix.X, axis=1))
a = a[::-1]
sel = np.zeros(matrix.p, dtype=np.bool_)
sel[a[:top]] = True
matrix = matrix.loc[sel]
return matrix | [
"def",
"filter_mean",
"(",
"matrix",
",",
"top",
")",
":",
"assert",
"isinstance",
"(",
"matrix",
",",
"ExpMatrix",
")",
"assert",
"isinstance",
"(",
"top",
",",
"int",
")",
"if",
"top",
">=",
"matrix",
".",
"p",
":",
"logger",
".",
"warning",
"(",
"... | Filter genes in an expression matrix by mean expression.
Parameters
----------
matrix: ExpMatrix
The expression matrix.
top: int
The number of genes to retain.
Returns
-------
ExpMatrix
The filtered expression matrix. | [
"Filter",
"genes",
"in",
"an",
"expression",
"matrix",
"by",
"mean",
"expression",
"."
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/expression/filter.py#L74-L104 |
def filter_percentile(matrix, top, percentile=50):
    """Filter genes in an expression matrix by percentile expression.

    Keeps the `top` genes with the highest expression value at the given
    per-gene percentile (by default, the median).

    Parameters
    ----------
    matrix: ExpMatrix
        The expression matrix.
    top: int
        The number of genes to retain.
    percentile: int or float, optional
        The percentile to use. Defaults to the median (50th percentile).

    Returns
    -------
    ExpMatrix
        The filtered expression matrix.
    """
    assert isinstance(matrix, ExpMatrix)
    # accept numpy scalar types as well, consistent with `filter_variance`
    assert isinstance(top, (int, np.integer))
    assert isinstance(percentile, (int, float, np.integer, np.floating))

    if top >= matrix.p:
        logger.warning('Gene expression filter with `top` parameter that is '
                       ' >= the number of genes!')
        top = matrix.p

    # gene indices ordered from highest to lowest percentile expression
    a = np.argsort(np.percentile(matrix.X, percentile, axis=1))[::-1]

    sel = np.zeros(matrix.p, dtype=np.bool_)
    sel[a[:top]] = True

    return matrix.loc[sel]
return matrix | python | def filter_percentile(matrix, top, percentile=50):
"""Filter genes in an expression matrix by percentile expression.
Parameters
----------
matrix: ExpMatrix
The expression matrix.
top: int
The number of genes to retain.
    percentile: int or float, optional
        The percentile to use. Defaults to the median (50th percentile).
Returns
-------
ExpMatrix
The filtered expression matrix.
"""
assert isinstance(matrix, ExpMatrix)
assert isinstance(top, int)
assert isinstance(percentile, (int, float))
if top >= matrix.p:
logger.warning('Gene expression filter with `top` parameter that is '
' >= the number of genes!')
top = matrix.p
a = np.argsort(np.percentile(matrix.X, percentile, axis=1))
a = a[::-1]
sel = np.zeros(matrix.p, dtype=np.bool_)
sel[a[:top]] = True
matrix = matrix.loc[sel]
return matrix | [
"def",
"filter_percentile",
"(",
"matrix",
",",
"top",
",",
"percentile",
"=",
"50",
")",
":",
"assert",
"isinstance",
"(",
"matrix",
",",
"ExpMatrix",
")",
"assert",
"isinstance",
"(",
"top",
",",
"int",
")",
"assert",
"isinstance",
"(",
"percentile",
","... | Filter genes in an expression matrix by percentile expression.
Parameters
----------
matrix: ExpMatrix
The expression matrix.
top: int
The number of genes to retain.
    percentile: int or float, optional
        The percentile to use. Defaults to the median (50th percentile).
Returns
-------
ExpMatrix
The filtered expression matrix. | [
"Filter",
"genes",
"in",
"an",
"expression",
"matrix",
"by",
"percentile",
"expression",
"."
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/expression/filter.py#L107-L140 |
flo-compbio/genometools | genometools/ensembl/dna.py | _transform_chrom | def _transform_chrom(chrom):
"""Helper function to obtain specific sort order."""
try:
c = int(chrom)
except:
if chrom in ['X', 'Y']:
return chrom
elif chrom == 'MT':
return '_MT' # sort to the end
else:
return '__' + chrom # sort to the very end
else:
# make sure numbered chromosomes are sorted numerically
return '%02d' % c | python | def _transform_chrom(chrom):
"""Helper function to obtain specific sort order."""
try:
c = int(chrom)
except:
if chrom in ['X', 'Y']:
return chrom
elif chrom == 'MT':
return '_MT' # sort to the end
else:
return '__' + chrom # sort to the very end
else:
# make sure numbered chromosomes are sorted numerically
return '%02d' % c | [
"def",
"_transform_chrom",
"(",
"chrom",
")",
":",
"try",
":",
"c",
"=",
"int",
"(",
"chrom",
")",
"except",
":",
"if",
"chrom",
"in",
"[",
"'X'",
",",
"'Y'",
"]",
":",
"return",
"chrom",
"elif",
"chrom",
"==",
"'MT'",
":",
"return",
"'_MT'",
"# so... | Helper function to obtain specific sort order. | [
"Helper",
"function",
"to",
"obtain",
"specific",
"sort",
"order",
"."
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ensembl/dna.py#L35-L48 |
def get_chromosome_lengths(fasta_file, fancy_sort=True):
    """Extract chromosome lengths from genome FASTA file.

    Reads a gzip-compressed FASTA file and returns a pandas Series
    mapping chromosome name to sequence length.  With `fancy_sort`,
    entries are ordered numerically, then X/Y, then MT, then the rest.
    """
    lengths = OrderedDict()
    with gzip.open(fasta_file, 'rt', encoding='ascii') as fh:
        for record in SeqIO.parse(fh, 'fasta'):
            lengths[record.id] = len(record.seq)
            _LOGGER.info('Processed chromosome "%s"...', record.id)

    chromlen = pd.Series(lengths)
    chromlen.index.name = 'Chromosome'
    chromlen.name = 'Length'

    if fancy_sort:
        # stable sort by the custom chromosome sort key
        sort_keys = chromlen.index.to_series().apply(_transform_chrom)
        order = sort_keys.argsort(kind='mergesort')
        chromlen = chromlen.iloc[order]

    return chromlen
return chromlen | python | def get_chromosome_lengths(fasta_file, fancy_sort=True):
"""Extract chromosome lengths from genome FASTA file."""
chromlen = []
with gzip.open(fasta_file, 'rt', encoding='ascii') as fh:
fasta = SeqIO.parse(fh, 'fasta')
for i, f in enumerate(fasta):
chromlen.append((f.id, len(f.seq)))
_LOGGER.info('Processed chromosome "%s"...', f.id)
#print(dir(f))
#if i == 1: break
# convert to pandas Series
chromlen = pd.Series(OrderedDict(chromlen))
chromlen.index.name = 'Chromosome'
chromlen.name = 'Length'
if fancy_sort:
# sort using fancy ordering
chrom_for_sorting = chromlen.index.to_series().apply(_transform_chrom)
a = chrom_for_sorting.argsort(kind='mergesort')
chromlen = chromlen.iloc[a]
return chromlen | [
"def",
"get_chromosome_lengths",
"(",
"fasta_file",
",",
"fancy_sort",
"=",
"True",
")",
":",
"chromlen",
"=",
"[",
"]",
"with",
"gzip",
".",
"open",
"(",
"fasta_file",
",",
"'rt'",
",",
"encoding",
"=",
"'ascii'",
")",
"as",
"fh",
":",
"fasta",
"=",
"... | Extract chromosome lengths from genome FASTA file. | [
"Extract",
"chromosome",
"lengths",
"from",
"genome",
"FASTA",
"file",
"."
] | train | https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ensembl/dna.py#L51-L73 |
def resolve_schema(schema):
    """Transform JSON schemas "allOf".

    This is the default schema resolver.

    This function was created because some javascript JSON Schema libraries
    don't support "allOf". We recommend to use this function only in this
    specific case.

    This function is transforming the JSON Schema by removing "allOf"
    keywords: the sub-schemas are merged recursively as dictionaries.
    The process is completely custom and works only for simple JSON
    Schemas which use basic types (object, string, number, ...). Optional
    structures like "schema dependencies" or "oneOf" keywords are not
    supported.

    :param dict schema: the schema to resolve.
    :returns: the resolved schema

    .. note::

        The schema should have the ``$ref`` already resolved before running
        this method.
    """
    def traverse(node):
        # non-dict nodes (lists, scalars) are left untouched
        if not isinstance(node, dict):
            return node
        if 'allOf' in node:
            for sub_schema in node['allOf']:
                # drop sub-schema titles so they don't clobber the parent's
                sub_schema.pop('title', None)
                node = _merge_dicts(node, sub_schema)
            node.pop('allOf')
            # re-traverse the merged result (it may contain nested allOf)
            node = traverse(node)
        elif 'properties' in node:
            for name in node.get('properties', []):
                node['properties'][name] = traverse(node['properties'][name])
        elif 'items' in node:
            node['items'] = traverse(node['items'])
        return node

    return traverse(schema)
return traverse(schema) | python | def resolve_schema(schema):
"""Transform JSON schemas "allOf".
This is the default schema resolver.
This function was created because some javascript JSON Schema libraries
don't support "allOf". We recommend to use this function only in this
specific case.
This function is transforming the JSON Schema by removing "allOf" keywords.
It recursively merges the sub-schemas as dictionaries. The process is
completely custom and works only for simple JSON Schemas which use basic
types (object, string, number, ...). Optional structures like "schema
dependencies" or "oneOf" keywords are not supported.
:param dict schema: the schema to resolve.
:returns: the resolved schema
.. note::
The schema should have the ``$ref`` already resolved before running
this method.
"""
def traverse(schema):
if isinstance(schema, dict):
if 'allOf' in schema:
for x in schema['allOf']:
sub_schema = x
sub_schema.pop('title', None)
schema = _merge_dicts(schema, sub_schema)
schema.pop('allOf')
schema = traverse(schema)
elif 'properties' in schema:
for x in schema.get('properties', []):
schema['properties'][x] = traverse(
schema['properties'][x])
elif 'items' in schema:
schema['items'] = traverse(schema['items'])
return schema
return traverse(schema) | [
"def",
"resolve_schema",
"(",
"schema",
")",
":",
"def",
"traverse",
"(",
"schema",
")",
":",
"if",
"isinstance",
"(",
"schema",
",",
"dict",
")",
":",
"if",
"'allOf'",
"in",
"schema",
":",
"for",
"x",
"in",
"schema",
"[",
"'allOf'",
"]",
":",
"sub_s... | Transform JSON schemas "allOf".
This is the default schema resolver.
This function was created because some javascript JSON Schema libraries
don't support "allOf". We recommend to use this function only in this
specific case.
This function is transforming the JSON Schema by removing "allOf" keywords.
It recursively merges the sub-schemas as dictionaries. The process is
completely custom and works only for simple JSON Schemas which use basic
types (object, string, number, ...). Optional structures like "schema
dependencies" or "oneOf" keywords are not supported.
:param dict schema: the schema to resolve.
:returns: the resolved schema
.. note::
The schema should have the ``$ref`` already resolved before running
this method. | [
"Transform",
"JSON",
"schemas",
"allOf",
"."
] | train | https://github.com/inveniosoftware/invenio-jsonschemas/blob/93019b8fe3bf549335e94c84198c9c0b76d8fde2/invenio_jsonschemas/utils.py#L16-L55 |
inveniosoftware/invenio-jsonschemas | invenio_jsonschemas/utils.py | _merge_dicts | def _merge_dicts(first, second):
"""Merge the 'second' multiple-dictionary into the 'first' one."""
new = deepcopy(first)
for k, v in second.items():
if isinstance(v, dict) and v:
ret = _merge_dicts(new.get(k, dict()), v)
new[k] = ret
else:
new[k] = second[k]
return new | python | def _merge_dicts(first, second):
"""Merge the 'second' multiple-dictionary into the 'first' one."""
new = deepcopy(first)
for k, v in second.items():
if isinstance(v, dict) and v:
ret = _merge_dicts(new.get(k, dict()), v)
new[k] = ret
else:
new[k] = second[k]
return new | [
"def",
"_merge_dicts",
"(",
"first",
",",
"second",
")",
":",
"new",
"=",
"deepcopy",
"(",
"first",
")",
"for",
"k",
",",
"v",
"in",
"second",
".",
"items",
"(",
")",
":",
"if",
"isinstance",
"(",
"v",
",",
"dict",
")",
"and",
"v",
":",
"ret",
... | Merge the 'second' multiple-dictionary into the 'first' one. | [
"Merge",
"the",
"second",
"multiple",
"-",
"dictionary",
"into",
"the",
"first",
"one",
"."
] | train | https://github.com/inveniosoftware/invenio-jsonschemas/blob/93019b8fe3bf549335e94c84198c9c0b76d8fde2/invenio_jsonschemas/utils.py#L58-L67 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.