text stringlengths 81 112k |
|---|
Returns an individual with the specified name, or raises an
IndividualNameNotFoundException if it does not exist.
def getIndividualByName(self, name):
    """
    Return the individual registered under the given name.

    Raises an IndividualNameNotFoundException when no individual with
    that name exists in this container.
    """
    try:
        return self._individualNameMap[name]
    except KeyError:
        raise exceptions.IndividualNameNotFoundException(name)
Returns the Individual with the specified id, or raises
an IndividualNotFoundException otherwise.
def getIndividual(self, id_):
    """
    Return the Individual with the given id.

    Raises an IndividualNotFoundException when no such individual
    exists in this container.
    """
    try:
        return self._individualIdMap[id_]
    except KeyError:
        raise exceptions.IndividualNotFoundException(id_)
Returns a ReadGroupSet with the specified name, or raises a
ReadGroupSetNameNotFoundException if it does not exist.
def getReadGroupSetByName(self, name):
    """
    Return the ReadGroupSet registered under the given name.

    Raises a ReadGroupSetNameNotFoundException when no read group set
    with that name exists in this container.
    """
    try:
        return self._readGroupSetNameMap[name]
    except KeyError:
        raise exceptions.ReadGroupSetNameNotFoundException(name)
Returns the ReadGroupSet with the specified id, or raises
a ReadGroupSetNotFoundException otherwise.
def getReadGroupSet(self, id_):
    """
    Returns the ReadGroupSet with the specified id, or raises
    a ReadGroupNotFoundException otherwise.

    NOTE(review): the lookup is by id, not name as the docstring
    previously claimed.  Also, this raises ReadGroupNotFoundException,
    while the surrounding code suggests a ReadGroupSet-specific
    exception (ReadGroupSetNotFoundException) may have been intended —
    confirm which exception class exists before changing.
    """
    if id_ not in self._readGroupSetIdMap:
        raise exceptions.ReadGroupNotFoundException(id_)
    return self._readGroupSetIdMap[id_]
Returns the RnaQuantification set with the specified name, or raises
an exception otherwise.
def getRnaQuantificationSetByName(self, name):
    """
    Return the RnaQuantification set registered under the given name.

    Raises an RnaQuantificationSetNameNotFoundException when no set
    with that name exists in this container.
    """
    try:
        return self._rnaQuantificationSetNameMap[name]
    except KeyError:
        raise exceptions.RnaQuantificationSetNameNotFoundException(name)
Returns the RnaQuantification set with the specified id, or raises
a RnaQuantificationSetNotFoundException otherwise.
def getRnaQuantificationSet(self, id_):
    """
    Return the RnaQuantification set with the given id.

    Raises a RnaQuantificationSetNotFoundException when no set with
    that id exists in this container.
    """
    try:
        return self._rnaQuantificationSetIdMap[id_]
    except KeyError:
        raise exceptions.RnaQuantificationSetNotFoundException(id_)
Parses the (probably) intended values out of the specified
BAM header dictionary, which is incompletely parsed by pysam.
This is caused by some tools incorrectly using spaces instead
of tabs as a separator.
def parseMalformedBamHeader(headerDict):
    """
    Recover the (probably) intended key/value pairs from a BAM header
    dictionary that pysam parsed incompletely.  Some tools incorrectly
    use spaces instead of tabs as the field separator, which leaves
    extra "KEY:value" tokens embedded inside a single parsed value;
    this flattens the dictionary and re-splits those tokens into their
    own entries.
    """
    # Flatten everything except CL into one space-separated string so
    # embedded "KEY:value" tokens become individual items on re-split.
    fields = [
        "{}:{}".format(key, value)
        for key, value in headerDict.items()
        if key != 'CL'
    ]
    parsed = {}
    for token in " ".join(fields).split():
        key, value = token.split(":", 1)
        # Cast back to the type the key had in the input; keys that
        # only appear after re-splitting default to str.
        originalType = type(headerDict.get(key, ""))
        parsed[key] = originalType(value)
    # The command line legitimately contains spaces, so carry it over
    # untouched rather than re-splitting it.
    if 'CL' in headerDict:
        parsed['CL'] = headerDict['CL']
    return parsed
Returns an iterator over the specified reads
def _getReadAlignments(
        self, reference, start, end, readGroupSet, readGroup):
    """
    Returns an iterator over the specified reads.

    :param reference: the Reference to fetch reads from; its local id
        is used as the region name for the underlying file fetch.
    :param start: region start, sanitized before fetching.
    :param end: region end, sanitized before fetching.
    :param readGroupSet: the ReadGroupSet the alignments belong to.
    :param readGroup: when None, every alignment is yielded with a
        read group id derived from its RG tag; otherwise alignments
        are (optionally, when self._filterReads is set) filtered down
        to the read group with this object's local id.
    """
    # TODO If reference is None, return against all references,
    # including unmapped reads.
    samFile = self.getFileHandle(self._dataUrl)
    referenceName = reference.getLocalId().encode()
    # TODO deal with errors from htslib
    start, end = self.sanitizeAlignmentFileFetch(start, end)
    readAlignments = samFile.fetch(referenceName, start, end)
    for readAlignment in readAlignments:
        tags = dict(readAlignment.tags)
        if readGroup is None:
            # Bug fix: bind the RG value on every iteration.  The
            # previous code assigned it only when the tag was present,
            # so a read without an RG tag either raised a NameError
            # (first read) or silently reused the value left over
            # from an earlier read in the loop.
            alignmentReadGroupLocalId = tags.get('RG')
            readGroupCompoundId = datamodel.ReadGroupCompoundId(
                readGroupSet.getCompoundId(),
                str(alignmentReadGroupLocalId))
            yield self.convertReadAlignment(
                readAlignment, readGroupSet, str(readGroupCompoundId))
        else:
            if self._filterReads:
                if 'RG' in tags and tags['RG'] == self._localId:
                    yield self.convertReadAlignment(
                        readAlignment, readGroupSet,
                        str(readGroup.getCompoundId()))
            else:
                yield self.convertReadAlignment(
                    readAlignment, readGroupSet,
                    str(readGroup.getCompoundId()))
Convert a pysam ReadAlignment to a GA4GH ReadAlignment
def convertReadAlignment(self, read, readGroupSet, readGroupId):
    """
    Convert a pysam ReadAlignment to a GA4GH ReadAlignment

    :param read: a pysam aligned read from this set's data file.
    :param readGroupSet: the ReadGroupSet used to mint the result id.
    :param readGroupId: string read group id recorded on the result.
    """
    samFile = self.getFileHandle(self._dataUrl)
    # TODO fill out remaining fields
    # TODO refine in tandem with code in converters module
    ret = protocol.ReadAlignment()
    # ret.fragmentId = 'TODO'
    ret.aligned_quality.extend(read.query_qualities)
    ret.aligned_sequence = read.query_sequence
    if SamFlags.isFlagSet(read.flag, SamFlags.READ_UNMAPPED):
        # Unmapped reads carry no linear alignment at all.
        ret.ClearField("alignment")
    else:
        ret.alignment.CopyFrom(protocol.LinearAlignment())
        ret.alignment.mapping_quality = read.mapping_quality
        ret.alignment.position.CopyFrom(protocol.Position())
        ret.alignment.position.reference_name = samFile.getrname(
            read.reference_id)
        ret.alignment.position.position = read.reference_start
        # Default to the forward strand; flip below if the reverse
        # flag is set.
        ret.alignment.position.strand = protocol.POS_STRAND
        if SamFlags.isFlagSet(read.flag, SamFlags.READ_REVERSE_STRAND):
            ret.alignment.position.strand = protocol.NEG_STRAND
        for operation, length in read.cigar:
            gaCigarUnit = ret.alignment.cigar.add()
            gaCigarUnit.operation = SamCigar.int2ga(operation)
            gaCigarUnit.operation_length = length
            gaCigarUnit.reference_sequence = ""  # TODO fix this!
    ret.duplicate_fragment = SamFlags.isFlagSet(
        read.flag, SamFlags.DUPLICATE_READ)
    ret.failed_vendor_quality_checks = SamFlags.isFlagSet(
        read.flag, SamFlags.FAILED_QUALITY_CHECK)
    ret.fragment_length = read.template_length
    ret.fragment_name = read.query_name
    # Copy every SAM tag into the alignment's attribute map.
    for key, value in read.tags:
        # Useful for inspecting the structure of read tags
        # print("{key} {ktype}: {value}, {vtype}".format(
        #     key=key, ktype=type(key), value=value, vtype=type(value)))
        protocol.setAttribute(ret.attributes.attr[key].values, value)
    if SamFlags.isFlagSet(read.flag, SamFlags.MATE_UNMAPPED):
        # Mate unmapped: leave next_mate_position empty.
        ret.next_mate_position.Clear()
    else:
        # Reset the submessage before conditionally filling it in.
        ret.next_mate_position.Clear()
        if read.next_reference_id != -1:
            ret.next_mate_position.reference_name = samFile.getrname(
                read.next_reference_id)
        else:
            ret.next_mate_position.reference_name = ""
        ret.next_mate_position.position = read.next_reference_start
        ret.next_mate_position.strand = protocol.POS_STRAND
        if SamFlags.isFlagSet(read.flag, SamFlags.MATE_REVERSE_STRAND):
            ret.next_mate_position.strand = protocol.NEG_STRAND
    if SamFlags.isFlagSet(read.flag, SamFlags.READ_PAIRED):
        ret.number_reads = 2
    else:
        ret.number_reads = 1
    # read_number encoding: -1 unknown, 0 first of pair, 1 second of
    # pair, 2 when both first and second flags are set.
    ret.read_number = -1
    if SamFlags.isFlagSet(read.flag, SamFlags.FIRST_IN_PAIR):
        if SamFlags.isFlagSet(read.flag, SamFlags.SECOND_IN_PAIR):
            ret.read_number = 2
        else:
            ret.read_number = 0
    elif SamFlags.isFlagSet(read.flag, SamFlags.SECOND_IN_PAIR):
        ret.read_number = 1
    ret.improper_placement = not SamFlags.isFlagSet(
        read.flag, SamFlags.READ_PROPER_PAIR)
    ret.read_group_id = readGroupId
    ret.secondary_alignment = SamFlags.isFlagSet(
        read.flag, SamFlags.SECONDARY_ALIGNMENT)
    ret.supplementary_alignment = SamFlags.isFlagSet(
        read.flag, SamFlags.SUPPLEMENTARY_ALIGNMENT)
    # The id depends on fragment_name, so it is minted last.
    ret.id = readGroupSet.getReadAlignmentId(ret)
    return ret
Adds the specified ReadGroup to this ReadGroupSet.
def addReadGroup(self, readGroup):
    """
    Adds the specified ReadGroup to this ReadGroupSet, indexing it by
    id and preserving insertion order.
    """
    readGroupId = readGroup.getId()
    self._readGroupIds.append(readGroupId)
    self._readGroupIdMap[readGroupId] = readGroup
Returns the ReadGroup with the specified id if it exists in this
ReadGroupSet, or raises a ReadGroupNotFoundException otherwise.
def getReadGroup(self, id_):
    """
    Return the ReadGroup with the given id from this ReadGroupSet.

    Raises a ReadGroupNotFoundException when the id is unknown.
    """
    try:
        return self._readGroupIdMap[id_]
    except KeyError:
        raise exceptions.ReadGroupNotFoundException(id_)
Returns the GA4GH protocol representation of this ReadGroupSet.
def toProtocolElement(self):
    """
    Returns the GA4GH protocol representation of this ReadGroupSet.
    """
    gaReadGroupSet = protocol.ReadGroupSet()
    gaReadGroupSet.id = self.getId()
    gaReadGroupSet.name = self.getLocalId()
    gaReadGroupSet.dataset_id = self.getParentContainer().getId()
    gaReadGroupSet.stats.CopyFrom(self.getStats())
    # Embed the protocol form of every contained read group.
    gaReadGroupSet.read_groups.extend(
        readGroup.toProtocolElement()
        for readGroup in self.getReadGroups())
    self.serializeAttributes(gaReadGroupSet)
    return gaReadGroupSet
Returns a string ID suitable for use in the specified GA
ReadAlignment object in this ReadGroupSet.
def getReadAlignmentId(self, gaAlignment):
    """
    Returns a string ID suitable for use in the specified GA
    ReadAlignment object in this ReadGroupSet.  The id is a compound
    of this set's id and the alignment's fragment name.
    """
    return str(datamodel.ReadAlignmentCompoundId(
        self.getCompoundId(), gaAlignment.fragment_name))
Returns the GA4GH protocol representation of this read group set's
ReadStats.
def getStats(self):
    """
    Returns the GA4GH protocol representation of this read group set's
    ReadStats (aligned and unaligned read counts).
    """
    readStats = protocol.ReadStats()
    readStats.unaligned_read_count = self._numUnalignedReads
    readStats.aligned_read_count = self._numAlignedReads
    return readStats
Returns an iterator over the specified reads
def getReadAlignments(self, reference, start=None, end=None):
    """
    Returns an iterator over the specified reads

    Delegates to _getReadAlignments with no read-group filter (the
    final None), so alignments from every read group in this set are
    returned for the given reference region.
    """
    return self._getReadAlignments(reference, start, end, self, None)
Populates the instance variables of this ReadGroupSet from the
specified database row.
def populateFromRow(self, readGroupSetRecord):
    """
    Populates the instance variables of this ReadGroupSet from the
    specified database row.
    """
    self._dataUrl = readGroupSetRecord.dataurl
    self._indexFile = readGroupSetRecord.indexfile
    # Programs are stored as a JSON list; round-trip each element
    # through the protobuf Program message.
    self._programs = [
        protocol.fromJson(json.dumps(programJson), protocol.Program)
        for programJson in json.loads(readGroupSetRecord.programs)]
    # Read statistics are stored as serialised JSON.
    readStats = protocol.fromJson(
        readGroupSetRecord.stats, protocol.ReadStats)
    self._numAlignedReads = readStats.aligned_read_count
    self._numUnalignedReads = readStats.unaligned_read_count
Populates the instance variables of this ReadGroupSet from the
specified dataUrl and indexFile. If indexFile is not specified
guess usual form.
def populateFromFile(self, dataUrl, indexFile=None):
    """
    Populates the instance variables of this ReadGroupSet from the
    specified dataUrl and indexFile. If indexFile is not specified
    guess usual form.
    """
    self._dataUrl = dataUrl
    self._indexFile = indexFile
    if indexFile is None:
        # Assume the conventional BAM index location next to the data.
        self._indexFile = dataUrl + ".bai"
    samFile = self.getFileHandle(self._dataUrl)
    self._setHeaderFields(samFile)
    if 'RG' not in samFile.header or len(samFile.header['RG']) == 0:
        # No read groups declared in the header: fall back to a single
        # default read group for the whole file.
        readGroup = HtslibReadGroup(self, self.defaultReadGroupName)
        self.addReadGroup(readGroup)
    else:
        for readGroupHeader in samFile.header['RG']:
            readGroup = HtslibReadGroup(self, readGroupHeader['ID'])
            readGroup.populateFromHeader(readGroupHeader)
            self.addReadGroup(readGroup)
    # Derive a single reference set name from the @SQ header lines;
    # all of them must agree.
    self._bamHeaderReferenceSetName = None
    for referenceInfo in samFile.header['SQ']:
        if 'AS' not in referenceInfo:
            # Some tools use spaces instead of tabs as separators,
            # which hides the AS tag inside another value.
            infoDict = parseMalformedBamHeader(referenceInfo)
        else:
            infoDict = referenceInfo
        name = infoDict.get('AS', references.DEFAULT_REFERENCESET_NAME)
        if self._bamHeaderReferenceSetName is None:
            self._bamHeaderReferenceSetName = name
        elif self._bamHeaderReferenceSetName != name:
            # NOTE(review): self._bamFileReferenceName is not assigned
            # anywhere in this method; if it is not set elsewhere this
            # raise would itself fail with AttributeError — confirm.
            raise exceptions.MultipleReferenceSetsInReadGroupSet(
                self._dataUrl, name, self._bamFileReferenceName)
    self._numAlignedReads = samFile.mapped
    self._numUnalignedReads = samFile.unmapped
Returns the GA4GH protocol representation of this ReadGroup.
def toProtocolElement(self):
"""
Returns the GA4GH protocol representation of this ReadGroup.
"""
# TODO this is very incomplete, but we don't have the
# implementation to fill out the rest of the fields currently
readGroup = protocol.ReadGroup()
readGroup.id = self.getId()
readGroup.created = self._creationTime
readGroup.updated = self._updateTime
dataset = self.getParentContainer().getParentContainer()
readGroup.dataset_id = dataset.getId()
readGroup.name = self.getLocalId()
readGroup.predicted_insert_size = pb.int(self.getPredictedInsertSize())
referenceSet = self._parentContainer.getReferenceSet()
readGroup.sample_name = pb.string(self.getSampleName())
readGroup.biosample_id = pb.string(self.getBiosampleId())
if referenceSet is not None:
readGroup.reference_set_id = referenceSet.getId()
readGroup.stats.CopyFrom(self.getStats())
readGroup.programs.extend(self.getPrograms())
readGroup.description = pb.string(self.getDescription())
readGroup.experiment.CopyFrom(self.getExperiment())
self.serializeAttributes(readGroup)
return readGroup |
Returns the GA4GH protocol representation of this read group's
ReadStats.
def getStats(self):
    """
    Returns the GA4GH protocol representation of this read group's
    ReadStats.
    """
    # TODO base_count requires iterating through all reads
    readStats = protocol.ReadStats()
    readStats.unaligned_read_count = self.getNumUnalignedReads()
    readStats.aligned_read_count = self.getNumAlignedReads()
    return readStats
Returns the GA4GH protocol representation of this read group's
Experiment.
def getExperiment(self):
    """
    Returns the GA4GH protocol representation of this read group's
    Experiment.
    """
    gaExperiment = protocol.Experiment()
    gaExperiment.id = self.getExperimentId()
    # Both timestamps use the same cached ISO-8601 string.
    gaExperiment.message_create_time = self._iso8601
    gaExperiment.message_update_time = self._iso8601
    gaExperiment.instrument_model = pb.string(self.getInstrumentModel())
    gaExperiment.sequencing_center = pb.string(self.getSequencingCenter())
    gaExperiment.description = pb.string(self.getExperimentDescription())
    gaExperiment.library = pb.string(self.getLibrary())
    gaExperiment.platform_unit = pb.string(self.getPlatformUnit())
    gaExperiment.run_time = pb.string(self.getRunTime())
    return gaExperiment
Populate the instance variables using the specified SAM header.
def populateFromHeader(self, readGroupHeader):
    """
    Populate the instance variables using the specified SAM header.

    Missing tags leave the corresponding attribute set to None, except
    the predicted insert size, which is only assigned when the PI tag
    is present.
    """
    header = readGroupHeader
    self._sampleName = header.get('SM', None)
    self._description = header.get('DS', None)
    self._experimentDescription = header.get('DS', None)
    self._instrumentModel = header.get('PL', None)
    self._sequencingCenter = header.get('CN', None)
    self._library = header.get('LB', None)
    self._platformUnit = header.get('PU', None)
    self._runTime = header.get('DT', None)
    if 'PI' in header:
        self._predictedInsertSize = int(header['PI'])
Populate the instance variables using the specified DB row.
def populateFromRow(self, readGroupRecord):
    """
    Populate the instance variables using the specified DB row.
    """
    # Plain columns.
    self._sampleName = readGroupRecord.samplename
    self._biosampleId = readGroupRecord.biosampleid
    self._description = readGroupRecord.description
    self._predictedInsertSize = readGroupRecord.predictedinsertsize
    # Read statistics are stored as serialised JSON.
    readStats = protocol.fromJson(
        readGroupRecord.stats, protocol.ReadStats)
    self._numAlignedReads = readStats.aligned_read_count
    self._numUnalignedReads = readStats.unaligned_read_count
    # Experiment metadata is stored as serialised JSON too.
    gaExperiment = protocol.fromJson(
        readGroupRecord.experiment, protocol.Experiment)
    self._instrumentModel = gaExperiment.instrument_model
    self._sequencingCenter = gaExperiment.sequencing_center
    self._experimentDescription = gaExperiment.description
    self._library = gaExperiment.library
    self._platformUnit = gaExperiment.platform_unit
    self._runTime = gaExperiment.run_time
Returns the filename of the specified path without its extensions.
This is usually how we derive the default name for a given object.
def getNameFromPath(filePath):
    """
    Returns the filename of the specified path without its extensions.
    This is usually how we derive the default name for a given object.

    Raises ValueError when the path is empty.
    """
    if len(filePath) == 0:
        raise ValueError("Cannot have empty path for name")
    # Normalise first so trailing separators are ignored.
    fileName = os.path.basename(os.path.normpath(filePath))
    # We need to handle multi-part extensions such as .fa.gz, so
    # os.path.splitext is not enough; keep everything before the
    # first dot instead.
    name = fileName.split(".", 1)[0]
    assert name != ""
    return name
Exits the repo manager with error status.
def repoExitError(message):
    """
    Exits the repo manager with error status, printing the given
    message wrapped to the terminal width.
    """
    wrapper = textwrap.TextWrapper(
        break_long_words=False, break_on_hyphens=False)
    errorText = "{}: error: {}".format(sys.argv[0], message)
    sys.exit(wrapper.fill(errorText))
Runs the specified function that updates the repo with the specified
arguments. This method ensures that all updates are transactional,
so that if any part of the update fails no changes are made to the
repo.
def _updateRepo(self, func, *args, **kwargs):
    """
    Runs the specified function that updates the repo with the specified
    arguments. This method ensures that all updates are transactional,
    so that if any part of the update fails no changes are made to the
    repo.

    :param func: the repo-mutating callable to run.
    :param args: positional arguments forwarded to func.
    :param kwargs: keyword arguments forwarded to func.
    """
    # TODO how do we make this properly transactional?
    self._repo.open(datarepo.MODE_WRITE)
    try:
        func(*args, **kwargs)
        # Only reached when func did not raise; on failure the commit
        # is skipped and the close below discards pending changes.
        self._repo.commit()
    finally:
        self._repo.close()
Adds a new Ontology to this repo.
def addOntology(self):
    """
    Adds a new Ontology to this repo.
    """
    self._openRepo()
    filePath = self._getFilePath(
        self._args.filePath, self._args.relativePath)
    name = self._args.name
    if name is None:
        # Default the ontology name to the file's base name.
        name = getNameFromPath(filePath)
    ontology = ontologies.Ontology(name)
    ontology.populateFromFile(filePath)
    self._updateRepo(self._repo.insertOntology, ontology)
Adds a new dataset into this repo.
def addDataset(self):
    """
    Adds a new dataset into this repo.
    """
    self._openRepo()
    newDataset = datasets.Dataset(self._args.datasetName)
    newDataset.setDescription(self._args.description)
    newDataset.setAttributes(json.loads(self._args.attributes))
    self._updateRepo(self._repo.insertDataset, newDataset)
Adds a new reference set into this repo.
def addReferenceSet(self):
    """
    Adds a new reference set into this repo.
    """
    self._openRepo()
    filePath = self._getFilePath(
        self._args.filePath, self._args.relativePath)
    name = self._args.name
    if name is None:
        # Default to the base name of the FASTA file.
        name = getNameFromPath(self._args.filePath)
    referenceSet = references.HtslibReferenceSet(name)
    referenceSet.populateFromFile(filePath)
    referenceSet.setDescription(self._args.description)
    if self._args.species is not None:
        referenceSet.setSpeciesFromJson(self._args.species)
    referenceSet.setIsDerived(self._args.isDerived)
    referenceSet.setAssemblyId(self._args.assemblyId)
    referenceSet.setAttributes(json.loads(self._args.attributes))
    # Accessions are given as a comma-separated string on the CLI.
    if self._args.sourceAccessions is None:
        sourceAccessions = []
    else:
        sourceAccessions = self._args.sourceAccessions.split(",")
    referenceSet.setSourceAccessions(sourceAccessions)
    referenceSet.setSourceUri(self._args.sourceUri)
    self._updateRepo(self._repo.insertReferenceSet, referenceSet)
Adds a new ReadGroupSet into this repo.
def addReadGroupSet(self):
    """
    Adds a new ReadGroupSet into this repo.
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    dataUrl = self._args.dataFile
    indexFile = self._args.indexFile
    parsed = urlparse.urlparse(dataUrl)
    # TODO, add https support and others when they have been
    # tested.
    if parsed.scheme in ['http', 'ftp']:
        # Remote data: we cannot guess where the index lives, so the
        # caller must supply it explicitly.
        if indexFile is None:
            raise exceptions.MissingIndexException(dataUrl)
    else:
        # Local file: default to the conventional .bai index location
        # and resolve both paths relative to the repo if requested.
        if indexFile is None:
            indexFile = dataUrl + ".bai"
        dataUrl = self._getFilePath(self._args.dataFile,
                                    self._args.relativePath)
        indexFile = self._getFilePath(indexFile, self._args.relativePath)
    name = self._args.name
    if self._args.name is None:
        name = getNameFromPath(dataUrl)
    readGroupSet = reads.HtslibReadGroupSet(dataset, name)
    readGroupSet.populateFromFile(dataUrl, indexFile)
    referenceSetName = self._args.referenceSetName
    if referenceSetName is None:
        # Try to find a reference set name from the BAM header.
        referenceSetName = readGroupSet.getBamHeaderReferenceSetName()
    referenceSet = self._repo.getReferenceSetByName(referenceSetName)
    readGroupSet.setReferenceSet(referenceSet)
    readGroupSet.setAttributes(json.loads(self._args.attributes))
    self._updateRepo(self._repo.insertReadGroupSet, readGroupSet)
Adds a new VariantSet into this repo.
def addVariantSet(self):
    """
    Adds a new VariantSet into this repo.
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    dataUrls = self._args.dataFiles
    name = self._args.name
    if len(dataUrls) == 1:
        if self._args.name is None:
            name = getNameFromPath(dataUrls[0])
        if os.path.isdir(dataUrls[0]):
            # Read in the VCF files from the directory.
            # TODO support uncompressed VCF and BCF files
            vcfDir = dataUrls[0]
            pattern = os.path.join(vcfDir, "*.vcf.gz")
            dataUrls = glob.glob(pattern)
            if len(dataUrls) == 0:
                raise exceptions.RepoManagerException(
                    "Cannot find any VCF files in the directory "
                    "'{}'.".format(vcfDir))
            dataUrls[0] = self._getFilePath(dataUrls[0],
                                            self._args.relativePath)
    elif self._args.name is None:
        raise exceptions.RepoManagerException(
            "Cannot infer the intended name of the VariantSet when "
            "more than one VCF file is provided. Please provide a "
            "name argument using --name.")
    parsed = urlparse.urlparse(dataUrls[0])
    if parsed.scheme not in ['http', 'ftp']:
        # NOTE(review): under Python 3 map() returns a lazy iterator,
        # which the existence check below would exhaust before
        # populateFromFile runs; this appears to rely on the Python 2
        # list-returning map() — confirm the target interpreter.
        dataUrls = map(lambda url: self._getFilePath(
            url, self._args.relativePath), dataUrls)
    # Now, get the index files for the data files that we've now obtained.
    indexFiles = self._args.indexFiles
    if indexFiles is None:
        # First check if all the paths exist locally, as they must
        # if we are making a default index path.
        for dataUrl in dataUrls:
            if not os.path.exists(dataUrl):
                raise exceptions.MissingIndexException(
                    "Cannot find file '{}'. All variant files must be "
                    "stored locally if the default index location is "
                    "used. If you are trying to create a VariantSet "
                    "based on remote URLs, please download the index "
                    "files to the local file system and provide them "
                    "with the --indexFiles argument".format(dataUrl))
        # We assume that the indexes are made by adding .tbi
        indexSuffix = ".tbi"
        # TODO support BCF input properly here by adding .csi
        indexFiles = [filename + indexSuffix for filename in dataUrls]
    indexFiles = map(lambda url: self._getFilePath(
        url, self._args.relativePath), indexFiles)
    variantSet = variants.HtslibVariantSet(dataset, name)
    variantSet.populateFromFile(dataUrls, indexFiles)
    # Get the reference set that is associated with the variant set.
    referenceSetName = self._args.referenceSetName
    if referenceSetName is None:
        # Try to find a reference set name from the VCF header.
        referenceSetName = variantSet.getVcfHeaderReferenceSetName()
    if referenceSetName is None:
        raise exceptions.RepoManagerException(
            "Cannot infer the ReferenceSet from the VCF header. Please "
            "specify the ReferenceSet to associate with this "
            "VariantSet using the --referenceSetName option")
    referenceSet = self._repo.getReferenceSetByName(referenceSetName)
    variantSet.setReferenceSet(referenceSet)
    variantSet.setAttributes(json.loads(self._args.attributes))
    # Now check for annotations
    annotationSets = []
    if variantSet.isAnnotated() and self._args.addAnnotationSets:
        ontologyName = self._args.ontologyName
        if ontologyName is None:
            raise exceptions.RepoManagerException(
                "A sequence ontology name must be provided")
        ontology = self._repo.getOntologyByName(ontologyName)
        self._checkSequenceOntology(ontology)
        for annotationSet in variantSet.getVariantAnnotationSets():
            annotationSet.setOntology(ontology)
            annotationSets.append(annotationSet)

    # Add the annotation sets and the variant set as an atomic update
    def updateRepo():
        self._repo.insertVariantSet(variantSet)
        for annotationSet in annotationSets:
            self._repo.insertVariantAnnotationSet(annotationSet)
    self._updateRepo(updateRepo)
Adds a new phenotype association set to this repo.
def addPhenotypeAssociationSet(self):
    """
    Adds a new phenotype association set to this repo.
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    name = self._args.name
    if name is None:
        # Default to the base name of the RDF directory.
        name = getNameFromPath(self._args.dirPath)
    paSet = genotype_phenotype.RdfPhenotypeAssociationSet(
        dataset, name, self._args.dirPath)
    paSet.setAttributes(json.loads(self._args.attributes))
    self._updateRepo(
        self._repo.insertPhenotypeAssociationSet, paSet)
Removes a phenotype association set from the repo
def removePhenotypeAssociationSet(self):
    """
    Removes a phenotype association set from the repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    paSet = dataset.getPhenotypeAssociationSetByName(self._args.name)

    # Deletion is deferred until the user confirms it.
    def doRemove():
        self._updateRepo(
            self._repo.removePhenotypeAssociationSet, paSet)
    self._confirmDelete(
        "PhenotypeAssociationSet", paSet.getLocalId(), doRemove)
Removes a referenceSet from the repo.
def removeReferenceSet(self):
    """
    Removes a referenceSet from the repo.
    """
    self._openRepo()
    referenceSet = self._repo.getReferenceSetByName(
        self._args.referenceSetName)

    # Deletion is deferred until the user confirms it.
    def doRemove():
        self._updateRepo(self._repo.removeReferenceSet, referenceSet)
    self._confirmDelete(
        "ReferenceSet", referenceSet.getLocalId(), doRemove)
Removes a readGroupSet from the repo.
def removeReadGroupSet(self):
    """
    Removes a readGroupSet from the repo.
    """
    self._openRepo()
    containingDataset = self._repo.getDatasetByName(
        self._args.datasetName)
    readGroupSet = containingDataset.getReadGroupSetByName(
        self._args.readGroupSetName)

    # Deletion is deferred until the user confirms it.
    def doRemove():
        self._updateRepo(self._repo.removeReadGroupSet, readGroupSet)
    self._confirmDelete(
        "ReadGroupSet", readGroupSet.getLocalId(), doRemove)
Removes a variantSet from the repo.
def removeVariantSet(self):
    """
    Removes a variantSet from the repo.
    """
    self._openRepo()
    containingDataset = self._repo.getDatasetByName(
        self._args.datasetName)
    variantSet = containingDataset.getVariantSetByName(
        self._args.variantSetName)

    # Deletion is deferred until the user confirms it.
    def doRemove():
        self._updateRepo(self._repo.removeVariantSet, variantSet)
    self._confirmDelete("VariantSet", variantSet.getLocalId(), doRemove)
Removes a dataset from the repo.
def removeDataset(self):
    """
    Removes a dataset from the repo.
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)

    # Deletion is deferred until the user confirms it.
    def doRemove():
        self._updateRepo(self._repo.removeDataset, dataset)
    self._confirmDelete("Dataset", dataset.getLocalId(), doRemove)
Adds a new feature set into this repo
def addFeatureSet(self):
    """
    Adds a new feature set into this repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    filePath = self._getFilePath(self._args.filePath,
                                 self._args.relativePath)
    # NOTE(review): unlike most other add* commands, the name here is
    # always derived from the file path; a --name argument, if one
    # exists for this command, is ignored — confirm this is intended.
    name = getNameFromPath(self._args.filePath)
    featureSet = sequence_annotations.Gff3DbFeatureSet(
        dataset, name)
    referenceSetName = self._args.referenceSetName
    if referenceSetName is None:
        raise exceptions.RepoManagerException(
            "A reference set name must be provided")
    referenceSet = self._repo.getReferenceSetByName(referenceSetName)
    featureSet.setReferenceSet(referenceSet)
    ontologyName = self._args.ontologyName
    if ontologyName is None:
        raise exceptions.RepoManagerException(
            "A sequence ontology name must be provided")
    ontology = self._repo.getOntologyByName(ontologyName)
    # The ontology must be a sequence ontology for feature terms.
    self._checkSequenceOntology(ontology)
    featureSet.setOntology(ontology)
    featureSet.populateFromFile(filePath)
    featureSet.setAttributes(json.loads(self._args.attributes))
    self._updateRepo(self._repo.insertFeatureSet, featureSet)
Removes a feature set from this repo
def removeFeatureSet(self):
    """
    Removes a feature set from this repo
    """
    self._openRepo()
    containingDataset = self._repo.getDatasetByName(
        self._args.datasetName)
    featureSet = containingDataset.getFeatureSetByName(
        self._args.featureSetName)

    # Deletion is deferred until the user confirms it.
    def doRemove():
        self._updateRepo(self._repo.removeFeatureSet, featureSet)
    self._confirmDelete("FeatureSet", featureSet.getLocalId(), doRemove)
Adds a new continuous set into this repo
def addContinuousSet(self):
    """
    Adds a new continuous set into this repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    filePath = self._getFilePath(
        self._args.filePath, self._args.relativePath)
    # The set name is derived from the file path.
    setName = getNameFromPath(self._args.filePath)
    continuousSet = continuous.FileContinuousSet(dataset, setName)
    referenceSetName = self._args.referenceSetName
    if referenceSetName is None:
        raise exceptions.RepoManagerException(
            "A reference set name must be provided")
    continuousSet.setReferenceSet(
        self._repo.getReferenceSetByName(referenceSetName))
    continuousSet.populateFromFile(filePath)
    self._updateRepo(self._repo.insertContinuousSet, continuousSet)
Removes a continuous set from this repo
def removeContinuousSet(self):
    """
    Removes a continuous set from this repo
    """
    self._openRepo()
    containingDataset = self._repo.getDatasetByName(
        self._args.datasetName)
    continuousSet = containingDataset.getContinuousSetByName(
        self._args.continuousSetName)

    # Deletion is deferred until the user confirms it.
    def doRemove():
        self._updateRepo(self._repo.removeContinuousSet, continuousSet)
    self._confirmDelete(
        "ContinuousSet", continuousSet.getLocalId(), doRemove)
Adds a new biosample into this repo
def addBiosample(self):
    """
    Adds a new biosample into this repo
    """
    self._openRepo()
    parentDataset = self._repo.getDatasetByName(self._args.datasetName)
    newBiosample = bio_metadata.Biosample(
        parentDataset, self._args.biosampleName)
    newBiosample.populateFromJson(self._args.biosample)
    self._updateRepo(self._repo.insertBiosample, newBiosample)
Removes a biosample from this repo
def removeBiosample(self):
    """
    Removes a biosample from this repo
    """
    self._openRepo()
    parentDataset = self._repo.getDatasetByName(self._args.datasetName)
    biosample = parentDataset.getBiosampleByName(
        self._args.biosampleName)

    # Deletion is deferred until the user confirms it.
    def doRemove():
        self._updateRepo(self._repo.removeBiosample, biosample)
    self._confirmDelete("Biosample", biosample.getLocalId(), doRemove)
Adds a new individual into this repo
def addIndividual(self):
    """
    Adds a new individual into this repo
    """
    self._openRepo()
    parentDataset = self._repo.getDatasetByName(self._args.datasetName)
    newIndividual = bio_metadata.Individual(
        parentDataset, self._args.individualName)
    newIndividual.populateFromJson(self._args.individual)
    self._updateRepo(self._repo.insertIndividual, newIndividual)
Removes an individual from this repo
def removeIndividual(self):
    """
    Removes an individual from this repo
    """
    self._openRepo()
    parentDataset = self._repo.getDatasetByName(self._args.datasetName)
    individual = parentDataset.getIndividualByName(
        self._args.individualName)

    # Deletion is deferred until the user confirms it.
    def doRemove():
        self._updateRepo(self._repo.removeIndividual, individual)
    self._confirmDelete("Individual", individual.getLocalId(), doRemove)
Adds a new peer into this repo
def addPeer(self):
    """
    Adds a new peer into this repo
    """
    self._openRepo()
    try:
        # Attribute parsing can raise ValueError; URL validation can
        # raise BadUrlException.  Both are translated into repo
        # manager errors below.
        peer = peers.Peer(
            self._args.url, json.loads(self._args.attributes))
    except exceptions.BadUrlException:
        raise exceptions.RepoManagerException("The URL for the peer was "
                                              "malformed.")
    except ValueError as e:
        raise exceptions.RepoManagerException(
            "The attributes message "
            "was malformed. {}".format(e))
    self._updateRepo(self._repo.insertPeer, peer)
Removes a peer by URL from this repo
def removePeer(self):
    """
    Removes a peer by URL from this repo
    """
    self._openRepo()

    # Deletion is deferred until the user confirms it.
    def doRemove():
        self._updateRepo(self._repo.removePeer, self._args.url)
    self._confirmDelete("Peer", self._args.url, doRemove)
Removes an ontology from the repo.
def removeOntology(self):
    """
    Removes an ontology from the repo.
    """
    self._openRepo()
    ontology = self._repo.getOntologyByName(self._args.ontologyName)

    # Deletion is deferred until the user confirms it.
    def doRemove():
        self._updateRepo(self._repo.removeOntology, ontology)
    self._confirmDelete("Ontology", ontology.getName(), doRemove)
Adds an rnaQuantification into this repo
def addRnaQuantification(self):
    """
    Adds an rnaQuantification into this repo
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    biosampleId = ""
    if self._args.biosampleName:
        biosample = dataset.getBiosampleByName(self._args.biosampleName)
        biosampleId = biosample.getId()
    if self._args.name is None:
        name = getNameFromPath(self._args.quantificationFilePath)
    else:
        name = self._args.name
    # TODO: programs not fully supported by GA4GH yet
    programs = ""
    featureType = "gene"
    if self._args.transcript:
        featureType = "transcript"
    # NOTE(review): readGroupSetNames is passed the singular
    # readGroupSetName argument — confirm the parameter accepts a
    # single name.  Also note this writes via rnaseq2ga directly
    # rather than through self._updateRepo.
    rnaseq2ga.rnaseq2ga(
        self._args.quantificationFilePath, self._args.filePath, name,
        self._args.format, dataset=dataset, featureType=featureType,
        description=self._args.description, programs=programs,
        featureSetNames=self._args.featureSetNames,
        readGroupSetNames=self._args.readGroupSetName,
        biosampleId=biosampleId)
Initialize an empty RNA quantification set
def initRnaQuantificationSet(self):
    """
    Creates the empty sqlite tables that back a new RNA quantification
    set at the configured file path.
    """
    rnaQuantStore = rnaseq2ga.RnaSqliteStore(self._args.filePath)
    rnaQuantStore.createTables()
Adds an rnaQuantificationSet into this repo
def addRnaQuantificationSet(self):
    """
    Adds an rnaQuantificationSet into this repo.

    The set is populated from the sqlite file named on the command line
    and must be associated with an existing reference set.

    :raises exceptions.RepoManagerException: if no reference set name
        was supplied
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    # Default the set's name to the file name when none was supplied.
    if self._args.name is None:
        name = getNameFromPath(self._args.filePath)
    else:
        name = self._args.name
    rnaQuantificationSet = rna_quantification.SqliteRnaQuantificationSet(
        dataset, name)
    referenceSetName = self._args.referenceSetName
    if referenceSetName is None:
        raise exceptions.RepoManagerException(
            "A reference set name must be provided")
    referenceSet = self._repo.getReferenceSetByName(referenceSetName)
    rnaQuantificationSet.setReferenceSet(referenceSet)
    rnaQuantificationSet.populateFromFile(self._args.filePath)
    rnaQuantificationSet.setAttributes(json.loads(self._args.attributes))
    self._updateRepo(
        self._repo.insertRnaQuantificationSet, rnaQuantificationSet)
Removes an rnaQuantificationSet from this repo
def removeRnaQuantificationSet(self):
    """
    Deletes the named rnaQuantificationSet from this repo, after asking
    the user for confirmation.
    """
    self._openRepo()
    dataset = self._repo.getDatasetByName(self._args.datasetName)
    rnaQuantSet = dataset.getRnaQuantificationSetByName(
        self._args.rnaQuantificationSetName)

    def doRemove():
        self._updateRepo(
            self._repo.removeRnaQuantificationSet, rnaQuantSet)
    self._confirmDelete(
        "RnaQuantificationSet", rnaQuantSet.getLocalId(), doRemove)
Reads RNA Quantification data in one of several formats and stores the data
in a sqlite database for use by the GA4GH reference server.
Supports the following quantification output types:
Cufflinks, kallisto, RSEM.
def rnaseq2ga(quantificationFilename, sqlFilename, localName, rnaType,
              dataset=None, featureType="gene",
              description="", programs="", featureSetNames="",
              readGroupSetNames="", biosampleId=""):
    """
    Reads RNA Quantification data in one of several formats and stores the data
    in a sqlite database for use by the GA4GH reference server.
    Supports the following quantification output types:
    Cufflinks, kallisto, RSEM.

    :param quantificationFilename: path of the tool output file to read
    :param sqlFilename: path of the sqlite database to write to
    :param localName: name for the new RNA quantification
    :param rnaType: one of SUPPORTED_RNA_INPUT_FORMATS
    :param dataset: dataset used to resolve feature set and read group
        set names to ids; when None those lookups are skipped
    :raises exceptions.UnsupportedFormatException: if rnaType is not a
        supported input format
    """
    readGroupSetName = ""
    if readGroupSetNames:
        # Only the first comma-separated name is used (see TODO below).
        readGroupSetName = readGroupSetNames.strip().split(",")[0]
    featureSetIds = ""
    readGroupIds = ""
    if dataset:
        featureSetIdList = []
        if featureSetNames:
            for annotationName in featureSetNames.split(","):
                featureSet = dataset.getFeatureSetByName(annotationName)
                featureSetIdList.append(featureSet.getId())
        featureSetIds = ",".join(featureSetIdList)
        # TODO: multiple readGroupSets
        if readGroupSetName:
            readGroupSet = dataset.getReadGroupSetByName(readGroupSetName)
            readGroupIds = ",".join(
                [x.getId() for x in readGroupSet.getReadGroups()])
    if rnaType not in SUPPORTED_RNA_INPUT_FORMATS:
        raise exceptions.UnsupportedFormatException(rnaType)
    rnaDB = RnaSqliteStore(sqlFilename)
    # Pick the writer matching the tool that produced the input file.
    # NOTE(review): assumes SUPPORTED_RNA_INPUT_FORMATS contains exactly
    # these three types, otherwise `writer` would be unbound — confirm.
    if rnaType == "cufflinks":
        writer = CufflinksWriter(rnaDB, featureType, dataset=dataset)
    elif rnaType == "kallisto":
        writer = KallistoWriter(rnaDB, featureType, dataset=dataset)
    elif rnaType == "rsem":
        writer = RsemWriter(rnaDB, featureType, dataset=dataset)
    writeRnaseqTable(rnaDB, [localName], description, featureSetIds,
                     readGroupId=readGroupIds, programs=programs,
                     biosampleId=biosampleId)
    writeExpressionTable(writer, [(localName, quantificationFilename)])
Adds an RNAQuantification to the db. Datafields is a tuple in the
order:
id, feature_set_ids, description, name,
read_group_ids, programs, biosample_id
def addRNAQuantification(self, datafields):
    """
    Queues an RNAQuantification row for insertion. Datafields is a
    tuple in the order:
    id, feature_set_ids, description, name,
    read_group_ids, programs, biosample_id
    Queued rows are flushed to the database once the batch size is
    reached.
    """
    self._rnaValueList.append(datafields)
    pending = len(self._rnaValueList)
    if pending >= self._batchSize:
        self.batchaddRNAQuantification()
Adds an Expression to the db. Datafields is a tuple in the order:
id, rna_quantification_id, name, expression,
is_normalized, raw_read_count, score, units, conf_low, conf_hi
def addExpression(self, datafields):
    """
    Queues an Expression row for insertion. Datafields is a tuple in
    the order:
    id, rna_quantification_id, name, expression,
    is_normalized, raw_read_count, score, units, conf_low, conf_hi
    Queued rows are flushed to the database once the batch size is
    reached.
    """
    self._expressionValueList.append(datafields)
    pending = len(self._expressionValueList)
    if pending >= self._batchSize:
        self.batchAddExpression()
Index columns that are queried. The expression index can
take a long time.
def createIndices(self):
    """
    Builds indices on the Expression columns used by queries; creating
    the expression index can take a long time.
    """
    indexStatements = (
        '''CREATE INDEX name_index
             ON Expression (name)''',
        '''CREATE INDEX expression_index
             ON Expression (expression)''',
    )
    for statement in indexStatements:
        self._cursor.execute(statement)
        self._dbConn.commit()
Reads the quantification results file and adds entries to the
specified database.
def writeExpression(self, rnaQuantificationId, quantfilename):
    """
    Reads the quantification results file and adds entries to the
    specified database.

    :param rnaQuantificationId: id of the parent RNA quantification
        to which each expression row is linked
    :param quantfilename: path of the tab-separated quantification
        results file; the first row is treated as a header
    """
    isNormalized = self._isNormalized
    units = self._units
    with open(quantfilename, "r") as quantFile:
        # NOTE(review): the bytes delimiter implies Python 2 csv usage;
        # confirm before porting to Python 3.
        quantificationReader = csv.reader(quantFile, delimiter=b"\t")
        header = next(quantificationReader)
        # Map the writer's configured column names to indices in this
        # file's header; a -1 default marks optional columns as absent.
        expressionLevelColNum = self.setColNum(
            header, self._expressionLevelCol)
        nameColNum = self.setColNum(header, self._nameCol)
        countColNum = self.setColNum(header, self._countCol, -1)
        confColLowNum = self.setColNum(header, self._confColLow, -1)
        confColHiNum = self.setColNum(header, self._confColHi, -1)
        # Expression ids are sequential within this file.
        expressionId = 0
        for expression in quantificationReader:
            expressionLevel = expression[expressionLevelColNum]
            name = expression[nameColNum]
            rawCount = 0.0
            if countColNum != -1:
                rawCount = expression[countColNum]
            confidenceLow = 0.0
            confidenceHi = 0.0
            score = 0.0
            if confColLowNum != -1 and confColHiNum != -1:
                confidenceLow = float(expression[confColLowNum])
                confidenceHi = float(expression[confColHiNum])
                # The score is the midpoint of the confidence interval.
                score = (confidenceLow + confidenceHi)/2
            datafields = (expressionId, rnaQuantificationId, name,
                          expressionLevel, isNormalized, rawCount, score,
                          units, confidenceLow, confidenceHi)
            self._db.addExpression(datafields)
            expressionId += 1
        # Flush any rows still queued below the batch size.
        self._db.batchAddExpression()
Fetch sequences from NCBI using the eself interface.
An interbase interval may be optionally provided with startIndex and
endIndex. NCBI eself will return just the requested subsequence, which
might greatly reduce payload sizes (especially with chromosome-scale
sequences). When wrapped is True, return list of sequence lines rather
than concatenated sequence.
>>> len(_fetchSequence('NP_056374.2'))
1596
Pass the desired interval rather than using Python's [] slice
operator.
>>> _fetchSequence('NP_056374.2',0,10)
'MESRETLSSS'
>>> _fetchSequence('NP_056374.2')[0:10]
'MESRETLSSS'
def _fetchSequence(ac, startIndex=None, endIndex=None):
    """Fetch sequences from NCBI using the E-utilities efetch interface.

    An interbase interval may be optionally provided with startIndex and
    endIndex. NCBI efetch will return just the requested subsequence, which
    might greatly reduce payload sizes (especially with chromosome-scale
    sequences). The response is returned as a list of sequence lines
    (already line wrapped) rather than a concatenated sequence.

    >>> len(_fetchSequence('NP_056374.2'))
    1596

    Pass the desired interval rather than using Python's [] slice
    operator.

    >>> _fetchSequence('NP_056374.2',0,10)
    'MESRETLSSS'

    >>> _fetchSequence('NP_056374.2')[0:10]
    'MESRETLSSS'
    """
    urlFmt = (
        "http://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?"
        "db=nucleotide&id={ac}&rettype=fasta&retmode=text")
    if startIndex is None or endIndex is None:
        url = urlFmt.format(ac=ac)
    else:
        # efetch coordinates are 1-based inclusive, so convert from the
        # 0-based interbase interval.
        urlFmt += "&seq_start={start}&seq_stop={stop}"
        url = urlFmt.format(ac=ac, start=startIndex + 1, stop=endIndex)
    resp = requests.get(url)
    resp.raise_for_status()
    # Drop the FASTA header line; keep only the sequence lines.
    seqlines = resp.content.splitlines()[1:]
    print("{ac}[{s},{e}) => {n} lines ({u})".format(
        ac=ac, s=startIndex, e=endIndex, n=len(seqlines), u=url))
    # return response as list of lines, already line wrapped
    return seqlines
Creates a new bam header based on the specified header from the
parent BAM file.
def createBamHeader(self, baseHeader):
    """
    Builds a new BAM header derived from the specified header of the
    parent BAM file, keeping only the first numChromosomes sequence
    records and rewriting each one.
    """
    newHeader = dict(baseHeader)
    keptSequences = []
    for index, referenceInfo in enumerate(newHeader['SQ']):
        if index >= self.numChromosomes:
            continue
        referenceName = referenceInfo['SN']
        # The sequence dictionary in the BAM file has to match up
        # with the sequence ids in the data, so we must be sure
        # that these still match up.
        assert referenceName == self.chromosomes[index]
        keptSequences.append({
            'AS': self.referenceSetName,
            'SN': referenceName,
            'LN': 0,  # FIXME
            'UR': 'http://example.com',
            'M5': 'dbb6e8ece0b5de29da56601613007c2a',  # FIXME
            'SP': 'Human',
        })
    newHeader['SQ'] = keptSequences
    return newHeader
Creates the repository for all the data we've just downloaded.
def createRepo(self):
    """
    Creates the repository for all the data we've just downloaded.

    Builds a new SqlDataRepository containing the subset reference set
    and a dataset holding the downloaded variant and read group sets,
    then prints a summary of the result.
    """
    repo = datarepo.SqlDataRepository(self.repoPath)
    repo.open("w")
    repo.initialise()
    referenceSet = references.HtslibReferenceSet("GRCh37-subset")
    referenceSet.populateFromFile(self.fastaFilePath)
    referenceSet.setDescription("Subset of GRCh37 used for demonstration")
    # Mark the reference set and each reference as human (NCBI id 9606).
    referenceSet.setSpeciesFromJson(
        '{"id": "9606",'
        + '"term": "Homo sapiens", "source_name": "NCBI"}')
    for reference in referenceSet.getReferences():
        reference.setSpeciesFromJson(
            '{"id": "9606",'
            + '"term": "Homo sapiens", "source_name": "NCBI"}')
        reference.setSourceAccessions(
            self.accessions[reference.getName()] + ".subset")
    repo.insertReferenceSet(referenceSet)
    dataset = datasets.Dataset("1kg-p3-subset")
    dataset.setDescription("Sample data from 1000 Genomes phase 3")
    repo.insertDataset(dataset)
    variantSet = variants.HtslibVariantSet(dataset, "mvncall")
    variantSet.setReferenceSet(referenceSet)
    dataUrls = [vcfFile for vcfFile, _ in self.vcfFilePaths]
    indexFiles = [indexFile for _, indexFile in self.vcfFilePaths]
    variantSet.populateFromFile(dataUrls, indexFiles)
    variantSet.checkConsistency()
    repo.insertVariantSet(variantSet)
    # One read group set is created per sample BAM file.
    for sample, (bamFile, indexFile) in zip(
            self.samples, self.bamFilePaths):
        readGroupSet = reads.HtslibReadGroupSet(dataset, sample)
        readGroupSet.populateFromFile(bamFile, indexFile)
        readGroupSet.setReferenceSet(referenceSet)
        repo.insertReadGroupSet(readGroupSet)
    repo.commit()
    repo.close()
    self.log("Finished creating the repository; summary:\n")
    # Re-open read-only just to print the summary.
    repo.open("r")
    repo.printSummary()
A helper function used just to help modularize the code a bit.
def _configure_backend(app):
    """A helper function used just to help modularize the code a bit.

    Builds and returns the Backend selected by the app's DATA_SOURCE
    URL, configured with the app's request-validation, page-size and
    response-length settings.

    :raises exceptions.ConfigurationException: if the DATA_SOURCE URL
        scheme is not one of simulated://, empty:// or file://
    """
    # Allocate the backend
    # We use URLs to specify the backend. Currently we have file:// URLs (or
    # URLs with no scheme) for the SqlDataRepository, and special empty:// and
    # simulated:// URLs for empty or simulated data sources.
    dataSource = urlparse.urlparse(app.config["DATA_SOURCE"], "file")
    if dataSource.scheme == "simulated":
        # Ignore the query string
        randomSeed = app.config["SIMULATED_BACKEND_RANDOM_SEED"]
        numCalls = app.config["SIMULATED_BACKEND_NUM_CALLS"]
        variantDensity = app.config["SIMULATED_BACKEND_VARIANT_DENSITY"]
        numVariantSets = app.config["SIMULATED_BACKEND_NUM_VARIANT_SETS"]
        numReferenceSets = app.config[
            "SIMULATED_BACKEND_NUM_REFERENCE_SETS"]
        numReferencesPerReferenceSet = app.config[
            "SIMULATED_BACKEND_NUM_REFERENCES_PER_REFERENCE_SET"]
        numAlignmentsPerReadGroup = app.config[
            "SIMULATED_BACKEND_NUM_ALIGNMENTS_PER_READ_GROUP"]
        numReadGroupsPerReadGroupSet = app.config[
            "SIMULATED_BACKEND_NUM_READ_GROUPS_PER_READ_GROUP_SET"]
        numPhenotypeAssociations = app.config[
            "SIMULATED_BACKEND_NUM_PHENOTYPE_ASSOCIATIONS"]
        numPhenotypeAssociationSets = app.config[
            "SIMULATED_BACKEND_NUM_PHENOTYPE_ASSOCIATION_SETS"]
        numRnaQuantSets = app.config[
            "SIMULATED_BACKEND_NUM_RNA_QUANTIFICATION_SETS"]
        numExpressionLevels = app.config[
            "SIMULATED_BACKEND_NUM_EXPRESSION_LEVELS_PER_RNA_QUANT_SET"]
        dataRepository = datarepo.SimulatedDataRepository(
            randomSeed=randomSeed, numCalls=numCalls,
            variantDensity=variantDensity, numVariantSets=numVariantSets,
            numReferenceSets=numReferenceSets,
            numReferencesPerReferenceSet=numReferencesPerReferenceSet,
            numReadGroupsPerReadGroupSet=numReadGroupsPerReadGroupSet,
            numAlignments=numAlignmentsPerReadGroup,
            numPhenotypeAssociations=numPhenotypeAssociations,
            numPhenotypeAssociationSets=numPhenotypeAssociationSets,
            numRnaQuantSets=numRnaQuantSets,
            numExpressionLevels=numExpressionLevels)
    elif dataSource.scheme == "empty":
        # An empty repository with no data at all.
        dataRepository = datarepo.EmptyDataRepository()
    elif dataSource.scheme == "file":
        # A sqlite-backed repository on the local filesystem.
        path = os.path.join(dataSource.netloc, dataSource.path)
        dataRepository = datarepo.SqlDataRepository(path)
        dataRepository.open(datarepo.MODE_READ)
    else:
        raise exceptions.ConfigurationException(
            "Unsupported data source scheme: " + dataSource.scheme)
    theBackend = backend.Backend(dataRepository)
    theBackend.setRequestValidation(app.config["REQUEST_VALIDATION"])
    theBackend.setDefaultPageSize(app.config["DEFAULT_PAGE_SIZE"])
    theBackend.setMaxResponseLength(app.config["MAX_RESPONSE_LENGTH"])
    return theBackend
TODO Document this critical function! What does it do? What does
it assume?
def configure(configFile=None, baseConfig="ProductionConfig",
              port=8000, extraConfig={}):
    """
    Configures the global Flask app for serving.

    Loads configuration (in increasing order of precedence) from the
    named serverconfig class, the GA4GH_CONFIGURATION environment
    variable, an optional config file and the extraConfig dict, then
    sets up logging, the file-handle cache, CORS, the data backend,
    peer networking and (when configured) OIDC / Auth0 authentication.

    :param configFile: optional path of a python config file to load
    :param baseConfig: name of a configuration class in
        ga4gh.server.serverconfig to use as the base configuration
    :param port: port the server will listen on; used to build the
        automatic OIDC redirect URI when testing
    :param extraConfig: a dict of settings overriding all other sources
    :raises exceptions.ConfigurationException: if the OIDC settings are
        incomplete
    """
    file_handler = StreamHandler()
    file_handler.setLevel(logging.WARNING)
    app.logger.addHandler(file_handler)
    configStr = 'ga4gh.server.serverconfig:{0}'.format(baseConfig)
    app.config.from_object(configStr)
    if os.environ.get('GA4GH_CONFIGURATION') is not None:
        app.config.from_envvar('GA4GH_CONFIGURATION')
    if configFile is not None:
        app.config.from_pyfile(configFile)
    app.config.update(extraConfig.items())
    # Setup file handle cache max size
    datamodel.fileHandleCache.setMaxCacheSize(
        app.config["FILE_HANDLE_CACHE_MAX_SIZE"])
    # Setup CORS
    try:
        cors.CORS(app, allow_headers='Content-Type')
    except AssertionError:
        pass
    app.serverStatus = ServerStatus()
    app.backend = _configure_backend(app)
    if app.config.get('SECRET_KEY'):
        app.secret_key = app.config['SECRET_KEY']
    elif app.config.get('OIDC_PROVIDER'):
        raise exceptions.ConfigurationException(
            'OIDC configuration requires a secret key')
    if app.config.get('CACHE_DIRECTORY'):
        app.cache_dir = app.config['CACHE_DIRECTORY']
    else:
        app.cache_dir = '/tmp/ga4gh'
    # mode=384 is 0o600: cache files are private to the server user.
    app.cache = FileSystemCache(
        app.cache_dir, threshold=5000, default_timeout=600, mode=384)
    # Peer service initialization
    network.initialize(
        app.config.get('INITIAL_PEERS'),
        app.backend.getDataRepository(),
        app.logger)
    app.oidcClient = None
    app.myPort = port
    if app.config.get('AUTH0_ENABLED'):
        emails = app.config.get('AUTH0_AUTHORIZED_EMAILS', '').split(',')
        for email in emails:
            auth.authorize_email(email, app.cache)
    if "OIDC_PROVIDER" in app.config:
        # The oic client. If we're testing, we don't want to verify
        # SSL certificates
        app.oidcClient = oic.oic.Client(
            verify_ssl=('TESTING' not in app.config))
        try:
            app.oidcClient.provider_config(app.config['OIDC_PROVIDER'])
        except requests.exceptions.ConnectionError:
            # The provider is unreachable; fall back to the statically
            # configured endpoints.
            configResponse = message.ProviderConfigurationResponse(
                issuer=app.config['OIDC_PROVIDER'],
                authorization_endpoint=app.config['OIDC_AUTHZ_ENDPOINT'],
                token_endpoint=app.config['OIDC_TOKEN_ENDPOINT'],
                revocation_endpoint=app.config['OIDC_TOKEN_REV_ENDPOINT'])
            app.oidcClient.handle_provider_config(configResponse,
                                                  app.config['OIDC_PROVIDER'])
        # The redirect URI comes from the configuration.
        # If we are testing, then we allow the automatic creation of a
        # redirect uri if none is configured
        redirectUri = app.config.get('OIDC_REDIRECT_URI')
        if redirectUri is None and app.config.get('TESTING'):
            redirectUri = 'https://{0}:{1}/oauth2callback'.format(
                socket.gethostname(), app.myPort)
        # Bug fix: the original guard was ``redirectUri is []``, which
        # is always False (identity comparison against a fresh list),
        # so a missing redirect URI was never reported.
        if redirectUri is None:
            raise exceptions.ConfigurationException(
                'OIDC configuration requires a redirect uri')
        app.oidcClient.redirect_uris = [redirectUri]
        # We only support dynamic registration while testing.
        if ('registration_endpoint' in app.oidcClient.provider_info and
                app.config.get('TESTING')):
            app.oidcClient.register(
                app.oidcClient.provider_info["registration_endpoint"],
                redirect_uris=[redirectUri])
        else:
            response = message.RegistrationResponse(
                client_id=app.config['OIDC_CLIENT_ID'],
                client_secret=app.config['OIDC_CLIENT_SECRET'],
                redirect_uris=[redirectUri],
                verify_ssl=False)
            app.oidcClient.store_registration_info(response)
Returns a Flask response object for the specified data and HTTP status.
def getFlaskResponse(responseString, httpStatus=200):
    """
    Wraps the given payload and HTTP status code in a Flask response
    object using the protocol's mimetype.
    """
    return flask.Response(
        responseString, status=httpStatus, mimetype=MIMETYPE)
Handles the specified HTTP POST request, which maps to the specified
protocol handler endpoint and protocol request class.
def handleHttpPost(request, endpoint):
    """
    Handles the specified HTTP POST request, which maps to the specified
    protocol handler endpoint and protocol request class.

    :raises exceptions.UnsupportedMediaTypeException: if the request's
        mimetype is present and not the protocol mimetype
    """
    if request.mimetype and request.mimetype != MIMETYPE:
        raise exceptions.UnsupportedMediaTypeException()
    # An absent or empty body is treated as an empty JSON object.
    body = request.get_data()
    if body is None or body == '':
        body = '{}'
    return getFlaskResponse(endpoint(body))
Handles an exception that occurs somewhere in the process of handling
a request.
def handleException(exception):
    """
    Handles an exception that occurs somewhere in the process of handling
    a request, converting it into an HTTP response.
    """
    serverException = exception
    if not isinstance(exception, exceptions.BaseServerException):
        # Unexpected error: log the traceback and wrap it in a generic
        # server error so internals are not leaked to the client.
        with app.test_request_context():
            app.log_exception(exception)
        serverException = exceptions.getServerError(exception)
    error = serverException.toProtocolElement()
    # If the exception is being viewed by a web browser, we can render a nicer
    # view.
    if flask.request and 'Accept' in flask.request.headers and \
            flask.request.headers['Accept'].find('text/html') != -1:
        message = "<h1>Error {}</h1><pre>{}</pre>".format(
            serverException.httpStatus,
            protocol.toJson(error))
        # Point browsers at the login page for auth failures.
        if serverException.httpStatus == 401 \
                or serverException.httpStatus == 403:
            message += "Please try <a href=\"/login\">logging in</a>."
        return message
    else:
        # Non-browser clients get the protocol JSON error body.
        responseStr = protocol.toJson(error)
        return getFlaskResponse(responseStr, serverException.httpStatus)
If we are not logged in, this generates the redirect URL to the OIDC
provider and returns the redirect response
:return: A redirect response to the OIDC provider
def startLogin():
    """
    If we are not logged in, this generates the redirect URL to the OIDC
    provider and returns the redirect response
    :return: A redirect response to the OIDC provider
    """
    # Fresh state and nonce protect the flow against CSRF and replay.
    state = oic.oauth2.rndstr(SECRET_KEY_LENGTH)
    nonce = oic.oauth2.rndstr(SECRET_KEY_LENGTH)
    flask.session["state"] = state
    flask.session["nonce"] = nonce
    requestArgs = {
        "client_id": app.oidcClient.client_id,
        "response_type": "code",
        "scope": ["openid", "profile"],
        "nonce": nonce,
        "redirect_uri": app.oidcClient.redirect_uris[0],
        "state": state,
    }
    result = app.oidcClient.do_authorization_request(
        request_args=requestArgs, state=state)
    return flask.redirect(result.url)
The request will have a parameter 'key' if it came from the command line
client, or have a session key of 'key' if it's the browser.
If the token is not found, start the login process.
If there is no oidcClient, we are running naked and we don't check.
If we're being redirected to the oidcCallback we don't check.
:returns None if all is ok (and the request handler continues as usual).
Otherwise if the key was in the session (therefore we're in a browser)
then startLogin() will redirect to the OIDC provider. If the key was in
the request arguments, we're using the command line and just raise an
exception.
def checkAuthentication():
    """
    The request will have a parameter 'key' if it came from the command line
    client, or have a session key of 'key' if it's the browser.
    If the token is not found, start the login process.
    If there is no oidcClient, we are running naked and we don't check.
    If we're being redirected to the oidcCallback we don't check.
    :returns None if all is ok (and the request handler continues as usual).
    Otherwise if the key was in the session (therefore we're in a browser)
    then startLogin() will redirect to the OIDC provider. If the key was in
    the request arguments, we're using the command line and just raise an
    exception.
    """
    if app.oidcClient is None:
        # No OIDC configured: nothing to check.
        return
    if flask.request.endpoint == 'oidcCallback':
        # The OIDC callback itself must not be authenticated.
        return
    key = flask.session.get('key') or flask.request.args.get('key')
    if key is not None and app.cache.get(key):
        # Valid, active key: carry on as usual.
        return
    if 'key' in flask.request.args:
        # Command line client: signal failure with an exception.
        raise exceptions.NotAuthenticatedException()
    # Browser session: redirect to the OIDC provider to log in.
    return startLogin()
Handles the specified flask request for one of the GET URLs
Invokes the specified endpoint to generate a response.
def handleFlaskGetRequest(id_, flaskRequest, endpoint):
    """
    Dispatches a flask request for one of the GET URLs to the given
    endpoint and returns its response; any other method is rejected.
    """
    if flaskRequest.method != "GET":
        raise exceptions.MethodNotAllowedException()
    return handleHttpGet(id_, endpoint)
Handles the specified flask request for one of the POST URLS
Invokes the specified endpoint to generate a response.
def handleFlaskPostRequest(flaskRequest, endpoint):
    """
    Dispatches a flask request for one of the POST URLs to the given
    endpoint; OPTIONS (CORS preflight) is answered separately and any
    other method is rejected.
    """
    method = flaskRequest.method
    if method == "POST":
        return handleHttpPost(flaskRequest, endpoint)
    if method == "OPTIONS":
        return handleHttpOptions()
    raise exceptions.MethodNotAllowedException()
Returns the list of ReferenceSets for this server.
def getVariantAnnotationSets(self, datasetId):
    """
    Returns the list of VariantAnnotationSets in the dataset with the
    specified id, gathered from all of its variant sets.
    """
    # TODO this should be displayed per-variant set, not per dataset.
    variantAnnotationSets = []
    dataset = app.backend.getDataRepository().getDataset(datasetId)
    for variantSet in dataset.getVariantSets():
        variantAnnotationSets.extend(
            variantSet.getVariantAnnotationSets())
    return variantAnnotationSets
This decorator wraps a view function so that it is protected when Auth0
is enabled. This means that any request will be expected to have a signed
token in the authorization header if the `AUTH0_ENABLED` configuration
setting is True.
The authorization header will have the form:
"authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9....."
If a request is not properly signed, an attempt is made to provide the
client with useful error messages. This means that if a request is not
authorized the underlying view function will not be executed.
When `AUTH0_ENABLED` is false, this decorator will simply execute the
decorated view without observing the authorization header.
:param app:
:return: Flask view decorator
def auth_decorator(app=None):
    """
    This decorator wraps a view function so that it is protected when Auth0
    is enabled. This means that any request will be expected to have a signed
    token in the authorization header if the `AUTH0_ENABLED` configuration
    setting is True.
    The authorization header will have the form:
    "authorization: Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9....."
    If a request is not properly signed, an attempt is made to provide the
    client with useful error messages. This means that if a request is not
    authorized the underlying view function will not be executed.
    When `AUTH0_ENABLED` is false, this decorator will simply execute the
    decorated view without observing the authorization header.
    :param app: the Flask app whose config and cache are consulted
    :return: Flask view decorator
    """
    def requires_auth(f):
        @functools.wraps(f)
        def decorated(*args, **kwargs):
            # This decorator will only apply with AUTH0_ENABLED set to True.
            if app.config.get('AUTH0_ENABLED', False):
                client_id = app.config.get("AUTH0_CLIENT_ID")
                client_secret = app.config.get("AUTH0_CLIENT_SECRET")
                auth_header = flask.request.headers.get('Authorization', None)
                # Each of these functions will throw a 401 if there is a
                # problem decoding the token with some helpful error message.
                if auth_header:
                    token, profile = decode_header(
                        auth_header, client_id, client_secret)
                else:
                    raise exceptions.NotAuthorizedException()
                # We store the token in the session so that later
                # stages can use it to connect identity and authorization.
                flask.session['auth0_key'] = token
                # Now we need to make sure that on top of having a good token
                # They are authorized, and if not provide an error message
                is_authorized(app.cache, profile['email'])
                is_active(app.cache, token)
            return f(*args, **kwargs)
        return decorated
    return requires_auth
A function that threads the header through decoding and returns a tuple
of the token and payload if successful. This does not fully authenticate
a request.
:param auth_header:
:param client_id:
:param client_secret:
:return: (token, profile)
def decode_header(auth_header, client_id, client_secret):
    """
    Threads the authorization header through a chain of validation
    steps and then decodes it, returning a (token, profile) tuple on
    success. This does not fully authenticate a request.
    :param auth_header:
    :param client_id:
    :param client_secret:
    :return: (token, profile)
    """
    checked = _has_header(auth_header)
    checked = _has_bearer(checked)
    checked = _has_token(checked)
    checked = _well_formed(checked)
    return _decode_header(checked, client_id, client_secret)
Logs out the current session by removing it from the cache. This is
expected to only occur when a session has
def logout(cache):
    """
    Logs out the current session by removing its token from the cache
    and clearing the session. This is expected to only occur when a
    session has been authenticated.

    :param cache: the application cache holding active tokens
    :return: True
    """
    # Use pop() with a default so that a session that was never fully
    # logged in (no 'auth0_key') does not raise a KeyError here.
    key = flask.session.pop('auth0_key', None)
    if key is not None:
        cache.set(key, None)
    flask.session.clear()
    return True
This function will generate a view function that can be used to handle
the return from Auth0. The "callback" is a redirected session from auth0
that includes the token we can use to authenticate that session.
If the session is properly authenticated Auth0 will provide a code so our
application can identify the session. Once this has been done we ask
for more information about the identified session from Auth0. We then use
the email of the user logged in to Auth0 to authorize their token to make
further requests by adding it to the application's cache.
It sets a value in the cache that sets the current session as logged in. We
can then refer to this id_token to later authenticate a session.
:param domain:
:param client_id:
:param client_secret:
:param redirect_uri:
:return : View function
def callback_maker(
        cache=None, domain='', client_id='',
        client_secret='', redirect_uri=''):
    """
    This function will generate a view function that can be used to handle
    the return from Auth0. The "callback" is a redirected session from auth0
    that includes the token we can use to authenticate that session.
    If the session is properly authenticated Auth0 will provide a code so our
    application can identify the session. Once this has been done we ask
    for more information about the identified session from Auth0. We then use
    the email of the user logged in to Auth0 to authorize their token to make
    further requests by adding it to the application's cache.
    It sets a value in the cache that sets the current session as logged in. We
    can then refer to this id_token to later authenticate a session.
    :param domain:
    :param client_id:
    :param client_secret:
    :param redirect_uri:
    :return : View function
    """
    def callback_handling():
        code = flask.request.args.get('code')
        if code is None:
            raise exceptions.NotAuthorizedException(
                'The callback expects a well '
                'formatted code, {} was provided'.format(code))
        json_header = {'content-type': 'application/json'}
        # Get auth token
        # Exchange the one-time code for id and access tokens.
        token_url = "https://{domain}/oauth/token".format(domain=domain)
        token_payload = {
            'client_id': client_id,
            'client_secret': client_secret,
            'redirect_uri': redirect_uri,
            'code': code,
            'grant_type': 'authorization_code'}
        try:
            token_info = requests.post(
                token_url,
                data=json.dumps(token_payload),
                headers=json_header).json()
            id_token = token_info['id_token']
            access_token = token_info['access_token']
        except Exception as e:
            # NOTE(review): e.message is a Python 2 idiom — confirm
            # before porting to Python 3.
            raise exceptions.NotAuthorizedException(
                'The callback from Auth0 did not'
                'include the expected tokens: \n'
                '{}'.format(e.message))
        # Get profile information
        try:
            user_url = \
                "https://{domain}/userinfo?access_token={access_token}".format(
                    domain=domain, access_token=access_token)
            user_info = requests.get(user_url).json()
            email = user_info['email']
        except Exception as e:
            raise exceptions.NotAuthorizedException(
                'The user profile from Auth0 did '
                'not contain the expected data: \n {}'.format(e.message))
        # Log token in
        # Only previously authorized emails may activate a token.
        user = cache.get(email)
        if user and user['authorized']:
            cache.set(id_token, user_info)
            return flask.redirect('/login?code={}'.format(id_token))
        else:
            return flask.redirect('/login')
    return callback_handling
This function will generate a view function that can be used to handle
the return from Auth0. The "callback" is a redirected session from auth0
that includes the token we can use to authenticate that session.
If the session is properly authenticated Auth0 will provide a code so our
application can identify the session. Once this has been done we ask
for more information about the identified session from Auth0. We then use
the email of the user logged in to Auth0 to authorize their token to make
further requests by adding it to the application's cache.
It sets a value in the cache that sets the current session as logged in. We
can then refer to this id_token to later authenticate a session.
:param app:
:param scopes:
:param redirect_uri:
:param domain:
:param client_id:
:return : Rendered login template
def render_login(
        app=None, scopes='', redirect_uri='', domain='', client_id=''):
    """
    Renders the Auth0 login page template with the given OAuth
    parameters filled in.
    :param app: the Flask app supplying the jinja environment
    :param scopes:
    :param redirect_uri:
    :param domain:
    :param client_id:
    :return : Rendered login template
    """
    template = app.jinja_env.from_string(LOGIN_HTML)
    return template.render(
        scopes=scopes, redirect_uri=redirect_uri,
        domain=domain, client_id=client_id)
Renders a view from the app and a key that lets the current session grab
its token.
:param app:
:param key:
:return: Rendered view
def render_key(app, key=""):
"""
Renders a view from the app and a key that lets the current session grab
its token.
:param app:
:param key:
:return: Rendered view
"""
return app.jinja_env.from_string(KEY_HTML).render(
key=key) |
Takes the header and tries to return an active token and decoded
payload.
:param auth_header:
:param client_id:
:param client_secret:
:return: (token, profile)
def _decode_header(auth_header, client_id, client_secret):
    """
    Takes the header and tries to return an active token and decoded
    payload.

    Any decoding failure is translated into a NotAuthorizedException
    with a message describing the cause.

    :param auth_header: "Bearer <token>" authorization header value
    :param client_id: expected JWT audience
    :param client_secret: key used to verify the token signature
    :return: (token, profile)
    """
    try:
        # Header format is "Bearer <token>"; take the token part.
        token = auth_header.split()[1]
        # NOTE(review): jwt.ExpiredSignature and e.message are old
        # PyJWT / Python 2 idioms — confirm before upgrading either.
        payload = jwt.decode(
            token,
            client_secret,
            audience=client_id)
    except jwt.ExpiredSignature:
        raise exceptions.NotAuthorizedException(
            'Token has expired, please log in again.')
    # is valid client
    except jwt.InvalidAudienceError:
        message = 'Incorrect audience, expected: {}'.format(
            client_id)
        raise exceptions.NotAuthorizedException(message)
    # is valid token
    except jwt.DecodeError:
        raise exceptions.NotAuthorizedException(
            'Token signature could not be validated.')
    except Exception as e:
        raise exceptions.NotAuthorizedException(
            'Token signature was malformed. {}'.format(e.message))
    return token, payload
Accepts the cache and ID token and checks to see if the profile is
currently logged in. If so, return the token, otherwise throw a
NotAuthenticatedException.
:param cache:
:param token:
:return:
def is_active(cache, token):
    """
    Accepts the cache and ID token and checks to see if the profile is
    currently logged in. If so, return the token's profile, otherwise
    throw a NotAuthenticatedException.
    :param cache: cache mapping tokens to logged-in profiles
    :param token: ID token to look up
    :return: the cached profile
    """
    profile = cache.get(token)
    if profile:
        return profile
    raise exceptions.NotAuthenticatedException(
        'The token is good, but you are not logged in. Please '
        'try logging in again.')
Adds the specified reference to this ReferenceSet.
def addReference(self, reference):
"""
Adds the specified reference to this ReferenceSet.
"""
id_ = reference.getId()
self._referenceIdMap[id_] = reference
self._referenceNameMap[reference.getLocalId()] = reference
self._referenceIds.append(id_) |
Sets the species, an OntologyTerm, to the specified value, given as
a JSON string.
See the documentation for details of this field.
def setSpeciesFromJson(self, speciesJson):
    """
    Sets the species, an OntologyTerm, to the specified value, given as
    a JSON string.
    See the documentation for details of this field.
    :param speciesJson: JSON string encoding a protocol.OntologyTerm
    :raises InvalidJsonException: if the string cannot be parsed
    """
    try:
        parsed = protocol.fromJson(speciesJson, protocol.OntologyTerm)
    except Exception:
        # A bare `except:` would also swallow SystemExit and
        # KeyboardInterrupt; only translate real parse failures.
        raise exceptions.InvalidJsonException(speciesJson)
    self._species = protocol.toJsonDict(parsed)
Returns the reference with the specified name.
def getReferenceByName(self, name):
    """
    Returns the reference with the specified name, raising
    ReferenceNameNotFoundException if it is unknown.
    """
    try:
        return self._referenceNameMap[name]
    except KeyError:
        raise exceptions.ReferenceNameNotFoundException(name)
Returns the Reference with the specified ID or raises a
ReferenceNotFoundException if it does not exist.
def getReference(self, id_):
    """
    Returns the Reference with the specified ID, raising a
    ReferenceNotFoundException if it does not exist.
    """
    try:
        return self._referenceIdMap[id_]
    except KeyError:
        raise exceptions.ReferenceNotFoundException(id_)
Returns the MD5 checksum for this reference set. This checksum is
calculated by making a list of `Reference.md5checksum` for all
`Reference`s in this set. We then sort this list, and take the
MD5 hash of all the strings concatenated together.
def getMd5Checksum(self):
    """
    Returns the MD5 checksum for this reference set. This checksum is
    calculated by making a list of `Reference.md5checksum` for all
    `Reference`s in this set. We then sort this list, and take the
    MD5 hash of all the strings concatenated together.
    :return: hex digest string for the whole set
    """
    # Sorting the checksum strings directly is equivalent to sorting
    # the references by checksum and then joining their checksums.
    checksums = sorted(
        ref.getMd5Checksum() for ref in self.getReferences())
    # Encode explicitly: hashlib.md5 requires bytes under Python 3
    # (checksums are ASCII hex, so this is a no-op under Python 2).
    data = ''.join(checksums).encode('ascii')
    return hashlib.md5(data).hexdigest()
Returns the GA4GH protocol representation of this ReferenceSet.
def toProtocolElement(self):
    """
    Returns the GA4GH protocol representation of this ReferenceSet.
    """
    refSet = protocol.ReferenceSet()
    refSet.id = self.getId()
    refSet.name = self.getLocalId()
    refSet.description = pb.string(self.getDescription())
    refSet.assembly_id = pb.string(self.getAssemblyId())
    refSet.is_derived = self.getIsDerived()
    refSet.md5checksum = self.getMd5Checksum()
    species = self.getSpecies()
    if species:
        # The species is stored as a JSON dict; round-trip it through
        # the protocol parser to populate the OntologyTerm message.
        term = protocol.fromJson(json.dumps(species), protocol.OntologyTerm)
        refSet.species.term_id = term.term_id
        refSet.species.term = term.term
    refSet.source_accessions.extend(self.getSourceAccessions())
    refSet.source_uri = pb.string(self.getSourceUri())
    self.serializeAttributes(refSet)
    return refSet
Returns the GA4GH protocol representation of this Reference.
def toProtocolElement(self):
    """
    Returns the GA4GH protocol representation of this Reference.
    """
    ref = protocol.Reference()
    ref.id = self.getId()
    ref.name = self.getName()
    ref.length = self.getLength()
    ref.is_derived = self.getIsDerived()
    ref.md5checksum = self.getMd5Checksum()
    species = self.getSpecies()
    if species:
        # Stored as a JSON dict; re-parse to fill the OntologyTerm message.
        term = protocol.fromJson(json.dumps(species), protocol.OntologyTerm)
        ref.species.term_id = term.term_id
        ref.species.term = term.term
    ref.source_accessions.extend(self.getSourceAccessions())
    ref.source_divergence = pb.int(self.getSourceDivergence())
    ref.source_uri = self.getSourceUri()
    self.serializeAttributes(ref)
    return ref
Checks to ensure that the query range is valid within this reference.
If not, raise ReferenceRangeErrorException.
def checkQueryRange(self, start, end):
    """
    Checks to ensure that the query range is valid within this reference.
    If not, raise ReferenceRangeErrorException.
    """
    # Valid queries are non-negative, non-empty (start < end) and lie
    # within the reference sequence; everything else is rejected.
    if start < 0 or start >= end or end > self.getLength():
        raise exceptions.ReferenceRangeErrorException(
            self.getId(), start, end)
Populates the instance variables of this ReferenceSet from the
data URL.
def populateFromFile(self, dataUrl):
    """
    Populates the instance variables of this ReferenceSet from the
    data URL.

    :param dataUrl: path/URL of the FASTA file backing this set
    """
    self._dataUrl = dataUrl
    fastaFile = self.getFastaFile()
    # One Reference per FASTA sequence; checksum and length are computed
    # from the full sequence.
    for referenceName in fastaFile.references:
        reference = HtslibReference(self, referenceName)
        # TODO break this up into chunks and calculate the MD5
        # in bits (say, 64K chunks?)
        # NOTE(review): fetch() loads the entire sequence into memory, and
        # hashlib.md5 requires bytes under Python 3 — confirm `bases` type.
        bases = fastaFile.fetch(referenceName)
        md5checksum = hashlib.md5(bases).hexdigest()
        reference.setMd5checksum(md5checksum)
        reference.setLength(len(bases))
        self.addReference(reference) |
Populates this reference set from the values in the specified DB
row.
def populateFromRow(self, referenceSetRecord):
"""
Populates this reference set from the values in the specified DB
row.
"""
self._dataUrl = referenceSetRecord.dataurl
self._description = referenceSetRecord.description
self._assemblyId = referenceSetRecord.assemblyid
self._isDerived = bool(referenceSetRecord.isderived)
self._md5checksum = referenceSetRecord.md5checksum
species = referenceSetRecord.species
if species is not None and species != 'null':
self.setSpeciesFromJson(species)
self._sourceAccessions = json.loads(
referenceSetRecord.sourceaccessions)
self._sourceUri = referenceSetRecord.sourceuri |
Populates this reference from the values in the specified DB row.
def populateFromRow(self, referenceRecord):
"""
Populates this reference from the values in the specified DB row.
"""
self._length = referenceRecord.length
self._isDerived = bool(referenceRecord.isderived)
self._md5checksum = referenceRecord.md5checksum
species = referenceRecord.species
if species is not None and species != 'null':
self.setSpeciesFromJson(species)
self._sourceAccessions = json.loads(referenceRecord.sourceaccessions)
self._sourceDivergence = referenceRecord.sourcedivergence
self._sourceUri = referenceRecord.sourceuri |
Given a set of results from our search query, return the
`details` (feature,environment,phenotype)
def _extractAssociationsDetails(self, associations):
"""
Given a set of results from our search query, return the
`details` (feature,environment,phenotype)
"""
detailedURIRef = []
for row in associations.bindings:
if 'feature' in row:
detailedURIRef.append(row['feature'])
detailedURIRef.append(row['environment'])
detailedURIRef.append(row['phenotype'])
return detailedURIRef |
Given a list of uriRefs, return a list of dicts:
{'subject': s, 'predicate': p, 'object': o }
all values are strings
def _detailTuples(self, uriRefs):
"""
Given a list of uriRefs, return a list of dicts:
{'subject': s, 'predicate': p, 'object': o }
all values are strings
"""
details = []
for uriRef in uriRefs:
for subject, predicate, object_ in self._rdfGraph.triples(
(uriRef, None, None)):
details.append({
'subject': subject.toPython(),
'predicate': predicate.toPython(),
'object': object_.toPython()
})
return details |
Given a binding from the sparql query result,
create a dict of plain text
def _bindingsToDict(self, bindings):
"""
Given a binding from the sparql query result,
create a dict of plain text
"""
myDict = {}
for key, val in bindings.iteritems():
myDict[key.toPython().replace('?', '')] = val.toPython()
return myDict |
Given a filename, add it to the graph
def _addDataFile(self, filename):
"""
Given a filename, add it to the graph
"""
if filename.endswith('.ttl'):
self._rdfGraph.parse(filename, format='n3')
else:
self._rdfGraph.parse(filename, format='xml') |
Given a uriRef, return a dict of all the details for that Ref
use the uriRef as the 'id' of the dict
def _getDetails(self, uriRef, associations_details):
"""
Given a uriRef, return a dict of all the details for that Ref
use the uriRef as the 'id' of the dict
"""
associationDetail = {}
for detail in associations_details:
if detail['subject'] == uriRef:
associationDetail[detail['predicate']] = detail['object']
associationDetail['id'] = uriRef
return associationDetail |
Formats several external identifiers for query
def _formatExternalIdentifiers(self, element, element_type):
"""
Formats several external identifiers for query
"""
elementClause = None
elements = []
if not issubclass(element.__class__, dict):
element = protocol.toJsonDict(element)
if element['externalIdentifiers']:
for _id in element['externalIdentifiers']:
elements.append(self._formatExternalIdentifier(
_id, element_type))
elementClause = "({})".format(" || ".join(elements))
return elementClause |
Formats a single external identifier for query
def _formatExternalIdentifier(self, element, element_type):
"""
Formats a single external identifier for query
"""
if "http" not in element['database']:
term = "{}:{}".format(element['database'], element['identifier'])
namespaceTerm = self._toNamespaceURL(term)
else:
namespaceTerm = "{}{}".format(
element['database'], element['identifier'])
comparison = '?{} = <{}> '.format(element_type, namespaceTerm)
return comparison |
Formats the ontology terms for query
def _formatOntologyTerm(self, element, element_type):
"""
Formats the ontology terms for query
"""
elementClause = None
if isinstance(element, dict) and element.get('terms'):
elements = []
for _term in element['terms']:
if _term.get('id'):
elements.append('?{} = <{}> '.format(
element_type, _term['id']))
else:
elements.append('?{} = <{}> '.format(
element_type, self._toNamespaceURL(_term['term'])))
elementClause = "({})".format(" || ".join(elements))
return elementClause |
Formats the ontology term object for query
def _formatOntologyTermObject(self, terms, element_type):
"""
Formats the ontology term object for query
"""
elementClause = None
if not isinstance(terms, collections.Iterable):
terms = [terms]
elements = []
for term in terms:
if term.term_id:
elements.append('?{} = <{}> '.format(
element_type, term.term_id))
else:
elements.append('?{} = <{}> '.format(
element_type, self._toNamespaceURL(term.term)))
elementClause = "({})".format(" || ".join(elements))
return elementClause |
Formats a set of identifiers for query
def _formatIds(self, element, element_type):
"""
Formats a set of identifiers for query
"""
elementClause = None
if isinstance(element, collections.Iterable):
elements = []
for _id in element:
elements.append('?{} = <{}> '.format(
element_type, _id))
elementClause = "({})".format(" || ".join(elements))
return elementClause |
Formats elements passed into parts of a query for filtering
def _formatEvidence(self, elements):
    """
    Formats elements passed into parts of a query for filtering

    :param elements: iterable of evidence messages
    :return: "(...)" clause OR-ing one filter per evidence item
    """
    elementClause = None
    filters = []
    for evidence in elements:
        if evidence.description:
            # Free-text description becomes a regex filter on the label.
            elementClause = 'regex(?{}, "{}")'.format(
                'environment_label', evidence.description)
        # NOTE(review): attribute access in the guard vs subscript access
        # in the loop below — confirm `evidence` supports both styles.
        if (hasattr(evidence, 'externalIdentifiers') and
                evidence.externalIdentifiers):
            # TODO will this pick up > 1 externalIdentifiers ?
            for externalIdentifier in evidence['externalIdentifiers']:
                exid_clause = self._formatExternalIdentifier(
                    externalIdentifier, 'environment')
                # cleanup parens from _formatExternalIdentifier method
                elementClause = exid_clause[1:-1]
        # NOTE(review): elementClause is not reset per evidence item, so a
        # filterless item re-appends the previous clause — confirm intent.
        if elementClause:
            filters.append(elementClause)
    elementClause = "({})".format(" || ".join(filters))
    return elementClause |
Given a url identifier return identifier portion
Leverages prefixes already in graph namespace
Returns None if no match
Ex. "http://www.drugbank.ca/drugs/DB01268" -> "DB01268"
def _getIdentifier(self, url):
"""
Given a url identifier return identifier portion
Leverages prefixes already in graph namespace
Returns None if no match
Ex. "http://www.drugbank.ca/drugs/DB01268" -> "DB01268"
"""
for prefix, namespace in self._rdfGraph.namespaces():
if namespace in url:
return url.replace(namespace, '') |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.