def _parseAttrs(self, attrsStr):
"""
Parse the attributes and values
"""
attributes = dict()
for attrStr in self.SPLIT_ATTR_COL_RE.split(attrsStr):
name, vals = self._parseAttrVal(attrStr)
if name in attributes:
raise GFF3Exception(
"duplicated attribute name: {}".format(name),
self.fileName, self.lineNumber)
attributes[name] = vals
        return attributes
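For context, a GFF3 attribute column looks like "ID=mRNA00001;Parent=gene00001": semicolon-separated name=value pairs whose values may be comma-separated lists. A minimal standalone sketch of the helpers assumed above; SPLIT_ATTR_COL_RE and _parseAttrVal are not shown in this excerpt, so these bodies are illustrative, not the real ones:

import re
import urllib

SPLIT_ATTR_COL_RE = re.compile(r"; *")

def _parseAttrVal(attrStr):
    # Split "name=val1,val2" into the name and its percent-decoded values.
    name, _, valsStr = attrStr.partition("=")
    return name, [urllib.unquote(v) for v in valsStr.split(",")]

print(_parseAttrVal("Parent=mRNA00001,mRNA00002"))
# -> ('Parent', ['mRNA00001', 'mRNA00002'])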
def _parseRecord(self, gff3Set, line):
"""
Parse one record.
"""
row = line.split("\t")
if len(row) != self.GFF3_NUM_COLS:
raise GFF3Exception(
"Wrong number of columns, expected {}, got {}".format(
self.GFF3_NUM_COLS, len(row)),
self.fileName, self.lineNumber)
feature = Feature(
urllib.unquote(row[0]),
urllib.unquote(row[1]),
urllib.unquote(row[2]),
int(row[3]), int(row[4]),
row[5], row[6], row[7],
self._parseAttrs(row[8]))
        gff3Set.add(feature)
def parse(self):
"""
Run the parse and return the resulting Gff3Set object.
"""
fh = self._open()
try:
gff3Set = Gff3Set(self.fileName)
for line in fh:
self.lineNumber += 1
self._parseLine(gff3Set, line[0:-1])
finally:
fh.close()
gff3Set.linkChildFeaturesToParents()
        return gff3Set
def addDataset(self, dataset):
"""
Adds the specified dataset to this data repository.
"""
id_ = dataset.getId()
self._datasetIdMap[id_] = dataset
self._datasetNameMap[dataset.getLocalId()] = dataset
        self._datasetIds.append(id_)
def addReferenceSet(self, referenceSet):
"""
Adds the specified reference set to this data repository.
"""
id_ = referenceSet.getId()
self._referenceSetIdMap[id_] = referenceSet
self._referenceSetNameMap[referenceSet.getLocalId()] = referenceSet
        self._referenceSetIds.append(id_)
def addOntology(self, ontology):
"""
Add an ontology map to this data repository.
"""
self._ontologyNameMap[ontology.getName()] = ontology
self._ontologyIdMap[ontology.getId()] = ontology
        self._ontologyIds.append(ontology.getId())
def getPeer(self, url):
"""
        Select the first peer in the datarepo with the given URL, simulating
        the behavior of selecting by URL. This is only used during testing.
"""
peers = filter(lambda x: x.getUrl() == url, self.getPeers())
if len(peers) == 0:
raise exceptions.PeerNotFoundException(url)
        return peers[0]
def getDataset(self, id_):
"""
Returns a dataset with the specified ID, or raises a
DatasetNotFoundException if it does not exist.
"""
if id_ not in self._datasetIdMap:
raise exceptions.DatasetNotFoundException(id_)
        return self._datasetIdMap[id_]
def getDatasetByName(self, name):
"""
Returns the dataset with the specified name.
"""
if name not in self._datasetNameMap:
raise exceptions.DatasetNameNotFoundException(name)
        return self._datasetNameMap[name]
def getOntology(self, id_):
"""
Returns the ontology with the specified ID.
"""
if id_ not in self._ontologyIdMap:
raise exceptions.OntologyNotFoundException(id_)
        return self._ontologyIdMap[id_]
def getOntologyByName(self, name):
"""
        Returns the ontology with the specified name.
"""
if name not in self._ontologyNameMap:
raise exceptions.OntologyNameNotFoundException(name)
        return self._ontologyNameMap[name]
def getReferenceSet(self, id_):
"""
        Returns the ReferenceSet with the specified ID, or raises a
ReferenceSetNotFoundException if it does not exist.
"""
if id_ not in self._referenceSetIdMap:
raise exceptions.ReferenceSetNotFoundException(id_)
        return self._referenceSetIdMap[id_]
def getReferenceSetByName(self, name):
"""
Returns the reference set with the specified name.
"""
if name not in self._referenceSetNameMap:
raise exceptions.ReferenceSetNameNotFoundException(name)
        return self._referenceSetNameMap[name]
def getReadGroupSet(self, id_):
"""
Returns the readgroup set with the specified ID.
"""
compoundId = datamodel.ReadGroupSetCompoundId.parse(id_)
dataset = self.getDataset(compoundId.dataset_id)
        return dataset.getReadGroupSet(id_)
def getVariantSet(self, id_):
"""
        Returns the variant set with the specified ID.
"""
compoundId = datamodel.VariantSetCompoundId.parse(id_)
dataset = self.getDataset(compoundId.dataset_id)
        return dataset.getVariantSet(id_)
def printSummary(self):
"""
Prints a summary of this data repository to stdout.
"""
print("Ontologies:")
for ontology in self.getOntologys():
print(
"",
ontology.getOntologyPrefix(),
ontology.getName(),
ontology.getDataUrl(),
sep="\t")
print("ReferenceSets:")
for referenceSet in self.getReferenceSets():
print(
"", referenceSet.getLocalId(), referenceSet.getId(),
referenceSet.getDescription(), referenceSet.getDataUrl(),
sep="\t")
for reference in referenceSet.getReferences():
print(
"\t", reference.getLocalId(), reference.getId(),
sep="\t")
print("Datasets:")
for dataset in self.getDatasets():
print(
"", dataset.getLocalId(), dataset.getId(),
dataset.getDescription(), sep="\t")
print("\tReadGroupSets:")
for readGroupSet in dataset.getReadGroupSets():
print(
"\t", readGroupSet.getLocalId(),
readGroupSet.getReferenceSet().getLocalId(),
readGroupSet.getId(),
readGroupSet.getDataUrl(), sep="\t")
for readGroup in readGroupSet.getReadGroups():
print(
"\t\t", readGroup.getId(), readGroup.getLocalId(),
sep="\t")
print("\tVariantSets:")
for variantSet in dataset.getVariantSets():
print(
"\t", variantSet.getLocalId(),
variantSet.getReferenceSet().getLocalId(),
variantSet.getId(),
sep="\t")
if variantSet.getNumVariantAnnotationSets() > 0:
print("\t\tVariantAnnotationSets:")
for vas in variantSet.getVariantAnnotationSets():
print(
"\t\t", vas.getLocalId(),
vas.getAnnotationType(),
vas.getOntology().getName(), sep="\t")
print("\tFeatureSets:")
for featureSet in dataset.getFeatureSets():
print(
"\t", featureSet.getLocalId(),
featureSet.getReferenceSet().getLocalId(),
featureSet.getOntology().getName(),
featureSet.getId(),
sep="\t")
print("\tContinuousSets:")
for continuousSet in dataset.getContinuousSets():
print(
"\t", continuousSet.getLocalId(),
continuousSet.getReferenceSet().getLocalId(),
continuousSet.getId(),
sep="\t")
print("\tPhenotypeAssociationSets:")
for phenotypeAssociationSet in \
dataset.getPhenotypeAssociationSets():
print(
"\t", phenotypeAssociationSet.getLocalId(),
phenotypeAssociationSet.getParentContainer().getId(),
sep="\t")
# TODO - please improve this listing
print("\tRnaQuantificationSets:")
for rna_quantification_set in dataset.getRnaQuantificationSets():
print(
"\t", rna_quantification_set.getLocalId(),
rna_quantification_set.getId(), sep="\t")
for quant in rna_quantification_set.getRnaQuantifications():
print(
"\t\t", quant.getLocalId(),
                    quant._description,
                    ",".join(quant._readGroupIds),
                    ",".join(quant._featureSetIds), sep="\t")
def allReadGroups(self):
"""
Return an iterator over all read groups in the data repo
"""
for dataset in self.getDatasets():
for readGroupSet in dataset.getReadGroupSets():
for readGroup in readGroupSet.getReadGroups():
                    yield readGroup
def allFeatures(self):
"""
Return an iterator over all features in the data repo
"""
for dataset in self.getDatasets():
for featureSet in dataset.getFeatureSets():
for feature in featureSet.getFeatures():
                    yield feature
def allCallSets(self):
"""
Return an iterator over all call sets in the data repo
"""
for dataset in self.getDatasets():
for variantSet in dataset.getVariantSets():
for callSet in variantSet.getCallSets():
                    yield callSet
def allVariantAnnotationSets(self):
"""
Return an iterator over all variant annotation sets
in the data repo
"""
for dataset in self.getDatasets():
for variantSet in dataset.getVariantSets():
for vaSet in variantSet.getVariantAnnotationSets():
                    yield vaSet
def allRnaQuantifications(self):
"""
Return an iterator over all rna quantifications
"""
for dataset in self.getDatasets():
for rnaQuantificationSet in dataset.getRnaQuantificationSets():
for rnaQuantification in \
rnaQuantificationSet.getRnaQuantifications():
                    yield rnaQuantification
def allExpressionLevels(self):
"""
Return an iterator over all expression levels
"""
for dataset in self.getDatasets():
for rnaQuantificationSet in dataset.getRnaQuantificationSets():
for rnaQuantification in \
rnaQuantificationSet.getRnaQuantifications():
for expressionLevel in \
rnaQuantification.getExpressionLevels():
                        yield expressionLevel
def getPeer(self, url):
"""
        Finds a peer by URL and returns the first peer record with that URL.
"""
peers = list(models.Peer.select().where(models.Peer.url == url))
if len(peers) == 0:
raise exceptions.PeerNotFoundException(url)
        return peers[0]
def getPeers(self, offset=0, limit=1000):
"""
        Get the list of peers using an SQL offset and limit. Returns a list
        of peer datamodel objects.
"""
select = models.Peer.select().order_by(
models.Peer.url).limit(limit).offset(offset)
        return [peers.Peer(p.url, record=p) for p in select]
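A hedged usage sketch of the offset/limit semantics above: paging through every peer in fixed-size chunks. `repo` is assumed to be an instance of the repository class.

def iterAllPeers(repo, pageSize=1000):
    # Yield every peer, fetching one page at a time.
    offset = 0
    while True:
        page = repo.getPeers(offset=offset, limit=pageSize)
        if not page:
            break
        for peer in page:
            yield peer
        offset += pageSize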
def tableToTsv(self, model):
"""
Takes a model class and attempts to create a table in TSV format
that can be imported into a spreadsheet program.
"""
first = True
for item in model.select():
if first:
header = "".join(
["{}\t".format(x) for x in model._meta.fields.keys()])
print(header)
first = False
row = "".join(
["{}\t".format(
getattr(item, key)) for key in model._meta.fields.keys()])
            print(row)
def clearAnnouncements(self):
"""
Flushes the announcement table.
"""
try:
q = models.Announcement.delete().where(
models.Announcement.id > 0)
q.execute()
except Exception as e:
            raise exceptions.RepoManagerException(e)
def insertAnnouncement(self, announcement):
"""
Adds an announcement to the registry for later analysis.
"""
url = announcement.get('url', None)
try:
peers.Peer(url)
        except Exception:
raise exceptions.BadUrlException(url)
try:
# TODO get more details about the user agent
models.Announcement.create(
url=announcement.get('url'),
attributes=json.dumps(announcement.get('attributes', {})),
remote_addr=announcement.get('remote_addr', None),
user_agent=announcement.get('user_agent', None))
except Exception as e:
            raise exceptions.RepoManagerException(e)
def open(self, mode=MODE_READ):
"""
Opens this repo in the specified mode.
TODO: figure out the correct semantics of this and document
the intended future behaviour as well as the current
transitional behaviour.
"""
if mode not in [MODE_READ, MODE_WRITE]:
error = "Open mode must be '{}' or '{}'".format(
MODE_READ, MODE_WRITE)
raise ValueError(error)
self._openMode = mode
if mode == MODE_READ:
self.assertExists()
if mode == MODE_READ:
# This is part of the transitional behaviour where
# we load the whole DB into memory to get access to
# the data model.
            self.load()
def verify(self):
"""
Verifies that the data in the repository is consistent.
"""
# TODO this should emit to a log that we can configure so we can
# have verbosity levels. We should provide a way to configure
# where we look at various chromosomes and so on. This will be
# an important debug tool for administrators.
for ontology in self.getOntologys():
print(
"Verifying Ontology", ontology.getName(),
"@", ontology.getDataUrl())
            # TODO how do we verify this? Check some well-known SO terms?
for referenceSet in self.getReferenceSets():
print(
"Verifying ReferenceSet", referenceSet.getLocalId(),
"@", referenceSet.getDataUrl())
for reference in referenceSet.getReferences():
length = min(reference.getLength(), 1000)
bases = reference.getBases(0, length)
assert len(bases) == length
print(
"\tReading", length, "bases from",
reference.getLocalId())
for dataset in self.getDatasets():
print("Verifying Dataset", dataset.getLocalId())
for featureSet in dataset.getFeatureSets():
for referenceSet in self.getReferenceSets():
# TODO cycle through references?
reference = referenceSet.getReferences()[0]
print(
"\tVerifying FeatureSet",
featureSet.getLocalId(),
"with reference", reference.getLocalId())
length = min(reference.getLength(), 1000)
features = featureSet.getFeatures(
reference.getLocalId(), 0, length, None, 3)
for feature in features:
print("\t{}".format(feature))
# for continuousSet in dataset.getContinuousSets():
# -- there is no getContinuous
for readGroupSet in dataset.getReadGroupSets():
print(
"\tVerifying ReadGroupSet", readGroupSet.getLocalId(),
"@", readGroupSet.getDataUrl())
references = readGroupSet.getReferenceSet().getReferences()
# TODO should we cycle through the references? Should probably
# be an option.
reference = references[0]
max_alignments = 10
for readGroup in readGroupSet.getReadGroups():
alignments = readGroup.getReadAlignments(reference)
for i, alignment in enumerate(alignments):
if i == max_alignments:
break
print(
"\t\tRead", i, "alignments from",
readGroup.getLocalId())
for variantSet in dataset.getVariantSets():
print("\tVerifying VariantSet", variantSet.getLocalId())
max_variants = 10
max_annotations = 10
refMap = variantSet.getReferenceToDataUrlIndexMap()
for referenceName, (dataUrl, indexFile) in refMap.items():
variants = variantSet.getVariants(referenceName, 0, 2**31)
for i, variant in enumerate(variants):
if i == max_variants:
break
print(
"\t\tRead", i, "variants from reference",
referenceName, "@", dataUrl)
for annotationSet in variantSet.getVariantAnnotationSets():
print(
"\t\tVerifying VariantAnnotationSet",
annotationSet.getLocalId())
for referenceName in refMap.keys():
annotations = annotationSet.getVariantAnnotations(
referenceName, 0, 2**31)
for i, annotation in enumerate(annotations):
if i == max_annotations:
break
print(
"\t\t\tRead", i, "annotations from reference",
referenceName)
for phenotypeAssociationSet \
in dataset.getPhenotypeAssociationSets():
print("\t\tVerifying PhenotypeAssociationSet")
print(
"\t\t\t", phenotypeAssociationSet.getLocalId(),
                phenotypeAssociationSet.getParentContainer().getId(),
                sep="\t")
def insertOntology(self, ontology):
"""
Inserts the specified ontology into this repository.
"""
try:
models.Ontology.create(
id=ontology.getName(),
name=ontology.getName(),
dataurl=ontology.getDataUrl(),
ontologyprefix=ontology.getOntologyPrefix())
except Exception:
raise exceptions.DuplicateNameException(
                ontology.getName())
def removeOntology(self, ontology):
"""
Removes the specified ontology term map from this repository.
"""
        q = models.Ontology.delete().where(
            models.Ontology.id == ontology.getId())
        q.execute()
def insertReference(self, reference):
"""
Inserts the specified reference into this repository.
"""
models.Reference.create(
id=reference.getId(),
referencesetid=reference.getParentContainer().getId(),
name=reference.getLocalId(),
length=reference.getLength(),
isderived=reference.getIsDerived(),
species=json.dumps(reference.getSpecies()),
md5checksum=reference.getMd5Checksum(),
sourceaccessions=json.dumps(reference.getSourceAccessions()),
            sourceuri=reference.getSourceUri())
def insertReferenceSet(self, referenceSet):
"""
Inserts the specified referenceSet into this repository.
"""
try:
models.Referenceset.create(
id=referenceSet.getId(),
name=referenceSet.getLocalId(),
description=referenceSet.getDescription(),
assemblyid=referenceSet.getAssemblyId(),
isderived=referenceSet.getIsDerived(),
species=json.dumps(referenceSet.getSpecies()),
md5checksum=referenceSet.getMd5Checksum(),
sourceaccessions=json.dumps(
referenceSet.getSourceAccessions()),
sourceuri=referenceSet.getSourceUri(),
dataurl=referenceSet.getDataUrl())
for reference in referenceSet.getReferences():
self.insertReference(reference)
except Exception:
raise exceptions.DuplicateNameException(
                referenceSet.getLocalId())
def insertDataset(self, dataset):
"""
Inserts the specified dataset into this repository.
"""
try:
models.Dataset.create(
id=dataset.getId(),
name=dataset.getLocalId(),
description=dataset.getDescription(),
attributes=json.dumps(dataset.getAttributes()))
except Exception:
raise exceptions.DuplicateNameException(
                dataset.getLocalId())
def removeDataset(self, dataset):
"""
Removes the specified dataset from this repository. This performs
a cascading removal of all items within this dataset.
"""
for datasetRecord in models.Dataset.select().where(
models.Dataset.id == dataset.getId()):
            datasetRecord.delete_instance(recursive=True)
def removePhenotypeAssociationSet(self, phenotypeAssociationSet):
"""
Remove a phenotype association set from the repo
"""
q = models.Phenotypeassociationset.delete().where(
models.Phenotypeassociationset.id ==
phenotypeAssociationSet.getId())
        q.execute()
def removeFeatureSet(self, featureSet):
"""
Removes the specified featureSet from this repository.
"""
q = models.Featureset.delete().where(
models.Featureset.id == featureSet.getId())
        q.execute()
def removeContinuousSet(self, continuousSet):
"""
Removes the specified continuousSet from this repository.
"""
q = models.ContinuousSet.delete().where(
models.ContinuousSet.id == continuousSet.getId())
        q.execute()
def insertReadGroup(self, readGroup):
"""
Inserts the specified readGroup into the DB.
"""
statsJson = json.dumps(protocol.toJsonDict(readGroup.getStats()))
experimentJson = json.dumps(
protocol.toJsonDict(readGroup.getExperiment()))
try:
models.Readgroup.create(
id=readGroup.getId(),
readgroupsetid=readGroup.getParentContainer().getId(),
name=readGroup.getLocalId(),
predictedinsertedsize=readGroup.getPredictedInsertSize(),
samplename=readGroup.getSampleName(),
description=readGroup.getDescription(),
stats=statsJson,
experiment=experimentJson,
biosampleid=readGroup.getBiosampleId(),
attributes=json.dumps(readGroup.getAttributes()))
except Exception as e:
            raise exceptions.RepoManagerException(e)
def removeReadGroupSet(self, readGroupSet):
"""
Removes the specified readGroupSet from this repository. This performs
a cascading removal of all items within this readGroupSet.
"""
for readGroupSetRecord in models.Readgroupset.select().where(
models.Readgroupset.id == readGroupSet.getId()):
            readGroupSetRecord.delete_instance(recursive=True)
def removeVariantSet(self, variantSet):
"""
Removes the specified variantSet from this repository. This performs
a cascading removal of all items within this variantSet.
"""
for variantSetRecord in models.Variantset.select().where(
models.Variantset.id == variantSet.getId()):
            variantSetRecord.delete_instance(recursive=True)
def removeBiosample(self, biosample):
"""
Removes the specified biosample from this repository.
"""
q = models.Biosample.delete().where(
models.Biosample.id == biosample.getId())
        q.execute()
def removeIndividual(self, individual):
"""
Removes the specified individual from this repository.
"""
q = models.Individual.delete().where(
models.Individual.id == individual.getId())
        q.execute()
def insertReadGroupSet(self, readGroupSet):
"""
        Inserts the specified readGroupSet into this repository.
"""
programsJson = json.dumps(
[protocol.toJsonDict(program) for program in
readGroupSet.getPrograms()])
statsJson = json.dumps(protocol.toJsonDict(readGroupSet.getStats()))
try:
models.Readgroupset.create(
id=readGroupSet.getId(),
datasetid=readGroupSet.getParentContainer().getId(),
referencesetid=readGroupSet.getReferenceSet().getId(),
name=readGroupSet.getLocalId(),
programs=programsJson,
stats=statsJson,
dataurl=readGroupSet.getDataUrl(),
indexfile=readGroupSet.getIndexFile(),
attributes=json.dumps(readGroupSet.getAttributes()))
for readGroup in readGroupSet.getReadGroups():
self.insertReadGroup(readGroup)
except Exception as e:
            raise exceptions.RepoManagerException(e)
def removeReferenceSet(self, referenceSet):
"""
Removes the specified referenceSet from this repository. This performs
a cascading removal of all references within this referenceSet.
However, it does not remove any of the ReadGroupSets or items that
refer to this ReferenceSet. These must be deleted before the
referenceSet can be removed.
"""
try:
q = models.Reference.delete().where(
models.Reference.referencesetid == referenceSet.getId())
q.execute()
q = models.Referenceset.delete().where(
models.Referenceset.id == referenceSet.getId())
q.execute()
except Exception:
msg = ("Unable to delete reference set. "
"There are objects currently in the registry which are "
"aligned against it. Remove these objects before removing "
"the reference set.")
            raise exceptions.RepoManagerException(msg)
def insertVariantAnnotationSet(self, variantAnnotationSet):
"""
        Inserts the specified variantAnnotationSet into this repository.
"""
analysisJson = json.dumps(
protocol.toJsonDict(variantAnnotationSet.getAnalysis()))
try:
models.Variantannotationset.create(
id=variantAnnotationSet.getId(),
variantsetid=variantAnnotationSet.getParentContainer().getId(),
ontologyid=variantAnnotationSet.getOntology().getId(),
name=variantAnnotationSet.getLocalId(),
analysis=analysisJson,
annotationtype=variantAnnotationSet.getAnnotationType(),
created=variantAnnotationSet.getCreationTime(),
updated=variantAnnotationSet.getUpdatedTime(),
attributes=json.dumps(variantAnnotationSet.getAttributes()))
except Exception as e:
            raise exceptions.RepoManagerException(e)
def insertCallSet(self, callSet):
"""
        Inserts the specified callSet into this repository.
"""
try:
models.Callset.create(
id=callSet.getId(),
name=callSet.getLocalId(),
variantsetid=callSet.getParentContainer().getId(),
biosampleid=callSet.getBiosampleId(),
attributes=json.dumps(callSet.getAttributes()))
except Exception as e:
            raise exceptions.RepoManagerException(e)
def insertVariantSet(self, variantSet):
"""
        Inserts the specified variantSet into this repository.
"""
# We cheat a little here with the VariantSetMetadata, and encode these
# within the table as a JSON dump. These should really be stored in
# their own table
metadataJson = json.dumps(
[protocol.toJsonDict(metadata) for metadata in
variantSet.getMetadata()])
urlMapJson = json.dumps(variantSet.getReferenceToDataUrlIndexMap())
try:
models.Variantset.create(
id=variantSet.getId(),
datasetid=variantSet.getParentContainer().getId(),
referencesetid=variantSet.getReferenceSet().getId(),
name=variantSet.getLocalId(),
created=datetime.datetime.now(),
updated=datetime.datetime.now(),
metadata=metadataJson,
dataurlindexmap=urlMapJson,
attributes=json.dumps(variantSet.getAttributes()))
except Exception as e:
raise exceptions.RepoManagerException(e)
for callSet in variantSet.getCallSets():
            self.insertCallSet(callSet)
def insertFeatureSet(self, featureSet):
"""
        Inserts the specified featureSet into this repository.
"""
# TODO add support for info and sourceUri fields.
try:
models.Featureset.create(
id=featureSet.getId(),
datasetid=featureSet.getParentContainer().getId(),
referencesetid=featureSet.getReferenceSet().getId(),
ontologyid=featureSet.getOntology().getId(),
name=featureSet.getLocalId(),
dataurl=featureSet.getDataUrl(),
attributes=json.dumps(featureSet.getAttributes()))
except Exception as e:
            raise exceptions.RepoManagerException(e)
def insertContinuousSet(self, continuousSet):
"""
        Inserts the specified continuousSet into this repository.
"""
# TODO add support for info and sourceUri fields.
try:
models.ContinuousSet.create(
id=continuousSet.getId(),
datasetid=continuousSet.getParentContainer().getId(),
referencesetid=continuousSet.getReferenceSet().getId(),
name=continuousSet.getLocalId(),
dataurl=continuousSet.getDataUrl(),
attributes=json.dumps(continuousSet.getAttributes()))
except Exception as e:
            raise exceptions.RepoManagerException(e)
def insertBiosample(self, biosample):
"""
Inserts the specified Biosample into this repository.
"""
try:
models.Biosample.create(
id=biosample.getId(),
datasetid=biosample.getParentContainer().getId(),
name=biosample.getLocalId(),
description=biosample.getDescription(),
disease=json.dumps(biosample.getDisease()),
created=biosample.getCreated(),
updated=biosample.getUpdated(),
individualid=biosample.getIndividualId(),
attributes=json.dumps(biosample.getAttributes()),
individualAgeAtCollection=json.dumps(
biosample.getIndividualAgeAtCollection()))
except Exception:
raise exceptions.DuplicateNameException(
biosample.getLocalId(),
                biosample.getParentContainer().getLocalId())
def insertIndividual(self, individual):
"""
Inserts the specified individual into this repository.
"""
try:
models.Individual.create(
id=individual.getId(),
                datasetid=individual.getParentContainer().getId(),
name=individual.getLocalId(),
description=individual.getDescription(),
created=individual.getCreated(),
updated=individual.getUpdated(),
species=json.dumps(individual.getSpecies()),
sex=json.dumps(individual.getSex()),
attributes=json.dumps(individual.getAttributes()))
except Exception:
raise exceptions.DuplicateNameException(
individual.getLocalId(),
                individual.getParentContainer().getLocalId())
def insertPhenotypeAssociationSet(self, phenotypeAssociationSet):
"""
Inserts the specified phenotype annotation set into this repository.
"""
datasetId = phenotypeAssociationSet.getParentContainer().getId()
attributes = json.dumps(phenotypeAssociationSet.getAttributes())
try:
models.Phenotypeassociationset.create(
id=phenotypeAssociationSet.getId(),
name=phenotypeAssociationSet.getLocalId(),
datasetid=datasetId,
dataurl=phenotypeAssociationSet._dataUrl,
attributes=attributes)
except Exception:
raise exceptions.DuplicateNameException(
                phenotypeAssociationSet.getParentContainer().getId())
def insertRnaQuantificationSet(self, rnaQuantificationSet):
"""
        Inserts the specified rnaQuantificationSet into this repository.
"""
try:
models.Rnaquantificationset.create(
id=rnaQuantificationSet.getId(),
datasetid=rnaQuantificationSet.getParentContainer().getId(),
referencesetid=rnaQuantificationSet.getReferenceSet().getId(),
name=rnaQuantificationSet.getLocalId(),
dataurl=rnaQuantificationSet.getDataUrl(),
attributes=json.dumps(rnaQuantificationSet.getAttributes()))
except Exception:
raise exceptions.DuplicateNameException(
rnaQuantificationSet.getLocalId(),
                rnaQuantificationSet.getParentContainer().getLocalId())
def removeRnaQuantificationSet(self, rnaQuantificationSet):
"""
Removes the specified rnaQuantificationSet from this repository. This
performs a cascading removal of all items within this
rnaQuantificationSet.
"""
q = models.Rnaquantificationset.delete().where(
models.Rnaquantificationset.id == rnaQuantificationSet.getId())
        q.execute()
def insertPeer(self, peer):
"""
Accepts a peer datamodel object and adds it to the registry.
"""
try:
models.Peer.create(
url=peer.getUrl(),
attributes=json.dumps(peer.getAttributes()))
except Exception as e:
            raise exceptions.RepoManagerException(e)
def removePeer(self, url):
"""
Remove peers by URL.
"""
q = models.Peer.delete().where(
models.Peer.url == url)
        q.execute()
def initialise(self):
"""
Initialise this data repository, creating any necessary directories
and file paths.
"""
self._checkWriteMode()
self._createSystemTable()
self._createNetworkTables()
self._createOntologyTable()
self._createReferenceSetTable()
self._createReferenceTable()
self._createDatasetTable()
self._createReadGroupSetTable()
self._createReadGroupTable()
self._createCallSetTable()
self._createVariantSetTable()
self._createVariantAnnotationSetTable()
self._createFeatureSetTable()
self._createContinuousSetTable()
self._createBiosampleTable()
self._createIndividualTable()
self._createPhenotypeAssociationSetTable()
        self._createRnaQuantificationSetTable()
def load(self):
"""
Loads this data repository into memory.
"""
self._readSystemTable()
self._readOntologyTable()
self._readReferenceSetTable()
self._readReferenceTable()
self._readDatasetTable()
self._readReadGroupSetTable()
self._readReadGroupTable()
self._readVariantSetTable()
self._readCallSetTable()
self._readVariantAnnotationSetTable()
self._readFeatureSetTable()
self._readContinuousSetTable()
self._readBiosampleTable()
self._readIndividualTable()
self._readPhenotypeAssociationSetTable()
        self._readRnaQuantificationSetTable()
def populateFromRow(self, featureSetRecord):
"""
Populates the instance variables of this FeatureSet from the specified
DB row.
"""
self._dbFilePath = featureSetRecord.dataurl
self.setAttributesJson(featureSetRecord.attributes)
        self.populateFromFile(self._dbFilePath)
def populateFromFile(self, dataUrl):
"""
Populates the instance variables of this FeatureSet from the specified
data URL.
        Initializes the dataset using the passed dict of sources
        [{source, format}]; see rdflib.parse() for more.
        If a path is set, this backend will load itself.
"""
self._dbFilePath = dataUrl
# initialize graph
self._rdfGraph = rdflib.ConjunctiveGraph()
# save the path
self._dataUrl = dataUrl
self._scanDataFiles(self._dataUrl, ['*.ttl'])
# extract version
cgdTTL = rdflib.URIRef("http://data.monarchinitiative.org/ttl/cgd.ttl")
versionInfo = rdflib.URIRef(
u'http://www.w3.org/2002/07/owl#versionInfo')
self._version = None
for _, _, obj in self._rdfGraph.triples((cgdTTL, versionInfo, None)):
self._version = obj.toPython()
# setup location cache
        self._initializeLocationCache()
def getFeature(self, compoundId):
"""
        Find a feature and return its GA4GH representation, using the
        compoundId as the featureId.
"""
feature = self._getFeatureById(compoundId.featureId)
feature.id = str(compoundId)
        return feature
def _getFeatureById(self, featureId):
"""
        Find a feature and return its GA4GH representation, using the
        'native' id as the featureId.
"""
featureRef = rdflib.URIRef(featureId)
featureDetails = self._detailTuples([featureRef])
feature = {}
for detail in featureDetails:
feature[detail['predicate']] = []
for detail in featureDetails:
feature[detail['predicate']].append(detail['object'])
pbFeature = protocol.Feature()
term = protocol.OntologyTerm()
# Schema for feature only supports one type of `type`
# here we default to first OBO defined
for featureType in sorted(feature[TYPE]):
if "obolibrary" in featureType:
term.term = self._featureTypeLabel(featureType)
term.term_id = featureType
pbFeature.feature_type.MergeFrom(term)
break
pbFeature.id = featureId
# Schema for feature only supports one type of `name` `symbol`
# here we default to shortest for symbol and longest for name
feature[LABEL].sort(key=len)
pbFeature.gene_symbol = feature[LABEL][0]
pbFeature.name = feature[LABEL][-1]
pbFeature.attributes.MergeFrom(protocol.Attributes())
for key in feature:
for val in sorted(feature[key]):
pbFeature.attributes.attr[key].values.add().string_value = val
if featureId in self._locationMap:
location = self._locationMap[featureId]
pbFeature.reference_name = location["chromosome"]
pbFeature.start = location["begin"]
pbFeature.end = location["end"]
        return pbFeature
def _filterSearchFeaturesRequest(self, reference_name, gene_symbol, name,
start, end):
"""
        Formulate a SPARQL query string based on the parameters.
"""
        filters = []
        query = self._baseQuery()
location = self._findLocation(reference_name, start, end)
if location:
filters.append("?feature = <{}>".format(location))
        if gene_symbol:
            filters.append(
                'regex(?feature_label, "{}")'.format(gene_symbol))
if name:
filters.append(
'regex(?feature_label, "{}")'.format(name))
# apply filters
filter = "FILTER ({})".format(' && '.join(filters))
if len(filters) == 0:
filter = ""
query = query.replace("#%FILTER%", filter)
        return query
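To illustrate the string assembled above: with a location match and a name filter, the "#%FILTER%" placeholder in the base query is replaced by something like the following (the URI and gene name are illustrative):

# FILTER (?feature = <http://cgd.example/region1> &&
#         regex(?feature_label, "TP53"))
#
# With no filters at all, the placeholder is replaced by the empty string,
# leaving the base query unfiltered.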
def _findLocation(self, reference_name, start, end):
"""
        Return a location key from the locationMap.
"""
try:
# TODO - sequence_annotations does not have build?
return self._locationMap['hg19'][reference_name][start][end]
        except KeyError:
            return None
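A sketch of the cache shape consulted by _findLocation(), built by _initializeLocationCache() below (values here are illustrative):

# self._locationMap = {
#     'hg19': {
#         'chr1': {100: {200: 'http://cgd.example/location/1'}},
#     },
#     ...
# }
# so _findLocation('chr1', 100, 200) would return
# 'http://cgd.example/location/1', and any miss returns None.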
def _initializeLocationCache(self):
"""
CGD uses Faldo ontology for locations, it's a bit complicated.
This function sets up an in memory cache of all locations, which
can be queried via:
locationMap[build][chromosome][begin][end] = location["_id"]
"""
# cache of locations
self._locationMap = {}
locationMap = self._locationMap
triples = self._rdfGraph.triples
Ref = rdflib.URIRef
associations = []
for subj, _, _ in triples((None, RDF.type, Ref(ASSOCIATION))):
associations.append(subj.toPython())
locationIds = []
for association in associations:
for _, _, obj in triples((Ref(association),
Ref(HAS_SUBJECT), None)):
locationIds.append(obj.toPython())
locations = []
for _id in locationIds:
location = {}
location["_id"] = _id
for subj, predicate, obj in triples((Ref(location["_id"]),
None, None)):
if not predicate.toPython() in location:
location[predicate.toPython()] = []
bisect.insort(location[predicate.toPython()], obj.toPython())
if FALDO_LOCATION in location:
locations.append(location)
for location in locations:
for _id in location[FALDO_LOCATION]:
# lookup faldo region, ensure positions are sorted
faldoLocation = {}
faldoLocation["_id"] = _id
for subj, predicate, obj in triples((Ref(faldoLocation["_id"]),
None, None)):
if not predicate.toPython() in faldoLocation:
faldoLocation[predicate.toPython()] = []
bisect.insort(faldoLocation[predicate.toPython()],
obj.toPython())
faldoBegins = []
for _id in faldoLocation[FALDO_BEGIN]:
faldoBegin = {}
faldoBegin["_id"] = _id
for subj, predicate, obj in triples(
(Ref(faldoBegin["_id"]),
None, None)):
faldoBegin[predicate.toPython()] = obj.toPython()
faldoBegins.append(faldoBegin)
                faldoReferences = []
                for faldoBegin in faldoBegins:
                    faldoReference = {}
                    faldoReference["_id"] = faldoBegin[FALDO_REFERENCE]
for subj, predicate, obj in triples(
(Ref(faldoReference["_id"]),
None, None)):
faldoReference[predicate.toPython()] = obj.toPython()
faldoReferences.append(faldoReference)
faldoEnds = []
for _id in faldoLocation[FALDO_END]:
faldoEnd = {}
faldoEnd["_id"] = _id
for subj, predicate, obj in triples((Ref(faldoEnd["_id"]),
None, None)):
faldoEnd[predicate.toPython()] = obj.toPython()
faldoEnds.append(faldoEnd)
for idx, faldoReference in enumerate(faldoReferences):
if MEMBER_OF in faldoReference:
build = faldoReference[MEMBER_OF].split('/')[-1]
chromosome = faldoReference[LABEL].split(' ')[0]
begin = faldoBegins[idx][FALDO_POSITION]
end = faldoEnds[idx][FALDO_POSITION]
if build not in locationMap:
locationMap[build] = {}
if chromosome not in locationMap[build]:
locationMap[build][chromosome] = {}
if begin not in locationMap[build][chromosome]:
locationMap[build][chromosome][begin] = {}
if end not in locationMap[build][chromosome][begin]:
locationMap[build][chromosome][begin][end] = {}
locationMap[build][chromosome][begin][end] = \
location["_id"]
locationMap[location["_id"]] = {
"build": build,
"chromosome": chromosome,
"begin": begin,
"end": end,
                        }
def addValue(self, protocolElement):
"""
Appends the specified protocolElement to the value list for this
response.
"""
self._numElements += 1
self._bufferSize += protocolElement.ByteSize()
attr = getattr(self._protoObject, self._valueListName)
obj = attr.add()
        obj.CopyFrom(protocolElement)
def isFull(self):
"""
Returns True if the response buffer is full, and False otherwise.
The buffer is full if either (1) the number of items in the value
list is >= pageSize or (2) the total length of the serialised
elements in the page is >= maxBufferSize.
If page_size or max_response_length were not set in the request
then they're not checked.
"""
return (
(self._pageSize > 0 and self._numElements >= self._pageSize) or
(self._bufferSize >= self._maxBufferSize)
        )
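The two limits above can be exercised in isolation; a minimal illustration with the instance state pulled out as plain arguments (names are illustrative):

def isFull(numElements, bufferSize, pageSize, maxBufferSize):
    return ((pageSize > 0 and numElements >= pageSize) or
            bufferSize >= maxBufferSize)

print(isFull(10, 512, 10, 4096))   # True: page size reached
print(isFull(3, 8192, 10, 4096))   # True: byte budget exhausted
print(isFull(3, 512, 0, 4096))     # False: no page size set, buffer small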
def getSerializedResponse(self):
"""
Returns a string version of the SearchResponse that has
been built by this SearchResponseBuilder.
"""
self._protoObject.next_page_token = pb.string(self._nextPageToken)
s = protocol.toJson(self._protoObject)
        return s
def populateFromRow(self, ontologyRecord):
"""
Populates this Ontology using values in the specified DB row.
"""
self._id = ontologyRecord.id
self._dataUrl = ontologyRecord.dataurl
        self._readFile()
def getGaTermByName(self, name):
"""
Returns a GA4GH OntologyTerm object by name.
:param name: name of the ontology term, ex. "gene".
:return: GA4GH OntologyTerm object.
"""
# TODO what is the correct value when we have no mapping??
termIds = self.getTermIds(name)
if len(termIds) == 0:
termId = ""
# TODO add logging for missed term translation.
else:
# TODO what is the correct behaviour here when we have multiple
# IDs matching a given name?
termId = termIds[0]
term = protocol.OntologyTerm()
term.term = name
term.term_id = termId
        return term
def _heavyQuery(variantSetId, callSetIds):
"""
Very heavy query: calls for the specified list of callSetIds
on chromosome 2 (11 pages, 90 seconds to fetch the entire thing
on a high-end desktop machine)
"""
request = protocol.SearchVariantsRequest()
request.reference_name = '2'
request.variant_set_id = variantSetId
for callSetId in callSetIds:
        request.call_set_ids.append(callSetId)
request.page_size = 100
request.end = 100000
    return request
def timeOneSearch(queryString):
"""
Returns (search result as JSON string, time elapsed during search)
"""
startTime = time.clock()
resultString = backend.runSearchVariants(queryString)
endTime = time.clock()
elapsedTime = endTime - startTime
    return resultString, elapsedTime
def benchmarkOneQuery(request, repeatLimit=3, pageLimit=3):
"""
Repeat the query several times; perhaps don't go through *all* the
pages. Returns minimum time to run backend.searchVariants() to execute
the query (as far as pageLimit allows), *not* including JSON
processing to prepare queries or parse responses.
"""
times = []
queryString = protocol.toJson(request)
for i in range(0, repeatLimit):
resultString, elapsedTime = timeOneSearch(queryString)
accruedTime = elapsedTime
pageCount = 1
token = extractNextPageToken(resultString)
# Iterate to go beyond the first page of results.
while token is not None and pageCount < pageLimit:
pageRequest = request
pageRequest.page_token = token
pageRequestString = protocol.toJson(pageRequest)
resultString, elapsedTime = timeOneSearch(pageRequestString)
accruedTime += elapsedTime
pageCount = pageCount + 1
token = extractNextPageToken(resultString)
times.append(accruedTime)
# TODO: more sophisticated statistics. Sometimes we want min(),
# sometimes mean = sum() / len(), sometimes other measures,
# perhaps exclude outliers...
# If we compute average we should throw out at least the first one.
# return sum(times[2:])/len(times[2:])
    return min(times)
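extractNextPageToken() is used above but not shown in this excerpt; a minimal sketch consistent with its use here, assuming the argument is the JSON string returned by backend.runSearchVariants() with a camelCase nextPageToken field:

import json

def extractNextPageToken(resultString):
    # Return the next page token, or None when there are no more pages.
    token = json.loads(resultString).get("nextPageToken")
    return token if token else None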
def getExceptionClass(errorCode):
"""
Converts the specified error code into the corresponding class object.
Raises a KeyError if the errorCode is not found.
"""
classMap = {}
for name, class_ in inspect.getmembers(sys.modules[__name__]):
if inspect.isclass(class_) and issubclass(class_, BaseServerException):
classMap[class_.getErrorCode()] = class_
    return classMap[errorCode]
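The lookup above is a build-a-registry-by-introspection pattern; a condensed, self-contained illustration (these class names and codes are illustrative, not the server's real exceptions):

import inspect
import sys

ERROR_CODES = {"BaseErr": 0, "NotFoundErr": 1}

class BaseErr(Exception):
    @classmethod
    def getErrorCode(cls):
        return ERROR_CODES[cls.__name__]

class NotFoundErr(BaseErr):
    pass

# Scan this module for subclasses of BaseErr and index them by error code.
classMap = dict(
    (c.getErrorCode(), c)
    for _, c in inspect.getmembers(sys.modules[__name__])
    if inspect.isclass(c) and issubclass(c, BaseErr))
print(classMap[1] is NotFoundErr)  # True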
def toProtocolElement(self):
"""
Converts this exception into the GA4GH protocol type so that
it can be communicated back to the client.
"""
error = protocol.GAException()
error.error_code = self.getErrorCode()
error.message = self.getMessage()
        return error
def _init_goterm_ref(self, rec_curr, name, lnum):
"""Initialize new reference and perform checks."""
if rec_curr is None:
return GOTerm()
msg = "PREVIOUS {REC} WAS NOT TERMINATED AS EXPECTED".format(REC=name)
        self._die(msg, lnum)
def _init_typedef(self, typedef_curr, name, lnum):
"""Initialize new typedef and perform checks."""
if typedef_curr is None:
return TypeDef()
msg = "PREVIOUS {REC} WAS NOT TERMINATED AS EXPECTED".format(REC=name)
        self._die(msg, lnum)
def _add_to_ref(self, rec_curr, line, lnum):
"""Add new fields to the current reference."""
# Written by DV Klopfenstein
# Examples of record lines containing ':' include:
# id: GO:0000002
# name: mitochondrial genome maintenance
# namespace: biological_process
# def: "The maintenance of ...
# is_a: GO:0007005 ! mitochondrion organization
mtch = re.match(r'^(\S+):\s*(\S.*)$', line)
if mtch:
field_name = mtch.group(1)
field_value = mtch.group(2)
if field_name == "id":
self._chk_none(rec_curr.id, lnum)
rec_curr.id = field_value
elif field_name == "alt_id":
rec_curr.alt_ids.append(field_value)
elif field_name == "name":
self._chk_none(rec_curr.name, lnum)
rec_curr.name = field_value
elif field_name == "namespace":
self._chk_none(rec_curr.namespace, lnum)
rec_curr.namespace = field_value
elif field_name == "is_a":
rec_curr._parents.append(field_value.split()[0])
elif field_name == "is_obsolete" and field_value == "true":
rec_curr.is_obsolete = True
elif field_name in self.optional_attrs:
self.update_rec(rec_curr, field_name, field_value)
else:
            self._die("UNEXPECTED FIELD CONTENT: {L}\n".format(L=line), lnum)
def update_rec(self, rec, name, value):
"""Update current GOTerm with optional record."""
# 'def' is a reserved word in python, do not use it as a Class attr.
if name == "def":
name = "defn"
# If we have a relationship, then we will split this into a further
# dictionary.
if hasattr(rec, name):
if name not in self.attrs_scalar:
if name not in self.attrs_nested:
getattr(rec, name).add(value)
else:
self._add_nested(rec, name, value)
else:
raise Exception("ATTR({NAME}) ALREADY SET({VAL})".format(
NAME=name, VAL=getattr(rec, name)))
else: # Initialize new GOTerm attr
if name in self.attrs_scalar:
setattr(rec, name, value)
elif name not in self.attrs_nested:
setattr(rec, name, set([value]))
else:
name = '_{:s}'.format(name)
setattr(rec, name, defaultdict(list))
                self._add_nested(rec, name, value)
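A behaviour summary for update_rec(), with the field names drawn from the OBO format:

# - 'def' is renamed to 'defn' before storage ('def' is reserved in Python).
# - scalar attrs (e.g. 'comment') may be set only once; a repeat raises.
# - plain optional attrs (e.g. 'synonym', 'xref') accumulate into a set.
# - nested attrs (only 'relationship') accumulate into a defaultdict(list)
#   stored under the mangled name '_relationship' via _add_nested().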
def _add_to_typedef(self, typedef_curr, line, lnum):
"""Add new fields to the current typedef."""
mtch = re.match(r'^(\S+):\s*(\S.*)$', line)
if mtch:
field_name = mtch.group(1)
field_value = mtch.group(2).split('!')[0].rstrip()
if field_name == "id":
self._chk_none(typedef_curr.id, lnum)
typedef_curr.id = field_value
elif field_name == "name":
self._chk_none(typedef_curr.name, lnum)
typedef_curr.name = field_value
elif field_name == "transitive_over":
typedef_curr.transitive_over.append(field_value)
elif field_name == "inverse_of":
self._chk_none(typedef_curr.inverse_of, lnum)
typedef_curr.inverse_of = field_value
# Note: there are other tags that aren't imported here.
else:
            self._die("UNEXPECTED FIELD CONTENT: {L}\n".format(L=line), lnum)
def _add_nested(self, rec, name, value):
"""Adds a term's nested attributes."""
# Remove comments and split term into typedef / target term.
(typedef, target_term) = value.split('!')[0].rstrip().split(' ')
# Save the nested term.
        getattr(rec, name)[typedef].append(target_term)
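A concrete example of the value _add_nested() expects, using the OBO relationship syntax (the trailing '!' comment is stripped):

value = "part_of GO:0007052 ! mitotic spindle organization"
typedef, target_term = value.split('!')[0].rstrip().split(' ')
print(typedef, target_term)  # -> part_of GO:0007052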
def _init_optional_attrs(self, optional_attrs):
"""Prepare to store data from user-desired optional fields.
        Not loading these optional fields by default saves space and speed.
        But allow the possibility of loading these fields, if the user
        desires, including:
comment consider def is_class_level is_metadata_tag is_transitive
relationship replaced_by subset synonym transitive_over xref
"""
# Written by DV Klopfenstein
# Required attributes are always loaded. All others are optionally loaded.
self.attrs_req = ['id', 'alt_id', 'name', 'namespace', 'is_a', 'is_obsolete']
self.attrs_scalar = ['comment', 'defn',
'is_class_level', 'is_metadata_tag',
'is_transitive', 'transitive_over']
self.attrs_nested = frozenset(['relationship'])
# Allow user to specify either: 'def' or 'defn'
# 'def' is an obo field name, but 'defn' is legal Python attribute name
fnc = lambda aopt: aopt if aopt != "defn" else "def"
if optional_attrs is None:
optional_attrs = []
elif isinstance(optional_attrs, str):
optional_attrs = [fnc(optional_attrs)] if optional_attrs not in self.attrs_req else []
elif isinstance(optional_attrs, list) or isinstance(optional_attrs, set):
optional_attrs = set([fnc(f) for f in optional_attrs if f not in self.attrs_req])
else:
raise Exception("optional_attrs arg MUST BE A str, list, or set.")
        self.optional_attrs = optional_attrs
def _die(self, msg, lnum):
"""Raise an Exception if file read is unexpected."""
raise Exception("**FATAL {FILE}({LNUM}): {MSG}\n".format(
            FILE=self.obo_file, LNUM=lnum, MSG=msg))
def write_hier_rec(self, gos_printed, out=sys.stdout,
len_dash=1, max_depth=None, num_child=None, short_prt=False,
include_only=None, go_marks=None,
depth=1, dp="-"):
"""Write hierarchy for a GO Term record."""
# Added by DV Klopfenstein
GO_id = self.id
# Shortens hierarchy report by only printing the hierarchy
# for the sub-set of user-specified GO terms which are connected.
if include_only is not None and GO_id not in include_only:
return
nrp = short_prt and GO_id in gos_printed
if go_marks is not None:
out.write('{} '.format('>' if GO_id in go_marks else ' '))
if len_dash is not None:
# Default character indicating hierarchy level is '-'.
# '=' is used to indicate a hierarchical path printed in detail previously.
letter = '-' if not nrp or not self.children else '='
dp = ''.join([letter]*depth)
out.write('{DASHES:{N}} '.format(DASHES=dp, N=len_dash))
if num_child is not None:
out.write('{N:>5} '.format(N=len(self.get_all_children())))
out.write('{GO}\tL-{L:>02}\tD-{D:>02}\t{desc}\n'.format(
GO=self.id, L=self.level, D=self.depth, desc=self.name))
# Track GOs previously printed only if needed
if short_prt:
gos_printed.add(GO_id)
# Do not print hierarchy below this turn if it has already been printed
if nrp:
return
depth += 1
if max_depth is not None and depth > max_depth:
return
for p in self.children:
p.write_hier_rec(gos_printed, out, len_dash, max_depth, num_child, short_prt,
include_only, go_marks,
                             depth, dp)
def write_hier_all(self, out=sys.stdout,
len_dash=1, max_depth=None, num_child=None, short_prt=False):
"""Write hierarchy for all GO Terms in obo file."""
# Print: [biological_process, molecular_function, and cellular_component]
for go_id in ['GO:0008150', 'GO:0003674', 'GO:0005575']:
            self.write_hier(go_id, out, len_dash, max_depth, num_child, short_prt, None)
def write_hier(self, GO_id, out=sys.stdout,
len_dash=1, max_depth=None, num_child=None, short_prt=False,
include_only=None, go_marks=None):
"""Write hierarchy for a GO Term."""
gos_printed = set()
self[GO_id].write_hier_rec(gos_printed, out, len_dash, max_depth, num_child,
                                       short_prt, include_only, go_marks)
def paths_to_top(self, term):
""" Returns all possible paths to the root node
Each path includes the term given. The order of the path is
top -> bottom, i.e. it starts with the root and ends with the
given term (inclusively).
Parameters:
-----------
- term:
the id of the GO term, where the paths begin (i.e. the
accession 'GO:0003682')
Returns:
--------
- a list of lists of GO Terms
"""
# error handling consistent with original authors
if term not in self:
print("Term %s not found!" % term, file=sys.stderr)
return
def _paths_to_top_recursive(rec):
if rec.level == 0:
return [[rec]]
paths = []
for parent in rec.parents:
top_paths = _paths_to_top_recursive(parent)
for top_path in top_paths:
top_path.append(rec)
paths.append(top_path)
return paths
go_term = self[term]
        return _paths_to_top_recursive(go_term)
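A hedged usage sketch for paths_to_top(); GODag and the obo filename are assumed from the surrounding goatools code, and the accession is illustrative:

# dag = GODag("go-basic.obo")
# for path in dag.paths_to_top("GO:0003682"):
#     print(" -> ".join(rec.id for rec in path))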
def make_graph_pydot(self, recs, nodecolor,
edgecolor, dpi,
                         draw_parents=True, draw_children=True):
        """Draw an AMIGO-style network: lineage containing one query record."""
import pydot
G = pydot.Dot(graph_type='digraph', dpi="{}".format(dpi)) # Directed Graph
edgeset = set()
usr_ids = [rec.id for rec in recs]
for rec in recs:
if draw_parents:
edgeset.update(rec.get_all_parent_edges())
if draw_children:
edgeset.update(rec.get_all_child_edges())
lw = self._label_wrap
rec_id_set = set([rec_id for endpts in edgeset for rec_id in endpts])
nodes = {str(ID):pydot.Node(
lw(ID).replace("GO:",""), # Node name
shape="box",
style="rounded, filled",
# Highlight query terms in plum:
fillcolor="beige" if ID not in usr_ids else "plum",
color=nodecolor)
for ID in rec_id_set}
# add nodes explicitly via add_node
for rec_id, node in nodes.items():
G.add_node(node)
for src, target in edgeset:
# default layout in graphviz is top->bottom, so we invert
# the direction and plot using dir="back"
G.add_edge(pydot.Edge(nodes[target], nodes[src],
shape="normal",
color=edgecolor,
label="is_a",
dir="back"))
        return G
def sqliteRowsToDicts(sqliteRows):
"""
Unpacks sqlite rows as returned by fetchall
into an array of simple dicts.
:param sqliteRows: array of rows returned from fetchall DB call
:return: array of dicts, keyed by the column names.
"""
    return map(lambda r: dict(zip(r.keys(), r)), sqliteRows)
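A self-contained illustration of sqliteRowsToDicts(); it relies on the connection's row_factory being sqlite3.Row so that rows expose keys(), and on Python 2's map() returning a list, as in the surrounding code:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.row_factory = sqlite3.Row
conn.execute("CREATE TABLE refs (name TEXT, length INTEGER)")
conn.execute("INSERT INTO refs VALUES ('chr1', 248956422)")
rows = conn.execute("SELECT * FROM refs").fetchall()
print(sqliteRowsToDicts(rows))
# -> [{'name': 'chr1', 'length': 248956422}] (key order may vary)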
def limitsSql(startIndex=0, maxResults=0):
"""
Construct a SQL LIMIT clause
"""
if startIndex and maxResults:
return " LIMIT {}, {}".format(startIndex, maxResults)
elif startIndex:
raise Exception("startIndex was provided, but maxResults was not")
elif maxResults:
return " LIMIT {}".format(maxResults)
else:
        return ""
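The three accepted argument shapes of limitsSql() and their outputs:

print(repr(limitsSql()))                             # ''
print(repr(limitsSql(maxResults=10)))                # ' LIMIT 10'
print(repr(limitsSql(startIndex=5, maxResults=10)))  # ' LIMIT 5, 10'
# limitsSql(startIndex=5) raises: startIndex without maxResults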
def iterativeFetch(query, batchSize=default_batch_size):
"""
Returns rows of a sql fetch query on demand
"""
while True:
rows = query.fetchmany(batchSize)
if not rows:
break
rowDicts = sqliteRowsToDicts(rows)
for rowDict in rowDicts:
            yield rowDict
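A hedged usage sketch for iterativeFetch(): streaming rows from a large query without materialising them all at once (table and column names are illustrative):

# cursor = conn.execute(
#     "SELECT * FROM variants WHERE reference_name = ?", ("chr2",))
# for rowDict in iterativeFetch(cursor, batchSize=100):
#     process(rowDict)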
def _parsePageToken(pageToken, numValues):
"""
Parses the specified pageToken and returns a list of the specified
number of values. Page tokens are assumed to consist of a fixed
        number of integers separated by colons. If the page token does
        not conform to this specification, raise a
        BadPageTokenException.
"""
tokens = pageToken.split(":")
if len(tokens) != numValues:
msg = "Invalid number of values in page token"
raise exceptions.BadPageTokenException(msg)
try:
values = map(int, tokens)
except ValueError:
msg = "Malformed integers in page token"
raise exceptions.BadPageTokenException(msg)
    return values
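Behaviour sketch for _parsePageToken(); a well-formed token is a fixed number of colon-separated integers:

# _parsePageToken("1000:5", 2)  -> [1000, 5]
# _parsePageToken("1000", 2)    -> BadPageTokenException (wrong count)
# _parsePageToken("a:b", 2)     -> BadPageTokenException (not integers)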
def _parseIntegerArgument(args, key, defaultValue):
"""
Attempts to parse the specified key in the specified argument
dictionary into an integer. If the argument cannot be parsed,
raises a BadRequestIntegerException. If the key is not present,
return the specified default value.
"""
ret = defaultValue
try:
if key in args:
try:
ret = int(args[key])
except ValueError:
raise exceptions.BadRequestIntegerException(key, args[key])
except TypeError:
raise Exception((key, args))
    return ret
def _initialiseIteration(self):
"""
Starts a new iteration.
"""
self._searchIterator = self._search(
self._request.start,
self._request.end if self._request.end != 0 else None)
self._currentObject = next(self._searchIterator, None)
if self._currentObject is not None:
self._nextObject = next(self._searchIterator, None)
self._searchAnchor = self._request.start
self._distanceFromAnchor = 0
firstObjectStart = self._getStart(self._currentObject)
if firstObjectStart > self._request.start:
                self._searchAnchor = firstObjectStart
def _pickUpIteration(self, searchAnchor, objectsToSkip):
"""
Picks up iteration from a previously provided page token. There are two
different phases here:
        1) We are iterating over the initial set of intervals in which start
        is < the search start coordinate.
        2) We are iterating over the remaining intervals in which start is >=
        the search start coordinate.
"""
self._searchAnchor = searchAnchor
self._distanceFromAnchor = objectsToSkip
self._searchIterator = self._search(
searchAnchor,
self._request.end if self._request.end != 0 else None)
obj = next(self._searchIterator)
if searchAnchor == self._request.start:
# This is the initial set of intervals, we just skip forward
# objectsToSkip positions
for _ in range(objectsToSkip):
obj = next(self._searchIterator)
else:
# Now, we are past this initial set of intervals.
# First, we need to skip forward over the intervals where
# start < searchAnchor, as we've seen these already.
while self._getStart(obj) < searchAnchor:
obj = next(self._searchIterator)
# Now, we skip over objectsToSkip objects such that
# start == searchAnchor
for _ in range(objectsToSkip):
if self._getStart(obj) != searchAnchor:
raise exceptions.BadPageTokenException
obj = next(self._searchIterator)
self._currentObject = obj
        self._nextObject = next(self._searchIterator, None)
def next(self):
"""
Returns the next (object, nextPageToken) pair.
"""
if self._currentObject is None:
raise StopIteration()
nextPageToken = None
if self._nextObject is not None:
start = self._getStart(self._nextObject)
# If start > the search anchor, move the search anchor. Otherwise,
# increment the distance from the anchor.
if start > self._searchAnchor:
self._searchAnchor = start
self._distanceFromAnchor = 0
else:
self._distanceFromAnchor += 1
nextPageToken = "{}:{}".format(
self._searchAnchor, self._distanceFromAnchor)
ret = self._extractProtocolObject(self._currentObject), nextPageToken
self._currentObject = self._nextObject
self._nextObject = next(self._searchIterator, None)
        return ret
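The token emitted above is "searchAnchor:distanceFromAnchor"; a sketch of the inverse parse that _pickUpIteration() relies on (the helper name here is assumed):

def parsePageToken(token):
    anchor, objectsToSkip = [int(x) for x in token.split(":")]
    return anchor, objectsToSkip

print(parsePageToken("1500:3"))  # -> (1500, 3)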
def filterVariantAnnotation(self, vann):
"""
Returns true when an annotation should be included.
"""
# TODO reintroduce feature ID search
ret = False
if len(self._effects) != 0 and not vann.transcript_effects:
return False
elif len(self._effects) == 0:
return True
for teff in vann.transcript_effects:
if self.filterEffect(teff):
ret = True
        return ret
def filterEffect(self, teff):
"""
Returns true when any of the transcript effects
are present in the request.
"""
ret = False
for effect in teff.effects:
ret = self._matchAnyEffects(effect) or ret
        return ret
def _checkIdEquality(self, requestedEffect, effect):
"""
Tests whether a requested effect and an effect
present in an annotation are equal.
"""
return self._idPresent(requestedEffect) and (
            effect.term_id == requestedEffect.term_id)