repository_name stringclasses 316 values | func_path_in_repository stringlengths 6 223 | func_name stringlengths 1 134 | language stringclasses 1 value | func_code_string stringlengths 57 65.5k | func_documentation_string stringlengths 1 46.3k | split_name stringclasses 1 value | func_code_url stringlengths 91 315 | called_functions listlengths 1 156 ⌀ | enclosing_scope stringlengths 2 1.48M |
|---|---|---|---|---|---|---|---|---|---|
praw-dev/prawcore | examples/read_only_auth_trophies.py | main | python | def main():
if len(sys.argv) != 2:
print("Usage: {} USERNAME".format(sys.argv[0]))
return 1
authenticator = prawcore.TrustedAuthenticator(
prawcore.Requestor("prawcore_read_only_example"),
os.environ["PRAWCORE_CLIENT_ID"],
os.environ["PRAWCORE_CLIENT_SECRET"],
)
authorizer = prawcore.ReadOnlyAuthorizer(authenticator)
authorizer.refresh()
user = sys.argv[1]
with prawcore.session(authorizer) as session:
data = session.request("GET", "/api/v1/user/{}/trophies".format(user))
for trophy in data["data"]["trophies"]:
description = trophy["data"]["description"]
print(
trophy["data"]["name"]
+ (" ({})".format(description) if description else "")
)
return 0 | Provide the program's entry point when directly executed. | train | https://github.com/praw-dev/prawcore/blob/b16ae88a1f2bf98095ed6fe64851cb7add7ed752/examples/read_only_auth_trophies.py#L14-L39 | [
"def session(authorizer=None):\n \"\"\"Return a :class:`Session` instance.\n\n :param authorizer: An instance of :class:`Authorizer`.\n\n \"\"\"\n return Session(authorizer=authorizer)\n",
"def refresh(self):\n \"\"\"Obtain a new ReadOnly access token.\"\"\"\n self._request_token(grant_type=\"client_credentials\")\n"
] | #!/usr/bin/env python
"""This example outputs a user's list of trophies.
This program demonstrates the use of ``prawcore.ReadOnlyAuthorizer`` that does
not require an access token to make authenticated requests to reddit.
"""
import os
import prawcore
import sys
if __name__ == "__main__":
sys.exit(main())
|
wdecoster/nanoget | nanoget/extraction_functions.py | process_summary | python | def process_summary(summaryfile, **kwargs):
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
"lengths", "quals", "barcode"]
else:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy()) | Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L12-L90 | [
"def check_existance(f):\n \"\"\"Check if the file supplied as input exists.\"\"\"\n if not opath.isfile(f):\n logging.error(\"Nanoget: File provided doesn't exist or the path is incorrect: {}\".format(f))\n sys.exit(\"File provided doesn't exist or the path is incorrect: {}\".format(f))\n",
"def reduce_memory_usage(df):\n \"\"\"reduce memory usage of the dataframe\n\n - convert runIDs to categorical\n - downcast ints and floats\n \"\"\"\n usage_pre = df.memory_usage(deep=True).sum()\n if \"runIDs\" in df:\n df.loc[:, \"runIDs\"] = df.loc[:, \"runIDs\"].astype(\"category\")\n df_int = df.select_dtypes(include=['int'])\n df_float = df.select_dtypes(include=['float'])\n df.loc[:, df_int.columns] = df_int.apply(pd.to_numeric, downcast='unsigned')\n df.loc[:, df_float.columns] = df_float.apply(pd.to_numeric, downcast='float')\n usage_post = df.memory_usage(deep=True).sum()\n logging.info(\"Reduced DataFrame memory usage from {}Mb to {}Mb\".format(\n usage_pre / 1024**2, usage_post / 1024**2))\n if usage_post > 4e9 and \"readIDs\" in df:\n logging.info(\"DataFrame of features is too big, dropping read identifiers.\")\n return df.drop([\"readIDs\"], axis=1, errors=\"ignore\")\n else:\n return df\n"
] | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb")
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
params = zip([bam] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: bam {} contains {} primary alignments.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
params = zip([cram] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: cram {} contains {} primary alignments.".format(
cram, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
"""Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length
"""
bam, chromosome = params
samfile = pysam.AlignmentFile(bam, "rb")
return [
(read.query_name,
nanomath.ave_qual(read.query_qualities),
nanomath.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary]
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
try:
return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
/ read.query_alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
def parse_MD(MDlist):
"""Parse MD string to get number of mismatches and deletions."""
return sum([len(item) for item in re.split('[0-9^]', MDlist)])
def parse_CIGAR(cigartuples):
"""Count the insertions in the read using the CIGAR string."""
return sum([item[1] for item in cigartuples if item[0] == 1])
def handle_compressed_input(inputfq, file_type="fastq"):
"""Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression
"""
ut.check_existance(inputfq)
if inputfq.endswith(('.gz', 'bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
def process_fasta(fasta, **kwargs):
"""Combine metrics extracted from a fasta file."""
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna())
def process_fastq_plain(fastq, **kwargs):
"""Combine metrics extracted from a fastq file."""
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna())
def extract_from_fastq(fq):
"""Extract metrics from a fastq file.
Return average quality and read length
"""
for rec in SeqIO.parse(fq, "fastq"):
yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
"""Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
-read_lenght
"""
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
"""Extract metrics from a fastq file.
Return identifier, read length, average quality and median quality
"""
return (rec.id,
len(rec),
nanomath.ave_qual(rec.letter_annotations["phred_quality"]),
nanomath.median_qual(rec.letter_annotations["phred_quality"]))
def info_to_dict(info):
"""Get the key-value pairs from the albacore/minknow fastq description and return dict"""
return {field.split('=')[0]: field.split('=')[1] for field in info.split(' ')[1:]}
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(nanomath.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df)
def readfq(fp):
"""Generator function adapted from https://github.com/lh3/readfq."""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last:
break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last:
break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs) # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
Quickly parse a fasta/fastq file - but makes expectations on the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
def process_fastq_minimal(fastq, **kwargs):
"""Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df)
|
wdecoster/nanoget | nanoget/extraction_functions.py | check_bam | python | def check_bam(bam, samtype="bam"):
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile | Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L93-L117 | [
"def check_existance(f):\n \"\"\"Check if the file supplied as input exists.\"\"\"\n if not opath.isfile(f):\n logging.error(\"Nanoget: File provided doesn't exist or the path is incorrect: {}\".format(f))\n sys.exit(\"File provided doesn't exist or the path is incorrect: {}\".format(f))\n"
] | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
"""
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
"lengths", "quals", "barcode"]
else:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb")
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
params = zip([bam] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: bam {} contains {} primary alignments.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
params = zip([cram] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: cram {} contains {} primary alignments.".format(
cram, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
"""Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length
"""
bam, chromosome = params
samfile = pysam.AlignmentFile(bam, "rb")
return [
(read.query_name,
nanomath.ave_qual(read.query_qualities),
nanomath.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary]
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
try:
return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
/ read.query_alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
def parse_MD(MDlist):
"""Parse MD string to get number of mismatches and deletions."""
return sum([len(item) for item in re.split('[0-9^]', MDlist)])
def parse_CIGAR(cigartuples):
"""Count the insertions in the read using the CIGAR string."""
return sum([item[1] for item in cigartuples if item[0] == 1])
def handle_compressed_input(inputfq, file_type="fastq"):
"""Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression
"""
ut.check_existance(inputfq)
if inputfq.endswith(('.gz', 'bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
def process_fasta(fasta, **kwargs):
"""Combine metrics extracted from a fasta file."""
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna())
def process_fastq_plain(fastq, **kwargs):
"""Combine metrics extracted from a fastq file."""
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna())
def extract_from_fastq(fq):
"""Extract metrics from a fastq file.
Return average quality and read length
"""
for rec in SeqIO.parse(fq, "fastq"):
yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
"""Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
-read_lenght
"""
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
"""Extract metrics from a fastq file.
Return identifier, read length, average quality and median quality
"""
return (rec.id,
len(rec),
nanomath.ave_qual(rec.letter_annotations["phred_quality"]),
nanomath.median_qual(rec.letter_annotations["phred_quality"]))
def info_to_dict(info):
"""Get the key-value pairs from the albacore/minknow fastq description and return dict"""
return {field.split('=')[0]: field.split('=')[1] for field in info.split(' ')[1:]}
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(nanomath.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df)
def readfq(fp):
"""Generator function adapted from https://github.com/lh3/readfq."""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last:
break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last:
break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs) # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
Quickly parse a fasta/fastq file - but makes expectations on the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
def process_fastq_minimal(fastq, **kwargs):
    """Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
    infastq = handle_compressed_input(fastq)
    try:
        # fq_minimal yields a trailing None sentinel; filter it out here
        records = [entry for entry in fq_minimal(infastq) if entry]
        df = pd.DataFrame(data=records, columns=["timestamp", "lengths"])
    except IndexError:
        logging.error("Fatal: Incorrect file structure for fastq_minimal")
        sys.exit("Error: file does not match expected structure for fastq_minimal")
    return ut.reduce_memory_usage(df)
|
wdecoster/nanoget | nanoget/extraction_functions.py | process_ubam | python | def process_ubam(bam, **kwargs):
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb")
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf) | Extracting metrics from unaligned bam format
Extracting lengths | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L120-L139 | [
"def reduce_memory_usage(df):\n \"\"\"reduce memory usage of the dataframe\n\n - convert runIDs to categorical\n - downcast ints and floats\n \"\"\"\n usage_pre = df.memory_usage(deep=True).sum()\n if \"runIDs\" in df:\n df.loc[:, \"runIDs\"] = df.loc[:, \"runIDs\"].astype(\"category\")\n df_int = df.select_dtypes(include=['int'])\n df_float = df.select_dtypes(include=['float'])\n df.loc[:, df_int.columns] = df_int.apply(pd.to_numeric, downcast='unsigned')\n df.loc[:, df_float.columns] = df_float.apply(pd.to_numeric, downcast='float')\n usage_post = df.memory_usage(deep=True).sum()\n logging.info(\"Reduced DataFrame memory usage from {}Mb to {}Mb\".format(\n usage_pre / 1024**2, usage_post / 1024**2))\n if usage_post > 4e9 and \"readIDs\" in df:\n logging.info(\"DataFrame of features is too big, dropping read identifiers.\")\n return df.drop([\"readIDs\"], axis=1, errors=\"ignore\")\n else:\n return df\n"
] | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
    """Extract read metrics from an albacore/guppy sequencing summary file.

    Only reads which have a >0 length are returned.

    Expected kwargs:
      readtype: "1D", "2D" or "1D2" -- selects which length/quality
                columns are read from the summary
      barcoded: bool -- additionally extract the barcode_arrangement column

    The fields below may or may not exist, depending on the type of
    sequencing performed:
    Fields 1-14 are for 1D sequencing.
    Fields 1-23 for 2D sequencing.
    Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing.
    Fields 28-38 for barcoded workflows.
    (filename, read_id, run_id, channel, start_time, duration, num_events,
    template/complement event and length/quality columns, barcode columns;
    see the ONT sequencing_summary documentation for the full list.)

    Returns a pandas DataFrame with columns readIDs, runIDs, channelIDs,
    time, duration, lengths, quals (and barcode when barcoded=True).
    """
    logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
        summaryfile, kwargs["readtype"]))
    ut.check_existance(summaryfile)
    if kwargs["readtype"] == "1D":
        cols = ["read_id", "run_id", "channel", "start_time", "duration",
                "sequence_length_template", "mean_qscore_template"]
    elif kwargs["readtype"] in ["2D", "1D2"]:
        cols = ["read_id", "run_id", "channel", "start_time", "duration",
                "sequence_length_2d", "mean_qscore_2d"]
    else:
        # BUGFIX: previously an unexpected readtype fell through and raised a
        # confusing NameError on 'cols'; fail loudly with a clear message.
        logging.error("Nanoget: unsupported readtype {}".format(kwargs["readtype"]))
        sys.exit("ERROR: unsupported readtype {}, expected 1D, 2D or 1D2".format(
            kwargs["readtype"]))
    if kwargs["barcoded"]:
        cols.append("barcode_arrangement")
        logging.info("Nanoget: Extracting metrics per barcode.")
    try:
        datadf = pd.read_csv(
            filepath_or_buffer=summaryfile,
            sep="\t",
            usecols=cols,
        )
    except ValueError:
        logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
            summaryfile, ', '.join(cols)))
        sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
            summaryfile, ', '.join(cols)))
    # NOTE(review): renaming by position assumes the file's column order
    # matches `cols` -- read_csv/usecols keeps file order; verify for
    # nonstandard summary files.
    if kwargs["barcoded"]:
        datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
                          "lengths", "quals", "barcode"]
    else:
        datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
                          "lengths", "quals"]
    logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
    return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
    """Validate a bam file and return an open pysam handle.

    The file must exist, be indexed (an index is created when missing),
    be coordinate-sorted, and -- for samtype="bam" -- contain at least
    one mapped read. Exits with an error message otherwise.
    """
    ut.check_existance(bam)
    samfile = pysam.AlignmentFile(bam, "rb")
    if not samfile.has_index():
        pysam.index(bam)
        # reopen so the freshly written index is picked up
        samfile = pysam.AlignmentFile(bam, "rb")
        logging.info("Nanoget: No index for bam file could be found, created index.")
    if samfile.header['HD']['SO'] != 'coordinate':
        logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
        sys.exit("Please use a bam file sorted by coordinate.")
    if samtype == "bam":
        logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
            bam, samfile.mapped, samfile.unmapped))
        if samfile.mapped == 0:
            logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
            sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
    return samfile
def process_bam(bam, **kwargs):
    """Collect per-read metrics from an aligned, coordinate-sorted bam.

    Fans out one worker process per reference sequence and gathers, for
    every primary alignment: read length, aligned length, read and
    aligned quality, mapping quality and percent identity.
    Returns a pandas DataFrame.
    """
    logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
    samfile = check_bam(bam)
    chromosomes = samfile.references
    params = zip([bam] * len(chromosomes), chromosomes)
    rows = []
    with cfutures.ProcessPoolExecutor() as executor:
        for chunk in executor.map(extract_from_bam, params):
            rows.extend(chunk)
    datadf = pd.DataFrame(
        data=rows,
        columns=["readIDs", "quals", "aligned_quals", "lengths",
                 "aligned_lengths", "mapQ", "percentIdentity"]) \
        .dropna(axis='columns', how='all') \
        .dropna(axis='index', how='any')
    logging.info("Nanoget: bam {} contains {} primary alignments.".format(
        bam, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
    """Collect per-read metrics from an aligned, coordinate-sorted cram.

    Fans out one worker process per reference sequence and gathers, for
    every primary alignment: read length, aligned length, read and
    aligned quality, mapping quality and percent identity.
    Returns a pandas DataFrame.
    """
    logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
    samfile = check_bam(cram, samtype="cram")
    chromosomes = samfile.references
    params = zip([cram] * len(chromosomes), chromosomes)
    rows = []
    with cfutures.ProcessPoolExecutor() as executor:
        for chunk in executor.map(extract_from_bam, params):
            rows.extend(chunk)
    datadf = pd.DataFrame(
        data=rows,
        columns=["readIDs", "quals", "aligned_quals", "lengths",
                 "aligned_lengths", "mapQ", "percentIdentity"]) \
        .dropna(axis='columns', how='all') \
        .dropna(axis='index', how='any')
    logging.info("Nanoget: cram {} contains {} primary alignments.".format(
        cram, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
    """Worker: extract metrics for one chromosome of a bam file.

    `params` is a (bam path, chromosome name) tuple. Returns a list of
    tuples (name, read quality, aligned quality, read length, aligned
    length, mapping quality, percent identity) for every primary
    alignment on that chromosome.
    """
    bam, chromosome = params
    samfile = pysam.AlignmentFile(bam, "rb")
    metrics = []
    for read in samfile.fetch(reference=chromosome, multiple_iterators=True):
        if read.is_secondary:
            continue
        metrics.append(
            (read.query_name,
             nanomath.ave_qual(read.query_qualities),
             nanomath.ave_qual(read.query_alignment_qualities),
             read.query_length,
             read.query_alignment_length,
             read.mapping_quality,
             get_pID(read)))
    return metrics
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
try:
return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
/ read.query_alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
def parse_MD(MDlist):
    """Count mismatched and deleted bases encoded in an MD tag string."""
    total = 0
    # splitting on digits and '^' leaves only the substituted/deleted bases
    for fragment in re.split('[0-9^]', MDlist):
        total += len(fragment)
    return total
def parse_CIGAR(cigartuples):
    """Sum the lengths of all insertion operations (op code 1) in a CIGAR."""
    return sum(length for operation, length in cigartuples if operation == 1)
def handle_compressed_input(inputfq, file_type="fastq"):
    """Return an open text-mode handle for a (possibly compressed) file.

    Compression is recognized from the file extension: .gz/.bgz (gzip),
    .bz2 (bzip2), or plain fastq/fasta extensions opened as-is.
    Exits with an error for any other extension.
    """
    ut.check_existance(inputfq)
    # BUGFIX: extensions normalized to include the leading dot ('bgz' and
    # 'fasta' previously matched any filename merely *ending* in those
    # letters, e.g. 'xbgz').
    if inputfq.endswith(('.gz', '.bgz')):
        import gzip
        logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
        return gzip.open(inputfq, 'rt')
    elif inputfq.endswith('.bz2'):
        import bz2
        logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
        return bz2.open(inputfq, 'rt')
    elif inputfq.endswith(('.fastq', '.fq', '.fasta', '.fa', '.fas')):
        return open(inputfq, 'r')
    else:
        logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
        sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
                 'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
def process_fasta(fasta, **kwargs):
    """Extract read lengths from a fasta file into a DataFrame."""
    logging.info("Nanoget: Starting to collect statistics from a fasta file.")
    inputfasta = handle_compressed_input(fasta, file_type="fasta")
    lengths = [len(rec) for rec in SeqIO.parse(inputfasta, "fasta")]
    return ut.reduce_memory_usage(
        pd.DataFrame(data=lengths, columns=["lengths"]).dropna())
def process_fastq_plain(fastq, **kwargs):
    """Extract average qualities and lengths from a plain fastq file."""
    logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
    inputfastq = handle_compressed_input(fastq)
    metrics = [entry for entry in extract_from_fastq(inputfastq) if entry]
    return ut.reduce_memory_usage(
        pd.DataFrame(data=metrics, columns=["quals", "lengths"]).dropna())
def extract_from_fastq(fq):
    """Extract metrics from a fastq file.

    Yields an (average quality, read length) tuple per record.
    """
    for rec in SeqIO.parse(fq, "fastq"):
        yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
    """Yield (id, length, mean quality, median quality) per fastq record.

    Records are farmed out to a pool of `threads` worker processes.
    """
    logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
    inputfastq = handle_compressed_input(fastq)
    with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
        yield from executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq"))
    logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
    """Return (identifier, length, mean quality, median quality) for one record."""
    phred = rec.letter_annotations["phred_quality"]
    return (rec.id,
            len(rec),
            nanomath.ave_qual(phred),
            nanomath.median_qual(phred))
def info_to_dict(info):
    """Parse the key=value pairs of an albacore/minknow fastq description.

    The first whitespace-separated token (the read id) is skipped; values
    stay strings. Note: only the part before the second '=' of each field
    is kept as the value, matching the historical behaviour.
    """
    parsed = {}
    for field in info.split(' ')[1:]:
        parts = field.split('=')
        parsed[parts[0]] = parts[1]
    return parsed
def process_fastq_rich(fastq, **kwargs):
    """Extract metrics from a richer fastq file.

    Extract information from fastq files generated by albacore or MinKNOW,
    containing richer information in the header (key-value pairs)
    read=<int> [72]
    ch=<int> [159]
    start_time=<timestamp> [2016-07-15T14:23:22Z]  # UTC ISO 8601 ISO 3339 timestamp
    Z indicates UTC time, T is the delimiter between date expression and time expression
    dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
    -> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())

    Returns a DataFrame with quals, lengths, channelIDs, timestamp and
    runIDs; exits when a record lacks 'ch', 'start_time' or 'runid'.
    """
    logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
    inputfastq = handle_compressed_input(fastq)
    res = []
    for record in SeqIO.parse(inputfastq, "fastq"):
        try:
            # header key=value pairs -> dict (values remain strings)
            read_info = info_to_dict(record.description)
            res.append(
                (nanomath.ave_qual(record.letter_annotations["phred_quality"]),
                 len(record),
                 read_info["ch"],
                 read_info["start_time"],
                 read_info["runid"]))
        except KeyError:
            logging.error("Nanoget: keyerror when processing record {}".format(record.description))
            sys.exit("Unexpected fastq identifier:\n{}\n\n \
            missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
                record.description))
    df = pd.DataFrame(
        data=res,
        columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
    # channel numbers arrive as strings from the header; cast for downstream use
    df["channelIDs"] = df["channelIDs"].astype("int64")
    return ut.reduce_memory_usage(df)
def readfq(fp):
    """Generator function adapted from https://github.com/lh3/readfq.

    Parses a fasta or fastq text handle and yields one
    (name, sequence, qualities) tuple per record; qualities is None for
    fasta records, and also for a final fastq record truncated at EOF.
    """
    last = None  # this is a buffer keeping the last unprocessed line
    while True:  # mimic closure; is it a bad idea?
        if not last:  # the first record or a record following a fastq
            for l in fp:  # search for the start of the next record
                if l[0] in '>@':  # fasta/q header line
                    last = l[:-1]  # save this line
                    break
        if not last:
            break
        # name is the header up to the first space; reset the buffer
        name, seqs, last = last[1:].partition(" ")[0], [], None
        for l in fp:  # read the sequence
            if l[0] in '@+>':
                last = l[:-1]
                break
            seqs.append(l[:-1])
        if not last or last[0] != '+':  # this is a fasta record
            yield name, ''.join(seqs), None  # yield a fasta record
            if not last:
                break
        else:  # this is a fastq record
            seq, leng, seqs = ''.join(seqs), 0, []
            for l in fp:  # read the quality
                seqs.append(l[:-1])
                leng += len(l) - 1
                if leng >= len(seq):  # have read enough quality
                    last = None
                    yield name, seq, ''.join(seqs)  # yield a fastq record
                    break
            if last:  # reach EOF before reading enough quality
                yield name, seq, None  # yield a fasta record instead
                break
def fq_minimal(fq):
    """Minimal fastq metrics extractor.

    Quickly parse a fasta/fastq file - but makes expectations on the file
    format. There will be dragons if an unexpected format is used.
    Expects a fastq_rich format (strict 4-line records whose fifth
    whitespace-separated header field is start_time=<timestamp>), and
    extracts only the timestamp and the raw sequence-line length.
    Yields a final None once the handle is exhausted.
    """
    try:
        while True:
            header = next(fq)
            # strip the leading '@', take field 5, drop 'start_time=' and '\n'
            timestamp = header[1:].split(" ")[4][11:-1]
            seq_len = len(next(fq))
            next(fq)  # skip the '+' separator line
            next(fq)  # skip the quality line
            yield timestamp, seq_len
    except StopIteration:
        yield None
def process_fastq_minimal(fastq, **kwargs):
    """Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
    infastq = handle_compressed_input(fastq)
    try:
        # fq_minimal yields a trailing None sentinel; filter it out here
        records = [entry for entry in fq_minimal(infastq) if entry]
        df = pd.DataFrame(data=records, columns=["timestamp", "lengths"])
    except IndexError:
        logging.error("Fatal: Incorrect file structure for fastq_minimal")
        sys.exit("Error: file does not match expected structure for fastq_minimal")
    return ut.reduce_memory_usage(df)
|
wdecoster/nanoget | nanoget/extraction_functions.py | process_bam | python | def process_bam(bam, **kwargs):
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
params = zip([bam] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: bam {} contains {} primary alignments.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf) | Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L142-L168 | [
"def reduce_memory_usage(df):\n \"\"\"reduce memory usage of the dataframe\n\n - convert runIDs to categorical\n - downcast ints and floats\n \"\"\"\n usage_pre = df.memory_usage(deep=True).sum()\n if \"runIDs\" in df:\n df.loc[:, \"runIDs\"] = df.loc[:, \"runIDs\"].astype(\"category\")\n df_int = df.select_dtypes(include=['int'])\n df_float = df.select_dtypes(include=['float'])\n df.loc[:, df_int.columns] = df_int.apply(pd.to_numeric, downcast='unsigned')\n df.loc[:, df_float.columns] = df_float.apply(pd.to_numeric, downcast='float')\n usage_post = df.memory_usage(deep=True).sum()\n logging.info(\"Reduced DataFrame memory usage from {}Mb to {}Mb\".format(\n usage_pre / 1024**2, usage_post / 1024**2))\n if usage_post > 4e9 and \"readIDs\" in df:\n logging.info(\"DataFrame of features is too big, dropping read identifiers.\")\n return df.drop([\"readIDs\"], axis=1, errors=\"ignore\")\n else:\n return df\n",
"def check_bam(bam, samtype=\"bam\"):\n \"\"\"Check if bam file is valid.\n\n Bam file should:\n - exists\n - has an index (create if necessary)\n - is sorted by coordinate\n - has at least one mapped read\n \"\"\"\n ut.check_existance(bam)\n samfile = pysam.AlignmentFile(bam, \"rb\")\n if not samfile.has_index():\n pysam.index(bam)\n samfile = pysam.AlignmentFile(bam, \"rb\") # Need to reload the samfile after creating index\n logging.info(\"Nanoget: No index for bam file could be found, created index.\")\n if not samfile.header['HD']['SO'] == 'coordinate':\n logging.error(\"Nanoget: Bam file {} not sorted by coordinate!.\".format(bam))\n sys.exit(\"Please use a bam file sorted by coordinate.\")\n if samtype == \"bam\":\n logging.info(\"Nanoget: Bam file {} contains {} mapped and {} unmapped reads.\".format(\n bam, samfile.mapped, samfile.unmapped))\n if samfile.mapped == 0:\n logging.error(\"Nanoget: Bam file {} does not contain aligned reads.\".format(bam))\n sys.exit(\"FATAL: not a single read was mapped in bam file {}\".format(bam))\n return samfile\n"
] | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
    """Extract read metrics from an albacore/guppy sequencing summary file.

    Only reads which have a >0 length are returned.

    Expected kwargs:
      readtype: "1D", "2D" or "1D2" -- selects which length/quality
                columns are read from the summary
      barcoded: bool -- additionally extract the barcode_arrangement column

    The fields below may or may not exist, depending on the type of
    sequencing performed:
    Fields 1-14 are for 1D sequencing.
    Fields 1-23 for 2D sequencing.
    Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing.
    Fields 28-38 for barcoded workflows.
    (filename, read_id, run_id, channel, start_time, duration, num_events,
    template/complement event and length/quality columns, barcode columns;
    see the ONT sequencing_summary documentation for the full list.)

    Returns a pandas DataFrame with columns readIDs, runIDs, channelIDs,
    time, duration, lengths, quals (and barcode when barcoded=True).
    """
    logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
        summaryfile, kwargs["readtype"]))
    ut.check_existance(summaryfile)
    if kwargs["readtype"] == "1D":
        cols = ["read_id", "run_id", "channel", "start_time", "duration",
                "sequence_length_template", "mean_qscore_template"]
    elif kwargs["readtype"] in ["2D", "1D2"]:
        cols = ["read_id", "run_id", "channel", "start_time", "duration",
                "sequence_length_2d", "mean_qscore_2d"]
    else:
        # BUGFIX: previously an unexpected readtype fell through and raised a
        # confusing NameError on 'cols'; fail loudly with a clear message.
        logging.error("Nanoget: unsupported readtype {}".format(kwargs["readtype"]))
        sys.exit("ERROR: unsupported readtype {}, expected 1D, 2D or 1D2".format(
            kwargs["readtype"]))
    if kwargs["barcoded"]:
        cols.append("barcode_arrangement")
        logging.info("Nanoget: Extracting metrics per barcode.")
    try:
        datadf = pd.read_csv(
            filepath_or_buffer=summaryfile,
            sep="\t",
            usecols=cols,
        )
    except ValueError:
        logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
            summaryfile, ', '.join(cols)))
        sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
            summaryfile, ', '.join(cols)))
    # NOTE(review): renaming by position assumes the file's column order
    # matches `cols` -- read_csv/usecols keeps file order; verify for
    # nonstandard summary files.
    if kwargs["barcoded"]:
        datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
                          "lengths", "quals", "barcode"]
    else:
        datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
                          "lengths", "quals"]
    logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
    return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
    """Validate a bam file and return an open pysam handle.

    The file must exist, be indexed (an index is created when missing),
    be coordinate-sorted, and -- for samtype="bam" -- contain at least
    one mapped read. Exits with an error message otherwise.
    """
    ut.check_existance(bam)
    samfile = pysam.AlignmentFile(bam, "rb")
    if not samfile.has_index():
        pysam.index(bam)
        # reopen so the freshly written index is picked up
        samfile = pysam.AlignmentFile(bam, "rb")
        logging.info("Nanoget: No index for bam file could be found, created index.")
    if samfile.header['HD']['SO'] != 'coordinate':
        logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
        sys.exit("Please use a bam file sorted by coordinate.")
    if samtype == "bam":
        logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
            bam, samfile.mapped, samfile.unmapped))
        if samfile.mapped == 0:
            logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
            sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
    return samfile
def process_ubam(bam, **kwargs):
    """Extract read names, lengths and average qualities from an unaligned bam.

    Returns a pandas DataFrame with columns readIDs, quals and lengths.
    """
    logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
    samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
    if not samfile.has_index():
        pysam.index(bam)
        # Need to reload the samfile after creating index.
        # BUGFIX: keep check_sq=False on reopen; a truly unaligned bam has no
        # @SQ header lines and AlignmentFile would raise without it.
        samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
        logging.info("Nanoget: No index for bam file could be found, created index.")
    datadf = pd.DataFrame(
        data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
              for read in samfile.fetch(until_eof=True)],
        columns=["readIDs", "quals", "lengths"]) \
        .dropna(axis='columns', how='all') \
        .dropna(axis='index', how='any')
    logging.info("Nanoget: ubam {} contains {} reads.".format(
        bam, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
    """Collect per-read metrics from an aligned, coordinate-sorted cram.

    Fans out one worker process per reference sequence and gathers, for
    every primary alignment: read length, aligned length, read and
    aligned quality, mapping quality and percent identity.
    Returns a pandas DataFrame.
    """
    logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
    samfile = check_bam(cram, samtype="cram")
    chromosomes = samfile.references
    params = zip([cram] * len(chromosomes), chromosomes)
    rows = []
    with cfutures.ProcessPoolExecutor() as executor:
        for chunk in executor.map(extract_from_bam, params):
            rows.extend(chunk)
    datadf = pd.DataFrame(
        data=rows,
        columns=["readIDs", "quals", "aligned_quals", "lengths",
                 "aligned_lengths", "mapQ", "percentIdentity"]) \
        .dropna(axis='columns', how='all') \
        .dropna(axis='index', how='any')
    logging.info("Nanoget: cram {} contains {} primary alignments.".format(
        cram, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
    """Worker: extract metrics for one chromosome of a bam file.

    `params` is a (bam path, chromosome name) tuple. Returns a list of
    tuples (name, read quality, aligned quality, read length, aligned
    length, mapping quality, percent identity) for every primary
    alignment on that chromosome.
    """
    bam, chromosome = params
    samfile = pysam.AlignmentFile(bam, "rb")
    metrics = []
    for read in samfile.fetch(reference=chromosome, multiple_iterators=True):
        if read.is_secondary:
            continue
        metrics.append(
            (read.query_name,
             nanomath.ave_qual(read.query_qualities),
             nanomath.ave_qual(read.query_alignment_qualities),
             read.query_length,
             read.query_alignment_length,
             read.mapping_quality,
             get_pID(read)))
    return metrics
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
try:
return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
/ read.query_alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
def parse_MD(MDlist):
    """Count mismatched and deleted bases encoded in an MD tag string."""
    total = 0
    # splitting on digits and '^' leaves only the substituted/deleted bases
    for fragment in re.split('[0-9^]', MDlist):
        total += len(fragment)
    return total
def parse_CIGAR(cigartuples):
    """Sum the lengths of all insertion operations (op code 1) in a CIGAR."""
    return sum(length for operation, length in cigartuples if operation == 1)
def handle_compressed_input(inputfq, file_type="fastq"):
    """Return an open text-mode handle for a (possibly compressed) file.

    Compression is recognized from the file extension: .gz/.bgz (gzip),
    .bz2 (bzip2), or plain fastq/fasta extensions opened as-is.
    Exits with an error for any other extension.
    """
    ut.check_existance(inputfq)
    # BUGFIX: extensions normalized to include the leading dot ('bgz' and
    # 'fasta' previously matched any filename merely *ending* in those
    # letters, e.g. 'xbgz').
    if inputfq.endswith(('.gz', '.bgz')):
        import gzip
        logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
        return gzip.open(inputfq, 'rt')
    elif inputfq.endswith('.bz2'):
        import bz2
        logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
        return bz2.open(inputfq, 'rt')
    elif inputfq.endswith(('.fastq', '.fq', '.fasta', '.fa', '.fas')):
        return open(inputfq, 'r')
    else:
        logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
        sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
                 'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
def process_fasta(fasta, **kwargs):
    """Extract read lengths from a fasta file into a DataFrame."""
    logging.info("Nanoget: Starting to collect statistics from a fasta file.")
    inputfasta = handle_compressed_input(fasta, file_type="fasta")
    lengths = [len(rec) for rec in SeqIO.parse(inputfasta, "fasta")]
    return ut.reduce_memory_usage(
        pd.DataFrame(data=lengths, columns=["lengths"]).dropna())
def process_fastq_plain(fastq, **kwargs):
    """Extract average qualities and lengths from a plain fastq file."""
    logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
    inputfastq = handle_compressed_input(fastq)
    metrics = [entry for entry in extract_from_fastq(inputfastq) if entry]
    return ut.reduce_memory_usage(
        pd.DataFrame(data=metrics, columns=["quals", "lengths"]).dropna())
def extract_from_fastq(fq):
    """Extract metrics from a fastq file.

    Yields an (average quality, read length) tuple per record.
    """
    for rec in SeqIO.parse(fq, "fastq"):
        yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
    """Yield (id, length, mean quality, median quality) per fastq record.

    Records are farmed out to a pool of `threads` worker processes.
    """
    logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
    inputfastq = handle_compressed_input(fastq)
    with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
        yield from executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq"))
    logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
    """Return (identifier, length, mean quality, median quality) for one record."""
    phred = rec.letter_annotations["phred_quality"]
    return (rec.id,
            len(rec),
            nanomath.ave_qual(phred),
            nanomath.median_qual(phred))
def info_to_dict(info):
    """Parse the key=value pairs of an albacore/minknow fastq description.

    The first whitespace-separated token (the read id) is skipped; values
    stay strings. Note: only the part before the second '=' of each field
    is kept as the value, matching the historical behaviour.
    """
    parsed = {}
    for field in info.split(' ')[1:]:
        parts = field.split('=')
        parsed[parts[0]] = parts[1]
    return parsed
def process_fastq_rich(fastq, **kwargs):
    """Extract metrics from a richer fastq file.

    Extract information from fastq files generated by albacore or MinKNOW,
    containing richer information in the header (key-value pairs)
    read=<int> [72]
    ch=<int> [159]
    start_time=<timestamp> [2016-07-15T14:23:22Z]  # UTC ISO 8601 ISO 3339 timestamp
    Z indicates UTC time, T is the delimiter between date expression and time expression
    dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
    -> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())

    Returns a DataFrame with quals, lengths, channelIDs, timestamp and
    runIDs; exits when a record lacks 'ch', 'start_time' or 'runid'.
    """
    logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
    inputfastq = handle_compressed_input(fastq)
    res = []
    for record in SeqIO.parse(inputfastq, "fastq"):
        try:
            # header key=value pairs -> dict (values remain strings)
            read_info = info_to_dict(record.description)
            res.append(
                (nanomath.ave_qual(record.letter_annotations["phred_quality"]),
                 len(record),
                 read_info["ch"],
                 read_info["start_time"],
                 read_info["runid"]))
        except KeyError:
            logging.error("Nanoget: keyerror when processing record {}".format(record.description))
            sys.exit("Unexpected fastq identifier:\n{}\n\n \
            missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
                record.description))
    df = pd.DataFrame(
        data=res,
        columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
    # channel numbers arrive as strings from the header; cast for downstream use
    df["channelIDs"] = df["channelIDs"].astype("int64")
    return ut.reduce_memory_usage(df)
def readfq(fp):
    """Generator function adapted from https://github.com/lh3/readfq.

    Parses a fasta or fastq text handle and yields one
    (name, sequence, qualities) tuple per record; qualities is None for
    fasta records, and also for a final fastq record truncated at EOF.
    """
    last = None  # this is a buffer keeping the last unprocessed line
    while True:  # mimic closure; is it a bad idea?
        if not last:  # the first record or a record following a fastq
            for l in fp:  # search for the start of the next record
                if l[0] in '>@':  # fasta/q header line
                    last = l[:-1]  # save this line
                    break
        if not last:
            break
        # name is the header up to the first space; reset the buffer
        name, seqs, last = last[1:].partition(" ")[0], [], None
        for l in fp:  # read the sequence
            if l[0] in '@+>':
                last = l[:-1]
                break
            seqs.append(l[:-1])
        if not last or last[0] != '+':  # this is a fasta record
            yield name, ''.join(seqs), None  # yield a fasta record
            if not last:
                break
        else:  # this is a fastq record
            seq, leng, seqs = ''.join(seqs), 0, []
            for l in fp:  # read the quality
                seqs.append(l[:-1])
                leng += len(l) - 1
                if leng >= len(seq):  # have read enough quality
                    last = None
                    yield name, seq, ''.join(seqs)  # yield a fastq record
                    break
            if last:  # reach EOF before reading enough quality
                yield name, seq, None  # yield a fasta record instead
                break
def fq_minimal(fq):
    """Minimal fastq metrics extractor.

    Quickly parse a fasta/fastq file - but makes expectations on the file
    format. There will be dragons if an unexpected format is used.
    Expects a fastq_rich format (strict 4-line records whose fifth
    whitespace-separated header field is start_time=<timestamp>), and
    extracts only the timestamp and the raw sequence-line length.
    Yields a final None once the handle is exhausted.
    """
    try:
        while True:
            header = next(fq)
            # strip the leading '@', take field 5, drop 'start_time=' and '\n'
            timestamp = header[1:].split(" ")[4][11:-1]
            seq_len = len(next(fq))
            next(fq)  # skip the '+' separator line
            next(fq)  # skip the quality line
            yield timestamp, seq_len
    except StopIteration:
        yield None
def process_fastq_minimal(fastq, **kwargs):
"""Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df)
|
wdecoster/nanoget | nanoget/extraction_functions.py | extract_from_bam | python | def extract_from_bam(params):
bam, chromosome = params
samfile = pysam.AlignmentFile(bam, "rb")
return [
(read.query_name,
nanomath.ave_qual(read.query_qualities),
nanomath.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary] | Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L200-L223 | null | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
"""
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
"lengths", "quals", "barcode"]
else:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb")
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
params = zip([bam] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: bam {} contains {} primary alignments.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
params = zip([cram] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: cram {} contains {} primary alignments.".format(
cram, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
try:
return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
/ read.query_alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
def parse_MD(MDlist):
"""Parse MD string to get number of mismatches and deletions."""
return sum([len(item) for item in re.split('[0-9^]', MDlist)])
def parse_CIGAR(cigartuples):
"""Count the insertions in the read using the CIGAR string."""
return sum([item[1] for item in cigartuples if item[0] == 1])
def handle_compressed_input(inputfq, file_type="fastq"):
"""Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression
"""
ut.check_existance(inputfq)
if inputfq.endswith(('.gz', 'bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
def process_fasta(fasta, **kwargs):
"""Combine metrics extracted from a fasta file."""
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna())
def process_fastq_plain(fastq, **kwargs):
"""Combine metrics extracted from a fastq file."""
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna())
def extract_from_fastq(fq):
"""Extract metrics from a fastq file.
Return average quality and read length
"""
for rec in SeqIO.parse(fq, "fastq"):
yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
"""Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
-read_lenght
"""
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
"""Extract metrics from a fastq file.
Return identifier, read length, average quality and median quality
"""
return (rec.id,
len(rec),
nanomath.ave_qual(rec.letter_annotations["phred_quality"]),
nanomath.median_qual(rec.letter_annotations["phred_quality"]))
def info_to_dict(info):
"""Get the key-value pairs from the albacore/minknow fastq description and return dict"""
return {field.split('=')[0]: field.split('=')[1] for field in info.split(' ')[1:]}
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(nanomath.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df)
def readfq(fp):
"""Generator function adapted from https://github.com/lh3/readfq."""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last:
break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last:
break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs) # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
Quickly parse a fasta/fastq file - but makes expectations on the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
def process_fastq_minimal(fastq, **kwargs):
"""Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df)
|
wdecoster/nanoget | nanoget/extraction_functions.py | get_pID | python | def get_pID(read):
try:
return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
/ read.query_alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None | Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L226-L243 | [
"def parse_MD(MDlist):\n \"\"\"Parse MD string to get number of mismatches and deletions.\"\"\"\n return sum([len(item) for item in re.split('[0-9^]', MDlist)])\n",
"def parse_CIGAR(cigartuples):\n \"\"\"Count the insertions in the read using the CIGAR string.\"\"\"\n return sum([item[1] for item in cigartuples if item[0] == 1])\n"
] | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
"""
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
"lengths", "quals", "barcode"]
else:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb")
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
params = zip([bam] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: bam {} contains {} primary alignments.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
params = zip([cram] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: cram {} contains {} primary alignments.".format(
cram, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
"""Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length
"""
bam, chromosome = params
samfile = pysam.AlignmentFile(bam, "rb")
return [
(read.query_name,
nanomath.ave_qual(read.query_qualities),
nanomath.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary]
def parse_MD(MDlist):
"""Parse MD string to get number of mismatches and deletions."""
return sum([len(item) for item in re.split('[0-9^]', MDlist)])
def parse_CIGAR(cigartuples):
"""Count the insertions in the read using the CIGAR string."""
return sum([item[1] for item in cigartuples if item[0] == 1])
def handle_compressed_input(inputfq, file_type="fastq"):
"""Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression
"""
ut.check_existance(inputfq)
if inputfq.endswith(('.gz', 'bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
def process_fasta(fasta, **kwargs):
"""Combine metrics extracted from a fasta file."""
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna())
def process_fastq_plain(fastq, **kwargs):
"""Combine metrics extracted from a fastq file."""
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna())
def extract_from_fastq(fq):
"""Extract metrics from a fastq file.
Return average quality and read length
"""
for rec in SeqIO.parse(fq, "fastq"):
yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
"""Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
-read_lenght
"""
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
"""Extract metrics from a fastq file.
Return identifier, read length, average quality and median quality
"""
return (rec.id,
len(rec),
nanomath.ave_qual(rec.letter_annotations["phred_quality"]),
nanomath.median_qual(rec.letter_annotations["phred_quality"]))
def info_to_dict(info):
"""Get the key-value pairs from the albacore/minknow fastq description and return dict"""
return {field.split('=')[0]: field.split('=')[1] for field in info.split(' ')[1:]}
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(nanomath.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df)
def readfq(fp):
"""Generator function adapted from https://github.com/lh3/readfq."""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last:
break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last:
break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs) # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
Quickly parse a fasta/fastq file - but makes expectations on the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
def process_fastq_minimal(fastq, **kwargs):
"""Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df)
|
wdecoster/nanoget | nanoget/extraction_functions.py | handle_compressed_input | python | def handle_compressed_input(inputfq, file_type="fastq"):
ut.check_existance(inputfq)
if inputfq.endswith(('.gz', 'bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq)) | Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L256-L277 | [
"def check_existance(f):\n \"\"\"Check if the file supplied as input exists.\"\"\"\n if not opath.isfile(f):\n logging.error(\"Nanoget: File provided doesn't exist or the path is incorrect: {}\".format(f))\n sys.exit(\"File provided doesn't exist or the path is incorrect: {}\".format(f))\n"
] | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
"""
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
"lengths", "quals", "barcode"]
else:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb")
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
params = zip([bam] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: bam {} contains {} primary alignments.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
params = zip([cram] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: cram {} contains {} primary alignments.".format(
cram, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
"""Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length
"""
bam, chromosome = params
samfile = pysam.AlignmentFile(bam, "rb")
return [
(read.query_name,
nanomath.ave_qual(read.query_qualities),
nanomath.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary]
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
try:
return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
/ read.query_alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
def parse_MD(MDlist):
"""Parse MD string to get number of mismatches and deletions."""
return sum([len(item) for item in re.split('[0-9^]', MDlist)])
def parse_CIGAR(cigartuples):
"""Count the insertions in the read using the CIGAR string."""
return sum([item[1] for item in cigartuples if item[0] == 1])
def process_fasta(fasta, **kwargs):
"""Combine metrics extracted from a fasta file."""
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna())
def process_fastq_plain(fastq, **kwargs):
"""Combine metrics extracted from a fastq file."""
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna())
def extract_from_fastq(fq):
"""Extract metrics from a fastq file.
Return average quality and read length
"""
for rec in SeqIO.parse(fq, "fastq"):
yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
"""Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
-read_lenght
"""
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
"""Extract metrics from a fastq file.
Return identifier, read length, average quality and median quality
"""
return (rec.id,
len(rec),
nanomath.ave_qual(rec.letter_annotations["phred_quality"]),
nanomath.median_qual(rec.letter_annotations["phred_quality"]))
def info_to_dict(info):
"""Get the key-value pairs from the albacore/minknow fastq description and return dict"""
return {field.split('=')[0]: field.split('=')[1] for field in info.split(' ')[1:]}
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(nanomath.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df)
def readfq(fp):
"""Generator function adapted from https://github.com/lh3/readfq."""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last:
break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last:
break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs) # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
Quickly parse a fasta/fastq file - but makes expectations on the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
def process_fastq_minimal(fastq, **kwargs):
"""Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df)
|
wdecoster/nanoget | nanoget/extraction_functions.py | process_fasta | python | def process_fasta(fasta, **kwargs):
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna()) | Combine metrics extracted from a fasta file. | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L280-L287 | [
"def reduce_memory_usage(df):\n \"\"\"reduce memory usage of the dataframe\n\n - convert runIDs to categorical\n - downcast ints and floats\n \"\"\"\n usage_pre = df.memory_usage(deep=True).sum()\n if \"runIDs\" in df:\n df.loc[:, \"runIDs\"] = df.loc[:, \"runIDs\"].astype(\"category\")\n df_int = df.select_dtypes(include=['int'])\n df_float = df.select_dtypes(include=['float'])\n df.loc[:, df_int.columns] = df_int.apply(pd.to_numeric, downcast='unsigned')\n df.loc[:, df_float.columns] = df_float.apply(pd.to_numeric, downcast='float')\n usage_post = df.memory_usage(deep=True).sum()\n logging.info(\"Reduced DataFrame memory usage from {}Mb to {}Mb\".format(\n usage_pre / 1024**2, usage_post / 1024**2))\n if usage_post > 4e9 and \"readIDs\" in df:\n logging.info(\"DataFrame of features is too big, dropping read identifiers.\")\n return df.drop([\"readIDs\"], axis=1, errors=\"ignore\")\n else:\n return df\n",
"def handle_compressed_input(inputfq, file_type=\"fastq\"):\n \"\"\"Return handles from compressed files according to extension.\n\n Check for which fastq input is presented and open a handle accordingly\n Can read from compressed files (gz, bz2, bgz) or uncompressed\n Relies on file extensions to recognize compression\n \"\"\"\n ut.check_existance(inputfq)\n if inputfq.endswith(('.gz', 'bgz')):\n import gzip\n logging.info(\"Nanoget: Decompressing gzipped {} {}\".format(file_type, inputfq))\n return gzip.open(inputfq, 'rt')\n elif inputfq.endswith('.bz2'):\n import bz2\n logging.info(\"Nanoget: Decompressing bz2 compressed {} {}\".format(file_type, inputfq))\n return bz2.open(inputfq, 'rt')\n elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):\n return open(inputfq, 'r')\n else:\n logging.error(\"INPUT ERROR: Unrecognized file extension {}\".format(inputfq))\n sys.exit('INPUT ERROR:\\nUnrecognized file extension in {}\\n'\n 'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))\n"
] | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
"""
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
"lengths", "quals", "barcode"]
else:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb")
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
params = zip([bam] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: bam {} contains {} primary alignments.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
params = zip([cram] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: cram {} contains {} primary alignments.".format(
cram, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
"""Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length
"""
bam, chromosome = params
samfile = pysam.AlignmentFile(bam, "rb")
return [
(read.query_name,
nanomath.ave_qual(read.query_qualities),
nanomath.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary]
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
try:
return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
/ read.query_alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
def parse_MD(MDlist):
"""Parse MD string to get number of mismatches and deletions."""
return sum([len(item) for item in re.split('[0-9^]', MDlist)])
def parse_CIGAR(cigartuples):
"""Count the insertions in the read using the CIGAR string."""
return sum([item[1] for item in cigartuples if item[0] == 1])
def handle_compressed_input(inputfq, file_type="fastq"):
"""Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression
"""
ut.check_existance(inputfq)
if inputfq.endswith(('.gz', 'bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
def process_fastq_plain(fastq, **kwargs):
"""Combine metrics extracted from a fastq file."""
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna())
def extract_from_fastq(fq):
"""Extract metrics from a fastq file.
Return average quality and read length
"""
for rec in SeqIO.parse(fq, "fastq"):
yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
"""Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
-read_lenght
"""
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
"""Extract metrics from a fastq file.
Return identifier, read length, average quality and median quality
"""
return (rec.id,
len(rec),
nanomath.ave_qual(rec.letter_annotations["phred_quality"]),
nanomath.median_qual(rec.letter_annotations["phred_quality"]))
def info_to_dict(info):
"""Get the key-value pairs from the albacore/minknow fastq description and return dict"""
return {field.split('=')[0]: field.split('=')[1] for field in info.split(' ')[1:]}
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(nanomath.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df)
def readfq(fp):
"""Generator function adapted from https://github.com/lh3/readfq."""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last:
break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last:
break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs) # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
Quickly parse a fasta/fastq file - but makes expectations on the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
def process_fastq_minimal(fastq, **kwargs):
"""Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df)
|
wdecoster/nanoget | nanoget/extraction_functions.py | process_fastq_plain | python | def process_fastq_plain(fastq, **kwargs):
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna()) | Combine metrics extracted from a fastq file. | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L290-L297 | [
"def reduce_memory_usage(df):\n \"\"\"reduce memory usage of the dataframe\n\n - convert runIDs to categorical\n - downcast ints and floats\n \"\"\"\n usage_pre = df.memory_usage(deep=True).sum()\n if \"runIDs\" in df:\n df.loc[:, \"runIDs\"] = df.loc[:, \"runIDs\"].astype(\"category\")\n df_int = df.select_dtypes(include=['int'])\n df_float = df.select_dtypes(include=['float'])\n df.loc[:, df_int.columns] = df_int.apply(pd.to_numeric, downcast='unsigned')\n df.loc[:, df_float.columns] = df_float.apply(pd.to_numeric, downcast='float')\n usage_post = df.memory_usage(deep=True).sum()\n logging.info(\"Reduced DataFrame memory usage from {}Mb to {}Mb\".format(\n usage_pre / 1024**2, usage_post / 1024**2))\n if usage_post > 4e9 and \"readIDs\" in df:\n logging.info(\"DataFrame of features is too big, dropping read identifiers.\")\n return df.drop([\"readIDs\"], axis=1, errors=\"ignore\")\n else:\n return df\n",
"def handle_compressed_input(inputfq, file_type=\"fastq\"):\n \"\"\"Return handles from compressed files according to extension.\n\n Check for which fastq input is presented and open a handle accordingly\n Can read from compressed files (gz, bz2, bgz) or uncompressed\n Relies on file extensions to recognize compression\n \"\"\"\n ut.check_existance(inputfq)\n if inputfq.endswith(('.gz', 'bgz')):\n import gzip\n logging.info(\"Nanoget: Decompressing gzipped {} {}\".format(file_type, inputfq))\n return gzip.open(inputfq, 'rt')\n elif inputfq.endswith('.bz2'):\n import bz2\n logging.info(\"Nanoget: Decompressing bz2 compressed {} {}\".format(file_type, inputfq))\n return bz2.open(inputfq, 'rt')\n elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):\n return open(inputfq, 'r')\n else:\n logging.error(\"INPUT ERROR: Unrecognized file extension {}\".format(inputfq))\n sys.exit('INPUT ERROR:\\nUnrecognized file extension in {}\\n'\n 'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))\n",
"def extract_from_fastq(fq):\n \"\"\"Extract metrics from a fastq file.\n\n Return average quality and read length\n \"\"\"\n for rec in SeqIO.parse(fq, \"fastq\"):\n yield nanomath.ave_qual(rec.letter_annotations[\"phred_quality\"]), len(rec)\n"
] | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
    """Extract information from an albacore/guppy summary file.

    Only reads which have a >0 length are returned.

    Expected kwargs:
      readtype: "1D", "2D" or "1D2" -- selects which length/quality columns
                of the summary are read.
      barcoded: bool -- additionally extract the barcode assignment per read.

    Returns a memory-optimized pandas DataFrame; exits the program when the
    expected columns are absent or the readtype is not recognized.

    The fields below may or may not exist, depending on the type of sequencing performed.
    Fields 1-14 are for 1D sequencing.
    Fields 1-23 for 2D sequencing.
    Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
    Fields 28-38 for barcoded workflows
     1  filename
     2  read_id
     3  run_id
     4  channel
     5  start_time
     6  duration
     7  num_events
     8  template_start
     9  num_events_template
    10  template_duration
    11  num_called_template
    12  sequence_length_template
    13  mean_qscore_template
    14  strand_score_template
    15  complement_start
    16  num_events_complement
    17  complement_duration
    18  num_called_complement
    19  sequence_length_complement
    20  mean_qscore_complement
    21  strand_score_complement
    22  sequence_length_2d
    23  mean_qscore_2d
    24  filename1
    25  filename2
    26  read_id1
    27  read_id2
    28  barcode_arrangement
    29  barcode_score
    30  barcode_full_arrangement
    31  front_score
    32  rear_score
    33  front_begin_index
    34  front_foundseq_length
    35  rear_end_index
    36  rear_foundseq_length
    37  kit
    38  variant
    """
    logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
        summaryfile, kwargs["readtype"]))
    ut.check_existance(summaryfile)
    if kwargs["readtype"] == "1D":
        cols = ["read_id", "run_id", "channel", "start_time", "duration",
                "sequence_length_template", "mean_qscore_template"]
    elif kwargs["readtype"] in ["2D", "1D2"]:
        cols = ["read_id", "run_id", "channel", "start_time", "duration",
                "sequence_length_2d", "mean_qscore_2d"]
    else:
        # Bugfix: an unrecognized readtype previously fell through and crashed
        # later with a NameError on 'cols'; fail fast with a clear message.
        logging.error("Nanoget: unsupported readtype {} for summary file parsing".format(
            kwargs["readtype"]))
        sys.exit("ERROR: unsupported readtype {}, expected 1D, 2D or 1D2".format(
            kwargs["readtype"]))
    if kwargs["barcoded"]:
        cols.append("barcode_arrangement")
        logging.info("Nanoget: Extracting metrics per barcode.")
    try:
        datadf = pd.read_csv(
            filepath_or_buffer=summaryfile,
            sep="\t",
            usecols=cols,
        )
    except ValueError:
        logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
            summaryfile, ', '.join(cols)))
        sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
            summaryfile, ', '.join(cols)))
    # NOTE: this rename is positional -- read_csv keeps the file's column
    # order (not the order of 'usecols'), so it relies on the summary columns
    # appearing in the documented layout above.
    if kwargs["barcoded"]:
        datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
                          "lengths", "quals", "barcode"]
    else:
        datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
    logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
    # Zero-length reads carry no usable metrics downstream; drop them.
    return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
    """Check if bam file is valid and return an open pysam.AlignmentFile.

    Bam file should:
    - exists
    - has an index (create if necessary)
    - is sorted by coordinate
    - has at least one mapped read (checked for samtype="bam" only)

    Exits the program when a requirement is not met.
    """
    ut.check_existance(bam)
    samfile = pysam.AlignmentFile(bam, "rb")
    if not samfile.has_index():
        pysam.index(bam)
        samfile = pysam.AlignmentFile(bam, "rb")  # Need to reload the samfile after creating index
        logging.info("Nanoget: No index for bam file could be found, created index.")
    # Sort order is taken from the @HD SO header tag.
    if not samfile.header['HD']['SO'] == 'coordinate':
        logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
        sys.exit("Please use a bam file sorted by coordinate.")
    # The mapped/unmapped counters are only consulted for plain bam input;
    # cram callers pass samtype="cram" and skip this check.
    if samtype == "bam":
        logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
            bam, samfile.mapped, samfile.unmapped))
        if samfile.mapped == 0:
            logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
            sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
    return samfile
def process_ubam(bam, **kwargs):
    """Extract metrics from an unaligned bam (ubam) file.

    Collects per read: identifier, average basecall quality and read length.
    kwargs are accepted for interface uniformity with the other processors
    but are not used here.
    """
    logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
    # check_sq=False: an unaligned bam has no reference sequences in its header.
    samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
    if not samfile.has_index():
        pysam.index(bam)
        # Need to reload the samfile after creating index
        samfile = pysam.AlignmentFile(bam, "rb")
        logging.info("Nanoget: No index for bam file could be found, created index.")
    datadf = pd.DataFrame(
        data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
              for read in samfile.fetch(until_eof=True)],
        columns=["readIDs", "quals", "lengths"]) \
        .dropna(axis='columns', how='all') \
        .dropna(axis='index', how='any')
    logging.info("Nanoget: ubam {} contains {} reads.".format(
        bam, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
    """Combines metrics from bam after extraction.

    Processing function: calls pool of worker functions
    to extract from a bam file the following metrics:
    -lengths
    -aligned lengths
    -qualities
    -aligned qualities
    -mapping qualities
    -edit distances to the reference genome scaled by read length
    Returned in a pandas DataFrame

    kwargs are accepted for interface uniformity but unused here.
    """
    logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
    samfile = check_bam(bam)
    chromosomes = samfile.references
    # One worker task per reference sequence; each worker re-opens the bam
    # itself, so only the (path, chromosome) pair is sent to the pool.
    params = zip([bam] * len(chromosomes), chromosomes)
    with cfutures.ProcessPoolExecutor() as executor:
        datadf = pd.DataFrame(
            data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
            columns=["readIDs", "quals", "aligned_quals", "lengths",
                     "aligned_lengths", "mapQ", "percentIdentity"]) \
            .dropna(axis='columns', how='all') \
            .dropna(axis='index', how='any')
    logging.info("Nanoget: bam {} contains {} primary alignments.".format(
        bam, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
    """Combines metrics from cram after extraction.

    Processing function: calls pool of worker functions
    to extract from a cram file the following metrics:
    -lengths
    -aligned lengths
    -qualities
    -aligned qualities
    -mapping qualities
    -edit distances to the reference genome scaled by read length
    Returned in a pandas DataFrame

    kwargs are accepted for interface uniformity but unused here.
    """
    logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
    # samtype="cram" skips the mapped/unmapped-count check inside check_bam.
    samfile = check_bam(cram, samtype="cram")
    chromosomes = samfile.references
    # NOTE(review): the workers (extract_from_bam) open the file with mode
    # "rb" -- presumably pysam detects the actual cram format; confirm.
    params = zip([cram] * len(chromosomes), chromosomes)
    with cfutures.ProcessPoolExecutor() as executor:
        datadf = pd.DataFrame(
            data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
            columns=["readIDs", "quals", "aligned_quals", "lengths",
                     "aligned_lengths", "mapQ", "percentIdentity"]) \
            .dropna(axis='columns', how='all') \
            .dropna(axis='index', how='any')
    logging.info("Nanoget: cram {} contains {} primary alignments.".format(
        cram, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
    """Extracts metrics from bam.

    Worker function per chromosome
    loop over a bam file and create list with tuples containing metrics:
    -qualities
    -aligned qualities
    -lengths
    -aligned lengths
    -mapping qualities
    -edit distances to the reference genome scaled by read length

    params is a single (path, chromosome) tuple so the function can be fed
    directly to executor.map. Only secondary alignments are filtered out;
    supplementary alignments, if present, are retained.
    """
    bam, chromosome = params
    samfile = pysam.AlignmentFile(bam, "rb")
    # multiple_iterators=True gives this worker its own file iterator state.
    return [
        (read.query_name,
         nanomath.ave_qual(read.query_qualities),
         nanomath.ave_qual(read.query_alignment_qualities),
         read.query_length,
         read.query_alignment_length,
         read.mapping_quality,
         get_pID(read))
        for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
        if not read.is_secondary]
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
try:
return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
/ read.query_alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
def parse_MD(MDlist):
    """Count the mismatches and deletions encoded in an MD tag string.

    Deleting every digit and '^' from the MD string leaves exactly the
    reference bases recorded for mismatches and deletions, so the length
    of the remainder is their combined count.
    """
    return len(re.sub(r'[0-9^]', '', MDlist))
def parse_CIGAR(cigartuples):
    """Total number of inserted bases according to the CIGAR operations.

    Operation code 1 (pysam convention) denotes an insertion; the lengths
    of all insertion operations are accumulated.
    """
    inserted = 0
    for operation, length in cigartuples:
        if operation == 1:
            inserted += length
    return inserted
def handle_compressed_input(inputfq, file_type="fastq"):
    """Return handles from compressed files according to extension.

    Check for which fastq input is presented and open a handle accordingly
    Can read from compressed files (gz, bz2, bgz) or uncompressed
    Relies on file extensions to recognize compression

    Exits the program for unrecognized extensions.
    """
    ut.check_existance(inputfq)
    # NOTE(review): 'bgz' and 'fasta' below lack a leading dot, so any name
    # merely ending in those letters also matches -- presumably intentional
    # leniency; confirm before tightening.
    if inputfq.endswith(('.gz', 'bgz')):
        import gzip
        logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
        # 'rt' opens the decompressed stream in text mode for line iteration.
        return gzip.open(inputfq, 'rt')
    elif inputfq.endswith('.bz2'):
        import bz2
        logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
        return bz2.open(inputfq, 'rt')
    elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
        return open(inputfq, 'r')
    else:
        logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
        sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
                 'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
def process_fasta(fasta, **kwargs):
    """Combine metrics extracted from a fasta file.

    Only read lengths are available for fasta input, so the resulting
    DataFrame has a single 'lengths' column. kwargs are accepted for
    interface uniformity with the other processors but unused here.
    """
    logging.info("Nanoget: Starting to collect statistics from a fasta file.")
    inputfasta = handle_compressed_input(fasta, file_type="fasta")
    return ut.reduce_memory_usage(pd.DataFrame(
        data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
        columns=["lengths"]
    ).dropna())
def extract_from_fastq(fq):
    """Extract metrics from a fastq file.

    Yields one (average quality, read length) tuple per record in the
    open fastq handle *fq*.
    """
    for rec in SeqIO.parse(fq, "fastq"):
        yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
    """Generator for returning metrics extracted from fastq.

    Extract from a fastq file:
    -readname
    -average and median quality
    -read_length

    'threads' bounds the worker-process pool used for per-record extraction.
    """
    logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
    inputfastq = handle_compressed_input(fastq)
    # NOTE(review): each SeqIO record is pickled to a worker process; the
    # per-record work is small, so pool overhead may dominate -- confirm.
    with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
        for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
            yield results
    logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
    """Extract metrics from a single fastq record.

    Return a tuple of (identifier, read length, average quality,
    median quality).
    """
    return (rec.id,
            len(rec),
            nanomath.ave_qual(rec.letter_annotations["phred_quality"]),
            nanomath.median_qual(rec.letter_annotations["phred_quality"]))
def info_to_dict(info):
    """Turn the key=value pairs of an albacore/minknow fastq description into a dict.

    The first whitespace-separated token (the read identifier) is skipped;
    each remaining token contributes its part before the first '=' as key
    and the part between the first and second '=' as value.
    """
    metadata = {}
    for token in info.split(' ')[1:]:
        parts = token.split('=')
        metadata[parts[0]] = parts[1]
    return metadata
def process_fastq_rich(fastq, **kwargs):
    """Extract metrics from a richer fastq file.

    Extract information from fastq files generated by albacore or MinKNOW,
    containing richer information in the header (key-value pairs)
    read=<int> [72]
    ch=<int> [159]
    start_time=<timestamp> [2016-07-15T14:23:22Z]  # UTC ISO 8601 ISO 3339 timestamp
    Z indicates UTC time, T is the delimiter between date expression and time expression
    dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
    -> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())

    Exits the program when a record lacks the 'ch', 'start_time' or 'runid'
    field. kwargs are accepted for interface uniformity but unused here.
    """
    logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
    inputfastq = handle_compressed_input(fastq)
    res = []
    for record in SeqIO.parse(inputfastq, "fastq"):
        try:
            read_info = info_to_dict(record.description)
            res.append(
                (nanomath.ave_qual(record.letter_annotations["phred_quality"]),
                 len(record),
                 read_info["ch"],
                 read_info["start_time"],
                 read_info["runid"]))
        except KeyError:
            # Missing header fields are fatal: downstream stats rely on them.
            logging.error("Nanoget: keyerror when processing record {}".format(record.description))
            sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
                record.description))
    df = pd.DataFrame(
        data=res,
        columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
    # Channel numbers arrive as strings from the header; cast for numeric use.
    df["channelIDs"] = df["channelIDs"].astype("int64")
    return ut.reduce_memory_usage(df)
def readfq(fp):
    """Generator function adapted from https://github.com/lh3/readfq.

    Yields (name, sequence, quality) tuples from an open fasta or fastq
    handle; quality is None for fasta records (or truncated fastq records).
    """
    last = None  # this is a buffer keeping the last unprocessed line
    while True:  # mimic closure; is it a bad idea?
        if not last:  # the first record or a record following a fastq
            for l in fp:  # search for the start of the next record
                if l[0] in '>@':  # fasta/q header line
                    last = l[:-1]  # save this line
                    break
            if not last:
                break
        # name is the header up to the first space, without the '>'/'@'.
        name, seqs, last = last[1:].partition(" ")[0], [], None
        for l in fp:  # read the sequence
            if l[0] in '@+>':
                last = l[:-1]
                break
            seqs.append(l[:-1])
        if not last or last[0] != '+':  # this is a fasta record
            yield name, ''.join(seqs), None  # yield a fasta record
            if not last:
                break
        else:  # this is a fastq record
            seq, leng, seqs = ''.join(seqs), 0, []
            for l in fp:  # read the quality
                seqs.append(l[:-1])
                leng += len(l) - 1
                if leng >= len(seq):  # have read enough quality
                    last = None
                    yield name, seq, ''.join(seqs)  # yield a fastq record
                    break
            if last:  # reach EOF before reading enough quality
                yield name, seq, None  # yield a fasta record instead
                break
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
Quickly parse a fasta/fastq file - but makes expectations on the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
def process_fastq_minimal(fastq, **kwargs):
    """Swiftly extract minimal features (length and timestamp) from a rich fastq file.

    kwargs are accepted for interface uniformity but unused here.
    """
    infastq = handle_compressed_input(fastq)
    try:
        # 'if rec' drops the trailing None sentinel yielded by fq_minimal at EOF.
        df = pd.DataFrame(
            data=[rec for rec in fq_minimal(infastq) if rec],
            columns=["timestamp", "lengths"]
        )
    except IndexError:
        # fq_minimal's header slicing raises IndexError on malformed input;
        # the list comprehension above drives the generator, so it surfaces here.
        logging.error("Fatal: Incorrect file structure for fastq_minimal")
        sys.exit("Error: file does not match expected structure for fastq_minimal")
    return ut.reduce_memory_usage(df)
|
wdecoster/nanoget | nanoget/extraction_functions.py | extract_from_fastq | python | def extract_from_fastq(fq):
for rec in SeqIO.parse(fq, "fastq"):
yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec) | Extract metrics from a fastq file.
Return average quality and read length | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L300-L306 | null | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
"""
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
"lengths", "quals", "barcode"]
else:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb")
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
params = zip([bam] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: bam {} contains {} primary alignments.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
params = zip([cram] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: cram {} contains {} primary alignments.".format(
cram, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
"""Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length
"""
bam, chromosome = params
samfile = pysam.AlignmentFile(bam, "rb")
return [
(read.query_name,
nanomath.ave_qual(read.query_qualities),
nanomath.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary]
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
try:
return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
/ read.query_alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
def parse_MD(MDlist):
"""Parse MD string to get number of mismatches and deletions."""
return sum([len(item) for item in re.split('[0-9^]', MDlist)])
def parse_CIGAR(cigartuples):
"""Count the insertions in the read using the CIGAR string."""
return sum([item[1] for item in cigartuples if item[0] == 1])
def handle_compressed_input(inputfq, file_type="fastq"):
"""Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression
"""
ut.check_existance(inputfq)
if inputfq.endswith(('.gz', 'bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
def process_fasta(fasta, **kwargs):
"""Combine metrics extracted from a fasta file."""
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna())
def process_fastq_plain(fastq, **kwargs):
    """Combine metrics extracted from a fastq file.

    Builds a DataFrame with one row per read: average quality and length.
    kwargs are accepted for interface uniformity but unused here.
    """
    logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
    inputfastq = handle_compressed_input(fastq)
    # 'if res' keeps only truthy tuples from extract_from_fastq.
    return ut.reduce_memory_usage(pd.DataFrame(
        data=[res for res in extract_from_fastq(inputfastq) if res],
        columns=["quals", "lengths"]
    ).dropna())
def stream_fastq_full(fastq, threads):
"""Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
-read_lenght
"""
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
"""Extract metrics from a fastq file.
Return identifier, read length, average quality and median quality
"""
return (rec.id,
len(rec),
nanomath.ave_qual(rec.letter_annotations["phred_quality"]),
nanomath.median_qual(rec.letter_annotations["phred_quality"]))
def info_to_dict(info):
"""Get the key-value pairs from the albacore/minknow fastq description and return dict"""
return {field.split('=')[0]: field.split('=')[1] for field in info.split(' ')[1:]}
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(nanomath.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df)
def readfq(fp):
"""Generator function adapted from https://github.com/lh3/readfq."""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last:
break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last:
break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs) # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
Quickly parse a fasta/fastq file - but makes expectations on the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
def process_fastq_minimal(fastq, **kwargs):
"""Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df)
|
wdecoster/nanoget | nanoget/extraction_functions.py | stream_fastq_full | python | def stream_fastq_full(fastq, threads):
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.") | Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
-read_lenght | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L309-L322 | [
"def handle_compressed_input(inputfq, file_type=\"fastq\"):\n \"\"\"Return handles from compressed files according to extension.\n\n Check for which fastq input is presented and open a handle accordingly\n Can read from compressed files (gz, bz2, bgz) or uncompressed\n Relies on file extensions to recognize compression\n \"\"\"\n ut.check_existance(inputfq)\n if inputfq.endswith(('.gz', 'bgz')):\n import gzip\n logging.info(\"Nanoget: Decompressing gzipped {} {}\".format(file_type, inputfq))\n return gzip.open(inputfq, 'rt')\n elif inputfq.endswith('.bz2'):\n import bz2\n logging.info(\"Nanoget: Decompressing bz2 compressed {} {}\".format(file_type, inputfq))\n return bz2.open(inputfq, 'rt')\n elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):\n return open(inputfq, 'r')\n else:\n logging.error(\"INPUT ERROR: Unrecognized file extension {}\".format(inputfq))\n sys.exit('INPUT ERROR:\\nUnrecognized file extension in {}\\n'\n 'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))\n"
] | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
"""
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
"lengths", "quals", "barcode"]
else:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
    """Validate an alignment file and return an open pysam handle.

    The file must exist, be coordinate-sorted and carry an index (one is
    created on the fly when missing).  For samtype "bam" the file must
    additionally contain at least one mapped read.
    """
    ut.check_existance(bam)
    handle = pysam.AlignmentFile(bam, "rb")
    if not handle.has_index():
        pysam.index(bam)
        # reopen so the freshly written index is picked up
        handle = pysam.AlignmentFile(bam, "rb")
        logging.info("Nanoget: No index for bam file could be found, created index.")
    if handle.header['HD']['SO'] != 'coordinate':
        logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
        sys.exit("Please use a bam file sorted by coordinate.")
    if samtype == "bam":
        logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
            bam, handle.mapped, handle.unmapped))
        if handle.mapped == 0:
            logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
            sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
    return handle
def process_ubam(bam, **kwargs):
    """Extract read names, lengths and average qualities from an unaligned bam.

    Returns a pandas DataFrame with columns readIDs, quals and lengths.
    """
    logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
    samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
    if not samfile.has_index():
        pysam.index(bam)
        # Need to reload the samfile after creating index
        samfile = pysam.AlignmentFile(bam, "rb")
        logging.info("Nanoget: No index for bam file could be found, created index.")
    metrics = [(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
               for read in samfile.fetch(until_eof=True)]
    datadf = pd.DataFrame(data=metrics, columns=["readIDs", "quals", "lengths"]) \
        .dropna(axis='columns', how='all') \
        .dropna(axis='index', how='any')
    logging.info("Nanoget: ubam {} contains {} reads.".format(
        bam, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
    """Collect per-read metrics from an aligned bam file.

    Spawns one worker process per chromosome to extract read identifiers,
    (aligned) qualities, (aligned) lengths, mapping qualities and percent
    identity, then combines everything into a single pandas DataFrame.
    """
    logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
    samfile = check_bam(bam)
    chromosomes = samfile.references
    params = zip([bam] * len(chromosomes), chromosomes)
    with cfutures.ProcessPoolExecutor() as executor:
        per_chromosome = executor.map(extract_from_bam, params)
        rows = [entry for chunk in per_chromosome for entry in chunk]
    datadf = pd.DataFrame(
        data=rows,
        columns=["readIDs", "quals", "aligned_quals", "lengths",
                 "aligned_lengths", "mapQ", "percentIdentity"]) \
        .dropna(axis='columns', how='all') \
        .dropna(axis='index', how='any')
    logging.info("Nanoget: bam {} contains {} primary alignments.".format(
        bam, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
    """Collect per-read metrics from an aligned cram file.

    Spawns one worker process per chromosome to extract read identifiers,
    (aligned) qualities, (aligned) lengths, mapping qualities and percent
    identity, then combines everything into a single pandas DataFrame.
    """
    logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
    samfile = check_bam(cram, samtype="cram")
    chromosomes = samfile.references
    params = zip([cram] * len(chromosomes), chromosomes)
    with cfutures.ProcessPoolExecutor() as executor:
        per_chromosome = executor.map(extract_from_bam, params)
        rows = [entry for chunk in per_chromosome for entry in chunk]
    datadf = pd.DataFrame(
        data=rows,
        columns=["readIDs", "quals", "aligned_quals", "lengths",
                 "aligned_lengths", "mapQ", "percentIdentity"]) \
        .dropna(axis='columns', how='all') \
        .dropna(axis='index', how='any')
    logging.info("Nanoget: cram {} contains {} primary alignments.".format(
        cram, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
    """Worker: collect metrics for all primary alignments on one chromosome.

    `params` is a (bam path, chromosome name) tuple; returns a list of
    (name, quality, aligned quality, length, aligned length, mapQ, pID)
    tuples, skipping secondary alignments.
    """
    bam, chromosome = params
    samfile = pysam.AlignmentFile(bam, "rb")
    output = []
    for read in samfile.fetch(reference=chromosome, multiple_iterators=True):
        if read.is_secondary:
            continue
        output.append((read.query_name,
                       nanomath.ave_qual(read.query_qualities),
                       nanomath.ave_qual(read.query_alignment_qualities),
                       read.query_length,
                       read.query_alignment_length,
                       read.mapping_quality,
                       get_pID(read)))
    return output
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
try:
return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
/ read.query_alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
def parse_MD(MDlist):
    """Return the number of mismatched and deleted bases encoded in an MD tag.

    Digits and '^' act as separators, so what remains after splitting are the
    reference bases at mismatch/deletion positions; their total length is the
    count we want.  Uses a generator expression instead of materializing an
    intermediate list.
    """
    return sum(len(item) for item in re.split('[0-9^]', MDlist))
def parse_CIGAR(cigartuples):
    """Return the number of inserted bases from a list of CIGAR tuples.

    Each tuple is (operation, length); operation code 1 is an insertion (I).
    Uses a generator expression with tuple unpacking instead of building an
    intermediate list of indexed items.
    """
    return sum(length for operation, length in cigartuples if operation == 1)
def handle_compressed_input(inputfq, file_type="fastq"):
    """Open a sequence file, transparently decompressing gz/bgz/bz2 input.

    Compression is recognized purely from the file extension; a file with an
    unrecognized extension aborts the program with an explanatory message.
    """
    ut.check_existance(inputfq)
    if inputfq.endswith(('.gz', 'bgz')):
        import gzip
        logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
        handle = gzip.open(inputfq, 'rt')
    elif inputfq.endswith('.bz2'):
        import bz2
        logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
        handle = bz2.open(inputfq, 'rt')
    elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
        handle = open(inputfq, 'r')
    else:
        logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
        sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
                 'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
    return handle
def process_fasta(fasta, **kwargs):
    """Return a DataFrame with the length of every record in a fasta file."""
    logging.info("Nanoget: Starting to collect statistics from a fasta file.")
    fasta_handle = handle_compressed_input(fasta, file_type="fasta")
    lengths = [len(record) for record in SeqIO.parse(fasta_handle, "fasta")]
    return ut.reduce_memory_usage(
        pd.DataFrame(data=lengths, columns=["lengths"]).dropna())
def process_fastq_plain(fastq, **kwargs):
    """Return a DataFrame with average quality and length per fastq read."""
    logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
    fastq_handle = handle_compressed_input(fastq)
    metrics = [entry for entry in extract_from_fastq(fastq_handle) if entry]
    return ut.reduce_memory_usage(
        pd.DataFrame(data=metrics, columns=["quals", "lengths"]).dropna())
def extract_from_fastq(fq):
    """Yield (average quality, read length) for every record in a fastq handle."""
    for record in SeqIO.parse(fq, "fastq"):
        phred = record.letter_annotations["phred_quality"]
        yield nanomath.ave_qual(phred), len(record)
def extract_all_from_fastq(rec):
    """Return (identifier, length, average quality, median quality) for a record."""
    phred = rec.letter_annotations["phred_quality"]
    return (rec.id,
            len(rec),
            nanomath.ave_qual(phred),
            nanomath.median_qual(phred))
def info_to_dict(info):
    """Parse the key=value pairs from an albacore/MinKNOW fastq description.

    The first whitespace-separated token (the read identifier) is skipped.
    Each remaining token is split on its *first* '=' only, so values that
    themselves contain '=' are kept intact instead of being truncated (the
    previous double-split dropped everything after a second '=').
    """
    return dict(field.split('=', 1) for field in info.split(' ')[1:])
def process_fastq_rich(fastq, **kwargs):
    """Extract metrics from a fastq file with albacore/MinKNOW headers.

    Such headers carry key=value pairs, e.g.
    read=<int> [72]
    ch=<int> [159]
    start_time=<timestamp> [2016-07-15T14:23:22Z]  # UTC ISO 8601 / RFC 3339
    Returns a DataFrame with quality, length, channel, timestamp and run id;
    a record missing any expected field aborts the program.
    """
    logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
    fastq_handle = handle_compressed_input(fastq)
    metrics = []
    for record in SeqIO.parse(fastq_handle, "fastq"):
        try:
            fields = info_to_dict(record.description)
            metrics.append(
                (nanomath.ave_qual(record.letter_annotations["phred_quality"]),
                 len(record),
                 fields["ch"],
                 fields["start_time"],
                 fields["runid"]))
        except KeyError:
            logging.error("Nanoget: keyerror when processing record {}".format(record.description))
            sys.exit("Unexpected fastq identifier:\n{}\n\n \
                missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
                record.description))
    df = pd.DataFrame(
        data=metrics,
        columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
    df["channelIDs"] = df["channelIDs"].astype("int64")
    return ut.reduce_memory_usage(df)
def readfq(fp):
    """Generator function adapted from https://github.com/lh3/readfq.

    Yields (name, sequence, qualities) per record from a fasta or fastq
    handle; ``qualities`` is None for fasta records, and also when EOF is
    reached before enough quality characters were read for a fastq record.
    Lines are assumed to be newline-terminated (``l[:-1]`` strips it).
    """
    last = None  # this is a buffer keeping the last unprocessed line
    while True:  # mimic closure; is it a bad idea?
        if not last:  # the first record or a record following a fastq
            for l in fp:  # search for the start of the next record
                if l[0] in '>@':  # fasta/q header line
                    last = l[:-1]  # save this line
                    break
            if not last:
                break
        # read name = first whitespace-separated token of the header
        name, seqs, last = last[1:].partition(" ")[0], [], None
        for l in fp:  # read the sequence
            if l[0] in '@+>':
                last = l[:-1]
                break
            seqs.append(l[:-1])
        if not last or last[0] != '+':  # this is a fasta record
            yield name, ''.join(seqs), None  # yield a fasta record
            if not last:
                break
        else:  # this is a fastq record
            seq, leng, seqs = ''.join(seqs), 0, []
            for l in fp:  # read the quality
                seqs.append(l[:-1])
                leng += len(l) - 1
                # quality may span several lines; stop once it covers the sequence
                if leng >= len(seq):  # have read enough quality
                    last = None
                    yield name, seq, ''.join(seqs)  # yield a fastq record
                    break
            if last:  # reach EOF before reading enough quality
                yield name, seq, None  # yield a fasta record instead
                break
def fq_minimal(fq):
    """Quickly yield (timestamp, length) per read from a fastq_rich handle.

    Assumes strict 4-line records whose header carries start_time as the
    fifth whitespace-separated field; there will be dragons if another
    format is used.  A trailing None is yielded once the input is exhausted.
    """
    try:
        while True:
            header = next(fq)
            timestamp = header[1:].split(" ")[4][11:-1]
            length = len(next(fq))
            next(fq)  # skip the '+' separator line
            next(fq)  # skip the quality line
            yield timestamp, length
    except StopIteration:
        yield None
def process_fastq_minimal(fastq, **kwargs):
    """Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
    fastq_handle = handle_compressed_input(fastq)
    try:
        df = pd.DataFrame(
            data=[entry for entry in fq_minimal(fastq_handle) if entry],
            columns=["timestamp", "lengths"]
        )
    except IndexError:
        logging.error("Fatal: Incorrect file structure for fastq_minimal")
        sys.exit("Error: file does not match expected structure for fastq_minimal")
    return ut.reduce_memory_usage(df)
|
wdecoster/nanoget | nanoget/extraction_functions.py | extract_all_from_fastq | python | def extract_all_from_fastq(rec):
return (rec.id,
len(rec),
nanomath.ave_qual(rec.letter_annotations["phred_quality"]),
nanomath.median_qual(rec.letter_annotations["phred_quality"])) | Extract metrics from a fastq file.
Return identifier, read length, average quality and median quality | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L325-L333 | null | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
    """Extract information from an albacore summary file.

    Only reads which have a >0 length are returned.
    The fields below may or may not exist, depending on the type of sequencing performed.
    Fields 1-14 are for 1D sequencing.
    Fields 1-23 for 2D sequencing.
    Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
    Fields 28-38 for barcoded workflows
     1  filename
     2  read_id
     3  run_id
     4  channel
     5  start_time
     6  duration
     7  num_events
     8  template_start
     9  num_events_template
    10  template_duration
    11  num_called_template
    12  sequence_length_template
    13  mean_qscore_template
    14  strand_score_template
    15  complement_start
    16  num_events_complement
    17  complement_duration
    18  num_called_complement
    19  sequence_length_complement
    20  mean_qscore_complement
    21  strand_score_complement
    22  sequence_length_2d
    23  mean_qscore_2d
    24  filename1
    25  filename2
    26  read_id1
    27  read_id2
    28  barcode_arrangement
    29  barcode_score
    30  barcode_full_arrangement
    31  front_score
    32  rear_score
    33  front_begin_index
    34  front_foundseq_length
    35  rear_end_index
    36  rear_foundseq_length
    37  kit
    38  variant
    """
    logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
        summaryfile, kwargs["readtype"]))
    ut.check_existance(summaryfile)
    if kwargs["readtype"] == "1D":
        cols = ["read_id", "run_id", "channel", "start_time", "duration",
                "sequence_length_template", "mean_qscore_template"]
    elif kwargs["readtype"] in ["2D", "1D2"]:
        cols = ["read_id", "run_id", "channel", "start_time", "duration",
                "sequence_length_2d", "mean_qscore_2d"]
    else:
        # Previously an unexpected readtype fell through and crashed further
        # down with a confusing NameError on `cols`; fail fast instead.
        logging.error("Nanoget: unsupported readtype {}".format(kwargs["readtype"]))
        sys.exit("Unsupported readtype {}: expected 1D, 2D or 1D2".format(kwargs["readtype"]))
    if kwargs["barcoded"]:
        cols.append("barcode_arrangement")
        logging.info("Nanoget: Extracting metrics per barcode.")
    try:
        datadf = pd.read_csv(
            filepath_or_buffer=summaryfile,
            sep="\t",
            usecols=cols,
        )
    except ValueError:
        logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
            summaryfile, ', '.join(cols)))
        sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
            summaryfile, ', '.join(cols)))
    # NOTE(review): renaming by position assumes the file's column order matches
    # `cols`; with usecols pandas keeps the *file* order -- confirm for exotic files.
    if kwargs["barcoded"]:
        datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
                          "lengths", "quals", "barcode"]
    else:
        datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
    logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
    # drop zero-length reads before downcasting dtypes
    return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb")
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
params = zip([bam] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: bam {} contains {} primary alignments.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
params = zip([cram] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: cram {} contains {} primary alignments.".format(
cram, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
"""Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length
"""
bam, chromosome = params
samfile = pysam.AlignmentFile(bam, "rb")
return [
(read.query_name,
nanomath.ave_qual(read.query_qualities),
nanomath.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary]
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
try:
return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
/ read.query_alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
def parse_MD(MDlist):
"""Parse MD string to get number of mismatches and deletions."""
return sum([len(item) for item in re.split('[0-9^]', MDlist)])
def parse_CIGAR(cigartuples):
"""Count the insertions in the read using the CIGAR string."""
return sum([item[1] for item in cigartuples if item[0] == 1])
def handle_compressed_input(inputfq, file_type="fastq"):
"""Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression
"""
ut.check_existance(inputfq)
if inputfq.endswith(('.gz', 'bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
def process_fasta(fasta, **kwargs):
"""Combine metrics extracted from a fasta file."""
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna())
def process_fastq_plain(fastq, **kwargs):
"""Combine metrics extracted from a fastq file."""
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna())
def extract_from_fastq(fq):
"""Extract metrics from a fastq file.
Return average quality and read length
"""
for rec in SeqIO.parse(fq, "fastq"):
yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
    """Generator yielding full per-read metrics from a fastq file.

    Each yielded item is (name, length, average quality, median quality),
    produced by a pool of `threads` worker processes.
    """
    logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
    fastq_handle = handle_compressed_input(fastq)
    with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
        yield from executor.map(extract_all_from_fastq, SeqIO.parse(fastq_handle, "fastq"))
    logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def info_to_dict(info):
"""Get the key-value pairs from the albacore/minknow fastq description and return dict"""
return {field.split('=')[0]: field.split('=')[1] for field in info.split(' ')[1:]}
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(nanomath.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df)
def readfq(fp):
"""Generator function adapted from https://github.com/lh3/readfq."""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last:
break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last:
break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs) # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
Quickly parse a fasta/fastq file - but makes expectations on the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
def process_fastq_minimal(fastq, **kwargs):
"""Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df)
|
wdecoster/nanoget | nanoget/extraction_functions.py | process_fastq_rich | python | def process_fastq_rich(fastq, **kwargs):
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(nanomath.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df) | Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc()) | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L341-L374 | [
"def reduce_memory_usage(df):\n \"\"\"reduce memory usage of the dataframe\n\n - convert runIDs to categorical\n - downcast ints and floats\n \"\"\"\n usage_pre = df.memory_usage(deep=True).sum()\n if \"runIDs\" in df:\n df.loc[:, \"runIDs\"] = df.loc[:, \"runIDs\"].astype(\"category\")\n df_int = df.select_dtypes(include=['int'])\n df_float = df.select_dtypes(include=['float'])\n df.loc[:, df_int.columns] = df_int.apply(pd.to_numeric, downcast='unsigned')\n df.loc[:, df_float.columns] = df_float.apply(pd.to_numeric, downcast='float')\n usage_post = df.memory_usage(deep=True).sum()\n logging.info(\"Reduced DataFrame memory usage from {}Mb to {}Mb\".format(\n usage_pre / 1024**2, usage_post / 1024**2))\n if usage_post > 4e9 and \"readIDs\" in df:\n logging.info(\"DataFrame of features is too big, dropping read identifiers.\")\n return df.drop([\"readIDs\"], axis=1, errors=\"ignore\")\n else:\n return df\n",
"def handle_compressed_input(inputfq, file_type=\"fastq\"):\n \"\"\"Return handles from compressed files according to extension.\n\n Check for which fastq input is presented and open a handle accordingly\n Can read from compressed files (gz, bz2, bgz) or uncompressed\n Relies on file extensions to recognize compression\n \"\"\"\n ut.check_existance(inputfq)\n if inputfq.endswith(('.gz', 'bgz')):\n import gzip\n logging.info(\"Nanoget: Decompressing gzipped {} {}\".format(file_type, inputfq))\n return gzip.open(inputfq, 'rt')\n elif inputfq.endswith('.bz2'):\n import bz2\n logging.info(\"Nanoget: Decompressing bz2 compressed {} {}\".format(file_type, inputfq))\n return bz2.open(inputfq, 'rt')\n elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):\n return open(inputfq, 'r')\n else:\n logging.error(\"INPUT ERROR: Unrecognized file extension {}\".format(inputfq))\n sys.exit('INPUT ERROR:\\nUnrecognized file extension in {}\\n'\n 'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))\n",
"def info_to_dict(info):\n \"\"\"Get the key-value pairs from the albacore/minknow fastq description and return dict\"\"\"\n return {field.split('=')[0]: field.split('=')[1] for field in info.split(' ')[1:]}\n"
] | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
"""
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
"lengths", "quals", "barcode"]
else:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
    """Check that a bam/cram file is valid and return an open pysam handle.

    The file must:
    - exist on disk
    - have an index (one is created on the fly if missing)
    - be sorted by coordinate
    - contain at least one mapped read (only enforced for samtype="bam")
    """
    ut.check_existance(bam)
    samfile = pysam.AlignmentFile(bam, "rb")
    if not samfile.has_index():
        pysam.index(bam)
        # Need to reload the samfile after creating index
        samfile = pysam.AlignmentFile(bam, "rb")
        logging.info("Nanoget: No index for bam file could be found, created index.")
    # NOTE(review): assumes the header always carries HD/SO tags; a header
    # lacking them would raise KeyError here instead of the clean exit below.
    if samfile.header['HD']['SO'] != 'coordinate':
        logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
        sys.exit("Please use a bam file sorted by coordinate.")
    if samtype == "bam":
        logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
            bam, samfile.mapped, samfile.unmapped))
        if samfile.mapped == 0:
            logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
            sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
    return samfile
def process_ubam(bam, **kwargs):
    """Collect metrics from an unaligned bam file.

    Extracts read identifier, mean base quality and read length
    for every record and returns them as a pandas DataFrame.
    """
    logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
    samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
    if not samfile.has_index():
        pysam.index(bam)
        # Need to reload the samfile after creating index
        samfile = pysam.AlignmentFile(bam, "rb")
        logging.info("Nanoget: No index for bam file could be found, created index.")
    rows = [(aln.query_name, nanomath.ave_qual(aln.query_qualities), aln.query_length)
            for aln in samfile.fetch(until_eof=True)]
    datadf = pd.DataFrame(data=rows, columns=["readIDs", "quals", "lengths"]) \
        .dropna(axis='columns', how='all') \
        .dropna(axis='index', how='any')
    logging.info("Nanoget: ubam {} contains {} reads.".format(
        bam, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
    """Collect per-read metrics from an aligned bam file.

    Fans out one worker per chromosome (extract_from_bam) and combines
    the results into a pandas DataFrame with read identifiers, qualities,
    aligned qualities, lengths, aligned lengths, mapping qualities and
    percent identity to the reference.
    """
    logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
    samfile = check_bam(bam)
    chromosomes = samfile.references
    params = zip([bam] * len(chromosomes), chromosomes)
    with cfutures.ProcessPoolExecutor() as executor:
        rows = []
        for chrom_results in executor.map(extract_from_bam, params):
            rows.extend(chrom_results)
        datadf = pd.DataFrame(
            data=rows,
            columns=["readIDs", "quals", "aligned_quals", "lengths",
                     "aligned_lengths", "mapQ", "percentIdentity"]) \
            .dropna(axis='columns', how='all') \
            .dropna(axis='index', how='any')
    logging.info("Nanoget: bam {} contains {} primary alignments.".format(
        bam, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
    """Combines metrics from cram after extraction.

    Processing function: calls pool of worker functions
    to extract from a cram file the following metrics:
    -lengths
    -aligned lengths
    -qualities
    -aligned qualities
    -mapping qualities
    -edit distances to the reference genome scaled by read length
    Returned in a pandas DataFrame
    """
    logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
    # samtype="cram" skips the mapped/unmapped-count check done for bam files.
    # NOTE(review): check_bam and extract_from_bam open with mode "rb";
    # presumably pysam auto-detects the CRAM container -- confirm.
    samfile = check_bam(cram, samtype="cram")
    chromosomes = samfile.references
    # One (file path, chromosome) pair per worker process.
    params = zip([cram] * len(chromosomes), chromosomes)
    with cfutures.ProcessPoolExecutor() as executor:
        datadf = pd.DataFrame(
            data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
            columns=["readIDs", "quals", "aligned_quals", "lengths",
                     "aligned_lengths", "mapQ", "percentIdentity"]) \
            .dropna(axis='columns', how='all') \
            .dropna(axis='index', how='any')
    logging.info("Nanoget: cram {} contains {} primary alignments.".format(
        cram, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
    """Extracts metrics from bam.

    Worker function per chromosome:
    loops over one chromosome of a bam file and returns a list of tuples with
    -read name
    -average quality
    -average aligned quality
    -read length
    -aligned length
    -mapping quality
    -percent identity to the reference (see get_pID)

    :param params: tuple of (path to bam file, chromosome name)
    """
    bam, chromosome = params
    # Each worker process opens its own file handle; presumably pysam
    # handles cannot be shared across processes -- TODO confirm.
    samfile = pysam.AlignmentFile(bam, "rb")
    return [
        (read.query_name,
         nanomath.ave_qual(read.query_qualities),
         nanomath.ave_qual(read.query_alignment_qualities),
         read.query_length,
         read.query_alignment_length,
         read.mapping_quality,
         get_pID(read))
        # multiple_iterators=True gives this worker an independent iterator;
        # secondary alignments are excluded from the metrics.
        for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
        if not read.is_secondary]
def get_pID(read):
    """Return the percent identity of a read, or None when it cannot be computed.

    Uses the NM tag when present; otherwise derives the edit distance from
    the MD tag plus insertions counted from the CIGAR string.
    read.query_alignment_length can be zero in the case of ultra long reads
    aligned with minimap2 -L, hence the ZeroDivisionError handling.
    """
    try:
        return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
    except KeyError:
        try:
            return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
                          / read.query_alignment_length)
        # Bug fix: a zero alignment length on this MD fallback path previously
        # escaped -- the outer ZeroDivisionError clause does not catch
        # exceptions raised while handling the KeyError.
        except (KeyError, ZeroDivisionError):
            return None
    except ZeroDivisionError:
        return None
def parse_MD(MDlist):
    """Parse MD string to get number of mismatches and deletions."""
    # Digits and '^' separate the mismatched/deleted reference bases;
    # every remaining character is one mismatch or deletion.
    total = 0
    for fragment in re.split('[0-9^]', MDlist):
        total += len(fragment)
    return total
def parse_CIGAR(cigartuples):
    """Count the insertions in the read using the CIGAR string."""
    # Operation code 1 is an insertion to the reference.
    inserted = 0
    for operation, length in cigartuples:
        if operation == 1:
            inserted += length
    return inserted
def handle_compressed_input(inputfq, file_type="fastq"):
    """Open a (possibly compressed) sequence file and return a text handle.

    Compression is recognized purely from the file extension:
    gzip (.gz/.bgz) and bzip2 (.bz2) are decompressed transparently,
    plain fasta/fastq extensions are opened as-is, and anything else
    terminates the program with an error message.
    """
    ut.check_existance(inputfq)
    if inputfq.endswith(('.gz', 'bgz')):
        import gzip
        logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
        handle = gzip.open(inputfq, 'rt')
    elif inputfq.endswith('.bz2'):
        import bz2
        logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
        handle = bz2.open(inputfq, 'rt')
    elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
        handle = open(inputfq, 'r')
    else:
        logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
        sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
                 'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
    return handle
def process_fasta(fasta, **kwargs):
    """Combine metrics extracted from a fasta file."""
    logging.info("Nanoget: Starting to collect statistics from a fasta file.")
    inputfasta = handle_compressed_input(fasta, file_type="fasta")
    # Only read lengths are available for fasta input.
    lengths = [len(record) for record in SeqIO.parse(inputfasta, "fasta")]
    return ut.reduce_memory_usage(
        pd.DataFrame(data=lengths, columns=["lengths"]).dropna())
def process_fastq_plain(fastq, **kwargs):
    """Combine metrics extracted from a fastq file."""
    logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
    inputfastq = handle_compressed_input(fastq)
    # Drop falsy entries before building the frame.
    metrics = [entry for entry in extract_from_fastq(inputfastq) if entry]
    return ut.reduce_memory_usage(
        pd.DataFrame(data=metrics, columns=["quals", "lengths"]).dropna())
def extract_from_fastq(fq):
    """Extract metrics from a fastq file.

    Yield (average quality, read length) per record.
    """
    for rec in SeqIO.parse(fq, "fastq"):
        yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
    """Generator for returning metrics extracted from fastq.

    Extract from a fastq file:
    -readname
    -average and median quality
    -read_length

    Records are distributed over a pool of *threads* worker processes and
    one result tuple is yielded at a time (see extract_all_from_fastq).
    """
    logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
    inputfastq = handle_compressed_input(fastq)
    with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
        for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
            yield results
    logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
    """Extract metrics from a single fastq record.

    Return identifier, read length, average quality and median quality.
    """
    phred = rec.letter_annotations["phred_quality"]
    return rec.id, len(rec), nanomath.ave_qual(phred), nanomath.median_qual(phred)
def info_to_dict(info):
    """Get the key-value pairs from the albacore/minknow fastq description and return dict.

    Splits each field on the first '=' only, so values that themselves
    contain '=' (previously truncated) are preserved intact.
    """
    return {field.split('=', 1)[0]: field.split('=', 1)[1] for field in info.split(' ')[1:]}
def readfq(fp):
    """Generator function adapted from https://github.com/lh3/readfq.

    Yields (name, sequence, quality) tuples for both fasta and fastq input;
    quality is None for fasta records (or when EOF truncates the qualities).
    """
    last = None  # this is a buffer keeping the last unprocessed line
    while True:  # mimic closure; is it a bad idea?
        if not last:  # the first record or a record following a fastq
            for l in fp:  # search for the start of the next record
                if l[0] in '>@':  # fasta/q header line
                    last = l[:-1]  # save this line
                    break
            if not last:
                break
        # the record name is the header up to the first space
        name, seqs, last = last[1:].partition(" ")[0], [], None
        for l in fp:  # read the sequence
            if l[0] in '@+>':
                last = l[:-1]
                break
            seqs.append(l[:-1])
        if not last or last[0] != '+':  # this is a fasta record
            yield name, ''.join(seqs), None  # yield a fasta record
            if not last:
                break
        else:  # this is a fastq record
            seq, leng, seqs = ''.join(seqs), 0, []
            for l in fp:  # read the quality
                seqs.append(l[:-1])
                leng += len(l) - 1
                # quality may span lines: stop once enough characters are read
                if leng >= len(seq):  # have read enough quality
                    last = None
                    yield name, seq, ''.join(seqs)  # yield a fastq record
                    break
            if last:  # reach EOF before reading enough quality
                yield name, seq, None  # yield a fasta record instead
                break
def fq_minimal(fq):
    """Minimal fastq metrics extractor.

    Quickly parse a fasta/fastq file - but makes expectations on the file format
    There will be dragons if unexpected format is used
    Expects a fastq_rich format, but extracts only timestamp and length
    """
    try:
        while True:
            header = next(fq)
            # Fifth whitespace-separated token after '@' is the
            # start_time=... field; slice off the 11-char key prefix
            # and the trailing character.
            timestamp = header[1:].split(" ")[4][11:-1]
            seq_len = len(next(fq))
            next(fq)  # skip the '+' separator line
            next(fq)  # skip the quality line
            yield timestamp, seq_len
    except StopIteration:
        yield None
def process_fastq_minimal(fastq, **kwargs):
    """Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
    infastq = handle_compressed_input(fastq)
    try:
        # The comprehension must stay inside the try: fq_minimal raises
        # IndexError lazily, while its records are consumed.
        records = [entry for entry in fq_minimal(infastq) if entry]
        df = pd.DataFrame(data=records, columns=["timestamp", "lengths"])
    except IndexError:
        logging.error("Fatal: Incorrect file structure for fastq_minimal")
        sys.exit("Error: file does not match expected structure for fastq_minimal")
    return ut.reduce_memory_usage(df)
|
wdecoster/nanoget | nanoget/extraction_functions.py | readfq | python | def readfq(fp):
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last:
break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last:
break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs) # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break | Generator function adapted from https://github.com/lh3/readfq. | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L377-L409 | null | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
"""
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
"lengths", "quals", "barcode"]
else:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb")
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
params = zip([bam] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: bam {} contains {} primary alignments.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
params = zip([cram] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: cram {} contains {} primary alignments.".format(
cram, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
"""Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length
"""
bam, chromosome = params
samfile = pysam.AlignmentFile(bam, "rb")
return [
(read.query_name,
nanomath.ave_qual(read.query_qualities),
nanomath.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary]
def get_pID(read):
    """Return the percent identity of a read, or None when it cannot be computed.

    Uses the NM tag when present; otherwise derives the edit distance from
    the MD tag plus insertions counted from the CIGAR string.
    read.query_alignment_length can be zero in the case of ultra long reads
    aligned with minimap2 -L, hence the ZeroDivisionError handling.
    """
    try:
        return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
    except KeyError:
        try:
            return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
                          / read.query_alignment_length)
        # Bug fix: a zero alignment length on this MD fallback path previously
        # escaped -- the outer ZeroDivisionError clause does not catch
        # exceptions raised while handling the KeyError.
        except (KeyError, ZeroDivisionError):
            return None
    except ZeroDivisionError:
        return None
def parse_MD(MDlist):
"""Parse MD string to get number of mismatches and deletions."""
return sum([len(item) for item in re.split('[0-9^]', MDlist)])
def parse_CIGAR(cigartuples):
"""Count the insertions in the read using the CIGAR string."""
return sum([item[1] for item in cigartuples if item[0] == 1])
def handle_compressed_input(inputfq, file_type="fastq"):
"""Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression
"""
ut.check_existance(inputfq)
if inputfq.endswith(('.gz', 'bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
def process_fasta(fasta, **kwargs):
"""Combine metrics extracted from a fasta file."""
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna())
def process_fastq_plain(fastq, **kwargs):
"""Combine metrics extracted from a fastq file."""
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna())
def extract_from_fastq(fq):
"""Extract metrics from a fastq file.
Return average quality and read length
"""
for rec in SeqIO.parse(fq, "fastq"):
yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
"""Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
-read_lenght
"""
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
"""Extract metrics from a fastq file.
Return identifier, read length, average quality and median quality
"""
return (rec.id,
len(rec),
nanomath.ave_qual(rec.letter_annotations["phred_quality"]),
nanomath.median_qual(rec.letter_annotations["phred_quality"]))
def info_to_dict(info):
    """Get the key-value pairs from the albacore/minknow fastq description and return dict.

    Splits each field on the first '=' only, so values that themselves
    contain '=' (previously truncated) are preserved intact.
    """
    return {field.split('=', 1)[0]: field.split('=', 1)[1] for field in info.split(' ')[1:]}
def process_fastq_rich(fastq, **kwargs):
    """Extract metrics from a richer fastq file.

    Extract information from fastq files generated by albacore or MinKNOW,
    containing richer information in the header (key-value pairs)
    read=<int> [72]
    ch=<int> [159]
    start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
    Z indicates UTC time, T is the delimiter between date expression and time expression
    dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
    -> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
    """
    logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
    inputfastq = handle_compressed_input(fastq)
    res = []
    for record in SeqIO.parse(inputfastq, "fastq"):
        try:
            # Header key-value pairs must include 'ch', 'start_time' and 'runid'.
            read_info = info_to_dict(record.description)
            res.append(
                (nanomath.ave_qual(record.letter_annotations["phred_quality"]),
                 len(record),
                 read_info["ch"],
                 read_info["start_time"],
                 read_info["runid"]))
        except KeyError:
            logging.error("Nanoget: keyerror when processing record {}".format(record.description))
            sys.exit("Unexpected fastq identifier:\n{}\n\n \
            missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
                record.description))
    df = pd.DataFrame(
        data=res,
        columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
    # Channel numbers arrive as strings from the header; cast for grouping.
    df["channelIDs"] = df["channelIDs"].astype("int64")
    return ut.reduce_memory_usage(df)
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
Quickly parse a fasta/fastq file - but makes expectations on the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
def process_fastq_minimal(fastq, **kwargs):
"""Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df)
|
wdecoster/nanoget | nanoget/extraction_functions.py | fq_minimal | python | def fq_minimal(fq):
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None | Minimal fastq metrics extractor.
Quickly parse a fasta/fastq file - but makes expectations on the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L412-L427 | null | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
"""Extracting information from an albacore summary file.
Only reads which have a >0 length are returned.
The fields below may or may not exist, depending on the type of sequencing performed.
Fields 1-14 are for 1D sequencing.
Fields 1-23 for 2D sequencing.
Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
Fields 28-38 for barcoded workflows
1 filename
2 read_id
3 run_id
4 channel
5 start_time
6 duration
7 num_events
8 template_start
9 num_events_template
10 template_duration
11 num_called_template
12 sequence_length_template
13 mean_qscore_template
14 strand_score_template
15 complement_start
16 num_events_complement
17 complement_duration
18 num_called_complement
19 sequence_length_complement
20 mean_qscore_complement
21 strand_score_complement
22 sequence_length_2d
23 mean_qscore_2d
24 filename1
25 filename2
26 read_id1
27 read_id2
28 barcode_arrangement
29 barcode_score
30 barcode_full_arrangement
31 front_score
32 rear_score
33 front_begin_index
34 front_foundseq_length
35 rear_end_index
36 rear_foundseq_length
37 kit
38 variant
"""
logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
summaryfile, kwargs["readtype"]))
ut.check_existance(summaryfile)
if kwargs["readtype"] == "1D":
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_template", "mean_qscore_template"]
elif kwargs["readtype"] in ["2D", "1D2"]:
cols = ["read_id", "run_id", "channel", "start_time", "duration",
"sequence_length_2d", "mean_qscore_2d"]
if kwargs["barcoded"]:
cols.append("barcode_arrangement")
logging.info("Nanoget: Extracting metrics per barcode.")
try:
datadf = pd.read_csv(
filepath_or_buffer=summaryfile,
sep="\t",
usecols=cols,
)
except ValueError:
logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
summaryfile, ', '.join(cols)))
sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
summaryfile, ', '.join(cols)))
if kwargs["barcoded"]:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
"lengths", "quals", "barcode"]
else:
datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
"""Check if bam file is valid.
Bam file should:
- exists
- has an index (create if necessary)
- is sorted by coordinate
- has at least one mapped read
"""
ut.check_existance(bam)
samfile = pysam.AlignmentFile(bam, "rb")
if not samfile.has_index():
pysam.index(bam)
samfile = pysam.AlignmentFile(bam, "rb") # Need to reload the samfile after creating index
logging.info("Nanoget: No index for bam file could be found, created index.")
if not samfile.header['HD']['SO'] == 'coordinate':
logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
sys.exit("Please use a bam file sorted by coordinate.")
if samtype == "bam":
logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
bam, samfile.mapped, samfile.unmapped))
if samfile.mapped == 0:
logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
return samfile
def process_ubam(bam, **kwargs):
"""Extracting metrics from unaligned bam format
Extracting lengths
"""
logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
if not samfile.has_index():
pysam.index(bam)
# Need to reload the samfile after creating index
samfile = pysam.AlignmentFile(bam, "rb")
logging.info("Nanoget: No index for bam file could be found, created index.")
datadf = pd.DataFrame(
data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
for read in samfile.fetch(until_eof=True)],
columns=["readIDs", "quals", "lengths"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: ubam {} contains {} reads.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
"""Combines metrics from bam after extraction.
Processing function: calls pool of worker functions
to extract from a bam file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
samfile = check_bam(bam)
chromosomes = samfile.references
params = zip([bam] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: bam {} contains {} primary alignments.".format(
bam, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
params = zip([cram] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: cram {} contains {} primary alignments.".format(
cram, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
    """Worker: extract alignment metrics for one chromosome of a bam file.

    ``params`` is a (bam path, chromosome name) tuple.  Returns a list of
    tuples (read name, average quality, average aligned quality, read
    length, aligned length, mapping quality, percent identity) for every
    non-secondary alignment on that chromosome.
    """
    bam, chromosome = params
    samfile = pysam.AlignmentFile(bam, "rb")
    metrics = []
    # multiple_iterators lets concurrent workers fetch from the same bam
    for read in samfile.fetch(reference=chromosome, multiple_iterators=True):
        if read.is_secondary:
            continue
        metrics.append(
            (read.query_name,
             nanomath.ave_qual(read.query_qualities),
             nanomath.ave_qual(read.query_alignment_qualities),
             read.query_length,
             read.query_alignment_length,
             read.mapping_quality,
             get_pID(read)))
    return metrics
def get_pID(read):
    """Return the percent identity of a read.

    Uses the NM tag when present; otherwise reconstructs the edit distance
    from the MD tag (mismatches + deleted bases) plus the insertions
    counted from the CIGAR string.  Returns None when neither tag is
    available or when read.query_alignment_length is zero, which can
    happen for ultra long reads aligned with minimap2 -L.
    """
    try:
        return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
    except KeyError:
        try:
            return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
                          / read.query_alignment_length)
        # bug fix: the original only caught KeyError here, so a zero-length
        # alignment on this fallback path raised ZeroDivisionError uncaught
        # (the outer ZeroDivisionError clause does not cover code running
        # inside the outer except-handler).
        except (KeyError, ZeroDivisionError):
            return None
    except ZeroDivisionError:
        return None


def parse_MD(MDlist):
    """Parse MD string to get number of mismatches and deletions."""
    # digits encode runs of matches and '^' introduces a deletion; what
    # remains after splitting on both are mismatched/deleted bases
    return sum([len(item) for item in re.split('[0-9^]', MDlist)])


def parse_CIGAR(cigartuples):
    """Count the insertions in the read using the CIGAR string."""
    # CIGAR operation code 1 == insertion
    return sum([item[1] for item in cigartuples if item[0] == 1])
def handle_compressed_input(inputfq, file_type="fastq"):
    """Return a text-mode handle for a possibly compressed sequence file.

    Compression (gz, bgz, bz2) is recognized purely from the file
    extension; plain fastq/fasta extensions are opened directly.  Exits
    the program on an unrecognized extension.
    """
    ut.check_existance(inputfq)
    if inputfq.endswith(('.gz', 'bgz')):
        import gzip
        logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
        opener = gzip.open
    elif inputfq.endswith('.bz2'):
        import bz2
        logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
        opener = bz2.open
    elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
        return open(inputfq, 'r')
    else:
        logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
        sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
                 'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
    # 'rt' gives a text-mode handle over the decompressed stream
    return opener(inputfq, 'rt')
def process_fasta(fasta, **kwargs):
    """Collect read lengths from a (possibly compressed) fasta file."""
    logging.info("Nanoget: Starting to collect statistics from a fasta file.")
    handle = handle_compressed_input(fasta, file_type="fasta")
    lengths = [len(entry) for entry in SeqIO.parse(handle, "fasta")]
    frame = pd.DataFrame(data=lengths, columns=["lengths"]).dropna()
    return ut.reduce_memory_usage(frame)
def process_fastq_plain(fastq, **kwargs):
    """Collect average quality and read length from a plain fastq file."""
    logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
    handle = handle_compressed_input(fastq)
    # drop falsy entries before building the frame, as the original did
    records = [entry for entry in extract_from_fastq(handle) if entry]
    frame = pd.DataFrame(data=records, columns=["quals", "lengths"]).dropna()
    return ut.reduce_memory_usage(frame)
def extract_from_fastq(fq):
    """Yield (average quality, read length) for each record in a fastq handle."""
    for record in SeqIO.parse(fq, "fastq"):
        phred = record.letter_annotations["phred_quality"]
        yield nanomath.ave_qual(phred), len(record)
def stream_fastq_full(fastq, threads):
    """Generator yielding per-read metrics from a (compressed) fastq file.

    Each yielded tuple holds the read name, read length, average quality
    and median quality; extraction is spread over ``threads`` worker
    processes.
    """
    logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
    handle = handle_compressed_input(fastq)
    with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
        yield from executor.map(extract_all_from_fastq, SeqIO.parse(handle, "fastq"))
    logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
    """Return (identifier, length, average quality, median quality) for one record."""
    phred = rec.letter_annotations["phred_quality"]
    return rec.id, len(rec), nanomath.ave_qual(phred), nanomath.median_qual(phred)
def info_to_dict(info):
    """Get the key-value pairs from the albacore/minknow fastq description and return dict

    The first whitespace-separated token (the read identifier) is skipped.
    Each remaining token is split on its *first* '=' only, so values that
    themselves contain '=' are kept intact (the original split('=')[1]
    truncated them), and a token without '=' maps to an empty string
    instead of raising IndexError.
    """
    return {field.partition('=')[0]: field.partition('=')[2]
            for field in info.split(' ')[1:]}
def process_fastq_rich(fastq, **kwargs):
    """Extract metrics from a richer fastq file.

    Extract information from fastq files generated by albacore or MinKNOW,
    containing richer information in the header (key-value pairs)
    read=<int> [72]
    ch=<int> [159]
    start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
    Z indicates UTC time, T is the delimiter between date expression and time expression
    dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
    -> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())

    Returns a DataFrame with columns quals, lengths, channelIDs, timestamp
    and runIDs.  Exits the program on the first record that lacks one of
    the expected header fields.
    """
    logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
    inputfastq = handle_compressed_input(fastq)
    res = []
    for record in SeqIO.parse(inputfastq, "fastq"):
        try:
            # the description carries key=value pairs after the identifier
            read_info = info_to_dict(record.description)
            res.append(
                (nanomath.ave_qual(record.letter_annotations["phred_quality"]),
                 len(record),
                 read_info["ch"],
                 read_info["start_time"],
                 read_info["runid"]))
        except KeyError:
            # a record missing any required field is treated as fatal
            logging.error("Nanoget: keyerror when processing record {}".format(record.description))
            sys.exit("Unexpected fastq identifier:\n{}\n\n \
                missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
                record.description))
    df = pd.DataFrame(
        data=res,
        columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
    # channel numbers come out of the header as strings; cast for downstream use
    df["channelIDs"] = df["channelIDs"].astype("int64")
    return ut.reduce_memory_usage(df)
def readfq(fp):
    """Generator function adapted from https://github.com/lh3/readfq.

    Yields (name, sequence, qualities) tuples from a fasta or fastq
    handle.  ``qualities`` is None for fasta records, and also for a
    final fastq record whose quality lines are truncated at EOF.
    """
    last = None  # this is a buffer keeping the last unprocessed line
    while True:  # mimic closure; is it a bad idea?
        if not last:  # the first record or a record following a fastq
            for l in fp:  # search for the start of the next record
                if l[0] in '>@':  # fasta/q header line
                    last = l[:-1]  # save this line
                    break
            if not last:
                break
        # name is the header up to the first space; sequence lines follow
        name, seqs, last = last[1:].partition(" ")[0], [], None
        for l in fp:  # read the sequence
            if l[0] in '@+>':
                last = l[:-1]
                break
            seqs.append(l[:-1])
        if not last or last[0] != '+':  # this is a fasta record
            yield name, ''.join(seqs), None  # yield a fasta record
            if not last:
                break
        else:  # this is a fastq record
            seq, leng, seqs = ''.join(seqs), 0, []
            for l in fp:  # read the quality
                seqs.append(l[:-1])
                leng += len(l) - 1
                if leng >= len(seq):  # have read enough quality
                    last = None
                    yield name, seq, ''.join(seqs)  # yield a fastq record
                    break
            if last:  # reach EOF before reading enough quality
                yield name, seq, None  # yield a fasta record instead
                break
def process_fastq_minimal(fastq, **kwargs):
    """Swiftly extract minimal features (length and timestamp) from a rich fastq file"""
    handle = handle_compressed_input(fastq)
    try:
        # fq_minimal yields None at EOF; drop such sentinel entries
        entries = [entry for entry in fq_minimal(handle) if entry]
        frame = pd.DataFrame(data=entries, columns=["timestamp", "lengths"])
    except IndexError:
        logging.error("Fatal: Incorrect file structure for fastq_minimal")
        sys.exit("Error: file does not match expected structure for fastq_minimal")
    return ut.reduce_memory_usage(frame)
|
wdecoster/nanoget | nanoget/extraction_functions.py | process_fastq_minimal | python | def process_fastq_minimal(fastq, **kwargs):
infastq = handle_compressed_input(fastq)
try:
df = pd.DataFrame(
data=[rec for rec in fq_minimal(infastq) if rec],
columns=["timestamp", "lengths"]
)
except IndexError:
logging.error("Fatal: Incorrect file structure for fastq_minimal")
sys.exit("Error: file does not match expected structure for fastq_minimal")
return ut.reduce_memory_usage(df) | Swiftly extract minimal features (length and timestamp) from a rich fastq file | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/extraction_functions.py#L430-L441 | [
"def reduce_memory_usage(df):\n \"\"\"reduce memory usage of the dataframe\n\n - convert runIDs to categorical\n - downcast ints and floats\n \"\"\"\n usage_pre = df.memory_usage(deep=True).sum()\n if \"runIDs\" in df:\n df.loc[:, \"runIDs\"] = df.loc[:, \"runIDs\"].astype(\"category\")\n df_int = df.select_dtypes(include=['int'])\n df_float = df.select_dtypes(include=['float'])\n df.loc[:, df_int.columns] = df_int.apply(pd.to_numeric, downcast='unsigned')\n df.loc[:, df_float.columns] = df_float.apply(pd.to_numeric, downcast='float')\n usage_post = df.memory_usage(deep=True).sum()\n logging.info(\"Reduced DataFrame memory usage from {}Mb to {}Mb\".format(\n usage_pre / 1024**2, usage_post / 1024**2))\n if usage_post > 4e9 and \"readIDs\" in df:\n logging.info(\"DataFrame of features is too big, dropping read identifiers.\")\n return df.drop([\"readIDs\"], axis=1, errors=\"ignore\")\n else:\n return df\n",
"def handle_compressed_input(inputfq, file_type=\"fastq\"):\n \"\"\"Return handles from compressed files according to extension.\n\n Check for which fastq input is presented and open a handle accordingly\n Can read from compressed files (gz, bz2, bgz) or uncompressed\n Relies on file extensions to recognize compression\n \"\"\"\n ut.check_existance(inputfq)\n if inputfq.endswith(('.gz', 'bgz')):\n import gzip\n logging.info(\"Nanoget: Decompressing gzipped {} {}\".format(file_type, inputfq))\n return gzip.open(inputfq, 'rt')\n elif inputfq.endswith('.bz2'):\n import bz2\n logging.info(\"Nanoget: Decompressing bz2 compressed {} {}\".format(file_type, inputfq))\n return bz2.open(inputfq, 'rt')\n elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):\n return open(inputfq, 'r')\n else:\n logging.error(\"INPUT ERROR: Unrecognized file extension {}\".format(inputfq))\n sys.exit('INPUT ERROR:\\nUnrecognized file extension in {}\\n'\n 'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))\n",
"def fq_minimal(fq):\n \"\"\"Minimal fastq metrics extractor.\n\n Quickly parse a fasta/fastq file - but makes expectations on the file format\n There will be dragons if unexpected format is used\n Expects a fastq_rich format, but extracts only timestamp and length\n \"\"\"\n try:\n while True:\n time = next(fq)[1:].split(\" \")[4][11:-1]\n length = len(next(fq))\n next(fq)\n next(fq)\n yield time, length\n except StopIteration:\n yield None\n"
] | import logging
import nanoget.utils as ut
import pandas as pd
import sys
import pysam
import nanomath
import re
from Bio import SeqIO
import concurrent.futures as cfutures
def process_summary(summaryfile, **kwargs):
    """Extracting information from an albacore summary file.

    Only reads which have a >0 length are returned.

    The fields below may or may not exist, depending on the type of sequencing performed.
    Fields 1-14 are for 1D sequencing.
    Fields 1-23 for 2D sequencing.
    Fields 24-27, 2-5, 22-23 for 1D^2 (1D2) sequencing
    Fields 28-38 for barcoded workflows
     1  filename
     2  read_id
     3  run_id
     4  channel
     5  start_time
     6  duration
     7  num_events
     8  template_start
     9  num_events_template
    10  template_duration
    11  num_called_template
    12  sequence_length_template
    13  mean_qscore_template
    14  strand_score_template
    15  complement_start
    16  num_events_complement
    17  complement_duration
    18  num_called_complement
    19  sequence_length_complement
    20  mean_qscore_complement
    21  strand_score_complement
    22  sequence_length_2d
    23  mean_qscore_2d
    24  filename1
    25  filename2
    26  read_id1
    27  read_id2
    28  barcode_arrangement
    29  barcode_score
    30  barcode_full_arrangement
    31  front_score
    32  rear_score
    33  front_begin_index
    34  front_foundseq_length
    35  rear_end_index
    36  rear_foundseq_length
    37  kit
    38  variant
    """
    logging.info("Nanoget: Collecting metrics from summary file {} for {} sequencing".format(
        summaryfile, kwargs["readtype"]))
    ut.check_existance(summaryfile)
    # the length/quality column names differ between 1D and 2D/1D2 runs
    if kwargs["readtype"] == "1D":
        cols = ["read_id", "run_id", "channel", "start_time", "duration",
                "sequence_length_template", "mean_qscore_template"]
    elif kwargs["readtype"] in ["2D", "1D2"]:
        cols = ["read_id", "run_id", "channel", "start_time", "duration",
                "sequence_length_2d", "mean_qscore_2d"]
    if kwargs["barcoded"]:
        cols.append("barcode_arrangement")
        logging.info("Nanoget: Extracting metrics per barcode.")
    try:
        # pandas raises ValueError if any column in usecols is absent
        datadf = pd.read_csv(
            filepath_or_buffer=summaryfile,
            sep="\t",
            usecols=cols,
        )
    except ValueError:
        logging.error("Nanoget: did not find expected columns in summary file {}:\n {}".format(
            summaryfile, ', '.join(cols)))
        sys.exit("ERROR: expected columns in summary file {} not found:\n {}".format(
            summaryfile, ', '.join(cols)))
    # NOTE(review): positional rename below assumes the summary file lists
    # these columns in this exact order (usecols does not reorder) — confirm.
    if kwargs["barcoded"]:
        datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration",
                          "lengths", "quals", "barcode"]
    else:
        datadf.columns = ["readIDs", "runIDs", "channelIDs", "time", "duration", "lengths", "quals"]
    logging.info("Nanoget: Finished collecting statistics from summary file {}".format(summaryfile))
    # keep only reads with a non-zero length
    return ut.reduce_memory_usage(datadf.loc[datadf["lengths"] != 0].copy())
def check_bam(bam, samtype="bam"):
    """Check if bam file is valid.

    Bam file should:
    - exists
    - has an index (create if necessary)
    - is sorted by coordinate
    - has at least one mapped read

    Returns the opened pysam.AlignmentFile handle; exits the program if
    the file is unsorted or (for samtype == "bam") has no mapped reads.
    """
    ut.check_existance(bam)
    samfile = pysam.AlignmentFile(bam, "rb")
    if not samfile.has_index():
        pysam.index(bam)
        samfile = pysam.AlignmentFile(bam, "rb")  # Need to reload the samfile after creating index
        logging.info("Nanoget: No index for bam file could be found, created index.")
    # NOTE(review): assumes the header has an @HD line with an SO tag; a
    # header lacking it would raise KeyError here — confirm expected inputs.
    if not samfile.header['HD']['SO'] == 'coordinate':
        logging.error("Nanoget: Bam file {} not sorted by coordinate!.".format(bam))
        sys.exit("Please use a bam file sorted by coordinate.")
    if samtype == "bam":
        # mapped/unmapped counts come from the index, hence the index check above
        logging.info("Nanoget: Bam file {} contains {} mapped and {} unmapped reads.".format(
            bam, samfile.mapped, samfile.unmapped))
        if samfile.mapped == 0:
            logging.error("Nanoget: Bam file {} does not contain aligned reads.".format(bam))
            sys.exit("FATAL: not a single read was mapped in bam file {}".format(bam))
    return samfile
def process_ubam(bam, **kwargs):
    """Extracting metrics from unaligned bam format.

    Collects read name, average quality and read length for every record
    and returns them as a pandas DataFrame.
    """
    logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam))
    # check_sq=False: an unaligned bam has no @SQ header lines
    samfile = pysam.AlignmentFile(bam, "rb", check_sq=False)
    if not samfile.has_index():
        pysam.index(bam)
        # Need to reload the samfile after creating index
        samfile = pysam.AlignmentFile(bam, "rb")
        logging.info("Nanoget: No index for bam file could be found, created index.")
    # until_eof=True iterates unplaced reads, which a plain fetch would skip
    datadf = pd.DataFrame(
        data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length)
              for read in samfile.fetch(until_eof=True)],
        columns=["readIDs", "quals", "lengths"]) \
        .dropna(axis='columns', how='all') \
        .dropna(axis='index', how='any')
    logging.info("Nanoget: ubam {} contains {} reads.".format(
        bam, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def process_bam(bam, **kwargs):
    """Combines metrics from bam after extraction.

    Processing function: calls pool of worker functions
    to extract from a bam file the following metrics:
    -lengths
    -aligned lengths
    -qualities
    -aligned qualities
    -mapping qualities
    -edit distances to the reference genome scaled by read length
    Returned in a pandas DataFrame
    """
    logging.info("Nanoget: Starting to collect statistics from bam file {}.".format(bam))
    samfile = check_bam(bam)
    chromosomes = samfile.references
    # one (bam, chromosome) task per reference sequence
    params = zip([bam] * len(chromosomes), chromosomes)
    # flatten the per-chromosome lists, drop all-empty columns, then
    # drop rows with any missing value
    with cfutures.ProcessPoolExecutor() as executor:
        datadf = pd.DataFrame(
            data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
            columns=["readIDs", "quals", "aligned_quals", "lengths",
                     "aligned_lengths", "mapQ", "percentIdentity"]) \
            .dropna(axis='columns', how='all') \
            .dropna(axis='index', how='any')
    logging.info("Nanoget: bam {} contains {} primary alignments.".format(
        bam, datadf["lengths"].size))
    return ut.reduce_memory_usage(datadf)
def process_cram(cram, **kwargs):
"""Combines metrics from cram after extraction.
Processing function: calls pool of worker functions
to extract from a cram file the following metrics:
-lengths
-aligned lengths
-qualities
-aligned qualities
-mapping qualities
-edit distances to the reference genome scaled by read length
Returned in a pandas DataFrame
"""
logging.info("Nanoget: Starting to collect statistics from cram file {}.".format(cram))
samfile = check_bam(cram, samtype="cram")
chromosomes = samfile.references
params = zip([cram] * len(chromosomes), chromosomes)
with cfutures.ProcessPoolExecutor() as executor:
datadf = pd.DataFrame(
data=[res for sublist in executor.map(extract_from_bam, params) for res in sublist],
columns=["readIDs", "quals", "aligned_quals", "lengths",
"aligned_lengths", "mapQ", "percentIdentity"]) \
.dropna(axis='columns', how='all') \
.dropna(axis='index', how='any')
logging.info("Nanoget: cram {} contains {} primary alignments.".format(
cram, datadf["lengths"].size))
return ut.reduce_memory_usage(datadf)
def extract_from_bam(params):
"""Extracts metrics from bam.
Worker function per chromosome
loop over a bam file and create list with tuples containing metrics:
-qualities
-aligned qualities
-lengths
-aligned lengths
-mapping qualities
-edit distances to the reference genome scaled by read length
"""
bam, chromosome = params
samfile = pysam.AlignmentFile(bam, "rb")
return [
(read.query_name,
nanomath.ave_qual(read.query_qualities),
nanomath.ave_qual(read.query_alignment_qualities),
read.query_length,
read.query_alignment_length,
read.mapping_quality,
get_pID(read))
for read in samfile.fetch(reference=chromosome, multiple_iterators=True)
if not read.is_secondary]
def get_pID(read):
"""Return the percent identity of a read.
based on the NM tag if present,
if not calculate from MD tag and CIGAR string
read.query_alignment_length can be zero in the case of ultra long reads aligned with minimap2 -L
"""
try:
return 100 * (1 - read.get_tag("NM") / read.query_alignment_length)
except KeyError:
try:
return 100 * (1 - (parse_MD(read.get_tag("MD")) + parse_CIGAR(read.cigartuples))
/ read.query_alignment_length)
except KeyError:
return None
except ZeroDivisionError:
return None
def parse_MD(MDlist):
"""Parse MD string to get number of mismatches and deletions."""
return sum([len(item) for item in re.split('[0-9^]', MDlist)])
def parse_CIGAR(cigartuples):
"""Count the insertions in the read using the CIGAR string."""
return sum([item[1] for item in cigartuples if item[0] == 1])
def handle_compressed_input(inputfq, file_type="fastq"):
"""Return handles from compressed files according to extension.
Check for which fastq input is presented and open a handle accordingly
Can read from compressed files (gz, bz2, bgz) or uncompressed
Relies on file extensions to recognize compression
"""
ut.check_existance(inputfq)
if inputfq.endswith(('.gz', 'bgz')):
import gzip
logging.info("Nanoget: Decompressing gzipped {} {}".format(file_type, inputfq))
return gzip.open(inputfq, 'rt')
elif inputfq.endswith('.bz2'):
import bz2
logging.info("Nanoget: Decompressing bz2 compressed {} {}".format(file_type, inputfq))
return bz2.open(inputfq, 'rt')
elif inputfq.endswith(('.fastq', '.fq', 'fasta', '.fa', '.fas')):
return open(inputfq, 'r')
else:
logging.error("INPUT ERROR: Unrecognized file extension {}".format(inputfq))
sys.exit('INPUT ERROR:\nUnrecognized file extension in {}\n'
'Supported are gz, bz2, bgz, fastq, fq, fasta, fa and fas'.format(inputfq))
def process_fasta(fasta, **kwargs):
"""Combine metrics extracted from a fasta file."""
logging.info("Nanoget: Starting to collect statistics from a fasta file.")
inputfasta = handle_compressed_input(fasta, file_type="fasta")
return ut.reduce_memory_usage(pd.DataFrame(
data=[len(rec) for rec in SeqIO.parse(inputfasta, "fasta")],
columns=["lengths"]
).dropna())
def process_fastq_plain(fastq, **kwargs):
"""Combine metrics extracted from a fastq file."""
logging.info("Nanoget: Starting to collect statistics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
return ut.reduce_memory_usage(pd.DataFrame(
data=[res for res in extract_from_fastq(inputfastq) if res],
columns=["quals", "lengths"]
).dropna())
def extract_from_fastq(fq):
"""Extract metrics from a fastq file.
Return average quality and read length
"""
for rec in SeqIO.parse(fq, "fastq"):
yield nanomath.ave_qual(rec.letter_annotations["phred_quality"]), len(rec)
def stream_fastq_full(fastq, threads):
"""Generator for returning metrics extracted from fastq.
Extract from a fastq file:
-readname
-average and median quality
-read_lenght
"""
logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
inputfastq = handle_compressed_input(fastq)
with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
for results in executor.map(extract_all_from_fastq, SeqIO.parse(inputfastq, "fastq")):
yield results
logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
def extract_all_from_fastq(rec):
"""Extract metrics from a fastq file.
Return identifier, read length, average quality and median quality
"""
return (rec.id,
len(rec),
nanomath.ave_qual(rec.letter_annotations["phred_quality"]),
nanomath.median_qual(rec.letter_annotations["phred_quality"]))
def info_to_dict(info):
"""Get the key-value pairs from the albacore/minknow fastq description and return dict"""
return {field.split('=')[0]: field.split('=')[1] for field in info.split(' ')[1:]}
def process_fastq_rich(fastq, **kwargs):
"""Extract metrics from a richer fastq file.
Extract information from fastq files generated by albacore or MinKNOW,
containing richer information in the header (key-value pairs)
read=<int> [72]
ch=<int> [159]
start_time=<timestamp> [2016-07-15T14:23:22Z] # UTC ISO 8601 ISO 3339 timestamp
Z indicates UTC time, T is the delimiter between date expression and time expression
dateutil.parser.parse("2016-07-15T14:23:22Z") imported as dparse
-> datetime.datetime(2016, 7, 15, 14, 23, 22, tzinfo=tzutc())
"""
logging.info("Nanoget: Starting to collect statistics from rich fastq file.")
inputfastq = handle_compressed_input(fastq)
res = []
for record in SeqIO.parse(inputfastq, "fastq"):
try:
read_info = info_to_dict(record.description)
res.append(
(nanomath.ave_qual(record.letter_annotations["phred_quality"]),
len(record),
read_info["ch"],
read_info["start_time"],
read_info["runid"]))
except KeyError:
logging.error("Nanoget: keyerror when processing record {}".format(record.description))
sys.exit("Unexpected fastq identifier:\n{}\n\n \
missing one or more of expected fields 'ch', 'start_time' or 'runid'".format(
record.description))
df = pd.DataFrame(
data=res,
columns=["quals", "lengths", "channelIDs", "timestamp", "runIDs"]).dropna()
df["channelIDs"] = df["channelIDs"].astype("int64")
return ut.reduce_memory_usage(df)
def readfq(fp):
"""Generator function adapted from https://github.com/lh3/readfq."""
last = None # this is a buffer keeping the last unprocessed line
while True: # mimic closure; is it a bad idea?
if not last: # the first record or a record following a fastq
for l in fp: # search for the start of the next record
if l[0] in '>@': # fasta/q header line
last = l[:-1] # save this line
break
if not last:
break
name, seqs, last = last[1:].partition(" ")[0], [], None
for l in fp: # read the sequence
if l[0] in '@+>':
last = l[:-1]
break
seqs.append(l[:-1])
if not last or last[0] != '+': # this is a fasta record
yield name, ''.join(seqs), None # yield a fasta record
if not last:
break
else: # this is a fastq record
seq, leng, seqs = ''.join(seqs), 0, []
for l in fp: # read the quality
seqs.append(l[:-1])
leng += len(l) - 1
if leng >= len(seq): # have read enough quality
last = None
yield name, seq, ''.join(seqs) # yield a fastq record
break
if last: # reach EOF before reading enough quality
yield name, seq, None # yield a fasta record instead
break
def fq_minimal(fq):
"""Minimal fastq metrics extractor.
Quickly parse a fasta/fastq file - but makes expectations on the file format
There will be dragons if unexpected format is used
Expects a fastq_rich format, but extracts only timestamp and length
"""
try:
while True:
time = next(fq)[1:].split(" ")[4][11:-1]
length = len(next(fq))
next(fq)
next(fq)
yield time, length
except StopIteration:
yield None
|
wdecoster/nanoget | nanoget/nanoget.py | get_input | python | def get_input(source, files, threads=4, readtype="1D",
combine="simple", names=None, barcoded=False):
proc_functions = {
'fastq': ex.process_fastq_plain,
'fasta': ex.process_fasta,
'bam': ex.process_bam,
'summary': ex.process_summary,
'fastq_rich': ex.process_fastq_rich,
'fastq_minimal': ex.process_fastq_minimal,
'cram': ex.process_cram,
'ubam': ex.process_ubam, }
filethreads = min(len(files), threads)
threadsleft = threads - filethreads
with cfutures.ProcessPoolExecutor(max_workers=filethreads) as executor:
extration_function = partial(proc_functions[source],
threads=threadsleft,
readtype=readtype,
barcoded=barcoded)
datadf = combine_dfs(
dfs=[out for out in executor.map(extration_function, files)],
names=names or files,
method=combine)
if "readIDs" in datadf and pd.isna(datadf["readIDs"]).any():
datadf.drop("readIDs", axis='columns', inplace=True)
datadf = calculate_start_time(datadf)
logging.info("Nanoget: Gathered all metrics of {} reads".format(len(datadf)))
if len(datadf) == 0:
logging.critical("Nanoget: no reads retrieved.".format(len(datadf)))
sys.exit("Fatal: No reads found in input.")
else:
return datadf | Get input and process accordingly.
Data can be:
- a uncompressed, bgzip, bzip2 or gzip compressed fastq file
- a uncompressed, bgzip, bzip2 or gzip compressed fasta file
- a rich fastq containing additional key=value information in the description,
as produced by MinKNOW and albacore with the same compression options as above
- a sorted bam file
- a sorted cram file
- a (compressed) sequencing_summary.txt file generated by albacore
Handle is passed to the proper functions to get DataFrame with metrics
Multiple files of the same type can be used to extract info from, which is done in parallel
Arguments:
- source: defines the input data type and the function that needs to be called
- files: is a list of one or more files to operate on, from the type of <source>
- threads: is the amount of workers which can be used
- readtype: (only relevant for summary input) and specifies which columns have to be extracted
- combine: is either 'simple' or 'track', with the difference that with 'track' an additional
field is created with the name of the dataset
- names: if combine="track", the names to be used for the datasets. Needs to have same length as
files, or None | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/nanoget.py#L29-L82 | [
"def combine_dfs(dfs, names, method):\n \"\"\"Combine dataframes.\n\n Combination is either done simple by just concatenating the DataFrames\n or performs tracking by adding the name of the dataset as a column.\"\"\"\n if method == \"track\":\n res = list()\n for df, identifier in zip(dfs, names):\n df[\"dataset\"] = identifier\n res.append(df)\n return pd.concat(res, ignore_index=True)\n elif method == \"simple\":\n return pd.concat(dfs, ignore_index=True)\n",
"def calculate_start_time(df):\n \"\"\"Calculate the star_time per read.\n\n Time data is either\n a \"time\" (in seconds, derived from summary files) or\n a \"timestamp\" (in UTC, derived from fastq_rich format)\n and has to be converted appropriately in a datetime format time_arr\n\n For both the time_zero is the minimal value of the time_arr,\n which is then used to subtract from all other times\n\n In the case of method=track (and dataset is a column in the df) then this\n subtraction is done per dataset\n \"\"\"\n if \"time\" in df:\n df[\"time_arr\"] = pd.Series(df[\"time\"], dtype='datetime64[s]')\n elif \"timestamp\" in df:\n df[\"time_arr\"] = pd.Series(df[\"timestamp\"], dtype=\"datetime64[ns]\")\n else:\n return df\n if \"dataset\" in df:\n for dset in df[\"dataset\"].unique():\n time_zero = df.loc[df[\"dataset\"] == dset, \"time_arr\"].min()\n df.loc[df[\"dataset\"] == dset, \"start_time\"] = \\\n df.loc[df[\"dataset\"] == dset, \"time_arr\"] - time_zero\n else:\n df[\"start_time\"] = df[\"time_arr\"] - df[\"time_arr\"].min()\n return df.drop([\"time\", \"timestamp\", \"time_arr\"], axis=1, errors=\"ignore\")\n"
] | """
This module provides functions to extract useful metrics
from Oxford Nanopore sequencing reads and alignments.
Data can be presented in the following formats, using the following functions:
- A sorted bam file
process_bam(bamfile, threads)
- A standard fastq file
process_fastq_plain(fastqfile, 'threads')
- A fastq file with metadata from MinKNOW or Albacore
process_fastq_rich(fastqfile)
- A sequencing_summary file generated by Albacore
process_summary(sequencing_summary.txt, 'readtype')
Fastq files can be compressed using gzip, bzip2 or bgzip.
The data is returned as a pandas DataFrame with standardized headernames for convenient extraction.
The functions perform logging while being called and extracting data.
"""
import sys
import logging
import pandas as pd
from functools import partial
import concurrent.futures as cfutures
import nanoget.extraction_functions as ex
def combine_dfs(dfs, names, method):
    """Combine dataframes.

    Combination is either done simple by just concatenating the DataFrames
    or performs tracking by adding the name of the dataset as a column.

    :param dfs: list of DataFrames to combine
    :param names: dataset labels, parallel to ``dfs`` (used for "track")
    :param method: "simple" or "track"
    :raises ValueError: for an unknown ``method`` (the original silently
        returned None, hiding caller typos until a later crash)
    """
    if method == "track":
        labelled = []
        for frame, identifier in zip(dfs, names):
            frame["dataset"] = identifier
            labelled.append(frame)
        return pd.concat(labelled, ignore_index=True)
    elif method == "simple":
        return pd.concat(dfs, ignore_index=True)
    else:
        raise ValueError("combine_dfs: unknown combination method {!r}".format(method))
def calculate_start_time(df):
    """Calculate the start_time per read.

    Time data is either
    a "time" (in seconds, derived from summary files) or
    a "timestamp" (in UTC, derived from fastq_rich format)
    and has to be converted appropriately in a datetime format time_arr

    For both the time_zero is the minimal value of the time_arr,
    which is then used to subtract from all other times

    In the case of method=track (and dataset is a column in the df) then this
    subtraction is done per dataset
    """
    # seconds-since-start vs ISO timestamp need different datetime dtypes
    if "time" in df:
        df["time_arr"] = pd.Series(df["time"], dtype='datetime64[s]')
    elif "timestamp" in df:
        df["time_arr"] = pd.Series(df["timestamp"], dtype="datetime64[ns]")
    else:
        # neither column present: nothing to compute
        return df
    if "dataset" in df:
        # normalize each dataset against its own earliest read
        for dset in df["dataset"].unique():
            time_zero = df.loc[df["dataset"] == dset, "time_arr"].min()
            df.loc[df["dataset"] == dset, "start_time"] = \
                df.loc[df["dataset"] == dset, "time_arr"] - time_zero
    else:
        df["start_time"] = df["time_arr"] - df["time_arr"].min()
    # drop the intermediate columns; errors="ignore" covers whichever is absent
    return df.drop(["time", "timestamp", "time_arr"], axis=1, errors="ignore")
|
wdecoster/nanoget | nanoget/nanoget.py | combine_dfs | python | def combine_dfs(dfs, names, method):
if method == "track":
res = list()
for df, identifier in zip(dfs, names):
df["dataset"] = identifier
res.append(df)
return pd.concat(res, ignore_index=True)
elif method == "simple":
return pd.concat(dfs, ignore_index=True) | Combine dataframes.
Combination is either done simple by just concatenating the DataFrames
or performs tracking by adding the name of the dataset as a column. | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/nanoget.py#L85-L97 | null | """
This module provides functions to extract useful metrics
from Oxford Nanopore sequencing reads and alignments.
Data can be presented in the following formats, using the following functions:
- A sorted bam file
process_bam(bamfile, threads)
- A standard fastq file
process_fastq_plain(fastqfile, 'threads')
- A fastq file with metadata from MinKNOW or Albacore
process_fastq_rich(fastqfile)
- A sequencing_summary file generated by Albacore
process_summary(sequencing_summary.txt, 'readtype')
Fastq files can be compressed using gzip, bzip2 or bgzip.
The data is returned as a pandas DataFrame with standardized headernames for convenient extraction.
The functions perform logging while being called and extracting data.
"""
import sys
import logging
import pandas as pd
from functools import partial
import concurrent.futures as cfutures
import nanoget.extraction_functions as ex
def get_input(source, files, threads=4, readtype="1D",
combine="simple", names=None, barcoded=False):
"""Get input and process accordingly.
Data can be:
- a uncompressed, bgzip, bzip2 or gzip compressed fastq file
- a uncompressed, bgzip, bzip2 or gzip compressed fasta file
- a rich fastq containing additional key=value information in the description,
as produced by MinKNOW and albacore with the same compression options as above
- a sorted bam file
- a sorted cram file
- a (compressed) sequencing_summary.txt file generated by albacore
Handle is passed to the proper functions to get DataFrame with metrics
Multiple files of the same type can be used to extract info from, which is done in parallel
Arguments:
- source: defines the input data type and the function that needs to be called
- files: is a list of one or more files to operate on, from the type of <source>
- threads: is the amount of workers which can be used
- readtype: (only relevant for summary input) and specifies which columns have to be extracted
- combine: is either 'simple' or 'track', with the difference that with 'track' an additional
field is created with the name of the dataset
- names: if combine="track", the names to be used for the datasets. Needs to have same length as
files, or None
"""
proc_functions = {
'fastq': ex.process_fastq_plain,
'fasta': ex.process_fasta,
'bam': ex.process_bam,
'summary': ex.process_summary,
'fastq_rich': ex.process_fastq_rich,
'fastq_minimal': ex.process_fastq_minimal,
'cram': ex.process_cram,
'ubam': ex.process_ubam, }
filethreads = min(len(files), threads)
threadsleft = threads - filethreads
with cfutures.ProcessPoolExecutor(max_workers=filethreads) as executor:
extration_function = partial(proc_functions[source],
threads=threadsleft,
readtype=readtype,
barcoded=barcoded)
datadf = combine_dfs(
dfs=[out for out in executor.map(extration_function, files)],
names=names or files,
method=combine)
if "readIDs" in datadf and pd.isna(datadf["readIDs"]).any():
datadf.drop("readIDs", axis='columns', inplace=True)
datadf = calculate_start_time(datadf)
logging.info("Nanoget: Gathered all metrics of {} reads".format(len(datadf)))
if len(datadf) == 0:
logging.critical("Nanoget: no reads retrieved.".format(len(datadf)))
sys.exit("Fatal: No reads found in input.")
else:
return datadf
def calculate_start_time(df):
"""Calculate the star_time per read.
Time data is either
a "time" (in seconds, derived from summary files) or
a "timestamp" (in UTC, derived from fastq_rich format)
and has to be converted appropriately in a datetime format time_arr
For both the time_zero is the minimal value of the time_arr,
which is then used to subtract from all other times
In the case of method=track (and dataset is a column in the df) then this
subtraction is done per dataset
"""
if "time" in df:
df["time_arr"] = pd.Series(df["time"], dtype='datetime64[s]')
elif "timestamp" in df:
df["time_arr"] = pd.Series(df["timestamp"], dtype="datetime64[ns]")
else:
return df
if "dataset" in df:
for dset in df["dataset"].unique():
time_zero = df.loc[df["dataset"] == dset, "time_arr"].min()
df.loc[df["dataset"] == dset, "start_time"] = \
df.loc[df["dataset"] == dset, "time_arr"] - time_zero
else:
df["start_time"] = df["time_arr"] - df["time_arr"].min()
return df.drop(["time", "timestamp", "time_arr"], axis=1, errors="ignore")
|
wdecoster/nanoget | nanoget/nanoget.py | calculate_start_time | python | def calculate_start_time(df):
if "time" in df:
df["time_arr"] = pd.Series(df["time"], dtype='datetime64[s]')
elif "timestamp" in df:
df["time_arr"] = pd.Series(df["timestamp"], dtype="datetime64[ns]")
else:
return df
if "dataset" in df:
for dset in df["dataset"].unique():
time_zero = df.loc[df["dataset"] == dset, "time_arr"].min()
df.loc[df["dataset"] == dset, "start_time"] = \
df.loc[df["dataset"] == dset, "time_arr"] - time_zero
else:
df["start_time"] = df["time_arr"] - df["time_arr"].min()
return df.drop(["time", "timestamp", "time_arr"], axis=1, errors="ignore") | Calculate the star_time per read.
Time data is either
a "time" (in seconds, derived from summary files) or
a "timestamp" (in UTC, derived from fastq_rich format)
and has to be converted appropriately in a datetime format time_arr
For both the time_zero is the minimal value of the time_arr,
which is then used to subtract from all other times
In the case of method=track (and dataset is a column in the df) then this
subtraction is done per dataset | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/nanoget.py#L100-L127 | null | """
This module provides functions to extract useful metrics
from Oxford Nanopore sequencing reads and alignments.
Data can be presented in the following formats, using the following functions:
- A sorted bam file
process_bam(bamfile, threads)
- A standard fastq file
process_fastq_plain(fastqfile, 'threads')
- A fastq file with metadata from MinKNOW or Albacore
process_fastq_rich(fastqfile)
- A sequencing_summary file generated by Albacore
process_summary(sequencing_summary.txt, 'readtype')
Fastq files can be compressed using gzip, bzip2 or bgzip.
The data is returned as a pandas DataFrame with standardized headernames for convenient extraction.
The functions perform logging while being called and extracting data.
"""
import sys
import logging
import pandas as pd
from functools import partial
import concurrent.futures as cfutures
import nanoget.extraction_functions as ex
def get_input(source, files, threads=4, readtype="1D",
combine="simple", names=None, barcoded=False):
"""Get input and process accordingly.
Data can be:
- a uncompressed, bgzip, bzip2 or gzip compressed fastq file
- a uncompressed, bgzip, bzip2 or gzip compressed fasta file
- a rich fastq containing additional key=value information in the description,
as produced by MinKNOW and albacore with the same compression options as above
- a sorted bam file
- a sorted cram file
- a (compressed) sequencing_summary.txt file generated by albacore
Handle is passed to the proper functions to get DataFrame with metrics
Multiple files of the same type can be used to extract info from, which is done in parallel
Arguments:
- source: defines the input data type and the function that needs to be called
- files: is a list of one or more files to operate on, from the type of <source>
- threads: is the amount of workers which can be used
- readtype: (only relevant for summary input) and specifies which columns have to be extracted
- combine: is either 'simple' or 'track', with the difference that with 'track' an additional
field is created with the name of the dataset
- names: if combine="track", the names to be used for the datasets. Needs to have same length as
files, or None
"""
proc_functions = {
'fastq': ex.process_fastq_plain,
'fasta': ex.process_fasta,
'bam': ex.process_bam,
'summary': ex.process_summary,
'fastq_rich': ex.process_fastq_rich,
'fastq_minimal': ex.process_fastq_minimal,
'cram': ex.process_cram,
'ubam': ex.process_ubam, }
filethreads = min(len(files), threads)
threadsleft = threads - filethreads
with cfutures.ProcessPoolExecutor(max_workers=filethreads) as executor:
extration_function = partial(proc_functions[source],
threads=threadsleft,
readtype=readtype,
barcoded=barcoded)
datadf = combine_dfs(
dfs=[out for out in executor.map(extration_function, files)],
names=names or files,
method=combine)
if "readIDs" in datadf and pd.isna(datadf["readIDs"]).any():
datadf.drop("readIDs", axis='columns', inplace=True)
datadf = calculate_start_time(datadf)
logging.info("Nanoget: Gathered all metrics of {} reads".format(len(datadf)))
if len(datadf) == 0:
logging.critical("Nanoget: no reads retrieved.".format(len(datadf)))
sys.exit("Fatal: No reads found in input.")
else:
return datadf
def combine_dfs(dfs, names, method):
"""Combine dataframes.
Combination is either done simple by just concatenating the DataFrames
or performs tracking by adding the name of the dataset as a column."""
if method == "track":
res = list()
for df, identifier in zip(dfs, names):
df["dataset"] = identifier
res.append(df)
return pd.concat(res, ignore_index=True)
elif method == "simple":
return pd.concat(dfs, ignore_index=True)
|
wdecoster/nanoget | nanoget/utils.py | reduce_memory_usage | python | def reduce_memory_usage(df):
usage_pre = df.memory_usage(deep=True).sum()
if "runIDs" in df:
df.loc[:, "runIDs"] = df.loc[:, "runIDs"].astype("category")
df_int = df.select_dtypes(include=['int'])
df_float = df.select_dtypes(include=['float'])
df.loc[:, df_int.columns] = df_int.apply(pd.to_numeric, downcast='unsigned')
df.loc[:, df_float.columns] = df_float.apply(pd.to_numeric, downcast='float')
usage_post = df.memory_usage(deep=True).sum()
logging.info("Reduced DataFrame memory usage from {}Mb to {}Mb".format(
usage_pre / 1024**2, usage_post / 1024**2))
if usage_post > 4e9 and "readIDs" in df:
logging.info("DataFrame of features is too big, dropping read identifiers.")
return df.drop(["readIDs"], axis=1, errors="ignore")
else:
return df | reduce memory usage of the dataframe
- convert runIDs to categorical
- downcast ints and floats | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/utils.py#L7-L27 | null | import sys
import logging
import pandas as pd
from os import path as opath
def check_existance(f):
"""Check if the file supplied as input exists."""
if not opath.isfile(f):
logging.error("Nanoget: File provided doesn't exist or the path is incorrect: {}".format(f))
sys.exit("File provided doesn't exist or the path is incorrect: {}".format(f))
|
wdecoster/nanoget | nanoget/utils.py | check_existance | python | def check_existance(f):
if not opath.isfile(f):
logging.error("Nanoget: File provided doesn't exist or the path is incorrect: {}".format(f))
sys.exit("File provided doesn't exist or the path is incorrect: {}".format(f)) | Check if the file supplied as input exists. | train | https://github.com/wdecoster/nanoget/blob/fb7306220e261849b96785fab02dd2f35a0e3b60/nanoget/utils.py#L30-L34 | null | import sys
import logging
import pandas as pd
from os import path as opath
def reduce_memory_usage(df):
"""reduce memory usage of the dataframe
- convert runIDs to categorical
- downcast ints and floats
"""
usage_pre = df.memory_usage(deep=True).sum()
if "runIDs" in df:
df.loc[:, "runIDs"] = df.loc[:, "runIDs"].astype("category")
df_int = df.select_dtypes(include=['int'])
df_float = df.select_dtypes(include=['float'])
df.loc[:, df_int.columns] = df_int.apply(pd.to_numeric, downcast='unsigned')
df.loc[:, df_float.columns] = df_float.apply(pd.to_numeric, downcast='float')
usage_post = df.memory_usage(deep=True).sum()
logging.info("Reduced DataFrame memory usage from {}Mb to {}Mb".format(
usage_pre / 1024**2, usage_post / 1024**2))
if usage_post > 4e9 and "readIDs" in df:
logging.info("DataFrame of features is too big, dropping read identifiers.")
return df.drop(["readIDs"], axis=1, errors="ignore")
else:
return df
|
camptocamp/marabunta | marabunta/runner.py | VersionRunner.perform | python | def perform(self):
db_versions = self.table.versions()
version = self.version
if (version.is_processed(db_versions) and
not self.config.force_version == self.version.number):
self.log(
u'version {} is already installed'.format(version.number)
)
return
self.start()
try:
self._perform_version(version)
except Exception:
if sys.version_info < (3, 4):
msg = traceback.format_exc().decode('utf8', errors='ignore')
else:
msg = traceback.format_exc()
error = u'\n'.join(self.logs + [u'\n', msg])
self.table.record_log(version.number, error)
raise
self.finish() | Perform the version upgrade on the database. | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/runner.py#L154-L178 | [
"def log(self, message, decorated=True, stdout=True):\n self.logs.append(message)\n if not stdout:\n return\n if decorated:\n app_message = u'version {}: {}'.format(\n self.version.number,\n message,\n )\n print_decorated(app_message)\n else:\n safe_print(message)\n",
"def start(self):\n self.log(u'start')\n self.table.start_version(self.version.number, datetime.now())\n",
"def finish(self):\n self.log(u'done')\n module_table = IrModuleModule(self.database)\n addons_state = module_table.read_state()\n self.table.finish_version(self.version.number, datetime.now(),\n u'\\n'.join(self.logs),\n [state._asdict() for state in addons_state])\n",
"def _perform_version(self, version):\n \"\"\"Inner method for version upgrade.\n\n Not intended for standalone use. This method performs the actual\n version upgrade with all the pre, post operations and addons upgrades.\n\n :param version: The migration version to upgrade to\n :type version: Instance of Version class\n \"\"\"\n if version.is_noop():\n self.log(u'version {} is a noop'.format(version.number))\n else:\n self.log(u'execute base pre-operations')\n for operation in version.pre_operations():\n operation.execute(self.log)\n if self.config.mode:\n self.log(u'execute %s pre-operations' % self.config.mode)\n for operation in version.pre_operations(mode=self.config.mode):\n operation.execute(self.log)\n\n self.perform_addons()\n\n self.log(u'execute base post-operations')\n for operation in version.post_operations():\n operation.execute(self.log)\n if self.config.mode:\n self.log(u'execute %s post-operations' % self.config.mode)\n for operation in version.post_operations(self.config.mode):\n operation.execute(self.log)\n"
] | class VersionRunner(object):
def __init__(self, runner, version):
self.runner = runner
self.table = runner.table
self.migration = runner.migration
self.config = runner.config
self.database = runner.database
self.version = version
self.logs = []
def log(self, message, decorated=True, stdout=True):
self.logs.append(message)
if not stdout:
return
if decorated:
app_message = u'version {}: {}'.format(
self.version.number,
message,
)
print_decorated(app_message)
else:
safe_print(message)
def start(self):
self.log(u'start')
self.table.start_version(self.version.number, datetime.now())
def finish(self):
self.log(u'done')
module_table = IrModuleModule(self.database)
addons_state = module_table.read_state()
self.table.finish_version(self.version.number, datetime.now(),
u'\n'.join(self.logs),
[state._asdict() for state in addons_state])
def _perform_version(self, version):
"""Inner method for version upgrade.
Not intended for standalone use. This method performs the actual
version upgrade with all the pre, post operations and addons upgrades.
:param version: The migration version to upgrade to
:type version: Instance of Version class
"""
if version.is_noop():
self.log(u'version {} is a noop'.format(version.number))
else:
self.log(u'execute base pre-operations')
for operation in version.pre_operations():
operation.execute(self.log)
if self.config.mode:
self.log(u'execute %s pre-operations' % self.config.mode)
for operation in version.pre_operations(mode=self.config.mode):
operation.execute(self.log)
self.perform_addons()
self.log(u'execute base post-operations')
for operation in version.post_operations():
operation.execute(self.log)
if self.config.mode:
self.log(u'execute %s post-operations' % self.config.mode)
for operation in version.post_operations(self.config.mode):
operation.execute(self.log)
def perform_addons(self):
version = self.version
module_table = IrModuleModule(self.database)
addons_state = module_table.read_state()
upgrade_operation = version.upgrade_addons_operation(
addons_state,
mode=self.config.mode
)
# exclude the addons already installed or updated during this run
# when 'allow_serie' is active
exclude = self.runner.upgraded_addons
self.log(u'installation / upgrade of addons')
operation = upgrade_operation.operation(exclude_addons=exclude)
if operation:
operation.execute(self.log)
self.runner.upgraded_addons |= (upgrade_operation.to_install |
upgrade_operation.to_upgrade)
|
camptocamp/marabunta | marabunta/runner.py | VersionRunner._perform_version | python | def _perform_version(self, version):
if version.is_noop():
self.log(u'version {} is a noop'.format(version.number))
else:
self.log(u'execute base pre-operations')
for operation in version.pre_operations():
operation.execute(self.log)
if self.config.mode:
self.log(u'execute %s pre-operations' % self.config.mode)
for operation in version.pre_operations(mode=self.config.mode):
operation.execute(self.log)
self.perform_addons()
self.log(u'execute base post-operations')
for operation in version.post_operations():
operation.execute(self.log)
if self.config.mode:
self.log(u'execute %s post-operations' % self.config.mode)
for operation in version.post_operations(self.config.mode):
operation.execute(self.log) | Inner method for version upgrade.
Not intended for standalone use. This method performs the actual
version upgrade with all the pre, post operations and addons upgrades.
:param version: The migration version to upgrade to
:type version: Instance of Version class | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/runner.py#L180-L208 | [
"def log(self, message, decorated=True, stdout=True):\n self.logs.append(message)\n if not stdout:\n return\n if decorated:\n app_message = u'version {}: {}'.format(\n self.version.number,\n message,\n )\n print_decorated(app_message)\n else:\n safe_print(message)\n",
"def perform_addons(self):\n version = self.version\n\n module_table = IrModuleModule(self.database)\n addons_state = module_table.read_state()\n\n upgrade_operation = version.upgrade_addons_operation(\n addons_state,\n mode=self.config.mode\n )\n # exclude the addons already installed or updated during this run\n # when 'allow_serie' is active\n exclude = self.runner.upgraded_addons\n self.log(u'installation / upgrade of addons')\n operation = upgrade_operation.operation(exclude_addons=exclude)\n if operation:\n operation.execute(self.log)\n self.runner.upgraded_addons |= (upgrade_operation.to_install |\n upgrade_operation.to_upgrade)\n"
] | class VersionRunner(object):
def __init__(self, runner, version):
self.runner = runner
self.table = runner.table
self.migration = runner.migration
self.config = runner.config
self.database = runner.database
self.version = version
self.logs = []
def log(self, message, decorated=True, stdout=True):
self.logs.append(message)
if not stdout:
return
if decorated:
app_message = u'version {}: {}'.format(
self.version.number,
message,
)
print_decorated(app_message)
else:
safe_print(message)
def start(self):
self.log(u'start')
self.table.start_version(self.version.number, datetime.now())
def finish(self):
self.log(u'done')
module_table = IrModuleModule(self.database)
addons_state = module_table.read_state()
self.table.finish_version(self.version.number, datetime.now(),
u'\n'.join(self.logs),
[state._asdict() for state in addons_state])
def perform(self):
"""Perform the version upgrade on the database.
"""
db_versions = self.table.versions()
version = self.version
if (version.is_processed(db_versions) and
not self.config.force_version == self.version.number):
self.log(
u'version {} is already installed'.format(version.number)
)
return
self.start()
try:
self._perform_version(version)
except Exception:
if sys.version_info < (3, 4):
msg = traceback.format_exc().decode('utf8', errors='ignore')
else:
msg = traceback.format_exc()
error = u'\n'.join(self.logs + [u'\n', msg])
self.table.record_log(version.number, error)
raise
self.finish()
def perform_addons(self):
version = self.version
module_table = IrModuleModule(self.database)
addons_state = module_table.read_state()
upgrade_operation = version.upgrade_addons_operation(
addons_state,
mode=self.config.mode
)
# exclude the addons already installed or updated during this run
# when 'allow_serie' is active
exclude = self.runner.upgraded_addons
self.log(u'installation / upgrade of addons')
operation = upgrade_operation.operation(exclude_addons=exclude)
if operation:
operation.execute(self.log)
self.runner.upgraded_addons |= (upgrade_operation.to_install |
upgrade_operation.to_upgrade)
|
camptocamp/marabunta | marabunta/output.py | safe_print | python | def safe_print(ustring, errors='replace', **kwargs):
encoding = sys.stdout.encoding or 'utf-8'
if sys.version_info[0] == 3:
print(ustring, **kwargs)
else:
bytestr = ustring.encode(encoding, errors=errors)
print(bytestr, **kwargs) | Safely print a unicode string | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/output.py#L26-L33 | null | # -*- coding: utf-8 -*-
# Copyright 2016-2017 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
from __future__ import print_function
import sys
LOG_DECORATION = u'|> '
supports_colors = hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()
def print_decorated(message, *args, **kwargs):
if supports_colors:
template = u'\033[1m{}{}\033[0m'
else:
template = u'{}{}'
message = template.format(
LOG_DECORATION,
message,
)
safe_print(message, *args, **kwargs)
|
camptocamp/marabunta | marabunta/config.py | get_args_parser | python | def get_args_parser():
parser = argparse.ArgumentParser(
description='Marabunta: Migrating ants for Odoo')
parser.add_argument('--migration-file', '-f',
action=EnvDefault,
envvar='MARABUNTA_MIGRATION_FILE',
required=True,
help='The yaml file containing the migration steps')
parser.add_argument('--database', '-d',
action=EnvDefault,
envvar='MARABUNTA_DATABASE',
required=True,
help="Odoo's database")
parser.add_argument('--db-user', '-u',
action=EnvDefault,
envvar='MARABUNTA_DB_USER',
required=True,
help="Odoo's database user")
parser.add_argument('--db-password', '-w',
action=EnvDefault,
envvar='MARABUNTA_DB_PASSWORD',
required=True,
help="Odoo's database password")
parser.add_argument('--db-port', '-p',
default=os.environ.get('MARABUNTA_DB_PORT', 5432),
help="Odoo's database port")
parser.add_argument('--db-host', '-H',
default=os.environ.get('MARABUNTA_DB_HOST',
'localhost'),
help="Odoo's database host")
parser.add_argument('--mode',
action=EnvDefault,
envvar='MARABUNTA_MODE',
required=False,
help="Specify the mode in which we run the migration,"
"such as 'demo' or 'prod'. Additional operations "
"of this mode will be executed after the main "
"operations and the addons list of this mode "
"will be merged with the main addons list.")
parser.add_argument('--allow-serie',
action=BoolEnvDefault,
required=False,
envvar='MARABUNTA_ALLOW_SERIE',
help='Allow to run more than 1 version upgrade at a '
'time.')
parser.add_argument('--force-version',
required=False,
default=os.environ.get('MARABUNTA_FORCE_VERSION'),
help='Force upgrade of a version, even if it has '
'already been applied.')
group = parser.add_argument_group(
title='Web',
description='Configuration related to the internal web server, '
'used to publish a maintenance page during the migration.',
)
group.add_argument('--web-host',
required=False,
default=os.environ.get('MARABUNTA_WEB_HOST', '0.0.0.0'),
help='Host for the web server')
group.add_argument('--web-port',
required=False,
default=os.environ.get('MARABUNTA_WEB_PORT', 8069),
help='Port for the web server')
group.add_argument('--web-custom-html',
required=False,
default=os.environ.get(
'MARABUNTA_WEB_CUSTOM_HTML'
),
help='Path to a custom html file to publish')
return parser | Return a parser for command line options. | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/config.py#L90-L161 | null | # -*- coding: utf-8 -*-
# Copyright 2016-2017 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
from distutils.util import strtobool
import argparse
import os
class Config(object):
def __init__(self,
migration_file,
database,
db_user=None,
db_password=None,
db_port=5432,
db_host='localhost',
mode=None,
allow_serie=False,
force_version=None,
web_host='localhost',
web_port=8069,
web_custom_html=None):
self.migration_file = migration_file
self.database = database
self.db_user = db_user
self.db_password = db_password
self.db_port = db_port
self.db_host = db_host
self.mode = mode
self.allow_serie = allow_serie
self.force_version = force_version
if force_version and not allow_serie:
self.allow_serie = True
self.web_host = web_host
self.web_port = web_port
self.web_custom_html = web_custom_html
@classmethod
def from_parse_args(cls, args):
"""Constructor from command line args.
:param args: parse command line arguments
:type args: argparse.ArgumentParser
"""
return cls(args.migration_file,
args.database,
db_user=args.db_user,
db_password=args.db_password,
db_port=args.db_port,
db_host=args.db_host,
mode=args.mode,
allow_serie=args.allow_serie,
force_version=args.force_version,
web_host=args.web_host,
web_port=args.web_port,
web_custom_html=args.web_custom_html,
)
class EnvDefault(argparse.Action):
def __init__(self, envvar, required=True, default=None, **kwargs):
if not default and envvar:
default = self.get_default(envvar)
if required and default is not None:
required = False
super(EnvDefault, self).__init__(default=default, required=required,
**kwargs)
def get_default(self, envvar):
return os.getenv(envvar)
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, values)
class BoolEnvDefault(EnvDefault):
def get_default(self, envvar):
val = os.getenv(envvar, '')
try:
return strtobool(val.lower())
except ValueError:
return False
|
camptocamp/marabunta | marabunta/config.py | Config.from_parse_args | python | def from_parse_args(cls, args):
return cls(args.migration_file,
args.database,
db_user=args.db_user,
db_password=args.db_password,
db_port=args.db_port,
db_host=args.db_host,
mode=args.mode,
allow_serie=args.allow_serie,
force_version=args.force_version,
web_host=args.web_host,
web_port=args.web_port,
web_custom_html=args.web_custom_html,
) | Constructor from command line args.
:param args: parse command line arguments
:type args: argparse.ArgumentParser | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/config.py#L40-L60 | null | class Config(object):
def __init__(self,
migration_file,
database,
db_user=None,
db_password=None,
db_port=5432,
db_host='localhost',
mode=None,
allow_serie=False,
force_version=None,
web_host='localhost',
web_port=8069,
web_custom_html=None):
self.migration_file = migration_file
self.database = database
self.db_user = db_user
self.db_password = db_password
self.db_port = db_port
self.db_host = db_host
self.mode = mode
self.allow_serie = allow_serie
self.force_version = force_version
if force_version and not allow_serie:
self.allow_serie = True
self.web_host = web_host
self.web_port = web_port
self.web_custom_html = web_custom_html
@classmethod
|
camptocamp/marabunta | marabunta/model.py | Version.is_processed | python | def is_processed(self, db_versions):
return self.number in (v.number for v in db_versions if v.date_done) | Check if version is already applied in the database.
:param db_versions: | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L123-L128 | null | class Version(object):
def __init__(self, number, options):
"""Base class for a migration version.
:param number: Valid version number
:type number: String
:param options: Version options
:type options: Instance of a MigrationOption class
"""
try:
MarabuntaVersion().parse(number)
except ValueError:
raise ConfigurationError(
u'{} is not a valid version'.format(number)
)
self.number = number
self._version_modes = {}
self.options = options
self.backup = False
def is_noop(self):
"""Check if version is a no operation version.
"""
has_operations = [mode.pre_operations or mode.post_operations
for mode in self._version_modes.values()]
has_upgrade_addons = [mode.upgrade_addons or mode.remove_addons
for mode in self._version_modes.values()]
noop = not any((has_upgrade_addons, has_operations))
return noop
def skip(self, db_versions):
"""Version is either noop, or it has been processed already.
"""
return self.is_noop() or self.is_processed(db_versions)
def _get_version_mode(self, mode=None):
"""Return a VersionMode for a mode name.
When the mode is None, we are working with the 'base' mode.
"""
version_mode = self._version_modes.get(mode)
if not version_mode:
version_mode = self._version_modes[mode] = VersionMode(name=mode)
return version_mode
def add_operation(self, operation_type, operation, mode=None):
"""Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation`
"""
version_mode = self._get_version_mode(mode=mode)
if operation_type == 'pre':
version_mode.add_pre(operation)
elif operation_type == 'post':
version_mode.add_post(operation)
else:
raise ConfigurationError(
u"Type of operation must be 'pre' or 'post', got %s" %
(operation_type,)
)
def add_backup_operation(self, backup, mode=None):
"""Add a backup operation to the version.
:param backup: To either add or skip the backup
:type backup: Boolean
:param mode: Name of the mode in which the operation is executed
For now, backups are mode-independent
:type mode: String
"""
try:
if self.options.backup:
self.options.backup.ignore_if_operation().execute()
except OperationError:
self.backup = backup
def add_upgrade_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_upgrade_addons(addons)
def add_remove_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_remove_addons(addons)
raise ConfigurationError(
u'Removing addons is not yet supported because it cannot be done '
u'using the command line. You have to uninstall addons using '
u'an Odoo (\'import openerp\') script'
)
def pre_operations(self, mode=None):
""" Return pre-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.pre_operations
def post_operations(self, mode=None):
""" Return post-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.post_operations
def upgrade_addons_operation(self, addons_state, mode=None):
""" Return merged set of main addons and mode's addons """
installed = set(a.name for a in addons_state
if a.state in ('installed', 'to upgrade'))
base_mode = self._get_version_mode()
addons_list = base_mode.upgrade_addons.copy()
if mode:
add_mode = self._get_version_mode(mode=mode)
addons_list |= add_mode.upgrade_addons
to_install = addons_list - installed
to_upgrade = installed & addons_list
return UpgradeAddonsOperation(self.options, to_install, to_upgrade)
def remove_addons_operation(self):
raise NotImplementedError
def __repr__(self):
return u'Version<{}>'.format(self.number)
|
camptocamp/marabunta | marabunta/model.py | Version.is_noop | python | def is_noop(self):
has_operations = [mode.pre_operations or mode.post_operations
for mode in self._version_modes.values()]
has_upgrade_addons = [mode.upgrade_addons or mode.remove_addons
for mode in self._version_modes.values()]
noop = not any((has_upgrade_addons, has_operations))
return noop | Check if version is a no operation version. | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L130-L138 | null | class Version(object):
def __init__(self, number, options):
"""Base class for a migration version.
:param number: Valid version number
:type number: String
:param options: Version options
:type options: Instance of a MigrationOption class
"""
try:
MarabuntaVersion().parse(number)
except ValueError:
raise ConfigurationError(
u'{} is not a valid version'.format(number)
)
self.number = number
self._version_modes = {}
self.options = options
self.backup = False
def is_processed(self, db_versions):
"""Check if version is already applied in the database.
:param db_versions:
"""
return self.number in (v.number for v in db_versions if v.date_done)
def skip(self, db_versions):
"""Version is either noop, or it has been processed already.
"""
return self.is_noop() or self.is_processed(db_versions)
def _get_version_mode(self, mode=None):
"""Return a VersionMode for a mode name.
When the mode is None, we are working with the 'base' mode.
"""
version_mode = self._version_modes.get(mode)
if not version_mode:
version_mode = self._version_modes[mode] = VersionMode(name=mode)
return version_mode
def add_operation(self, operation_type, operation, mode=None):
"""Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation`
"""
version_mode = self._get_version_mode(mode=mode)
if operation_type == 'pre':
version_mode.add_pre(operation)
elif operation_type == 'post':
version_mode.add_post(operation)
else:
raise ConfigurationError(
u"Type of operation must be 'pre' or 'post', got %s" %
(operation_type,)
)
def add_backup_operation(self, backup, mode=None):
"""Add a backup operation to the version.
:param backup: To either add or skip the backup
:type backup: Boolean
:param mode: Name of the mode in which the operation is executed
For now, backups are mode-independent
:type mode: String
"""
try:
if self.options.backup:
self.options.backup.ignore_if_operation().execute()
except OperationError:
self.backup = backup
def add_upgrade_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_upgrade_addons(addons)
def add_remove_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_remove_addons(addons)
raise ConfigurationError(
u'Removing addons is not yet supported because it cannot be done '
u'using the command line. You have to uninstall addons using '
u'an Odoo (\'import openerp\') script'
)
def pre_operations(self, mode=None):
""" Return pre-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.pre_operations
def post_operations(self, mode=None):
""" Return post-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.post_operations
def upgrade_addons_operation(self, addons_state, mode=None):
""" Return merged set of main addons and mode's addons """
installed = set(a.name for a in addons_state
if a.state in ('installed', 'to upgrade'))
base_mode = self._get_version_mode()
addons_list = base_mode.upgrade_addons.copy()
if mode:
add_mode = self._get_version_mode(mode=mode)
addons_list |= add_mode.upgrade_addons
to_install = addons_list - installed
to_upgrade = installed & addons_list
return UpgradeAddonsOperation(self.options, to_install, to_upgrade)
def remove_addons_operation(self):
raise NotImplementedError
def __repr__(self):
return u'Version<{}>'.format(self.number)
|
camptocamp/marabunta | marabunta/model.py | Version._get_version_mode | python | def _get_version_mode(self, mode=None):
version_mode = self._version_modes.get(mode)
if not version_mode:
version_mode = self._version_modes[mode] = VersionMode(name=mode)
return version_mode | Return a VersionMode for a mode name.
When the mode is None, we are working with the 'base' mode. | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L145-L153 | null | class Version(object):
def __init__(self, number, options):
"""Base class for a migration version.
:param number: Valid version number
:type number: String
:param options: Version options
:type options: Instance of a MigrationOption class
"""
try:
MarabuntaVersion().parse(number)
except ValueError:
raise ConfigurationError(
u'{} is not a valid version'.format(number)
)
self.number = number
self._version_modes = {}
self.options = options
self.backup = False
def is_processed(self, db_versions):
"""Check if version is already applied in the database.
:param db_versions:
"""
return self.number in (v.number for v in db_versions if v.date_done)
def is_noop(self):
"""Check if version is a no operation version.
"""
has_operations = [mode.pre_operations or mode.post_operations
for mode in self._version_modes.values()]
has_upgrade_addons = [mode.upgrade_addons or mode.remove_addons
for mode in self._version_modes.values()]
noop = not any((has_upgrade_addons, has_operations))
return noop
def skip(self, db_versions):
"""Version is either noop, or it has been processed already.
"""
return self.is_noop() or self.is_processed(db_versions)
def add_operation(self, operation_type, operation, mode=None):
"""Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation`
"""
version_mode = self._get_version_mode(mode=mode)
if operation_type == 'pre':
version_mode.add_pre(operation)
elif operation_type == 'post':
version_mode.add_post(operation)
else:
raise ConfigurationError(
u"Type of operation must be 'pre' or 'post', got %s" %
(operation_type,)
)
def add_backup_operation(self, backup, mode=None):
"""Add a backup operation to the version.
:param backup: To either add or skip the backup
:type backup: Boolean
:param mode: Name of the mode in which the operation is executed
For now, backups are mode-independent
:type mode: String
"""
try:
if self.options.backup:
self.options.backup.ignore_if_operation().execute()
except OperationError:
self.backup = backup
def add_upgrade_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_upgrade_addons(addons)
def add_remove_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_remove_addons(addons)
raise ConfigurationError(
u'Removing addons is not yet supported because it cannot be done '
u'using the command line. You have to uninstall addons using '
u'an Odoo (\'import openerp\') script'
)
def pre_operations(self, mode=None):
""" Return pre-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.pre_operations
def post_operations(self, mode=None):
""" Return post-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.post_operations
def upgrade_addons_operation(self, addons_state, mode=None):
""" Return merged set of main addons and mode's addons """
installed = set(a.name for a in addons_state
if a.state in ('installed', 'to upgrade'))
base_mode = self._get_version_mode()
addons_list = base_mode.upgrade_addons.copy()
if mode:
add_mode = self._get_version_mode(mode=mode)
addons_list |= add_mode.upgrade_addons
to_install = addons_list - installed
to_upgrade = installed & addons_list
return UpgradeAddonsOperation(self.options, to_install, to_upgrade)
def remove_addons_operation(self):
raise NotImplementedError
def __repr__(self):
return u'Version<{}>'.format(self.number)
|
camptocamp/marabunta | marabunta/model.py | Version.add_operation | python | def add_operation(self, operation_type, operation, mode=None):
version_mode = self._get_version_mode(mode=mode)
if operation_type == 'pre':
version_mode.add_pre(operation)
elif operation_type == 'post':
version_mode.add_post(operation)
else:
raise ConfigurationError(
u"Type of operation must be 'pre' or 'post', got %s" %
(operation_type,)
) | Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation` | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L155-L174 | [
"def _get_version_mode(self, mode=None):\n \"\"\"Return a VersionMode for a mode name.\n\n When the mode is None, we are working with the 'base' mode.\n \"\"\"\n version_mode = self._version_modes.get(mode)\n if not version_mode:\n version_mode = self._version_modes[mode] = VersionMode(name=mode)\n return version_mode\n",
"def add_pre(self, operation):\n self.pre_operations.append(operation)\n",
"def add_post(self, operation):\n self.post_operations.append(operation)\n"
] | class Version(object):
def __init__(self, number, options):
"""Base class for a migration version.
:param number: Valid version number
:type number: String
:param options: Version options
:type options: Instance of a MigrationOption class
"""
try:
MarabuntaVersion().parse(number)
except ValueError:
raise ConfigurationError(
u'{} is not a valid version'.format(number)
)
self.number = number
self._version_modes = {}
self.options = options
self.backup = False
def is_processed(self, db_versions):
"""Check if version is already applied in the database.
:param db_versions:
"""
return self.number in (v.number for v in db_versions if v.date_done)
def is_noop(self):
"""Check if version is a no operation version.
"""
has_operations = [mode.pre_operations or mode.post_operations
for mode in self._version_modes.values()]
has_upgrade_addons = [mode.upgrade_addons or mode.remove_addons
for mode in self._version_modes.values()]
noop = not any((has_upgrade_addons, has_operations))
return noop
def skip(self, db_versions):
"""Version is either noop, or it has been processed already.
"""
return self.is_noop() or self.is_processed(db_versions)
def _get_version_mode(self, mode=None):
"""Return a VersionMode for a mode name.
When the mode is None, we are working with the 'base' mode.
"""
version_mode = self._version_modes.get(mode)
if not version_mode:
version_mode = self._version_modes[mode] = VersionMode(name=mode)
return version_mode
def add_backup_operation(self, backup, mode=None):
"""Add a backup operation to the version.
:param backup: To either add or skip the backup
:type backup: Boolean
:param mode: Name of the mode in which the operation is executed
For now, backups are mode-independent
:type mode: String
"""
try:
if self.options.backup:
self.options.backup.ignore_if_operation().execute()
except OperationError:
self.backup = backup
def add_upgrade_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_upgrade_addons(addons)
def add_remove_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_remove_addons(addons)
raise ConfigurationError(
u'Removing addons is not yet supported because it cannot be done '
u'using the command line. You have to uninstall addons using '
u'an Odoo (\'import openerp\') script'
)
def pre_operations(self, mode=None):
""" Return pre-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.pre_operations
def post_operations(self, mode=None):
""" Return post-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.post_operations
def upgrade_addons_operation(self, addons_state, mode=None):
""" Return merged set of main addons and mode's addons """
installed = set(a.name for a in addons_state
if a.state in ('installed', 'to upgrade'))
base_mode = self._get_version_mode()
addons_list = base_mode.upgrade_addons.copy()
if mode:
add_mode = self._get_version_mode(mode=mode)
addons_list |= add_mode.upgrade_addons
to_install = addons_list - installed
to_upgrade = installed & addons_list
return UpgradeAddonsOperation(self.options, to_install, to_upgrade)
def remove_addons_operation(self):
raise NotImplementedError
def __repr__(self):
return u'Version<{}>'.format(self.number)
|
camptocamp/marabunta | marabunta/model.py | Version.add_backup_operation | python | def add_backup_operation(self, backup, mode=None):
try:
if self.options.backup:
self.options.backup.ignore_if_operation().execute()
except OperationError:
self.backup = backup | Add a backup operation to the version.
:param backup: To either add or skip the backup
:type backup: Boolean
:param mode: Name of the mode in which the operation is executed
For now, backups are mode-independent
:type mode: String | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L176-L189 | null | class Version(object):
def __init__(self, number, options):
"""Base class for a migration version.
:param number: Valid version number
:type number: String
:param options: Version options
:type options: Instance of a MigrationOption class
"""
try:
MarabuntaVersion().parse(number)
except ValueError:
raise ConfigurationError(
u'{} is not a valid version'.format(number)
)
self.number = number
self._version_modes = {}
self.options = options
self.backup = False
def is_processed(self, db_versions):
"""Check if version is already applied in the database.
:param db_versions:
"""
return self.number in (v.number for v in db_versions if v.date_done)
def is_noop(self):
"""Check if version is a no operation version.
"""
has_operations = [mode.pre_operations or mode.post_operations
for mode in self._version_modes.values()]
has_upgrade_addons = [mode.upgrade_addons or mode.remove_addons
for mode in self._version_modes.values()]
noop = not any((has_upgrade_addons, has_operations))
return noop
def skip(self, db_versions):
"""Version is either noop, or it has been processed already.
"""
return self.is_noop() or self.is_processed(db_versions)
def _get_version_mode(self, mode=None):
"""Return a VersionMode for a mode name.
When the mode is None, we are working with the 'base' mode.
"""
version_mode = self._version_modes.get(mode)
if not version_mode:
version_mode = self._version_modes[mode] = VersionMode(name=mode)
return version_mode
def add_operation(self, operation_type, operation, mode=None):
"""Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation`
"""
version_mode = self._get_version_mode(mode=mode)
if operation_type == 'pre':
version_mode.add_pre(operation)
elif operation_type == 'post':
version_mode.add_post(operation)
else:
raise ConfigurationError(
u"Type of operation must be 'pre' or 'post', got %s" %
(operation_type,)
)
def add_upgrade_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_upgrade_addons(addons)
def add_remove_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_remove_addons(addons)
raise ConfigurationError(
u'Removing addons is not yet supported because it cannot be done '
u'using the command line. You have to uninstall addons using '
u'an Odoo (\'import openerp\') script'
)
def pre_operations(self, mode=None):
""" Return pre-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.pre_operations
def post_operations(self, mode=None):
""" Return post-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.post_operations
def upgrade_addons_operation(self, addons_state, mode=None):
""" Return merged set of main addons and mode's addons """
installed = set(a.name for a in addons_state
if a.state in ('installed', 'to upgrade'))
base_mode = self._get_version_mode()
addons_list = base_mode.upgrade_addons.copy()
if mode:
add_mode = self._get_version_mode(mode=mode)
addons_list |= add_mode.upgrade_addons
to_install = addons_list - installed
to_upgrade = installed & addons_list
return UpgradeAddonsOperation(self.options, to_install, to_upgrade)
def remove_addons_operation(self):
raise NotImplementedError
def __repr__(self):
return u'Version<{}>'.format(self.number)
|
camptocamp/marabunta | marabunta/model.py | Version.pre_operations | python | def pre_operations(self, mode=None):
version_mode = self._get_version_mode(mode=mode)
return version_mode.pre_operations | Return pre-operations only for the mode asked | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L204-L207 | [
"def _get_version_mode(self, mode=None):\n \"\"\"Return a VersionMode for a mode name.\n\n When the mode is None, we are working with the 'base' mode.\n \"\"\"\n version_mode = self._version_modes.get(mode)\n if not version_mode:\n version_mode = self._version_modes[mode] = VersionMode(name=mode)\n return version_mode\n"
] | class Version(object):
def __init__(self, number, options):
"""Base class for a migration version.
:param number: Valid version number
:type number: String
:param options: Version options
:type options: Instance of a MigrationOption class
"""
try:
MarabuntaVersion().parse(number)
except ValueError:
raise ConfigurationError(
u'{} is not a valid version'.format(number)
)
self.number = number
self._version_modes = {}
self.options = options
self.backup = False
def is_processed(self, db_versions):
"""Check if version is already applied in the database.
:param db_versions:
"""
return self.number in (v.number for v in db_versions if v.date_done)
def is_noop(self):
"""Check if version is a no operation version.
"""
has_operations = [mode.pre_operations or mode.post_operations
for mode in self._version_modes.values()]
has_upgrade_addons = [mode.upgrade_addons or mode.remove_addons
for mode in self._version_modes.values()]
noop = not any((has_upgrade_addons, has_operations))
return noop
def skip(self, db_versions):
"""Version is either noop, or it has been processed already.
"""
return self.is_noop() or self.is_processed(db_versions)
def _get_version_mode(self, mode=None):
"""Return a VersionMode for a mode name.
When the mode is None, we are working with the 'base' mode.
"""
version_mode = self._version_modes.get(mode)
if not version_mode:
version_mode = self._version_modes[mode] = VersionMode(name=mode)
return version_mode
def add_operation(self, operation_type, operation, mode=None):
"""Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation`
"""
version_mode = self._get_version_mode(mode=mode)
if operation_type == 'pre':
version_mode.add_pre(operation)
elif operation_type == 'post':
version_mode.add_post(operation)
else:
raise ConfigurationError(
u"Type of operation must be 'pre' or 'post', got %s" %
(operation_type,)
)
def add_backup_operation(self, backup, mode=None):
"""Add a backup operation to the version.
:param backup: To either add or skip the backup
:type backup: Boolean
:param mode: Name of the mode in which the operation is executed
For now, backups are mode-independent
:type mode: String
"""
try:
if self.options.backup:
self.options.backup.ignore_if_operation().execute()
except OperationError:
self.backup = backup
def add_upgrade_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_upgrade_addons(addons)
def add_remove_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_remove_addons(addons)
raise ConfigurationError(
u'Removing addons is not yet supported because it cannot be done '
u'using the command line. You have to uninstall addons using '
u'an Odoo (\'import openerp\') script'
)
def post_operations(self, mode=None):
""" Return post-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.post_operations
def upgrade_addons_operation(self, addons_state, mode=None):
""" Return merged set of main addons and mode's addons """
installed = set(a.name for a in addons_state
if a.state in ('installed', 'to upgrade'))
base_mode = self._get_version_mode()
addons_list = base_mode.upgrade_addons.copy()
if mode:
add_mode = self._get_version_mode(mode=mode)
addons_list |= add_mode.upgrade_addons
to_install = addons_list - installed
to_upgrade = installed & addons_list
return UpgradeAddonsOperation(self.options, to_install, to_upgrade)
def remove_addons_operation(self):
raise NotImplementedError
def __repr__(self):
return u'Version<{}>'.format(self.number)
|
camptocamp/marabunta | marabunta/model.py | Version.post_operations | python | def post_operations(self, mode=None):
version_mode = self._get_version_mode(mode=mode)
return version_mode.post_operations | Return post-operations only for the mode asked | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L209-L212 | [
"def _get_version_mode(self, mode=None):\n \"\"\"Return a VersionMode for a mode name.\n\n When the mode is None, we are working with the 'base' mode.\n \"\"\"\n version_mode = self._version_modes.get(mode)\n if not version_mode:\n version_mode = self._version_modes[mode] = VersionMode(name=mode)\n return version_mode\n"
] | class Version(object):
def __init__(self, number, options):
"""Base class for a migration version.
:param number: Valid version number
:type number: String
:param options: Version options
:type options: Instance of a MigrationOption class
"""
try:
MarabuntaVersion().parse(number)
except ValueError:
raise ConfigurationError(
u'{} is not a valid version'.format(number)
)
self.number = number
self._version_modes = {}
self.options = options
self.backup = False
def is_processed(self, db_versions):
"""Check if version is already applied in the database.
:param db_versions:
"""
return self.number in (v.number for v in db_versions if v.date_done)
def is_noop(self):
"""Check if version is a no operation version.
"""
has_operations = [mode.pre_operations or mode.post_operations
for mode in self._version_modes.values()]
has_upgrade_addons = [mode.upgrade_addons or mode.remove_addons
for mode in self._version_modes.values()]
noop = not any((has_upgrade_addons, has_operations))
return noop
def skip(self, db_versions):
"""Version is either noop, or it has been processed already.
"""
return self.is_noop() or self.is_processed(db_versions)
def _get_version_mode(self, mode=None):
"""Return a VersionMode for a mode name.
When the mode is None, we are working with the 'base' mode.
"""
version_mode = self._version_modes.get(mode)
if not version_mode:
version_mode = self._version_modes[mode] = VersionMode(name=mode)
return version_mode
def add_operation(self, operation_type, operation, mode=None):
"""Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation`
"""
version_mode = self._get_version_mode(mode=mode)
if operation_type == 'pre':
version_mode.add_pre(operation)
elif operation_type == 'post':
version_mode.add_post(operation)
else:
raise ConfigurationError(
u"Type of operation must be 'pre' or 'post', got %s" %
(operation_type,)
)
def add_backup_operation(self, backup, mode=None):
"""Add a backup operation to the version.
:param backup: To either add or skip the backup
:type backup: Boolean
:param mode: Name of the mode in which the operation is executed
For now, backups are mode-independent
:type mode: String
"""
try:
if self.options.backup:
self.options.backup.ignore_if_operation().execute()
except OperationError:
self.backup = backup
def add_upgrade_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_upgrade_addons(addons)
def add_remove_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_remove_addons(addons)
raise ConfigurationError(
u'Removing addons is not yet supported because it cannot be done '
u'using the command line. You have to uninstall addons using '
u'an Odoo (\'import openerp\') script'
)
def pre_operations(self, mode=None):
""" Return pre-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.pre_operations
def upgrade_addons_operation(self, addons_state, mode=None):
""" Return merged set of main addons and mode's addons """
installed = set(a.name for a in addons_state
if a.state in ('installed', 'to upgrade'))
base_mode = self._get_version_mode()
addons_list = base_mode.upgrade_addons.copy()
if mode:
add_mode = self._get_version_mode(mode=mode)
addons_list |= add_mode.upgrade_addons
to_install = addons_list - installed
to_upgrade = installed & addons_list
return UpgradeAddonsOperation(self.options, to_install, to_upgrade)
def remove_addons_operation(self):
raise NotImplementedError
def __repr__(self):
return u'Version<{}>'.format(self.number)
|
camptocamp/marabunta | marabunta/model.py | Version.upgrade_addons_operation | python | def upgrade_addons_operation(self, addons_state, mode=None):
installed = set(a.name for a in addons_state
if a.state in ('installed', 'to upgrade'))
base_mode = self._get_version_mode()
addons_list = base_mode.upgrade_addons.copy()
if mode:
add_mode = self._get_version_mode(mode=mode)
addons_list |= add_mode.upgrade_addons
to_install = addons_list - installed
to_upgrade = installed & addons_list
return UpgradeAddonsOperation(self.options, to_install, to_upgrade) | Return merged set of main addons and mode's addons | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/model.py#L214-L228 | [
"def _get_version_mode(self, mode=None):\n \"\"\"Return a VersionMode for a mode name.\n\n When the mode is None, we are working with the 'base' mode.\n \"\"\"\n version_mode = self._version_modes.get(mode)\n if not version_mode:\n version_mode = self._version_modes[mode] = VersionMode(name=mode)\n return version_mode\n"
] | class Version(object):
def __init__(self, number, options):
"""Base class for a migration version.
:param number: Valid version number
:type number: String
:param options: Version options
:type options: Instance of a MigrationOption class
"""
try:
MarabuntaVersion().parse(number)
except ValueError:
raise ConfigurationError(
u'{} is not a valid version'.format(number)
)
self.number = number
self._version_modes = {}
self.options = options
self.backup = False
def is_processed(self, db_versions):
"""Check if version is already applied in the database.
:param db_versions:
"""
return self.number in (v.number for v in db_versions if v.date_done)
def is_noop(self):
"""Check if version is a no operation version.
"""
has_operations = [mode.pre_operations or mode.post_operations
for mode in self._version_modes.values()]
has_upgrade_addons = [mode.upgrade_addons or mode.remove_addons
for mode in self._version_modes.values()]
noop = not any((has_upgrade_addons, has_operations))
return noop
def skip(self, db_versions):
"""Version is either noop, or it has been processed already.
"""
return self.is_noop() or self.is_processed(db_versions)
def _get_version_mode(self, mode=None):
"""Return a VersionMode for a mode name.
When the mode is None, we are working with the 'base' mode.
"""
version_mode = self._version_modes.get(mode)
if not version_mode:
version_mode = self._version_modes[mode] = VersionMode(name=mode)
return version_mode
def add_operation(self, operation_type, operation, mode=None):
"""Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation`
"""
version_mode = self._get_version_mode(mode=mode)
if operation_type == 'pre':
version_mode.add_pre(operation)
elif operation_type == 'post':
version_mode.add_post(operation)
else:
raise ConfigurationError(
u"Type of operation must be 'pre' or 'post', got %s" %
(operation_type,)
)
def add_backup_operation(self, backup, mode=None):
"""Add a backup operation to the version.
:param backup: To either add or skip the backup
:type backup: Boolean
:param mode: Name of the mode in which the operation is executed
For now, backups are mode-independent
:type mode: String
"""
try:
if self.options.backup:
self.options.backup.ignore_if_operation().execute()
except OperationError:
self.backup = backup
def add_upgrade_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_upgrade_addons(addons)
def add_remove_addons(self, addons, mode=None):
version_mode = self._get_version_mode(mode=mode)
version_mode.add_remove_addons(addons)
raise ConfigurationError(
u'Removing addons is not yet supported because it cannot be done '
u'using the command line. You have to uninstall addons using '
u'an Odoo (\'import openerp\') script'
)
def pre_operations(self, mode=None):
""" Return pre-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.pre_operations
def post_operations(self, mode=None):
""" Return post-operations only for the mode asked """
version_mode = self._get_version_mode(mode=mode)
return version_mode.post_operations
def remove_addons_operation(self):
raise NotImplementedError
def __repr__(self):
return u'Version<{}>'.format(self.number)
|
camptocamp/marabunta | marabunta/core.py | migrate | python | def migrate(config):
webapp = WebApp(config.web_host, config.web_port,
custom_maintenance_file=config.web_custom_html)
webserver = WebServer(webapp)
webserver.daemon = True
webserver.start()
migration_parser = YamlParser.parse_from_file(config.migration_file)
migration = migration_parser.parse()
database = Database(config)
with database.connect() as lock_connection:
application_lock = ApplicationLock(lock_connection)
application_lock.start()
while not application_lock.acquired:
time.sleep(0.5)
else:
if application_lock.replica:
# when a replica could finally acquire a lock, it
# means that the concurrent process has finished the
# migration or that it failed to run it.
# In both cases after the lock is released, this process will
# verify if it has still to do something (if the other process
# failed mainly).
application_lock.stop = True
application_lock.join()
# we are not in the replica or the lock is released: go on for the
# migration
try:
table = MigrationTable(database)
runner = Runner(config, migration, database, table)
runner.perform()
finally:
application_lock.stop = True
application_lock.join() | Perform a migration according to config.
:param config: The configuration to be applied
:type config: Config | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/core.py#L103-L146 | [
"def parse_from_file(cls, filename):\n \"\"\"Construct YamlParser from a filename.\"\"\"\n with open(filename, 'r') as fh:\n return cls.parser_from_buffer(fh)\n",
"def perform(self):\n self.table.create_if_not_exists()\n\n db_versions = self.table.versions()\n\n if not self.config.force_version:\n unfinished = [db_version for db_version\n in db_versions\n if not db_version.date_done]\n if unfinished:\n raise MigrationError(\n u'Upgrade of version {} has been attempted and failed. '\n u'You may want to restore the backup or to run again the '\n u'migration with the MARABUNTA_FORCE_VERSION '\n u'environment variable '\n u'or to fix it manually (in that case, you will have to '\n u'update the \\'marabunta_version\\' table yourself.'\n .format(u','.join(v.number for v in unfinished))\n )\n\n unprocessed = [version for version in self.migration.versions\n if not version.skip(db_versions)]\n\n if not self.config.allow_serie:\n if len(unprocessed) > 1:\n raise MigrationError(\n u'Only one version can be upgraded at a time.\\n'\n u'The following versions need to be applied: {}.\\n'.format(\n [v.number for v in unprocessed]\n )\n )\n\n if not self.config.force_version and db_versions and unprocessed:\n installed = max(MarabuntaVersion(v.number) for v in db_versions)\n next_unprocess = min(\n MarabuntaVersion(v.number) for v in unprocessed\n )\n if installed > next_unprocess:\n raise MigrationError(\n u'The version you are trying to install ({}) is below '\n u'the current database version.'.format(\n next_unprocess, installed\n )\n )\n\n backup_options = self.migration.options.backup\n run_backup = (\n backup_options and (\n # If we are forcing a version, we want a backup\n self.config.force_version\n # If any of the version not yet processed, including the noop\n # versions, need a backup, we run it. 
(note: by default,\n # noop versions don't trigger a backup but it can be\n # explicitly activated)\n or any(version.backup for version in self.migration.versions\n if not version.is_processed(db_versions))\n )\n )\n if run_backup:\n backup_operation = backup_options.command_operation(self.config)\n backup_operation.execute(self.log)\n\n for version in self.migration.versions:\n # when we force-execute one version, we skip all the others\n if self.config.force_version:\n if self.config.force_version != version.number:\n continue\n else:\n self.log(\n u'force-execute version {}'.format(version.number)\n )\n\n self.log(u'processing version {}'.format(version.number))\n VersionRunner(self, version).perform()\n"
] | # -*- coding: utf-8 -*-
# Copyright 2016-2017 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
"""
Marabunta is a name given to the migration of the legionary ants or to the ants
themselves. Restless, they eat and digest everything in their way.
This tool aims to run migrations for Odoo versions as efficiencly as a
Marabunta migration.
It loads migration instructions from a YAML file and run the operations if
required.
"""
from __future__ import print_function
import logging
import time
import threading
from .config import Config, get_args_parser
from .database import Database, MigrationTable
from .output import safe_print
from .parser import YamlParser
from .runner import Runner
from .web import WebApp
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
logging.getLogger('werkzeug').setLevel(logging.ERROR)
# The number below has been generated as below:
# pg_lock accepts an int8 so we build an hash composed with
# contextual information and we throw away some bits
# lock_name = 'marabunta'
# hasher = hashlib.sha1()
# hasher.update('{}'.format(lock_name))
# lock_ident = struct.unpack('q', hasher.digest()[:8])
# we just need an integer
ADVISORY_LOCK_IDENT = 7141416871301361999
def pg_advisory_lock(cursor, lock_ident):
cursor.execute('SELECT pg_try_advisory_xact_lock(%s);', (lock_ident,))
acquired = cursor.fetchone()[0]
return acquired
class ApplicationLock(threading.Thread):
def __init__(self, connection):
self.acquired = False
self.connection = connection
self.replica = False
self.stop = False
super(ApplicationLock, self).__init__()
def run(self):
with self.connection.cursor() as cursor:
# If the migration is run concurrently (in several
# containers, hosts, ...), only 1 is allowed to proceed
# with the migration. It will be the first one to win
# the advisory lock. The others will be flagged as 'replica'.
while not pg_advisory_lock(cursor, ADVISORY_LOCK_IDENT):
if not self.replica: # print only the first time
safe_print('A concurrent process is already '
'running the migration')
self.replica = True
time.sleep(0.5)
else:
self.acquired = True
idx = 0
while not self.stop:
# keep the connection alive to maintain the advisory
# lock by running a query every 30 seconds
if idx == 60:
cursor.execute("SELECT 1")
idx = 0
idx += 1
# keep the sleep small to be able to exit quickly
# when 'stop' is set to True
time.sleep(0.5)
class WebServer(threading.Thread):
def __init__(self, app):
super(WebServer, self).__init__()
self.app = app
def run(self):
self.app.serve()
def main():
"""Parse the command line and run :func:`migrate`."""
parser = get_args_parser()
args = parser.parse_args()
config = Config.from_parse_args(args)
migrate(config)
if __name__ == '__main__':
main()
|
camptocamp/marabunta | marabunta/core.py | main | python | def main():
parser = get_args_parser()
args = parser.parse_args()
config = Config.from_parse_args(args)
migrate(config) | Parse the command line and run :func:`migrate`. | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/core.py#L149-L154 | [
"def get_args_parser():\n \"\"\"Return a parser for command line options.\"\"\"\n parser = argparse.ArgumentParser(\n description='Marabunta: Migrating ants for Odoo')\n parser.add_argument('--migration-file', '-f',\n action=EnvDefault,\n envvar='MARABUNTA_MIGRATION_FILE',\n required=True,\n help='The yaml file containing the migration steps')\n parser.add_argument('--database', '-d',\n action=EnvDefault,\n envvar='MARABUNTA_DATABASE',\n required=True,\n help=\"Odoo's database\")\n parser.add_argument('--db-user', '-u',\n action=EnvDefault,\n envvar='MARABUNTA_DB_USER',\n required=True,\n help=\"Odoo's database user\")\n parser.add_argument('--db-password', '-w',\n action=EnvDefault,\n envvar='MARABUNTA_DB_PASSWORD',\n required=True,\n help=\"Odoo's database password\")\n parser.add_argument('--db-port', '-p',\n default=os.environ.get('MARABUNTA_DB_PORT', 5432),\n help=\"Odoo's database port\")\n parser.add_argument('--db-host', '-H',\n default=os.environ.get('MARABUNTA_DB_HOST',\n 'localhost'),\n help=\"Odoo's database host\")\n parser.add_argument('--mode',\n action=EnvDefault,\n envvar='MARABUNTA_MODE',\n required=False,\n help=\"Specify the mode in which we run the migration,\"\n \"such as 'demo' or 'prod'. 
Additional operations \"\n \"of this mode will be executed after the main \"\n \"operations and the addons list of this mode \"\n \"will be merged with the main addons list.\")\n parser.add_argument('--allow-serie',\n action=BoolEnvDefault,\n required=False,\n envvar='MARABUNTA_ALLOW_SERIE',\n help='Allow to run more than 1 version upgrade at a '\n 'time.')\n parser.add_argument('--force-version',\n required=False,\n default=os.environ.get('MARABUNTA_FORCE_VERSION'),\n help='Force upgrade of a version, even if it has '\n 'already been applied.')\n\n group = parser.add_argument_group(\n title='Web',\n description='Configuration related to the internal web server, '\n 'used to publish a maintenance page during the migration.',\n )\n group.add_argument('--web-host',\n required=False,\n default=os.environ.get('MARABUNTA_WEB_HOST', '0.0.0.0'),\n help='Host for the web server')\n group.add_argument('--web-port',\n required=False,\n default=os.environ.get('MARABUNTA_WEB_PORT', 8069),\n help='Port for the web server')\n group.add_argument('--web-custom-html',\n required=False,\n default=os.environ.get(\n 'MARABUNTA_WEB_CUSTOM_HTML'\n ),\n help='Path to a custom html file to publish')\n return parser\n",
"def migrate(config):\n \"\"\"Perform a migration according to config.\n\n :param config: The configuration to be applied\n :type config: Config\n \"\"\"\n webapp = WebApp(config.web_host, config.web_port,\n custom_maintenance_file=config.web_custom_html)\n\n webserver = WebServer(webapp)\n webserver.daemon = True\n webserver.start()\n\n migration_parser = YamlParser.parse_from_file(config.migration_file)\n migration = migration_parser.parse()\n\n database = Database(config)\n\n with database.connect() as lock_connection:\n application_lock = ApplicationLock(lock_connection)\n application_lock.start()\n\n while not application_lock.acquired:\n time.sleep(0.5)\n else:\n if application_lock.replica:\n # when a replica could finally acquire a lock, it\n # means that the concurrent process has finished the\n # migration or that it failed to run it.\n # In both cases after the lock is released, this process will\n # verify if it has still to do something (if the other process\n # failed mainly).\n application_lock.stop = True\n application_lock.join()\n # we are not in the replica or the lock is released: go on for the\n # migration\n\n try:\n table = MigrationTable(database)\n runner = Runner(config, migration, database, table)\n runner.perform()\n finally:\n application_lock.stop = True\n application_lock.join()\n",
"def from_parse_args(cls, args):\n \"\"\"Constructor from command line args.\n\n :param args: parse command line arguments\n :type args: argparse.ArgumentParser\n\n \"\"\"\n\n return cls(args.migration_file,\n args.database,\n db_user=args.db_user,\n db_password=args.db_password,\n db_port=args.db_port,\n db_host=args.db_host,\n mode=args.mode,\n allow_serie=args.allow_serie,\n force_version=args.force_version,\n web_host=args.web_host,\n web_port=args.web_port,\n web_custom_html=args.web_custom_html,\n )\n"
] | # -*- coding: utf-8 -*-
# Copyright 2016-2017 Camptocamp SA
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html)
"""
Marabunta is a name given to the migration of the legionary ants or to the ants
themselves. Restless, they eat and digest everything in their way.
This tool aims to run migrations for Odoo versions as efficiencly as a
Marabunta migration.
It loads migration instructions from a YAML file and run the operations if
required.
"""
from __future__ import print_function
import logging
import time
import threading
from .config import Config, get_args_parser
from .database import Database, MigrationTable
from .output import safe_print
from .parser import YamlParser
from .runner import Runner
from .web import WebApp
from pkg_resources import get_distribution, DistributionNotFound
try:
__version__ = get_distribution(__name__).version
except DistributionNotFound:
# package is not installed
pass
logging.getLogger('werkzeug').setLevel(logging.ERROR)
# The number below has been generated as below:
# pg_lock accepts an int8 so we build an hash composed with
# contextual information and we throw away some bits
# lock_name = 'marabunta'
# hasher = hashlib.sha1()
# hasher.update('{}'.format(lock_name))
# lock_ident = struct.unpack('q', hasher.digest()[:8])
# we just need an integer
ADVISORY_LOCK_IDENT = 7141416871301361999
def pg_advisory_lock(cursor, lock_ident):
cursor.execute('SELECT pg_try_advisory_xact_lock(%s);', (lock_ident,))
acquired = cursor.fetchone()[0]
return acquired
class ApplicationLock(threading.Thread):
def __init__(self, connection):
self.acquired = False
self.connection = connection
self.replica = False
self.stop = False
super(ApplicationLock, self).__init__()
def run(self):
with self.connection.cursor() as cursor:
# If the migration is run concurrently (in several
# containers, hosts, ...), only 1 is allowed to proceed
# with the migration. It will be the first one to win
# the advisory lock. The others will be flagged as 'replica'.
while not pg_advisory_lock(cursor, ADVISORY_LOCK_IDENT):
if not self.replica: # print only the first time
safe_print('A concurrent process is already '
'running the migration')
self.replica = True
time.sleep(0.5)
else:
self.acquired = True
idx = 0
while not self.stop:
# keep the connection alive to maintain the advisory
# lock by running a query every 30 seconds
if idx == 60:
cursor.execute("SELECT 1")
idx = 0
idx += 1
# keep the sleep small to be able to exit quickly
# when 'stop' is set to True
time.sleep(0.5)
class WebServer(threading.Thread):
def __init__(self, app):
super(WebServer, self).__init__()
self.app = app
def run(self):
self.app.serve()
def migrate(config):
"""Perform a migration according to config.
:param config: The configuration to be applied
:type config: Config
"""
webapp = WebApp(config.web_host, config.web_port,
custom_maintenance_file=config.web_custom_html)
webserver = WebServer(webapp)
webserver.daemon = True
webserver.start()
migration_parser = YamlParser.parse_from_file(config.migration_file)
migration = migration_parser.parse()
database = Database(config)
with database.connect() as lock_connection:
application_lock = ApplicationLock(lock_connection)
application_lock.start()
while not application_lock.acquired:
time.sleep(0.5)
else:
if application_lock.replica:
# when a replica could finally acquire a lock, it
# means that the concurrent process has finished the
# migration or that it failed to run it.
# In both cases after the lock is released, this process will
# verify if it has still to do something (if the other process
# failed mainly).
application_lock.stop = True
application_lock.join()
# we are not in the replica or the lock is released: go on for the
# migration
try:
table = MigrationTable(database)
runner = Runner(config, migration, database, table)
runner.perform()
finally:
application_lock.stop = True
application_lock.join()
if __name__ == '__main__':
main()
|
camptocamp/marabunta | marabunta/parser.py | YamlParser.parser_from_buffer | python | def parser_from_buffer(cls, fp):
yaml = YAML(typ="safe")
return cls(yaml.load(fp)) | Construct YamlParser from a file pointer. | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/parser.py#L87-L90 | null | class YamlParser(object):
def __init__(self, parsed):
self.parsed = parsed
@classmethod
@classmethod
def parse_from_file(cls, filename):
"""Construct YamlParser from a filename."""
with open(filename, 'r') as fh:
return cls.parser_from_buffer(fh)
def check_dict_expected_keys(self, expected_keys, current, dict_name):
""" Check that we don't have unknown keys in a dictionary.
It does not raise an error if we have less keys than expected.
"""
if not isinstance(current, dict):
raise ParseError(u"'{}' key must be a dict".format(dict_name),
YAML_EXAMPLE)
expected_keys = set(expected_keys)
current_keys = {key for key in current}
extra_keys = current_keys - expected_keys
if extra_keys:
message = u"{}: the keys {} are unexpected. (allowed keys: {})"
raise ParseError(
message.format(
dict_name,
list(extra_keys),
list(expected_keys),
),
YAML_EXAMPLE,
)
def parse(self):
"""Check input and return a :class:`Migration` instance."""
if not self.parsed.get('migration'):
raise ParseError(u"'migration' key is missing", YAML_EXAMPLE)
self.check_dict_expected_keys(
{'options', 'versions'}, self.parsed['migration'], 'migration',
)
return self._parse_migrations()
def _parse_migrations(self):
"""Build a :class:`Migration` instance."""
migration = self.parsed['migration']
options = self._parse_options(migration)
versions = self._parse_versions(migration, options)
return Migration(versions, options)
def _parse_options(self, migration):
"""Build :class:`MigrationOption` and
:class:`MigrationBackupOption` instances."""
options = migration.get('options', {})
install_command = options.get('install_command')
backup = options.get('backup')
if backup:
self.check_dict_expected_keys(
{'command', 'ignore_if', 'stop_on_failure'},
options['backup'], 'backup',
)
backup = MigrationBackupOption(
command=backup.get('command'),
ignore_if=backup.get('ignore_if'),
stop_on_failure=backup.get('stop_on_failure', True),
)
return MigrationOption(
install_command=install_command,
backup=backup,
)
def _parse_versions(self, migration, options):
versions = migration.get('versions') or []
if not isinstance(versions, list):
raise ParseError(u"'versions' key must be a list", YAML_EXAMPLE)
if versions[0]['version'] != FIRST_VERSION:
warnings_msg = u'First version should be named `setup`'
warnings.warn(warnings_msg, FutureWarning)
return [self._parse_version(version, options) for version in versions]
def _parse_operations(self, version, operations, mode=None):
self.check_dict_expected_keys(
{'pre', 'post'}, operations, 'operations',
)
for operation_type, commands in operations.items():
if not isinstance(commands, list):
raise ParseError(u"'%s' key must be a list" %
(operation_type,), YAML_EXAMPLE)
for command in commands:
version.add_operation(
operation_type,
Operation(command),
mode=mode,
)
def _parse_addons(self, version, addons, mode=None):
self.check_dict_expected_keys(
{'upgrade', 'remove'}, addons, 'addons',
)
upgrade = addons.get('upgrade') or []
if upgrade:
if not isinstance(upgrade, list):
raise ParseError(u"'upgrade' key must be a list", YAML_EXAMPLE)
version.add_upgrade_addons(upgrade, mode=mode)
remove = addons.get('remove') or []
if remove:
if not isinstance(remove, list):
raise ParseError(u"'remove' key must be a list", YAML_EXAMPLE)
version.add_remove_addons(remove, mode=mode)
def _parse_backup(self, version, backup=True, mode=None):
if not isinstance(backup, bool):
raise ParseError(u"'backup' key must be a boolean", YAML_EXAMPLE)
version.add_backup_operation(backup, mode=mode)
def _parse_version(self, parsed_version, options):
self.check_dict_expected_keys(
{'version', 'operations', 'addons', 'modes', 'backup'},
parsed_version, 'versions',
)
number = parsed_version.get('version')
version = Version(number, options)
# parse the main operations, backup and addons
operations = parsed_version.get('operations') or {}
self._parse_operations(version, operations)
addons = parsed_version.get('addons') or {}
self._parse_addons(version, addons)
# parse the modes operations and addons
modes = parsed_version.get('modes', {})
if not isinstance(modes, dict):
raise ParseError(u"'modes' key must be a dict", YAML_EXAMPLE)
for mode_name, mode in modes.items():
self.check_dict_expected_keys(
{'operations', 'addons'}, mode, mode_name,
)
mode_operations = mode.get('operations') or {}
self._parse_operations(version, mode_operations, mode=mode_name)
mode_addons = mode.get('addons') or {}
self._parse_addons(version, mode_addons, mode=mode_name)
# backup should be added last, as it depends if the version is noop
backup = parsed_version.get('backup')
if backup is None:
if version.is_noop():
# For noop steps backup defaults to False
backup = False
else:
backup = True
self._parse_backup(version, backup)
return version
|
camptocamp/marabunta | marabunta/parser.py | YamlParser.check_dict_expected_keys | python | def check_dict_expected_keys(self, expected_keys, current, dict_name):
if not isinstance(current, dict):
raise ParseError(u"'{}' key must be a dict".format(dict_name),
YAML_EXAMPLE)
expected_keys = set(expected_keys)
current_keys = {key for key in current}
extra_keys = current_keys - expected_keys
if extra_keys:
message = u"{}: the keys {} are unexpected. (allowed keys: {})"
raise ParseError(
message.format(
dict_name,
list(extra_keys),
list(expected_keys),
),
YAML_EXAMPLE,
) | Check that we don't have unknown keys in a dictionary.
It does not raise an error if we have less keys than expected. | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/parser.py#L98-L118 | null | class YamlParser(object):
def __init__(self, parsed):
self.parsed = parsed
@classmethod
def parser_from_buffer(cls, fp):
"""Construct YamlParser from a file pointer."""
yaml = YAML(typ="safe")
return cls(yaml.load(fp))
@classmethod
def parse_from_file(cls, filename):
"""Construct YamlParser from a filename."""
with open(filename, 'r') as fh:
return cls.parser_from_buffer(fh)
def parse(self):
"""Check input and return a :class:`Migration` instance."""
if not self.parsed.get('migration'):
raise ParseError(u"'migration' key is missing", YAML_EXAMPLE)
self.check_dict_expected_keys(
{'options', 'versions'}, self.parsed['migration'], 'migration',
)
return self._parse_migrations()
def _parse_migrations(self):
"""Build a :class:`Migration` instance."""
migration = self.parsed['migration']
options = self._parse_options(migration)
versions = self._parse_versions(migration, options)
return Migration(versions, options)
def _parse_options(self, migration):
"""Build :class:`MigrationOption` and
:class:`MigrationBackupOption` instances."""
options = migration.get('options', {})
install_command = options.get('install_command')
backup = options.get('backup')
if backup:
self.check_dict_expected_keys(
{'command', 'ignore_if', 'stop_on_failure'},
options['backup'], 'backup',
)
backup = MigrationBackupOption(
command=backup.get('command'),
ignore_if=backup.get('ignore_if'),
stop_on_failure=backup.get('stop_on_failure', True),
)
return MigrationOption(
install_command=install_command,
backup=backup,
)
def _parse_versions(self, migration, options):
versions = migration.get('versions') or []
if not isinstance(versions, list):
raise ParseError(u"'versions' key must be a list", YAML_EXAMPLE)
if versions[0]['version'] != FIRST_VERSION:
warnings_msg = u'First version should be named `setup`'
warnings.warn(warnings_msg, FutureWarning)
return [self._parse_version(version, options) for version in versions]
def _parse_operations(self, version, operations, mode=None):
self.check_dict_expected_keys(
{'pre', 'post'}, operations, 'operations',
)
for operation_type, commands in operations.items():
if not isinstance(commands, list):
raise ParseError(u"'%s' key must be a list" %
(operation_type,), YAML_EXAMPLE)
for command in commands:
version.add_operation(
operation_type,
Operation(command),
mode=mode,
)
def _parse_addons(self, version, addons, mode=None):
self.check_dict_expected_keys(
{'upgrade', 'remove'}, addons, 'addons',
)
upgrade = addons.get('upgrade') or []
if upgrade:
if not isinstance(upgrade, list):
raise ParseError(u"'upgrade' key must be a list", YAML_EXAMPLE)
version.add_upgrade_addons(upgrade, mode=mode)
remove = addons.get('remove') or []
if remove:
if not isinstance(remove, list):
raise ParseError(u"'remove' key must be a list", YAML_EXAMPLE)
version.add_remove_addons(remove, mode=mode)
def _parse_backup(self, version, backup=True, mode=None):
if not isinstance(backup, bool):
raise ParseError(u"'backup' key must be a boolean", YAML_EXAMPLE)
version.add_backup_operation(backup, mode=mode)
def _parse_version(self, parsed_version, options):
self.check_dict_expected_keys(
{'version', 'operations', 'addons', 'modes', 'backup'},
parsed_version, 'versions',
)
number = parsed_version.get('version')
version = Version(number, options)
# parse the main operations, backup and addons
operations = parsed_version.get('operations') or {}
self._parse_operations(version, operations)
addons = parsed_version.get('addons') or {}
self._parse_addons(version, addons)
# parse the modes operations and addons
modes = parsed_version.get('modes', {})
if not isinstance(modes, dict):
raise ParseError(u"'modes' key must be a dict", YAML_EXAMPLE)
for mode_name, mode in modes.items():
self.check_dict_expected_keys(
{'operations', 'addons'}, mode, mode_name,
)
mode_operations = mode.get('operations') or {}
self._parse_operations(version, mode_operations, mode=mode_name)
mode_addons = mode.get('addons') or {}
self._parse_addons(version, mode_addons, mode=mode_name)
# backup should be added last, as it depends if the version is noop
backup = parsed_version.get('backup')
if backup is None:
if version.is_noop():
# For noop steps backup defaults to False
backup = False
else:
backup = True
self._parse_backup(version, backup)
return version
|
camptocamp/marabunta | marabunta/parser.py | YamlParser.parse | python | def parse(self):
if not self.parsed.get('migration'):
raise ParseError(u"'migration' key is missing", YAML_EXAMPLE)
self.check_dict_expected_keys(
{'options', 'versions'}, self.parsed['migration'], 'migration',
)
return self._parse_migrations() | Check input and return a :class:`Migration` instance. | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/parser.py#L120-L127 | null | class YamlParser(object):
def __init__(self, parsed):
self.parsed = parsed
@classmethod
def parser_from_buffer(cls, fp):
"""Construct YamlParser from a file pointer."""
yaml = YAML(typ="safe")
return cls(yaml.load(fp))
@classmethod
def parse_from_file(cls, filename):
"""Construct YamlParser from a filename."""
with open(filename, 'r') as fh:
return cls.parser_from_buffer(fh)
def check_dict_expected_keys(self, expected_keys, current, dict_name):
""" Check that we don't have unknown keys in a dictionary.
It does not raise an error if we have less keys than expected.
"""
if not isinstance(current, dict):
raise ParseError(u"'{}' key must be a dict".format(dict_name),
YAML_EXAMPLE)
expected_keys = set(expected_keys)
current_keys = {key for key in current}
extra_keys = current_keys - expected_keys
if extra_keys:
message = u"{}: the keys {} are unexpected. (allowed keys: {})"
raise ParseError(
message.format(
dict_name,
list(extra_keys),
list(expected_keys),
),
YAML_EXAMPLE,
)
def _parse_migrations(self):
"""Build a :class:`Migration` instance."""
migration = self.parsed['migration']
options = self._parse_options(migration)
versions = self._parse_versions(migration, options)
return Migration(versions, options)
def _parse_options(self, migration):
"""Build :class:`MigrationOption` and
:class:`MigrationBackupOption` instances."""
options = migration.get('options', {})
install_command = options.get('install_command')
backup = options.get('backup')
if backup:
self.check_dict_expected_keys(
{'command', 'ignore_if', 'stop_on_failure'},
options['backup'], 'backup',
)
backup = MigrationBackupOption(
command=backup.get('command'),
ignore_if=backup.get('ignore_if'),
stop_on_failure=backup.get('stop_on_failure', True),
)
return MigrationOption(
install_command=install_command,
backup=backup,
)
def _parse_versions(self, migration, options):
versions = migration.get('versions') or []
if not isinstance(versions, list):
raise ParseError(u"'versions' key must be a list", YAML_EXAMPLE)
if versions[0]['version'] != FIRST_VERSION:
warnings_msg = u'First version should be named `setup`'
warnings.warn(warnings_msg, FutureWarning)
return [self._parse_version(version, options) for version in versions]
def _parse_operations(self, version, operations, mode=None):
self.check_dict_expected_keys(
{'pre', 'post'}, operations, 'operations',
)
for operation_type, commands in operations.items():
if not isinstance(commands, list):
raise ParseError(u"'%s' key must be a list" %
(operation_type,), YAML_EXAMPLE)
for command in commands:
version.add_operation(
operation_type,
Operation(command),
mode=mode,
)
def _parse_addons(self, version, addons, mode=None):
self.check_dict_expected_keys(
{'upgrade', 'remove'}, addons, 'addons',
)
upgrade = addons.get('upgrade') or []
if upgrade:
if not isinstance(upgrade, list):
raise ParseError(u"'upgrade' key must be a list", YAML_EXAMPLE)
version.add_upgrade_addons(upgrade, mode=mode)
remove = addons.get('remove') or []
if remove:
if not isinstance(remove, list):
raise ParseError(u"'remove' key must be a list", YAML_EXAMPLE)
version.add_remove_addons(remove, mode=mode)
def _parse_backup(self, version, backup=True, mode=None):
if not isinstance(backup, bool):
raise ParseError(u"'backup' key must be a boolean", YAML_EXAMPLE)
version.add_backup_operation(backup, mode=mode)
def _parse_version(self, parsed_version, options):
self.check_dict_expected_keys(
{'version', 'operations', 'addons', 'modes', 'backup'},
parsed_version, 'versions',
)
number = parsed_version.get('version')
version = Version(number, options)
# parse the main operations, backup and addons
operations = parsed_version.get('operations') or {}
self._parse_operations(version, operations)
addons = parsed_version.get('addons') or {}
self._parse_addons(version, addons)
# parse the modes operations and addons
modes = parsed_version.get('modes', {})
if not isinstance(modes, dict):
raise ParseError(u"'modes' key must be a dict", YAML_EXAMPLE)
for mode_name, mode in modes.items():
self.check_dict_expected_keys(
{'operations', 'addons'}, mode, mode_name,
)
mode_operations = mode.get('operations') or {}
self._parse_operations(version, mode_operations, mode=mode_name)
mode_addons = mode.get('addons') or {}
self._parse_addons(version, mode_addons, mode=mode_name)
# backup should be added last, as it depends if the version is noop
backup = parsed_version.get('backup')
if backup is None:
if version.is_noop():
# For noop steps backup defaults to False
backup = False
else:
backup = True
self._parse_backup(version, backup)
return version
|
camptocamp/marabunta | marabunta/parser.py | YamlParser._parse_migrations | python | def _parse_migrations(self):
migration = self.parsed['migration']
options = self._parse_options(migration)
versions = self._parse_versions(migration, options)
return Migration(versions, options) | Build a :class:`Migration` instance. | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/parser.py#L129-L134 | null | class YamlParser(object):
def __init__(self, parsed):
self.parsed = parsed
@classmethod
def parser_from_buffer(cls, fp):
"""Construct YamlParser from a file pointer."""
yaml = YAML(typ="safe")
return cls(yaml.load(fp))
@classmethod
def parse_from_file(cls, filename):
"""Construct YamlParser from a filename."""
with open(filename, 'r') as fh:
return cls.parser_from_buffer(fh)
def check_dict_expected_keys(self, expected_keys, current, dict_name):
""" Check that we don't have unknown keys in a dictionary.
It does not raise an error if we have less keys than expected.
"""
if not isinstance(current, dict):
raise ParseError(u"'{}' key must be a dict".format(dict_name),
YAML_EXAMPLE)
expected_keys = set(expected_keys)
current_keys = {key for key in current}
extra_keys = current_keys - expected_keys
if extra_keys:
message = u"{}: the keys {} are unexpected. (allowed keys: {})"
raise ParseError(
message.format(
dict_name,
list(extra_keys),
list(expected_keys),
),
YAML_EXAMPLE,
)
def parse(self):
"""Check input and return a :class:`Migration` instance."""
if not self.parsed.get('migration'):
raise ParseError(u"'migration' key is missing", YAML_EXAMPLE)
self.check_dict_expected_keys(
{'options', 'versions'}, self.parsed['migration'], 'migration',
)
return self._parse_migrations()
def _parse_options(self, migration):
"""Build :class:`MigrationOption` and
:class:`MigrationBackupOption` instances."""
options = migration.get('options', {})
install_command = options.get('install_command')
backup = options.get('backup')
if backup:
self.check_dict_expected_keys(
{'command', 'ignore_if', 'stop_on_failure'},
options['backup'], 'backup',
)
backup = MigrationBackupOption(
command=backup.get('command'),
ignore_if=backup.get('ignore_if'),
stop_on_failure=backup.get('stop_on_failure', True),
)
return MigrationOption(
install_command=install_command,
backup=backup,
)
def _parse_versions(self, migration, options):
versions = migration.get('versions') or []
if not isinstance(versions, list):
raise ParseError(u"'versions' key must be a list", YAML_EXAMPLE)
if versions[0]['version'] != FIRST_VERSION:
warnings_msg = u'First version should be named `setup`'
warnings.warn(warnings_msg, FutureWarning)
return [self._parse_version(version, options) for version in versions]
def _parse_operations(self, version, operations, mode=None):
self.check_dict_expected_keys(
{'pre', 'post'}, operations, 'operations',
)
for operation_type, commands in operations.items():
if not isinstance(commands, list):
raise ParseError(u"'%s' key must be a list" %
(operation_type,), YAML_EXAMPLE)
for command in commands:
version.add_operation(
operation_type,
Operation(command),
mode=mode,
)
def _parse_addons(self, version, addons, mode=None):
self.check_dict_expected_keys(
{'upgrade', 'remove'}, addons, 'addons',
)
upgrade = addons.get('upgrade') or []
if upgrade:
if not isinstance(upgrade, list):
raise ParseError(u"'upgrade' key must be a list", YAML_EXAMPLE)
version.add_upgrade_addons(upgrade, mode=mode)
remove = addons.get('remove') or []
if remove:
if not isinstance(remove, list):
raise ParseError(u"'remove' key must be a list", YAML_EXAMPLE)
version.add_remove_addons(remove, mode=mode)
def _parse_backup(self, version, backup=True, mode=None):
if not isinstance(backup, bool):
raise ParseError(u"'backup' key must be a boolean", YAML_EXAMPLE)
version.add_backup_operation(backup, mode=mode)
def _parse_version(self, parsed_version, options):
self.check_dict_expected_keys(
{'version', 'operations', 'addons', 'modes', 'backup'},
parsed_version, 'versions',
)
number = parsed_version.get('version')
version = Version(number, options)
# parse the main operations, backup and addons
operations = parsed_version.get('operations') or {}
self._parse_operations(version, operations)
addons = parsed_version.get('addons') or {}
self._parse_addons(version, addons)
# parse the modes operations and addons
modes = parsed_version.get('modes', {})
if not isinstance(modes, dict):
raise ParseError(u"'modes' key must be a dict", YAML_EXAMPLE)
for mode_name, mode in modes.items():
self.check_dict_expected_keys(
{'operations', 'addons'}, mode, mode_name,
)
mode_operations = mode.get('operations') or {}
self._parse_operations(version, mode_operations, mode=mode_name)
mode_addons = mode.get('addons') or {}
self._parse_addons(version, mode_addons, mode=mode_name)
# backup should be added last, as it depends if the version is noop
backup = parsed_version.get('backup')
if backup is None:
if version.is_noop():
# For noop steps backup defaults to False
backup = False
else:
backup = True
self._parse_backup(version, backup)
return version
|
camptocamp/marabunta | marabunta/parser.py | YamlParser._parse_options | python | def _parse_options(self, migration):
options = migration.get('options', {})
install_command = options.get('install_command')
backup = options.get('backup')
if backup:
self.check_dict_expected_keys(
{'command', 'ignore_if', 'stop_on_failure'},
options['backup'], 'backup',
)
backup = MigrationBackupOption(
command=backup.get('command'),
ignore_if=backup.get('ignore_if'),
stop_on_failure=backup.get('stop_on_failure', True),
)
return MigrationOption(
install_command=install_command,
backup=backup,
) | Build :class:`MigrationOption` and
:class:`MigrationBackupOption` instances. | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/parser.py#L136-L155 | null | class YamlParser(object):
def __init__(self, parsed):
self.parsed = parsed
@classmethod
def parser_from_buffer(cls, fp):
    """Construct YamlParser from an open file pointer."""
    safe_loader = YAML(typ="safe")
    return cls(safe_loader.load(fp))
@classmethod
def parse_from_file(cls, filename):
    """Construct YamlParser by reading the YAML file at *filename*."""
    with open(filename, 'r') as handle:
        return cls.parser_from_buffer(handle)
def check_dict_expected_keys(self, expected_keys, current, dict_name):
    """Raise ParseError when *current* has keys outside *expected_keys*.

    Having fewer keys than expected is fine; only unknown keys fail.
    """
    if not isinstance(current, dict):
        raise ParseError(u"'{}' key must be a dict".format(dict_name),
                         YAML_EXAMPLE)
    allowed = set(expected_keys)
    unknown = set(current) - allowed
    if unknown:
        raise ParseError(
            u"{}: the keys {} are unexpected. (allowed keys: {})".format(
                dict_name, list(unknown), list(allowed)),
            YAML_EXAMPLE,
        )
def parse(self):
    """Validate the top-level structure and return a :class:`Migration`."""
    migration = self.parsed.get('migration')
    if not migration:
        raise ParseError(u"'migration' key is missing", YAML_EXAMPLE)
    self.check_dict_expected_keys(
        {'options', 'versions'}, migration, 'migration',
    )
    return self._parse_migrations()
def _parse_migrations(self):
    """Assemble a :class:`Migration` from the parsed options and versions."""
    migration = self.parsed['migration']
    opts = self._parse_options(migration)
    return Migration(self._parse_versions(migration, opts), opts)
def _parse_versions(self, migration, options):
versions = migration.get('versions') or []
if not isinstance(versions, list):
raise ParseError(u"'versions' key must be a list", YAML_EXAMPLE)
if versions[0]['version'] != FIRST_VERSION:
warnings_msg = u'First version should be named `setup`'
warnings.warn(warnings_msg, FutureWarning)
return [self._parse_version(version, options) for version in versions]
def _parse_operations(self, version, operations, mode=None):
self.check_dict_expected_keys(
{'pre', 'post'}, operations, 'operations',
)
for operation_type, commands in operations.items():
if not isinstance(commands, list):
raise ParseError(u"'%s' key must be a list" %
(operation_type,), YAML_EXAMPLE)
for command in commands:
version.add_operation(
operation_type,
Operation(command),
mode=mode,
)
def _parse_addons(self, version, addons, mode=None):
self.check_dict_expected_keys(
{'upgrade', 'remove'}, addons, 'addons',
)
upgrade = addons.get('upgrade') or []
if upgrade:
if not isinstance(upgrade, list):
raise ParseError(u"'upgrade' key must be a list", YAML_EXAMPLE)
version.add_upgrade_addons(upgrade, mode=mode)
remove = addons.get('remove') or []
if remove:
if not isinstance(remove, list):
raise ParseError(u"'remove' key must be a list", YAML_EXAMPLE)
version.add_remove_addons(remove, mode=mode)
def _parse_backup(self, version, backup=True, mode=None):
if not isinstance(backup, bool):
raise ParseError(u"'backup' key must be a boolean", YAML_EXAMPLE)
version.add_backup_operation(backup, mode=mode)
def _parse_version(self, parsed_version, options):
    """Build a :class:`Version` from one entry of the 'versions' list.

    :param parsed_version: one mapping from the YAML 'versions' list
    :param options: shared :class:`MigrationOption` instance
    :returns: a fully populated :class:`Version`
    :raises ParseError: on unexpected keys or a non-dict 'modes' value
    """
    self.check_dict_expected_keys(
        {'version', 'operations', 'addons', 'modes', 'backup'},
        parsed_version, 'versions',
    )
    number = parsed_version.get('version')
    version = Version(number, options)
    # parse the main operations, backup and addons
    operations = parsed_version.get('operations') or {}
    self._parse_operations(version, operations)
    addons = parsed_version.get('addons') or {}
    self._parse_addons(version, addons)
    # parse the modes operations and addons
    modes = parsed_version.get('modes', {})
    if not isinstance(modes, dict):
        raise ParseError(u"'modes' key must be a dict", YAML_EXAMPLE)
    for mode_name, mode in modes.items():
        self.check_dict_expected_keys(
            {'operations', 'addons'}, mode, mode_name,
        )
        mode_operations = mode.get('operations') or {}
        self._parse_operations(version, mode_operations, mode=mode_name)
        mode_addons = mode.get('addons') or {}
        self._parse_addons(version, mode_addons, mode=mode_name)
    # backup should be added last, as it depends if the version is noop
    backup = parsed_version.get('backup')
    if backup is None:
        # Not specified: default to a backup unless the version does nothing.
        if version.is_noop():
            # For noop steps backup defaults to False
            backup = False
        else:
            backup = True
    self._parse_backup(version, backup)
    return version
|
camptocamp/marabunta | marabunta/database.py | MigrationTable.versions | python | def versions(self):
if self._versions is None:
with self.database.cursor_autocommit() as cursor:
query = """
SELECT number,
date_start,
date_done,
log,
addons
FROM {}
""".format(self.table_name)
cursor.execute(query)
rows = cursor.fetchall()
versions = []
for row in rows:
row = list(row)
# convert 'addons' to json
row[4] = json.loads(row[4]) if row[4] else []
versions.append(
self.VersionRecord(*row)
)
self._versions = versions
return self._versions | Read versions from the table
The versions are kept in cache for the next reads. | train | https://github.com/camptocamp/marabunta/blob/ec3a7a725c7426d6ed642e0a80119b37880eb91e/marabunta/database.py#L77-L103 | null | class MigrationTable(object):
def __init__(self, database):
    """Wrap the marabunta version-tracking table of *database*."""
    self.database = database
    # Fixed name of the tracking table created by create_if_not_exists().
    self.table_name = 'marabunta_version'
    # Record factory used when materializing fetched rows.
    self.VersionRecord = VersionRecord
    # Cache of already-read versions; None means "not fetched yet".
    self._versions = None
def create_if_not_exists(self):
    """Create the marabunta_version tracking table when it is missing."""
    with self.database.cursor_autocommit() as cursor:
        # table_name is a trusted internal constant, so str.format is
        # acceptable here; row values always go through bound parameters.
        query = """
            CREATE TABLE IF NOT EXISTS {} (
                number VARCHAR NOT NULL,
                date_start TIMESTAMP NOT NULL,
                date_done TIMESTAMP,
                log TEXT,
                addons TEXT,
                CONSTRAINT version_pk PRIMARY KEY (number)
            );
        """.format(self.table_name)
        cursor.execute(query)
def start_version(self, number, start):
    """Mark migration *number* as started at *start* (manual upsert).

    When the row already exists (e.g. a previously failed run), it is
    reset: the done timestamp, log and addons are cleared.
    """
    with self.database.cursor_autocommit() as cursor:
        query = """
            SELECT number FROM {}
            WHERE number = %s
        """.format(self.table_name)
        cursor.execute(query, (number,))
        if cursor.fetchone():
            # Existing row: restart it by wiping the previous outcome.
            query = """
                UPDATE {}
                SET date_start = %s,
                    date_done = NULL,
                    log = NULL,
                    addons = NULL
                WHERE number = %s
            """.format(self.table_name)
            cursor.execute(query, (start, number))
        else:
            query = """
                INSERT INTO {}
                (number, date_start)
                VALUES (%s, %s)
            """.format(self.table_name)
            cursor.execute(query, (number, start))
    self._versions = None  # reset versions cache
def record_log(self, number, log):
    """Persist the execution *log* for version *number*."""
    with self.database.cursor_autocommit() as cursor:
        query = """
            UPDATE {}
            SET log = %s
            WHERE number = %s
        """.format(self.table_name)
        cursor.execute(query, (log, number))
    self._versions = None  # reset versions cache
def finish_version(self, number, end, log, addons):
    """Mark version *number* as finished at *end*, storing log and addons.

    *addons* is serialized to JSON before being written.
    """
    with self.database.cursor_autocommit() as cursor:
        query = """
            UPDATE {}
            SET date_done = %s,
                log = %s,
                addons = %s
            WHERE number = %s
        """.format(self.table_name)
        cursor.execute(query, (end, log, json.dumps(addons), number))
    self._versions = None  # reset versions cache
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/filewatcher.py | FilesystemEventHandler.on_deleted | python | def on_deleted(self, event):
key = 'filesystem:file_deleted'
data = {
'filepath': event.src_path,
'is_directory': event.is_directory,
'dirpath': os.path.dirname(event.src_path)
}
bmsg = BroadcastMessage(key=key, data=data)
BroadcastManager.broadcast(bmsg) | Event Handler when a file is deleted | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/filewatcher.py#L27-L39 | [
"def broadcast(cls, message):\n if not isinstance(message, BroadcastMessage):\n raise TypeError(\"message must be an instance of BroadcastMessage.\")\n\n json_msg = message.to_json_string()\n for client in cls._clients:\n try:\n client.write_message(json_msg)\n except:\n # TODO handle custom exception\n pass\n"
] | class FilesystemEventHandler(watchdog.events.FileSystemEventHandler):
"""
Subclass of `watchdog.events.FilesystemEventHandler`
Manages on_created, on_deleted, on_moved events
"""
def on_created(self, event):
    """Broadcast a 'file created' notification for the watchdog *event*."""
    payload = {
        'filepath': event.src_path,
        'is_directory': event.is_directory,
        'dirpath': os.path.dirname(event.src_path),
    }
    BroadcastManager.broadcast(
        BroadcastMessage(key='filesystem:file_created', data=payload)
    )
def on_moved(self, event):
    """Broadcast a 'file moved' notification for the watchdog *event*."""
    payload = {
        'src_filepath': event.src_path,
        'dest_filepath': event.dest_path,
        'is_directory': event.is_directory,
        'src_dirpath': os.path.dirname(event.src_path),
        'dest_dirpath': os.path.dirname(event.dest_path),
    }
    BroadcastManager.broadcast(
        BroadcastMessage(key='filesystem:file_moved', data=payload)
    )
def on_modified(self, event):
    """Broadcast a 'file modified' notification for the watchdog *event*."""
    payload = {
        'filepath': event.src_path,
        'is_directory': event.is_directory,
        'dirpath': os.path.dirname(event.src_path),
    }
    BroadcastManager.broadcast(
        BroadcastMessage(key='filesystem:file_modified', data=payload)
    )
|
SandstoneHPC/sandstone-ide | sandstone/lib/websocket_client.py | WebSocketClient.connect | python | def connect(self, url):
headers = httputil.HTTPHeaders({'Content-Type': APPLICATION_JSON})
request = httpclient.HTTPRequest(url=url,
connect_timeout=self.connect_timeout,
request_timeout=self.request_timeout,
headers=headers)
ws = websocket_connect(url)
ws.add_done_callback(self._connect_callback) | Connect to the server.
:param str url: server URL. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/websocket_client.py#L30-L41 | null | class WebSocketClient():
"""Base for web socket clients.
"""
def __init__(self, data=None, connect_timeout=DEFAULT_CONNECT_TIMEOUT,
             request_timeout=DEFAULT_REQUEST_TIMEOUT):
    """Store connection settings.

    :param data: payload sent to the server right after connecting
    :param connect_timeout: seconds to wait while establishing the connection
    :param request_timeout: seconds to wait for the request
    """
    self.connect_timeout = connect_timeout
    self.request_timeout = request_timeout
    self.data = data
    # Initialize eagerly so send()/close() raise the intended RuntimeError
    # instead of AttributeError when called before connect().
    self._ws_connection = None
def send(self, data):
    """Serialize *data* as JSON and send it over the open connection.

    :param data: JSON-serializable message payload.
    :raises RuntimeError: when no connection is open.
    """
    connection = self._ws_connection
    if not connection:
        raise RuntimeError('Web socket connection is closed.')
    connection.write_message(json.dumps(data))
def close(self):
    """Close the websocket connection.

    :raises RuntimeError: when no connection is open.
    """
    connection = self._ws_connection
    if not connection:
        raise RuntimeError('Web socket connection is already closed.')
    connection.close()
def _connect_callback(self, future):
if future.exception() is None:
self._ws_connection = future.result()
self._ws_connection.write_message(escape.json_encode(self.data))
# self._on_connection_success()
# self._read_messages()
else:
self._on_connection_error(future.exception())
@gen.coroutine
def _read_messages(self):
while True:
msg = yield self._ws_connection.read_message()
if msg is None:
self._on_connection_close()
break
self._on_message(msg)
def _on_message(self, msg):
"""This is called when new message is available from the server.
:param str msg: server message.
"""
pass
def _on_connection_success(self):
"""This is called on successful connection ot the server.
"""
pass
def _on_connection_close(self):
"""This is called when server closed the connection.
"""
pass
def _on_connection_error(self, exception):
"""This is called in case if connection to the server could
not established.
"""
pass
|
SandstoneHPC/sandstone-ide | sandstone/lib/websocket_client.py | WebSocketClient.send | python | def send(self, data):
if not self._ws_connection:
raise RuntimeError('Web socket connection is closed.')
self._ws_connection.write_message(json.dumps(data)) | Send message to the server
:param str data: message. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/websocket_client.py#L43-L50 | null | class WebSocketClient():
"""Base for web socket clients.
"""
def __init__(self, data=None, connect_timeout=DEFAULT_CONNECT_TIMEOUT,
request_timeout=DEFAULT_REQUEST_TIMEOUT):
self.connect_timeout = connect_timeout
self.request_timeout = request_timeout
self.data = data
def connect(self, url):
    """Connect to the server.

    :param str url: server URL.
    """
    headers = httputil.HTTPHeaders({'Content-Type': APPLICATION_JSON})
    request = httpclient.HTTPRequest(url=url,
                                     connect_timeout=self.connect_timeout,
                                     request_timeout=self.request_timeout,
                                     headers=headers)
    # Pass the prepared HTTPRequest so the configured headers and timeouts
    # are actually honoured; the original passed the bare URL, leaving
    # `request` as dead code.
    ws = websocket_connect(request)
    ws.add_done_callback(self._connect_callback)
def close(self):
"""Close connection.
"""
if not self._ws_connection:
raise RuntimeError('Web socket connection is already closed.')
self._ws_connection.close()
def _connect_callback(self, future):
if future.exception() is None:
self._ws_connection = future.result()
self._ws_connection.write_message(escape.json_encode(self.data))
# self._on_connection_success()
# self._read_messages()
else:
self._on_connection_error(future.exception())
@gen.coroutine
def _read_messages(self):
while True:
msg = yield self._ws_connection.read_message()
if msg is None:
self._on_connection_close()
break
self._on_message(msg)
def _on_message(self, msg):
"""This is called when new message is available from the server.
:param str msg: server message.
"""
pass
def _on_connection_success(self):
"""This is called on successful connection ot the server.
"""
pass
def _on_connection_close(self):
"""This is called when server closed the connection.
"""
pass
def _on_connection_error(self, exception):
"""This is called in case if connection to the server could
not established.
"""
pass
|
SandstoneHPC/sandstone-ide | sandstone/lib/handlers/rest.py | JSONHandler.initialize | python | def initialize(self,*args,**kwargs):
super(JSONHandler,self).initialize(*args,**kwargs)
content_type = self.request.headers.get('Content-Type', '')
if 'application/json' in content_type.lower():
self._parse_json_body_arguments() | Only try to parse as JSON if the JSON content type
header is set. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/handlers/rest.py#L17-L25 | [
"def _parse_json_body_arguments(self):\n if self.request.body == '':\n return\n try:\n body_args = json.loads(self.request.body)\n for k,v in body_args.iteritems():\n self.request.body_arguments[k] = v\n except:\n raise tornado.web.HTTPError(400)\n"
] | class JSONHandler(BaseHandler):
"""
This handler adds support for parsing JSON-encoded body
arguments for use in REST endpoints. This handler will
raise an HTTP 400 if the body is not empty, and cannot
be parsed as JSON.
"""
_ARG_DEFAULT = object()
def _parse_json_body_arguments(self):
    """Merge keys of a JSON request body into request.body_arguments.

    An empty body is silently accepted; anything unparseable yields 400.
    """
    if self.request.body == '':
        return
    try:
        body_args = json.loads(self.request.body)
        for k, v in body_args.iteritems():
            self.request.body_arguments[k] = v
    except (ValueError, AttributeError):
        # ValueError: malformed JSON.  AttributeError: valid JSON that is
        # not an object (e.g. a list) has no iteritems().  The original
        # bare `except:` also converted SystemExit/KeyboardInterrupt
        # into HTTP 400.
        raise tornado.web.HTTPError(400)
def get_body_argument(self, name, default=_ARG_DEFAULT, strip=True):
    """Return the body argument *name*, or *default* when absent.

    :raises MissingArgumentError: when absent and no default was given.
    """
    arg = self.request.body_arguments.get(name, default)
    # Identity check against the sentinel: the original used `if not arg`,
    # which wrongly treated present-but-falsy values ('', 0, False, [])
    # as missing and returned the default (or raised).
    if arg is self._ARG_DEFAULT:
        # NOTE(review): assumes MissingArgumentError is provided by the
        # BaseHandler/tornado hierarchy -- verify.
        raise self.MissingArgumentError(name)
    return arg
def get_body_arguments(self, name, strip=True):
    """Return a deep copy of every body argument (*name* is unused)."""
    return copy.deepcopy(self.request.body_arguments)
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FilesystemHandler._move | python | def _move(self):
newpath = self.action['newpath']
try:
self.fs.move(self.fp,newpath)
except OSError:
raise tornado.web.HTTPError(400)
return newpath | Called during a PUT request where the action specifies
a move operation. Returns resource URI of the destination file. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L32-L42 | null | class FilesystemHandler(JSONHandler,FSMixin):
"""
This handler implements the root filesystem resource for the
filesystem REST API.
"""
def _copy(self):
"""
Called during a PUT request where the action specifies
a copy operation. Returns resource URI of the new file.
"""
copypath = self.action['copypath']
try:
self.fs.copy(self.fp,copypath)
except OSError:
raise tornado.web.HTTPError(400)
return copypath
def _rename(self):
"""
Called during a PUT request where the action specifies
a rename operation. Returns resource URI of the renamed file.
"""
newname = self.action['newname']
try:
newpath = self.fs.rename(self.fp,newname)
except OSError:
raise tornado.web.HTTPError(400)
return newpath
@sandstone.lib.decorators.authenticated
def get(self):
    """
    Write the filesystem details (configured volumes included) as JSON.
    """
    details = self.fs.get_filesystem_details()
    self.write(details.to_dict())
@sandstone.lib.decorators.authenticated
def put(self):
    """
    Provides move, copy, and rename functionality. An action must be
    specified when calling this method.
    """
    self.fp = self.get_body_argument('filepath')
    self.action = self.get_body_argument('action')
    try:
        path_type = self.fs.get_type_from_path(self.fp)
    except OSError:
        raise tornado.web.HTTPError(404)
    if path_type == 'directory':
        self.handler_name = 'filesystem:directories-details'
    else:
        self.handler_name = 'filesystem:files-details'
    # Dispatch table replaces the if/elif chain; unknown actions -> 400.
    operations = {
        'move': self._move,
        'copy': self._copy,
        'rename': self._rename,
    }
    handler = operations.get(self.action['action'])
    if handler is None:
        raise tornado.web.HTTPError(400)
    self.write({'filepath': handler()})
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FilesystemHandler._copy | python | def _copy(self):
copypath = self.action['copypath']
try:
self.fs.copy(self.fp,copypath)
except OSError:
raise tornado.web.HTTPError(400)
return copypath | Called during a PUT request where the action specifies
a copy operation. Returns resource URI of the new file. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L44-L54 | null | class FilesystemHandler(JSONHandler,FSMixin):
"""
This handler implements the root filesystem resource for the
filesystem REST API.
"""
def _move(self):
"""
Called during a PUT request where the action specifies
a move operation. Returns resource URI of the destination file.
"""
newpath = self.action['newpath']
try:
self.fs.move(self.fp,newpath)
except OSError:
raise tornado.web.HTTPError(400)
return newpath
def _rename(self):
"""
Called during a PUT request where the action specifies
a rename operation. Returns resource URI of the renamed file.
"""
newname = self.action['newname']
try:
newpath = self.fs.rename(self.fp,newname)
except OSError:
raise tornado.web.HTTPError(400)
return newpath
@sandstone.lib.decorators.authenticated
def get(self):
"""
Return details for the filesystem, including configured volumes.
"""
res = self.fs.get_filesystem_details()
res = res.to_dict()
self.write(res)
@sandstone.lib.decorators.authenticated
def put(self):
"""
Provides move, copy, and rename functionality. An action must be
specified when calling this method.
"""
self.fp = self.get_body_argument('filepath')
self.action = self.get_body_argument('action')
try:
ptype = self.fs.get_type_from_path(self.fp)
except OSError:
raise tornado.web.HTTPError(404)
if ptype == 'directory':
self.handler_name = 'filesystem:directories-details'
else:
self.handler_name = 'filesystem:files-details'
if self.action['action'] == 'move':
newpath = self._move()
self.write({'filepath':newpath})
elif self.action['action'] == 'copy':
newpath = self._copy()
self.write({'filepath':newpath})
elif self.action['action'] == 'rename':
newpath = self._rename()
self.write({'filepath':newpath})
else:
raise tornado.web.HTTPError(400)
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FilesystemHandler._rename | python | def _rename(self):
newname = self.action['newname']
try:
newpath = self.fs.rename(self.fp,newname)
except OSError:
raise tornado.web.HTTPError(400)
return newpath | Called during a PUT request where the action specifies
a rename operation. Returns resource URI of the renamed file. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L56-L66 | null | class FilesystemHandler(JSONHandler,FSMixin):
"""
This handler implements the root filesystem resource for the
filesystem REST API.
"""
def _move(self):
"""
Called during a PUT request where the action specifies
a move operation. Returns resource URI of the destination file.
"""
newpath = self.action['newpath']
try:
self.fs.move(self.fp,newpath)
except OSError:
raise tornado.web.HTTPError(400)
return newpath
def _copy(self):
"""
Called during a PUT request where the action specifies
a copy operation. Returns resource URI of the new file.
"""
copypath = self.action['copypath']
try:
self.fs.copy(self.fp,copypath)
except OSError:
raise tornado.web.HTTPError(400)
return copypath
@sandstone.lib.decorators.authenticated
def get(self):
"""
Return details for the filesystem, including configured volumes.
"""
res = self.fs.get_filesystem_details()
res = res.to_dict()
self.write(res)
@sandstone.lib.decorators.authenticated
def put(self):
"""
Provides move, copy, and rename functionality. An action must be
specified when calling this method.
"""
self.fp = self.get_body_argument('filepath')
self.action = self.get_body_argument('action')
try:
ptype = self.fs.get_type_from_path(self.fp)
except OSError:
raise tornado.web.HTTPError(404)
if ptype == 'directory':
self.handler_name = 'filesystem:directories-details'
else:
self.handler_name = 'filesystem:files-details'
if self.action['action'] == 'move':
newpath = self._move()
self.write({'filepath':newpath})
elif self.action['action'] == 'copy':
newpath = self._copy()
self.write({'filepath':newpath})
elif self.action['action'] == 'rename':
newpath = self._rename()
self.write({'filepath':newpath})
else:
raise tornado.web.HTTPError(400)
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FilesystemHandler.get | python | def get(self):
res = self.fs.get_filesystem_details()
res = res.to_dict()
self.write(res) | Return details for the filesystem, including configured volumes. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L69-L75 | null | class FilesystemHandler(JSONHandler,FSMixin):
"""
This handler implements the root filesystem resource for the
filesystem REST API.
"""
def _move(self):
"""
Called during a PUT request where the action specifies
a move operation. Returns resource URI of the destination file.
"""
newpath = self.action['newpath']
try:
self.fs.move(self.fp,newpath)
except OSError:
raise tornado.web.HTTPError(400)
return newpath
def _copy(self):
"""
Called during a PUT request where the action specifies
a copy operation. Returns resource URI of the new file.
"""
copypath = self.action['copypath']
try:
self.fs.copy(self.fp,copypath)
except OSError:
raise tornado.web.HTTPError(400)
return copypath
def _rename(self):
"""
Called during a PUT request where the action specifies
a rename operation. Returns resource URI of the renamed file.
"""
newname = self.action['newname']
try:
newpath = self.fs.rename(self.fp,newname)
except OSError:
raise tornado.web.HTTPError(400)
return newpath
@sandstone.lib.decorators.authenticated
@sandstone.lib.decorators.authenticated
def put(self):
"""
Provides move, copy, and rename functionality. An action must be
specified when calling this method.
"""
self.fp = self.get_body_argument('filepath')
self.action = self.get_body_argument('action')
try:
ptype = self.fs.get_type_from_path(self.fp)
except OSError:
raise tornado.web.HTTPError(404)
if ptype == 'directory':
self.handler_name = 'filesystem:directories-details'
else:
self.handler_name = 'filesystem:files-details'
if self.action['action'] == 'move':
newpath = self._move()
self.write({'filepath':newpath})
elif self.action['action'] == 'copy':
newpath = self._copy()
self.write({'filepath':newpath})
elif self.action['action'] == 'rename':
newpath = self._rename()
self.write({'filepath':newpath})
else:
raise tornado.web.HTTPError(400)
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FilesystemHandler.put | python | def put(self):
self.fp = self.get_body_argument('filepath')
self.action = self.get_body_argument('action')
try:
ptype = self.fs.get_type_from_path(self.fp)
except OSError:
raise tornado.web.HTTPError(404)
if ptype == 'directory':
self.handler_name = 'filesystem:directories-details'
else:
self.handler_name = 'filesystem:files-details'
if self.action['action'] == 'move':
newpath = self._move()
self.write({'filepath':newpath})
elif self.action['action'] == 'copy':
newpath = self._copy()
self.write({'filepath':newpath})
elif self.action['action'] == 'rename':
newpath = self._rename()
self.write({'filepath':newpath})
else:
raise tornado.web.HTTPError(400) | Provides move, copy, and rename functionality. An action must be
specified when calling this method. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L78-L105 | [
"def _move(self):\n \"\"\"\n Called during a PUT request where the action specifies\n a move operation. Returns resource URI of the destination file.\n \"\"\"\n newpath = self.action['newpath']\n try:\n self.fs.move(self.fp,newpath)\n except OSError:\n raise tornado.web.HTTPError(400)\n return newpath\n",
"def _copy(self):\n \"\"\"\n Called during a PUT request where the action specifies\n a copy operation. Returns resource URI of the new file.\n \"\"\"\n copypath = self.action['copypath']\n try:\n self.fs.copy(self.fp,copypath)\n except OSError:\n raise tornado.web.HTTPError(400)\n return copypath\n",
"def _rename(self):\n \"\"\"\n Called during a PUT request where the action specifies\n a rename operation. Returns resource URI of the renamed file.\n \"\"\"\n newname = self.action['newname']\n try:\n newpath = self.fs.rename(self.fp,newname)\n except OSError:\n raise tornado.web.HTTPError(400)\n return newpath\n",
"def get_body_argument(self,name,default=_ARG_DEFAULT,strip=True):\n arg = self.request.body_arguments.get(name,default)\n if not arg:\n if default is self._ARG_DEFAULT:\n raise self.MissingArgumentError(name)\n return default\n return arg\n"
] | class FilesystemHandler(JSONHandler,FSMixin):
"""
This handler implements the root filesystem resource for the
filesystem REST API.
"""
def _move(self):
"""
Called during a PUT request where the action specifies
a move operation. Returns resource URI of the destination file.
"""
newpath = self.action['newpath']
try:
self.fs.move(self.fp,newpath)
except OSError:
raise tornado.web.HTTPError(400)
return newpath
def _copy(self):
"""
Called during a PUT request where the action specifies
a copy operation. Returns resource URI of the new file.
"""
copypath = self.action['copypath']
try:
self.fs.copy(self.fp,copypath)
except OSError:
raise tornado.web.HTTPError(400)
return copypath
def _rename(self):
"""
Called during a PUT request where the action specifies
a rename operation. Returns resource URI of the renamed file.
"""
newname = self.action['newname']
try:
newpath = self.fs.rename(self.fp,newname)
except OSError:
raise tornado.web.HTTPError(400)
return newpath
@sandstone.lib.decorators.authenticated
def get(self):
"""
Return details for the filesystem, including configured volumes.
"""
res = self.fs.get_filesystem_details()
res = res.to_dict()
self.write(res)
@sandstone.lib.decorators.authenticated
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FilewatcherCreateHandler.post | python | def post(self, *args):
filepath = self.get_body_argument('filepath')
if not self.fs.exists(filepath):
raise tornado.web.HTTPError(404)
Filewatcher.add_directory_to_watch(filepath)
self.write({'msg':'Watcher added for {}'.format(filepath)}) | Start a new filewatcher at the specified path. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L113-L122 | [
"def add_directory_to_watch(cls, directory):\n normalized_path = os.path.abspath(directory)\n if normalized_path not in cls._watches:\n # Add the watcher\n watch = cls._observer.schedule(cls._event_handler, normalized_path, recursive=False)\n count = 1\n # add client count and observer to list of observers\n cls._watches[normalized_path] = (count,watch)\n else:\n count, watch = cls._watches[normalized_path]\n count += 1\n cls._watches[normalized_path] = (count,watch)\n",
"def get_body_argument(self,name,default=_ARG_DEFAULT,strip=True):\n arg = self.request.body_arguments.get(name,default)\n if not arg:\n if default is self._ARG_DEFAULT:\n raise self.MissingArgumentError(name)\n return default\n return arg\n"
] | class FilewatcherCreateHandler(JSONHandler,FSMixin):
"""
This handlers implements the filewatcher create REST API.
"""
@sandstone.lib.decorators.authenticated
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FilewatcherDeleteHandler.delete | python | def delete(self, filepath):
Filewatcher.remove_directory_to_watch(filepath)
self.write({'msg':'Watcher deleted for {}'.format(filepath)}) | Stop and delete the specified filewatcher. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L129-L134 | [
"def remove_directory_to_watch(cls, directory):\n normalized_path = os.path.abspath(directory)\n if normalized_path in cls._watches:\n # get the watch from the _watches dict and remove it\n count, watch = cls._watches[normalized_path]\n count -= 1\n if count < 1:\n cls._observer.unschedule(watch)\n del cls._watches[normalized_path]\n else:\n cls._watches[normalized_path] = (count,watch)\n"
] | class FilewatcherDeleteHandler(JSONHandler,FSMixin):
"""
This handlers implements the filewatcher delete REST API.
"""
@sandstone.lib.decorators.authenticated
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FileHandler.get | python | def get(self, filepath):
try:
res = self.fs.get_file_details(filepath)
res = res.to_dict()
self.write(res)
except OSError:
raise tornado.web.HTTPError(404) | Get file details for the specified file. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L144-L153 | null | class FileHandler(JSONHandler,FSMixin):
"""
This handler implements the file resource for the
filesystem REST API.
"""
@sandstone.lib.decorators.authenticated
@sandstone.lib.decorators.authenticated
def put(self, filepath):
    """
    Change the group or permissions of the specified file. Action
    must be specified when calling this method.
    """
    action = self.get_body_argument('action')
    kind = action['action']
    if kind == 'update_group':
        try:
            self.fs.update_group(filepath, action['group'])
            self.write({'msg': 'Updated group for {}'.format(filepath)})
        except OSError:
            raise tornado.web.HTTPError(404)
    elif kind == 'update_permissions':
        try:
            self.fs.update_permissions(filepath, action['permissions'])
            self.write({'msg': 'Updated permissions for {}'.format(filepath)})
        except OSError:
            raise tornado.web.HTTPError(404)
    else:
        raise tornado.web.HTTPError(400)
@sandstone.lib.decorators.authenticated
def delete(self, filepath):
    """
    Remove *filepath* from the filesystem; raises HTTP 404 on failure.
    """
    try:
        self.fs.delete(filepath)
        self.write({'msg': 'File deleted at {}'.format(filepath)})
    except OSError:
        raise tornado.web.HTTPError(404)
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FileHandler.put | python | def put(self, filepath):
action = self.get_body_argument('action')
if action['action'] == 'update_group':
newgrp = action['group']
try:
self.fs.update_group(filepath,newgrp)
self.write({'msg':'Updated group for {}'.format(filepath)})
except OSError:
raise tornado.web.HTTPError(404)
elif action['action'] == 'update_permissions':
newperms = action['permissions']
try:
self.fs.update_permissions(filepath,newperms)
self.write({'msg':'Updated permissions for {}'.format(filepath)})
except OSError:
raise tornado.web.HTTPError(404)
else:
raise tornado.web.HTTPError(400) | Change the group or permissions of the specified file. Action
must be specified when calling this method. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L156-L178 | [
"def get_body_argument(self,name,default=_ARG_DEFAULT,strip=True):\n arg = self.request.body_arguments.get(name,default)\n if not arg:\n if default is self._ARG_DEFAULT:\n raise self.MissingArgumentError(name)\n return default\n return arg\n"
] | class FileHandler(JSONHandler,FSMixin):
"""
This handler implements the file resource for the
filesystem REST API.
"""
@sandstone.lib.decorators.authenticated
def get(self, filepath):
"""
Get file details for the specified file.
"""
try:
res = self.fs.get_file_details(filepath)
res = res.to_dict()
self.write(res)
except OSError:
raise tornado.web.HTTPError(404)
@sandstone.lib.decorators.authenticated
@sandstone.lib.decorators.authenticated
def delete(self, filepath):
"""
Delete the specified file.
"""
try:
self.fs.delete(filepath)
self.write({'msg':'File deleted at {}'.format(filepath)})
except OSError:
raise tornado.web.HTTPError(404)
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FileHandler.delete | python | def delete(self, filepath):
try:
self.fs.delete(filepath)
self.write({'msg':'File deleted at {}'.format(filepath)})
except OSError:
raise tornado.web.HTTPError(404) | Delete the specified file. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L181-L189 | null | class FileHandler(JSONHandler,FSMixin):
"""
This handler implements the file resource for the
filesystem REST API.
"""
@sandstone.lib.decorators.authenticated
def get(self, filepath):
"""
Get file details for the specified file.
"""
try:
res = self.fs.get_file_details(filepath)
res = res.to_dict()
self.write(res)
except OSError:
raise tornado.web.HTTPError(404)
@sandstone.lib.decorators.authenticated
def put(self, filepath):
"""
Change the group or permissions of the specified file. Action
must be specified when calling this method.
"""
action = self.get_body_argument('action')
if action['action'] == 'update_group':
newgrp = action['group']
try:
self.fs.update_group(filepath,newgrp)
self.write({'msg':'Updated group for {}'.format(filepath)})
except OSError:
raise tornado.web.HTTPError(404)
elif action['action'] == 'update_permissions':
newperms = action['permissions']
try:
self.fs.update_permissions(filepath,newperms)
self.write({'msg':'Updated permissions for {}'.format(filepath)})
except OSError:
raise tornado.web.HTTPError(404)
else:
raise tornado.web.HTTPError(400)
@sandstone.lib.decorators.authenticated
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | DirectoryHandler.get | python | def get(self, filepath):
contents = self.get_argument('contents', False)
if contents == u'true':
contents = True
else:
contents = False
dir_sizes = self.get_argument('dir_sizes', False)
if dir_sizes == u'true':
dir_sizes = True
else:
dir_sizes = False
try:
res = self.fs.get_directory_details(filepath,contents=contents,dir_sizes=dir_sizes)
res = res.to_dict()
self.write(res)
except OSError:
raise tornado.web.HTTPError(404) | Get directory details for the specified file. If contents is
set to True (default) then the directory contents will be sent
along with the directory details. If dir_size is set to True
(default=False) then du -hs will be run against subdirectories
for accurate content sizes. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L221-L245 | null | class DirectoryHandler(FileHandler):
"""
This handler implements the directory resource for the
filesystem REST API.
"""
@sandstone.lib.decorators.authenticated
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | DirectoryCreateHandler.post | python | def post(self):
filepath = self.get_body_argument('filepath')
try:
self.fs.create_directory(filepath)
encoded_filepath = tornado.escape.url_escape(filepath,plus=True)
resource_uri = self.reverse_url('filesystem:directories-details', encoded_filepath)
self.write({'uri':resource_uri})
except OSError:
raise tornado.web.HTTPError(404) | Create a new directory at the specified path. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L255-L267 | [
"def get_body_argument(self,name,default=_ARG_DEFAULT,strip=True):\n arg = self.request.body_arguments.get(name,default)\n if not arg:\n if default is self._ARG_DEFAULT:\n raise self.MissingArgumentError(name)\n return default\n return arg\n"
] | class DirectoryCreateHandler(JSONHandler,FSMixin):
"""
This handler implements the directory create resource for the
filesystem REST API. Returns the resource URI if successfully
created.
"""
@sandstone.lib.decorators.authenticated
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FileContentsHandler.get | python | def get(self, filepath):
exists = self.fs.exists(filepath)
if exists:
mime = magic.Magic(mime=True)
mime_type = mime.from_file(filepath)
if mime_type in self.unsupported_types:
self.set_status(204)
return
else:
contents = self.fs.read_file(filepath)
self.write({'filepath':filepath,'contents': contents})
else:
raise tornado.web.HTTPError(404) | Get the contents of the specified file. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L277-L292 | null | class FileContentsHandler(JSONHandler, FSMixin):
"""
This handler provides read and write functionality for file contents.
"""
unsupported_types = ['application/octet-stream']
@sandstone.lib.decorators.authenticated
@sandstone.lib.decorators.authenticated
def post(self, filepath):
"""
Write the given contents to the specified file. This is not
an append, all file contents will be replaced by the contents
given.
"""
try:
content = self.get_body_argument('content')
self.fs.write_file(filepath, content)
self.write({'msg': 'Updated file at {}'.format(filepath)})
except OSError:
raise tornado.web.HTTPError(404)
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FileContentsHandler.post | python | def post(self, filepath):
try:
content = self.get_body_argument('content')
self.fs.write_file(filepath, content)
self.write({'msg': 'Updated file at {}'.format(filepath)})
except OSError:
raise tornado.web.HTTPError(404) | Write the given contents to the specified file. This is not
an append, all file contents will be replaced by the contents
given. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L295-L306 | [
"def get_body_argument(self,name,default=_ARG_DEFAULT,strip=True):\n arg = self.request.body_arguments.get(name,default)\n if not arg:\n if default is self._ARG_DEFAULT:\n raise self.MissingArgumentError(name)\n return default\n return arg\n"
] | class FileContentsHandler(JSONHandler, FSMixin):
"""
This handler provides read and write functionality for file contents.
"""
unsupported_types = ['application/octet-stream']
@sandstone.lib.decorators.authenticated
def get(self, filepath):
"""
Get the contents of the specified file.
"""
exists = self.fs.exists(filepath)
if exists:
mime = magic.Magic(mime=True)
mime_type = mime.from_file(filepath)
if mime_type in self.unsupported_types:
self.set_status(204)
return
else:
contents = self.fs.read_file(filepath)
self.write({'filepath':filepath,'contents': contents})
else:
raise tornado.web.HTTPError(404)
@sandstone.lib.decorators.authenticated
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FileDownloadHandler.get_content | python | def get_content(self, start=None, end=None):
with open(self.filepath, "rb") as file:
if start is not None:
file.seek(start)
if end is not None:
remaining = end - (start or 0)
else:
remaining = None
while True:
chunk_size = 64 * 1024
if remaining is not None and remaining < chunk_size:
chunk_size = remaining
chunk = file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk)
yield chunk
else:
if remaining is not None:
assert remaining == 0
return | Retrieve the content of the requested resource which is located
at the given absolute path.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L387-L414 | null | class FileDownloadHandler(BaseHandler,FSMixin):
"""
This handler provides file downloads and hosting. FileContentsHandler will
eventually be deprecated by this handler.
"""
def head(self, filepath):
return self.get(filepath, include_body=False)
@sandstone.lib.decorators.authenticated
@gen.coroutine
def get(self, filepath, include_body=True):
if not self.fs.exists(filepath):
raise tornado.web.HTTPError(404)
# Set up our path instance variables.
self.filepath = filepath
del filepath # make sure we don't refer to filepath instead of self.filepath again
self.set_headers()
request_range = None
range_header = self.request.headers.get("Range")
if range_header:
# As per RFC 2616 14.16, if an invalid Range header is specified,
# the request will be treated as if the header didn't exist.
request_range = httputil._parse_request_range(range_header)
size = self.get_content_size()
if request_range:
start, end = request_range
if (start is not None and start >= size) or end == 0:
# As per RFC 2616 14.35.1, a range is not satisfiable only: if
# the first requested byte is equal to or greater than the
# content, or when a suffix with length 0 is specified
self.set_status(416) # Range Not Satisfiable
self.set_header("Content-Type", "text/plain")
self.set_header("Content-Range", "bytes */%s" % (size, ))
return
if start is not None and start < 0:
start += size
if end is not None and end > size:
# Clients sometimes blindly use a large range to limit their
# download size; cap the endpoint at the actual file size.
end = size
# Note: only return HTTP 206 if less than the entire range has been
# requested. Not only is this semantically correct, but Chrome
# refuses to play audio if it gets an HTTP 206 in response to
# ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header("Content-Range",
httputil._get_content_range(start, end, size))
else:
start = end = None
if start is not None and end is not None:
content_length = end - start
elif end is not None:
content_length = end
elif start is not None:
content_length = size - start
else:
content_length = size
self.set_header("Content-Length", content_length)
if include_body:
content = self.get_content(start, end)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
yield self.flush()
except iostream.StreamClosedError:
return
else:
assert self.request.method == "HEAD"
def set_headers(self):
"""
Sets the content headers on the response.
"""
self.set_header("Accept-Ranges", "bytes")
content_type = self.get_content_type()
if content_type:
self.set_header("Content-Type", content_type)
def _stat(self):
if not hasattr(self, '_stat_result'):
self._stat_result = os.stat(self.filepath)
return self._stat_result
def get_content_size(self):
"""
Retrieve the total size of the resource at the given path.
"""
stat_result = self._stat()
return stat_result[stat.ST_SIZE]
def get_content_type(self):
"""
Returns the ``Content-Type`` header to be used for this request.
"""
mime_type, encoding = mimetypes.guess_type(self.filepath)
if encoding == "gzip":
return "application/gzip"
elif encoding is not None:
return "application/octet-stream"
elif mime_type is not None:
return mime_type
# if mime_type not detected, use application/octet-stream
else:
return "application/octet-stream"
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FileDownloadHandler.set_headers | python | def set_headers(self):
self.set_header("Accept-Ranges", "bytes")
content_type = self.get_content_type()
if content_type:
self.set_header("Content-Type", content_type) | Sets the content headers on the response. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L416-L424 | [
"def get_content_type(self):\n \"\"\"\n Returns the ``Content-Type`` header to be used for this request.\n \"\"\"\n mime_type, encoding = mimetypes.guess_type(self.filepath)\n if encoding == \"gzip\":\n return \"application/gzip\"\n elif encoding is not None:\n return \"application/octet-stream\"\n elif mime_type is not None:\n return mime_type\n # if mime_type not detected, use application/octet-stream\n else:\n return \"application/octet-stream\"\n"
] | class FileDownloadHandler(BaseHandler,FSMixin):
"""
This handler provides file downloads and hosting. FileContentsHandler will
eventually be deprecated by this handler.
"""
def head(self, filepath):
return self.get(filepath, include_body=False)
@sandstone.lib.decorators.authenticated
@gen.coroutine
def get(self, filepath, include_body=True):
if not self.fs.exists(filepath):
raise tornado.web.HTTPError(404)
# Set up our path instance variables.
self.filepath = filepath
del filepath # make sure we don't refer to filepath instead of self.filepath again
self.set_headers()
request_range = None
range_header = self.request.headers.get("Range")
if range_header:
# As per RFC 2616 14.16, if an invalid Range header is specified,
# the request will be treated as if the header didn't exist.
request_range = httputil._parse_request_range(range_header)
size = self.get_content_size()
if request_range:
start, end = request_range
if (start is not None and start >= size) or end == 0:
# As per RFC 2616 14.35.1, a range is not satisfiable only: if
# the first requested byte is equal to or greater than the
# content, or when a suffix with length 0 is specified
self.set_status(416) # Range Not Satisfiable
self.set_header("Content-Type", "text/plain")
self.set_header("Content-Range", "bytes */%s" % (size, ))
return
if start is not None and start < 0:
start += size
if end is not None and end > size:
# Clients sometimes blindly use a large range to limit their
# download size; cap the endpoint at the actual file size.
end = size
# Note: only return HTTP 206 if less than the entire range has been
# requested. Not only is this semantically correct, but Chrome
# refuses to play audio if it gets an HTTP 206 in response to
# ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header("Content-Range",
httputil._get_content_range(start, end, size))
else:
start = end = None
if start is not None and end is not None:
content_length = end - start
elif end is not None:
content_length = end
elif start is not None:
content_length = size - start
else:
content_length = size
self.set_header("Content-Length", content_length)
if include_body:
content = self.get_content(start, end)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
yield self.flush()
except iostream.StreamClosedError:
return
else:
assert self.request.method == "HEAD"
def get_content(self, start=None, end=None):
"""
Retrieve the content of the requested resource which is located
at the given absolute path.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation.
"""
with open(self.filepath, "rb") as file:
if start is not None:
file.seek(start)
if end is not None:
remaining = end - (start or 0)
else:
remaining = None
while True:
chunk_size = 64 * 1024
if remaining is not None and remaining < chunk_size:
chunk_size = remaining
chunk = file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk)
yield chunk
else:
if remaining is not None:
assert remaining == 0
return
def _stat(self):
if not hasattr(self, '_stat_result'):
self._stat_result = os.stat(self.filepath)
return self._stat_result
def get_content_size(self):
"""
Retrieve the total size of the resource at the given path.
"""
stat_result = self._stat()
return stat_result[stat.ST_SIZE]
def get_content_type(self):
"""
Returns the ``Content-Type`` header to be used for this request.
"""
mime_type, encoding = mimetypes.guess_type(self.filepath)
if encoding == "gzip":
return "application/gzip"
elif encoding is not None:
return "application/octet-stream"
elif mime_type is not None:
return mime_type
# if mime_type not detected, use application/octet-stream
else:
return "application/octet-stream"
|
SandstoneHPC/sandstone-ide | sandstone/lib/filesystem/handlers.py | FileDownloadHandler.get_content_type | python | def get_content_type(self):
mime_type, encoding = mimetypes.guess_type(self.filepath)
if encoding == "gzip":
return "application/gzip"
elif encoding is not None:
return "application/octet-stream"
elif mime_type is not None:
return mime_type
# if mime_type not detected, use application/octet-stream
else:
return "application/octet-stream" | Returns the ``Content-Type`` header to be used for this request. | train | https://github.com/SandstoneHPC/sandstone-ide/blob/7a47947fb07281c3e3018042863dc67e7e56dc04/sandstone/lib/filesystem/handlers.py#L438-L451 | null | class FileDownloadHandler(BaseHandler,FSMixin):
"""
This handler provides file downloads and hosting. FileContentsHandler will
eventually be deprecated by this handler.
"""
def head(self, filepath):
return self.get(filepath, include_body=False)
@sandstone.lib.decorators.authenticated
@gen.coroutine
def get(self, filepath, include_body=True):
if not self.fs.exists(filepath):
raise tornado.web.HTTPError(404)
# Set up our path instance variables.
self.filepath = filepath
del filepath # make sure we don't refer to filepath instead of self.filepath again
self.set_headers()
request_range = None
range_header = self.request.headers.get("Range")
if range_header:
# As per RFC 2616 14.16, if an invalid Range header is specified,
# the request will be treated as if the header didn't exist.
request_range = httputil._parse_request_range(range_header)
size = self.get_content_size()
if request_range:
start, end = request_range
if (start is not None and start >= size) or end == 0:
# As per RFC 2616 14.35.1, a range is not satisfiable only: if
# the first requested byte is equal to or greater than the
# content, or when a suffix with length 0 is specified
self.set_status(416) # Range Not Satisfiable
self.set_header("Content-Type", "text/plain")
self.set_header("Content-Range", "bytes */%s" % (size, ))
return
if start is not None and start < 0:
start += size
if end is not None and end > size:
# Clients sometimes blindly use a large range to limit their
# download size; cap the endpoint at the actual file size.
end = size
# Note: only return HTTP 206 if less than the entire range has been
# requested. Not only is this semantically correct, but Chrome
# refuses to play audio if it gets an HTTP 206 in response to
# ``Range: bytes=0-``.
if size != (end or size) - (start or 0):
self.set_status(206) # Partial Content
self.set_header("Content-Range",
httputil._get_content_range(start, end, size))
else:
start = end = None
if start is not None and end is not None:
content_length = end - start
elif end is not None:
content_length = end
elif start is not None:
content_length = size - start
else:
content_length = size
self.set_header("Content-Length", content_length)
if include_body:
content = self.get_content(start, end)
if isinstance(content, bytes):
content = [content]
for chunk in content:
try:
self.write(chunk)
yield self.flush()
except iostream.StreamClosedError:
return
else:
assert self.request.method == "HEAD"
def get_content(self, start=None, end=None):
"""
Retrieve the content of the requested resource which is located
at the given absolute path.
This method should either return a byte string or an iterator
of byte strings. The latter is preferred for large files
as it helps reduce memory fragmentation.
"""
with open(self.filepath, "rb") as file:
if start is not None:
file.seek(start)
if end is not None:
remaining = end - (start or 0)
else:
remaining = None
while True:
chunk_size = 64 * 1024
if remaining is not None and remaining < chunk_size:
chunk_size = remaining
chunk = file.read(chunk_size)
if chunk:
if remaining is not None:
remaining -= len(chunk)
yield chunk
else:
if remaining is not None:
assert remaining == 0
return
def set_headers(self):
"""
Sets the content headers on the response.
"""
self.set_header("Accept-Ranges", "bytes")
content_type = self.get_content_type()
if content_type:
self.set_header("Content-Type", content_type)
def _stat(self):
if not hasattr(self, '_stat_result'):
self._stat_result = os.stat(self.filepath)
return self._stat_result
def get_content_size(self):
"""
Retrieve the total size of the resource at the given path.
"""
stat_result = self._stat()
return stat_result[stat.ST_SIZE]
|
contains-io/typet | typet/validation.py | _BoundedMeta._get_bases | python | def _get_bases(type_):
# type: (type) -> Tuple[type, type]
try:
class _(type_): # type: ignore
"""Check if type_ is subclassable."""
BaseClass = type_
except TypeError:
BaseClass = object
class MetaClass(_ValidationMeta, BaseClass.__class__): # type: ignore
"""Use the type_ meta and include base validation functionality."""
return BaseClass, MetaClass | Get the base and meta classes to use in creating a subclass.
Args:
type_: The type to subclass.
Returns:
A tuple containing two values: a base class, and a metaclass. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/validation.py#L158-L180 | null | class _BoundedMeta(Uninstantiable):
"""A metaclass that adds slicing to a class that creates new classes."""
def __getitem__(cls, args):
# type: (Union[Tuple[_T, Any], Tuple[_T, Any, Callable]]) -> type
"""Create a new subclass of a type bounded by the arguments.
If a callable is passed as the third argument of the slice, it will be
used as the comparison function for the boundaries.
Args:
args: A tuple with two or three parameters: a type, a slice
representing the minimum and maximum lengths allowed for values
of that type and, optionally, a function to use on values
before comparing against the bounds.
"""
type_, bound, keyfunc = cls._get_args(args)
keyfunc_name = cls._get_fullname(keyfunc)
identity = cls._identity
BaseClass, MetaClass = cls._get_bases(type_)
instantiate = cls._instantiate
@six.add_metaclass(MetaClass) # type: ignore
class _BoundedSubclass(BaseClass): # type: ignore
"""A subclass of type_ or object, bounded by a slice."""
def __new__(cls, __value, *args, **kwargs):
# type: (Type[_BoundedSubclass], Any, *Any, **Any) -> type
"""Return __value cast to _T.
Any additional arguments are passed as-is to the constructor.
Args:
__value: A value that can be converted to type _T.
args: Any additional positional arguments passed to the
constructor.
kwargs: Any additional keyword arguments passed to the
constructor.
"""
instance = instantiate(
BaseClass, type_, __value, *args, **kwargs
)
cmp_val = keyfunc(instance)
if bound.start is not None or bound.stop is not None:
if bound.start is not None and cmp_val < bound.start:
if keyfunc is not identity:
raise ValueError(
"The value of {}({}) [{}] is below the minimum"
" allowed value of {}.".format(
keyfunc_name,
repr(__value),
repr(cmp_val),
bound.start,
)
)
raise ValueError(
"The value {} is below the minimum allowed value "
"of {}.".format(repr(__value), bound.start)
)
if bound.stop is not None and cmp_val > bound.stop:
if keyfunc is not identity:
raise ValueError(
"The value of {}({}) [{}] is above the maximum"
" allowed value of {}.".format(
keyfunc_name,
repr(__value),
repr(cmp_val),
bound.stop,
)
)
raise ValueError(
"The value {} is above the maximum allowed value "
"of {}.".format(repr(__value), bound.stop)
)
elif not cmp_val:
raise ValueError(
"{}({}) is False".format(keyfunc_name, repr(instance))
)
return instance
_BoundedSubclass.__type__ = type_
_BoundedSubclass.__class_repr__ = cls._get_class_repr(
type_, bound, keyfunc, keyfunc_name
)
return _BoundedSubclass
@staticmethod
@staticmethod
def _instantiate(class_, type_, __value, *args, **kwargs):
"""Instantiate the object if possible.
Args:
class_: The class to instantiate.
type_: The the class is uninstantiable, attempt to cast to a base
type.
__value: The value to return if the class and type are
uninstantiable.
*args: The positional arguments to pass to the class.
**kwargs: The keyword arguments to pass to the class.
Returns:
The class or base type instantiated using the arguments. If it is
not possible to instantiate either, returns __value.
"""
try:
return class_(__value, *args, **kwargs)
except TypeError:
try:
return type_(__value, *args, **kwargs)
except Exception: # pylint: disable=broad-except
return __value
def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):
# type: (Any, slice, Callable, str) -> str
"""Return a class representation using the slice parameters.
Args:
type_: The type the class was sliced with.
bound: The boundaries specified for the values of type_.
keyfunc: The comparison function used to check the value
boundaries.
keyfunc_name: The name of keyfunc.
Returns:
A string representing the class.
"""
if keyfunc is not cls._default:
return "{}.{}[{}, {}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
cls._get_bound_repr(bound),
keyfunc_name,
)
return "{}.{}[{}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
cls._get_bound_repr(bound),
)
def _get_args(cls, args):
# type: (tuple) -> Tuple[Any, slice, Callable]
"""Return the parameters necessary to check type boundaries.
Args:
args: A tuple with two or three elements: a type, a slice
representing the minimum and maximum lengths allowed for values
of that type and, optionally, a function to use on values
before comparing against the bounds.
Returns:
A tuple with three elements: a type, a slice, and a function to
apply to objects of the given type. If no function was specified,
it returns the identity function.
"""
if not isinstance(args, tuple):
raise TypeError(
"{}[...] takes two or three arguments.".format(cls.__name__)
)
elif len(args) == 2:
type_, bound = args
keyfunc = cls._identity
elif len(args) == 3:
type_, bound, keyfunc = args
else:
raise TypeError(
"Too many parameters given to {}[...]".format(cls.__name__)
)
if not isinstance(bound, slice):
bound = slice(bound)
return eval_type(type_), bound, keyfunc
@staticmethod
def _get_bound_repr(bound):
# type: (slice) -> str
"""Return a string representation of a boundary slice.
Args:
bound: A slice object.
Returns:
A string representing the slice.
"""
return "{}:{}".format(bound.start or "", bound.stop or "")
@staticmethod
def _identity(obj):
# type: (Any) -> Any
"""Return the given object.
Args:
obj: An object.
Returns:
The given object.
"""
return obj
_default = _identity # type: Callable[[Any], Any]
@staticmethod
def _get_fullname(obj):
# type: (Any) -> str
"""Get the full name of an object including the module.
Args:
obj: An object.
Returns:
The full class name of the object.
"""
if not hasattr(obj, "__name__"):
obj = obj.__class__
if obj.__module__ in ("builtins", "__builtin__"):
return obj.__name__
return "{}.{}".format(obj.__module__, obj.__name__)
|
contains-io/typet | typet/validation.py | _BoundedMeta._instantiate | python | def _instantiate(class_, type_, __value, *args, **kwargs):
try:
return class_(__value, *args, **kwargs)
except TypeError:
try:
return type_(__value, *args, **kwargs)
except Exception: # pylint: disable=broad-except
return __value | Instantiate the object if possible.
Args:
class_: The class to instantiate.
type_: The the class is uninstantiable, attempt to cast to a base
type.
__value: The value to return if the class and type are
uninstantiable.
*args: The positional arguments to pass to the class.
**kwargs: The keyword arguments to pass to the class.
Returns:
The class or base type instantiated using the arguments. If it is
not possible to instantiate either, returns __value. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/validation.py#L183-L205 | null | class _BoundedMeta(Uninstantiable):
"""A metaclass that adds slicing to a class that creates new classes."""
def __getitem__(cls, args):
# type: (Union[Tuple[_T, Any], Tuple[_T, Any, Callable]]) -> type
"""Create a new subclass of a type bounded by the arguments.
If a callable is passed as the third argument of the slice, it will be
used as the comparison function for the boundaries.
Args:
args: A tuple with two or three parameters: a type, a slice
representing the minimum and maximum lengths allowed for values
of that type and, optionally, a function to use on values
before comparing against the bounds.
"""
type_, bound, keyfunc = cls._get_args(args)
keyfunc_name = cls._get_fullname(keyfunc)
identity = cls._identity
BaseClass, MetaClass = cls._get_bases(type_)
instantiate = cls._instantiate
@six.add_metaclass(MetaClass) # type: ignore
class _BoundedSubclass(BaseClass): # type: ignore
"""A subclass of type_ or object, bounded by a slice."""
def __new__(cls, __value, *args, **kwargs):
# type: (Type[_BoundedSubclass], Any, *Any, **Any) -> type
"""Return __value cast to _T.
Any additional arguments are passed as-is to the constructor.
Args:
__value: A value that can be converted to type _T.
args: Any additional positional arguments passed to the
constructor.
kwargs: Any additional keyword arguments passed to the
constructor.
"""
instance = instantiate(
BaseClass, type_, __value, *args, **kwargs
)
cmp_val = keyfunc(instance)
if bound.start is not None or bound.stop is not None:
if bound.start is not None and cmp_val < bound.start:
if keyfunc is not identity:
raise ValueError(
"The value of {}({}) [{}] is below the minimum"
" allowed value of {}.".format(
keyfunc_name,
repr(__value),
repr(cmp_val),
bound.start,
)
)
raise ValueError(
"The value {} is below the minimum allowed value "
"of {}.".format(repr(__value), bound.start)
)
if bound.stop is not None and cmp_val > bound.stop:
if keyfunc is not identity:
raise ValueError(
"The value of {}({}) [{}] is above the maximum"
" allowed value of {}.".format(
keyfunc_name,
repr(__value),
repr(cmp_val),
bound.stop,
)
)
raise ValueError(
"The value {} is above the maximum allowed value "
"of {}.".format(repr(__value), bound.stop)
)
elif not cmp_val:
raise ValueError(
"{}({}) is False".format(keyfunc_name, repr(instance))
)
return instance
_BoundedSubclass.__type__ = type_
_BoundedSubclass.__class_repr__ = cls._get_class_repr(
type_, bound, keyfunc, keyfunc_name
)
return _BoundedSubclass
@staticmethod
def _get_bases(type_):
# type: (type) -> Tuple[type, type]
"""Get the base and meta classes to use in creating a subclass.
Args:
type_: The type to subclass.
Returns:
A tuple containing two values: a base class, and a metaclass.
"""
try:
class _(type_): # type: ignore
"""Check if type_ is subclassable."""
BaseClass = type_
except TypeError:
BaseClass = object
class MetaClass(_ValidationMeta, BaseClass.__class__): # type: ignore
"""Use the type_ meta and include base validation functionality."""
return BaseClass, MetaClass
@staticmethod
def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):
# type: (Any, slice, Callable, str) -> str
"""Return a class representation using the slice parameters.
Args:
type_: The type the class was sliced with.
bound: The boundaries specified for the values of type_.
keyfunc: The comparison function used to check the value
boundaries.
keyfunc_name: The name of keyfunc.
Returns:
A string representing the class.
"""
if keyfunc is not cls._default:
return "{}.{}[{}, {}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
cls._get_bound_repr(bound),
keyfunc_name,
)
return "{}.{}[{}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
cls._get_bound_repr(bound),
)
def _get_args(cls, args):
# type: (tuple) -> Tuple[Any, slice, Callable]
"""Return the parameters necessary to check type boundaries.
Args:
args: A tuple with two or three elements: a type, a slice
representing the minimum and maximum lengths allowed for values
of that type and, optionally, a function to use on values
before comparing against the bounds.
Returns:
A tuple with three elements: a type, a slice, and a function to
apply to objects of the given type. If no function was specified,
it returns the identity function.
"""
if not isinstance(args, tuple):
raise TypeError(
"{}[...] takes two or three arguments.".format(cls.__name__)
)
elif len(args) == 2:
type_, bound = args
keyfunc = cls._identity
elif len(args) == 3:
type_, bound, keyfunc = args
else:
raise TypeError(
"Too many parameters given to {}[...]".format(cls.__name__)
)
if not isinstance(bound, slice):
bound = slice(bound)
return eval_type(type_), bound, keyfunc
@staticmethod
def _get_bound_repr(bound):
# type: (slice) -> str
"""Return a string representation of a boundary slice.
Args:
bound: A slice object.
Returns:
A string representing the slice.
"""
return "{}:{}".format(bound.start or "", bound.stop or "")
@staticmethod
def _identity(obj):
# type: (Any) -> Any
"""Return the given object.
Args:
obj: An object.
Returns:
The given object.
"""
return obj
_default = _identity # type: Callable[[Any], Any]
@staticmethod
def _get_fullname(obj):
# type: (Any) -> str
"""Get the full name of an object including the module.
Args:
obj: An object.
Returns:
The full class name of the object.
"""
if not hasattr(obj, "__name__"):
obj = obj.__class__
if obj.__module__ in ("builtins", "__builtin__"):
return obj.__name__
return "{}.{}".format(obj.__module__, obj.__name__)
|
contains-io/typet | typet/validation.py | _BoundedMeta._get_class_repr | python | def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):
# type: (Any, slice, Callable, str) -> str
if keyfunc is not cls._default:
return "{}.{}[{}, {}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
cls._get_bound_repr(bound),
keyfunc_name,
)
return "{}.{}[{}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
cls._get_bound_repr(bound),
) | Return a class representation using the slice parameters.
Args:
type_: The type the class was sliced with.
bound: The boundaries specified for the values of type_.
keyfunc: The comparison function used to check the value
boundaries.
keyfunc_name: The name of keyfunc.
Returns:
A string representing the class. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/validation.py#L207-L234 | [
"def _get_bound_repr(bound):\n # type: (slice) -> str\n \"\"\"Return a string representation of a boundary slice.\n\n Args:\n bound: A slice object.\n\n Returns:\n A string representing the slice.\n \"\"\"\n return \"{}:{}\".format(bound.start or \"\", bound.stop or \"\")\n",
"def _get_fullname(obj):\n # type: (Any) -> str\n \"\"\"Get the full name of an object including the module.\n\n Args:\n obj: An object.\n\n Returns:\n The full class name of the object.\n \"\"\"\n if not hasattr(obj, \"__name__\"):\n obj = obj.__class__\n if obj.__module__ in (\"builtins\", \"__builtin__\"):\n return obj.__name__\n return \"{}.{}\".format(obj.__module__, obj.__name__)\n"
] | class _BoundedMeta(Uninstantiable):
"""A metaclass that adds slicing to a class that creates new classes."""
def __getitem__(cls, args):
# type: (Union[Tuple[_T, Any], Tuple[_T, Any, Callable]]) -> type
"""Create a new subclass of a type bounded by the arguments.
If a callable is passed as the third argument of the slice, it will be
used as the comparison function for the boundaries.
Args:
args: A tuple with two or three parameters: a type, a slice
representing the minimum and maximum lengths allowed for values
of that type and, optionally, a function to use on values
before comparing against the bounds.
"""
type_, bound, keyfunc = cls._get_args(args)
keyfunc_name = cls._get_fullname(keyfunc)
identity = cls._identity
BaseClass, MetaClass = cls._get_bases(type_)
instantiate = cls._instantiate
@six.add_metaclass(MetaClass) # type: ignore
class _BoundedSubclass(BaseClass): # type: ignore
"""A subclass of type_ or object, bounded by a slice."""
def __new__(cls, __value, *args, **kwargs):
# type: (Type[_BoundedSubclass], Any, *Any, **Any) -> type
"""Return __value cast to _T.
Any additional arguments are passed as-is to the constructor.
Args:
__value: A value that can be converted to type _T.
args: Any additional positional arguments passed to the
constructor.
kwargs: Any additional keyword arguments passed to the
constructor.
"""
instance = instantiate(
BaseClass, type_, __value, *args, **kwargs
)
cmp_val = keyfunc(instance)
if bound.start is not None or bound.stop is not None:
if bound.start is not None and cmp_val < bound.start:
if keyfunc is not identity:
raise ValueError(
"The value of {}({}) [{}] is below the minimum"
" allowed value of {}.".format(
keyfunc_name,
repr(__value),
repr(cmp_val),
bound.start,
)
)
raise ValueError(
"The value {} is below the minimum allowed value "
"of {}.".format(repr(__value), bound.start)
)
if bound.stop is not None and cmp_val > bound.stop:
if keyfunc is not identity:
raise ValueError(
"The value of {}({}) [{}] is above the maximum"
" allowed value of {}.".format(
keyfunc_name,
repr(__value),
repr(cmp_val),
bound.stop,
)
)
raise ValueError(
"The value {} is above the maximum allowed value "
"of {}.".format(repr(__value), bound.stop)
)
elif not cmp_val:
raise ValueError(
"{}({}) is False".format(keyfunc_name, repr(instance))
)
return instance
_BoundedSubclass.__type__ = type_
_BoundedSubclass.__class_repr__ = cls._get_class_repr(
type_, bound, keyfunc, keyfunc_name
)
return _BoundedSubclass
@staticmethod
def _get_bases(type_):
# type: (type) -> Tuple[type, type]
"""Get the base and meta classes to use in creating a subclass.
Args:
type_: The type to subclass.
Returns:
A tuple containing two values: a base class, and a metaclass.
"""
try:
class _(type_): # type: ignore
"""Check if type_ is subclassable."""
BaseClass = type_
except TypeError:
BaseClass = object
class MetaClass(_ValidationMeta, BaseClass.__class__): # type: ignore
"""Use the type_ meta and include base validation functionality."""
return BaseClass, MetaClass
@staticmethod
def _instantiate(class_, type_, __value, *args, **kwargs):
"""Instantiate the object if possible.
Args:
class_: The class to instantiate.
type_: The the class is uninstantiable, attempt to cast to a base
type.
__value: The value to return if the class and type are
uninstantiable.
*args: The positional arguments to pass to the class.
**kwargs: The keyword arguments to pass to the class.
Returns:
The class or base type instantiated using the arguments. If it is
not possible to instantiate either, returns __value.
"""
try:
return class_(__value, *args, **kwargs)
except TypeError:
try:
return type_(__value, *args, **kwargs)
except Exception: # pylint: disable=broad-except
return __value
def _get_args(cls, args):
# type: (tuple) -> Tuple[Any, slice, Callable]
"""Return the parameters necessary to check type boundaries.
Args:
args: A tuple with two or three elements: a type, a slice
representing the minimum and maximum lengths allowed for values
of that type and, optionally, a function to use on values
before comparing against the bounds.
Returns:
A tuple with three elements: a type, a slice, and a function to
apply to objects of the given type. If no function was specified,
it returns the identity function.
"""
if not isinstance(args, tuple):
raise TypeError(
"{}[...] takes two or three arguments.".format(cls.__name__)
)
elif len(args) == 2:
type_, bound = args
keyfunc = cls._identity
elif len(args) == 3:
type_, bound, keyfunc = args
else:
raise TypeError(
"Too many parameters given to {}[...]".format(cls.__name__)
)
if not isinstance(bound, slice):
bound = slice(bound)
return eval_type(type_), bound, keyfunc
@staticmethod
def _get_bound_repr(bound):
# type: (slice) -> str
"""Return a string representation of a boundary slice.
Args:
bound: A slice object.
Returns:
A string representing the slice.
"""
return "{}:{}".format(bound.start or "", bound.stop or "")
@staticmethod
def _identity(obj):
# type: (Any) -> Any
"""Return the given object.
Args:
obj: An object.
Returns:
The given object.
"""
return obj
_default = _identity # type: Callable[[Any], Any]
@staticmethod
def _get_fullname(obj):
# type: (Any) -> str
"""Get the full name of an object including the module.
Args:
obj: An object.
Returns:
The full class name of the object.
"""
if not hasattr(obj, "__name__"):
obj = obj.__class__
if obj.__module__ in ("builtins", "__builtin__"):
return obj.__name__
return "{}.{}".format(obj.__module__, obj.__name__)
|
contains-io/typet | typet/validation.py | _BoundedMeta._get_args | python | def _get_args(cls, args):
# type: (tuple) -> Tuple[Any, slice, Callable]
if not isinstance(args, tuple):
raise TypeError(
"{}[...] takes two or three arguments.".format(cls.__name__)
)
elif len(args) == 2:
type_, bound = args
keyfunc = cls._identity
elif len(args) == 3:
type_, bound, keyfunc = args
else:
raise TypeError(
"Too many parameters given to {}[...]".format(cls.__name__)
)
if not isinstance(bound, slice):
bound = slice(bound)
return eval_type(type_), bound, keyfunc | Return the parameters necessary to check type boundaries.
Args:
args: A tuple with two or three elements: a type, a slice
representing the minimum and maximum lengths allowed for values
of that type and, optionally, a function to use on values
before comparing against the bounds.
Returns:
A tuple with three elements: a type, a slice, and a function to
apply to objects of the given type. If no function was specified,
it returns the identity function. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/validation.py#L236-L266 | null | class _BoundedMeta(Uninstantiable):
"""A metaclass that adds slicing to a class that creates new classes."""
def __getitem__(cls, args):
# type: (Union[Tuple[_T, Any], Tuple[_T, Any, Callable]]) -> type
"""Create a new subclass of a type bounded by the arguments.
If a callable is passed as the third argument of the slice, it will be
used as the comparison function for the boundaries.
Args:
args: A tuple with two or three parameters: a type, a slice
representing the minimum and maximum lengths allowed for values
of that type and, optionally, a function to use on values
before comparing against the bounds.
"""
type_, bound, keyfunc = cls._get_args(args)
keyfunc_name = cls._get_fullname(keyfunc)
identity = cls._identity
BaseClass, MetaClass = cls._get_bases(type_)
instantiate = cls._instantiate
@six.add_metaclass(MetaClass) # type: ignore
class _BoundedSubclass(BaseClass): # type: ignore
"""A subclass of type_ or object, bounded by a slice."""
def __new__(cls, __value, *args, **kwargs):
# type: (Type[_BoundedSubclass], Any, *Any, **Any) -> type
"""Return __value cast to _T.
Any additional arguments are passed as-is to the constructor.
Args:
__value: A value that can be converted to type _T.
args: Any additional positional arguments passed to the
constructor.
kwargs: Any additional keyword arguments passed to the
constructor.
"""
instance = instantiate(
BaseClass, type_, __value, *args, **kwargs
)
cmp_val = keyfunc(instance)
if bound.start is not None or bound.stop is not None:
if bound.start is not None and cmp_val < bound.start:
if keyfunc is not identity:
raise ValueError(
"The value of {}({}) [{}] is below the minimum"
" allowed value of {}.".format(
keyfunc_name,
repr(__value),
repr(cmp_val),
bound.start,
)
)
raise ValueError(
"The value {} is below the minimum allowed value "
"of {}.".format(repr(__value), bound.start)
)
if bound.stop is not None and cmp_val > bound.stop:
if keyfunc is not identity:
raise ValueError(
"The value of {}({}) [{}] is above the maximum"
" allowed value of {}.".format(
keyfunc_name,
repr(__value),
repr(cmp_val),
bound.stop,
)
)
raise ValueError(
"The value {} is above the maximum allowed value "
"of {}.".format(repr(__value), bound.stop)
)
elif not cmp_val:
raise ValueError(
"{}({}) is False".format(keyfunc_name, repr(instance))
)
return instance
_BoundedSubclass.__type__ = type_
_BoundedSubclass.__class_repr__ = cls._get_class_repr(
type_, bound, keyfunc, keyfunc_name
)
return _BoundedSubclass
@staticmethod
def _get_bases(type_):
# type: (type) -> Tuple[type, type]
"""Get the base and meta classes to use in creating a subclass.
Args:
type_: The type to subclass.
Returns:
A tuple containing two values: a base class, and a metaclass.
"""
try:
class _(type_): # type: ignore
"""Check if type_ is subclassable."""
BaseClass = type_
except TypeError:
BaseClass = object
class MetaClass(_ValidationMeta, BaseClass.__class__): # type: ignore
"""Use the type_ meta and include base validation functionality."""
return BaseClass, MetaClass
@staticmethod
def _instantiate(class_, type_, __value, *args, **kwargs):
"""Instantiate the object if possible.
Args:
class_: The class to instantiate.
type_: The the class is uninstantiable, attempt to cast to a base
type.
__value: The value to return if the class and type are
uninstantiable.
*args: The positional arguments to pass to the class.
**kwargs: The keyword arguments to pass to the class.
Returns:
The class or base type instantiated using the arguments. If it is
not possible to instantiate either, returns __value.
"""
try:
return class_(__value, *args, **kwargs)
except TypeError:
try:
return type_(__value, *args, **kwargs)
except Exception: # pylint: disable=broad-except
return __value
def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):
# type: (Any, slice, Callable, str) -> str
"""Return a class representation using the slice parameters.
Args:
type_: The type the class was sliced with.
bound: The boundaries specified for the values of type_.
keyfunc: The comparison function used to check the value
boundaries.
keyfunc_name: The name of keyfunc.
Returns:
A string representing the class.
"""
if keyfunc is not cls._default:
return "{}.{}[{}, {}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
cls._get_bound_repr(bound),
keyfunc_name,
)
return "{}.{}[{}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
cls._get_bound_repr(bound),
)
@staticmethod
def _get_bound_repr(bound):
# type: (slice) -> str
"""Return a string representation of a boundary slice.
Args:
bound: A slice object.
Returns:
A string representing the slice.
"""
return "{}:{}".format(bound.start or "", bound.stop or "")
@staticmethod
def _identity(obj):
# type: (Any) -> Any
"""Return the given object.
Args:
obj: An object.
Returns:
The given object.
"""
return obj
_default = _identity # type: Callable[[Any], Any]
@staticmethod
def _get_fullname(obj):
# type: (Any) -> str
"""Get the full name of an object including the module.
Args:
obj: An object.
Returns:
The full class name of the object.
"""
if not hasattr(obj, "__name__"):
obj = obj.__class__
if obj.__module__ in ("builtins", "__builtin__"):
return obj.__name__
return "{}.{}".format(obj.__module__, obj.__name__)
|
contains-io/typet | typet/validation.py | _BoundedMeta._get_fullname | python | def _get_fullname(obj):
# type: (Any) -> str
if not hasattr(obj, "__name__"):
obj = obj.__class__
if obj.__module__ in ("builtins", "__builtin__"):
return obj.__name__
return "{}.{}".format(obj.__module__, obj.__name__) | Get the full name of an object including the module.
Args:
obj: An object.
Returns:
The full class name of the object. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/validation.py#L297-L311 | null | class _BoundedMeta(Uninstantiable):
"""A metaclass that adds slicing to a class that creates new classes."""
def __getitem__(cls, args):
# type: (Union[Tuple[_T, Any], Tuple[_T, Any, Callable]]) -> type
"""Create a new subclass of a type bounded by the arguments.
If a callable is passed as the third argument of the slice, it will be
used as the comparison function for the boundaries.
Args:
args: A tuple with two or three parameters: a type, a slice
representing the minimum and maximum lengths allowed for values
of that type and, optionally, a function to use on values
before comparing against the bounds.
"""
type_, bound, keyfunc = cls._get_args(args)
keyfunc_name = cls._get_fullname(keyfunc)
identity = cls._identity
BaseClass, MetaClass = cls._get_bases(type_)
instantiate = cls._instantiate
@six.add_metaclass(MetaClass) # type: ignore
class _BoundedSubclass(BaseClass): # type: ignore
"""A subclass of type_ or object, bounded by a slice."""
def __new__(cls, __value, *args, **kwargs):
# type: (Type[_BoundedSubclass], Any, *Any, **Any) -> type
"""Return __value cast to _T.
Any additional arguments are passed as-is to the constructor.
Args:
__value: A value that can be converted to type _T.
args: Any additional positional arguments passed to the
constructor.
kwargs: Any additional keyword arguments passed to the
constructor.
"""
instance = instantiate(
BaseClass, type_, __value, *args, **kwargs
)
cmp_val = keyfunc(instance)
if bound.start is not None or bound.stop is not None:
if bound.start is not None and cmp_val < bound.start:
if keyfunc is not identity:
raise ValueError(
"The value of {}({}) [{}] is below the minimum"
" allowed value of {}.".format(
keyfunc_name,
repr(__value),
repr(cmp_val),
bound.start,
)
)
raise ValueError(
"The value {} is below the minimum allowed value "
"of {}.".format(repr(__value), bound.start)
)
if bound.stop is not None and cmp_val > bound.stop:
if keyfunc is not identity:
raise ValueError(
"The value of {}({}) [{}] is above the maximum"
" allowed value of {}.".format(
keyfunc_name,
repr(__value),
repr(cmp_val),
bound.stop,
)
)
raise ValueError(
"The value {} is above the maximum allowed value "
"of {}.".format(repr(__value), bound.stop)
)
elif not cmp_val:
raise ValueError(
"{}({}) is False".format(keyfunc_name, repr(instance))
)
return instance
_BoundedSubclass.__type__ = type_
_BoundedSubclass.__class_repr__ = cls._get_class_repr(
type_, bound, keyfunc, keyfunc_name
)
return _BoundedSubclass
@staticmethod
def _get_bases(type_):
# type: (type) -> Tuple[type, type]
"""Get the base and meta classes to use in creating a subclass.
Args:
type_: The type to subclass.
Returns:
A tuple containing two values: a base class, and a metaclass.
"""
try:
class _(type_): # type: ignore
"""Check if type_ is subclassable."""
BaseClass = type_
except TypeError:
BaseClass = object
class MetaClass(_ValidationMeta, BaseClass.__class__): # type: ignore
"""Use the type_ meta and include base validation functionality."""
return BaseClass, MetaClass
@staticmethod
def _instantiate(class_, type_, __value, *args, **kwargs):
"""Instantiate the object if possible.
Args:
class_: The class to instantiate.
type_: The the class is uninstantiable, attempt to cast to a base
type.
__value: The value to return if the class and type are
uninstantiable.
*args: The positional arguments to pass to the class.
**kwargs: The keyword arguments to pass to the class.
Returns:
The class or base type instantiated using the arguments. If it is
not possible to instantiate either, returns __value.
"""
try:
return class_(__value, *args, **kwargs)
except TypeError:
try:
return type_(__value, *args, **kwargs)
except Exception: # pylint: disable=broad-except
return __value
def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):
# type: (Any, slice, Callable, str) -> str
"""Return a class representation using the slice parameters.
Args:
type_: The type the class was sliced with.
bound: The boundaries specified for the values of type_.
keyfunc: The comparison function used to check the value
boundaries.
keyfunc_name: The name of keyfunc.
Returns:
A string representing the class.
"""
if keyfunc is not cls._default:
return "{}.{}[{}, {}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
cls._get_bound_repr(bound),
keyfunc_name,
)
return "{}.{}[{}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
cls._get_bound_repr(bound),
)
def _get_args(cls, args):
# type: (tuple) -> Tuple[Any, slice, Callable]
"""Return the parameters necessary to check type boundaries.
Args:
args: A tuple with two or three elements: a type, a slice
representing the minimum and maximum lengths allowed for values
of that type and, optionally, a function to use on values
before comparing against the bounds.
Returns:
A tuple with three elements: a type, a slice, and a function to
apply to objects of the given type. If no function was specified,
it returns the identity function.
"""
if not isinstance(args, tuple):
raise TypeError(
"{}[...] takes two or three arguments.".format(cls.__name__)
)
elif len(args) == 2:
type_, bound = args
keyfunc = cls._identity
elif len(args) == 3:
type_, bound, keyfunc = args
else:
raise TypeError(
"Too many parameters given to {}[...]".format(cls.__name__)
)
if not isinstance(bound, slice):
bound = slice(bound)
return eval_type(type_), bound, keyfunc
@staticmethod
def _get_bound_repr(bound):
# type: (slice) -> str
"""Return a string representation of a boundary slice.
Args:
bound: A slice object.
Returns:
A string representing the slice.
"""
return "{}:{}".format(bound.start or "", bound.stop or "")
@staticmethod
def _identity(obj):
# type: (Any) -> Any
"""Return the given object.
Args:
obj: An object.
Returns:
The given object.
"""
return obj
_default = _identity # type: Callable[[Any], Any]
@staticmethod
|
contains-io/typet | typet/validation.py | _LengthBoundedMeta._get_args | python | def _get_args(cls, args):
# type: (tuple) -> Tuple[type, slice, Callable]
if not isinstance(args, tuple) or not len(args) == 2:
raise TypeError(
"{}[...] takes exactly two arguments.".format(cls.__name__)
)
return super(_LengthBoundedMeta, cls)._get_args(args + (len,)) | Return the parameters necessary to check type boundaries.
Args:
args: A tuple with two parameters: a type, and a slice representing
the minimum and maximum lengths allowed for values of that
type.
Returns:
A tuple with three parameters: a type, a slice, and the len
function. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/validation.py#L343-L360 | null | class _LengthBoundedMeta(_BoundedMeta):
"""A metaclass that bounds a type with the len function."""
_default = len
|
contains-io/typet | typet/validation.py | _ValidationBoundedMeta._get_args | python | def _get_args(cls, args):
# type: (tuple) -> Tuple[type, slice, Callable]
if isinstance(args, tuple):
if not len(args) == 2:
raise TypeError(
"{}[...] takes one or two argument.".format(cls.__name__)
)
return super(_ValidationBoundedMeta, cls)._get_args(
(args[0], None, args[1])
)
return super(_ValidationBoundedMeta, cls)._get_args((Any, None, args)) | Return the parameters necessary to check type boundaries.
Args:
args: A tuple with one or two parameters: A type to cast the
value passed, and a predicate function to use for bounds
checking.
Returns:
A tuple with three parameters: a type, a slice, and the predicate
function. If no type was passed in args, the type defaults to Any. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/validation.py#L386-L407 | null | class _ValidationBoundedMeta(_BoundedMeta):
"""A metaclass that binds a type to a validation method."""
def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):
# type: (Any, slice, Callable, str) -> str
"""Return a class representation using the slice parameters.
Args:
type_: The type the class was sliced with.
bound: The boundaries specified for the values of type_.
keyfunc: The comparison function used to check the value
boundaries.
keyfunc_name: The name of keyfunc.
Returns:
A string representing the class.
"""
if type_ is not Any:
return "{}.{}[{}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
keyfunc_name,
)
return "{}.{}[{}]".format(cls.__module__, cls.__name__, keyfunc_name)
|
contains-io/typet | typet/validation.py | _ValidationBoundedMeta._get_class_repr | python | def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):
# type: (Any, slice, Callable, str) -> str
if type_ is not Any:
return "{}.{}[{}, {}]".format(
cls.__module__,
cls.__name__,
cls._get_fullname(type_),
keyfunc_name,
)
return "{}.{}[{}]".format(cls.__module__, cls.__name__, keyfunc_name) | Return a class representation using the slice parameters.
Args:
type_: The type the class was sliced with.
bound: The boundaries specified for the values of type_.
keyfunc: The comparison function used to check the value
boundaries.
keyfunc_name: The name of keyfunc.
Returns:
A string representing the class. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/validation.py#L409-L430 | null | class _ValidationBoundedMeta(_BoundedMeta):
"""A metaclass that binds a type to a validation method."""
def _get_args(cls, args):
# type: (tuple) -> Tuple[type, slice, Callable]
"""Return the parameters necessary to check type boundaries.
Args:
args: A tuple with one or two parameters: A type to cast the
value passed, and a predicate function to use for bounds
checking.
Returns:
A tuple with three parameters: a type, a slice, and the predicate
function. If no type was passed in args, the type defaults to Any.
"""
if isinstance(args, tuple):
if not len(args) == 2:
raise TypeError(
"{}[...] takes one or two argument.".format(cls.__name__)
)
return super(_ValidationBoundedMeta, cls)._get_args(
(args[0], None, args[1])
)
return super(_ValidationBoundedMeta, cls)._get_args((Any, None, args))
|
contains-io/typet | typet/validation.py | _StringMeta._get_args | python | def _get_args(cls, args):
# type: (tuple) -> Tuple[type, slice, Callable]
if isinstance(args, tuple):
raise TypeError(
"{}[...] takes exactly one argument.".format(cls.__name__)
)
return super(_StringMeta, cls)._get_args((_STR_TYPE, args)) | Return the parameters necessary to check type boundaries.
Args:
args: A slice representing the minimum and maximum lengths allowed
for values of that string.
Returns:
A tuple with three parameters: a type, a slice, and the len
function. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/validation.py#L462-L478 | null | class _StringMeta(_LengthBoundedMeta):
"""A metaclass that binds a string to a length bound."""
def __call__(cls, *args, **kwargs):
"""Instantiate a string object."""
return _STR_TYPE(*args, **kwargs)
def __instancecheck__(self, other):
# type: (Any) -> bool
"""Determine if an instance is of the string type.
Args:
other: The instance to test.
Returns:
True if the object is both of the same type as the String;
otherwise, False,
"""
return isinstance(other, _STR_TYPE)
def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):
# type: (Any, slice, Callable, str) -> str
"""Return a class representation using the slice parameters.
Args:
type_: The type the class was sliced with. This will always be
_STR_TYPE.
bound: The boundaries specified for the values of type_.
keyfunc: The comparison function used to check the value
boundaries. This will always be builtins.len().
keyfunc_name: The name of keyfunc. This will always be 'len'.
Returns:
A string representing the class.
"""
return "{}.{}[{}]".format(
cls.__module__, cls.__name__, cls._get_bound_repr(bound)
)
|
contains-io/typet | typet/validation.py | _StringMeta._get_class_repr | python | def _get_class_repr(cls, type_, bound, keyfunc, keyfunc_name):
# type: (Any, slice, Callable, str) -> str
return "{}.{}[{}]".format(
cls.__module__, cls.__name__, cls._get_bound_repr(bound)
) | Return a class representation using the slice parameters.
Args:
type_: The type the class was sliced with. This will always be
_STR_TYPE.
bound: The boundaries specified for the values of type_.
keyfunc: The comparison function used to check the value
boundaries. This will always be builtins.len().
keyfunc_name: The name of keyfunc. This will always be 'len'.
Returns:
A string representing the class. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/validation.py#L480-L497 | null | class _StringMeta(_LengthBoundedMeta):
"""A metaclass that binds a string to a length bound."""
def __call__(cls, *args, **kwargs):
"""Instantiate a string object."""
return _STR_TYPE(*args, **kwargs)
def __instancecheck__(self, other):
# type: (Any) -> bool
"""Determine if an instance is of the string type.
Args:
other: The instance to test.
Returns:
True if the object is both of the same type as the String;
otherwise, False,
"""
return isinstance(other, _STR_TYPE)
def _get_args(cls, args):
# type: (tuple) -> Tuple[type, slice, Callable]
"""Return the parameters necessary to check type boundaries.
Args:
args: A slice representing the minimum and maximum lengths allowed
for values of that string.
Returns:
A tuple with three parameters: a type, a slice, and the len
function.
"""
if isinstance(args, tuple):
raise TypeError(
"{}[...] takes exactly one argument.".format(cls.__name__)
)
return super(_StringMeta, cls)._get_args((_STR_TYPE, args))
|
contains-io/typet | typet/objects.py | _get_type_name | python | def _get_type_name(type_):
# type: (type) -> str
name = repr(type_)
if name.startswith("<"):
name = getattr(type_, "__qualname__", getattr(type_, "__name__", ""))
return name.rsplit(".", 1)[-1] or repr(type_) | Return a displayable name for the type.
Args:
type_: A class object.
Returns:
A string value describing the class name that can be used in a natural
language sentence. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/objects.py#L46-L60 | null | # -*- coding: utf-8 -*-
"""A module containing base objects for creating typed classes.
Classes:
BaseStrictObject: An object that asserts all annotated attributes are of
the correct type.
StrictObject: A derivative of BaseStrictObject that implements the default
comparison operators and hash.
BaseObject: An object that coerces all annotated attributes to the
correct type.
Object: A derivative of BaseObject that implements the default
comparison operators and hash.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import inspect
import re
import tokenize
import types
import six
from typingplus import ( # noqa: F401 pylint: disable=unused-import
cast,
get_type_hints,
is_instance,
Any,
Callable,
Dict,
List,
Optional,
Type,
TypeVar,
)
from .types import NoneType # pylint: disable=redefined-builtin
__all__ = ("BaseStrictObject", "StrictObject", "BaseObject", "Object")
_T = TypeVar("_T")
def _get_class_frame_source(class_name):
# type: (str) -> Optional[str]
"""Return the source code for a class by checking the frame stack.
This is necessary because it is not possible to get the source of a class
being created by a metaclass directly.
Args:
class_name: The class to look for on the stack.
Returns:
The source code for the requested class if the class was found and the
source was accessible.
"""
for frame_info in inspect.stack():
try:
with open(frame_info[1]) as fp:
src = "".join(fp.readlines()[frame_info[2] - 1 :])
except IOError:
continue
if re.search(r"\bclass\b\s+\b{}\b".format(class_name), src):
reader = six.StringIO(src).readline
tokens = tokenize.generate_tokens(reader)
source_tokens = []
indent_level = 0
base_indent_level = 0
has_base_level = False
for token, value, _, _, _ in tokens: # type: ignore
source_tokens.append((token, value))
if token == tokenize.INDENT:
indent_level += 1
elif token == tokenize.DEDENT:
indent_level -= 1
if has_base_level and indent_level <= base_indent_level:
return (
tokenize.untokenize(source_tokens),
frame_info[0].f_globals,
frame_info[0].f_locals,
)
elif not has_base_level:
has_base_level = True
base_indent_level = indent_level
raise TypeError(
'Unable to retrieve source for class "{}"'.format(class_name)
)
def _is_propertyable(
names, # type: List[str]
attrs, # type: Dict[str, Any]
annotations, # type: Dict[str, type]
attr, # Dict[str, Any]
):
# type: (...) -> bool
"""Determine if an attribute can be replaced with a property.
Args:
names: The complete list of all attribute names for the class.
attrs: The attribute dict returned by __prepare__.
annotations: A mapping of all defined annotations for the class.
attr: The attribute to test.
Returns:
True if the attribute can be replaced with a property; else False.
"""
return (
attr in annotations
and not attr.startswith("_")
and not attr.isupper()
and "__{}".format(attr) not in names
and not isinstance(getattr(attrs, attr, None), types.MethodType)
)
def _create_typed_object_meta(get_fset):
    # type: (Callable[[str, str, Type[_T]], Callable[[_T], None]]) -> type
    """Create a metaclass for typed objects.

    Args:
        get_fset: A function that takes three parameters: the name of an
            attribute, the name of the private attribute that holds the
            property data, and a type. This function must return an object
            method that accepts a value.

    Returns:
        A metaclass that reads annotations from a class definition and creates
        properties for annotated, public, non-constant, non-method attributes
        that will guarantee the type of the stored value matches the
        annotation.
    """

    def _get_fget(attr, private_attr, type_):
        # type: (str, str, Type[_T]) -> Callable[[], Any]
        """Create a property getter method for an attribute.

        Args:
            attr: The name of the attribute that will be retrieved.
            private_attr: The name of the attribute that will store any data
                related to the attribute.
            type_: The annotated type defining what values can be stored in
                the attribute.

        Returns:
            A function that takes self and retrieves the private attribute
            from self.
        """

        def _fget(self):
            # type: (...) -> Any
            """Get attribute from self without revealing the private name."""
            try:
                return getattr(self, private_attr)
            except AttributeError:
                # Re-raise with the public attribute name so the failure
                # reads like a normal missing-attribute error.
                raise AttributeError(
                    "'{}' object has no attribute '{}'".format(
                        _get_type_name(type_), attr
                    )
                )

        return _fget

    class _AnnotatedObjectMeta(type):
        """A metaclass that reads annotations from a class definition."""

        def __new__(
            mcs,  # type: Type[_AnnotatedObjectMeta]
            name,  # type: str
            bases,  # type: List[type]
            attrs,  # type: Dict[str, Any]
            **kwargs  # type: Dict[str, Any]
        ):
            # type: (...) -> type
            """Create class objs that replaces annotated attrs with properties.

            Args:
                mcs: The class object being created.
                name: The name of the class to create.
                bases: The list of all base classes for the new class.
                attrs: The list of all attributes for the new class from the
                    definition.

            Returns:
                A new class instance with the expected base classes and
                attributes, but with annotated, public, non-constant,
                non-method attributes replaced by property objects that
                validate against the annotated type.
            """
            annotations = attrs.get("__annotations__", {})
            # Classes defined outside this module with no PEP 526
            # annotations may still carry comment-style type hints; those
            # require re-reading the class source from the frame stack.
            use_comment_type_hints = (
                not annotations and attrs.get("__module__") != __name__
            )
            if use_comment_type_hints:
                frame_source = _get_class_frame_source(name)
                annotations = get_type_hints(*frame_source)
            names = list(attrs) + list(annotations)
            typed_attrs = {}
            for attr in names:
                typed_attrs[attr] = attrs.get(attr)
                if _is_propertyable(names, attrs, annotations, attr):
                    private_attr = "__{}".format(attr)
                    if attr in attrs:
                        # Preserve the class-level default as the initial
                        # value of the private backing attribute.
                        typed_attrs[private_attr] = attrs[attr]
                    # A default of None on an explicitly annotated attribute
                    # implicitly widens the annotation to Optional.
                    type_ = (
                        Optional[annotations[attr]]
                        if not use_comment_type_hints
                        and attr in attrs
                        and attrs[attr] is None
                        else annotations[attr]
                    )
                    typed_attrs[attr] = property(
                        _get_fget(attr, private_attr, type_),
                        get_fset(attr, private_attr, type_),
                    )
            properties = [
                attr
                for attr in annotations
                if _is_propertyable(names, attrs, annotations, attr)
            ]
            typed_attrs["_tp__typed_properties"] = properties
            # Note the operator precedence below: "or" binds looser than
            # "and", so an attribute is required in __init__ when it has no
            # default at all, or when its default is None and came from
            # comment-style hints; an Optional annotation (NoneType present
            # in __args__) is never required.
            typed_attrs["_tp__required_typed_properties"] = [
                attr
                for attr in properties
                if (
                    attr not in attrs
                    or attrs[attr] is None
                    and use_comment_type_hints
                )
                and NoneType not in getattr(annotations[attr], "__args__", ())
            ]
            return super(_AnnotatedObjectMeta, mcs).__new__(  # type: ignore
                mcs, name, bases, typed_attrs, **kwargs
            )

    return _AnnotatedObjectMeta
def _strict_object_meta_fset(_, private_attr, type_):
    # type: (str, str, Type[_T]) -> Callable[[_T], None]
    """Build a strict property setter for an annotated attribute.

    Args:
        _: The name of the attribute to set. Unused.
        private_attr: The name of the attribute that will store any data
            related to the attribute.
        type_: The annotated type defining what values can be stored in the
            attribute.

    Returns:
        A method that takes self and a value and stores that value on self
        in the private attribute iff the value is an instance of type_.
    """

    def _fset(self, value):
        # type: (Any) -> None
        """Store the value on self only when it matches the annotation.

        Args:
            value: The value to set.

        Raises:
            TypeError: Raised when the value is not an instance of type_.
        """
        expected = type_
        if isinstance(type_, TypeVar):
            # Resolve the type variable against the concrete arguments of
            # the generic instantiation recorded on __orig_class__.
            resolved = dict(
                zip(self.__parameters__, self.__orig_class__.__args__)
            )
            expected = resolved[type_]
        if not is_instance(value, expected):
            raise TypeError(
                "Cannot assign type of {} to attribute of type {}.".format(
                    _get_type_name(type(value)), _get_type_name(expected)
                )
            )
        vars(self)[private_attr] = value

    return _fset


_StrictObjectMeta = _create_typed_object_meta(_strict_object_meta_fset)
def _object_meta_fset(_, private_attr, type_):
    # type: (str, str, Type[_T]) -> Callable[[_T], None]
    """Build a coercing property setter for an annotated attribute.

    Args:
        _: The name of the attribute to set. Unused.
        private_attr: The name of the attribute that will store any data
            related to the attribute.
        type_: The annotated type defining what values can be stored in the
            attribute.

    Returns:
        A method that takes self and a value and stores that value on self
        in the private attribute, coercing it to type_ when it is not
        already an instance of type_.
    """

    def _fset(self, value):
        # type: (Any) -> None
        """Coerce the value to the annotated type and store it on self.

        Args:
            value: The value to set.

        Raises:
            TypeError: Raised when the value is not an instance of type_
                and cannot be cast into a compatible object of type_.
        """
        target = type_
        if isinstance(type_, TypeVar):
            # Map the type variable to the concrete type supplied when the
            # generic class was parameterized.
            target = dict(
                zip(self.__parameters__, self.__orig_class__.__args__)
            )[type_]
        vars(self)[private_attr] = cast(target, value)

    return _fset


_ObjectMeta = _create_typed_object_meta(_object_meta_fset)
class _BaseAnnotatedObject(object):
"""A base class that looks for class attributes to create __init__."""
def __init__(self, *args, **kwargs):
"""Set all attributes according to their annotation status."""
super(_BaseAnnotatedObject, self).__init__()
properties = self._tp__typed_properties
required = self._tp__required_typed_properties
positionals = zip(properties, args)
for attr, value in positionals:
if attr in kwargs:
raise TypeError(
"__init__() got multiple values for argument '{}'".format(
attr
)
)
kwargs[attr] = value
missing = [attr for attr in required if attr not in kwargs]
if missing:
num_missing = len(missing)
if num_missing > 1:
args = ", ".join("'{}'".format(m) for m in missing[:-1])
if num_missing > 2:
args += ","
args += " and '{}'".format(missing[-1])
else:
args = "'{}'".format(missing[0])
raise TypeError(
"__init__() missing {} required argument{}: {}".format(
num_missing, "s" if num_missing > 1 else "", args
)
)
for attr, value in six.iteritems(kwargs):
if attr in properties:
setattr(self, attr, value)
def __repr__(self):
# type: () -> str
"""Return a Python readable representation of the class."""
return "{}({})".format(
self.__class__.__name__,
", ".join(
"{}={}".format(attr_name, repr(getattr(self, attr_name)))
for attr_name in self._tp__typed_properties
),
) # type: ignore
class _AnnotatedObjectComparisonMixin(object):
"""A mixin to add comparisons to classes made by _AnnotatedObjectMeta."""
def _tp__get_typed_properties(self):
"""Return a tuple of typed attrs that can be used for comparisons.
Raises:
NotImplementedError: Raised if this class was mixed into a class
that was not created by _AnnotatedObjectMeta.
"""
try:
return tuple(getattr(self, p) for p in self._tp__typed_properties)
except AttributeError:
raise NotImplementedError
def __eq__(self, other):
"""Test if two objects of the same base class are equal.
If the objects are not of the same class, Python will default to
comparison-by-ID.
Args:
other: The object to compare for equality.
Returns:
True if the objects are equal; else False.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return (
self._tp__get_typed_properties()
== other._tp__get_typed_properties()
)
def __ne__(self, other):
"""Test if two objects of the same class are not equal.
If the objects are not of the same class, Python will default to
comparison-by-ID.
Args:
other: The object to compare for non-equality.
Returns:
True if the objects are not equal; else False.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self == other
def __lt__(self, other):
"""Test if self is less than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is less than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return (
self._tp__get_typed_properties()
< other._tp__get_typed_properties()
)
def __le__(self, other):
"""Test if self is less than or equal an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is less than or equal other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return self == other or self < other
def __gt__(self, other):
"""Test if self is greater than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self <= other
def __ge__(self, other):
"""Test if self is greater than or equal an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than or equal to other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self < other
def __hash__(self):
"""Generate a hash for the object based on the annotated attrs."""
return hash(self._tp__get_typed_properties())
@six.add_metaclass(_StrictObjectMeta)  # type: ignore
class BaseStrictObject(_BaseAnnotatedObject):
    """A base class to create instance attrs for annotated class attrs.

    For every class attribute that is annotated, public, and not constant in
    the subclasses, this base class will generate property objects for the
    instances that will enforce the type of the value set.

    If the subclass does not define __init__, a default implementation will be
    generated that takes all of the annotated, public, non-constant attributes
    as parameters. If an annotated attribute is not defined, it will be
    required in __init__.

    Unlike StrictObject, this class does not provide comparison operators or
    hashing.

    >>> from typet import BaseStrictObject
    >>> class Point(BaseStrictObject):
    ...     x: int
    ...     y: int
    ...
    ...
    >>> p = Point(0, 0)
    >>> p.x
    0
    >>> p.x = '0'
    Traceback (most recent call last):
        ...
    TypeError: Cannot assign type of str to attribute of type int.
    """
class StrictObject(BaseStrictObject, _AnnotatedObjectComparisonMixin):
    """A base class to create instance attrs for annotated class attrs.

    For every class attribute that is annotated, public, and not constant in
    the subclasses, this base class will generate property objects for the
    instances that will enforce the type of the value set.

    If the subclass does not define __init__, a default implementation will be
    generated that takes all of the annotated, public, non-constant attributes
    as parameters. If an annotated attribute is not defined, it will be
    required in __init__.

    Additionally, this class implements basic comparison operators and the
    hash function via _AnnotatedObjectComparisonMixin.

    >>> from typet import StrictObject
    >>> class Point(StrictObject):
    ...     x: int
    ...     y: int
    ...
    ...
    >>> p = Point(0, 0)
    >>> p.x
    0
    >>> p.x = '0'
    Traceback (most recent call last):
        ...
    TypeError: Cannot assign type of str to attribute of type int.
    >>> p2 = Point(2, 2)
    >>> p < p2
    True
    >>> p > p2
    False
    """
@six.add_metaclass(_ObjectMeta)  # type: ignore
class BaseObject(_BaseAnnotatedObject):
    """A base class to create instance attrs for annotated class attrs.

    For every class attribute that is annotated, public, and not constant in
    the subclasses, this base class will generate property objects for the
    instances that will enforce the type of the value set by attempting to
    cast the given value to the set type.

    If the subclass does not define __init__, a default implementation will be
    generated that takes all of the annotated, public, non-constant attributes
    as parameters. If an annotated attribute is not defined, it will be
    required in __init__.

    Unlike Object, this class does not provide comparison operators or
    hashing.

    >>> from typet import BaseObject
    >>> class Point(BaseObject):
    ...     x: int
    ...     y: int
    ...
    ...
    >>> p = Point(0, 0)
    >>> p.x
    0
    >>> p.x = '5'
    >>> p.x
    5
    >>> p.x = 'five'
    Traceback (most recent call last):
        ...
    TypeError: Cannot convert 'five' to int.
    """
class Object(BaseObject, _AnnotatedObjectComparisonMixin):
    """A base class to create instance attrs for annotated class attrs.

    For every class attribute that is annotated, public, and not constant in
    the subclasses, this base class will generate property objects for the
    instances that will enforce the type of the value set by attempting to
    cast the given value to the set type.

    If the subclass does not define __init__, a default implementation will be
    generated that takes all of the annotated, public, non-constant attributes
    as parameters. If an annotated attribute is not defined, it will be
    required in __init__.

    Additionally, this class implements basic comparison operators and the
    hash function via _AnnotatedObjectComparisonMixin.

    >>> from typet import Object
    >>> class Point(Object):
    ...     x: int
    ...     y: int
    ...
    ...
    >>> p = Point(0, 0)
    >>> p.x
    0
    >>> p.x = '5'
    >>> p.x
    5
    >>> p.x = 'five'
    Traceback (most recent call last):
        ...
    TypeError: Cannot convert 'five' to int.
    >>> p2 = Point(2, 2)
    >>> p < p2
    True
    >>> p > p2
    False
    """
|
contains-io/typet | typet/objects.py | _get_class_frame_source | python | def _get_class_frame_source(class_name):
# type: (str) -> Optional[str]
for frame_info in inspect.stack():
try:
with open(frame_info[1]) as fp:
src = "".join(fp.readlines()[frame_info[2] - 1 :])
except IOError:
continue
if re.search(r"\bclass\b\s+\b{}\b".format(class_name), src):
reader = six.StringIO(src).readline
tokens = tokenize.generate_tokens(reader)
source_tokens = []
indent_level = 0
base_indent_level = 0
has_base_level = False
for token, value, _, _, _ in tokens: # type: ignore
source_tokens.append((token, value))
if token == tokenize.INDENT:
indent_level += 1
elif token == tokenize.DEDENT:
indent_level -= 1
if has_base_level and indent_level <= base_indent_level:
return (
tokenize.untokenize(source_tokens),
frame_info[0].f_globals,
frame_info[0].f_locals,
)
elif not has_base_level:
has_base_level = True
base_indent_level = indent_level
raise TypeError(
'Unable to retrieve source for class "{}"'.format(class_name)
) | Return the source code for a class by checking the frame stack.
This is necessary because it is not possible to get the source of a class
being created by a metaclass directly.
Args:
class_name: The class to look for on the stack.
Returns:
The source code for the requested class if the class was found and the
source was accessible. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/objects.py#L63-L107 | null | # -*- coding: utf-8 -*-
"""A module containing base objects for creating typed classes.
Classes:
BaseStrictObject: An object that asserts all annotated attributes are of
the correct type.
StrictObject: A derivative of BaseStrictObject that implements the default
comparison operators and hash.
BaseObject: An object that coerces all annotated attributes to the
correct type.
Object: A derivative of BaseObject that implements the default
comparison operators and hash.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import inspect
import re
import tokenize
import types
import six
from typingplus import ( # noqa: F401 pylint: disable=unused-import
cast,
get_type_hints,
is_instance,
Any,
Callable,
Dict,
List,
Optional,
Type,
TypeVar,
)
from .types import NoneType # pylint: disable=redefined-builtin
__all__ = ("BaseStrictObject", "StrictObject", "BaseObject", "Object")
_T = TypeVar("_T")
def _get_type_name(type_):
# type: (type) -> str
"""Return a displayable name for the type.
Args:
type_: A class object.
Returns:
A string value describing the class name that can be used in a natural
language sentence.
"""
name = repr(type_)
if name.startswith("<"):
name = getattr(type_, "__qualname__", getattr(type_, "__name__", ""))
return name.rsplit(".", 1)[-1] or repr(type_)
def _is_propertyable(
names, # type: List[str]
attrs, # type: Dict[str, Any]
annotations, # type: Dict[str, type]
attr, # Dict[str, Any]
):
# type: (...) -> bool
"""Determine if an attribute can be replaced with a property.
Args:
names: The complete list of all attribute names for the class.
attrs: The attribute dict returned by __prepare__.
annotations: A mapping of all defined annotations for the class.
attr: The attribute to test.
Returns:
True if the attribute can be replaced with a property; else False.
"""
return (
attr in annotations
and not attr.startswith("_")
and not attr.isupper()
and "__{}".format(attr) not in names
and not isinstance(getattr(attrs, attr, None), types.MethodType)
)
def _create_typed_object_meta(get_fset):
# type: (Callable[[str, str, Type[_T]], Callable[[_T], None]]) -> type
"""Create a metaclass for typed objects.
Args:
get_fset: A function that takes three parameters: the name of an
attribute, the name of the private attribute that holds the
property data, and a type. This function must an object method that
accepts a value.
Returns:
A metaclass that reads annotations from a class definition and creates
properties for annotated, public, non-constant, non-method attributes
that will guarantee the type of the stored value matches the
annotation.
"""
def _get_fget(attr, private_attr, type_):
# type: (str, str, Type[_T]) -> Callable[[], Any]
"""Create a property getter method for an attribute.
Args:
attr: The name of the attribute that will be retrieved.
private_attr: The name of the attribute that will store any data
related to the attribute.
type_: The annotated type defining what values can be stored in the
attribute.
Returns:
A function that takes self and retrieves the private attribute from
self.
"""
def _fget(self):
# type: (...) -> Any
"""Get attribute from self without revealing the private name."""
try:
return getattr(self, private_attr)
except AttributeError:
raise AttributeError(
"'{}' object has no attribute '{}'".format(
_get_type_name(type_), attr
)
)
return _fget
class _AnnotatedObjectMeta(type):
"""A metaclass that reads annotations from a class definition."""
def __new__(
mcs, # type: Type[_AnnotatedObjectMeta]
name, # type: str
bases, # type: List[type]
attrs, # type: Dict[str, Any]
**kwargs # type: Dict[str, Any]
):
# type: (...) -> type
"""Create class objs that replaces annotated attrs with properties.
Args:
mcs: The class object being created.
name: The name of the class to create.
bases: The list of all base classes for the new class.
attrs: The list of all attributes for the new class from the
definition.
Returns:
A new class instance with the expected base classes and
attributes, but with annotated, public, non-constant,
non-method attributes replaced by property objects that
validate against the annotated type.
"""
annotations = attrs.get("__annotations__", {})
use_comment_type_hints = (
not annotations and attrs.get("__module__") != __name__
)
if use_comment_type_hints:
frame_source = _get_class_frame_source(name)
annotations = get_type_hints(*frame_source)
names = list(attrs) + list(annotations)
typed_attrs = {}
for attr in names:
typed_attrs[attr] = attrs.get(attr)
if _is_propertyable(names, attrs, annotations, attr):
private_attr = "__{}".format(attr)
if attr in attrs:
typed_attrs[private_attr] = attrs[attr]
type_ = (
Optional[annotations[attr]]
if not use_comment_type_hints
and attr in attrs
and attrs[attr] is None
else annotations[attr]
)
typed_attrs[attr] = property(
_get_fget(attr, private_attr, type_),
get_fset(attr, private_attr, type_),
)
properties = [
attr
for attr in annotations
if _is_propertyable(names, attrs, annotations, attr)
]
typed_attrs["_tp__typed_properties"] = properties
typed_attrs["_tp__required_typed_properties"] = [
attr
for attr in properties
if (
attr not in attrs
or attrs[attr] is None
and use_comment_type_hints
)
and NoneType not in getattr(annotations[attr], "__args__", ())
]
return super(_AnnotatedObjectMeta, mcs).__new__( # type: ignore
mcs, name, bases, typed_attrs, **kwargs
)
return _AnnotatedObjectMeta
def _strict_object_meta_fset(_, private_attr, type_):
# type: (str, str, Type[_T]) -> Callable[[_T], None]
"""Create a property setter method for the attribute.
Args:
_: The name of the attribute to set. Unused.
private_attr: The name of the attribute that will store any data
related to the attribute.
type_: The annotated type defining what values can be stored in the
attribute.
Returns:
A method that takes self and a value and stores that value on self
in the private attribute iff the value is an instance of type_.
"""
def _fset(self, value): # type: Any
# type: (...) -> None
"""Set the value on self iff the value is an instance of type_.
Args:
value: The value to set.
Raises:
TypeError: Raised when the value is not an instance of type_.
"""
rtype = type_
if isinstance(type_, TypeVar):
type_map = dict(
zip(self.__parameters__, self.__orig_class__.__args__)
)
rtype = type_map[type_]
if not is_instance(value, rtype):
raise TypeError(
"Cannot assign type of {} to attribute of type {}.".format(
_get_type_name(type(value)), _get_type_name(rtype)
)
)
vars(self)[private_attr] = value
return _fset
_StrictObjectMeta = _create_typed_object_meta(_strict_object_meta_fset)
def _object_meta_fset(_, private_attr, type_):
# type: (str, str, Type[_T]) -> Callable[[_T], None]
"""Create a property setter method for the attribute.
Args:
_: The name of the attribute to set. Unused.
private_attr: The name of the attribute that will store any data
related to the attribute.
type_: The annotated type defining what values can be stored in the
attribute.
Returns:
A method that takes self and a value and stores that value on self
in the private attribute if the value is not an instance of type_
and cannot be cast into type_.
"""
def _fset(self, value): # type: Any
# type: (...) -> None
"""Set the value on self and coerce it to type_ if necessary.
Args:
value: The value to set.
Raises:
TypeError: Raised when the value is not an instance of type_
and cannot be cast into a compatible object of type_.
"""
rtype = type_
if isinstance(type_, TypeVar):
type_map = dict(
zip(self.__parameters__, self.__orig_class__.__args__)
)
rtype = type_map[type_]
vars(self)[private_attr] = cast(rtype, value)
return _fset
_ObjectMeta = _create_typed_object_meta(_object_meta_fset)
class _BaseAnnotatedObject(object):
"""A base class that looks for class attributes to create __init__."""
def __init__(self, *args, **kwargs):
"""Set all attributes according to their annotation status."""
super(_BaseAnnotatedObject, self).__init__()
properties = self._tp__typed_properties
required = self._tp__required_typed_properties
positionals = zip(properties, args)
for attr, value in positionals:
if attr in kwargs:
raise TypeError(
"__init__() got multiple values for argument '{}'".format(
attr
)
)
kwargs[attr] = value
missing = [attr for attr in required if attr not in kwargs]
if missing:
num_missing = len(missing)
if num_missing > 1:
args = ", ".join("'{}'".format(m) for m in missing[:-1])
if num_missing > 2:
args += ","
args += " and '{}'".format(missing[-1])
else:
args = "'{}'".format(missing[0])
raise TypeError(
"__init__() missing {} required argument{}: {}".format(
num_missing, "s" if num_missing > 1 else "", args
)
)
for attr, value in six.iteritems(kwargs):
if attr in properties:
setattr(self, attr, value)
def __repr__(self):
# type: () -> str
"""Return a Python readable representation of the class."""
return "{}({})".format(
self.__class__.__name__,
", ".join(
"{}={}".format(attr_name, repr(getattr(self, attr_name)))
for attr_name in self._tp__typed_properties
),
) # type: ignore
class _AnnotatedObjectComparisonMixin(object):
"""A mixin to add comparisons to classes made by _AnnotatedObjectMeta."""
def _tp__get_typed_properties(self):
"""Return a tuple of typed attrs that can be used for comparisons.
Raises:
NotImplementedError: Raised if this class was mixed into a class
that was not created by _AnnotatedObjectMeta.
"""
try:
return tuple(getattr(self, p) for p in self._tp__typed_properties)
except AttributeError:
raise NotImplementedError
def __eq__(self, other):
"""Test if two objects of the same base class are equal.
If the objects are not of the same class, Python will default to
comparison-by-ID.
Args:
other: The object to compare for equality.
Returns:
True if the objects are equal; else False.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return (
self._tp__get_typed_properties()
== other._tp__get_typed_properties()
)
def __ne__(self, other):
"""Test if two objects of the same class are not equal.
If the objects are not of the same class, Python will default to
comparison-by-ID.
Args:
other: The object to compare for non-equality.
Returns:
True if the objects are not equal; else False.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self == other
def __lt__(self, other):
"""Test if self is less than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is less than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return (
self._tp__get_typed_properties()
< other._tp__get_typed_properties()
)
def __le__(self, other):
"""Test if self is less than or equal an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is less than or equal other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return self == other or self < other
def __gt__(self, other):
"""Test if self is greater than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self <= other
def __ge__(self, other):
"""Test if self is greater than or equal an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than or equal to other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self < other
def __hash__(self):
"""Generate a hash for the object based on the annotated attrs."""
return hash(self._tp__get_typed_properties())
@six.add_metaclass(_StrictObjectMeta) # type: ignore
class BaseStrictObject(_BaseAnnotatedObject):
"""A base class to create instance attrs for annotated class attrs.
For every class attribute that is annotated, public, and not constant in
the subclasses, this base class will generate property objects for the
the instances that will enforce the type of the value set.
If the subclass does not define __init__, a default implementation will be
generated that takes all of the annotated, public, non-constant attributes
as parameters. If an annotated attribute is not defined, it will be
required in __init__.
>>> from typet import BaseStrictObject
>>> class Point(BaseStrictObject):
... x: int
... y: int
...
...
>>> p = Point(0, 0)
>>> p.x
0
>>> p.x = '0'
Traceback (most recent call last):
...
TypeError: Cannot assign value of type str to attribute of type int.
"""
class StrictObject(BaseStrictObject, _AnnotatedObjectComparisonMixin):
"""A base class to create instance attrs for annotated class attrs.
For every class attribute that is annotated, public, and not constant in
the subclasses, this base class will generate property objects for the
the instances that will enforce the type of the value set.
If the subclass does not define __init__, a default implementation will be
generated that takes all of the annotated, public, non-constant attributes
as parameters. If an annotated attribute is not defined, it will be
required in __init__.
>>> from typet import StrictObject
>>> class Point(StrictObject):
... x: int
... y: int
...
...
>>> p = Point(0, 0)
>>> p.x
0
>>> p.x = '0'
Traceback (most recent call last):
...
TypeError: Cannot assign value of type str to attribute of type int.
>>> p2 = Point(2, 2)
>>> p < p2
True
>>> p > p2
False
"""
@six.add_metaclass(_ObjectMeta) # type: ignore
class BaseObject(_BaseAnnotatedObject):
"""A base class to create instance attrs for annotated class attrs.
For every class attribute that is annotated, public, and not constant in
the subclasses, this base class will generate property objects for the
the instances that will enforce the type of the value set by attempting to
cast the given value to the set type.
If the subclass does not define __init__, a default implementation will be
generated that takes all of the annotated, public, non-constant attributes
as parameters. If an annotated attribute is not defined, it will be
required in __init__.
Additionally, this class implements basic comparison operators and the hash
function.
>>> from typet import BaseObject
>>> class Point(BaseObject):
... x: int
... y: int
...
...
>>> p = Point(0, 0)
>>> p.x
0
>>> p.x = '5'
>>> p.x
5
>>> p.x = 'five'
Traceback (most recent call last):
...
TypeError: Cannot convert 'five' to int.
"""
class Object(BaseObject, _AnnotatedObjectComparisonMixin):
"""A base class to create instance attrs for annotated class attrs.
For every class attribute that is annotated, public, and not constant in
the subclasses, this base class will generate property objects for the
the instances that will enforce the type of the value set by attempting to
cast the given value to the set type.
If the subclass does not define __init__, a default implementation will be
generated that takes all of the annotated, public, non-constant attributes
as parameters. If an annotated attribute is not defined, it will be
required in __init__.
Additionally, this class implements basic comparison operators and the hash
function.
>>> from typet import Object
>>> class Point(Object):
... x: int
... y: int
...
...
>>> p = Point(0, 0)
>>> p.x
0
>>> p.x = '5'
>>> p.x
5
>>> p.x = 'five'
Traceback (most recent call last):
...
TypeError: Cannot convert 'five' to int.
>>> p2 = Point(2, 2)
>>> p < p2
True
>>> p > p2
False
"""
|
contains-io/typet | typet/objects.py | _is_propertyable | python | def _is_propertyable(
names, # type: List[str]
attrs, # type: Dict[str, Any]
annotations, # type: Dict[str, type]
attr, # Dict[str, Any]
):
# type: (...) -> bool
return (
attr in annotations
and not attr.startswith("_")
and not attr.isupper()
and "__{}".format(attr) not in names
and not isinstance(getattr(attrs, attr, None), types.MethodType)
) | Determine if an attribute can be replaced with a property.
Args:
names: The complete list of all attribute names for the class.
attrs: The attribute dict returned by __prepare__.
annotations: A mapping of all defined annotations for the class.
attr: The attribute to test.
Returns:
True if the attribute can be replaced with a property; else False. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/objects.py#L110-L134 | null | # -*- coding: utf-8 -*-
"""A module containing base objects for creating typed classes.
Classes:
BaseStrictObject: An object that asserts all annotated attributes are of
the correct type.
StrictObject: A derivative of BaseStrictObject that implements the default
comparison operators and hash.
BaseObject: An object that coerces all annotated attributes to the
correct type.
Object: A derivative of BaseObject that implements the default
comparison operators and hash.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import inspect
import re
import tokenize
import types
import six
from typingplus import ( # noqa: F401 pylint: disable=unused-import
cast,
get_type_hints,
is_instance,
Any,
Callable,
Dict,
List,
Optional,
Type,
TypeVar,
)
from .types import NoneType # pylint: disable=redefined-builtin
__all__ = ("BaseStrictObject", "StrictObject", "BaseObject", "Object")
_T = TypeVar("_T")
def _get_type_name(type_):
    # type: (type) -> str
    """Return a human-readable name for *type_*.

    Args:
        type_: A class object.

    Returns:
        A short string naming the class, suitable for use in a natural
        language sentence (falls back to ``repr`` when no name exists).
    """
    display = repr(type_)
    if display.startswith("<"):
        # repr is the default "<class ...>" form; prefer the qualified name.
        qualname = getattr(type_, "__qualname__", None)
        display = (
            qualname
            if qualname is not None
            else getattr(type_, "__name__", "")
        )
    # Keep only the last dotted component; fall back to repr when empty.
    _, _, tail = display.rpartition(".")
    return tail or repr(type_)
def _get_class_frame_source(class_name):
    # type: (str) -> Tuple[str, Dict[str, Any], Dict[str, Any]]
    """Return the source code for a class by checking the frame stack.

    This is necessary because it is not possible to get the source of a class
    being created by a metaclass directly.

    Args:
        class_name: The class to look for on the stack.

    Returns:
        A 3-tuple of the class' source code and the global and local
        namespaces of the frame in which the class statement was found.
        (NOTE(review): the original ``-> Optional[str]`` type comment did not
        match the tuple actually returned.)

    Raises:
        TypeError: Raised if no frame on the stack contains a ``class
            <class_name>`` statement with readable source.
    """
    # Walk the interpreter stack outward looking for the frame whose source
    # file contains the class statement.
    for frame_info in inspect.stack():
        try:
            with open(frame_info[1]) as fp:
                # Read from the reported line onward; the class body must
                # start at or after the frame's current line.
                src = "".join(fp.readlines()[frame_info[2] - 1 :])
        except IOError:
            # No readable source for this frame (e.g. interactive session);
            # try the next frame.
            continue
        if re.search(r"\bclass\b\s+\b{}\b".format(class_name), src):
            reader = six.StringIO(src).readline
            tokens = tokenize.generate_tokens(reader)
            source_tokens = []
            indent_level = 0
            base_indent_level = 0
            has_base_level = False
            # Accumulate tokens until indentation dedents back to the level
            # at which the class statement began, i.e. the class body ends.
            for token, value, _, _, _ in tokens:  # type: ignore
                source_tokens.append((token, value))
                if token == tokenize.INDENT:
                    indent_level += 1
                elif token == tokenize.DEDENT:
                    indent_level -= 1
                    if has_base_level and indent_level <= base_indent_level:
                        return (
                            tokenize.untokenize(source_tokens),
                            frame_info[0].f_globals,
                            frame_info[0].f_locals,
                        )
                elif not has_base_level:
                    # First non-INDENT/DEDENT token fixes the base level.
                    has_base_level = True
                    base_indent_level = indent_level
    raise TypeError(
        'Unable to retrieve source for class "{}"'.format(class_name)
    )
def _create_typed_object_meta(get_fset):
    # type: (Callable[[str, str, Type[_T]], Callable[[_T], None]]) -> type
    """Create a metaclass for typed objects.

    Args:
        get_fset: A function that takes three parameters: the name of an
            attribute, the name of the private attribute that holds the
            property data, and a type. This function must return an object
            method that accepts a value.

    Returns:
        A metaclass that reads annotations from a class definition and creates
        properties for annotated, public, non-constant, non-method attributes
        that will guarantee the type of the stored value matches the
        annotation.
    """

    def _get_fget(attr, private_attr, type_):
        # type: (str, str, Type[_T]) -> Callable[[], Any]
        """Create a property getter method for an attribute.

        Args:
            attr: The name of the attribute that will be retrieved.
            private_attr: The name of the attribute that will store any data
                related to the attribute.
            type_: The annotated type defining what values can be stored in
                the attribute.

        Returns:
            A function that takes self and retrieves the private attribute
            from self.
        """

        def _fget(self):
            # type: (...) -> Any
            """Get attribute from self without revealing the private name."""
            try:
                return getattr(self, private_attr)
            except AttributeError:
                # Re-raise under the public attribute name so the private
                # storage name never leaks into the error message.
                raise AttributeError(
                    "'{}' object has no attribute '{}'".format(
                        _get_type_name(type_), attr
                    )
                )

        return _fget

    class _AnnotatedObjectMeta(type):
        """A metaclass that reads annotations from a class definition."""

        def __new__(
            mcs,  # type: Type[_AnnotatedObjectMeta]
            name,  # type: str
            bases,  # type: List[type]
            attrs,  # type: Dict[str, Any]
            **kwargs  # type: Dict[str, Any]
        ):
            # type: (...) -> type
            """Create class objs that replaces annotated attrs with properties.

            Args:
                mcs: The class object being created.
                name: The name of the class to create.
                bases: The list of all base classes for the new class.
                attrs: The list of all attributes for the new class from the
                    definition.

            Returns:
                A new class instance with the expected base classes and
                attributes, but with annotated, public, non-constant,
                non-method attributes replaced by property objects that
                validate against the annotated type.
            """
            annotations = attrs.get("__annotations__", {})
            # Fall back to comment-style type hints when the class has no
            # PEP 526 annotations; never for classes defined in this module.
            use_comment_type_hints = (
                not annotations and attrs.get("__module__") != __name__
            )
            if use_comment_type_hints:
                # Comment hints require re-reading the class source from the
                # defining frame's file.
                frame_source = _get_class_frame_source(name)
                annotations = get_type_hints(*frame_source)
            names = list(attrs) + list(annotations)
            typed_attrs = {}
            for attr in names:
                typed_attrs[attr] = attrs.get(attr)
                if _is_propertyable(names, attrs, annotations, attr):
                    private_attr = "__{}".format(attr)
                    if attr in attrs:
                        # Preserve the declared default under the private name.
                        typed_attrs[private_attr] = attrs[attr]
                    # A declared default of None widens the annotation to
                    # Optional (only for real, non-comment annotations).
                    type_ = (
                        Optional[annotations[attr]]
                        if not use_comment_type_hints
                        and attr in attrs
                        and attrs[attr] is None
                        else annotations[attr]
                    )
                    typed_attrs[attr] = property(
                        _get_fget(attr, private_attr, type_),
                        get_fset(attr, private_attr, type_),
                    )
            properties = [
                attr
                for attr in annotations
                if _is_propertyable(names, attrs, annotations, attr)
            ]
            # Registry used by _BaseAnnotatedObject.__init__ and the
            # comparison mixin.
            typed_attrs["_tp__typed_properties"] = properties
            # Required = no usable default and annotation does not admit None.
            typed_attrs["_tp__required_typed_properties"] = [
                attr
                for attr in properties
                if (
                    attr not in attrs
                    or attrs[attr] is None
                    and use_comment_type_hints
                )
                and NoneType not in getattr(annotations[attr], "__args__", ())
            ]
            return super(_AnnotatedObjectMeta, mcs).__new__(  # type: ignore
                mcs, name, bases, typed_attrs, **kwargs
            )

    return _AnnotatedObjectMeta
def _strict_object_meta_fset(_, private_attr, type_):
    # type: (str, str, Type[_T]) -> Callable[[_T], None]
    """Create a strict property setter for an annotated attribute.

    Args:
        _: The name of the attribute to set. Unused.
        private_attr: The name of the attribute that will store any data
            related to the attribute.
        type_: The annotated type defining what values can be stored in the
            attribute.

    Returns:
        A method taking self and a value that stores the value under the
        private attribute if and only if it is an instance of type_.
    """

    def _fset(self, value):  # type: Any
        # type: (...) -> None
        """Store *value* on self only when it matches the annotated type.

        Args:
            value: The value to set.

        Raises:
            TypeError: Raised when the value is not an instance of type_.
        """
        if isinstance(type_, TypeVar):
            # Resolve the TypeVar against the concrete types bound on the
            # parameterized generic alias.
            bindings = dict(
                zip(self.__parameters__, self.__orig_class__.__args__)
            )
            expected = bindings[type_]
        else:
            expected = type_
        if not is_instance(value, expected):
            raise TypeError(
                "Cannot assign type of {} to attribute of type {}.".format(
                    _get_type_name(type(value)), _get_type_name(expected)
                )
            )
        vars(self)[private_attr] = value

    return _fset


_StrictObjectMeta = _create_typed_object_meta(_strict_object_meta_fset)
def _object_meta_fset(_, private_attr, type_):
    # type: (str, str, Type[_T]) -> Callable[[_T], None]
    """Create a coercing property setter for an annotated attribute.

    Args:
        _: The name of the attribute to set. Unused.
        private_attr: The name of the attribute that will store any data
            related to the attribute.
        type_: The annotated type defining what values can be stored in the
            attribute.

    Returns:
        A method taking self and a value that stores the value under the
        private attribute, casting it to type_ when necessary.
    """

    def _fset(self, value):  # type: Any
        # type: (...) -> None
        """Coerce *value* to the annotated type and store it on self.

        Args:
            value: The value to set.

        Raises:
            TypeError: Raised when the value is not an instance of type_
                and cannot be cast into a compatible object of type_.
        """
        expected = type_
        if isinstance(type_, TypeVar):
            # Resolve the TypeVar via the bindings on the generic alias.
            expected = dict(
                zip(self.__parameters__, self.__orig_class__.__args__)
            )[type_]
        vars(self)[private_attr] = cast(expected, value)

    return _fset


_ObjectMeta = _create_typed_object_meta(_object_meta_fset)
class _BaseAnnotatedObject(object):
    """A base class that looks for class attributes to create __init__."""

    def __init__(self, *args, **kwargs):
        """Set all attributes according to their annotation status."""
        super(_BaseAnnotatedObject, self).__init__()
        properties = self._tp__typed_properties
        required = self._tp__required_typed_properties
        # Fold positional arguments into kwargs, pairing them with the
        # annotated property names in declaration order.
        for prop_name, value in zip(properties, args):
            if prop_name in kwargs:
                raise TypeError(
                    "__init__() got multiple values for argument '{}'".format(
                        prop_name
                    )
                )
            kwargs[prop_name] = value
        missing = [prop for prop in required if prop not in kwargs]
        if missing:
            # Mirror CPython's own missing-argument message, including the
            # Oxford comma for three or more names.
            count = len(missing)
            if count == 1:
                described = "'{}'".format(missing[0])
            else:
                described = ", ".join("'{}'".format(m) for m in missing[:-1])
                if count > 2:
                    described += ","
                described += " and '{}'".format(missing[-1])
            raise TypeError(
                "__init__() missing {} required argument{}: {}".format(
                    count, "s" if count > 1 else "", described
                )
            )
        for prop_name, value in six.iteritems(kwargs):
            # Silently ignore keywords that are not typed properties.
            if prop_name in properties:
                setattr(self, prop_name, value)

    def __repr__(self):
        # type: () -> str
        """Return a Python readable representation of the class."""
        attr_reprs = (
            "{}={}".format(prop_name, repr(getattr(self, prop_name)))
            for prop_name in self._tp__typed_properties
        )
        return "{}({})".format(self.__class__.__name__, ", ".join(attr_reprs))
class _AnnotatedObjectComparisonMixin(object):
    """A mixin to add comparisons to classes made by _AnnotatedObjectMeta."""

    def _tp__get_typed_properties(self):
        """Return a tuple of the typed attribute values for comparisons.

        Raises:
            NotImplementedError: Raised if this class was mixed into a class
                that was not created by _AnnotatedObjectMeta.
        """
        try:
            return tuple(
                getattr(self, name) for name in self._tp__typed_properties
            )
        except AttributeError:
            # The typed-property registry is absent: the host class was not
            # built by _AnnotatedObjectMeta.
            raise NotImplementedError

    def __eq__(self, other):
        """Return True when other has the same class and typed values.

        Cross-class comparisons defer to Python's default handling.
        """
        if self.__class__ is not other.__class__:
            return NotImplemented
        return (
            self._tp__get_typed_properties()
            == other._tp__get_typed_properties()
        )

    def __ne__(self, other):
        """Return True when the two same-class objects are not equal."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return not self == other

    def __lt__(self, other):
        """Return True when self's typed values order before other's."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return (
            self._tp__get_typed_properties()
            < other._tp__get_typed_properties()
        )

    def __le__(self, other):
        """Return True when self orders before or equals other."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return self == other or self < other

    def __gt__(self, other):
        """Return True when self orders strictly after other."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return not self <= other

    def __ge__(self, other):
        """Return True when self orders after or equals other."""
        if self.__class__ is not other.__class__:
            return NotImplemented
        return not self < other

    def __hash__(self):
        """Generate a hash for the object based on the annotated attrs."""
        return hash(self._tp__get_typed_properties())
@six.add_metaclass(_StrictObjectMeta)  # type: ignore
class BaseStrictObject(_BaseAnnotatedObject):
    """A base class to create instance attrs for annotated class attrs.

    For every class attribute that is annotated, public, and not constant in
    the subclasses, this base class will generate property objects for the
    instances that will enforce the type of the value set.

    If the subclass does not define __init__, a default implementation will be
    generated that takes all of the annotated, public, non-constant attributes
    as parameters. If an annotated attribute is not defined, it will be
    required in __init__.

    >>> from typet import BaseStrictObject
    >>> class Point(BaseStrictObject):
    ...     x: int
    ...     y: int
    ...
    ...
    >>> p = Point(0, 0)
    >>> p.x
    0
    >>> p.x = '0'
    Traceback (most recent call last):
        ...
    TypeError: Cannot assign type of str to attribute of type int.
    """
class StrictObject(BaseStrictObject, _AnnotatedObjectComparisonMixin):
    """A base class to create instance attrs for annotated class attrs.

    For every class attribute that is annotated, public, and not constant in
    the subclasses, this base class will generate property objects for the
    instances that will enforce the type of the value set.

    If the subclass does not define __init__, a default implementation will be
    generated that takes all of the annotated, public, non-constant attributes
    as parameters. If an annotated attribute is not defined, it will be
    required in __init__.

    Additionally, this class implements the basic comparison operators and the
    hash function via _AnnotatedObjectComparisonMixin.

    >>> from typet import StrictObject
    >>> class Point(StrictObject):
    ...     x: int
    ...     y: int
    ...
    ...
    >>> p = Point(0, 0)
    >>> p.x
    0
    >>> p.x = '0'
    Traceback (most recent call last):
        ...
    TypeError: Cannot assign type of str to attribute of type int.
    >>> p2 = Point(2, 2)
    >>> p < p2
    True
    >>> p > p2
    False
    """
@six.add_metaclass(_ObjectMeta)  # type: ignore
class BaseObject(_BaseAnnotatedObject):
    """A base class to create instance attrs for annotated class attrs.

    For every class attribute that is annotated, public, and not constant in
    the subclasses, this base class will generate property objects for the
    instances that will enforce the type of the value set by attempting to
    cast the given value to the set type.

    If the subclass does not define __init__, a default implementation will be
    generated that takes all of the annotated, public, non-constant attributes
    as parameters. If an annotated attribute is not defined, it will be
    required in __init__.

    Note: unlike ``Object``, this class does not mix in
    _AnnotatedObjectComparisonMixin, so it does not provide the comparison
    operators or ``__hash__``.

    >>> from typet import BaseObject
    >>> class Point(BaseObject):
    ...     x: int
    ...     y: int
    ...
    ...
    >>> p = Point(0, 0)
    >>> p.x
    0
    >>> p.x = '5'
    >>> p.x
    5
    >>> p.x = 'five'
    Traceback (most recent call last):
        ...
    TypeError: Cannot convert 'five' to int.
    """
class Object(BaseObject, _AnnotatedObjectComparisonMixin):
    """A base class to create instance attrs for annotated class attrs.

    For every class attribute that is annotated, public, and not constant in
    the subclasses, this base class will generate property objects for the
    instances that will enforce the type of the value set by attempting to
    cast the given value to the set type.

    If the subclass does not define __init__, a default implementation will be
    generated that takes all of the annotated, public, non-constant attributes
    as parameters. If an annotated attribute is not defined, it will be
    required in __init__.

    Additionally, this class implements the basic comparison operators and the
    hash function via _AnnotatedObjectComparisonMixin.

    >>> from typet import Object
    >>> class Point(Object):
    ...     x: int
    ...     y: int
    ...
    ...
    >>> p = Point(0, 0)
    >>> p.x
    0
    >>> p.x = '5'
    >>> p.x
    5
    >>> p.x = 'five'
    Traceback (most recent call last):
        ...
    TypeError: Cannot convert 'five' to int.
    >>> p2 = Point(2, 2)
    >>> p < p2
    True
    >>> p > p2
    False
    """
|
contains-io/typet | typet/objects.py | _create_typed_object_meta | python | def _create_typed_object_meta(get_fset):
# type: (Callable[[str, str, Type[_T]], Callable[[_T], None]]) -> type
def _get_fget(attr, private_attr, type_):
# type: (str, str, Type[_T]) -> Callable[[], Any]
"""Create a property getter method for an attribute.
Args:
attr: The name of the attribute that will be retrieved.
private_attr: The name of the attribute that will store any data
related to the attribute.
type_: The annotated type defining what values can be stored in the
attribute.
Returns:
A function that takes self and retrieves the private attribute from
self.
"""
def _fget(self):
# type: (...) -> Any
"""Get attribute from self without revealing the private name."""
try:
return getattr(self, private_attr)
except AttributeError:
raise AttributeError(
"'{}' object has no attribute '{}'".format(
_get_type_name(type_), attr
)
)
return _fget
class _AnnotatedObjectMeta(type):
"""A metaclass that reads annotations from a class definition."""
def __new__(
mcs, # type: Type[_AnnotatedObjectMeta]
name, # type: str
bases, # type: List[type]
attrs, # type: Dict[str, Any]
**kwargs # type: Dict[str, Any]
):
# type: (...) -> type
"""Create class objs that replaces annotated attrs with properties.
Args:
mcs: The class object being created.
name: The name of the class to create.
bases: The list of all base classes for the new class.
attrs: The list of all attributes for the new class from the
definition.
Returns:
A new class instance with the expected base classes and
attributes, but with annotated, public, non-constant,
non-method attributes replaced by property objects that
validate against the annotated type.
"""
annotations = attrs.get("__annotations__", {})
use_comment_type_hints = (
not annotations and attrs.get("__module__") != __name__
)
if use_comment_type_hints:
frame_source = _get_class_frame_source(name)
annotations = get_type_hints(*frame_source)
names = list(attrs) + list(annotations)
typed_attrs = {}
for attr in names:
typed_attrs[attr] = attrs.get(attr)
if _is_propertyable(names, attrs, annotations, attr):
private_attr = "__{}".format(attr)
if attr in attrs:
typed_attrs[private_attr] = attrs[attr]
type_ = (
Optional[annotations[attr]]
if not use_comment_type_hints
and attr in attrs
and attrs[attr] is None
else annotations[attr]
)
typed_attrs[attr] = property(
_get_fget(attr, private_attr, type_),
get_fset(attr, private_attr, type_),
)
properties = [
attr
for attr in annotations
if _is_propertyable(names, attrs, annotations, attr)
]
typed_attrs["_tp__typed_properties"] = properties
typed_attrs["_tp__required_typed_properties"] = [
attr
for attr in properties
if (
attr not in attrs
or attrs[attr] is None
and use_comment_type_hints
)
and NoneType not in getattr(annotations[attr], "__args__", ())
]
return super(_AnnotatedObjectMeta, mcs).__new__( # type: ignore
mcs, name, bases, typed_attrs, **kwargs
)
return _AnnotatedObjectMeta | Create a metaclass for typed objects.
Args:
get_fset: A function that takes three parameters: the name of an
attribute, the name of the private attribute that holds the
property data, and a type. This function must return an object method that
accepts a value.
Returns:
A metaclass that reads annotations from a class definition and creates
properties for annotated, public, non-constant, non-method attributes
that will guarantee the type of the stored value matches the
annotation. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/objects.py#L137-L256 | null | # -*- coding: utf-8 -*-
"""A module containing base objects for creating typed classes.
Classes:
BaseStrictObject: An object that asserts all annotated attributes are of
the correct type.
StrictObject: A derivative of BaseStrictObject that implements the default
comparison operators and hash.
BaseObject: An object that coerces all annotated attributes to the
correct type.
Object: A derivative of BaseObject that implements the default
comparison operators and hash.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import inspect
import re
import tokenize
import types
import six
from typingplus import ( # noqa: F401 pylint: disable=unused-import
cast,
get_type_hints,
is_instance,
Any,
Callable,
Dict,
List,
Optional,
Type,
TypeVar,
)
from .types import NoneType # pylint: disable=redefined-builtin
__all__ = ("BaseStrictObject", "StrictObject", "BaseObject", "Object")
_T = TypeVar("_T")
def _get_type_name(type_):
# type: (type) -> str
"""Return a displayable name for the type.
Args:
type_: A class object.
Returns:
A string value describing the class name that can be used in a natural
language sentence.
"""
name = repr(type_)
if name.startswith("<"):
name = getattr(type_, "__qualname__", getattr(type_, "__name__", ""))
return name.rsplit(".", 1)[-1] or repr(type_)
def _get_class_frame_source(class_name):
# type: (str) -> Optional[str]
"""Return the source code for a class by checking the frame stack.
This is necessary because it is not possible to get the source of a class
being created by a metaclass directly.
Args:
class_name: The class to look for on the stack.
Returns:
The source code for the requested class if the class was found and the
source was accessible.
"""
for frame_info in inspect.stack():
try:
with open(frame_info[1]) as fp:
src = "".join(fp.readlines()[frame_info[2] - 1 :])
except IOError:
continue
if re.search(r"\bclass\b\s+\b{}\b".format(class_name), src):
reader = six.StringIO(src).readline
tokens = tokenize.generate_tokens(reader)
source_tokens = []
indent_level = 0
base_indent_level = 0
has_base_level = False
for token, value, _, _, _ in tokens: # type: ignore
source_tokens.append((token, value))
if token == tokenize.INDENT:
indent_level += 1
elif token == tokenize.DEDENT:
indent_level -= 1
if has_base_level and indent_level <= base_indent_level:
return (
tokenize.untokenize(source_tokens),
frame_info[0].f_globals,
frame_info[0].f_locals,
)
elif not has_base_level:
has_base_level = True
base_indent_level = indent_level
raise TypeError(
'Unable to retrieve source for class "{}"'.format(class_name)
)
def _is_propertyable(
    names,  # type: List[str]
    attrs,  # type: Dict[str, Any]
    annotations,  # type: Dict[str, type]
    attr,  # type: str
):
    # type: (...) -> bool
    """Determine if an attribute can be replaced with a property.

    Args:
        names: The complete list of all attribute names for the class.
        attrs: The attribute dict returned by __prepare__.
        annotations: A mapping of all defined annotations for the class.
        attr: The attribute to test.

    Returns:
        True if the attribute can be replaced with a property; else False.
    """
    # Propertyable = annotated, public (no leading underscore), not an
    # UPPERCASE constant, not shadowed by an existing "__<attr>" name, and
    # not a method.
    # NOTE(review): ``getattr(attrs, attr, None)`` looks up attributes of the
    # dict object itself, not its items; ``attrs.get(attr)`` was probably
    # intended, so the method exclusion may never trigger -- confirm.
    return (
        attr in annotations
        and not attr.startswith("_")
        and not attr.isupper()
        and "__{}".format(attr) not in names
        and not isinstance(getattr(attrs, attr, None), types.MethodType)
    )
def _strict_object_meta_fset(_, private_attr, type_):
# type: (str, str, Type[_T]) -> Callable[[_T], None]
"""Create a property setter method for the attribute.
Args:
_: The name of the attribute to set. Unused.
private_attr: The name of the attribute that will store any data
related to the attribute.
type_: The annotated type defining what values can be stored in the
attribute.
Returns:
A method that takes self and a value and stores that value on self
in the private attribute iff the value is an instance of type_.
"""
def _fset(self, value): # type: Any
# type: (...) -> None
"""Set the value on self iff the value is an instance of type_.
Args:
value: The value to set.
Raises:
TypeError: Raised when the value is not an instance of type_.
"""
rtype = type_
if isinstance(type_, TypeVar):
type_map = dict(
zip(self.__parameters__, self.__orig_class__.__args__)
)
rtype = type_map[type_]
if not is_instance(value, rtype):
raise TypeError(
"Cannot assign type of {} to attribute of type {}.".format(
_get_type_name(type(value)), _get_type_name(rtype)
)
)
vars(self)[private_attr] = value
return _fset
_StrictObjectMeta = _create_typed_object_meta(_strict_object_meta_fset)
def _object_meta_fset(_, private_attr, type_):
# type: (str, str, Type[_T]) -> Callable[[_T], None]
"""Create a property setter method for the attribute.
Args:
_: The name of the attribute to set. Unused.
private_attr: The name of the attribute that will store any data
related to the attribute.
type_: The annotated type defining what values can be stored in the
attribute.
Returns:
A method that takes self and a value and stores that value on self
in the private attribute if the value is not an instance of type_
and cannot be cast into type_.
"""
def _fset(self, value): # type: Any
# type: (...) -> None
"""Set the value on self and coerce it to type_ if necessary.
Args:
value: The value to set.
Raises:
TypeError: Raised when the value is not an instance of type_
and cannot be cast into a compatible object of type_.
"""
rtype = type_
if isinstance(type_, TypeVar):
type_map = dict(
zip(self.__parameters__, self.__orig_class__.__args__)
)
rtype = type_map[type_]
vars(self)[private_attr] = cast(rtype, value)
return _fset
_ObjectMeta = _create_typed_object_meta(_object_meta_fset)
class _BaseAnnotatedObject(object):
"""A base class that looks for class attributes to create __init__."""
def __init__(self, *args, **kwargs):
"""Set all attributes according to their annotation status."""
super(_BaseAnnotatedObject, self).__init__()
properties = self._tp__typed_properties
required = self._tp__required_typed_properties
positionals = zip(properties, args)
for attr, value in positionals:
if attr in kwargs:
raise TypeError(
"__init__() got multiple values for argument '{}'".format(
attr
)
)
kwargs[attr] = value
missing = [attr for attr in required if attr not in kwargs]
if missing:
num_missing = len(missing)
if num_missing > 1:
args = ", ".join("'{}'".format(m) for m in missing[:-1])
if num_missing > 2:
args += ","
args += " and '{}'".format(missing[-1])
else:
args = "'{}'".format(missing[0])
raise TypeError(
"__init__() missing {} required argument{}: {}".format(
num_missing, "s" if num_missing > 1 else "", args
)
)
for attr, value in six.iteritems(kwargs):
if attr in properties:
setattr(self, attr, value)
def __repr__(self):
# type: () -> str
"""Return a Python readable representation of the class."""
return "{}({})".format(
self.__class__.__name__,
", ".join(
"{}={}".format(attr_name, repr(getattr(self, attr_name)))
for attr_name in self._tp__typed_properties
),
) # type: ignore
class _AnnotatedObjectComparisonMixin(object):
"""A mixin to add comparisons to classes made by _AnnotatedObjectMeta."""
def _tp__get_typed_properties(self):
"""Return a tuple of typed attrs that can be used for comparisons.
Raises:
NotImplementedError: Raised if this class was mixed into a class
that was not created by _AnnotatedObjectMeta.
"""
try:
return tuple(getattr(self, p) for p in self._tp__typed_properties)
except AttributeError:
raise NotImplementedError
def __eq__(self, other):
"""Test if two objects of the same base class are equal.
If the objects are not of the same class, Python will default to
comparison-by-ID.
Args:
other: The object to compare for equality.
Returns:
True if the objects are equal; else False.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return (
self._tp__get_typed_properties()
== other._tp__get_typed_properties()
)
def __ne__(self, other):
"""Test if two objects of the same class are not equal.
If the objects are not of the same class, Python will default to
comparison-by-ID.
Args:
other: The object to compare for non-equality.
Returns:
True if the objects are not equal; else False.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self == other
def __lt__(self, other):
"""Test if self is less than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is less than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return (
self._tp__get_typed_properties()
< other._tp__get_typed_properties()
)
def __le__(self, other):
"""Test if self is less than or equal an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is less than or equal other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return self == other or self < other
def __gt__(self, other):
"""Test if self is greater than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self <= other
def __ge__(self, other):
"""Test if self is greater than or equal an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than or equal to other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self < other
def __hash__(self):
"""Generate a hash for the object based on the annotated attrs."""
return hash(self._tp__get_typed_properties())
@six.add_metaclass(_StrictObjectMeta)  # type: ignore
class BaseStrictObject(_BaseAnnotatedObject):
    """A base class to create instance attrs for annotated class attrs.

    For every class attribute that is annotated, public, and not constant in
    the subclasses, this base class will generate property objects for the
    instances that will enforce the type of the value set.

    If the subclass does not define __init__, a default implementation will be
    generated that takes all of the annotated, public, non-constant attributes
    as parameters. If an annotated attribute is not defined, it will be
    required in __init__.

    >>> from typet import BaseStrictObject
    >>> class Point(BaseStrictObject):
    ...     x: int
    ...     y: int
    ...
    ...
    >>> p = Point(0, 0)
    >>> p.x
    0
    >>> p.x = '0'
    Traceback (most recent call last):
        ...
    TypeError: Cannot assign type of str to attribute of type int.
    """
class StrictObject(BaseStrictObject, _AnnotatedObjectComparisonMixin):
"""A base class to create instance attrs for annotated class attrs.
For every class attribute that is annotated, public, and not constant in
the subclasses, this base class will generate property objects for the
the instances that will enforce the type of the value set.
If the subclass does not define __init__, a default implementation will be
generated that takes all of the annotated, public, non-constant attributes
as parameters. If an annotated attribute is not defined, it will be
required in __init__.
>>> from typet import StrictObject
>>> class Point(StrictObject):
... x: int
... y: int
...
...
>>> p = Point(0, 0)
>>> p.x
0
>>> p.x = '0'
Traceback (most recent call last):
...
TypeError: Cannot assign value of type str to attribute of type int.
>>> p2 = Point(2, 2)
>>> p < p2
True
>>> p > p2
False
"""
@six.add_metaclass(_ObjectMeta)  # type: ignore
class BaseObject(_BaseAnnotatedObject):
    """A base class to create instance attrs for annotated class attrs.

    For every class attribute that is annotated, public, and not constant in
    the subclasses, this base class will generate property objects for the
    instances that will enforce the type of the value set by attempting to
    cast the given value to the set type.

    If the subclass does not define __init__, a default implementation will be
    generated that takes all of the annotated, public, non-constant attributes
    as parameters. If an annotated attribute is not defined, it will be
    required in __init__.

    Note: unlike ``Object``, this class does not mix in
    _AnnotatedObjectComparisonMixin, so it does not provide the comparison
    operators or ``__hash__``.

    >>> from typet import BaseObject
    >>> class Point(BaseObject):
    ...     x: int
    ...     y: int
    ...
    ...
    >>> p = Point(0, 0)
    >>> p.x
    0
    >>> p.x = '5'
    >>> p.x
    5
    >>> p.x = 'five'
    Traceback (most recent call last):
        ...
    TypeError: Cannot convert 'five' to int.
    """
class Object(BaseObject, _AnnotatedObjectComparisonMixin):
"""A base class to create instance attrs for annotated class attrs.
For every class attribute that is annotated, public, and not constant in
the subclasses, this base class will generate property objects for the
the instances that will enforce the type of the value set by attempting to
cast the given value to the set type.
If the subclass does not define __init__, a default implementation will be
generated that takes all of the annotated, public, non-constant attributes
as parameters. If an annotated attribute is not defined, it will be
required in __init__.
Additionally, this class implements basic comparison operators and the hash
function.
>>> from typet import Object
>>> class Point(Object):
... x: int
... y: int
...
...
>>> p = Point(0, 0)
>>> p.x
0
>>> p.x = '5'
>>> p.x
5
>>> p.x = 'five'
Traceback (most recent call last):
...
TypeError: Cannot convert 'five' to int.
>>> p2 = Point(2, 2)
>>> p < p2
True
>>> p > p2
False
"""
|
contains-io/typet | typet/objects.py | _strict_object_meta_fset | python | def _strict_object_meta_fset(_, private_attr, type_):
# type: (str, str, Type[_T]) -> Callable[[_T], None]
def _fset(self, value): # type: Any
# type: (...) -> None
"""Set the value on self iff the value is an instance of type_.
Args:
value: The value to set.
Raises:
TypeError: Raised when the value is not an instance of type_.
"""
rtype = type_
if isinstance(type_, TypeVar):
type_map = dict(
zip(self.__parameters__, self.__orig_class__.__args__)
)
rtype = type_map[type_]
if not is_instance(value, rtype):
raise TypeError(
"Cannot assign type of {} to attribute of type {}.".format(
_get_type_name(type(value)), _get_type_name(rtype)
)
)
vars(self)[private_attr] = value
return _fset | Create a property setter method for the attribute.
Args:
_: The name of the attribute to set. Unused.
private_attr: The name of the attribute that will store any data
related to the attribute.
type_: The annotated type defining what values can be stored in the
attribute.
Returns:
A method that takes self and a value and stores that value on self
in the private attribute iff the value is an instance of type_. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/objects.py#L259-L299 | null | # -*- coding: utf-8 -*-
"""A module containing base objects for creating typed classes.
Classes:
BaseStrictObject: An object that asserts all annotated attributes are of
the correct type.
StrictObject: A derivative of BaseStrictObject that implements the default
comparison operators and hash.
BaseObject: An object that coerces all annotated attributes to the
correct type.
Object: A derivative of BaseObject that implements the default
comparison operators and hash.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import inspect
import re
import tokenize
import types
import six
from typingplus import ( # noqa: F401 pylint: disable=unused-import
cast,
get_type_hints,
is_instance,
Any,
Callable,
Dict,
List,
Optional,
Type,
TypeVar,
)
from .types import NoneType # pylint: disable=redefined-builtin
__all__ = ("BaseStrictObject", "StrictObject", "BaseObject", "Object")
_T = TypeVar("_T")
def _get_type_name(type_):
# type: (type) -> str
"""Return a displayable name for the type.
Args:
type_: A class object.
Returns:
A string value describing the class name that can be used in a natural
language sentence.
"""
name = repr(type_)
if name.startswith("<"):
name = getattr(type_, "__qualname__", getattr(type_, "__name__", ""))
return name.rsplit(".", 1)[-1] or repr(type_)
def _get_class_frame_source(class_name):
# type: (str) -> Optional[str]
"""Return the source code for a class by checking the frame stack.
This is necessary because it is not possible to get the source of a class
being created by a metaclass directly.
Args:
class_name: The class to look for on the stack.
Returns:
The source code for the requested class if the class was found and the
source was accessible.
"""
for frame_info in inspect.stack():
try:
with open(frame_info[1]) as fp:
src = "".join(fp.readlines()[frame_info[2] - 1 :])
except IOError:
continue
if re.search(r"\bclass\b\s+\b{}\b".format(class_name), src):
reader = six.StringIO(src).readline
tokens = tokenize.generate_tokens(reader)
source_tokens = []
indent_level = 0
base_indent_level = 0
has_base_level = False
for token, value, _, _, _ in tokens: # type: ignore
source_tokens.append((token, value))
if token == tokenize.INDENT:
indent_level += 1
elif token == tokenize.DEDENT:
indent_level -= 1
if has_base_level and indent_level <= base_indent_level:
return (
tokenize.untokenize(source_tokens),
frame_info[0].f_globals,
frame_info[0].f_locals,
)
elif not has_base_level:
has_base_level = True
base_indent_level = indent_level
raise TypeError(
'Unable to retrieve source for class "{}"'.format(class_name)
)
def _is_propertyable(
names, # type: List[str]
attrs, # type: Dict[str, Any]
annotations, # type: Dict[str, type]
attr, # Dict[str, Any]
):
# type: (...) -> bool
"""Determine if an attribute can be replaced with a property.
Args:
names: The complete list of all attribute names for the class.
attrs: The attribute dict returned by __prepare__.
annotations: A mapping of all defined annotations for the class.
attr: The attribute to test.
Returns:
True if the attribute can be replaced with a property; else False.
"""
return (
attr in annotations
and not attr.startswith("_")
and not attr.isupper()
and "__{}".format(attr) not in names
and not isinstance(getattr(attrs, attr, None), types.MethodType)
)
def _create_typed_object_meta(get_fset):
# type: (Callable[[str, str, Type[_T]], Callable[[_T], None]]) -> type
"""Create a metaclass for typed objects.
Args:
get_fset: A function that takes three parameters: the name of an
attribute, the name of the private attribute that holds the
property data, and a type. This function must an object method that
accepts a value.
Returns:
A metaclass that reads annotations from a class definition and creates
properties for annotated, public, non-constant, non-method attributes
that will guarantee the type of the stored value matches the
annotation.
"""
def _get_fget(attr, private_attr, type_):
# type: (str, str, Type[_T]) -> Callable[[], Any]
"""Create a property getter method for an attribute.
Args:
attr: The name of the attribute that will be retrieved.
private_attr: The name of the attribute that will store any data
related to the attribute.
type_: The annotated type defining what values can be stored in the
attribute.
Returns:
A function that takes self and retrieves the private attribute from
self.
"""
def _fget(self):
# type: (...) -> Any
"""Get attribute from self without revealing the private name."""
try:
return getattr(self, private_attr)
except AttributeError:
raise AttributeError(
"'{}' object has no attribute '{}'".format(
_get_type_name(type_), attr
)
)
return _fget
class _AnnotatedObjectMeta(type):
"""A metaclass that reads annotations from a class definition."""
def __new__(
mcs, # type: Type[_AnnotatedObjectMeta]
name, # type: str
bases, # type: List[type]
attrs, # type: Dict[str, Any]
**kwargs # type: Dict[str, Any]
):
# type: (...) -> type
"""Create class objs that replaces annotated attrs with properties.
Args:
mcs: The class object being created.
name: The name of the class to create.
bases: The list of all base classes for the new class.
attrs: The list of all attributes for the new class from the
definition.
Returns:
A new class instance with the expected base classes and
attributes, but with annotated, public, non-constant,
non-method attributes replaced by property objects that
validate against the annotated type.
"""
annotations = attrs.get("__annotations__", {})
use_comment_type_hints = (
not annotations and attrs.get("__module__") != __name__
)
if use_comment_type_hints:
frame_source = _get_class_frame_source(name)
annotations = get_type_hints(*frame_source)
names = list(attrs) + list(annotations)
typed_attrs = {}
for attr in names:
typed_attrs[attr] = attrs.get(attr)
if _is_propertyable(names, attrs, annotations, attr):
private_attr = "__{}".format(attr)
if attr in attrs:
typed_attrs[private_attr] = attrs[attr]
type_ = (
Optional[annotations[attr]]
if not use_comment_type_hints
and attr in attrs
and attrs[attr] is None
else annotations[attr]
)
typed_attrs[attr] = property(
_get_fget(attr, private_attr, type_),
get_fset(attr, private_attr, type_),
)
properties = [
attr
for attr in annotations
if _is_propertyable(names, attrs, annotations, attr)
]
typed_attrs["_tp__typed_properties"] = properties
typed_attrs["_tp__required_typed_properties"] = [
attr
for attr in properties
if (
attr not in attrs
or attrs[attr] is None
and use_comment_type_hints
)
and NoneType not in getattr(annotations[attr], "__args__", ())
]
return super(_AnnotatedObjectMeta, mcs).__new__( # type: ignore
mcs, name, bases, typed_attrs, **kwargs
)
return _AnnotatedObjectMeta
_StrictObjectMeta = _create_typed_object_meta(_strict_object_meta_fset)
def _object_meta_fset(_, private_attr, type_):
# type: (str, str, Type[_T]) -> Callable[[_T], None]
"""Create a property setter method for the attribute.
Args:
_: The name of the attribute to set. Unused.
private_attr: The name of the attribute that will store any data
related to the attribute.
type_: The annotated type defining what values can be stored in the
attribute.
Returns:
A method that takes self and a value and stores that value on self
in the private attribute if the value is not an instance of type_
and cannot be cast into type_.
"""
def _fset(self, value): # type: Any
# type: (...) -> None
"""Set the value on self and coerce it to type_ if necessary.
Args:
value: The value to set.
Raises:
TypeError: Raised when the value is not an instance of type_
and cannot be cast into a compatible object of type_.
"""
rtype = type_
if isinstance(type_, TypeVar):
type_map = dict(
zip(self.__parameters__, self.__orig_class__.__args__)
)
rtype = type_map[type_]
vars(self)[private_attr] = cast(rtype, value)
return _fset
_ObjectMeta = _create_typed_object_meta(_object_meta_fset)
class _BaseAnnotatedObject(object):
"""A base class that looks for class attributes to create __init__."""
def __init__(self, *args, **kwargs):
"""Set all attributes according to their annotation status."""
super(_BaseAnnotatedObject, self).__init__()
properties = self._tp__typed_properties
required = self._tp__required_typed_properties
positionals = zip(properties, args)
for attr, value in positionals:
if attr in kwargs:
raise TypeError(
"__init__() got multiple values for argument '{}'".format(
attr
)
)
kwargs[attr] = value
missing = [attr for attr in required if attr not in kwargs]
if missing:
num_missing = len(missing)
if num_missing > 1:
args = ", ".join("'{}'".format(m) for m in missing[:-1])
if num_missing > 2:
args += ","
args += " and '{}'".format(missing[-1])
else:
args = "'{}'".format(missing[0])
raise TypeError(
"__init__() missing {} required argument{}: {}".format(
num_missing, "s" if num_missing > 1 else "", args
)
)
for attr, value in six.iteritems(kwargs):
if attr in properties:
setattr(self, attr, value)
def __repr__(self):
# type: () -> str
"""Return a Python readable representation of the class."""
return "{}({})".format(
self.__class__.__name__,
", ".join(
"{}={}".format(attr_name, repr(getattr(self, attr_name)))
for attr_name in self._tp__typed_properties
),
) # type: ignore
class _AnnotatedObjectComparisonMixin(object):
"""A mixin to add comparisons to classes made by _AnnotatedObjectMeta."""
def _tp__get_typed_properties(self):
"""Return a tuple of typed attrs that can be used for comparisons.
Raises:
NotImplementedError: Raised if this class was mixed into a class
that was not created by _AnnotatedObjectMeta.
"""
try:
return tuple(getattr(self, p) for p in self._tp__typed_properties)
except AttributeError:
raise NotImplementedError
def __eq__(self, other):
"""Test if two objects of the same base class are equal.
If the objects are not of the same class, Python will default to
comparison-by-ID.
Args:
other: The object to compare for equality.
Returns:
True if the objects are equal; else False.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return (
self._tp__get_typed_properties()
== other._tp__get_typed_properties()
)
def __ne__(self, other):
"""Test if two objects of the same class are not equal.
If the objects are not of the same class, Python will default to
comparison-by-ID.
Args:
other: The object to compare for non-equality.
Returns:
True if the objects are not equal; else False.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self == other
def __lt__(self, other):
"""Test if self is less than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is less than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return (
self._tp__get_typed_properties()
< other._tp__get_typed_properties()
)
def __le__(self, other):
"""Test if self is less than or equal an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is less than or equal other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return self == other or self < other
def __gt__(self, other):
"""Test if self is greater than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self <= other
def __ge__(self, other):
"""Test if self is greater than or equal an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than or equal to other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self < other
def __hash__(self):
"""Generate a hash for the object based on the annotated attrs."""
return hash(self._tp__get_typed_properties())
@six.add_metaclass(_StrictObjectMeta) # type: ignore
class BaseStrictObject(_BaseAnnotatedObject):
"""A base class to create instance attrs for annotated class attrs.
For every class attribute that is annotated, public, and not constant in
the subclasses, this base class will generate property objects for the
the instances that will enforce the type of the value set.
If the subclass does not define __init__, a default implementation will be
generated that takes all of the annotated, public, non-constant attributes
as parameters. If an annotated attribute is not defined, it will be
required in __init__.
>>> from typet import BaseStrictObject
>>> class Point(BaseStrictObject):
... x: int
... y: int
...
...
>>> p = Point(0, 0)
>>> p.x
0
>>> p.x = '0'
Traceback (most recent call last):
...
TypeError: Cannot assign value of type str to attribute of type int.
"""
class StrictObject(BaseStrictObject, _AnnotatedObjectComparisonMixin):
"""A base class to create instance attrs for annotated class attrs.
For every class attribute that is annotated, public, and not constant in
the subclasses, this base class will generate property objects for the
the instances that will enforce the type of the value set.
If the subclass does not define __init__, a default implementation will be
generated that takes all of the annotated, public, non-constant attributes
as parameters. If an annotated attribute is not defined, it will be
required in __init__.
>>> from typet import StrictObject
>>> class Point(StrictObject):
... x: int
... y: int
...
...
>>> p = Point(0, 0)
>>> p.x
0
>>> p.x = '0'
Traceback (most recent call last):
...
TypeError: Cannot assign value of type str to attribute of type int.
>>> p2 = Point(2, 2)
>>> p < p2
True
>>> p > p2
False
"""
@six.add_metaclass(_ObjectMeta) # type: ignore
class BaseObject(_BaseAnnotatedObject):
"""A base class to create instance attrs for annotated class attrs.
For every class attribute that is annotated, public, and not constant in
the subclasses, this base class will generate property objects for the
the instances that will enforce the type of the value set by attempting to
cast the given value to the set type.
If the subclass does not define __init__, a default implementation will be
generated that takes all of the annotated, public, non-constant attributes
as parameters. If an annotated attribute is not defined, it will be
required in __init__.
Additionally, this class implements basic comparison operators and the hash
function.
>>> from typet import BaseObject
>>> class Point(BaseObject):
... x: int
... y: int
...
...
>>> p = Point(0, 0)
>>> p.x
0
>>> p.x = '5'
>>> p.x
5
>>> p.x = 'five'
Traceback (most recent call last):
...
TypeError: Cannot convert 'five' to int.
"""
class Object(BaseObject, _AnnotatedObjectComparisonMixin):
"""A base class to create instance attrs for annotated class attrs.
For every class attribute that is annotated, public, and not constant in
the subclasses, this base class will generate property objects for the
the instances that will enforce the type of the value set by attempting to
cast the given value to the set type.
If the subclass does not define __init__, a default implementation will be
generated that takes all of the annotated, public, non-constant attributes
as parameters. If an annotated attribute is not defined, it will be
required in __init__.
Additionally, this class implements basic comparison operators and the hash
function.
>>> from typet import Object
>>> class Point(Object):
... x: int
... y: int
...
...
>>> p = Point(0, 0)
>>> p.x
0
>>> p.x = '5'
>>> p.x
5
>>> p.x = 'five'
Traceback (most recent call last):
...
TypeError: Cannot convert 'five' to int.
>>> p2 = Point(2, 2)
>>> p < p2
True
>>> p > p2
False
"""
|
contains-io/typet | typet/objects.py | _object_meta_fset | python | def _object_meta_fset(_, private_attr, type_):
# type: (str, str, Type[_T]) -> Callable[[_T], None]
def _fset(self, value): # type: Any
# type: (...) -> None
"""Set the value on self and coerce it to type_ if necessary.
Args:
value: The value to set.
Raises:
TypeError: Raised when the value is not an instance of type_
and cannot be cast into a compatible object of type_.
"""
rtype = type_
if isinstance(type_, TypeVar):
type_map = dict(
zip(self.__parameters__, self.__orig_class__.__args__)
)
rtype = type_map[type_]
vars(self)[private_attr] = cast(rtype, value)
return _fset | Create a property setter method for the attribute.
Args:
_: The name of the attribute to set. Unused.
private_attr: The name of the attribute that will store any data
related to the attribute.
type_: The annotated type defining what values can be stored in the
attribute.
Returns:
A method that takes self and a value and stores that value on self
in the private attribute if the value is not an instance of type_
and cannot be cast into type_. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/objects.py#L305-L341 | null | # -*- coding: utf-8 -*-
"""A module containing base objects for creating typed classes.
Classes:
BaseStrictObject: An object that asserts all annotated attributes are of
the correct type.
StrictObject: A derivative of BaseStrictObject that implements the default
comparison operators and hash.
BaseObject: An object that coerces all annotated attributes to the
correct type.
Object: A derivative of BaseObject that implements the default
comparison operators and hash.
"""
from __future__ import absolute_import
from __future__ import unicode_literals
import inspect
import re
import tokenize
import types
import six
from typingplus import ( # noqa: F401 pylint: disable=unused-import
cast,
get_type_hints,
is_instance,
Any,
Callable,
Dict,
List,
Optional,
Type,
TypeVar,
)
from .types import NoneType # pylint: disable=redefined-builtin
__all__ = ("BaseStrictObject", "StrictObject", "BaseObject", "Object")
_T = TypeVar("_T")
def _get_type_name(type_):
# type: (type) -> str
"""Return a displayable name for the type.
Args:
type_: A class object.
Returns:
A string value describing the class name that can be used in a natural
language sentence.
"""
name = repr(type_)
if name.startswith("<"):
name = getattr(type_, "__qualname__", getattr(type_, "__name__", ""))
return name.rsplit(".", 1)[-1] or repr(type_)
def _get_class_frame_source(class_name):
# type: (str) -> Optional[str]
"""Return the source code for a class by checking the frame stack.
This is necessary because it is not possible to get the source of a class
being created by a metaclass directly.
Args:
class_name: The class to look for on the stack.
Returns:
The source code for the requested class if the class was found and the
source was accessible.
"""
for frame_info in inspect.stack():
try:
with open(frame_info[1]) as fp:
src = "".join(fp.readlines()[frame_info[2] - 1 :])
except IOError:
continue
if re.search(r"\bclass\b\s+\b{}\b".format(class_name), src):
reader = six.StringIO(src).readline
tokens = tokenize.generate_tokens(reader)
source_tokens = []
indent_level = 0
base_indent_level = 0
has_base_level = False
for token, value, _, _, _ in tokens: # type: ignore
source_tokens.append((token, value))
if token == tokenize.INDENT:
indent_level += 1
elif token == tokenize.DEDENT:
indent_level -= 1
if has_base_level and indent_level <= base_indent_level:
return (
tokenize.untokenize(source_tokens),
frame_info[0].f_globals,
frame_info[0].f_locals,
)
elif not has_base_level:
has_base_level = True
base_indent_level = indent_level
raise TypeError(
'Unable to retrieve source for class "{}"'.format(class_name)
)
def _is_propertyable(
names, # type: List[str]
attrs, # type: Dict[str, Any]
annotations, # type: Dict[str, type]
attr, # Dict[str, Any]
):
# type: (...) -> bool
"""Determine if an attribute can be replaced with a property.
Args:
names: The complete list of all attribute names for the class.
attrs: The attribute dict returned by __prepare__.
annotations: A mapping of all defined annotations for the class.
attr: The attribute to test.
Returns:
True if the attribute can be replaced with a property; else False.
"""
return (
attr in annotations
and not attr.startswith("_")
and not attr.isupper()
and "__{}".format(attr) not in names
and not isinstance(getattr(attrs, attr, None), types.MethodType)
)
def _create_typed_object_meta(get_fset):
# type: (Callable[[str, str, Type[_T]], Callable[[_T], None]]) -> type
"""Create a metaclass for typed objects.
Args:
get_fset: A function that takes three parameters: the name of an
attribute, the name of the private attribute that holds the
property data, and a type. This function must an object method that
accepts a value.
Returns:
A metaclass that reads annotations from a class definition and creates
properties for annotated, public, non-constant, non-method attributes
that will guarantee the type of the stored value matches the
annotation.
"""
def _get_fget(attr, private_attr, type_):
# type: (str, str, Type[_T]) -> Callable[[], Any]
"""Create a property getter method for an attribute.
Args:
attr: The name of the attribute that will be retrieved.
private_attr: The name of the attribute that will store any data
related to the attribute.
type_: The annotated type defining what values can be stored in the
attribute.
Returns:
A function that takes self and retrieves the private attribute from
self.
"""
def _fget(self):
# type: (...) -> Any
"""Get attribute from self without revealing the private name."""
try:
return getattr(self, private_attr)
except AttributeError:
raise AttributeError(
"'{}' object has no attribute '{}'".format(
_get_type_name(type_), attr
)
)
return _fget
class _AnnotatedObjectMeta(type):
"""A metaclass that reads annotations from a class definition."""
def __new__(
mcs, # type: Type[_AnnotatedObjectMeta]
name, # type: str
bases, # type: List[type]
attrs, # type: Dict[str, Any]
**kwargs # type: Dict[str, Any]
):
# type: (...) -> type
"""Create class objs that replaces annotated attrs with properties.
Args:
mcs: The class object being created.
name: The name of the class to create.
bases: The list of all base classes for the new class.
attrs: The list of all attributes for the new class from the
definition.
Returns:
A new class instance with the expected base classes and
attributes, but with annotated, public, non-constant,
non-method attributes replaced by property objects that
validate against the annotated type.
"""
annotations = attrs.get("__annotations__", {})
use_comment_type_hints = (
not annotations and attrs.get("__module__") != __name__
)
if use_comment_type_hints:
frame_source = _get_class_frame_source(name)
annotations = get_type_hints(*frame_source)
names = list(attrs) + list(annotations)
typed_attrs = {}
for attr in names:
typed_attrs[attr] = attrs.get(attr)
if _is_propertyable(names, attrs, annotations, attr):
private_attr = "__{}".format(attr)
if attr in attrs:
typed_attrs[private_attr] = attrs[attr]
type_ = (
Optional[annotations[attr]]
if not use_comment_type_hints
and attr in attrs
and attrs[attr] is None
else annotations[attr]
)
typed_attrs[attr] = property(
_get_fget(attr, private_attr, type_),
get_fset(attr, private_attr, type_),
)
properties = [
attr
for attr in annotations
if _is_propertyable(names, attrs, annotations, attr)
]
typed_attrs["_tp__typed_properties"] = properties
typed_attrs["_tp__required_typed_properties"] = [
attr
for attr in properties
if (
attr not in attrs
or attrs[attr] is None
and use_comment_type_hints
)
and NoneType not in getattr(annotations[attr], "__args__", ())
]
return super(_AnnotatedObjectMeta, mcs).__new__( # type: ignore
mcs, name, bases, typed_attrs, **kwargs
)
return _AnnotatedObjectMeta
def _strict_object_meta_fset(_, private_attr, type_):
# type: (str, str, Type[_T]) -> Callable[[_T], None]
"""Create a property setter method for the attribute.
Args:
_: The name of the attribute to set. Unused.
private_attr: The name of the attribute that will store any data
related to the attribute.
type_: The annotated type defining what values can be stored in the
attribute.
Returns:
A method that takes self and a value and stores that value on self
in the private attribute iff the value is an instance of type_.
"""
def _fset(self, value): # type: Any
# type: (...) -> None
"""Set the value on self iff the value is an instance of type_.
Args:
value: The value to set.
Raises:
TypeError: Raised when the value is not an instance of type_.
"""
rtype = type_
if isinstance(type_, TypeVar):
type_map = dict(
zip(self.__parameters__, self.__orig_class__.__args__)
)
rtype = type_map[type_]
if not is_instance(value, rtype):
raise TypeError(
"Cannot assign type of {} to attribute of type {}.".format(
_get_type_name(type(value)), _get_type_name(rtype)
)
)
vars(self)[private_attr] = value
return _fset
_StrictObjectMeta = _create_typed_object_meta(_strict_object_meta_fset)
_ObjectMeta = _create_typed_object_meta(_object_meta_fset)
class _BaseAnnotatedObject(object):
"""A base class that looks for class attributes to create __init__."""
def __init__(self, *args, **kwargs):
"""Set all attributes according to their annotation status."""
super(_BaseAnnotatedObject, self).__init__()
properties = self._tp__typed_properties
required = self._tp__required_typed_properties
positionals = zip(properties, args)
for attr, value in positionals:
if attr in kwargs:
raise TypeError(
"__init__() got multiple values for argument '{}'".format(
attr
)
)
kwargs[attr] = value
missing = [attr for attr in required if attr not in kwargs]
if missing:
num_missing = len(missing)
if num_missing > 1:
args = ", ".join("'{}'".format(m) for m in missing[:-1])
if num_missing > 2:
args += ","
args += " and '{}'".format(missing[-1])
else:
args = "'{}'".format(missing[0])
raise TypeError(
"__init__() missing {} required argument{}: {}".format(
num_missing, "s" if num_missing > 1 else "", args
)
)
for attr, value in six.iteritems(kwargs):
if attr in properties:
setattr(self, attr, value)
def __repr__(self):
# type: () -> str
"""Return a Python readable representation of the class."""
return "{}({})".format(
self.__class__.__name__,
", ".join(
"{}={}".format(attr_name, repr(getattr(self, attr_name)))
for attr_name in self._tp__typed_properties
),
) # type: ignore
class _AnnotatedObjectComparisonMixin(object):
"""A mixin to add comparisons to classes made by _AnnotatedObjectMeta."""
def _tp__get_typed_properties(self):
"""Return a tuple of typed attrs that can be used for comparisons.
Raises:
NotImplementedError: Raised if this class was mixed into a class
that was not created by _AnnotatedObjectMeta.
"""
try:
return tuple(getattr(self, p) for p in self._tp__typed_properties)
except AttributeError:
raise NotImplementedError
def __eq__(self, other):
"""Test if two objects of the same base class are equal.
If the objects are not of the same class, Python will default to
comparison-by-ID.
Args:
other: The object to compare for equality.
Returns:
True if the objects are equal; else False.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return (
self._tp__get_typed_properties()
== other._tp__get_typed_properties()
)
def __ne__(self, other):
"""Test if two objects of the same class are not equal.
If the objects are not of the same class, Python will default to
comparison-by-ID.
Args:
other: The object to compare for non-equality.
Returns:
True if the objects are not equal; else False.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self == other
def __lt__(self, other):
"""Test if self is less than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is less than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return (
self._tp__get_typed_properties()
< other._tp__get_typed_properties()
)
def __le__(self, other):
"""Test if self is less than or equal an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is less than or equal other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return self == other or self < other
def __gt__(self, other):
"""Test if self is greater than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self <= other
def __ge__(self, other):
"""Test if self is greater than or equal an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than or equal to other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self < other
def __hash__(self):
"""Generate a hash for the object based on the annotated attrs."""
return hash(self._tp__get_typed_properties())
@six.add_metaclass(_StrictObjectMeta) # type: ignore
class BaseStrictObject(_BaseAnnotatedObject):
"""A base class to create instance attrs for annotated class attrs.
For every class attribute that is annotated, public, and not constant in
the subclasses, this base class will generate property objects for the
the instances that will enforce the type of the value set.
If the subclass does not define __init__, a default implementation will be
generated that takes all of the annotated, public, non-constant attributes
as parameters. If an annotated attribute is not defined, it will be
required in __init__.
>>> from typet import BaseStrictObject
>>> class Point(BaseStrictObject):
... x: int
... y: int
...
...
>>> p = Point(0, 0)
>>> p.x
0
>>> p.x = '0'
Traceback (most recent call last):
...
TypeError: Cannot assign value of type str to attribute of type int.
"""
class StrictObject(BaseStrictObject, _AnnotatedObjectComparisonMixin):
"""A base class to create instance attrs for annotated class attrs.
For every class attribute that is annotated, public, and not constant in
the subclasses, this base class will generate property objects for the
the instances that will enforce the type of the value set.
If the subclass does not define __init__, a default implementation will be
generated that takes all of the annotated, public, non-constant attributes
as parameters. If an annotated attribute is not defined, it will be
required in __init__.
>>> from typet import StrictObject
>>> class Point(StrictObject):
... x: int
... y: int
...
...
>>> p = Point(0, 0)
>>> p.x
0
>>> p.x = '0'
Traceback (most recent call last):
...
TypeError: Cannot assign value of type str to attribute of type int.
>>> p2 = Point(2, 2)
>>> p < p2
True
>>> p > p2
False
"""
@six.add_metaclass(_ObjectMeta) # type: ignore
class BaseObject(_BaseAnnotatedObject):
"""A base class to create instance attrs for annotated class attrs.
For every class attribute that is annotated, public, and not constant in
the subclasses, this base class will generate property objects for the
the instances that will enforce the type of the value set by attempting to
cast the given value to the set type.
If the subclass does not define __init__, a default implementation will be
generated that takes all of the annotated, public, non-constant attributes
as parameters. If an annotated attribute is not defined, it will be
required in __init__.
Additionally, this class implements basic comparison operators and the hash
function.
>>> from typet import BaseObject
>>> class Point(BaseObject):
... x: int
... y: int
...
...
>>> p = Point(0, 0)
>>> p.x
0
>>> p.x = '5'
>>> p.x
5
>>> p.x = 'five'
Traceback (most recent call last):
...
TypeError: Cannot convert 'five' to int.
"""
class Object(BaseObject, _AnnotatedObjectComparisonMixin):
"""A base class to create instance attrs for annotated class attrs.
For every class attribute that is annotated, public, and not constant in
the subclasses, this base class will generate property objects for the
the instances that will enforce the type of the value set by attempting to
cast the given value to the set type.
If the subclass does not define __init__, a default implementation will be
generated that takes all of the annotated, public, non-constant attributes
as parameters. If an annotated attribute is not defined, it will be
required in __init__.
Additionally, this class implements basic comparison operators and the hash
function.
>>> from typet import Object
>>> class Point(Object):
... x: int
... y: int
...
...
>>> p = Point(0, 0)
>>> p.x
0
>>> p.x = '5'
>>> p.x
5
>>> p.x = 'five'
Traceback (most recent call last):
...
TypeError: Cannot convert 'five' to int.
>>> p2 = Point(2, 2)
>>> p < p2
True
>>> p > p2
False
"""
|
contains-io/typet | typet/objects.py | _AnnotatedObjectComparisonMixin._tp__get_typed_properties | python | def _tp__get_typed_properties(self):
try:
return tuple(getattr(self, p) for p in self._tp__typed_properties)
except AttributeError:
raise NotImplementedError | Return a tuple of typed attrs that can be used for comparisons.
Raises:
NotImplementedError: Raised if this class was mixed into a class
that was not created by _AnnotatedObjectMeta. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/objects.py#L398-L408 | null | class _AnnotatedObjectComparisonMixin(object):
"""A mixin to add comparisons to classes made by _AnnotatedObjectMeta."""
def __eq__(self, other):
"""Test if two objects of the same base class are equal.
If the objects are not of the same class, Python will default to
comparison-by-ID.
Args:
other: The object to compare for equality.
Returns:
True if the objects are equal; else False.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return (
self._tp__get_typed_properties()
== other._tp__get_typed_properties()
)
def __ne__(self, other):
"""Test if two objects of the same class are not equal.
If the objects are not of the same class, Python will default to
comparison-by-ID.
Args:
other: The object to compare for non-equality.
Returns:
True if the objects are not equal; else False.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self == other
def __lt__(self, other):
"""Test if self is less than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is less than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return (
self._tp__get_typed_properties()
< other._tp__get_typed_properties()
)
def __le__(self, other):
"""Test if self is less than or equal an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is less than or equal other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return self == other or self < other
def __gt__(self, other):
"""Test if self is greater than an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self <= other
def __ge__(self, other):
"""Test if self is greater than or equal an object of the same class.
Args:
other: The object to compare against.
Returns:
True if self is greater than or equal to other; else False.
Raises:
TypeError: Raised if the objects are not of the same class.
"""
if other.__class__ is not self.__class__:
return NotImplemented
return not self < other
def __hash__(self):
"""Generate a hash for the object based on the annotated attrs."""
return hash(self._tp__get_typed_properties())
|
contains-io/typet | typet/meta.py | metaclass | python | def metaclass(*metaclasses):
# type: (*type) -> Callable[[type], type]
def _inner(cls):
# pragma pylint: disable=unused-variable
metabases = tuple(
collections.OrderedDict( # noqa: F841
(c, None) for c in (metaclasses + (type(cls),))
).keys()
)
# pragma pylint: enable=unused-variable
_Meta = metabases[0]
for base in metabases[1:]:
class _Meta(base, _Meta): # pylint: disable=function-redefined
pass
return six.add_metaclass(_Meta)(cls)
return _inner | Create the class using all metaclasses.
Args:
metaclasses: A tuple of metaclasses that will be used to generate and
replace a specified class.
Returns:
A decorator that will recreate the class using the specified
metaclasses. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/meta.py#L38-L67 | null | # -*- coding: utf-8 -*-
"""A module containing common metaclasses and utilites for working with them.
Metaclasses:
Singleton: A metaclass to force a class to only ever be instantiated once.
IdempotentSingleton: A metaclass that will force a class to only create one
instance, but will call __init__ on the instance when new instantiation
attempts occur.
Uninstantiable: A metaclass that causes a class to be uninstantiable.
Decorators:
metaclass: A class decorator that will create the class using multiple
metaclasses.
singleton: A class decorator that will make the class a singleton, even if
the class already has a metaclass.
"""
from __future__ import unicode_literals
import collections
from typingplus import ( # noqa: F401 pylint: disable=unused-import
Any,
Callable,
)
import six
__all__ = (
"metaclass",
"Singleton",
"singleton",
"IdempotentSingleton",
"Uninstantiable",
)
class Singleton(type):
"""A metaclass to turn a class into a singleton.
If the instance already exists, Singleton will attempt to call
__singleton__ on the instance to allow the instance to update if necessary.
"""
__instance__ = None # type: type
def __call__(cls, *args, **kwargs):
# type: (*Any, **Any) -> type
"""Instantiate the class only once."""
if not cls.__instance__:
cls.__instance__ = super(Singleton, cls).__call__(*args, **kwargs)
else:
try:
cls.__instance__.__singleton__(*args, **kwargs) # type: ignore
except (AttributeError, TypeError):
pass
return cls.__instance__
class IdempotentSingleton(Singleton):
"""A metaclass to turn a class into a singleton.
If the instance already exists, IdempotentSingleton will call __init__ on
the existing instance with the arguments given.
"""
def __call__(cls, *args, **kwargs):
# type: (*Any, **Any) -> type
"""Create one instance of the class and reinstantiate as necessary."""
if not cls.__instance__:
cls.__instance__ = super(IdempotentSingleton, cls).__call__(
*args, **kwargs
)
else:
try:
cls.__instance__.__init__(*args, **kwargs) # type: ignore
except (AttributeError, TypeError):
pass
return cls.__instance__
singleton = metaclass(Singleton)
class Uninstantiable(type):
"""A metaclass that disallows instantiation."""
def __call__(cls, *args, **kwargs):
# type: (*Any, **Any) -> None
"""Do not allow the class to be instantiated."""
raise TypeError("Type {} cannot be instantiated.".format(cls.__name__))
|
contains-io/typet | typet/path.py | is_dir | python | def is_dir(path):
try:
return path.expanduser().absolute().is_dir()
except AttributeError:
return os.path.isdir(os.path.abspath(os.path.expanduser(str(path)))) | Determine if a Path or string is a directory on the file system. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/path.py#L30-L35 | null | # -*- coding: utf-8 -*-
"""A module containing types representing file and directory states.
Classes:
File: A type instance of Valid that validates that the value is a file.
Dir: A type instance of Valid that validates that the value is a directory.
Path: A type instance of Valid that expands a value to a path.
"""
from __future__ import unicode_literals
import os.path
from .validation import Valid
try:
import pathlib
except ImportError:
import pathlib2 as pathlib # type: ignore
def _valid(name, type_, predicate):
new_type = Valid[type_, predicate]
setattr(
new_type, "__class_repr__", "{}.{}".format(predicate.__module__, name)
)
return new_type
def is_file(path):
"""Determine if a Path or string is a file on the file system."""
try:
return path.expanduser().absolute().is_file()
except AttributeError:
return os.path.isfile(os.path.abspath(os.path.expanduser(str(path))))
def exists(path):
"""Determine if a Path or string is an existing path on the file system."""
try:
return path.expanduser().absolute().exists()
except AttributeError:
return os.path.exists(os.path.abspath(os.path.expanduser(str(path))))
Dir = _valid("Dir", pathlib.Path, is_dir)
File = _valid("File", pathlib.Path, is_file)
ExistingPath = _valid("ExistingPath", pathlib.Path, exists)
|
contains-io/typet | typet/path.py | is_file | python | def is_file(path):
try:
return path.expanduser().absolute().is_file()
except AttributeError:
return os.path.isfile(os.path.abspath(os.path.expanduser(str(path)))) | Determine if a Path or string is a file on the file system. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/path.py#L38-L43 | null | # -*- coding: utf-8 -*-
"""A module containing types representing file and directory states.
Classes:
File: A type instance of Valid that validates that the value is a file.
Dir: A type instance of Valid that validates that the value is a directory.
Path: A type instance of Valid that expands a value to a path.
"""
from __future__ import unicode_literals
import os.path
from .validation import Valid
try:
import pathlib
except ImportError:
import pathlib2 as pathlib # type: ignore
def _valid(name, type_, predicate):
new_type = Valid[type_, predicate]
setattr(
new_type, "__class_repr__", "{}.{}".format(predicate.__module__, name)
)
return new_type
def is_dir(path):
"""Determine if a Path or string is a directory on the file system."""
try:
return path.expanduser().absolute().is_dir()
except AttributeError:
return os.path.isdir(os.path.abspath(os.path.expanduser(str(path))))
def exists(path):
"""Determine if a Path or string is an existing path on the file system."""
try:
return path.expanduser().absolute().exists()
except AttributeError:
return os.path.exists(os.path.abspath(os.path.expanduser(str(path))))
Dir = _valid("Dir", pathlib.Path, is_dir)
File = _valid("File", pathlib.Path, is_file)
ExistingPath = _valid("ExistingPath", pathlib.Path, exists)
|
contains-io/typet | typet/path.py | exists | python | def exists(path):
try:
return path.expanduser().absolute().exists()
except AttributeError:
return os.path.exists(os.path.abspath(os.path.expanduser(str(path)))) | Determine if a Path or string is an existing path on the file system. | train | https://github.com/contains-io/typet/blob/ad5087c567af84db299eca186776e1cee228e442/typet/path.py#L46-L51 | null | # -*- coding: utf-8 -*-
"""A module containing types representing file and directory states.
Classes:
File: A type instance of Valid that validates that the value is a file.
Dir: A type instance of Valid that validates that the value is a directory.
Path: A type instance of Valid that expands a value to a path.
"""
from __future__ import unicode_literals
import os.path
from .validation import Valid
try:
import pathlib
except ImportError:
import pathlib2 as pathlib # type: ignore
def _valid(name, type_, predicate):
new_type = Valid[type_, predicate]
setattr(
new_type, "__class_repr__", "{}.{}".format(predicate.__module__, name)
)
return new_type
def is_dir(path):
"""Determine if a Path or string is a directory on the file system."""
try:
return path.expanduser().absolute().is_dir()
except AttributeError:
return os.path.isdir(os.path.abspath(os.path.expanduser(str(path))))
def is_file(path):
"""Determine if a Path or string is a file on the file system."""
try:
return path.expanduser().absolute().is_file()
except AttributeError:
return os.path.isfile(os.path.abspath(os.path.expanduser(str(path))))
Dir = _valid("Dir", pathlib.Path, is_dir)
File = _valid("File", pathlib.Path, is_file)
ExistingPath = _valid("ExistingPath", pathlib.Path, exists)
|
tony-landis/datomic-py | datomic/datomic.py | dump_edn_val | python | def dump_edn_val(v):
" edn simple value dump"
if isinstance(v, (str, unicode)):
return json.dumps(v)
elif isinstance(v, E):
return unicode(v)
else:
return dumps(v) | edn simple value dump | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L644-L651 | null | # -*- coding: utf-8 -*-
"""
"""
import datetime
import urllib3
from pprint import pprint as pp
from termcolor import colored as cl
import logging
from schema import Schema
from clj import dumps, loads
import json
from itertools import izip
class DB(object):
def __init__(self, host, port, store, db, schema=None, **kwargs):
""" Assuming the datomic REST service was started this way:
> bin/rest -p 8888 mem datomic:mem://
Then get a connection for database name 'test' like this:
>>> db = DB("localhost", 8888, "mem", "test", schema=S)
"""
self.host, self.port, self.store, self.db = host, port, store, db
self.uri_str = "/data/"+ self.store +"/"
self.uri_db = "/data/"+ self.store +"/"+ self.db +"/"
self.uri_q = "/api/query"
self.pool = urllib3.connectionpool.HTTPConnectionPool(
self.host, port=self.port,
timeout=3, maxsize=20,
headers={"Accept":"application/edn", "Connection": "Keep-Alive"})
"debugging"
for d in ('debug_http','debug_loads'):
setattr(self, d, kwargs.get(d) == True)
"build or use provided Schema"
if isinstance(schema, Schema):
self.schema = schema
elif isinstance(schema, tuple):
self.schema = Schema(schema)
else:
self.schema = None
if schema is None: return
logging.warning("I don't know what to do with schema kwarg of type '%s'" % type(schema))
def create(self):
""" Creates the database
>>> db.create()
True
"""
data = data={"db-name":self.db}
self.rest('POST', self.uri_str, status_codes=(200,201), data=data)
return True
def info(self):
""" Fetch the current db state
>>> db.info()
{:db/alias "store/db", :basis-t ...}
"""
return self.rest('GET', self.uri_db + '-/')
def tx_schema(self, **kwargs):
""" Builds the data structure edn, and puts it in the db
"""
for s in self.schema.schema:
tx = self.tx(s, **kwargs)
def tx(self, *args, **kwargs):
""" Executes a raw tx string, or get a new TX object to work with.
Passing a raw string or list of strings will immedately transact
and return the API response as a dict.
>>> resp = tx('{:db/id #db/id[:db.part/user] :person/name "Bob"}')
{db-before: db-after: tempids: }
This gets a fresh `TX()` to prepare a transaction with.
>>> tx = db.tx()
New `E()` object with person/fname and person/lname attributes
>>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'})
New state and city objects referencing the state
>>> state = tx.add('loc/state', 'WA')
>>> city = tx.add('loc/city', 'Seattle', 'isin', state)
Add person/city, person/state, and person/likes refs to the person entity
>>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]})
Excute the transaction
>>> resp = tx.tx()
The resolved entity ids for our person
>>> person.eid, state.eid, city.eid
Fetch all attributes, behave like a dict
>>> person.items()
>>> person.iteritems()
Access attribute as an attribute
>>> person['person/name']
See `TX()` for options.
"""
if 0 == len(args): return TX(self)
ops = []
for op in args:
if isinstance(op, list): ops += op
elif isinstance(op, (str,unicode)): ops.append(op)
if 'debug' in kwargs: pp(ops)
tx_proc ="[ %s ]" % "".join(ops)
x = self.rest('POST', self.uri_db, data={"tx-data": tx_proc})
return x
def e(self, eid):
"""Get an Entity
"""
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/entity', data={'e':int(eid)}, parse=True)
tb = datetime.datetime.now() - ta
print cl('<<< fetched entity %s in %sms' % (eid, tb.microseconds/1000.0), 'cyan')
return rs
def retract(self, e, a, v):
""" redact the value of an attribute
"""
ta = datetime.datetime.now()
ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
rs = self.tx(ret)
tb = datetime.datetime.now() - ta
print cl('<<< retracted %s,%s,%s in %sms' % (e,a,v, tb.microseconds/1000.0), 'cyan')
return rs
def datoms(self, index='aevt', e='', a='', v='',
limit=0, offset=0, chunk=100,
start='', end='', since='', as_of='', history='', **kwargs):
""" Returns a lazy generator that will only fetch groups of datoms
at the chunk size specified.
http://docs.datomic.com/clojure/index.html#datomic.api/datoms
"""
assert index in ['aevt','eavt','avet','vaet'], "non-existant index"
data = {'index': index,
'a': ':{0}'.format(a) if a else '',
'v': dump_edn_val(v) if v else '',
'e': int(e) if e else '',
'offset': offset or 0,
'start': start,
'end': end,
'limit': limit,
'history': 'true' if history else '',
'as-of': int(as_of) if as_of else '',
'since': int(since) if since else '',
}
data['limit'] = offset + chunk
rs = True
while rs and (data['offset'] < (limit or 1000000000)):
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/datoms', data=data, parse=True)
if not len(rs):
rs = False
tb = datetime.datetime.now() - ta
print cl('<<< fetched %i datoms at offset %i in %sms' % (
len(rs), data['offset'], tb.microseconds/1000.0), 'cyan')
for r in rs: yield r
data['offset'] += chunk
def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):
""" Rest helpers
"""
r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)
if not r.status in (status_codes if status_codes else (200,201)):
print cl('\n---------\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')
print cl(data, 'red')
print r.headers
raise Exception, "Invalid status code: %s" % r.status
if not parse:
" return raw urllib3 response"
return r
if not self.debug_loads:
" return parsed edn"
return loads(r.data)
"time edn parse time and return parsed edn"
return self.debug(loads, args=(r_data, ), kwargs={},
fmt='<<< parsed edn datastruct in {ms}ms', color='green')
def debug(self, defn, args, kwargs, fmt=None, color='green'):
""" debug timing, colored terminal output
"""
ta = datetime.datetime.now()
rs = defn(*args, **kwargs)
tb = datetime.datetime.now() - ta
fmt = fmt or "processed {defn} in {ms}ms"
logmsg = fmt.format(ms=tb.microseconds/1000.0, defn=defn)
"terminal output"
print cl(logmsg, color)
"logging output"
logging.debug(logmsg)
return rs
def q(self, q, inputs=None, limit='', offset='', history=False):
""" query
"""
if not q.strip().startswith("["): q = "[ {0} ]".format(q)
args = u'[ {:db/alias "%(store)s/%(db)s" %(hist)s} %(inputs)s ]' % dict(
store = self.store,
db = self.db,
hist = ':history true' if history==True else '',
inputs = " ".join(inputs or []))
data = {"args": args,
"q": q,
"offset": offset or '',
"limit": limit or '',
}
return self.rest('GET', self.uri_q, data=data, parse=True)
def find(self, *args, **kwargs):
" new query builder on current db"
return Query(*args, db=self, schema=self.schema)
class Query(object):
""" chainable query builder"
>>> db.find('?e ?a') # default find
>>> q.where() # with add
>>> q.ins() # in add
"""
def __init__(self, find, db=None, schema=None):
self.db = db
self.schema = schema
self._find = []
self._where = []
self._input = []
self._limit = None
self._offset = None
self._history = False
self.find(find)
def __repr__(self):
return " ".join([str(self._find), str(self._in), str(self._where)])
def find(self, *args, **kwargs):
" :find "
if args[0] is all:
pass # finds all
else:
[(self._find.append(x)) for x in args]
return self
def where(self, *args, **kwargs):
" :where "
[(self._where.append(x)) for x in args]
return self
def fulltext(self, attr, s, q, e, v):
self._where.append("(fulltext $ {0} {1}) [[{2} {3}]]".format(attr, s, e, v))
self._input.append((s, q))
def param(self, *args, **kwargs):
" :in "
for first, second in pairwise(args):
if isinstance(second, list):
if not isinstance(second[0], list):
" add a logical _or_ "
self._input.append((
u"[{0} ...]".format(first), second))
else:
" relations, list of list"
self._input.append((
u"[[{0}]]".format(first), second))
elif isinstance(second, tuple):
" tuple "
self._input.append((
u"[{0}]".format(first), list(second)))
else:
" nothing special "
self._input.append((first,second))
return self
def limit(self, limit):
self._limit = limit
return self
def offset(self, offset):
self._offset = offset
return self
def history(self, history):
self._offset = history
return self
def hashone(self):
"execute query, get back"
rs = self.one()
if not rs:
return {}
else:
finds = " ".join(self._find).split(' ')
return dict(zip((x.replace('?','') for x in finds), rs))
def one(self):
"execute query, get a single list"
self.limit(1)
rs = self.all()
if not rs:
return None
else:
return rs[0]
def all(self):
" execute query, get all list of lists"
query,inputs = self._toedn()
return self.db.q(query,
inputs = inputs,
limit = self._limit,
offset = self._offset,
history = self._history)
def _toedn(self):
""" prepare the query for the rest api
"""
finds = u""
inputs = u""
wheres = u""
args = []
": in and args"
for a,b in self._input:
inputs += " {0}".format(a)
args.append(dump_edn_val(b))
if inputs:
inputs = u":in ${0}".format(inputs)
" :where "
for where in self._where:
if isinstance(where, (str,unicode)):
wheres += u"[{0}]".format(where)
elif isinstance(where, (list)):
wheres += u" ".join([u"[{0}]".format(w) for w in where])
" find: "
if self._find == []: #find all
fs = set()
for p in wheres.replace('[',' ').replace(']',' ').split(' '):
if p.startswith('?'):
fs.add(p)
self._find = list(fs)
finds = " ".join(self._find)
" all togethr now..."
q = u"""[ :find {0} {1} :where {2} ]""".\
format( finds, inputs, wheres)
return q,args
class E(dict):
""" An entity and its db, optionally a tx.
"""
def __init__(self, e, db=None, tx=None):
""" Represents an entity in the db,
or a tempid in a non-committed state.
>>> person = E(1, db)
>>> person.eid
1
Fetch all attributes, behave like a dict
>>> person.items()
Iterator just like a dictionary
>>> person.iteritems()
Access attribute as an attribute
>>> person['person/name']
>>> person.get('person/name')
Access ns attribute with dot notation
>>> person.person
"""
assert (db is not None or tx is not None),\
"A DB or TX object is required"
self._eid = int(e)
self._db = db or tx.db
self._tx = tx
self._txid = -1 if not tx else tx.txid
self._dict = None
def __repr__(self):
return "{'db/id': %s}" % cl(self._eid, 'magenta')
def __unicode__(self):
return u"#db/id[:db.part/user %s]" % self._eid
def __int__(self):
return self._eid
""" compare entity id + at-tx
"""
def __eq__(self, obj):
if not isinstance(obj, E): return False
return self._eid == obj._eid and \
self._txid == obj._txid
def __ne__(self, obj):
if not isinstance(obj, E): return True
return self._eid != obj._eid or \
self._txid != obj._txid
""" compare at-tx
"""
def __lt__(self, obj):
return self._txid < obj._txid
def __gt__(self, obj):
return self._txid > obj._txid
def __le__(self, obj):
return self._txid <= obj._txid
def __ge__(self, obj):
return self._txid >= obj._txid
""" attributes
"""
@property
def __dict__(self):
'returns a dictionary with last known state in the db'
if isinstance(self._dict, dict): return self._dict
if self._eid < 0: return {} # uncommitted
self._dict = self._db.e(self.eid) # fetch
return self._dict
def vpar(self, val):
# TODO - check schema for type,cardinality
if not isinstance(val, dict): return val
return E(val.get('db/id'), db=self._db, tx=self._tx)
def __getitem__(self, attr, default=None):
val = self.__dict__.get(attr, default)
return self.vpar(val)
def __getattr__(self, attr, default=None):
val = self.__dict__.get(attr, default)
if val: return self.vpar(v)
rs, ns = {}, '{0}/'.format(attr)
for k,v in self.__dict__.iteritems():
if k.startswith(ns):
attr = "/".join(k.split('/')[1:])
vp = self.vpar(v)
if not attr in rs:
rs[attr] = vp
elif isinstance(rs[attr], list):
rs[attr].append(vp)
else:
rs[attr] = list(rs[attr], vp)
return rs
@property
def items(self):
return self.__dict__.items
@property
def iteritems(self):
return self.__dict__.iteritems
@property
def eid(self):
return self._eid
def add(self, *args, **kwargs):
self._tx.add(self, *args, **kwargs)
def retract(self, a, v):
assert self.eid > 0, "unresolved entity state, cannot issue retractions"
if not a.startswith(':'):
a = u':%s' % v
self._db.tx(u'[:db/retract {0} {1} {2}]'.\
format(self.eid, a, dump_edn_val(v)))
class TX(object):
""" Accumulate, execute, and resolve tempids
"""
def __init__(self, db):
self.db = db
self.tmpents, self.adds, self.ctmpid, self.txid = [], [], -1, -1
self.resp = None
self.realents = []
def __repr__(self):
return "<datomic tx, %i pending>" % len(self)
def __len__(self):
return len(self.adds or [])
def __int__(self):
return self.txid
def add(self, *args, **kwargs):
""" Accumulate datums for the transaction
Start a transaction on an existing db connection
>>> tx = TX(db)
Get get an entity object with a tempid
>>> ref = add()
>>> ref = add(0)
>>> ref = add(None)
>>> ref = add(False)
Entity id passed as first argument (int|long)
>>> tx.add(1, 'thing/name', 'value')
Shorthand form for multiple attributes sharing a root namespace
>>> tx.add(':thing/', {'name':'value', 'tag':'value'})
Attributes with a value of None are ignored
>>> tx.add(':thing/ignored', None)
Add multiple datums for an attribute with carinality:many
>>> tx.add(':thing/color', ['red','white','blue'])
"""
assert self.resp is None, "Transaction already committed"
entity, av_pairs, args = None, [], list(args)
if len(args):
if isinstance(args[0], (int, long)):
" first arg is an entity or tempid"
entity = E(args[0], tx=self)
elif isinstance(args[0], E):
" dont resuse entity from another tx"
if args[0]._tx is self:
entity = args[0]
else:
if int(args[0]) > 0:
" use the entity id on a new obj"
entity = E(int(args[0]), tx=self)
args[0] = None
" drop the first arg"
if entity is not None or args[0] in (None, False, 0):
v = args.pop(0)
" auto generate a temp id?"
if entity is None:
entity = E(self.ctmpid, tx=self)
self.ctmpid -= 1
" a,v from kwargs"
if len(args) == 0 and kwargs:
for a,v in kwargs.iteritems():
self.addeav(entity, a, v)
" a,v from args "
if len(args):
assert len(args) % 2 == 0, "imbalanced a,v in args: " % args
for first, second in pairwise(args):
if not first.startswith(':'):
first = ':' + first
if not first.endswith('/'):
" longhand used: blah/blah "
if isinstance(second, list):
for v in second:
self.addeav(entity, first, v)
else:
self.addeav(entity, first, second)
continue
elif isinstance(second, dict):
" shorthand used: blah/, dict "
for a,v in second.iteritems():
self.addeav(entity, "%s%s" % (first, a), v)
continue
elif isinstance(second, (list, tuple)):
" shorthand used: blah/, list|tuple "
for a,v in pairwise(second):
self.addeav(entity, "%s%s" % (first, a), v)
continue
else:
raise Exception, "invalid pair: %s : %s" % (first,second)
"pass back the entity so it can be resolved after tx()"
return entity
def execute(self, **kwargs):
""" commit the current statements from add()
"""
assert self.resp is None, "Transaction already committed"
try:
self.resp = self.db.tx(list(self.edn_iter), **kwargs)
except Exception:
self.resp = False
raise
else:
self.resolve()
self.adds = None
self.tmpents = None
return self.resp # raw dict response
def resolve(self):
""" Resolve one or more tempids.
Automatically takes place after transaction is executed.
"""
assert isinstance(self.resp, dict), "Transaction in uncommitted or failed state"
rids = [(v) for k,v in self.resp['tempids'].items()]
self.txid = self.resp['tx-data'][0]['tx']
rids.reverse()
for t in self.tmpents:
pos = self.tmpents.index(t)
t._eid, t._txid = rids[pos], self.txid
for t in self.realents:
t._txid = self.txid
def addeav(self, e, a, v):
if v is None: return
self.adds.append((e, a, v))
if int(e) < 0 and e not in self.tmpents:
self.tmpents.append(e)
elif int(e) > 0 and e not in self.realents:
self.realents.append(e)
@property
def edn_iter(self):
""" yields edns
"""
for e,a,v in self.adds:
yield u"{%(a)s %(v)s :db/id #db/id[:db.part/user %(e)s ]}" % \
dict(a=a, v=dump_edn_val(v), e=int(e))
def pairwise(iterable):
"s -> (s0,s1), (s2,s3), (s4, s5), ..."
a = iter(iterable)
return izip(a, a)
|
tony-landis/datomic-py | datomic/datomic.py | DB.create | python | def create(self):
data = data={"db-name":self.db}
self.rest('POST', self.uri_str, status_codes=(200,201), data=data)
return True | Creates the database
>>> db.create()
True | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L51-L58 | [
"def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):\n \"\"\" Rest helpers\n \"\"\"\n r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)\n if not r.status in (status_codes if status_codes else (200,201)):\n print cl('\\n---------\\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')\n print cl(data, 'red')\n print r.headers\n raise Exception, \"Invalid status code: %s\" % r.status\n if not parse: \n \" return raw urllib3 response\"\n return r\n if not self.debug_loads:\n \" return parsed edn\"\n return loads(r.data)\n \"time edn parse time and return parsed edn\"\n return self.debug(loads, args=(r_data, ), kwargs={},\n fmt='<<< parsed edn datastruct in {ms}ms', color='green')\n"
] | class DB(object):
def __init__(self, host, port, store, db, schema=None, **kwargs):
    """ Assuming the datomic REST service was started this way:
        > bin/rest -p 8888 mem datomic:mem://
        Then get a connection for database name 'test' like this:
        >>> db = DB("localhost", 8888, "mem", "test", schema=S)

    :param host: hostname of the datomic REST service
    :param port: port of the datomic REST service
    :param store: storage alias the service was started with
    :param db: database name
    :param schema: a Schema instance, or a tuple to build one from
    :param kwargs: optional debug_http / debug_loads booleans
    """
    self.host, self.port, self.store, self.db = host, port, store, db
    self.uri_str = "/data/"+ self.store +"/"
    self.uri_db = "/data/"+ self.store +"/"+ self.db +"/"
    self.uri_q = "/api/query"
    # keep-alive pool so consecutive REST calls reuse connections
    self.pool = urllib3.connectionpool.HTTPConnectionPool(
        self.host, port=self.port,
        timeout=3, maxsize=20,
        headers={"Accept":"application/edn", "Connection": "Keep-Alive"})
    "debugging"
    for d in ('debug_http','debug_loads'):
        setattr(self, d, kwargs.get(d) == True)
    "build or use provided Schema"
    if isinstance(schema, Schema):
        self.schema = schema
    elif isinstance(schema, tuple):
        self.schema = Schema(schema)
    else:
        self.schema = None
        # Fix: only warn when an unusable schema value was supplied.
        # Previously the check sat outside the else branch, so the
        # warning also fired for valid Schema/tuple arguments.
        if schema is not None:
            logging.warning("I don't know what to do with schema kwarg of type '%s'" % type(schema))
def info(self):
    """Return the current database state from the REST service.

    >>> db.info()
    {:db/alias "store/db", :basis-t ...}
    """
    uri = self.uri_db + '-/'
    return self.rest('GET', uri)
def tx_schema(self, **kwargs):
    """ Builds the data structure edn, and puts it in the db

    Each schema definition in ``self.schema.schema`` is submitted as
    its own transaction; kwargs are passed through to :meth:`tx`
    (e.g. debug=True).
    """
    for s in self.schema.schema:
        # Fix: the return value was assigned to an unused local `tx`.
        self.tx(s, **kwargs)
def tx(self, *args, **kwargs):
    """Execute raw transaction edn, or return a fresh TX builder.

    With no arguments a new TX object is returned for incremental use:
    >>> tx = db.tx()
    >>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'})
    >>> resp = tx.tx()

    Passing one or more edn strings (or lists of edn strings) transacts
    them immediately and returns the parsed API response:
    >>> resp = db.tx('{:db/id #db/id[:db.part/user] :person/name "Bob"}')
    {db-before: db-after: tempids: }

    Pass debug=True to pretty-print the operations before sending.
    See `TX()` for the builder options.
    """
    if not args:
        return TX(self)
    ops = []
    for op in args:
        if isinstance(op, list):
            ops.extend(op)
        elif isinstance(op, (str,unicode)):
            ops.append(op)
    if 'debug' in kwargs:
        pp(ops)
    tx_proc = "[ %s ]" % "".join(ops)
    return self.rest('POST', self.uri_db, data={"tx-data": tx_proc})
def e(self, eid):
"""Get an Entity
"""
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/entity', data={'e':int(eid)}, parse=True)
tb = datetime.datetime.now() - ta
print cl('<<< fetched entity %s in %sms' % (eid, tb.microseconds/1000.0), 'cyan')
return rs
def retract(self, e, a, v):
""" redact the value of an attribute
"""
ta = datetime.datetime.now()
ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
rs = self.tx(ret)
tb = datetime.datetime.now() - ta
print cl('<<< retracted %s,%s,%s in %sms' % (e,a,v, tb.microseconds/1000.0), 'cyan')
return rs
def datoms(self, index='aevt', e='', a='', v='',
limit=0, offset=0, chunk=100,
start='', end='', since='', as_of='', history='', **kwargs):
""" Returns a lazy generator that will only fetch groups of datoms
at the chunk size specified.
http://docs.datomic.com/clojure/index.html#datomic.api/datoms
"""
assert index in ['aevt','eavt','avet','vaet'], "non-existant index"
data = {'index': index,
'a': ':{0}'.format(a) if a else '',
'v': dump_edn_val(v) if v else '',
'e': int(e) if e else '',
'offset': offset or 0,
'start': start,
'end': end,
'limit': limit,
'history': 'true' if history else '',
'as-of': int(as_of) if as_of else '',
'since': int(since) if since else '',
}
data['limit'] = offset + chunk
rs = True
while rs and (data['offset'] < (limit or 1000000000)):
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/datoms', data=data, parse=True)
if not len(rs):
rs = False
tb = datetime.datetime.now() - ta
print cl('<<< fetched %i datoms at offset %i in %sms' % (
len(rs), data['offset'], tb.microseconds/1000.0), 'cyan')
for r in rs: yield r
data['offset'] += chunk
def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):
""" Rest helpers
"""
r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)
if not r.status in (status_codes if status_codes else (200,201)):
print cl('\n---------\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')
print cl(data, 'red')
print r.headers
raise Exception, "Invalid status code: %s" % r.status
if not parse:
" return raw urllib3 response"
return r
if not self.debug_loads:
" return parsed edn"
return loads(r.data)
"time edn parse time and return parsed edn"
return self.debug(loads, args=(r_data, ), kwargs={},
fmt='<<< parsed edn datastruct in {ms}ms', color='green')
def debug(self, defn, args, kwargs, fmt=None, color='green'):
""" debug timing, colored terminal output
"""
ta = datetime.datetime.now()
rs = defn(*args, **kwargs)
tb = datetime.datetime.now() - ta
fmt = fmt or "processed {defn} in {ms}ms"
logmsg = fmt.format(ms=tb.microseconds/1000.0, defn=defn)
"terminal output"
print cl(logmsg, color)
"logging output"
logging.debug(logmsg)
return rs
def q(self, q, inputs=None, limit='', offset='', history=False):
    """Run a datalog query string against this database.

    The query is wrapped in ``[ ]`` when the caller did not already do
    so.  *inputs* is a sequence of extra edn argument strings; *history*
    targets the full history of the db.
    """
    if not q.strip().startswith("["):
        q = "[ {0} ]".format(q)
    args = u'[ {:db/alias "%(store)s/%(db)s" %(hist)s} %(inputs)s ]' % dict(
        store = self.store,
        db = self.db,
        hist = ':history true' if history==True else '',
        inputs = " ".join(inputs or []))
    payload = {"q": q,
               "args": args,
               "limit": limit or '',
               "offset": offset or ''}
    return self.rest('GET', self.uri_q, data=payload, parse=True)
def find(self, *args, **kwargs):
    """Return a new Query builder bound to this db and its schema."""
    return Query(*args, db=self, schema=self.schema)
|
tony-landis/datomic-py | datomic/datomic.py | DB.tx_schema | python | def tx_schema(self, **kwargs):
for s in self.schema.schema:
tx = self.tx(s, **kwargs) | Builds the data structure edn, and puts it in the db | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L67-L71 | [
"def tx(self, *args, **kwargs):\n \"\"\" Executes a raw tx string, or get a new TX object to work with.\n\n Passing a raw string or list of strings will immedately transact \n and return the API response as a dict.\n >>> resp = tx('{:db/id #db/id[:db.part/user] :person/name \"Bob\"}')\n {db-before: db-after: tempids: }\n\n This gets a fresh `TX()` to prepare a transaction with.\n >>> tx = db.tx()\n\n New `E()` object with person/fname and person/lname attributes\n >>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'})\n\n New state and city objects referencing the state\n >>> state = tx.add('loc/state', 'WA')\n >>> city = tx.add('loc/city', 'Seattle', 'isin', state)\n\n Add person/city, person/state, and person/likes refs to the person entity\n >>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]})\n\n Excute the transaction\n >>> resp = tx.tx() \n\n The resolved entity ids for our person\n >>> person.eid, state.eid, city.eid\n\n Fetch all attributes, behave like a dict\n >>> person.items()\n >>> person.iteritems()\n\n Access attribute as an attribute\n >>> person['person/name']\n\n See `TX()` for options.\n\n \"\"\"\n if 0 == len(args): return TX(self) \n ops = []\n for op in args:\n if isinstance(op, list): ops += op\n elif isinstance(op, (str,unicode)): ops.append(op)\n if 'debug' in kwargs: pp(ops)\n tx_proc =\"[ %s ]\" % \"\".join(ops)\n x = self.rest('POST', self.uri_db, data={\"tx-data\": tx_proc})\n return x\n"
] | class DB(object):
def __init__(self, host, port, store, db, schema=None, **kwargs):
    """ Assuming the datomic REST service was started this way:
        > bin/rest -p 8888 mem datomic:mem://
        Then get a connection for database name 'test' like this:
        >>> db = DB("localhost", 8888, "mem", "test", schema=S)

    :param host: hostname of the datomic REST service
    :param port: port of the datomic REST service
    :param store: storage alias the service was started with
    :param db: database name
    :param schema: a Schema instance, or a tuple to build one from
    :param kwargs: optional debug_http / debug_loads booleans
    """
    self.host, self.port, self.store, self.db = host, port, store, db
    self.uri_str = "/data/"+ self.store +"/"
    self.uri_db = "/data/"+ self.store +"/"+ self.db +"/"
    self.uri_q = "/api/query"
    # keep-alive pool so consecutive REST calls reuse connections
    self.pool = urllib3.connectionpool.HTTPConnectionPool(
        self.host, port=self.port,
        timeout=3, maxsize=20,
        headers={"Accept":"application/edn", "Connection": "Keep-Alive"})
    "debugging"
    for d in ('debug_http','debug_loads'):
        setattr(self, d, kwargs.get(d) == True)
    "build or use provided Schema"
    if isinstance(schema, Schema):
        self.schema = schema
    elif isinstance(schema, tuple):
        self.schema = Schema(schema)
    else:
        self.schema = None
        # Fix: only warn when an unusable schema value was supplied.
        # Previously the check sat outside the else branch, so the
        # warning also fired for valid Schema/tuple arguments.
        if schema is not None:
            logging.warning("I don't know what to do with schema kwarg of type '%s'" % type(schema))
def create(self):
    """ Creates the database
    >>> db.create()
    True
    """
    # Fix: was `data = data={"db-name":self.db}`, an accidental
    # chained assignment (`data = data = {...}`).
    data = {"db-name": self.db}
    self.rest('POST', self.uri_str, status_codes=(200,201), data=data)
    return True
def info(self):
""" Fetch the current db state
>>> db.info()
{:db/alias "store/db", :basis-t ...}
"""
return self.rest('GET', self.uri_db + '-/')
def tx(self, *args, **kwargs):
""" Executes a raw tx string, or get a new TX object to work with.
Passing a raw string or list of strings will immedately transact
and return the API response as a dict.
>>> resp = tx('{:db/id #db/id[:db.part/user] :person/name "Bob"}')
{db-before: db-after: tempids: }
This gets a fresh `TX()` to prepare a transaction with.
>>> tx = db.tx()
New `E()` object with person/fname and person/lname attributes
>>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'})
New state and city objects referencing the state
>>> state = tx.add('loc/state', 'WA')
>>> city = tx.add('loc/city', 'Seattle', 'isin', state)
Add person/city, person/state, and person/likes refs to the person entity
>>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]})
Excute the transaction
>>> resp = tx.tx()
The resolved entity ids for our person
>>> person.eid, state.eid, city.eid
Fetch all attributes, behave like a dict
>>> person.items()
>>> person.iteritems()
Access attribute as an attribute
>>> person['person/name']
See `TX()` for options.
"""
if 0 == len(args): return TX(self)
ops = []
for op in args:
if isinstance(op, list): ops += op
elif isinstance(op, (str,unicode)): ops.append(op)
if 'debug' in kwargs: pp(ops)
tx_proc ="[ %s ]" % "".join(ops)
x = self.rest('POST', self.uri_db, data={"tx-data": tx_proc})
return x
def e(self, eid):
"""Get an Entity
"""
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/entity', data={'e':int(eid)}, parse=True)
tb = datetime.datetime.now() - ta
print cl('<<< fetched entity %s in %sms' % (eid, tb.microseconds/1000.0), 'cyan')
return rs
def retract(self, e, a, v):
""" redact the value of an attribute
"""
ta = datetime.datetime.now()
ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
rs = self.tx(ret)
tb = datetime.datetime.now() - ta
print cl('<<< retracted %s,%s,%s in %sms' % (e,a,v, tb.microseconds/1000.0), 'cyan')
return rs
def datoms(self, index='aevt', e='', a='', v='',
limit=0, offset=0, chunk=100,
start='', end='', since='', as_of='', history='', **kwargs):
""" Returns a lazy generator that will only fetch groups of datoms
at the chunk size specified.
http://docs.datomic.com/clojure/index.html#datomic.api/datoms
"""
assert index in ['aevt','eavt','avet','vaet'], "non-existant index"
data = {'index': index,
'a': ':{0}'.format(a) if a else '',
'v': dump_edn_val(v) if v else '',
'e': int(e) if e else '',
'offset': offset or 0,
'start': start,
'end': end,
'limit': limit,
'history': 'true' if history else '',
'as-of': int(as_of) if as_of else '',
'since': int(since) if since else '',
}
data['limit'] = offset + chunk
rs = True
while rs and (data['offset'] < (limit or 1000000000)):
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/datoms', data=data, parse=True)
if not len(rs):
rs = False
tb = datetime.datetime.now() - ta
print cl('<<< fetched %i datoms at offset %i in %sms' % (
len(rs), data['offset'], tb.microseconds/1000.0), 'cyan')
for r in rs: yield r
data['offset'] += chunk
def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):
""" Rest helpers
"""
r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)
if not r.status in (status_codes if status_codes else (200,201)):
print cl('\n---------\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')
print cl(data, 'red')
print r.headers
raise Exception, "Invalid status code: %s" % r.status
if not parse:
" return raw urllib3 response"
return r
if not self.debug_loads:
" return parsed edn"
return loads(r.data)
"time edn parse time and return parsed edn"
return self.debug(loads, args=(r_data, ), kwargs={},
fmt='<<< parsed edn datastruct in {ms}ms', color='green')
def debug(self, defn, args, kwargs, fmt=None, color='green'):
""" debug timing, colored terminal output
"""
ta = datetime.datetime.now()
rs = defn(*args, **kwargs)
tb = datetime.datetime.now() - ta
fmt = fmt or "processed {defn} in {ms}ms"
logmsg = fmt.format(ms=tb.microseconds/1000.0, defn=defn)
"terminal output"
print cl(logmsg, color)
"logging output"
logging.debug(logmsg)
return rs
def q(self, q, inputs=None, limit='', offset='', history=False):
""" query
"""
if not q.strip().startswith("["): q = "[ {0} ]".format(q)
args = u'[ {:db/alias "%(store)s/%(db)s" %(hist)s} %(inputs)s ]' % dict(
store = self.store,
db = self.db,
hist = ':history true' if history==True else '',
inputs = " ".join(inputs or []))
data = {"args": args,
"q": q,
"offset": offset or '',
"limit": limit or '',
}
return self.rest('GET', self.uri_q, data=data, parse=True)
def find(self, *args, **kwargs):
" new query builder on current db"
return Query(*args, db=self, schema=self.schema)
|
tony-landis/datomic-py | datomic/datomic.py | DB.tx | python | def tx(self, *args, **kwargs):
if 0 == len(args): return TX(self)
ops = []
for op in args:
if isinstance(op, list): ops += op
elif isinstance(op, (str,unicode)): ops.append(op)
if 'debug' in kwargs: pp(ops)
tx_proc ="[ %s ]" % "".join(ops)
x = self.rest('POST', self.uri_db, data={"tx-data": tx_proc})
return x | Executes a raw tx string, or get a new TX object to work with.
Passing a raw string or list of strings will immediately transact
and return the API response as a dict.
>>> resp = tx('{:db/id #db/id[:db.part/user] :person/name "Bob"}')
{db-before: db-after: tempids: }
This gets a fresh `TX()` to prepare a transaction with.
>>> tx = db.tx()
New `E()` object with person/fname and person/lname attributes
>>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'})
New state and city objects referencing the state
>>> state = tx.add('loc/state', 'WA')
>>> city = tx.add('loc/city', 'Seattle', 'isin', state)
Add person/city, person/state, and person/likes refs to the person entity
>>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]})
Excute the transaction
>>> resp = tx.tx()
The resolved entity ids for our person
>>> person.eid, state.eid, city.eid
Fetch all attributes, behave like a dict
>>> person.items()
>>> person.iteritems()
Access attribute as an attribute
>>> person['person/name']
See `TX()` for options. | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L73-L118 | [
"def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):\n \"\"\" Rest helpers\n \"\"\"\n r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)\n if not r.status in (status_codes if status_codes else (200,201)):\n print cl('\\n---------\\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')\n print cl(data, 'red')\n print r.headers\n raise Exception, \"Invalid status code: %s\" % r.status\n if not parse: \n \" return raw urllib3 response\"\n return r\n if not self.debug_loads:\n \" return parsed edn\"\n return loads(r.data)\n \"time edn parse time and return parsed edn\"\n return self.debug(loads, args=(r_data, ), kwargs={},\n fmt='<<< parsed edn datastruct in {ms}ms', color='green')\n"
] | class DB(object):
def __init__(self, host, port, store, db, schema=None, **kwargs):
    """ Assuming the datomic REST service was started this way:
        > bin/rest -p 8888 mem datomic:mem://
        Then get a connection for database name 'test' like this:
        >>> db = DB("localhost", 8888, "mem", "test", schema=S)

    :param host: hostname of the datomic REST service
    :param port: port of the datomic REST service
    :param store: storage alias the service was started with
    :param db: database name
    :param schema: a Schema instance, or a tuple to build one from
    :param kwargs: optional debug_http / debug_loads booleans
    """
    self.host, self.port, self.store, self.db = host, port, store, db
    self.uri_str = "/data/"+ self.store +"/"
    self.uri_db = "/data/"+ self.store +"/"+ self.db +"/"
    self.uri_q = "/api/query"
    # keep-alive pool so consecutive REST calls reuse connections
    self.pool = urllib3.connectionpool.HTTPConnectionPool(
        self.host, port=self.port,
        timeout=3, maxsize=20,
        headers={"Accept":"application/edn", "Connection": "Keep-Alive"})
    "debugging"
    for d in ('debug_http','debug_loads'):
        setattr(self, d, kwargs.get(d) == True)
    "build or use provided Schema"
    if isinstance(schema, Schema):
        self.schema = schema
    elif isinstance(schema, tuple):
        self.schema = Schema(schema)
    else:
        self.schema = None
        # Fix: only warn when an unusable schema value was supplied.
        # Previously the check sat outside the else branch, so the
        # warning also fired for valid Schema/tuple arguments.
        if schema is not None:
            logging.warning("I don't know what to do with schema kwarg of type '%s'" % type(schema))
def create(self):
    """ Creates the database
    >>> db.create()
    True
    """
    # Fix: was `data = data={"db-name":self.db}`, an accidental
    # chained assignment (`data = data = {...}`).
    data = {"db-name": self.db}
    self.rest('POST', self.uri_str, status_codes=(200,201), data=data)
    return True
def info(self):
""" Fetch the current db state
>>> db.info()
{:db/alias "store/db", :basis-t ...}
"""
return self.rest('GET', self.uri_db + '-/')
def tx_schema(self, **kwargs):
""" Builds the data structure edn, and puts it in the db
"""
for s in self.schema.schema:
tx = self.tx(s, **kwargs)
def e(self, eid):
"""Get an Entity
"""
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/entity', data={'e':int(eid)}, parse=True)
tb = datetime.datetime.now() - ta
print cl('<<< fetched entity %s in %sms' % (eid, tb.microseconds/1000.0), 'cyan')
return rs
def retract(self, e, a, v):
""" redact the value of an attribute
"""
ta = datetime.datetime.now()
ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
rs = self.tx(ret)
tb = datetime.datetime.now() - ta
print cl('<<< retracted %s,%s,%s in %sms' % (e,a,v, tb.microseconds/1000.0), 'cyan')
return rs
def datoms(self, index='aevt', e='', a='', v='',
limit=0, offset=0, chunk=100,
start='', end='', since='', as_of='', history='', **kwargs):
""" Returns a lazy generator that will only fetch groups of datoms
at the chunk size specified.
http://docs.datomic.com/clojure/index.html#datomic.api/datoms
"""
assert index in ['aevt','eavt','avet','vaet'], "non-existant index"
data = {'index': index,
'a': ':{0}'.format(a) if a else '',
'v': dump_edn_val(v) if v else '',
'e': int(e) if e else '',
'offset': offset or 0,
'start': start,
'end': end,
'limit': limit,
'history': 'true' if history else '',
'as-of': int(as_of) if as_of else '',
'since': int(since) if since else '',
}
data['limit'] = offset + chunk
rs = True
while rs and (data['offset'] < (limit or 1000000000)):
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/datoms', data=data, parse=True)
if not len(rs):
rs = False
tb = datetime.datetime.now() - ta
print cl('<<< fetched %i datoms at offset %i in %sms' % (
len(rs), data['offset'], tb.microseconds/1000.0), 'cyan')
for r in rs: yield r
data['offset'] += chunk
def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):
""" Rest helpers
"""
r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)
if not r.status in (status_codes if status_codes else (200,201)):
print cl('\n---------\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')
print cl(data, 'red')
print r.headers
raise Exception, "Invalid status code: %s" % r.status
if not parse:
" return raw urllib3 response"
return r
if not self.debug_loads:
" return parsed edn"
return loads(r.data)
"time edn parse time and return parsed edn"
return self.debug(loads, args=(r_data, ), kwargs={},
fmt='<<< parsed edn datastruct in {ms}ms', color='green')
def debug(self, defn, args, kwargs, fmt=None, color='green'):
""" debug timing, colored terminal output
"""
ta = datetime.datetime.now()
rs = defn(*args, **kwargs)
tb = datetime.datetime.now() - ta
fmt = fmt or "processed {defn} in {ms}ms"
logmsg = fmt.format(ms=tb.microseconds/1000.0, defn=defn)
"terminal output"
print cl(logmsg, color)
"logging output"
logging.debug(logmsg)
return rs
def q(self, q, inputs=None, limit='', offset='', history=False):
""" query
"""
if not q.strip().startswith("["): q = "[ {0} ]".format(q)
args = u'[ {:db/alias "%(store)s/%(db)s" %(hist)s} %(inputs)s ]' % dict(
store = self.store,
db = self.db,
hist = ':history true' if history==True else '',
inputs = " ".join(inputs or []))
data = {"args": args,
"q": q,
"offset": offset or '',
"limit": limit or '',
}
return self.rest('GET', self.uri_q, data=data, parse=True)
def find(self, *args, **kwargs):
" new query builder on current db"
return Query(*args, db=self, schema=self.schema)
|
tony-landis/datomic-py | datomic/datomic.py | DB.e | python | def e(self, eid):
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/entity', data={'e':int(eid)}, parse=True)
tb = datetime.datetime.now() - ta
print cl('<<< fetched entity %s in %sms' % (eid, tb.microseconds/1000.0), 'cyan')
return rs | Get an Entity | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L120-L127 | [
"def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):\n \"\"\" Rest helpers\n \"\"\"\n r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)\n if not r.status in (status_codes if status_codes else (200,201)):\n print cl('\\n---------\\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')\n print cl(data, 'red')\n print r.headers\n raise Exception, \"Invalid status code: %s\" % r.status\n if not parse: \n \" return raw urllib3 response\"\n return r\n if not self.debug_loads:\n \" return parsed edn\"\n return loads(r.data)\n \"time edn parse time and return parsed edn\"\n return self.debug(loads, args=(r_data, ), kwargs={},\n fmt='<<< parsed edn datastruct in {ms}ms', color='green')\n"
] | class DB(object):
def __init__(self, host, port, store, db, schema=None, **kwargs):
    """ Assuming the datomic REST service was started this way:
        > bin/rest -p 8888 mem datomic:mem://
        Then get a connection for database name 'test' like this:
        >>> db = DB("localhost", 8888, "mem", "test", schema=S)

    :param host: hostname of the datomic REST service
    :param port: port of the datomic REST service
    :param store: storage alias the service was started with
    :param db: database name
    :param schema: a Schema instance, or a tuple to build one from
    :param kwargs: optional debug_http / debug_loads booleans
    """
    self.host, self.port, self.store, self.db = host, port, store, db
    self.uri_str = "/data/"+ self.store +"/"
    self.uri_db = "/data/"+ self.store +"/"+ self.db +"/"
    self.uri_q = "/api/query"
    # keep-alive pool so consecutive REST calls reuse connections
    self.pool = urllib3.connectionpool.HTTPConnectionPool(
        self.host, port=self.port,
        timeout=3, maxsize=20,
        headers={"Accept":"application/edn", "Connection": "Keep-Alive"})
    "debugging"
    for d in ('debug_http','debug_loads'):
        setattr(self, d, kwargs.get(d) == True)
    "build or use provided Schema"
    if isinstance(schema, Schema):
        self.schema = schema
    elif isinstance(schema, tuple):
        self.schema = Schema(schema)
    else:
        self.schema = None
        # Fix: only warn when an unusable schema value was supplied.
        # Previously the check sat outside the else branch, so the
        # warning also fired for valid Schema/tuple arguments.
        if schema is not None:
            logging.warning("I don't know what to do with schema kwarg of type '%s'" % type(schema))
def create(self):
    """ Creates the database
    >>> db.create()
    True
    """
    # Fix: was `data = data={"db-name":self.db}`, an accidental
    # chained assignment (`data = data = {...}`).
    data = {"db-name": self.db}
    self.rest('POST', self.uri_str, status_codes=(200,201), data=data)
    return True
def info(self):
""" Fetch the current db state
>>> db.info()
{:db/alias "store/db", :basis-t ...}
"""
return self.rest('GET', self.uri_db + '-/')
def tx_schema(self, **kwargs):
""" Builds the data structure edn, and puts it in the db
"""
for s in self.schema.schema:
tx = self.tx(s, **kwargs)
def tx(self, *args, **kwargs):
""" Executes a raw tx string, or get a new TX object to work with.
Passing a raw string or list of strings will immedately transact
and return the API response as a dict.
>>> resp = tx('{:db/id #db/id[:db.part/user] :person/name "Bob"}')
{db-before: db-after: tempids: }
This gets a fresh `TX()` to prepare a transaction with.
>>> tx = db.tx()
New `E()` object with person/fname and person/lname attributes
>>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'})
New state and city objects referencing the state
>>> state = tx.add('loc/state', 'WA')
>>> city = tx.add('loc/city', 'Seattle', 'isin', state)
Add person/city, person/state, and person/likes refs to the person entity
>>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]})
Excute the transaction
>>> resp = tx.tx()
The resolved entity ids for our person
>>> person.eid, state.eid, city.eid
Fetch all attributes, behave like a dict
>>> person.items()
>>> person.iteritems()
Access attribute as an attribute
>>> person['person/name']
See `TX()` for options.
"""
if 0 == len(args): return TX(self)
ops = []
for op in args:
if isinstance(op, list): ops += op
elif isinstance(op, (str,unicode)): ops.append(op)
if 'debug' in kwargs: pp(ops)
tx_proc ="[ %s ]" % "".join(ops)
x = self.rest('POST', self.uri_db, data={"tx-data": tx_proc})
return x
def retract(self, e, a, v):
""" redact the value of an attribute
"""
ta = datetime.datetime.now()
ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
rs = self.tx(ret)
tb = datetime.datetime.now() - ta
print cl('<<< retracted %s,%s,%s in %sms' % (e,a,v, tb.microseconds/1000.0), 'cyan')
return rs
def datoms(self, index='aevt', e='', a='', v='',
limit=0, offset=0, chunk=100,
start='', end='', since='', as_of='', history='', **kwargs):
""" Returns a lazy generator that will only fetch groups of datoms
at the chunk size specified.
http://docs.datomic.com/clojure/index.html#datomic.api/datoms
"""
assert index in ['aevt','eavt','avet','vaet'], "non-existant index"
data = {'index': index,
'a': ':{0}'.format(a) if a else '',
'v': dump_edn_val(v) if v else '',
'e': int(e) if e else '',
'offset': offset or 0,
'start': start,
'end': end,
'limit': limit,
'history': 'true' if history else '',
'as-of': int(as_of) if as_of else '',
'since': int(since) if since else '',
}
data['limit'] = offset + chunk
rs = True
while rs and (data['offset'] < (limit or 1000000000)):
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/datoms', data=data, parse=True)
if not len(rs):
rs = False
tb = datetime.datetime.now() - ta
print cl('<<< fetched %i datoms at offset %i in %sms' % (
len(rs), data['offset'], tb.microseconds/1000.0), 'cyan')
for r in rs: yield r
data['offset'] += chunk
def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):
""" Rest helpers
"""
r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)
if not r.status in (status_codes if status_codes else (200,201)):
print cl('\n---------\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')
print cl(data, 'red')
print r.headers
raise Exception, "Invalid status code: %s" % r.status
if not parse:
" return raw urllib3 response"
return r
if not self.debug_loads:
" return parsed edn"
return loads(r.data)
"time edn parse time and return parsed edn"
return self.debug(loads, args=(r_data, ), kwargs={},
fmt='<<< parsed edn datastruct in {ms}ms', color='green')
def debug(self, defn, args, kwargs, fmt=None, color='green'):
""" debug timing, colored terminal output
"""
ta = datetime.datetime.now()
rs = defn(*args, **kwargs)
tb = datetime.datetime.now() - ta
fmt = fmt or "processed {defn} in {ms}ms"
logmsg = fmt.format(ms=tb.microseconds/1000.0, defn=defn)
"terminal output"
print cl(logmsg, color)
"logging output"
logging.debug(logmsg)
return rs
def q(self, q, inputs=None, limit='', offset='', history=False):
""" query
"""
if not q.strip().startswith("["): q = "[ {0} ]".format(q)
args = u'[ {:db/alias "%(store)s/%(db)s" %(hist)s} %(inputs)s ]' % dict(
store = self.store,
db = self.db,
hist = ':history true' if history==True else '',
inputs = " ".join(inputs or []))
data = {"args": args,
"q": q,
"offset": offset or '',
"limit": limit or '',
}
return self.rest('GET', self.uri_q, data=data, parse=True)
def find(self, *args, **kwargs):
" new query builder on current db"
return Query(*args, db=self, schema=self.schema)
|
tony-landis/datomic-py | datomic/datomic.py | DB.retract | python | def retract(self, e, a, v):
ta = datetime.datetime.now()
ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
rs = self.tx(ret)
tb = datetime.datetime.now() - ta
print cl('<<< retracted %s,%s,%s in %sms' % (e,a,v, tb.microseconds/1000.0), 'cyan')
return rs | redact the value of an attribute | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L129-L137 | [
"def dump_edn_val(v):\n \" edn simple value dump\"\n if isinstance(v, (str, unicode)): \n return json.dumps(v)\n elif isinstance(v, E): \n return unicode(v)\n else: \n return dumps(v)\n",
"def tx(self, *args, **kwargs):\n \"\"\" Executes a raw tx string, or get a new TX object to work with.\n\n Passing a raw string or list of strings will immedately transact \n and return the API response as a dict.\n >>> resp = tx('{:db/id #db/id[:db.part/user] :person/name \"Bob\"}')\n {db-before: db-after: tempids: }\n\n This gets a fresh `TX()` to prepare a transaction with.\n >>> tx = db.tx()\n\n New `E()` object with person/fname and person/lname attributes\n >>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'})\n\n New state and city objects referencing the state\n >>> state = tx.add('loc/state', 'WA')\n >>> city = tx.add('loc/city', 'Seattle', 'isin', state)\n\n Add person/city, person/state, and person/likes refs to the person entity\n >>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]})\n\n Excute the transaction\n >>> resp = tx.tx() \n\n The resolved entity ids for our person\n >>> person.eid, state.eid, city.eid\n\n Fetch all attributes, behave like a dict\n >>> person.items()\n >>> person.iteritems()\n\n Access attribute as an attribute\n >>> person['person/name']\n\n See `TX()` for options.\n\n \"\"\"\n if 0 == len(args): return TX(self) \n ops = []\n for op in args:\n if isinstance(op, list): ops += op\n elif isinstance(op, (str,unicode)): ops.append(op)\n if 'debug' in kwargs: pp(ops)\n tx_proc =\"[ %s ]\" % \"\".join(ops)\n x = self.rest('POST', self.uri_db, data={\"tx-data\": tx_proc})\n return x\n"
] | class DB(object):
def __init__(self, host, port, store, db, schema=None, **kwargs):
    """ Assuming the datomic REST service was started this way:
        > bin/rest -p 8888 mem datomic:mem://
        Then get a connection for database name 'test' like this:
        >>> db = DB("localhost", 8888, "mem", "test", schema=S)

    :param host: hostname of the datomic REST service
    :param port: port of the datomic REST service
    :param store: storage alias the service was started with
    :param db: database name
    :param schema: a Schema instance, or a tuple to build one from
    :param kwargs: optional debug_http / debug_loads booleans
    """
    self.host, self.port, self.store, self.db = host, port, store, db
    self.uri_str = "/data/"+ self.store +"/"
    self.uri_db = "/data/"+ self.store +"/"+ self.db +"/"
    self.uri_q = "/api/query"
    # keep-alive pool so consecutive REST calls reuse connections
    self.pool = urllib3.connectionpool.HTTPConnectionPool(
        self.host, port=self.port,
        timeout=3, maxsize=20,
        headers={"Accept":"application/edn", "Connection": "Keep-Alive"})
    "debugging"
    for d in ('debug_http','debug_loads'):
        setattr(self, d, kwargs.get(d) == True)
    "build or use provided Schema"
    if isinstance(schema, Schema):
        self.schema = schema
    elif isinstance(schema, tuple):
        self.schema = Schema(schema)
    else:
        self.schema = None
        # Fix: only warn when an unusable schema value was supplied.
        # Previously the check sat outside the else branch, so the
        # warning also fired for valid Schema/tuple arguments.
        if schema is not None:
            logging.warning("I don't know what to do with schema kwarg of type '%s'" % type(schema))
def create(self):
    """ Creates the database
    >>> db.create()
    True
    """
    # Fix: was `data = data={"db-name":self.db}`, an accidental
    # chained assignment (`data = data = {...}`).
    data = {"db-name": self.db}
    self.rest('POST', self.uri_str, status_codes=(200,201), data=data)
    return True
def info(self):
""" Fetch the current db state
>>> db.info()
{:db/alias "store/db", :basis-t ...}
"""
return self.rest('GET', self.uri_db + '-/')
def tx_schema(self, **kwargs):
""" Builds the data structure edn, and puts it in the db
"""
for s in self.schema.schema:
tx = self.tx(s, **kwargs)
def tx(self, *args, **kwargs):
""" Executes a raw tx string, or get a new TX object to work with.
Passing a raw string or list of strings will immedately transact
and return the API response as a dict.
>>> resp = tx('{:db/id #db/id[:db.part/user] :person/name "Bob"}')
{db-before: db-after: tempids: }
This gets a fresh `TX()` to prepare a transaction with.
>>> tx = db.tx()
New `E()` object with person/fname and person/lname attributes
>>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'})
New state and city objects referencing the state
>>> state = tx.add('loc/state', 'WA')
>>> city = tx.add('loc/city', 'Seattle', 'isin', state)
Add person/city, person/state, and person/likes refs to the person entity
>>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]})
Excute the transaction
>>> resp = tx.tx()
The resolved entity ids for our person
>>> person.eid, state.eid, city.eid
Fetch all attributes, behave like a dict
>>> person.items()
>>> person.iteritems()
Access attribute as an attribute
>>> person['person/name']
See `TX()` for options.
"""
if 0 == len(args): return TX(self)
ops = []
for op in args:
if isinstance(op, list): ops += op
elif isinstance(op, (str,unicode)): ops.append(op)
if 'debug' in kwargs: pp(ops)
tx_proc ="[ %s ]" % "".join(ops)
x = self.rest('POST', self.uri_db, data={"tx-data": tx_proc})
return x
def e(self, eid):
"""Get an Entity
"""
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/entity', data={'e':int(eid)}, parse=True)
tb = datetime.datetime.now() - ta
print cl('<<< fetched entity %s in %sms' % (eid, tb.microseconds/1000.0), 'cyan')
return rs
def datoms(self, index='aevt', e='', a='', v='',
limit=0, offset=0, chunk=100,
start='', end='', since='', as_of='', history='', **kwargs):
""" Returns a lazy generator that will only fetch groups of datoms
at the chunk size specified.
http://docs.datomic.com/clojure/index.html#datomic.api/datoms
"""
assert index in ['aevt','eavt','avet','vaet'], "non-existant index"
data = {'index': index,
'a': ':{0}'.format(a) if a else '',
'v': dump_edn_val(v) if v else '',
'e': int(e) if e else '',
'offset': offset or 0,
'start': start,
'end': end,
'limit': limit,
'history': 'true' if history else '',
'as-of': int(as_of) if as_of else '',
'since': int(since) if since else '',
}
data['limit'] = offset + chunk
rs = True
while rs and (data['offset'] < (limit or 1000000000)):
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/datoms', data=data, parse=True)
if not len(rs):
rs = False
tb = datetime.datetime.now() - ta
print cl('<<< fetched %i datoms at offset %i in %sms' % (
len(rs), data['offset'], tb.microseconds/1000.0), 'cyan')
for r in rs: yield r
data['offset'] += chunk
def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):
""" Rest helpers
"""
r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)
if not r.status in (status_codes if status_codes else (200,201)):
print cl('\n---------\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')
print cl(data, 'red')
print r.headers
raise Exception, "Invalid status code: %s" % r.status
if not parse:
" return raw urllib3 response"
return r
if not self.debug_loads:
" return parsed edn"
return loads(r.data)
"time edn parse time and return parsed edn"
return self.debug(loads, args=(r_data, ), kwargs={},
fmt='<<< parsed edn datastruct in {ms}ms', color='green')
def debug(self, defn, args, kwargs, fmt=None, color='green'):
""" debug timing, colored terminal output
"""
ta = datetime.datetime.now()
rs = defn(*args, **kwargs)
tb = datetime.datetime.now() - ta
fmt = fmt or "processed {defn} in {ms}ms"
logmsg = fmt.format(ms=tb.microseconds/1000.0, defn=defn)
"terminal output"
print cl(logmsg, color)
"logging output"
logging.debug(logmsg)
return rs
def q(self, q, inputs=None, limit='', offset='', history=False):
""" query
"""
if not q.strip().startswith("["): q = "[ {0} ]".format(q)
args = u'[ {:db/alias "%(store)s/%(db)s" %(hist)s} %(inputs)s ]' % dict(
store = self.store,
db = self.db,
hist = ':history true' if history==True else '',
inputs = " ".join(inputs or []))
data = {"args": args,
"q": q,
"offset": offset or '',
"limit": limit or '',
}
return self.rest('GET', self.uri_q, data=data, parse=True)
def find(self, *args, **kwargs):
" new query builder on current db"
return Query(*args, db=self, schema=self.schema)
|
tony-landis/datomic-py | datomic/datomic.py | DB.datoms | python | def datoms(self, index='aevt', e='', a='', v='',
limit=0, offset=0, chunk=100,
start='', end='', since='', as_of='', history='', **kwargs):
assert index in ['aevt','eavt','avet','vaet'], "non-existant index"
data = {'index': index,
'a': ':{0}'.format(a) if a else '',
'v': dump_edn_val(v) if v else '',
'e': int(e) if e else '',
'offset': offset or 0,
'start': start,
'end': end,
'limit': limit,
'history': 'true' if history else '',
'as-of': int(as_of) if as_of else '',
'since': int(since) if since else '',
}
data['limit'] = offset + chunk
rs = True
while rs and (data['offset'] < (limit or 1000000000)):
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/datoms', data=data, parse=True)
if not len(rs):
rs = False
tb = datetime.datetime.now() - ta
print cl('<<< fetched %i datoms at offset %i in %sms' % (
len(rs), data['offset'], tb.microseconds/1000.0), 'cyan')
for r in rs: yield r
data['offset'] += chunk | Returns a lazy generator that will only fetch groups of datoms
at the chunk size specified.
http://docs.datomic.com/clojure/index.html#datomic.api/datoms | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L140-L172 | [
"def dump_edn_val(v):\n \" edn simple value dump\"\n if isinstance(v, (str, unicode)): \n return json.dumps(v)\n elif isinstance(v, E): \n return unicode(v)\n else: \n return dumps(v)\n",
"def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):\n \"\"\" Rest helpers\n \"\"\"\n r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)\n if not r.status in (status_codes if status_codes else (200,201)):\n print cl('\\n---------\\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')\n print cl(data, 'red')\n print r.headers\n raise Exception, \"Invalid status code: %s\" % r.status\n if not parse: \n \" return raw urllib3 response\"\n return r\n if not self.debug_loads:\n \" return parsed edn\"\n return loads(r.data)\n \"time edn parse time and return parsed edn\"\n return self.debug(loads, args=(r_data, ), kwargs={},\n fmt='<<< parsed edn datastruct in {ms}ms', color='green')\n"
] | class DB(object):
def __init__(self, host, port, store, db, schema=None, **kwargs):
""" Assuming the datomic REST service was started this way:
> bin/rest -p 8888 mem datomic:mem://
Then get a connection for database name 'test' like this:
>>> db = DB("localhost", 8888, "mem", "test", schema=S)
"""
self.host, self.port, self.store, self.db = host, port, store, db
self.uri_str = "/data/"+ self.store +"/"
self.uri_db = "/data/"+ self.store +"/"+ self.db +"/"
self.uri_q = "/api/query"
self.pool = urllib3.connectionpool.HTTPConnectionPool(
self.host, port=self.port,
timeout=3, maxsize=20,
headers={"Accept":"application/edn", "Connection": "Keep-Alive"})
"debugging"
for d in ('debug_http','debug_loads'):
setattr(self, d, kwargs.get(d) == True)
"build or use provided Schema"
if isinstance(schema, Schema):
self.schema = schema
elif isinstance(schema, tuple):
self.schema = Schema(schema)
else:
self.schema = None
if schema is None: return
logging.warning("I don't know what to do with schema kwarg of type '%s'" % type(schema))
def create(self):
""" Creates the database
>>> db.create()
True
"""
data = data={"db-name":self.db}
self.rest('POST', self.uri_str, status_codes=(200,201), data=data)
return True
def info(self):
""" Fetch the current db state
>>> db.info()
{:db/alias "store/db", :basis-t ...}
"""
return self.rest('GET', self.uri_db + '-/')
def tx_schema(self, **kwargs):
""" Builds the data structure edn, and puts it in the db
"""
for s in self.schema.schema:
tx = self.tx(s, **kwargs)
def tx(self, *args, **kwargs):
""" Executes a raw tx string, or get a new TX object to work with.
Passing a raw string or list of strings will immedately transact
and return the API response as a dict.
>>> resp = tx('{:db/id #db/id[:db.part/user] :person/name "Bob"}')
{db-before: db-after: tempids: }
This gets a fresh `TX()` to prepare a transaction with.
>>> tx = db.tx()
New `E()` object with person/fname and person/lname attributes
>>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'})
New state and city objects referencing the state
>>> state = tx.add('loc/state', 'WA')
>>> city = tx.add('loc/city', 'Seattle', 'isin', state)
Add person/city, person/state, and person/likes refs to the person entity
>>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]})
Excute the transaction
>>> resp = tx.tx()
The resolved entity ids for our person
>>> person.eid, state.eid, city.eid
Fetch all attributes, behave like a dict
>>> person.items()
>>> person.iteritems()
Access attribute as an attribute
>>> person['person/name']
See `TX()` for options.
"""
if 0 == len(args): return TX(self)
ops = []
for op in args:
if isinstance(op, list): ops += op
elif isinstance(op, (str,unicode)): ops.append(op)
if 'debug' in kwargs: pp(ops)
tx_proc ="[ %s ]" % "".join(ops)
x = self.rest('POST', self.uri_db, data={"tx-data": tx_proc})
return x
def e(self, eid):
"""Get an Entity
"""
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/entity', data={'e':int(eid)}, parse=True)
tb = datetime.datetime.now() - ta
print cl('<<< fetched entity %s in %sms' % (eid, tb.microseconds/1000.0), 'cyan')
return rs
def retract(self, e, a, v):
""" redact the value of an attribute
"""
ta = datetime.datetime.now()
ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
rs = self.tx(ret)
tb = datetime.datetime.now() - ta
print cl('<<< retracted %s,%s,%s in %sms' % (e,a,v, tb.microseconds/1000.0), 'cyan')
return rs
def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):
""" Rest helpers
"""
r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)
if not r.status in (status_codes if status_codes else (200,201)):
print cl('\n---------\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')
print cl(data, 'red')
print r.headers
raise Exception, "Invalid status code: %s" % r.status
if not parse:
" return raw urllib3 response"
return r
if not self.debug_loads:
" return parsed edn"
return loads(r.data)
"time edn parse time and return parsed edn"
return self.debug(loads, args=(r_data, ), kwargs={},
fmt='<<< parsed edn datastruct in {ms}ms', color='green')
def debug(self, defn, args, kwargs, fmt=None, color='green'):
""" debug timing, colored terminal output
"""
ta = datetime.datetime.now()
rs = defn(*args, **kwargs)
tb = datetime.datetime.now() - ta
fmt = fmt or "processed {defn} in {ms}ms"
logmsg = fmt.format(ms=tb.microseconds/1000.0, defn=defn)
"terminal output"
print cl(logmsg, color)
"logging output"
logging.debug(logmsg)
return rs
def q(self, q, inputs=None, limit='', offset='', history=False):
""" query
"""
if not q.strip().startswith("["): q = "[ {0} ]".format(q)
args = u'[ {:db/alias "%(store)s/%(db)s" %(hist)s} %(inputs)s ]' % dict(
store = self.store,
db = self.db,
hist = ':history true' if history==True else '',
inputs = " ".join(inputs or []))
data = {"args": args,
"q": q,
"offset": offset or '',
"limit": limit or '',
}
return self.rest('GET', self.uri_q, data=data, parse=True)
def find(self, *args, **kwargs):
" new query builder on current db"
return Query(*args, db=self, schema=self.schema)
|
tony-landis/datomic-py | datomic/datomic.py | DB.rest | python | def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):
r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)
if not r.status in (status_codes if status_codes else (200,201)):
print cl('\n---------\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')
print cl(data, 'red')
print r.headers
raise Exception, "Invalid status code: %s" % r.status
if not parse:
" return raw urllib3 response"
return r
if not self.debug_loads:
" return parsed edn"
return loads(r.data)
"time edn parse time and return parsed edn"
return self.debug(loads, args=(r_data, ), kwargs={},
fmt='<<< parsed edn datastruct in {ms}ms', color='green') | Rest helpers | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L174-L191 | [
"def debug(self, defn, args, kwargs, fmt=None, color='green'):\n \"\"\" debug timing, colored terminal output\n \"\"\"\n ta = datetime.datetime.now()\n rs = defn(*args, **kwargs) \n tb = datetime.datetime.now() - ta\n fmt = fmt or \"processed {defn} in {ms}ms\"\n logmsg = fmt.format(ms=tb.microseconds/1000.0, defn=defn)\n \"terminal output\"\n print cl(logmsg, color)\n \"logging output\"\n logging.debug(logmsg)\n return rs\n"
] | class DB(object):
def __init__(self, host, port, store, db, schema=None, **kwargs):
""" Assuming the datomic REST service was started this way:
> bin/rest -p 8888 mem datomic:mem://
Then get a connection for database name 'test' like this:
>>> db = DB("localhost", 8888, "mem", "test", schema=S)
"""
self.host, self.port, self.store, self.db = host, port, store, db
self.uri_str = "/data/"+ self.store +"/"
self.uri_db = "/data/"+ self.store +"/"+ self.db +"/"
self.uri_q = "/api/query"
self.pool = urllib3.connectionpool.HTTPConnectionPool(
self.host, port=self.port,
timeout=3, maxsize=20,
headers={"Accept":"application/edn", "Connection": "Keep-Alive"})
"debugging"
for d in ('debug_http','debug_loads'):
setattr(self, d, kwargs.get(d) == True)
"build or use provided Schema"
if isinstance(schema, Schema):
self.schema = schema
elif isinstance(schema, tuple):
self.schema = Schema(schema)
else:
self.schema = None
if schema is None: return
logging.warning("I don't know what to do with schema kwarg of type '%s'" % type(schema))
def create(self):
""" Creates the database
>>> db.create()
True
"""
data = data={"db-name":self.db}
self.rest('POST', self.uri_str, status_codes=(200,201), data=data)
return True
def info(self):
""" Fetch the current db state
>>> db.info()
{:db/alias "store/db", :basis-t ...}
"""
return self.rest('GET', self.uri_db + '-/')
def tx_schema(self, **kwargs):
""" Builds the data structure edn, and puts it in the db
"""
for s in self.schema.schema:
tx = self.tx(s, **kwargs)
def tx(self, *args, **kwargs):
""" Executes a raw tx string, or get a new TX object to work with.
Passing a raw string or list of strings will immedately transact
and return the API response as a dict.
>>> resp = tx('{:db/id #db/id[:db.part/user] :person/name "Bob"}')
{db-before: db-after: tempids: }
This gets a fresh `TX()` to prepare a transaction with.
>>> tx = db.tx()
New `E()` object with person/fname and person/lname attributes
>>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'})
New state and city objects referencing the state
>>> state = tx.add('loc/state', 'WA')
>>> city = tx.add('loc/city', 'Seattle', 'isin', state)
Add person/city, person/state, and person/likes refs to the person entity
>>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]})
Excute the transaction
>>> resp = tx.tx()
The resolved entity ids for our person
>>> person.eid, state.eid, city.eid
Fetch all attributes, behave like a dict
>>> person.items()
>>> person.iteritems()
Access attribute as an attribute
>>> person['person/name']
See `TX()` for options.
"""
if 0 == len(args): return TX(self)
ops = []
for op in args:
if isinstance(op, list): ops += op
elif isinstance(op, (str,unicode)): ops.append(op)
if 'debug' in kwargs: pp(ops)
tx_proc ="[ %s ]" % "".join(ops)
x = self.rest('POST', self.uri_db, data={"tx-data": tx_proc})
return x
def e(self, eid):
"""Get an Entity
"""
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/entity', data={'e':int(eid)}, parse=True)
tb = datetime.datetime.now() - ta
print cl('<<< fetched entity %s in %sms' % (eid, tb.microseconds/1000.0), 'cyan')
return rs
def retract(self, e, a, v):
""" redact the value of an attribute
"""
ta = datetime.datetime.now()
ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
rs = self.tx(ret)
tb = datetime.datetime.now() - ta
print cl('<<< retracted %s,%s,%s in %sms' % (e,a,v, tb.microseconds/1000.0), 'cyan')
return rs
def datoms(self, index='aevt', e='', a='', v='',
limit=0, offset=0, chunk=100,
start='', end='', since='', as_of='', history='', **kwargs):
""" Returns a lazy generator that will only fetch groups of datoms
at the chunk size specified.
http://docs.datomic.com/clojure/index.html#datomic.api/datoms
"""
assert index in ['aevt','eavt','avet','vaet'], "non-existant index"
data = {'index': index,
'a': ':{0}'.format(a) if a else '',
'v': dump_edn_val(v) if v else '',
'e': int(e) if e else '',
'offset': offset or 0,
'start': start,
'end': end,
'limit': limit,
'history': 'true' if history else '',
'as-of': int(as_of) if as_of else '',
'since': int(since) if since else '',
}
data['limit'] = offset + chunk
rs = True
while rs and (data['offset'] < (limit or 1000000000)):
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/datoms', data=data, parse=True)
if not len(rs):
rs = False
tb = datetime.datetime.now() - ta
print cl('<<< fetched %i datoms at offset %i in %sms' % (
len(rs), data['offset'], tb.microseconds/1000.0), 'cyan')
for r in rs: yield r
data['offset'] += chunk
def debug(self, defn, args, kwargs, fmt=None, color='green'):
""" debug timing, colored terminal output
"""
ta = datetime.datetime.now()
rs = defn(*args, **kwargs)
tb = datetime.datetime.now() - ta
fmt = fmt or "processed {defn} in {ms}ms"
logmsg = fmt.format(ms=tb.microseconds/1000.0, defn=defn)
"terminal output"
print cl(logmsg, color)
"logging output"
logging.debug(logmsg)
return rs
def q(self, q, inputs=None, limit='', offset='', history=False):
""" query
"""
if not q.strip().startswith("["): q = "[ {0} ]".format(q)
args = u'[ {:db/alias "%(store)s/%(db)s" %(hist)s} %(inputs)s ]' % dict(
store = self.store,
db = self.db,
hist = ':history true' if history==True else '',
inputs = " ".join(inputs or []))
data = {"args": args,
"q": q,
"offset": offset or '',
"limit": limit or '',
}
return self.rest('GET', self.uri_q, data=data, parse=True)
def find(self, *args, **kwargs):
" new query builder on current db"
return Query(*args, db=self, schema=self.schema)
|
tony-landis/datomic-py | datomic/datomic.py | DB.debug | python | def debug(self, defn, args, kwargs, fmt=None, color='green'):
ta = datetime.datetime.now()
rs = defn(*args, **kwargs)
tb = datetime.datetime.now() - ta
fmt = fmt or "processed {defn} in {ms}ms"
logmsg = fmt.format(ms=tb.microseconds/1000.0, defn=defn)
"terminal output"
print cl(logmsg, color)
"logging output"
logging.debug(logmsg)
return rs | debug timing, colored terminal output | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L193-L205 | null | class DB(object):
def __init__(self, host, port, store, db, schema=None, **kwargs):
""" Assuming the datomic REST service was started this way:
> bin/rest -p 8888 mem datomic:mem://
Then get a connection for database name 'test' like this:
>>> db = DB("localhost", 8888, "mem", "test", schema=S)
"""
self.host, self.port, self.store, self.db = host, port, store, db
self.uri_str = "/data/"+ self.store +"/"
self.uri_db = "/data/"+ self.store +"/"+ self.db +"/"
self.uri_q = "/api/query"
self.pool = urllib3.connectionpool.HTTPConnectionPool(
self.host, port=self.port,
timeout=3, maxsize=20,
headers={"Accept":"application/edn", "Connection": "Keep-Alive"})
"debugging"
for d in ('debug_http','debug_loads'):
setattr(self, d, kwargs.get(d) == True)
"build or use provided Schema"
if isinstance(schema, Schema):
self.schema = schema
elif isinstance(schema, tuple):
self.schema = Schema(schema)
else:
self.schema = None
if schema is None: return
logging.warning("I don't know what to do with schema kwarg of type '%s'" % type(schema))
def create(self):
""" Creates the database
>>> db.create()
True
"""
data = data={"db-name":self.db}
self.rest('POST', self.uri_str, status_codes=(200,201), data=data)
return True
def info(self):
""" Fetch the current db state
>>> db.info()
{:db/alias "store/db", :basis-t ...}
"""
return self.rest('GET', self.uri_db + '-/')
def tx_schema(self, **kwargs):
""" Builds the data structure edn, and puts it in the db
"""
for s in self.schema.schema:
tx = self.tx(s, **kwargs)
def tx(self, *args, **kwargs):
""" Executes a raw tx string, or get a new TX object to work with.
Passing a raw string or list of strings will immedately transact
and return the API response as a dict.
>>> resp = tx('{:db/id #db/id[:db.part/user] :person/name "Bob"}')
{db-before: db-after: tempids: }
This gets a fresh `TX()` to prepare a transaction with.
>>> tx = db.tx()
New `E()` object with person/fname and person/lname attributes
>>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'})
New state and city objects referencing the state
>>> state = tx.add('loc/state', 'WA')
>>> city = tx.add('loc/city', 'Seattle', 'isin', state)
Add person/city, person/state, and person/likes refs to the person entity
>>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]})
Excute the transaction
>>> resp = tx.tx()
The resolved entity ids for our person
>>> person.eid, state.eid, city.eid
Fetch all attributes, behave like a dict
>>> person.items()
>>> person.iteritems()
Access attribute as an attribute
>>> person['person/name']
See `TX()` for options.
"""
if 0 == len(args): return TX(self)
ops = []
for op in args:
if isinstance(op, list): ops += op
elif isinstance(op, (str,unicode)): ops.append(op)
if 'debug' in kwargs: pp(ops)
tx_proc ="[ %s ]" % "".join(ops)
x = self.rest('POST', self.uri_db, data={"tx-data": tx_proc})
return x
def e(self, eid):
"""Get an Entity
"""
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/entity', data={'e':int(eid)}, parse=True)
tb = datetime.datetime.now() - ta
print cl('<<< fetched entity %s in %sms' % (eid, tb.microseconds/1000.0), 'cyan')
return rs
def retract(self, e, a, v):
""" redact the value of an attribute
"""
ta = datetime.datetime.now()
ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
rs = self.tx(ret)
tb = datetime.datetime.now() - ta
print cl('<<< retracted %s,%s,%s in %sms' % (e,a,v, tb.microseconds/1000.0), 'cyan')
return rs
def datoms(self, index='aevt', e='', a='', v='',
limit=0, offset=0, chunk=100,
start='', end='', since='', as_of='', history='', **kwargs):
""" Returns a lazy generator that will only fetch groups of datoms
at the chunk size specified.
http://docs.datomic.com/clojure/index.html#datomic.api/datoms
"""
assert index in ['aevt','eavt','avet','vaet'], "non-existant index"
data = {'index': index,
'a': ':{0}'.format(a) if a else '',
'v': dump_edn_val(v) if v else '',
'e': int(e) if e else '',
'offset': offset or 0,
'start': start,
'end': end,
'limit': limit,
'history': 'true' if history else '',
'as-of': int(as_of) if as_of else '',
'since': int(since) if since else '',
}
data['limit'] = offset + chunk
rs = True
while rs and (data['offset'] < (limit or 1000000000)):
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/datoms', data=data, parse=True)
if not len(rs):
rs = False
tb = datetime.datetime.now() - ta
print cl('<<< fetched %i datoms at offset %i in %sms' % (
len(rs), data['offset'], tb.microseconds/1000.0), 'cyan')
for r in rs: yield r
data['offset'] += chunk
def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):
""" Rest helpers
"""
r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)
if not r.status in (status_codes if status_codes else (200,201)):
print cl('\n---------\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')
print cl(data, 'red')
print r.headers
raise Exception, "Invalid status code: %s" % r.status
if not parse:
" return raw urllib3 response"
return r
if not self.debug_loads:
" return parsed edn"
return loads(r.data)
"time edn parse time and return parsed edn"
return self.debug(loads, args=(r_data, ), kwargs={},
fmt='<<< parsed edn datastruct in {ms}ms', color='green')
def q(self, q, inputs=None, limit='', offset='', history=False):
""" query
"""
if not q.strip().startswith("["): q = "[ {0} ]".format(q)
args = u'[ {:db/alias "%(store)s/%(db)s" %(hist)s} %(inputs)s ]' % dict(
store = self.store,
db = self.db,
hist = ':history true' if history==True else '',
inputs = " ".join(inputs or []))
data = {"args": args,
"q": q,
"offset": offset or '',
"limit": limit or '',
}
return self.rest('GET', self.uri_q, data=data, parse=True)
def find(self, *args, **kwargs):
" new query builder on current db"
return Query(*args, db=self, schema=self.schema)
|
tony-landis/datomic-py | datomic/datomic.py | DB.q | python | def q(self, q, inputs=None, limit='', offset='', history=False):
if not q.strip().startswith("["): q = "[ {0} ]".format(q)
args = u'[ {:db/alias "%(store)s/%(db)s" %(hist)s} %(inputs)s ]' % dict(
store = self.store,
db = self.db,
hist = ':history true' if history==True else '',
inputs = " ".join(inputs or []))
data = {"args": args,
"q": q,
"offset": offset or '',
"limit": limit or '',
}
return self.rest('GET', self.uri_q, data=data, parse=True) | query | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L207-L221 | [
"def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):\n \"\"\" Rest helpers\n \"\"\"\n r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)\n if not r.status in (status_codes if status_codes else (200,201)):\n print cl('\\n---------\\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')\n print cl(data, 'red')\n print r.headers\n raise Exception, \"Invalid status code: %s\" % r.status\n if not parse: \n \" return raw urllib3 response\"\n return r\n if not self.debug_loads:\n \" return parsed edn\"\n return loads(r.data)\n \"time edn parse time and return parsed edn\"\n return self.debug(loads, args=(r_data, ), kwargs={},\n fmt='<<< parsed edn datastruct in {ms}ms', color='green')\n"
] | class DB(object):
def __init__(self, host, port, store, db, schema=None, **kwargs):
""" Assuming the datomic REST service was started this way:
> bin/rest -p 8888 mem datomic:mem://
Then get a connection for database name 'test' like this:
>>> db = DB("localhost", 8888, "mem", "test", schema=S)
"""
self.host, self.port, self.store, self.db = host, port, store, db
self.uri_str = "/data/"+ self.store +"/"
self.uri_db = "/data/"+ self.store +"/"+ self.db +"/"
self.uri_q = "/api/query"
self.pool = urllib3.connectionpool.HTTPConnectionPool(
self.host, port=self.port,
timeout=3, maxsize=20,
headers={"Accept":"application/edn", "Connection": "Keep-Alive"})
"debugging"
for d in ('debug_http','debug_loads'):
setattr(self, d, kwargs.get(d) == True)
"build or use provided Schema"
if isinstance(schema, Schema):
self.schema = schema
elif isinstance(schema, tuple):
self.schema = Schema(schema)
else:
self.schema = None
if schema is None: return
logging.warning("I don't know what to do with schema kwarg of type '%s'" % type(schema))
def create(self):
""" Creates the database
>>> db.create()
True
"""
data = data={"db-name":self.db}
self.rest('POST', self.uri_str, status_codes=(200,201), data=data)
return True
def info(self):
""" Fetch the current db state
>>> db.info()
{:db/alias "store/db", :basis-t ...}
"""
return self.rest('GET', self.uri_db + '-/')
def tx_schema(self, **kwargs):
""" Builds the data structure edn, and puts it in the db
"""
for s in self.schema.schema:
tx = self.tx(s, **kwargs)
def tx(self, *args, **kwargs):
""" Executes a raw tx string, or get a new TX object to work with.
Passing a raw string or list of strings will immedately transact
and return the API response as a dict.
>>> resp = tx('{:db/id #db/id[:db.part/user] :person/name "Bob"}')
{db-before: db-after: tempids: }
This gets a fresh `TX()` to prepare a transaction with.
>>> tx = db.tx()
New `E()` object with person/fname and person/lname attributes
>>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'})
New state and city objects referencing the state
>>> state = tx.add('loc/state', 'WA')
>>> city = tx.add('loc/city', 'Seattle', 'isin', state)
Add person/city, person/state, and person/likes refs to the person entity
>>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]})
Excute the transaction
>>> resp = tx.tx()
The resolved entity ids for our person
>>> person.eid, state.eid, city.eid
Fetch all attributes, behave like a dict
>>> person.items()
>>> person.iteritems()
Access attribute as an attribute
>>> person['person/name']
See `TX()` for options.
"""
if 0 == len(args): return TX(self)
ops = []
for op in args:
if isinstance(op, list): ops += op
elif isinstance(op, (str,unicode)): ops.append(op)
if 'debug' in kwargs: pp(ops)
tx_proc ="[ %s ]" % "".join(ops)
x = self.rest('POST', self.uri_db, data={"tx-data": tx_proc})
return x
def e(self, eid):
"""Get an Entity
"""
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/entity', data={'e':int(eid)}, parse=True)
tb = datetime.datetime.now() - ta
print cl('<<< fetched entity %s in %sms' % (eid, tb.microseconds/1000.0), 'cyan')
return rs
def retract(self, e, a, v):
""" redact the value of an attribute
"""
ta = datetime.datetime.now()
ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
rs = self.tx(ret)
tb = datetime.datetime.now() - ta
print cl('<<< retracted %s,%s,%s in %sms' % (e,a,v, tb.microseconds/1000.0), 'cyan')
return rs
def datoms(self, index='aevt', e='', a='', v='',
limit=0, offset=0, chunk=100,
start='', end='', since='', as_of='', history='', **kwargs):
""" Returns a lazy generator that will only fetch groups of datoms
at the chunk size specified.
http://docs.datomic.com/clojure/index.html#datomic.api/datoms
"""
assert index in ['aevt','eavt','avet','vaet'], "non-existant index"
data = {'index': index,
'a': ':{0}'.format(a) if a else '',
'v': dump_edn_val(v) if v else '',
'e': int(e) if e else '',
'offset': offset or 0,
'start': start,
'end': end,
'limit': limit,
'history': 'true' if history else '',
'as-of': int(as_of) if as_of else '',
'since': int(since) if since else '',
}
data['limit'] = offset + chunk
rs = True
while rs and (data['offset'] < (limit or 1000000000)):
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/datoms', data=data, parse=True)
if not len(rs):
rs = False
tb = datetime.datetime.now() - ta
print cl('<<< fetched %i datoms at offset %i in %sms' % (
len(rs), data['offset'], tb.microseconds/1000.0), 'cyan')
for r in rs: yield r
data['offset'] += chunk
def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):
""" Rest helpers
"""
r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)
if not r.status in (status_codes if status_codes else (200,201)):
print cl('\n---------\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')
print cl(data, 'red')
print r.headers
raise Exception, "Invalid status code: %s" % r.status
if not parse:
" return raw urllib3 response"
return r
if not self.debug_loads:
" return parsed edn"
return loads(r.data)
"time edn parse time and return parsed edn"
return self.debug(loads, args=(r_data, ), kwargs={},
fmt='<<< parsed edn datastruct in {ms}ms', color='green')
def debug(self, defn, args, kwargs, fmt=None, color='green'):
""" debug timing, colored terminal output
"""
ta = datetime.datetime.now()
rs = defn(*args, **kwargs)
tb = datetime.datetime.now() - ta
fmt = fmt or "processed {defn} in {ms}ms"
logmsg = fmt.format(ms=tb.microseconds/1000.0, defn=defn)
"terminal output"
print cl(logmsg, color)
"logging output"
logging.debug(logmsg)
return rs
  def find(self, *args, **kwargs):
    " new query builder on current db"
    # args become the Query's :find spec; kwargs are accepted but unused
    return Query(*args, db=self, schema=self.schema)
|
tony-landis/datomic-py | datomic/datomic.py | DB.find | python | def find(self, *args, **kwargs):
" new query builder on current db"
return Query(*args, db=self, schema=self.schema) | new query builder on current db | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L223-L225 | null | class DB(object):
  def __init__(self, host, port, store, db, schema=None, **kwargs):
    """ Assuming the datomic REST service was started this way:
    > bin/rest -p 8888 mem datomic:mem://
    Then get a connection for database name 'test' like this:
    >>> db = DB("localhost", 8888, "mem", "test", schema=S)

    :param host: hostname of the datomic REST service
    :param port: port the REST service listens on
    :param store: storage alias the service was started with
    :param db: database name within that storage
    :param schema: a Schema instance, or a tuple of schema defs to wrap
    :param kwargs: debug_http / debug_loads booleans for verbose output
    """
    self.host, self.port, self.store, self.db = host, port, store, db
    # pre-built URI fragments for the storage, db, and query endpoints
    self.uri_str = "/data/"+ self.store +"/"
    self.uri_db = "/data/"+ self.store +"/"+ self.db +"/"
    self.uri_q = "/api/query"
    # keep-alive pool; every request asks for application/edn responses
    self.pool = urllib3.connectionpool.HTTPConnectionPool(
        self.host, port=self.port,
        timeout=3, maxsize=20,
        headers={"Accept":"application/edn", "Connection": "Keep-Alive"})
    "debugging"
    # expose debug flags as attributes, defaulting to False
    for d in ('debug_http','debug_loads'):
      setattr(self, d, kwargs.get(d) == True)
    "build or use provided Schema"
    if isinstance(schema, Schema):
      self.schema = schema
    elif isinstance(schema, tuple):
      self.schema = Schema(schema)
    else:
      self.schema = None
      if schema is None: return
      logging.warning("I don't know what to do with schema kwarg of type '%s'" % type(schema))
def create(self):
""" Creates the database
>>> db.create()
True
"""
data = data={"db-name":self.db}
self.rest('POST', self.uri_str, status_codes=(200,201), data=data)
return True
  def info(self):
    """ Fetch the current db state
    >>> db.info()
    {:db/alias "store/db", :basis-t ...}
    """
    # GET <db>/-/ returns a small edn map describing the database
    return self.rest('GET', self.uri_db + '-/')
def tx_schema(self, **kwargs):
""" Builds the data structure edn, and puts it in the db
"""
for s in self.schema.schema:
tx = self.tx(s, **kwargs)
  def tx(self, *args, **kwargs):
    """ Executes a raw tx string, or get a new TX object to work with.
    Passing a raw string or list of strings will immediately transact
    and return the API response as a dict.
    >>> resp = tx('{:db/id #db/id[:db.part/user] :person/name "Bob"}')
    {db-before: db-after: tempids: }
    This gets a fresh `TX()` to prepare a transaction with.
    >>> tx = db.tx()
    New `E()` object with person/fname and person/lname attributes
    >>> person = tx.add('person/', {'fname':'John', 'lname':'Doe'})
    New state and city objects referencing the state
    >>> state = tx.add('loc/state', 'WA')
    >>> city = tx.add('loc/city', 'Seattle', 'isin', state)
    Add person/city, person/state, and person/likes refs to the person entity
    >>> person.add('person/', {'city': city, 'state': state, 'likes': [city, state]})
    Execute the transaction
    >>> resp = tx.tx()
    The resolved entity ids for our person
    >>> person.eid, state.eid, city.eid
    Fetch all attributes, behave like a dict
    >>> person.items()
    >>> person.iteritems()
    Access attribute as an attribute
    >>> person['person/name']
    See `TX()` for options.
    """
    # no args -> hand back a TX builder bound to this DB
    if 0 == len(args): return TX(self)
    # flatten: each arg may be one edn op string or a list of them
    ops = []
    for op in args:
      if isinstance(op, list): ops += op
      elif isinstance(op, (str,unicode)): ops.append(op)
    if 'debug' in kwargs: pp(ops)
    # wrap the ops in an edn vector and POST as tx-data
    tx_proc ="[ %s ]" % "".join(ops)
    x = self.rest('POST', self.uri_db, data={"tx-data": tx_proc})
    return x
  def e(self, eid):
    """Get an Entity by numeric entity id; returns the parsed edn map
    of its attributes. Fetch time is printed for debugging.
    """
    ta = datetime.datetime.now()
    rs = self.rest('GET', self.uri_db + '-/entity', data={'e':int(eid)}, parse=True)
    tb = datetime.datetime.now() - ta
    print cl('<<< fetched entity %s in %sms' % (eid, tb.microseconds/1000.0), 'cyan')
    return rs
  def retract(self, e, a, v):
    """ Retract the value of an attribute: transacts
    [:db/retract <e> :<a> <v>] and returns the tx response.

    :param e: entity id (int)
    :param a: attribute name without the leading colon
    :param v: value, serialized via dump_edn_val
    """
    ta = datetime.datetime.now()
    ret = u"[:db/retract %i :%s %s]" % (e, a, dump_edn_val(v))
    rs = self.tx(ret)
    tb = datetime.datetime.now() - ta
    print cl('<<< retracted %s,%s,%s in %sms' % (e,a,v, tb.microseconds/1000.0), 'cyan')
    return rs
def datoms(self, index='aevt', e='', a='', v='',
limit=0, offset=0, chunk=100,
start='', end='', since='', as_of='', history='', **kwargs):
""" Returns a lazy generator that will only fetch groups of datoms
at the chunk size specified.
http://docs.datomic.com/clojure/index.html#datomic.api/datoms
"""
assert index in ['aevt','eavt','avet','vaet'], "non-existant index"
data = {'index': index,
'a': ':{0}'.format(a) if a else '',
'v': dump_edn_val(v) if v else '',
'e': int(e) if e else '',
'offset': offset or 0,
'start': start,
'end': end,
'limit': limit,
'history': 'true' if history else '',
'as-of': int(as_of) if as_of else '',
'since': int(since) if since else '',
}
data['limit'] = offset + chunk
rs = True
while rs and (data['offset'] < (limit or 1000000000)):
ta = datetime.datetime.now()
rs = self.rest('GET', self.uri_db + '-/datoms', data=data, parse=True)
if not len(rs):
rs = False
tb = datetime.datetime.now() - ta
print cl('<<< fetched %i datoms at offset %i in %sms' % (
len(rs), data['offset'], tb.microseconds/1000.0), 'cyan')
for r in rs: yield r
data['offset'] += chunk
def rest(self, method, uri, data=None, status_codes=None, parse=True, **kwargs):
""" Rest helpers
"""
r = self.pool.request_encode_body(method, uri, fields=data, encode_multipart=False)
if not r.status in (status_codes if status_codes else (200,201)):
print cl('\n---------\nURI / REQUEST TYPE : %s %s' % (uri, method), 'red')
print cl(data, 'red')
print r.headers
raise Exception, "Invalid status code: %s" % r.status
if not parse:
" return raw urllib3 response"
return r
if not self.debug_loads:
" return parsed edn"
return loads(r.data)
"time edn parse time and return parsed edn"
return self.debug(loads, args=(r_data, ), kwargs={},
fmt='<<< parsed edn datastruct in {ms}ms', color='green')
  def debug(self, defn, args, kwargs, fmt=None, color='green'):
    """ Call `defn(*args, **kwargs)`, timing it, with colored terminal
    output and a logging.debug() record; returns defn's result.

    :param fmt: message template with {ms} and {defn} placeholders
    :param color: terminal color name passed to cl()
    """
    ta = datetime.datetime.now()
    rs = defn(*args, **kwargs)
    tb = datetime.datetime.now() - ta
    fmt = fmt or "processed {defn} in {ms}ms"
    # NOTE(review): only .microseconds is used, so durations >= 1 second
    # under-report -- confirm calls stay sub-second or use total_seconds().
    logmsg = fmt.format(ms=tb.microseconds/1000.0, defn=defn)
    "terminal output"
    print cl(logmsg, color)
    "logging output"
    logging.debug(logmsg)
    return rs
  def q(self, q, inputs=None, limit='', offset='', history=False):
    """ Run a datalog query against this db via the /api/query endpoint.

    :param q: query string; wrapped in [ ... ] if not already a vector
    :param inputs: extra edn input strings appended to the args vector
    :param limit, offset: result paging, passed through to the REST api
    :param history: query the history db when True
    """
    if not q.strip().startswith("["): q = "[ {0} ]".format(q)
    # first arg is always the implicit db alias (optionally :history)
    args = u'[ {:db/alias "%(store)s/%(db)s" %(hist)s} %(inputs)s ]' % dict(
        store = self.store,
        db = self.db,
        hist = ':history true' if history==True else '',
        inputs = " ".join(inputs or []))
    data = {"args": args,
            "q": q,
            "offset": offset or '',
            "limit": limit or '',
            }
    return self.rest('GET', self.uri_q, data=data, parse=True)
|
tony-landis/datomic-py | datomic/datomic.py | Query.find | python | def find(self, *args, **kwargs):
" :find "
if args[0] is all:
pass # finds all
else:
[(self._find.append(x)) for x in args]
return self | :find | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L254-L260 | null | class Query(object):
""" chainable query builder"
>>> db.find('?e ?a') # default find
>>> q.where() # with add
>>> q.ins() # in add
"""
  def __init__(self, find, db=None, schema=None):
    """ Collect the pieces of a datalog query for later serialization.

    :param find: :find clause string(s), or the builtin `all` to have
                 the vars derived from the :where clauses in _toedn()
    :param db: DB instance the query will execute against
    :param schema: optional Schema for attribute lookups
    """
    self.db = db
    self.schema = schema
    self._find = []      # :find symbols
    self._where = []     # :where clauses
    self._input = []     # (:in symbol, argument) pairs
    self._limit = None   # result paging
    self._offset = None
    self._history = False
    self.find(find)
def __repr__(self):
return " ".join([str(self._find), str(self._in), str(self._where)])
def where(self, *args, **kwargs):
" :where "
[(self._where.append(x)) for x in args]
return self
def fulltext(self, attr, s, q, e, v):
self._where.append("(fulltext $ {0} {1}) [[{2} {3}]]".format(attr, s, e, v))
self._input.append((s, q))
  def param(self, *args, **kwargs):
    " :in "
    # args are consumed pairwise as (:in symbol, value); the edn binding
    # form is chosen from the value's python type.
    for first, second in pairwise(args):
      if isinstance(second, list):
        if not isinstance(second[0], list):
          " add a logical _or_ "
          # flat list -> collection binding [?x ...]
          self._input.append((
              u"[{0} ...]".format(first), second))
        else:
          " relations, list of list"
          # list of lists -> relation binding [[?x]]
          self._input.append((
              u"[[{0}]]".format(first), second))
      elif isinstance(second, tuple):
        " tuple "
        # tuple -> tuple binding [?x]
        self._input.append((
            u"[{0}]".format(first), list(second)))
      else:
        " nothing special "
        # scalar binding
        self._input.append((first,second))
    return self
  def limit(self, limit):
    """ Cap the number of result rows; chainable. """
    self._limit = limit
    return self
  def offset(self, offset):
    """ Skip the first `offset` result rows; chainable. """
    self._offset = offset
    return self
def history(self, history):
self._offset = history
return self
  def hashone(self):
    "execute query, get back a dict of var-name -> value"
    # run with limit 1, then zip the ?var names (minus the '?') against
    # the single result row; empty dict when nothing matched
    rs = self.one()
    if not rs:
      return {}
    else:
      finds = " ".join(self._find).split(' ')
      return dict(zip((x.replace('?','') for x in finds), rs))
  def one(self):
    "execute query, get a single list (first row), or None if no match"
    self.limit(1)
    rs = self.all()
    if not rs:
      return None
    else:
      return rs[0]
  def all(self):
    " execute query, get all list of lists"
    # serialize to edn, then run against the DB with any paging options
    query,inputs = self._toedn()
    return self.db.q(query,
                     inputs = inputs,
                     limit = self._limit,
                     offset = self._offset,
                     history = self._history)
  def _toedn(self):
    """ Prepare the query for the rest api.

    Returns (query-string, args-list): the edn [:find ... :in ... :where ...]
    vector and the edn-serialized input arguments.
    """
    finds = u""
    inputs = u""
    wheres = u""
    args = []
    ": in and args"
    # each collected (_input) pair contributes an :in symbol and an arg
    for a,b in self._input:
      inputs += " {0}".format(a)
      args.append(dump_edn_val(b))
    if inputs:
      inputs = u":in ${0}".format(inputs)
    " :where "
    # clauses may be single strings or lists of strings
    for where in self._where:
      if isinstance(where, (str,unicode)):
        wheres += u"[{0}]".format(where)
      elif isinstance(where, (list)):
        wheres += u" ".join([u"[{0}]".format(w) for w in where])
    " find: "
    if self._find == []: #find all
      # no explicit :find -> collect every ?var mentioned in the wheres
      fs = set()
      for p in wheres.replace('[',' ').replace(']',' ').split(' '):
        if p.startswith('?'):
          fs.add(p)
      self._find = list(fs)
    finds = " ".join(self._find)
    " all together now..."
    q = u"""[ :find {0} {1} :where {2} ]""".\
        format( finds, inputs, wheres)
    return q,args
|
tony-landis/datomic-py | datomic/datomic.py | Query.where | python | def where(self, *args, **kwargs):
" :where "
[(self._where.append(x)) for x in args]
return self | :where | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L262-L265 | null | class Query(object):
""" chainable query builder"
>>> db.find('?e ?a') # default find
>>> q.where() # with add
>>> q.ins() # in add
"""
def __init__(self, find, db=None, schema=None):
self.db = db
self.schema = schema
self._find = []
self._where = []
self._input = []
self._limit = None
self._offset = None
self._history = False
self.find(find)
def __repr__(self):
return " ".join([str(self._find), str(self._in), str(self._where)])
def find(self, *args, **kwargs):
" :find "
if args[0] is all:
pass # finds all
else:
[(self._find.append(x)) for x in args]
return self
def fulltext(self, attr, s, q, e, v):
self._where.append("(fulltext $ {0} {1}) [[{2} {3}]]".format(attr, s, e, v))
self._input.append((s, q))
def param(self, *args, **kwargs):
" :in "
for first, second in pairwise(args):
if isinstance(second, list):
if not isinstance(second[0], list):
" add a logical _or_ "
self._input.append((
u"[{0} ...]".format(first), second))
else:
" relations, list of list"
self._input.append((
u"[[{0}]]".format(first), second))
elif isinstance(second, tuple):
" tuple "
self._input.append((
u"[{0}]".format(first), list(second)))
else:
" nothing special "
self._input.append((first,second))
return self
def limit(self, limit):
self._limit = limit
return self
def offset(self, offset):
self._offset = offset
return self
def history(self, history):
self._offset = history
return self
def hashone(self):
"execute query, get back"
rs = self.one()
if not rs:
return {}
else:
finds = " ".join(self._find).split(' ')
return dict(zip((x.replace('?','') for x in finds), rs))
def one(self):
"execute query, get a single list"
self.limit(1)
rs = self.all()
if not rs:
return None
else:
return rs[0]
def all(self):
" execute query, get all list of lists"
query,inputs = self._toedn()
return self.db.q(query,
inputs = inputs,
limit = self._limit,
offset = self._offset,
history = self._history)
def _toedn(self):
""" prepare the query for the rest api
"""
finds = u""
inputs = u""
wheres = u""
args = []
": in and args"
for a,b in self._input:
inputs += " {0}".format(a)
args.append(dump_edn_val(b))
if inputs:
inputs = u":in ${0}".format(inputs)
" :where "
for where in self._where:
if isinstance(where, (str,unicode)):
wheres += u"[{0}]".format(where)
elif isinstance(where, (list)):
wheres += u" ".join([u"[{0}]".format(w) for w in where])
" find: "
if self._find == []: #find all
fs = set()
for p in wheres.replace('[',' ').replace(']',' ').split(' '):
if p.startswith('?'):
fs.add(p)
self._find = list(fs)
finds = " ".join(self._find)
" all togethr now..."
q = u"""[ :find {0} {1} :where {2} ]""".\
format( finds, inputs, wheres)
return q,args
|
tony-landis/datomic-py | datomic/datomic.py | Query.param | python | def param(self, *args, **kwargs):
" :in "
for first, second in pairwise(args):
if isinstance(second, list):
if not isinstance(second[0], list):
" add a logical _or_ "
self._input.append((
u"[{0} ...]".format(first), second))
else:
" relations, list of list"
self._input.append((
u"[[{0}]]".format(first), second))
elif isinstance(second, tuple):
" tuple "
self._input.append((
u"[{0}]".format(first), list(second)))
else:
" nothing special "
self._input.append((first,second))
return self | :in | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L271-L290 | [
"def pairwise(iterable):\n \"s -> (s0,s1), (s2,s3), (s4, s5), ...\"\n a = iter(iterable)\n return izip(a, a)\n"
] | class Query(object):
""" chainable query builder"
>>> db.find('?e ?a') # default find
>>> q.where() # with add
>>> q.ins() # in add
"""
def __init__(self, find, db=None, schema=None):
self.db = db
self.schema = schema
self._find = []
self._where = []
self._input = []
self._limit = None
self._offset = None
self._history = False
self.find(find)
def __repr__(self):
return " ".join([str(self._find), str(self._in), str(self._where)])
def find(self, *args, **kwargs):
" :find "
if args[0] is all:
pass # finds all
else:
[(self._find.append(x)) for x in args]
return self
def where(self, *args, **kwargs):
" :where "
[(self._where.append(x)) for x in args]
return self
def fulltext(self, attr, s, q, e, v):
self._where.append("(fulltext $ {0} {1}) [[{2} {3}]]".format(attr, s, e, v))
self._input.append((s, q))
def limit(self, limit):
self._limit = limit
return self
def offset(self, offset):
self._offset = offset
return self
def history(self, history):
self._offset = history
return self
def hashone(self):
"execute query, get back"
rs = self.one()
if not rs:
return {}
else:
finds = " ".join(self._find).split(' ')
return dict(zip((x.replace('?','') for x in finds), rs))
def one(self):
"execute query, get a single list"
self.limit(1)
rs = self.all()
if not rs:
return None
else:
return rs[0]
def all(self):
" execute query, get all list of lists"
query,inputs = self._toedn()
return self.db.q(query,
inputs = inputs,
limit = self._limit,
offset = self._offset,
history = self._history)
def _toedn(self):
""" prepare the query for the rest api
"""
finds = u""
inputs = u""
wheres = u""
args = []
": in and args"
for a,b in self._input:
inputs += " {0}".format(a)
args.append(dump_edn_val(b))
if inputs:
inputs = u":in ${0}".format(inputs)
" :where "
for where in self._where:
if isinstance(where, (str,unicode)):
wheres += u"[{0}]".format(where)
elif isinstance(where, (list)):
wheres += u" ".join([u"[{0}]".format(w) for w in where])
" find: "
if self._find == []: #find all
fs = set()
for p in wheres.replace('[',' ').replace(']',' ').split(' '):
if p.startswith('?'):
fs.add(p)
self._find = list(fs)
finds = " ".join(self._find)
" all togethr now..."
q = u"""[ :find {0} {1} :where {2} ]""".\
format( finds, inputs, wheres)
return q,args
|
tony-landis/datomic-py | datomic/datomic.py | Query.hashone | python | def hashone(self):
"execute query, get back"
rs = self.one()
if not rs:
return {}
else:
finds = " ".join(self._find).split(' ')
return dict(zip((x.replace('?','') for x in finds), rs)) | execute query, get back | train | https://github.com/tony-landis/datomic-py/blob/54f713d29ad85ba86d53d5115c9b312ff14b7846/datomic/datomic.py#L302-L309 | [
"def one(self):\n \"execute query, get a single list\"\n self.limit(1)\n rs = self.all()\n if not rs: \n return None\n else:\n return rs[0]\n"
] | class Query(object):
""" chainable query builder"
>>> db.find('?e ?a') # default find
>>> q.where() # with add
>>> q.ins() # in add
"""
def __init__(self, find, db=None, schema=None):
self.db = db
self.schema = schema
self._find = []
self._where = []
self._input = []
self._limit = None
self._offset = None
self._history = False
self.find(find)
def __repr__(self):
return " ".join([str(self._find), str(self._in), str(self._where)])
def find(self, *args, **kwargs):
" :find "
if args[0] is all:
pass # finds all
else:
[(self._find.append(x)) for x in args]
return self
def where(self, *args, **kwargs):
" :where "
[(self._where.append(x)) for x in args]
return self
def fulltext(self, attr, s, q, e, v):
self._where.append("(fulltext $ {0} {1}) [[{2} {3}]]".format(attr, s, e, v))
self._input.append((s, q))
def param(self, *args, **kwargs):
" :in "
for first, second in pairwise(args):
if isinstance(second, list):
if not isinstance(second[0], list):
" add a logical _or_ "
self._input.append((
u"[{0} ...]".format(first), second))
else:
" relations, list of list"
self._input.append((
u"[[{0}]]".format(first), second))
elif isinstance(second, tuple):
" tuple "
self._input.append((
u"[{0}]".format(first), list(second)))
else:
" nothing special "
self._input.append((first,second))
return self
def limit(self, limit):
self._limit = limit
return self
def offset(self, offset):
self._offset = offset
return self
def history(self, history):
self._offset = history
return self
def one(self):
"execute query, get a single list"
self.limit(1)
rs = self.all()
if not rs:
return None
else:
return rs[0]
def all(self):
" execute query, get all list of lists"
query,inputs = self._toedn()
return self.db.q(query,
inputs = inputs,
limit = self._limit,
offset = self._offset,
history = self._history)
def _toedn(self):
""" prepare the query for the rest api
"""
finds = u""
inputs = u""
wheres = u""
args = []
": in and args"
for a,b in self._input:
inputs += " {0}".format(a)
args.append(dump_edn_val(b))
if inputs:
inputs = u":in ${0}".format(inputs)
" :where "
for where in self._where:
if isinstance(where, (str,unicode)):
wheres += u"[{0}]".format(where)
elif isinstance(where, (list)):
wheres += u" ".join([u"[{0}]".format(w) for w in where])
" find: "
if self._find == []: #find all
fs = set()
for p in wheres.replace('[',' ').replace(']',' ').split(' '):
if p.startswith('?'):
fs.add(p)
self._find = list(fs)
finds = " ".join(self._find)
" all togethr now..."
q = u"""[ :find {0} {1} :where {2} ]""".\
format( finds, inputs, wheres)
return q,args
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.