| _id | title | partition | text | language | meta_information |
|---|---|---|---|---|---|
q272700 | Process.render | test | def render(template, context):
"""Wrapper to the jinja2 render method from a template file
Parameters
----------
template : str
Path to template file.
context : dict
Dictionary with kwargs context to populate the template
"""
path, filename = os.path.split(template)
return jinja2.Environment(
loader=jinja2.FileSystemLoader(path or './')
).get_template(filename).render(context) | python | {
"resource": ""
} |
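A minimal usage sketch for the wrapper above, assuming the jinja2 package is installed and that render is reachable as a plain function (in the source it belongs to the Process class); the template file name and context keys are purely illustrative:
with open("example.nf.j2", "w") as fh:
    fh.write("process {{ pid }} reads from {{ input_channel }}")

print(render("example.nf.j2", {"pid": "demo_1_1", "input_channel": "IN_demo"}))
# -> process demo_1_1 reads from IN_demo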
q272701 | Process.template_str | test | def template_str(self):
"""Class property that returns a populated template string
This property allows the template of a particular process to be
dynamically generated and returned when doing ``Process.template_str``.
Returns
-------
x : str
String with the complete and populated process template
"""
if not self._context:
raise eh.ProcessError("Channels must be setup first using the "
"set_channels method")
logger.debug("Setting context for template {}: {}".format(
self.template, self._context
))
x = self.render(self._template_path, self._context)
return x | python | {
"resource": ""
} |
q272702 | Process.set_channels | test | def set_channels(self, **kwargs):
""" General purpose method that sets the main channels
This method will take a variable number of keyword arguments to
set the :py:attr:`Process._context` attribute with the information
on the main channels for the process. This is done by appending
the process ID (:py:attr:`Process.pid`) attribute to the input,
output and status channel prefix strings. In the output channel,
the process ID is incremented by 1 to allow the connection with the
channel in the next process.
The ``**kwargs`` system for setting the :py:attr:`Process._context`
attribute also provides additional flexibility. In this way,
individual processes can provide additional information not covered
in this method, without changing it.
Parameters
----------
kwargs : dict
Dictionary with the keyword arguments for setting up the template
context
"""
if not self.pid:
self.pid = "{}_{}".format(self.lane, kwargs.get("pid"))
for i in self.status_channels:
if i.startswith("STATUS_"):
self.status_strs.append("{}_{}".format(i, self.pid))
else:
self.status_strs.append("STATUS_{}_{}".format(i, self.pid))
if self.main_forks:
logger.debug("Setting main fork channels: {}".format(
self.main_forks))
operator = "set" if len(self.main_forks) == 1 else "into"
self.forks = ["\n{}.{}{{ {} }}\n".format(
self.output_channel, operator, ";".join(self.main_forks))]
self._context = {**kwargs, **{"input_channel": self.input_channel,
"output_channel": self.output_channel,
"template": self.template,
"forks": "\n".join(self.forks),
"pid": self.pid}} | python | {
"resource": ""
} |
q272703 | Process.update_main_forks | test | def update_main_forks(self, sink):
"""Updates the forks attribute with the sink channel destination
Parameters
----------
sink : str
Channel onto which the main input will be forked to
"""
if not self.main_forks:
self.main_forks = [self.output_channel]
self.output_channel = "_{}".format(self.output_channel)
self.main_forks.append(sink)
# fork_lst = self.forks + self.main_forks
operator = "set" if len(self.main_forks) == 1 else "into"
self.forks = ["\n{}.{}{{ {} }}\n".format(
self.output_channel, operator, ";".join(self.main_forks))]
self._context = {**self._context,
**{"forks": "".join(self.forks),
"output_channel": self.output_channel}} | python | {
"resource": ""
} |
q272704 | Process.set_secondary_channel | test | def set_secondary_channel(self, source, channel_list):
""" General purpose method for setting a secondary channel
This method allows a given source channel to be forked into one or
more channels and sets those forks in the :py:attr:`Process.forks`
attribute. Both the source and the channels in the ``channel_list``
argument must be the final channel strings, which means that this
method should be called only after setting the main channels.
If the source is not a main channel, this will simply create a fork
or set for every channel in the ``channel_list`` argument list::
SOURCE_CHANNEL_1.into{SINK_1;SINK_2}
If the source is a main channel, this will apply some changes to
the output channel of the process, to avoid overlapping main output
channels. For instance, forking the main output channel for process
2 would create a ``MAIN_2.into{...}``. The issue here is that the
``MAIN_2`` channel is expected as the input of the next process, but
now is being used to create the fork. To solve this issue, the output
channel is modified into ``_MAIN_2``, and the fork is set to
the provided channels plus the ``MAIN_2`` channel::
_MAIN_2.into{MAIN_2;MAIN_5;...}
Parameters
----------
source : str
String with the name of the source channel
channel_list : list
List of channels that will receive a fork of the secondary
channel
"""
logger.debug("Setting secondary channel for source '{}': {}".format(
source, channel_list))
source = "{}_{}".format(source, self.pid)
# Removes possible duplicate channels, when the fork is terminal
channel_list = sorted(list(set(channel_list)))
# When there is only one channel to fork into, use the 'set' operator
# instead of 'into'
op = "set" if len(channel_list) == 1 else "into"
self.forks.append("\n{}.{}{{ {} }}\n".format(
source, op, ";".join(channel_list)))
logger.debug("Setting forks attribute to: {}".format(self.forks))
self._context = {**self._context, **{"forks": "\n".join(self.forks)}} | python | {
"resource": ""
} |
q272705 | Process.update_attributes | test | def update_attributes(self, attr_dict):
"""Updates the directives attribute from a dictionary object.
This will only update the directives for processes that have been
defined in the subclass.
Parameters
----------
attr_dict : dict
Dictionary containing the attributes that will be used to update
the process attributes and/or directives.
"""
# Update directives
# Allowed attributes to write
valid_directives = ["pid", "ignore_type", "ignore_pid", "extra_input",
"group", "input_type"]
for attribute, val in attr_dict.items():
# If the attribute has a valid directive key, update that
# directive
if attribute in valid_directives and hasattr(self, attribute):
setattr(self, attribute, val)
# The params attribute is special, in the sense that it provides
# information for the self.params attribute.
elif attribute == "params":
for name, value in val.items():
if name in self.params:
self.params[name]["default"] = value
else:
raise eh.ProcessError(
"The parameter name '{}' does not exist for "
"component '{}'".format(name, self.template))
else:
for p in self.directives:
self.directives[p][attribute] = val | python | {
"resource": ""
} |
q272706 | Compiler.set_compiler_channels | test | def set_compiler_channels(self, channel_list, operator="mix"):
"""General method for setting the input channels for the status process
Given a list of status channels that are gathered during the pipeline
construction, this method will automatically set the input channel
for the status process. This makes use of the ``mix`` channel operator
of nextflow for multiple channels::
STATUS_1.mix(STATUS_2,STATUS_3,...)
This will set the ``status_channels`` key for the ``_context``
attribute of the process.
Parameters
----------
channel_list : list
List of strings with the final name of the status channels
operator : str
Specifies the operator used to join the compiler channels.
Available options are 'mix' and 'join'.
"""
if not channel_list:
raise eh.ProcessError("At least one status channel must be "
"provided to include this process in the "
"pipeline")
if len(channel_list) == 1:
logger.debug("Setting only one status channel: {}".format(
channel_list[0]))
self._context = {"compile_channels": channel_list[0]}
else:
first_status = channel_list[0]
if operator == "mix":
lst = ",".join(channel_list[1:])
s = "{}.mix({})".format(first_status, lst)
elif operator == "join":
s = first_status
for ch in channel_list[1:]:
s += ".join({})".format(ch)
s += ".map{ ot -> [ ot[0], ot[1..-1] ] }"
logger.debug("Status channel string: {}".format(s))
self._context = {"compile_channels": s} | python | {
"resource": ""
} |
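The channel string assembled above can be illustrated in isolation. A standalone sketch of the same construction for both operators, with made-up status channel names (the method itself needs a Compiler instance, so only the string logic is reproduced here):
channel_list = ["STATUS_A_1_1", "STATUS_B_1_2", "STATUS_C_1_3"]

# 'mix' operator: the first channel mixes in all remaining ones
mix_str = "{}.mix({})".format(channel_list[0], ",".join(channel_list[1:]))
# -> STATUS_A_1_1.mix(STATUS_B_1_2,STATUS_C_1_3)

# 'join' operator: successive joins followed by a map that flattens the tuples
join_str = channel_list[0]
for ch in channel_list[1:]:
    join_str += ".join({})".format(ch)
join_str += ".map{ ot -> [ ot[0], ot[1..-1] ] }"
# -> STATUS_A_1_1.join(STATUS_B_1_2).join(STATUS_C_1_3).map{ ot -> [ ot[0], ot[1..-1] ] }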
q272707 | Init.set_raw_inputs | test | def set_raw_inputs(self, raw_input):
"""Sets the main input channels of the pipeline and their forks.
The ``raw_input`` dictionary input should contain one entry for each
input type (fastq, fasta, etc). The corresponding value should be a
dictionary/JSON with the following key: value pairs:
- ``channel``: Name of the raw input channel (e.g.: channel1)
- ``channel_str``: The nextflow definition of the channel and
eventual checks (e.g.: channel1 = Channel.fromPath(param))
- ``raw_forks``: A list of channels to which the channel name will
fork to.
Each new type of input parameter is automatically added to the
:attr:`params` attribute, so that they are automatically collected
for the pipeline description and help.
Parameters
----------
raw_input : dict
Contains an entry for each input type with the channel name,
channel string and forks.
"""
logger.debug("Setting raw inputs using raw input dict: {}".format(
raw_input))
primary_inputs = []
for input_type, el in raw_input.items():
primary_inputs.append(el["channel_str"])
# Update the process' parameters with the raw input
raw_channel = self.RAW_MAPPING[input_type]
self.params[input_type] = {
"default": raw_channel["default_value"],
"description": raw_channel["description"]
}
op = "set" if len(el["raw_forks"]) == 1 else "into"
self.forks.append("\n{}.{}{{ {} }}\n".format(
el["channel"], op, ";".join(el["raw_forks"])
))
logger.debug("Setting raw inputs: {}".format(primary_inputs))
logger.debug("Setting forks attribute to: {}".format(self.forks))
self._context = {**self._context,
**{"forks": "\n".join(self.forks),
"main_inputs": "\n".join(primary_inputs)}} | python | {
"resource": ""
} |
q272708 | Init.set_secondary_inputs | test | def set_secondary_inputs(self, channel_dict):
""" Adds secondary inputs to the start of the pipeline.
These channels are inserted into the pipeline file as they are
provided in the values of the argument.
Parameters
----------
channel_dict : dict
Each entry should be <parameter>: <channel string>.
"""
logger.debug("Setting secondary inputs: {}".format(channel_dict))
secondary_input_str = "\n".join(list(channel_dict.values()))
self._context = {**self._context,
**{"secondary_inputs": secondary_input_str}} | python | {
"resource": ""
} |
q272709 | Init.set_extra_inputs | test | def set_extra_inputs(self, channel_dict):
"""Sets the initial definition of the extra input channels.
The ``channel_dict`` argument should contain the input type and
destination channel of each parameter (which is the key)::
channel_dict = {
"param1": {
"input_type": "fasta"
"channels": ["abricate_2_3", "chewbbaca_3_4"]
}
}
Parameters
----------
channel_dict : dict
Dictionary with the extra_input parameter as key, and a dictionary
as a value with the input_type and destination channels
"""
extra_inputs = []
for param, info in channel_dict.items():
# Update the process' parameters with the raw input
raw_channel = self.RAW_MAPPING[info["input_type"]]
self.params[param] = {
"default": raw_channel["default_value"],
"description": raw_channel["description"]
}
channel_name = "IN_{}_extraInput".format(param)
channel_str = self.RAW_MAPPING[info["input_type"]]["channel_str"]
extra_inputs.append("{} = {}".format(channel_name,
channel_str.format(param)))
op = "set" if len(info["channels"]) == 1 else "into"
extra_inputs.append("{}.{}{{ {} }}".format(
channel_name, op, ";".join(info["channels"])))
self._context = {
**self._context,
**{"extra_inputs": "\n".join(extra_inputs)}
} | python | {
"resource": ""
} |
q272710 | Assembly._parse_coverage | test | def _parse_coverage(header_str):
"""Attempts to retrieve the coverage value from the header string.
It splits the header by "_" and then screens the list backwards in
search of the first float value. This will be interpreted as the
coverage value. If it cannot find a float value, it returns None.
This search methodology is based on the strings of assemblers
like spades and skesa that put the mean kmer coverage for each
contig in its corresponding fasta header.
Parameters
----------
header_str : str
String with the contig header.
Returns
-------
float or None
The coverage value for the contig. None if it cannot find the
value in the provided string.
"""
cov = None
for i in header_str.split("_")[::-1]:
try:
cov = float(i)
break
except ValueError:
continue
return cov | python | {
"resource": ""
} |
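A couple of illustrative calls for the parser above, treating the static method as a plain function; the header strings mimic SPAdes/skesa-style contig headers:
# The last float found when scanning from the right is taken as the coverage
print(_parse_coverage("NODE_1_length_361913_cov_56.7"))  # -> 56.7
# A header with no float value falls back to None
print(_parse_coverage("contig_A"))                       # -> None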
q272711 | Assembly._parse_assembly | test | def _parse_assembly(self, assembly_file):
"""Parse an assembly fasta file.
This is a Fasta parsing method that populates the
:py:attr:`~Assembly.contigs` attribute with data for each contig in the
assembly.
The insertion of data into self.contigs is done by the
:py:meth:`Assembly._populate_contigs` method, which also calculates
GC content and proportions.
Parameters
----------
assembly_file : str
Path to the assembly fasta file.
"""
# Temporary storage of sequence data
seq_temp = []
# Id counter for contig that will serve as key in self.contigs
contig_id = 0
# Initialize kmer coverage and header
cov, header = None, None
with open(assembly_file) as fh:
logger.debug("Starting iteration of assembly file: {}".format(
assembly_file))
for line in fh:
# Skip empty lines
if not line.strip():
continue
else:
# Remove whitespace surrounding line for further processing
line = line.strip()
if line.startswith(">"):
# If a sequence has already been populated, save the
# previous contig information
if seq_temp:
# Use join() to convert string list into the full
# contig string. This is generally much more efficient
# than successively concatenating strings.
seq = "".join(seq_temp)
logger.debug("Populating contig with contig_id '{}', "
"header '{}' and cov '{}'".format(
contig_id, header, cov))
self._populate_contigs(contig_id, header, cov, seq)
# Reset temporary sequence storage
seq_temp = []
contig_id += 1
header = line[1:]
cov = self._parse_coverage(line)
else:
seq_temp.append(line)
# Populate last contig entry
logger.debug("Populating contig with contig_id '{}', "
"header '{}' and cov '{}'".format(
contig_id, header, cov))
seq = "".join(seq_temp)
self._populate_contigs(contig_id, header, cov, seq) | python | {
"resource": ""
} |
q272712 | Assembly._get_gc_content | test | def _get_gc_content(sequence, length):
"""Get GC content and proportions.
Parameters
----------
sequence : str
The complete sequence of the contig.
length : int
The length of the sequence contig.
Returns
-------
x : dict
Dictionary with the at/gc/n counts and proportions
"""
# Get AT/GC/N counts
at = sum(map(sequence.count, ["A", "T"]))
gc = sum(map(sequence.count, ["G", "C"]))
n = length - (at + gc)
# Get AT/GC/N proportions
at_prop = at / length
gc_prop = gc / length
n_prop = n / length
return {"at": at, "gc": gc, "n": n,
"at_prop": at_prop, "gc_prop": gc_prop, "n_prop": n_prop} | python | {
"resource": ""
} |
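A small worked example for the counts above, treating the static method as a plain function; note that only uppercase bases are counted:
seq = "ATGCGCNN"
print(_get_gc_content(seq, len(seq)))
# -> {'at': 2, 'gc': 4, 'n': 2, 'at_prop': 0.25, 'gc_prop': 0.5, 'n_prop': 0.25}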
q272713 | Assembly.filter_contigs | test | def filter_contigs(self, *comparisons):
"""Filters the contigs of the assembly according to user provided\
comparisons.
The comparisons must be a list of three elements with the
:py:attr:`~Assembly.contigs` key, operator and test value. For
example, to filter contigs with a minimum length of 250, a comparison
would be::
self.filter_contigs(["length", ">=", 250])
The filtered contig ids will be stored in the
:py:attr:`~Assembly.filtered_ids` list.
The result of the test for all contigs will be stored in the
:py:attr:`~Assembly.report` dictionary.
Parameters
----------
comparisons : list
List with contig key, operator and value to test.
"""
# Reset list of filtered ids
self.filtered_ids = []
self.report = {}
gc_filters = [
["gc_prop", ">=", self.min_gc],
["gc_prop", "<=", 1 - self.min_gc]
]
self.filters = list(comparisons) + gc_filters
logger.debug("Filtering contigs using filters: {}".format(
self.filters))
for contig_id, contig in self.contigs.items():
for key, op, value in list(comparisons) + gc_filters:
if not self._test_truth(contig[key], op, value):
self.filtered_ids.append(contig_id)
self.report[contig_id] = "{}/{}/{}".format(key,
contig[key],
value)
break
else:
self.report[contig_id] = "pass" | python | {
"resource": ""
} |
q272714 | Assembly.get_assembly_length | test | def get_assembly_length(self):
"""Returns the length of the assembly, without the filtered contigs.
Returns
-------
x : int
Total length of the assembly.
"""
return sum(
[vals["length"] for contig_id, vals in self.contigs.items()
if contig_id not in self.filtered_ids]) | python | {
"resource": ""
} |
q272715 | Assembly.write_assembly | test | def write_assembly(self, output_file, filtered=True):
"""Writes the assembly to a new file.
The ``filtered`` option controls whether the new assembly will be
filtered or not.
Parameters
----------
output_file : str
Name of the output assembly file.
filtered : bool
If ``True``, does not include filtered ids.
"""
logger.debug("Writing the filtered assembly into: {}".format(
output_file))
with open(output_file, "w") as fh:
for contig_id, contig in self.contigs.items():
if not filtered or contig_id not in self.filtered_ids:
fh.write(">{}_{}\n{}\n".format(self.sample,
contig["header"],
contig["sequence"]))
"resource": ""
} |
q272716 | Assembly.write_report | test | def write_report(self, output_file):
"""Writes a report with the test results for the current assembly
Parameters
----------
output_file : str
Name of the output assembly file.
"""
logger.debug("Writing the assembly report into: {}".format(
output_file))
with open(output_file, "w") as fh:
for contig_id, vals in self.report.items():
fh.write("{}, {}\n".format(contig_id, vals))
"resource": ""
} |
q272717 | remove_inner_forks | test | def remove_inner_forks(text):
"""Recursively removes nested brackets
This function is used to remove nested brackets from fork strings using
regular expressions
Parameters
----------
text: str
The string that contains brackets with inner forks to be removed
Returns
-------
text: str
the string with only the processes that are not in inner forks, thus
the processes that belong to a given fork.
"""
n = 1 # run at least once for one level of fork
# Then this loop assures that all brackets will get removed in a nested
# structure
while n:
# this removes non-nested brackets
text, n = re.subn(r'\([^()]*\)', '', text)
return text | python | {
"resource": ""
} |
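A quick illustration of the bracket stripping above, with arbitrary placeholder process names:
# One level of forking: the inner fork is removed, the outer processes remain
print(remove_inner_forks("procA (procB | procC) procD"))
# -> 'procA  procD'
# Nested forks are removed from the inside out until no brackets are left
print(remove_inner_forks("procA (procB (procC | procD) | procE)"))
# -> 'procA '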
q272718 | inner_fork_insanity_checks | test | def inner_fork_insanity_checks(pipeline_string):
"""
This function performs two sanity checks in the pipeline string. The first
check ensures that each fork contains a lane token '|', while the second
check looks for duplicated processes within the same fork.
Parameters
----------
pipeline_string: str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
"""
# first lets get all forks to a list.
list_of_forks = [] # stores forks
left_indexes = [] # stores indexes of left brackets
# iterate through the string looking for '(' and ')'.
for pos, char in enumerate(pipeline_string):
if char == FORK_TOKEN:
# saves pos to left_indexes list
left_indexes.append(pos)
elif char == CLOSE_TOKEN and len(left_indexes) > 0:
# saves fork to list_of_forks
list_of_forks.append(pipeline_string[left_indexes[-1] + 1: pos])
# removes last bracket from left_indexes list
left_indexes = left_indexes[:-1]
# sort list in descending order of number of forks
list_of_forks.sort(key=lambda x: x.count(FORK_TOKEN), reverse=True)
# Now, we can iterate through list_of_forks and check for errors in each
# fork
for fork in list_of_forks:
# remove inner forks for these checks since each fork has its own entry
# in list_of_forks. Note that each fork is now sorted in descending
# order which enables to remove sequentially the string for the fork
# potentially with more inner forks
fork_simplified = fork
for subfork in list_of_forks:
# checks if subfork is contained in fork and if they are different,
# avoiding to remove itself
if subfork in fork_simplified and subfork != fork:
# removes inner forks. Note that string has no spaces
fork_simplified = fork_simplified.replace("({})".format(subfork), "")
# Checks if there is no fork separator character '|' within each fork
if not len(fork_simplified.split(LANE_TOKEN)) > 1:
raise SanityError("One of the forks doesn't have '|' "
"separator between the processes to fork. This is"
" the prime suspect: '({})'".format(fork)) | python | {
"resource": ""
} |
q272719 | insanity_checks | test | def insanity_checks(pipeline_str):
"""Wrapper that performs all sanity checks on the pipeline string
Parameters
----------
pipeline_str : str
String with the pipeline definition
"""
# Gets rid of all spaces in string
p_string = pipeline_str.replace(" ", "").strip()
# some of the check functions use the pipeline_str as provided by the user,
# but the majority use the parsed p_string.
checks = [
[p_string, [
empty_tasks,
brackets_but_no_lanes,
brackets_insanity_check,
lane_char_insanity_check,
final_char_insanity_check,
fork_procs_insanity_check,
start_proc_insanity_check,
late_proc_insanity_check
]],
[pipeline_str, [
inner_fork_insanity_checks
]]
]
# executes sanity checks in pipeline string before parsing it.
for param, func_list in checks:
for func in func_list:
func(param) | python | {
"resource": ""
} |
q272720 | parse_pipeline | test | def parse_pipeline(pipeline_str):
"""Parses a pipeline string into a list of dictionaries with the connections
between processes
Parameters
----------
pipeline_str : str
String with the definition of the pipeline, e.g.::
'processA processB processC(ProcessD | ProcessE)'
Returns
-------
pipeline_links : list
"""
if os.path.exists(pipeline_str):
logger.debug("Found pipeline file: {}".format(pipeline_str))
with open(pipeline_str) as fh:
pipeline_str = "".join([x.strip() for x in fh.readlines()])
logger.info(colored_print("Resulting pipeline string:\n"))
logger.info(colored_print(pipeline_str + "\n"))
# Perform pipeline insanity checks
insanity_checks(pipeline_str)
logger.debug("Parsing pipeline string: {}".format(pipeline_str))
pipeline_links = []
lane = 1
# Add unique identifiers to each process to allow a correct connection
# between forks with same processes
pipeline_str_modified, identifiers_to_tags = add_unique_identifiers(
pipeline_str)
# Get number of forks in the pipeline
nforks = pipeline_str_modified.count(FORK_TOKEN)
logger.debug("Found {} fork(s)".format(nforks))
# If there are no forks, connect the pipeline as purely linear
if not nforks:
logger.debug("Detected linear pipeline string : {}".format(
pipeline_str))
linear_pipeline = ["__init__"] + pipeline_str_modified.split()
pipeline_links.extend(linear_connection(linear_pipeline, lane))
# Removes unique identifiers used for correctly assign fork parents with
# a possible same process name
pipeline_links = remove_unique_identifiers(identifiers_to_tags,
pipeline_links)
return pipeline_links
for i in range(nforks):
logger.debug("Processing fork {} in lane {}".format(i, lane))
# Split the pipeline at each fork start position. fields[-1] will
# hold the process after the fork. fields[-2] will hold the processes
# before the fork.
fields = pipeline_str_modified.split(FORK_TOKEN, i + 1)
# Get the processes before the fork. This may be empty when the
# fork is at the beginning of the pipeline.
previous_process = fields[-2].split(LANE_TOKEN)[-1].split()
logger.debug("Previous processes string: {}".format(fields[-2]))
logger.debug("Previous processes list: {}".format(previous_process))
# Get lanes after the fork
next_lanes = get_lanes(fields[-1])
logger.debug("Next lanes object: {}".format(next_lanes))
# Get the immediate targets of the fork
fork_sink = [x[0] for x in next_lanes]
logger.debug("The fork sinks into the processes: {}".format(fork_sink))
# The first fork is a special case, where the processes before AND
# after the fork (until the start of another fork) are added to
# the ``pipeline_links`` variable. Otherwise, only the processes
# after the fork will be added
if i == 0:
# If there are no previous process, the fork is at the beginning
# of the pipeline string. In this case, inject the special
# "init" process.
if not previous_process:
previous_process = ["__init__"]
lane = 0
else:
previous_process = ["__init__"] + previous_process
# Add the linear modules before the fork
pipeline_links.extend(
linear_connection(previous_process, lane))
fork_source = previous_process[-1]
logger.debug("Fork source is set to: {}".format(fork_source))
fork_lane = get_source_lane(previous_process, pipeline_links)
logger.debug("Fork lane is set to: {}".format(fork_lane))
# Add the forking modules
pipeline_links.extend(
fork_connection(fork_source, fork_sink, fork_lane, lane))
# Add the linear connections in the subsequent lanes
pipeline_links.extend(
linear_lane_connection(next_lanes, lane))
lane += len(fork_sink)
pipeline_links = remove_unique_identifiers(identifiers_to_tags,
pipeline_links)
return pipeline_links | python | {
"resource": ""
} |
q272721 | get_source_lane | test | def get_source_lane(fork_process, pipeline_list):
"""Returns the lane of the last process that matches fork_process
Parameters
----------
fork_process : list
List of processes before the fork.
pipeline_list : list
List with the pipeline connection dictionaries.
Returns
-------
int
Lane of the last process that matches fork_process
"""
fork_source = fork_process[-1]
fork_sig = [x for x in fork_process if x != "__init__"]
for position, p in enumerate(pipeline_list[::-1]):
if p["output"]["process"] == fork_source:
lane = p["output"]["lane"]
logger.debug("Possible source match found in position {} in lane"
" {}".format(position, lane))
lane_sequence = [x["output"]["process"] for x in pipeline_list
if x["output"]["lane"] == lane]
logger.debug("Testing lane sequence '{}' against fork signature"
" '{}'".format(lane_sequence, fork_sig))
if lane_sequence == fork_sig:
return p["output"]["lane"]
return 0 | python | {
"resource": ""
} |
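An illustrative call, with a hand-built connection list of the same shape produced by linear_connection (shown further down); process names and lane numbers are examples only:
pipeline_list = [
    {"input": {"process": "__init__", "lane": 1},
     "output": {"process": "procA", "lane": 1}},
    {"input": {"process": "procA", "lane": 1},
     "output": {"process": "procB", "lane": 1}},
]
# The fork starts after procB, whose lane (1) is returned
print(get_source_lane(["__init__", "procA", "procB"], pipeline_list))  # -> 1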
q272722 | get_lanes | test | def get_lanes(lanes_str):
"""From a raw pipeline string, get a list of lanes from the start
of the current fork.
When the pipeline is being parsed, it will be split at every fork
position. The string at the right of the fork position will be provided
to this function. Its job is to retrieve the lanes that result
from that fork, ignoring any nested forks.
Parameters
----------
lanes_str : str
Pipeline string after a fork split
Returns
-------
lanes : list
List of lists, with the list of processes for each lane
"""
logger.debug("Parsing lanes from raw string: {}".format(lanes_str))
# Temporarily stores the lanes string after removal of nested forks
parsed_lanes = ""
# Flag used to determined whether the cursor is inside or outside the
# right fork
infork = 0
for i in lanes_str:
# Nested fork started
if i == FORK_TOKEN:
infork += 1
# Nested fork stopped
if i == CLOSE_TOKEN:
infork -= 1
if infork < 0:
break
# Save only when in the right fork
if infork == 0:
# Ignore forking syntax tokens
if i not in [FORK_TOKEN, CLOSE_TOKEN]:
parsed_lanes += i
return [x.split() for x in parsed_lanes.split(LANE_TOKEN)] | python | {
"resource": ""
} |
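An illustrative call for the lane extraction above, assuming the module constants FORK_TOKEN, LANE_TOKEN and CLOSE_TOKEN are '(', '|' and ')' as in the rest of the parser:
# String to the right of the first fork of 'procA (procB | procC (procD | procE))'
print(get_lanes("procB | procC (procD | procE))"))
# -> [['procB'], ['procC']]   (the nested fork is ignored)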
q272723 | linear_connection | test | def linear_connection(plist, lane):
"""Connects a linear list of processes into a list of dictionaries
Parameters
----------
plist : list
List with process names. This list should contain at least two entries.
lane : int
Corresponding lane of the processes
Returns
-------
res : list
List of dictionaries with the links between processes
"""
logger.debug(
"Establishing linear connection with processes: {}".format(plist))
res = []
previous = None
for p in plist:
# Skip first process
if not previous:
previous = p
continue
res.append({
"input": {
"process": previous,
"lane": lane
},
"output": {
"process": p,
"lane": lane
}
})
previous = p
return res | python | {
"resource": ""
} |
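A minimal example of the output shape, with illustrative process names:
print(linear_connection(["__init__", "procA", "procB"], 1))
# -> [{'input': {'process': '__init__', 'lane': 1},
#      'output': {'process': 'procA', 'lane': 1}},
#     {'input': {'process': 'procA', 'lane': 1},
#      'output': {'process': 'procB', 'lane': 1}}]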
q272724 | fork_connection | test | def fork_connection(source, sink, source_lane, lane):
"""Makes the connection between a process and the first processes in the
lanes to which it forks.
The ``lane`` argument should correspond to the lane of the source process.
For each lane in ``sink``, the lane counter will increase.
Parameters
----------
source : str
Name of the process that is forking
sink : list
List of the processes where the source will fork to. Each element
corresponds to the start of a lane.
source_lane : int
Lane of the forking process
lane : int
Current lane counter; each new lane created by the fork is numbered from ``lane + 1`` onwards
Returns
-------
res : list
List of dictionaries with the links between processes
"""
logger.debug("Establishing forking of source '{}' into processes"
" '{}'. Source lane set to '{}' and lane set to '{}'".format(
source, sink, source_lane, lane))
res = []
# Increase the lane counter for the first lane
lane_counter = lane + 1
for p in sink:
res.append({
"input": {
"process": source,
"lane": source_lane
},
"output": {
"process": p,
"lane": lane_counter
}
})
lane_counter += 1
return res | python | {
"resource": ""
} |
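A minimal example of the fork output; here the illustrative process 'procA' sits in lane 1 and forks into two new lanes:
print(fork_connection("procA", ["procB", "procC"], source_lane=1, lane=1))
# -> [{'input': {'process': 'procA', 'lane': 1},
#      'output': {'process': 'procB', 'lane': 2}},
#     {'input': {'process': 'procA', 'lane': 1},
#      'output': {'process': 'procC', 'lane': 3}}]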
q272725 | add_unique_identifiers | test | def add_unique_identifiers(pipeline_str):
"""Returns the pipeline string with unique identifiers and a dictionary with
references between the unique keys and the original values
Parameters
----------
pipeline_str : str
Pipeline string
Returns
-------
str
Pipeline string with unique identifiers
dict
Match between process unique values and original names
"""
# Add space at beginning and end of pipeline to allow regex mapping of final
# process in linear pipelines
pipeline_str_modified = " {} ".format(pipeline_str)
# Regex to get all process names. Catch all words without spaces and that
# are not fork tokens or pipes
reg_find_proc = r"[^\s{}{}{}]+".format(LANE_TOKEN, FORK_TOKEN, CLOSE_TOKEN)
process_names = re.findall(reg_find_proc, pipeline_str_modified)
identifiers_to_tags = {}
"""
dict: Matches new process names (identifiers) with original process
names
"""
new_process_names = []
"""
list: New process names used to replace in the pipeline string
"""
# Assigns the new process names by appending a numeric id at the end of
# the process name
for index, val in enumerate(process_names):
if "=" in val:
parts = val.split("=")
new_id = "{}_{}={}".format(parts[0], index, parts[1])
else:
new_id = "{}_{}".format(val, index)
# add new process with id
new_process_names.append(new_id)
# makes a match between new process name and original process name
identifiers_to_tags[new_id] = val
# Add space between forks, pipes and the process names for the replace
# regex to work
match_result = lambda match: " {} ".format(match.group())
# force to add a space between each token so that regex modification can
# be applied
find = r'[{}{}{}]+'.format(FORK_TOKEN, LANE_TOKEN, CLOSE_TOKEN)
pipeline_str_modified = re.sub(find, match_result, pipeline_str_modified)
# Replace original process names by the unique identifiers
for index, val in enumerate(process_names):
# regex to replace process names with non assigned process ids
# escape characters are required to match to the dict keys
# (identifiers_to_tags), since python keys with escape characters
# must be escaped
find = r'{}[^_]'.format(val).replace("\\", "\\\\")
pipeline_str_modified = re.sub(find, new_process_names[index] + " ",
pipeline_str_modified, 1)
return pipeline_str_modified, identifiers_to_tags | python | {
"resource": ""
} |
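An illustrative call showing the kind of result produced, assuming the fork/lane tokens are '(', '|' and ')'; the exact spacing of the rewritten string may vary, the point is the numeric suffixes and the mapping back to the original names:
new_str, mapping = add_unique_identifiers("procA (procB | procB)")
print(mapping)
# -> {'procA_0': 'procA', 'procB_1': 'procB', 'procB_2': 'procB'}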
q272726 | remove_unique_identifiers | test | def remove_unique_identifiers(identifiers_to_tags, pipeline_links):
"""Removes unique identifiers and add the original process names to the
already parsed pipelines
Parameters
----------
identifiers_to_tags : dict
Match between unique process identifiers and process names
pipeline_links: list
Parsed pipeline list with unique identifiers
Returns
-------
list
Pipeline list with original identifiers
"""
# Replaces the unique identifiers by the original process names
for index, val in enumerate(pipeline_links):
if val["input"]["process"] != "__init__":
val["input"]["process"] = identifiers_to_tags[
val["input"]["process"]]
if val["output"]["process"] != "__init__":
val["output"]["process"] = identifiers_to_tags[
val["output"]["process"]]
return pipeline_links | python | {
"resource": ""
} |
q272727 | NextflowInspector._check_required_files | test | def _check_required_files(self):
"""Checks whether the trace and log files are available
"""
if not os.path.exists(self.trace_file):
raise eh.InspectionError("The provided trace file could not be "
"opened: {}".format(self.trace_file))
if not os.path.exists(self.log_file):
raise eh.InspectionError("The .nextflow.log files could not be "
"opened. Are you sure you are in a "
"nextflow project directory?") | python | {
"resource": ""
} |
q272728 | NextflowInspector._header_mapping | test | def _header_mapping(header):
"""Parses the trace file header and retrieves the positions of each
column key.
Parameters
----------
header : str
The header line of nextflow's trace file
Returns
-------
dict
Mapping the column ID to its position (e.g.: {"tag":2})
"""
return dict(
(x.strip(), pos) for pos, x in enumerate(header.split("\t"))
) | python | {
"resource": ""
} |
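A small example of the header mapping, using a shortened version of nextflow's tab-separated trace header and treating the static method as a plain function:
header = "task_id\tprocess\ttag\tstatus\t%cpu\trss"
print(_header_mapping(header))
# -> {'task_id': 0, 'process': 1, 'tag': 2, 'status': 3, '%cpu': 4, 'rss': 5}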
q272729 | NextflowInspector._hms | test | def _hms(s):
"""Converts a hms string into seconds.
Parameters
----------
s : str
The hms string can be something like '20s', '1m30s' or '300ms'.
Returns
-------
float
Time in seconds.
"""
if s == "-":
return 0
if s.endswith("ms"):
return float(s.rstrip("ms")) / 1000
fields = list(map(float, re.split("[dhms]", s)[:-1]))
if len(fields) == 4:
return fields[0] * 24 * 3600 + fields[1] * 3600 + fields[2] * 60 +\
fields[3]
if len(fields) == 3:
return fields[0] * 3600 + fields[1] * 60 + fields[2]
elif len(fields) == 2:
return fields[0] * 60 + fields[1]
else:
return fields[0] | python | {
"resource": ""
} |
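A few illustrative conversions for the duration parser above, treating the static method as a plain function:
print(_hms("-"))       # -> 0       (missing values map to zero)
print(_hms("300ms"))   # -> 0.3     (milliseconds to seconds)
print(_hms("1m30s"))   # -> 90.0    (minutes and seconds)
print(_hms("2h3m4s"))  # -> 7384.0  (hours, minutes and seconds)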
q272730 | NextflowInspector._size_coverter | test | def _size_coverter(s):
"""Converts size string into megabytes
Parameters
----------
s : str
The size string can be '30KB', '20MB' or '1GB'
Returns
-------
float
With the size in megabytes
"""
if s.upper().endswith("KB"):
return float(s.rstrip("KB")) / 1024
elif s.upper().endswith(" B"):
return float(s.rstrip("B")) / 1024 / 1024
elif s.upper().endswith("MB"):
return float(s.rstrip("MB"))
elif s.upper().endswith("GB"):
return float(s.rstrip("GB")) * 1024
elif s.upper().endswith("TB"):
return float(s.rstrip("TB")) * 1024 * 1024
else:
return float(s) | python | {
"resource": ""
} |
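Illustrative conversions for the size parser above, which normalises every unit to megabytes (again treating the static method as a plain function):
print(_size_coverter("512KB"))  # -> 0.5
print(_size_coverter("100MB"))  # -> 100.0
print(_size_coverter("2GB"))    # -> 2048.0
print(_size_coverter("1TB"))    # -> 1048576.0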
q272731 | NextflowInspector._get_pipeline_processes | test | def _get_pipeline_processes(self):
"""Parses the .nextflow.log file and retrieves the complete list
of processes
This method searches for specific signatures at the beginning of the
.nextflow.log file::
Apr-19 19:07:32.660 [main] DEBUG nextflow.processor
TaskProcessor - Creating operator > report_corrupt_1_1 --
maxForks: 4
When a line with the .*Creating operator.* signature is found, the
process name is retrieved and populates the :attr:`processes` attribute
"""
with open(self.log_file) as fh:
for line in fh:
if re.match(".*Creating operator.*", line):
# Retrieves the process name from the string
match = re.match(".*Creating operator > (.*) --", line)
process = match.group(1)
if any([process.startswith(x) for x in self._blacklist]):
continue
if process not in self.skip_processes:
self.processes[match.group(1)] = {
"barrier": "W",
"submitted": set(),
"finished": set(),
"failed": set(),
"retry": set(),
"cpus": None,
"memory": None
}
self.process_tags[process] = {}
# Retrieves the pipeline name from the string
if re.match(".*Launching `.*` \[.*\] ", line):
tag_match = re.match(".*Launching `.*` \[(.*)\] ", line)
self.pipeline_tag = tag_match.group(1) if tag_match else \
"?"
name_match = re.match(".*Launching `(.*)` \[.*\] ", line)
self.pipeline_name = name_match.group(1) if name_match \
else "?"
self.content_lines = len(self.processes) | python | {
"resource": ""
} |
q272732 | NextflowInspector._clear_inspect | test | def _clear_inspect(self):
"""Clears inspect attributes when re-executing a pipeline"""
self.trace_info = defaultdict(list)
self.process_tags = {}
self.process_stats = {}
self.samples = []
self.stored_ids = []
self.stored_log_ids = []
self.time_start = None
self.time_stop = None
self.execution_command = None
self.nextflow_version = None
self.abort_cause = None
self._c = 0
# Clean up of tag running status
for p in self.processes.values():
p["barrier"] = "W"
for i in ["submitted", "finished", "failed", "retry"]:
p[i] = set() | python | {
"resource": ""
} |
q272733 | NextflowInspector._update_barrier_status | test | def _update_barrier_status(self):
"""Checks whether the channels to each process have been closed.
"""
with open(self.log_file) as fh:
for line in fh:
# Exit barrier update after session abort signal
if "Session aborted" in line:
return
if "<<< barrier arrive" in line:
# Retrieve process name from string
process_m = re.match(".*process: (.*)\)", line)
if process_m:
process = process_m.group(1)
# Updates process channel to complete
if process in self.processes:
self.processes[process]["barrier"] = "C" | python | {
"resource": ""
} |
q272734 | NextflowInspector._retrieve_log | test | def _retrieve_log(path):
"""Method used to retrieve the contents of a log file into a list.
Parameters
----------
path
Returns
-------
list or None
Contents of the provided file, each line as a list entry
"""
if not os.path.exists(path):
return None
with open(path) as fh:
return fh.readlines() | python | {
"resource": ""
} |
q272735 | NextflowInspector._assess_resource_warnings | test | def _assess_resource_warnings(self, process, vals):
"""Assess whether the cpu load or memory usage deviates from the allocation
Parameters
----------
process : str
Process name
vals : list
List of trace information for each tag of that process
Returns
-------
cpu_warnings : dict
Keys are tags and values are the excessive cpu load
mem_warnings : dict
Keys are tags and values are the excessive rss
"""
cpu_warnings = {}
mem_warnings = {}
for i in vals:
try:
expected_load = float(i["cpus"]) * 100
cpu_load = float(i["%cpu"].replace(",", ".").replace("%", ""))
if cpu_load < expected_load * 0.9 or cpu_load > expected_load * 1.10:
cpu_warnings[i["tag"]] = {
"expected": expected_load,
"value": cpu_load
}
except (ValueError, KeyError):
pass
try:
rss = self._size_coverter(i["rss"])
mem_allocated = self._size_coverter(i["memory"])
if rss > mem_allocated * 1.10:
mem_warnings[i["tag"]] = {
"expected": mem_allocated,
"value": rss
}
except (ValueError, KeyError):
pass
return cpu_warnings, mem_warnings | python | {
"resource": ""
} |
q272736 | NextflowInspector._update_process_stats | test | def _update_process_stats(self):
"""Updates the process stats with the information from the processes
This method is called at the end of each static parsing of the nextflow
trace file. It re-populates the :attr:`process_stats` dictionary
with the new stat metrics.
"""
good_status = ["COMPLETED", "CACHED"]
for process, vals in self.trace_info.items():
# Update submission status of tags for each process
vals = self._update_tag_status(process, vals)
# Update process resources
self._update_process_resources(process, vals)
self.process_stats[process] = {}
inst = self.process_stats[process]
# Get number of completed samples
inst["completed"] = "{}".format(
len([x for x in vals if x["status"] in good_status]))
# Get average time
try:
time_array = [self._hms(x["realtime"]) for x in vals]
mean_time = round(sum(time_array) / len(time_array), 1)
mean_time_str = strftime('%H:%M:%S', gmtime(mean_time))
inst["realtime"] = mean_time_str
# When the realtime column is not present
except KeyError:
inst["realtime"] = "-"
# Get cumulative cpu/hours
try:
cpu_hours = [self._cpu_load_parser(
x["cpus"], x["%cpu"], x["realtime"]) for x in vals]
inst["cpuhour"] = round(sum(cpu_hours), 2)
# When the realtime, cpus or %cpus column are not present
except KeyError:
inst["cpuhour"] = "-"
# Assess resource warnings
inst["cpu_warnings"], inst["mem_warnings"] = \
self._assess_resource_warnings(process, vals)
# Get maximum memory
try:
rss_values = [self._size_coverter(x["rss"]) for x in vals
if x["rss"] != "-"]
if rss_values:
max_rss = round(max(rss_values))
rss_str = self._size_compress(max_rss)
else:
rss_str = "-"
inst["maxmem"] = rss_str
except KeyError:
inst["maxmem"] = "-"
# Get read size
try:
rchar_values = [self._size_coverter(x["rchar"]) for x in vals
if x["rchar"] != "-"]
if rchar_values:
avg_rchar = round(sum(rchar_values) / len(rchar_values))
rchar_str = self._size_compress(avg_rchar)
else:
rchar_str = "-"
except KeyError:
rchar_str = "-"
inst["avgread"] = rchar_str
# Get write size
try:
wchar_values = [self._size_coverter(x["wchar"]) for x in vals
if x["wchar"] != "-"]
if wchar_values:
avg_wchar = round(sum(wchar_values) / len(wchar_values))
wchar_str = self._size_compress(avg_wchar)
else:
wchar_str = "-"
except KeyError:
wchar_str = "-"
inst["avgwrite"] = wchar_str | python | {
"resource": ""
} |
q272737 | NextflowInspector.log_parser | test | def log_parser(self):
"""Method that parses the nextflow log file once and updates the
submitted number of samples for each process
"""
# Check the size of the log file. Only proceed with the parsing
# if it has changed since the previous check.
size_stamp = os.path.getsize(self.log_file)
self.log_retry = 0
if size_stamp and size_stamp == self.log_sizestamp:
return
else:
logger.debug("Updating log size stamp to: {}".format(size_stamp))
self.log_sizestamp = size_stamp
# Regular expression to catch four groups:
# 1. Start timestamp
# 2. Work directory hash
# 3. Process name
# 4. Tag name
r = ".* (.*) \[.*\].*\[(.*)\].*process > (.*) \((.*)\).*"
with open(self.log_file) as fh:
for line in fh:
if "Submitted process >" in line or \
"Re-submitted process >" in line or \
"Cached process >" in line:
m = re.match(r, line)
if not m:
continue
time_start = m.group(1)
workdir = m.group(2)
process = m.group(3)
tag = m.group(4)
# Skip if this line has already been parsed
if time_start + tag not in self.stored_log_ids:
self.stored_log_ids.append(time_start + tag)
else:
continue
# For first time processes
if process not in self.processes:
continue
p = self.processes[process]
# Skip if the process/tag combination has finished or is retrying
if tag in list(p["finished"]) + list(p["retry"]):
continue
# Update failed process/tags when they have been re-submitted
if tag in list(p["failed"]) and \
"Re-submitted process >" in line:
p["retry"].add(tag)
self.send = True
continue
# Set process barrier to running. Check for barrier status
# are performed at the end of the trace parsing in the
# _update_barrier_status method.
p["barrier"] = "R"
if tag not in p["submitted"]:
p["submitted"].add(tag)
# Update the process_tags attribute with the new tag.
# Update only when the tag does not exist. This may rarely
# occur when the tag is parsed first in the trace file
if tag not in self.process_tags[process]:
self.process_tags[process][tag] = {
"workdir": self._expand_path(workdir),
"start": time_start
}
self.send = True
# When the tag is filled in the trace file parsing,
# the timestamp may not be present in the trace. In
# those cases, fill that information here.
elif not self.process_tags[process][tag]["start"]:
self.process_tags[process][tag]["start"] = time_start
self.send = True
self._update_pipeline_status() | python | {
"resource": ""
} |
q272738 | NextflowInspector.update_inspection | test | def update_inspection(self):
"""Wrapper method that calls the appropriate main updating methods of
the inspection.
It is meant to be used inside a loop (like while), so that it can
continuously update the class attributes from the trace and log files.
It already implements checks to parse these files only when they
change, and to ignore entries that have already been processed.
"""
try:
self.log_parser()
except (FileNotFoundError, StopIteration) as e:
logger.debug("ERROR: " + str(sys.exc_info()[0]))
self.log_retry += 1
if self.log_retry == self.MAX_RETRIES:
raise e
try:
self.trace_parser()
except (FileNotFoundError, StopIteration) as e:
logger.debug("ERROR: " + str(sys.exc_info()[0]))
self.trace_retry += 1
if self.trace_retry == self.MAX_RETRIES:
raise e | python | {
"resource": ""
} |
q272739 | NextflowInspector.display_overview | test | def display_overview(self):
"""Displays the default pipeline inspection overview
"""
stay_alive = True
self.screen = curses.initscr()
self.screen.keypad(True)
self.screen.nodelay(-1)
curses.cbreak()
curses.noecho()
curses.start_color()
self.screen_lines = self.screen.getmaxyx()[0]
# self.screen_width = self.screen.getmaxyx()[1]
try:
while stay_alive:
# Provide functionality to certain keybindings
self._curses_keybindings()
# Updates main inspector attributes
self.update_inspection()
# Display curses interface
self.flush_overview()
sleep(self.refresh_rate)
except FileNotFoundError:
sys.stderr.write(colored_print(
"ERROR: nextflow log and/or trace files are no longer "
"reachable!", "red_bold"))
except Exception as e:
sys.stderr.write(str(e))
finally:
curses.nocbreak()
self.screen.keypad(0)
curses.echo()
curses.endwin() | python | {
"resource": ""
} |
q272740 | NextflowInspector._updown | test | def _updown(self, direction):
"""Provides curses scroll functionality.
"""
if direction == "up" and self.top_line != 0:
self.top_line -= 1
elif direction == "down" and \
self.screen.getmaxyx()[0] + self.top_line\
<= self.content_lines + 3:
self.top_line += 1 | python | {
"resource": ""
} |
q272741 | NextflowInspector._rightleft | test | def _rightleft(self, direction):
"""Provides curses horizontal padding"""
if direction == "left" and self.padding != 0:
self.padding -= 1
if direction == "right" and \
self.screen.getmaxyx()[1] + self.padding < self.max_width:
self.padding += 1 | python | {
"resource": ""
} |
q272742 | NextflowInspector._get_log_lines | test | def _get_log_lines(self, n=300):
"""Returns a list with the last ``n`` lines of the nextflow log file
Parameters
----------
n : int
Number of last lines from the log file
Returns
-------
list
List of strings with the nextflow log
"""
with open(self.log_file) as fh:
last_lines = fh.readlines()[-n:]
return last_lines | python | {
"resource": ""
} |
q272743 | NextflowInspector._prepare_static_info | test | def _prepare_static_info(self):
"""Prepares the first batch of information, containing static
information such as the pipeline file, and configuration files
Returns
-------
dict
Dict with the static information for the first POST request
"""
pipeline_files = {}
with open(join(self.workdir, self.pipeline_name)) as fh:
pipeline_files["pipelineFile"] = fh.readlines()
nf_config = join(self.workdir, "nextflow.config")
if os.path.exists(nf_config):
with open(nf_config) as fh:
pipeline_files["configFile"] = fh.readlines()
# Check for specific flowcraft configurations files
configs = {
"params.config": "paramsFile",
"resources.config": "resourcesFile",
"containers.config": "containersFile",
"user.config": "userFile",
}
for config, key in configs.items():
cfile = join(self.workdir, config)
if os.path.exists(cfile):
with open(cfile) as fh:
pipeline_files[key] = fh.readlines()
return pipeline_files | python | {
"resource": ""
} |
q272744 | NextflowInspector._dag_file_to_dict | test | def _dag_file_to_dict(self):
"""Function that opens the dotfile named .treeDag.json in the current
working directory
Returns
-------
dict
Dictionary with the DAG object to be sent in the POST request made by the _establish_connection method
"""
try:
dag_file = open(os.path.join(self.workdir, ".treeDag.json"))
dag_json = json.load(dag_file)
except (FileNotFoundError, json.decoder.JSONDecodeError):
logger.warning(colored_print(
"WARNING: dotfile named .treeDag.json not found or corrupted",
"red_bold"))
dag_json = {}
return dag_json | python | {
"resource": ""
} |
q272745 | NextflowInspector._get_run_hash | test | def _get_run_hash(self):
"""Gets the hash of the nextflow file"""
# Get name and path of the pipeline from the log file
pipeline_path = get_nextflow_filepath(self.log_file)
# Get hash from the entire pipeline file
pipeline_hash = hashlib.md5()
with open(pipeline_path, "rb") as fh:
for chunk in iter(lambda: fh.read(4096), b""):
pipeline_hash.update(chunk)
# Get hash from the current working dir and hostname
workdir = self.workdir.encode("utf8")
hostname = socket.gethostname().encode("utf8")
hardware_addr = str(uuid.getnode()).encode("utf8")
dir_hash = hashlib.md5(workdir + hostname + hardware_addr)
return pipeline_hash.hexdigest() + dir_hash.hexdigest() | python | {
"resource": ""
} |
q272746 | get_nextflow_filepath | test | def get_nextflow_filepath(log_file):
"""Gets the nextflow file path from the nextflow log file. It searches for
the nextflow run command throughout the file.
Parameters
----------
log_file : str
Path for the .nextflow.log file
Returns
-------
str
Path for the nextflow file
"""
with open(log_file) as fh:
# Searches for the first occurrence of the nextflow pipeline
# file name in the .nextflow.log file
while 1:
line = fh.readline()
if not line:
# file is empty
raise eh.LogError("Nextflow command path could not be found - Is "
".nextflow.log empty?")
try:
# Regex supports absolute paths and relative paths
pipeline_path = re.match(".*\s(.*.nf).*", line) \
.group(1)
return pipeline_path
except AttributeError:
continue | python | {
"resource": ""
} |
q272747 | main | test | def main(sample_id, assembly, min_size):
"""Main executor of the split_fasta template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly : str
Path to the assembly fasta file.
min_size : int
Minimum contig size.
"""
logger.info("Starting script")
f_open = open(assembly)
entry = (x[1] for x in groupby(f_open, lambda line: line[0] == ">"))
success = 0
for header in entry:
headerStr = header.__next__()[1:].strip()
seq = "".join(s.strip() for s in entry.__next__())
if len(seq) >= min_size:
with open(sample_id + '_' + headerStr.replace(" ","_").replace("=","_") + '.fasta', "w") as output_file:
output_file.write(">" + sample_id + "_" + headerStr.replace(" ","_").replace("=","_") + "\n" + seq + "\n")
success += 1
f_open.close()
logger.info("{} sequences successfully split.".format(success)) | python | {
"resource": ""
} |
q272748 | main | test | def main(sample_id, trace_file, workdir):
"""
Parses a nextflow trace file, searches for processes with a specific tag
and sends a JSON report with the relevant information
The expected fields for the trace file are::
0. task_id
1. process
2. tag
3. status
4. exit code
5. start timestamp
6. container
7. cpus
8. duration
9. realtime
10. queue
11. cpu percentage
12. memory percentage
13. real memory size of the process
14. virtual memory size of the process
Parameters
----------
sample_id : str
Sample identification string
trace_file : str
Path to the nextflow trace file
workdir : str
Path to the working directory where the trace file and the stats JSON are stored
"""
# Determine the path of the stored JSON for the sample_id
stats_suffix = ".stats.json"
stats_path = join(workdir, sample_id + stats_suffix)
trace_path = join(workdir, trace_file)
logger.info("Starting pipeline status routine")
logger.debug("Checking for previous pipeline status data")
stats_array = get_previous_stats(stats_path)
logger.info("Stats JSON object set to : {}".format(stats_array))
# Search for this substring in the tags field. Only lines with this
# tag will be processed for the reports
tag = " getStats"
logger.debug("Tag variable set to: {}".format(tag))
logger.info("Starting parsing of trace file: {}".format(trace_path))
with open(trace_path) as fh:
header = next(fh).strip().split()
logger.debug("Header set to: {}".format(header))
for line in fh:
fields = line.strip().split("\t")
# Check if tag substring is in the tag field of the nextflow trace
if tag in fields[2] and fields[3] == "COMPLETED":
logger.debug(
"Parsing trace line with COMPLETED status: {}".format(
line))
current_json = get_json_info(fields, header)
stats_array[fields[0]] = current_json
else:
logger.debug(
"Ignoring trace line without COMPLETED status"
" or stats specific tag: {}".format(
line))
with open(join(stats_path), "w") as fh, open(".report.json", "w") as rfh:
fh.write(json.dumps(stats_array, separators=(",", ":")))
rfh.write(json.dumps(stats_array, separators=(",", ":"))) | python | {
"resource": ""
} |
q272749 | brew_innuendo | test | def brew_innuendo(args):
"""Brews a given list of processes according to the recipe
Parameters
----------
args : argparse.Namespace
The arguments passed through argparser that will be used to check the
the recipe, tasks and brew the process
Returns
-------
str
The final pipeline string, ready for the engine.
list
List of process strings.
"""
# Create recipe class instance
automatic_pipeline = Innuendo()
if not args.tasks:
input_processes = " ".join(
automatic_pipeline.process_descriptions.keys())
else:
input_processes = args.tasks
# Validate the provided pipeline processes
validated = automatic_pipeline.validate_pipeline(input_processes)
if not validated:
sys.exit(1)
# Get the final pipeline string
pipeline_string = automatic_pipeline.run_auto_pipeline(input_processes)
return pipeline_string | python | {
"resource": ""
} |
q272750 | brew_recipe | test | def brew_recipe(recipe_name):
"""Returns a pipeline string from a recipe name.
Parameters
----------
recipe_name : str
Name of the recipe. Must match the name attribute in one of the classes
defined in :mod:`flowcraft.generator.recipes`
Returns
-------
str
Pipeline string ready for parsing and processing by flowcraft engine
"""
# This will iterate over all modules included in the recipes subpackage
# It will return the importer and the module name, along with the
# correct prefix
prefix = "{}.".format(recipes.__name__)
for importer, modname, _ in pkgutil.iter_modules(recipes.__path__, prefix):
# Import the current module
_module = importer.find_module(modname).load_module(modname)
# Fetch all available classes in module
_recipe_classes = [cls for cls in _module.__dict__.values() if
isinstance(cls, type)]
# Iterate over each Recipe class, and check for a match with the
# provided recipe name.
for cls in _recipe_classes:
# Create instance of class to allow fetching the name attribute
recipe_cls = cls()
if getattr(recipe_cls, "name", None) == recipe_name:
return recipe_cls.brew()
logger.error(
colored_print("Recipe name '{}' does not exist.".format(recipe_name))
)
sys.exit(1) | python | {
"resource": ""
} |
q272751 | list_recipes | test | def list_recipes(full=False):
"""Method that iterates over all available recipes and prints their
information to the standard output
Parameters
----------
full : bool
If true, it will provide the pipeline string along with the recipe name
"""
logger.info(colored_print(
"\n===== L I S T O F R E C I P E S =====\n",
"green_bold"))
# This will iterate over all modules included in the recipes subpackage
# It will return the importer and the module name, along with the
# correct prefix
prefix = "{}.".format(recipes.__name__)
for importer, modname, _ in pkgutil.iter_modules(recipes.__path__, prefix):
# Import the current module
_module = importer.find_module(modname).load_module(modname)
# Fetch all available classes in module
_recipe_classes = [cls for cls in _module.__dict__.values() if
isinstance(cls, type)]
# Iterate over each Recipe class, and check for a match with the
# provided recipe name.
for cls in _recipe_classes:
recipe_cls = cls()
if hasattr(recipe_cls, "name"):
logger.info(colored_print("=> {}".format(recipe_cls.name), "blue_bold"))
if full:
logger.info(colored_print("\t {}".format(recipe_cls.__doc__), "purple_bold"))
logger.info(colored_print("Pipeline string: {}\n".format(recipe_cls.pipeline_str), "yellow_bold"))
sys.exit(0) | python | {
"resource": ""
} |
q272752 | InnuendoRecipe.validate_pipeline | test | def validate_pipeline(pipeline_string):
"""Validate pipeline string
Validates the pipeline string by searching for forbidden characters
Parameters
----------
pipeline_string : str
String with the processes provided
Returns
-------
"""
if "(" in pipeline_string or ")" in pipeline_string or "|" in \
pipeline_string:
logger.error(
colored_print("Please provide a valid task list!", "red_bold")
)
return False
return True | python | {
"resource": ""
} |
q272753 | InnuendoRecipe.build_upstream | test | def build_upstream(self, process_descriptions, task, all_tasks,
task_pipeline,
count_forks, total_tasks, forks):
"""Builds the upstream pipeline of the current process
Checks for the upstream processes to the current process and
adds them to the current pipeline fragment if they were provided in
the process list.
Parameters
----------
process_descriptions : dict
Information of processes input, output and if is forkable
task : str
Current process
all_tasks : list
A list of all provided processes
task_pipeline : list
Current pipeline fragment
count_forks : int
Current number of forks
total_tasks : str
All space separated processes
forks : list
Current forks
Returns
-------
list : resulting pipeline fragment
"""
if task in process_descriptions:
if process_descriptions[task][1] is not None:
if len(process_descriptions[task][1].split("|")) > 1:
local_forks = process_descriptions[task][1].split("|")
# Produces a new pipeline fragment for each forkable
# process
for local_fork in local_forks:
if local_fork in total_tasks:
count_forks += 1
task_pipeline.insert(
0,
process_descriptions[task][1]
)
self.define_pipeline_string(
process_descriptions,
local_fork,
False,
True,
count_forks,
total_tasks,
forks
)
return task_pipeline
else:
# Adds the process to the pipeline fragment in case it is
# provided in the task list
if process_descriptions[task][1] in total_tasks:
task_pipeline.insert(
0,
process_descriptions[task][1].split("|")[0]
)
# Proceeds building upstream until the input for a
# process is None
self.build_upstream(
process_descriptions,
process_descriptions[task][1].split("|")[0],
all_tasks,
task_pipeline,
count_forks,
total_tasks,
forks
)
else:
logger.error(
colored_print("{} not in provided protocols as "
"input for {}".format(
process_descriptions[task][1], task), "red_bold"
)
)
sys.exit()
return task_pipeline
else:
return task_pipeline | python | {
"resource": ""
} |
q272754 | InnuendoRecipe.build_downstream | test | def build_downstream(self, process_descriptions, task, all_tasks,
task_pipeline,
count_forks, total_tasks, forks):
"""Builds the downstream pipeline of the current process
Checks for the downstream processes to the current process and
adds them to the current pipeline fragment.
Parameters
----------
process_descriptions : dict
Information of processes input, output and if is forkable
task : str
Current process
all_tasks : list
A list of all provided processes
task_pipeline : list
Current pipeline fragment
count_forks : int
Current number of forks
total_tasks : str
All space separated processes
forks : list
Current forks
Returns
-------
list : resulting pipeline fragment
"""
if task in process_descriptions:
if process_descriptions[task][2] is not None:
if len(process_descriptions[task][2].split("|")) > 1:
local_forks = process_descriptions[task][2].split("|")
# Adds the process to the pipeline fragment downstream
# and defines a new pipeline fragment for each fork.
# Those will only look for downstream processes
for local_fork in local_forks:
if local_fork in total_tasks:
count_forks += 1
task_pipeline.append(process_descriptions[task][2])
self.define_pipeline_string(
process_descriptions,
local_fork,
False,
True,
count_forks,
total_tasks,
forks
)
return task_pipeline
else:
if process_descriptions[task][2] in total_tasks:
task_pipeline.append(process_descriptions[task][2].split("|")[0])
# Proceeds building downstream until the output for a
# process is None
self.build_downstream(
process_descriptions,
process_descriptions[task][2].split("|")[0],
all_tasks,
task_pipeline,
count_forks,
total_tasks,
forks
)
return task_pipeline
else:
return task_pipeline | python | {
"resource": ""
} |
q272755 | InnuendoRecipe.define_pipeline_string | test | def define_pipeline_string(self, process_descriptions, tasks,
check_upstream,
check_downstream, count_forks, total_tasks,
forks):
"""Builds the possible forks and connections between the provided
processes
This method loops through all the provided tasks and builds the
upstream and downstream pipeline if required. It then returns all
possible forks that need to be merged a posteriori.
Parameters
----------
process_descriptions : dict
Information of processes input, output and if is forkable
tasks : str
Space separated processes
check_upstream : bool
If is to build the upstream pipeline of the current task
check_downstream : bool
If is to build the downstream pipeline of the current task
count_forks : int
Number of current forks
total_tasks : str
All space separated processes
forks : list
Current forks
Returns
-------
list : List with all the possible pipeline forks
"""
tasks_array = tasks.split()
for task_unsplit in tasks_array:
task = task_unsplit.split("=")[0]
if task not in process_descriptions.keys():
logger.error(
colored_print(
"{} not in the possible processes".format(task),
"red_bold"
)
)
sys.exit()
else:
process_split = task_unsplit.split("=")
if len(process_split) > 1:
self.process_to_id[process_split[0]] = process_split[1]
# Only uses the process if it is not already in the possible forks
if not bool([x for x in forks if task in x]) and not bool([y for y in forks if process_descriptions[task][2] in y]):
task_pipeline = []
if task in process_descriptions:
if check_upstream:
task_pipeline = self.build_upstream(
process_descriptions,
task,
tasks_array,
task_pipeline,
count_forks,
total_tasks,
forks
)
task_pipeline.append(task)
if check_downstream:
task_pipeline = self.build_downstream(
process_descriptions,
task,
tasks_array,
task_pipeline,
count_forks,
total_tasks,
forks
)
# Adds the pipeline fragment to the list of possible forks
forks.append(list(OrderedDict.fromkeys(task_pipeline)))
# Checks for task in fork. Case order of input processes is reversed
elif bool([y for y in forks if process_descriptions[task][2] in y]):
for fork in forks:
if task not in fork:
try:
dependent_index = fork.index(process_descriptions[task][2])
fork.insert(dependent_index, task)
except ValueError:
continue
for i in range(0, len(forks)):
for j in range(0, len(forks[i])):
try:
if len(forks[i][j].split("|")) > 1:
forks[i][j] = forks[i][j].split("|")
tmp_fork = []
for s in forks[i][j]:
if s in total_tasks:
tmp_fork.append(s)
forks[i][j] = tmp_fork
except AttributeError as e:
continue
return forks | python | {
"resource": ""
} |
q272756 | InnuendoRecipe.run_auto_pipeline | test | def run_auto_pipeline(self, tasks):
"""Main method to run the automatic pipeline creation
This method aggregates the functions required to build the pipeline
string that can be used as input for the workflow generator.
Parameters
----------
tasks : str
A string with the space separated tasks to be included in the
pipeline
Returns
-------
str : String with the pipeline definition used as input for
parse_pipeline
"""
self.forks = self.define_pipeline_string(
self.process_descriptions,
tasks,
True,
True,
self.count_forks,
tasks,
self.forks
)
self.pipeline_string = self.build_pipeline_string(self.forks)
return self.pipeline_string | python | {
"resource": ""
} |
q272757 | Recipe._get_component_str | test | def _get_component_str(component, params=None, directives=None):
""" Generates a component string based on the provided parameters and
directives
Parameters
----------
component : str
Component name
params : dict
Dictionary with parameter information
directives : dict
Dictionary with directives information
Returns
-------
str
Component string with the parameters and directives, ready for
parsing by flowcraft engine
"""
final_directives = {}
if directives:
final_directives = directives
if params:
final_directives["params"] = params
if final_directives:
return "{}={}".format(
component, json.dumps(final_directives, separators=(",", ":")))
else:
return component | python | {
"resource": ""
} |
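A small usage sketch of the helper above. The component name, parameters and directives are made up, and the helper is assumed to be callable as a static method of the Recipe class::

# Hypothetical inputs mirroring the docstring example
params = {"minCoverage": 10}
directives = {"cpus": 4}

# Directives are copied first and the params dict is nested under "params":
#   spades={"cpus":4,"params":{"minCoverage":10}}
component_str = Recipe._get_component_str("spades", params, directives)
print(component_str)

# With neither params nor directives, the bare component name is returned
print(Recipe._get_component_str("spades"))  # spades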
q272758 | write_report | test | def write_report(storage_dic, output_file, sample_id):
""" Writes a report from multiple samples.
Parameters
----------
storage_dic : dict or :py:class:`OrderedDict`
Storage containing the trimming statistics. See :py:func:`parse_log`
for its generation.
output_file : str
Path where the output file will be generated.
sample_id : str
Id or name of the current sample.
"""
with open(output_file, "w") as fh, open(".report.json", "w") as json_rep:
# Write header
fh.write("Sample,Total length,Total trimmed,%,5end Trim,3end Trim,"
"bad_reads\\n")
# Write contents
for sample, vals in storage_dic.items():
fh.write("{},{}\\n".format(
sample, ",".join([str(x) for x in vals.values()])))
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "trimmed",
"value": vals["total_trim_perc"],
"table": "qc",
"columnBar": True},
]
}],
"plotData": [{
"sample": sample_id,
"data": {
"sparkline": vals["clean_len"]
}
}],
"badReads": vals["bad_reads"]
}
json_rep.write(json.dumps(json_dic, separators=(",", ":"))) | python | {
"resource": ""
} |
q272759 | main | test | def main(log_files):
""" Main executor of the trimmomatic_report template.
Parameters
----------
log_files : list
List of paths to the trimmomatic log files.
"""
log_storage = OrderedDict()
for log in log_files:
# Strip the "_trimlog.txt" suffix to recover the sample id (rstrip
# would remove a trailing character set, not the literal suffix)
log_id = log.replace("_trimlog.txt", "")
# Populate storage of current sample
log_storage[log_id] = parse_log(log)
# Remove temporary trim log file
os.remove(log)
write_report(log_storage, "trimmomatic_report.csv", log_id) | python | {
"resource": ""
} |
q272760 | fix_contig_names | test | def fix_contig_names(assembly_path):
"""Removes whitespace from the assembly contig names
Parameters
----------
assembly_path : str
Path to the assembly file.
Returns
-------
str:
Path to new assembly file with fixed contig names
"""
fixed_assembly = "fixed_assembly.fa"
with open(assembly_path) as in_hf, open(fixed_assembly, "w") as ou_fh:
for line in in_hf:
if line.startswith(">"):
fixed_line = line.replace(" ", "_")
ou_fh.write(fixed_line)
else:
ou_fh.write(line)
return fixed_assembly | python | {
"resource": ""
} |
q272761 | clean_up | test | def clean_up(fastq):
"""
Cleans the temporary fastq files. If they are symlinks, the link
source is removed
Parameters
----------
fastq : list
List of fastq files.
"""
for fq in fastq:
# Get real path of fastq files, following symlinks
rp = os.path.realpath(fq)
logger.debug("Removing temporary fastq file path: {}".format(rp))
if re.match(".*/work/.{2}/.{30}/.*", rp):
os.remove(rp) | python | {
"resource": ""
} |
q272762 | Abricate.parse_files | test | def parse_files(self, fls):
"""Public method for parsing abricate output files.
This method is called at at class instantiation for the provided
output files. Additional abricate output files can be added using
this method after the class instantiation.
Parameters
----------
fls : list
List of paths to Abricate files
"""
for f in fls:
# Make sure paths exists
if os.path.exists(f):
self._parser(f)
else:
logger.warning("File {} does not exist".format(f)) | python | {
"resource": ""
} |
q272763 | Abricate._parser | test | def _parser(self, fl):
"""Parser for a single abricate output file.
This parser will scan a single Abricate output file and populate
the :py:attr:`Abricate.storage` attribute.
Parameters
----------
fl : str
Path to abricate output file
Notes
-----
This method will populate the :py:attr:`Abricate.storage` attribute
with all compliant lines in the abricate output file. Entries are
inserted using an arbitrary key that is set by the
:py:attr:`Abricate._key` attribute.
"""
with open(fl) as fh:
for line in fh:
# Skip header and comment lines
if line.startswith("#") or line.strip() == "":
continue
fields = line.strip().split("\t")
try:
coverage = float(fields[8])
except ValueError:
coverage = None
try:
identity = float(fields[9])
except ValueError:
identity = None
try:
accession = fields[11]
except IndexError:
accession = None
self.storage[self._key] = {
"log_file": os.path.basename(fl),
"infile": fields[0],
"reference": fields[1],
"seq_range": (int(fields[2]), int(fields[3])),
"gene": fields[4],
"accession": accession,
"database": fields[10],
"coverage": coverage,
"identity": identity
}
self._key += 1 | python | {
"resource": ""
} |
q272764 | Abricate.iter_filter | test | def iter_filter(self, filters, databases=None, fields=None,
filter_behavior="and"):
"""General purpose filter iterator.
This general filter iterator allows the filtering of entries based
on one or more custom filters. These filters must contain
an entry of the `storage` attribute, a comparison operator, and the
test value. For example, to filter out entries with coverage below 80::
my_filter = ["coverage", ">=", 80]
Filters should always be provide as a list of lists::
iter_filter([["coverage", ">=", 80]])
# or
my_filters = [["coverage", ">=", 80],
["identity", ">=", 50]]
iter_filter(my_filters)
As a convenience, a list of the desired databases can be directly
specified using the `database` argument, which will only report
entries for the specified databases::
iter_filter(my_filters, databases=["plasmidfinder"])
By default, this method will yield the complete entry record. However,
the returned filters can be specified using the `fields` option::
iter_filter(my_filters, fields=["reference", "coverage"])
Parameters
----------
filters : list
List of lists with the custom filter. Each list should have three
elements. (1) the key from the entry to be compared; (2) the
comparison operator; (3) the test value. Example:
``[["identity", ">", 80]]``.
databases : list
List of databases that should be reported.
fields : list
List of fields from each individual entry that are yielded.
filter_behavior : str
options: ``'and'`` ``'or'``
Sets the behaviour of the filters, if multiple filters have been
provided. By default it is set to ``'and'``, which means that an
entry has to pass all filters. It can be set to ``'or'``, in which
case only one of the filters has to pass.
Yields
------
dic : dict
Dictionary object containing a :py:attr:`Abricate.storage` entry
that passed the filters.
"""
if filter_behavior not in ["and", "or"]:
raise ValueError("Filter behavior must be either 'and' or 'or'")
for dic in self.storage.values():
# This attribute will determine whether an entry will be yielded
# or not
_pass = False
# Stores the flags with the test results for each filter
# The results will be either True or False
flag = []
# Filter for databases
if databases:
# Skip entry if not in specified database
if dic["database"] not in databases:
continue
# Apply filters
for f in filters:
# Get value of current filter
val = dic[f[0]]
if not self._test_truth(val, f[1], f[2]):
flag.append(False)
else:
flag.append(True)
# Test whether the entry will pass based on the test results
# and the filter behaviour
if filter_behavior == "and":
if all(flag):
_pass = True
elif filter_behavior == "or":
if any(flag):
_pass = True
if _pass:
if fields:
yield dict((x, y) for x, y in dic.items() if x in fields)
else:
yield dic | python | {
"resource": ""
} |
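A usage sketch of the filter iterator. The file name, database name and thresholds are hypothetical, and the Abricate constructor is assumed to take a list of output files, as described for parse_files::

# Hypothetical Abricate output file and custom filters
abr = Abricate(["sample1_abr_resfinder.tsv"])
my_filters = [["coverage", ">=", 80],
              ["identity", ">=", 90]]

# Yield only selected fields from the 'resfinder' database, requiring
# both filters to pass (the default 'and' behaviour)
for hit in abr.iter_filter(my_filters,
                           databases=["resfinder"],
                           fields=["gene", "coverage", "identity"]):
    print(hit)  # e.g. {"gene": "blaTEM", "coverage": 99.8, "identity": 100.0}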
q272765 | AbricateReport._get_contig_id | test | def _get_contig_id(contig_str):
"""Tries to retrieve contig id. Returns the original string if it
is unable to retrieve the id.
Parameters
----------
contig_str : str
Full contig string (fasta header)
Returns
-------
str
Contig id
"""
contig_id = contig_str
try:
contig_id = re.search(".*NODE_([0-9]*)_.*", contig_str).group(1)
except AttributeError:
pass
try:
contig_id = re.search(".*Contig_([0-9]*)_.*", contig_str).group(1)
except AttributeError:
pass
return contig_id | python | {
"resource": ""
} |
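An equivalent standalone sketch of the contig id extraction, using made-up headers; it keeps the same fall-back behaviour of returning the original string when neither pattern matches::

import re

def get_contig_id(contig_str):
    # Try SPAdes-style (NODE_<id>_...) and Contig_<id>_... headers in turn
    for pattern in (r".*NODE_([0-9]*)_.*", r".*Contig_([0-9]*)_.*"):
        match = re.search(pattern, contig_str)
        if match:
            return match.group(1)
    # Fall back to the full header when no pattern matches
    return contig_str

print(get_contig_id("NODE_12_length_54321_cov_13.5"))  # 12
print(get_contig_id("Contig_3_consensus"))             # 3
print(get_contig_id("custom_header_without_id"))       # unchanged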
q272766 | AbricateReport.get_plot_data | test | def get_plot_data(self):
""" Generates the JSON report to plot the gene boxes
Following the convention of the reports platform, this method returns
a list of JSON/dict objects with the information about each entry in
the abricate file. The information contained in this JSON is::
{contig_id: <str>,
seqRange: [<int>, <int>],
gene: <str>,
accession: <str>,
coverage: <float>,
identity: <float>
}
Note that the `seqRange` entry contains the position in the
corresponding contig, not the absolute position in the whole assembly.
Returns
-------
json_dic : list
List of JSON/dict objects with the report data.
"""
json_dic = {"plotData": []}
sample_dic = {}
sample_assembly_map = {}
for entry in self.storage.values():
sample_id = re.match("(.*)_abr", entry["log_file"]).groups()[0]
if sample_id not in sample_dic:
sample_dic[sample_id] = {}
# Get contig ID using the same regex as in `assembly_report.py`
# template
contig_id = self._get_contig_id(entry["reference"])
# Get database
database = entry["database"]
if database not in sample_dic[sample_id]:
sample_dic[sample_id][database] = []
# Update the sample-assembly correspondence dict
if sample_id not in sample_assembly_map:
sample_assembly_map[sample_id] = entry["infile"]
sample_dic[sample_id][database].append(
{"contig": contig_id,
"seqRange": entry["seq_range"],
"gene": entry["gene"].replace("'", ""),
"accession": entry["accession"],
"coverage": entry["coverage"],
"identity": entry["identity"],
},
)
for sample, data in sample_dic.items():
json_dic["plotData"].append(
{
"sample": sample,
"data": {"abricateXrange": data},
"assemblyFile": sample_assembly_map[sample]
}
)
return json_dic | python | {
"resource": ""
} |
q272767 | AbricateReport.write_report_data | test | def write_report_data(self):
"""Writes the JSON report to a json file
"""
json_plot = self.get_plot_data()
json_table = self.get_table_data()
json_dic = {**json_plot, **json_table}
with open(".report.json", "w") as json_report:
json_report.write(json.dumps(json_dic, separators=(",", ":"))) | python | {
"resource": ""
} |
q272768 | main | test | def main(sample_id, assembly_file, coverage_bp_file=None):
"""Main executor of the assembly_report template.
Parameters
----------
sample_id : str
Sample Identification string.
assembly_file : str
Path to assembly file in Fasta format.
"""
logger.info("Starting assembly report")
assembly_obj = Assembly(assembly_file, sample_id)
logger.info("Retrieving summary statistics for assembly")
assembly_obj.get_summary_stats("{}_assembly_report.csv".format(sample_id))
size_dist = [len(x) for x in assembly_obj.contigs.values()]
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [
{"header": "Contigs",
"value": assembly_obj.summary_info["ncontigs"],
"table": "assembly",
"columnBar": True},
{"header": "Assembled BP",
"value": assembly_obj.summary_info["total_len"],
"table": "assembly",
"columnBar": True},
]
}],
"plotData": [{
"sample": sample_id,
"data": {
"size_dist": size_dist
}
}]
}
if coverage_bp_file:
try:
window = 2000
gc_sliding_data = assembly_obj.get_gc_sliding(window=window)
cov_sliding_data = \
assembly_obj.get_coverage_sliding(coverage_bp_file,
window=window)
# Get total basepairs based on the individual coverage of each
# contig bpx
total_bp = sum(
[sum(x) for x in assembly_obj.contig_coverage.values()]
)
# Add data to json report
json_dic["plotData"][0]["data"]["genomeSliding"] = {
"gcData": gc_sliding_data,
"covData": cov_sliding_data,
"window": window,
"xbars": assembly_obj._get_window_labels(window),
"assemblyFile": os.path.basename(assembly_file)
}
json_dic["plotData"][0]["data"]["sparkline"] = total_bp
except Exception:
logger.error("Unexpected error creating sliding window data:\\n"
"{}".format(traceback.format_exc()))
# Write json report
with open(".report.json", "w") as json_report:
json_report.write(json.dumps(json_dic, separators=(",", ":")))
with open(".status", "w") as status_fh:
status_fh.write("pass") | python | {
"resource": ""
} |
q272769 | Assembly._parse_assembly | test | def _parse_assembly(self, assembly_file):
"""Parse an assembly file in fasta format.
This is a Fasta parsing method that populates the
:py:attr:`Assembly.contigs` attribute with data for each contig in the
assembly.
Parameters
----------
assembly_file : str
Path to the assembly fasta file.
"""
with open(assembly_file) as fh:
header = None
logger.debug("Starting iteration of assembly file: {}".format(
assembly_file))
for line in fh:
# Skip empty lines
if not line.strip():
continue
if line.startswith(">"):
# Add contig header to contig dictionary
header = line[1:].strip()
self.contigs[header] = []
else:
# Add sequence string for the current contig
self.contigs[header].append(line.strip())
# After populating the contigs dictionary, convert the values
# list into a string sequence
self.contigs = OrderedDict(
(header, "".join(seq)) for header, seq in self.contigs.items()) | python | {
"resource": ""
} |
q272770 | Assembly.get_summary_stats | test | def get_summary_stats(self, output_csv=None):
"""Generates a CSV report with summary statistics about the assembly
The calculated statistics are:
- Number of contigs
- Average contig size
- N50
- Total assembly length
- Average GC content
- Amount of missing data
Parameters
----------
output_csv: str
Name of the output CSV file.
"""
contig_size_list = []
self.summary_info["ncontigs"] = len(self.contigs)
for contig_id, sequence in self.contigs.items():
logger.debug("Processing contig: {}".format(contig_id))
# Get contig sequence size
contig_len = len(sequence)
# Add size for average contig size
contig_size_list.append(contig_len)
# Add to total assembly length
self.summary_info["total_len"] += contig_len
# Add to average gc
self.summary_info["avg_gc"].append(
sum(map(sequence.count, ["G", "C"])) / contig_len
)
# Add to missing data
self.summary_info["missing_data"] += sequence.count("N")
# Get average contig size
logger.debug("Getting average contig size")
self.summary_info["avg_contig_size"] = \
sum(contig_size_list) / len(contig_size_list)
# Get average gc content
logger.debug("Getting average GC content")
self.summary_info["avg_gc"] = \
sum(self.summary_info["avg_gc"]) / len(self.summary_info["avg_gc"])
# Get N50
logger.debug("Getting N50")
cum_size = 0
for l in sorted(contig_size_list, reverse=True):
cum_size += l
if cum_size >= self.summary_info["total_len"] / 2:
self.summary_info["n50"] = l
break
if output_csv:
logger.debug("Writing report to csv")
# Write summary info to CSV
with open(output_csv, "w") as fh:
summary_line = "{}, {}\\n".format(
self.sample, ",".join(
[str(x) for x in self.summary_info.values()]))
fh.write(summary_line) | python | {
"resource": ""
} |
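The N50 step above can be illustrated in isolation; the contig sizes below are made up::

# Standalone illustration of the N50 logic used in get_summary_stats
contig_sizes = [400, 300, 200, 100]   # hypothetical assembly
total_len = sum(contig_sizes)         # 1000

cum_size = 0
n50 = None
for length in sorted(contig_sizes, reverse=True):
    cum_size += length
    if cum_size >= total_len / 2:
        n50 = length
        break

# Half of the assembled bases are in contigs of at least this size
print(n50)  # 300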
q272771 | Assembly._get_window_labels | test | def _get_window_labels(self, window):
"""Returns the mapping between sliding window points and their contigs,
and the x-axis position of contig
Parameters
----------
window : int
Size of the window.
Returns
-------
xbars : list
The x-axis position of the ending for each contig.
labels : list
The x-axis labels for each data point in the sliding window
"""
# Get summary stats, if they have not yet been triggered
if not self.summary_info:
self.get_summary_stats()
# Get contig boundary positon
c = 0
xbars = []
for contig, seq in self.contigs.items():
contig_id = self._get_contig_id(contig)
self.contig_boundaries[contig_id] = [c, c + len(seq)]
c += len(seq)
xbars.append((contig_id, c, contig))
return xbars | python | {
"resource": ""
} |
q272772 | Assembly._gc_prop | test | def _gc_prop(s, length):
"""Get proportion of GC from a string
Parameters
----------
s : str
Arbitrary string
Returns
-------
x : float
GC proportion.
"""
gc = sum(map(s.count, ["c", "g"]))
return gc / length | python | {
"resource": ""
} |
q272773 | Assembly.get_gc_sliding | test | def get_gc_sliding(self, window=2000):
"""Calculates a sliding window of the GC content for the assembly
Parameters
----------
window : int
Size of the sliding window in base pairs.
Returns
-------
gc_res : list
List of GC proportion floats for each data point in the sliding
window
"""
gc_res = []
# Get complete sequence to calculate sliding window values
complete_seq = "".join(self.contigs.values()).lower()
for i in range(0, len(complete_seq), window):
seq_window = complete_seq[i:i + window]
# Get GC proportion
gc_res.append(round(self._gc_prop(seq_window, len(seq_window)), 2))
return gc_res | python | {
"resource": ""
} |
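A reduced sketch of the sliding-window logic used above, on a toy sequence; the window size and sequence are arbitrary::

# Toy illustration of the GC sliding window
seq = "atgcgc" * 4   # hypothetical concatenated assembly, 24 bp
window = 6

gc_values = []
for i in range(0, len(seq), window):
    chunk = seq[i:i + window]
    gc = sum(map(chunk.count, ["c", "g"])) / len(chunk)
    gc_values.append(round(gc, 2))

# One GC proportion per 6 bp window
print(gc_values)  # [0.67, 0.67, 0.67, 0.67]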
q272774 | main | test | def main(sample_id, fastq_pair, clear):
"""Main executor of the skesa template.
Parameters
----------
sample_id : str
Sample Identification string.
fastq_pair : list
Two element list containing the paired FastQ files.
clear : str
Can be either 'true' or 'false'. If 'true', the input fastq files will
be removed at the end of the run, IF they are in the working directory
"""
logger.info("Starting skesa")
# Determine output file
if "_trim." in fastq_pair[0]:
sample_id += "_trim"
version = __get_version_skesa()["version"]
output_file = "{}_skesa{}.fasta".format(sample_id, version.replace(".", ""))
cli = [
"skesa",
"--fastq",
"{},{}".format(fastq_pair[0], fastq_pair[1]),
"--gz",
"--use_paired_ends",
"--cores",
"${task.cpus}"
]
logger.debug("Running Skesa subprocess with command: {}".format(cli))
with open(output_file, "w") as fh:
p = subprocess.Popen(cli, stdout=fh, stderr=PIPE)
stdout, stderr = p.communicate()
# Attempt to decode STDERR output from bytes. If unsuccessful, coerce to
# string
try:
stderr = stderr.decode("utf8")
stdout = stdout.decode("utf8")
except (UnicodeDecodeError, AttributeError):
stderr = str(stderr)
stdout = str(stdout)
logger.info("Finished Skesa subprocess with STDOUT:\\n"
"======================================\\n{}".format(stdout))
logger.info("Finished Skesa subprocess with STDERR:\\n"
"======================================\\n{}".format(stderr))
logger.info("Finished Skesa with return code: {}".format(
p.returncode))
# Remove input fastq files when clear option is specified.
# Only remove temporary input when the expected output exists.
if clear == "true" and os.path.exists(output_file):
clean_up(fastq_pair)
with open(".status", "w") as fh:
if p.returncode != 0:
fh.write("error")
raise SystemExit(p.returncode)
else:
fh.write("pass") | python | {
"resource": ""
} |
q272775 | write_json_report | test | def write_json_report(sample_id, data1, data2):
"""Writes the report
Parameters
----------
sample_id : str
Sample identification string.
data1 : str
Path to the FastQC data file of the first FastQ pair.
data2 : str
Path to the FastQC data file of the second FastQ pair.
Returns
-------
json_dic : dict
Dictionary with the JSON report data for the sample.
"""
parser_map = {
"base_sequence_quality": ">>Per base sequence quality",
"sequence_quality": ">>Per sequence quality scores",
"base_gc_content": ">>Per sequence GC content",
"base_n_content": ">>Per base N content",
"sequence_length_dist": ">>Sequence Length Distribution",
"per_base_sequence_content": ">>Per base sequence content"
}
json_dic = {
"plotData": [{
"sample": sample_id,
"data": {
"base_sequence_quality": {"status": None, "data": []},
"sequence_quality": {"status": None, "data": []},
"base_gc_content": {"status": None, "data": []},
"base_n_content": {"status": None, "data": []},
"sequence_length_dist": {"status": None, "data": []},
"per_base_sequence_content": {"status": None, "data": []}
}
}]
}
for cat, start_str in parser_map.items():
if cat == "per_base_sequence_content":
fs = 1
fe = 5
else:
fs = 1
fe = 2
report1, status1 = _get_quality_stats(data1, start_str,
field_start=fs, field_end=fe)
report2, status2 = _get_quality_stats(data2, start_str,
field_start=fs, field_end=fe)
status = None
for i in ["fail", "warn", "pass"]:
if i in [status1, status2]:
status = i
json_dic["plotData"][0]["data"][cat]["data"] = [report1, report2]
json_dic["plotData"][0]["data"][cat]["status"] = status
return json_dic | python | {
"resource": ""
} |
q272776 | get_trim_index | test | def get_trim_index(biased_list):
"""Returns the trim index from a ``bool`` list
Provided with a list of ``bool`` elements (``[False, False, True, True]``),
this function will assess the index of the list that minimizes the number
of True elements (biased positions) at the extremities. To do so,
it will iterate over the boolean list and find an index position where
there are two consecutive ``False`` elements after a ``True`` element. This
will be considered as an optimal trim position. For example, in the
following list::
[True, True, False, True, True, False, False, False, False, ...]
The optimal trim index will be the 4th position, since it is the first
occurrence of a ``True`` element with two False elements after it.
If the provided ``bool`` list has no ``True`` elements, then the 0 index is
returned.
Parameters
----------
biased_list: list
List of ``bool`` elements, where ``True`` means a biased site.
Returns
-------
x : index position of the biased list for the optimal trim.
"""
# Return index 0 if there are no biased positions
if set(biased_list) == {False}:
return 0
if set(biased_list[:5]) == {False}:
return 0
# Iterate over the biased_list array. Keep the iteration going until
# we find a biased position with the two following positions unbiased
# (e.g.: True, False, False).
# When this condition is verified, return the last biased position
# index for subsequent trimming.
for i, val in enumerate(biased_list):
if val and set(biased_list[i+1:i+3]) == {False}:
return i + 1
# If the previous iteration could not find and index to trim, it means
# that the whole list is basically biased. Return the length of the
# biased_list
return len(biased_list) | python | {
"resource": ""
} |
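A worked sketch reusing the docstring example, assuming get_trim_index is available as defined above::

# The last biased (True) position followed by two unbiased positions is
# at index 4, so trimming should start right after it
biased = [True, True, False, True, True, False, False, False, False]
print(get_trim_index(biased))        # 5

# A fully unbiased list needs no trimming
print(get_trim_index([False] * 10))  # 0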
q272777 | trim_range | test | def trim_range(data_file):
"""Assess the optimal trim range for a given FastQC data file.
This function will parse a single FastQC data file, namely the
*'Per base sequence content'* category. It will retrieve the A/T and G/C
content for each nucleotide position in the reads, and check whether the
G/C and A/T proportions are between 80% and 120%. If they are, that
nucleotide position is marked as biased for future removal.
Parameters
----------
data_file: str
Path to FastQC data file.
Returns
-------
trim_nt: list
List containing the range with the best trimming positions for the
corresponding FastQ file. The first element is the 5' end trim index
and the second element is the 3' end trim index.
"""
logger.debug("Starting trim range assessment")
# Target string for nucleotide bias assessment
target_nuc_bias = ">>Per base sequence content"
logger.debug("Target string to start nucleotide bias assessment set to "
"{}".format(target_nuc_bias))
# This flag will become True when gathering base proportion data
# from file.
gather = False
# This variable will store a boolean array on the biased/unbiased
# positions. Biased position will be True, while unbiased positions
# will be False
biased = []
with open(data_file) as fh:
for line in fh:
# Start assessment of nucleotide bias
if line.startswith(target_nuc_bias):
# Skip comment line
logger.debug("Found target string at line: {}".format(line))
next(fh)
gather = True
# Stop assessment when reaching end of target module
elif line.startswith(">>END_MODULE") and gather:
logger.debug("Stopping parsing at line: {}".format(line))
break
elif gather:
# Get proportions of each nucleotide
g, a, t, c = [float(x) for x in line.strip().split()[1:]]
# Get 'GC' and 'AT' content
gc = (g + 0.1) / (c + 0.1)
at = (a + 0.1) / (t + 0.1)
# Assess bias
if 0.8 <= gc <= 1.2 and 0.8 <= at <= 1.2:
biased.append(False)
else:
biased.append(True)
logger.debug("Finished bias assessment with result: {}".format(biased))
# Split biased list in half to get the 5' and 3' ends
biased_5end, biased_3end = biased[:int(len(biased)/2)],\
biased[int(len(biased)/2):][::-1]
logger.debug("Getting optimal trim range from biased list")
trim_nt = [0, 0]
# Assess number of nucleotides to clip at 5' end
trim_nt[0] = get_trim_index(biased_5end)
logger.debug("Optimal trim range at 5' end set to: {}".format(trim_nt[0]))
# Assess number of nucleotides to clip at 3' end
trim_nt[1] = len(biased) - get_trim_index(biased_3end)
logger.debug("Optimal trim range at 3' end set to: {}".format(trim_nt[1]))
return trim_nt | python | {
"resource": ""
} |
q272778 | get_sample_trim | test | def get_sample_trim(p1_data, p2_data):
"""Get the optimal read trim range from data files of paired FastQ reads.
Given the FastQC data report files for paired-end FastQ reads, this
function will assess the optimal trim range for the 3' and 5' ends of
the paired-end reads. This assessment will be based on the *'Per sequence
GC content'*.
Parameters
----------
p1_data: str
Path to FastQC data report file from pair 1
p2_data: str
Path to FastQC data report file from pair 2
Returns
-------
optimal_5trim: int
Optimal trim index for the 5' end of the reads
optimal_3trim: int
Optimal trim index for the 3' end of the reads
See Also
--------
trim_range
"""
sample_ranges = [trim_range(x) for x in [p1_data, p2_data]]
# Get the optimal trim position for 5' end
optimal_5trim = max([x[0] for x in sample_ranges])
# Get optimal trim position for 3' end
optimal_3trim = min([x[1] for x in sample_ranges])
return optimal_5trim, optimal_3trim | python | {
"resource": ""
} |
q272779 | get_summary | test | def get_summary(summary_file):
"""Parses a FastQC summary report file and returns it as a dictionary.
This function parses a typical FastQC summary report file, retrieving
only the information on the first two columns. For instance, a line could
be::
'PASS Basic Statistics SH10762A_1.fastq.gz'
This parser will build a dictionary with the string in the second column
as a key and the QC result as the value. In this case, the returned
``dict`` would be something like::
{"Basic Statistics": "PASS"}
Parameters
----------
summary_file: str
Path to FastQC summary report.
Returns
-------
summary_info: :py:data:`OrderedDict`
Returns the information of the FastQC summary report as an ordered
dictionary, with the categories as strings and the QC result as values.
"""
summary_info = OrderedDict()
logger.debug("Retrieving summary information from file: {}".format(
summary_file))
with open(summary_file) as fh:
for line in fh:
# Skip empty lines
if not line.strip():
continue
# Populate summary info
fields = [x.strip() for x in line.split("\t")]
summary_info[fields[1]] = fields[0]
logger.debug("Retrieved summary information from file: {}".format(
summary_info))
return summary_info | python | {
"resource": ""
} |
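A small sketch of the expected parsing, writing a two-line hypothetical summary file first::

# Hypothetical FastQC summary content
summary_lines = [
    "PASS\tBasic Statistics\tSH10762A_1.fastq.gz",
    "WARN\tPer base sequence content\tSH10762A_1.fastq.gz",
]
with open("summary_example.txt", "w") as fh:
    fh.write("\n".join(summary_lines))

# Category -> QC result, in file order
info = get_summary("summary_example.txt")
print(info["Basic Statistics"])           # PASS
print(info["Per base sequence content"])  # WARN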
q272780 | check_summary_health | test | def check_summary_health(summary_file, **kwargs):
"""Checks the health of a sample from the FastQC summary file.
Parses the FastQC summary file and tests whether the sample is good
or not. There are four categories that cannot fail, and two that
must pass in order for the sample pass this check. If the sample fails
the quality checks, a list with the failing categories is also returned.
Categories that cannot fail::
fail_sensitive = [
"Per base sequence quality",
"Overrepresented sequences",
"Sequence Length Distribution",
"Per sequence GC content"
]
Categories that must pass::
must_pass = [
"Per base N content",
"Adapter Content"
]
Parameters
----------
summary_file: str
Path to FastQC summary file.
Returns
-------
x : bool
Returns ``True`` if the sample passes all tests. ``False`` if not.
summary_info : list
A list with the FastQC categories that failed the tests. Is empty
if the sample passes all tests.
"""
# Store the summary categories that cannot fail. If they fail, do not
# proceed with this sample
fail_sensitive = kwargs.get("fail_sensitive", [
"Per base sequence quality",
"Overrepresented sequences",
"Sequence Length Distribution",
"Per sequence GC content"
])
logger.debug("Fail sensitive categories: {}".format(fail_sensitive))
# Store summary categories that must pass. If they do not, do not proceed
# with that sample
must_pass = kwargs.get("must_pass", [
"Per base N content",
"Adapter Content"
])
logger.debug("Must pass categories: {}".format(must_pass))
warning_fail_sensitive = kwargs.get("warning_fail_sensitive", [
"Per base sequence quality",
"Overrepresented sequences",
])
warning_must_pass = kwargs.get("warning_must_pass", [
"Per base sequence content"
])
# Get summary dictionary
summary_info = get_summary(summary_file)
# This flag will change to False if one of the tests fails
health = True
# List of failing categories
failed = []
# List of warning categories
warning = []
for cat, test in summary_info.items():
logger.debug("Assessing category {} with result {}".format(cat, test))
# FAILURES
# Check for fail sensitive
if cat in fail_sensitive and test == "FAIL":
health = False
failed.append("{}:{}".format(cat, test))
logger.error("Category {} failed a fail sensitive "
"category".format(cat))
# Check for must pass
if cat in must_pass and test != "PASS":
health = False
failed.append("{}:{}".format(cat, test))
logger.error("Category {} failed a must pass category".format(
cat))
# WARNINGS
# Check for fail sensitive
if cat in warning_fail_sensitive and test == "FAIL":
warning.append("Failed category: {}".format(cat))
logger.warning("Category {} flagged at a fail sensitive "
"category".format(cat))
if cat in warning_must_pass and test != "PASS":
warning.append("Did not pass category: {}".format(cat))
logger.warning("Category {} flagged at a must pass "
"category".format(cat))
# Passed all tests
return health, failed, warning | python | {
"resource": ""
} |
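A usage sketch with the default category lists, reusing the hypothetical summary file from the previous example::

healthy, failed, warnings = check_summary_health("summary_example.txt")

if not healthy:
    print("Sample failed QC on:", failed)
elif warnings:
    print("Sample passed QC with warnings:", warnings)
else:
    print("Sample passed QC cleanly")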
q272781 | Bowtie.parse_log | test | def parse_log(self, bowtie_log):
"""Parse a bowtie log file.
This is a bowtie log parsing method that populates the
:py:attr:`n_reads`, :py:attr:`align_0x`, :py:attr:`align_1x`, :py:attr:`align_mt1x` and :py:attr:`overall_rate` attributes with
data from the log file.
Disclaimer: THIS METHOD IS HORRIBLE BECAUSE THE BOWTIE LOG IS HORRIBLE.
The insertion of data into the attributes is done by the
:py:meth:`set_attribute` method.
Parameters
----------
bowtie_log : str
Path to the bowtie log file.
"""
# Regexes - thanks to https://github.com/ewels/MultiQC/blob/master/multiqc/modules/bowtie2/bowtie2.py
regexes = {
'unpaired': {
'unpaired_aligned_none': r"(\\d+) \\([\\d\\.]+%\\) aligned 0 times",
'unpaired_aligned_one': r"(\\d+) \\([\\d\\.]+%\\) aligned exactly 1 time",
'unpaired_aligned_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned >1 times"
},
'paired': {
'paired_aligned_none': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly 0 times",
'paired_aligned_one': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly exactly 1 time",
'paired_aligned_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned concordantly >1 times",
'paired_aligned_discord_one': r"(\\d+) \\([\\d\\.]+%\\) aligned discordantly 1 time",
'paired_aligned_discord_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned discordantly >1 times",
'paired_aligned_mate_one': r"(\\d+) \\([\\d\\.]+%\\) aligned exactly 1 time",
'paired_aligned_mate_multi': r"(\\d+) \\([\\d\\.]+%\\) aligned >1 times",
'paired_aligned_mate_none': r"(\\d+) \\([\\d\\.]+%\\) aligned 0 times"
}
}
#Missing parser for unpaired (not implemented in flowcraft yet)
with open(bowtie_log, "r") as f:
#Go through log file line by line
for l in f:
#total reads
total = re.search(r"(\\d+) reads; of these:", l)
if total:
self.set_n_reads(total.group(1))
# Paired end reads aka the pain
paired = re.search(r"(\\d+) \\([\\d\\.]+%\\) were paired; of these:", l)
if paired:
paired_total = int(paired.group(1))
paired_numbers = {}
# Do nested loop whilst we have this level of indentation
l = f.readline()
while l.startswith(' '):
for k, r in regexes['paired'].items():
match = re.search(r, l)
if match:
paired_numbers[k] = int(match.group(1))
l = f.readline()
align_zero_times = paired_numbers['paired_aligned_none'] + paired_numbers['paired_aligned_mate_none']
if align_zero_times:
self.set_align_0x(align_zero_times)
align_one_time = paired_numbers['paired_aligned_one'] + paired_numbers['paired_aligned_mate_one']
if align_one_time:
self.set_align_1x(align_one_time)
align_more_than_one_time = paired_numbers['paired_aligned_multi'] + paired_numbers['paired_aligned_mate_multi']
if align_more_than_one_time:
self.set_align_mt1x(align_more_than_one_time)
# Overall alignment rate
overall = re.search(r"([\\d\\.]+)% overall alignment rate", l)
if overall:
self.overall_rate = float(overall.group(1)) | python | {
"resource": ""
} |
q272782 | NextflowGenerator._parse_process_name | test | def _parse_process_name(name_str):
"""Parses the process string and returns the process name and its
directives
Process strings may contain directive information with the following
syntax::
proc_name={'directive':'val'}
This method parses this string and returns the process name as a
string and the directives information as a dictionary.
Parameters
----------
name_str : str
Raw string with process name and, potentially, directive
information
Returns
-------
str
Process name
dict or None
Process directives
"""
directives = None
fields = name_str.split("=")
process_name = fields[0]
if len(fields) == 2:
_directives = fields[1].replace("'", '"')
try:
directives = json.loads(_directives)
except json.decoder.JSONDecodeError:
raise eh.ProcessError(
"Could not parse directives for process '{}'. The raw"
" string is: {}\n"
"Possible causes include:\n"
"\t1. Spaces inside directives\n"
"\t2. Missing '=' symbol before directives\n"
"\t3. Missing quotes (' or \") around directives\n"
"A valid example: process_name={{'cpus':'2'}}".format(
process_name, name_str))
return process_name, directives | python | {
"resource": ""
} |
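A sketch of the parsing behaviour on two hypothetical process strings; the method is assumed to be callable as a static helper of NextflowGenerator::

# Component with directives, following the proc_name={'directive':'val'} syntax
name, directives = NextflowGenerator._parse_process_name(
    "fastqc={'cpus':'2','memory':'4GB'}")
print(name)        # fastqc
print(directives)  # {'cpus': '2', 'memory': '4GB'}

# A plain component name yields no directives
name, directives = NextflowGenerator._parse_process_name("trimmomatic")
print(directives)  # None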
q272783 | NextflowGenerator._add_dependency | test | def _add_dependency(self, p, template, inlane, outlane, pid):
"""Automatically Adds a dependency of a process.
This method adds a template to the process list attribute as a
dependency. It will adapt the input lane, output lane and process
id of the process that depends on it.
Parameters
----------
p : Process
Process class that contains the dependency.
template : str
Template name of the dependency.
inlane : int
Input lane.
outlane : int
Output lane.
pid : int
Process ID.
"""
dependency_proc = self.process_map[template](template=template)
if dependency_proc.input_type != p.input_type:
logger.error("Cannot automatically add dependency with different"
" input type. Input type of process '{}' is '{}."
" Input type of dependency '{}' is '{}'".format(
p.template, p.input_type, template,
dependency_proc.input_type))
input_suf = "{}_{}_dep".format(inlane, pid)
output_suf = "{}_{}_dep".format(outlane, pid)
dependency_proc.set_main_channel_names(input_suf, output_suf, outlane)
# To insert the dependency process before the current process, we'll
# need to move the input channel name of the later to the former, and
# set a new connection between the dependency and the process.
dependency_proc.input_channel = p.input_channel
p.input_channel = dependency_proc.output_channel
# If the current process was the first in the pipeline, change the
# lanes so that the dependency becomes the first process
if not p.parent_lane:
p.parent_lane = outlane
dependency_proc.parent_lane = None
else:
dependency_proc.parent_lane = inlane
p.parent_lane = outlane
self.processes.append(dependency_proc) | python | {
"resource": ""
} |
q272784 | NextflowGenerator._search_tree_backwards | test | def _search_tree_backwards(self, template, parent_lanes):
"""Searches the process tree backwards in search of a provided process
The search takes into consideration the provided parent lanes and
searches only those
Parameters
----------
template : str
Name of the process template attribute being searched
parent_lanes : list
List of integers with the parent lanes to be searched
Returns
-------
bool
Returns True when the template is found. Otherwise returns False.
"""
for p in self.processes[::-1]:
# Ignore process in different lanes
if p.lane not in parent_lanes:
continue
# template found
if p.template == template:
return True
return False | python | {
"resource": ""
} |
q272785 | NextflowGenerator._build_header | test | def _build_header(self):
"""Adds the header template to the master template string
"""
logger.debug("===============")
logger.debug("Building header")
logger.debug("===============")
self.template += hs.header | python | {
"resource": ""
} |
q272786 | NextflowGenerator._build_footer | test | def _build_footer(self):
"""Adds the footer template to the master template string"""
logger.debug("===============")
logger.debug("Building footer")
logger.debug("===============")
self.template += fs.footer | python | {
"resource": ""
} |
q272787 | NextflowGenerator._set_channels | test | def _set_channels(self):
"""Sets the main channels for the pipeline
This method will parse the :attr:`~Process.processes` attribute
and perform the following tasks for each process:
- Sets the input/output channels and main input forks and adds
them to the process's
:attr:`flowcraft.process.Process._context`
attribute (See
:func:`~NextflowGenerator.set_channels`).
- Automatically updates the main input channel of the first
process of each lane so that they fork from the user provided
parameters (See
:func:`~NextflowGenerator._update_raw_input`).
- Checks for the presence of secondary channels and adds them to the
:attr:`~NextflowGenerator.secondary_channels` attribute.
Notes
-----
**On the secondary channel setup**: With this approach, there can only
be one secondary link start for each type of secondary link. For
instance, if there are two processes that start a secondary channel
for the ``SIDE_max_len`` channel, only the last one will be recorded,
and all receiving processes will get the channel from the latest
process. Secondary channels can only link if the source process is
downstream of the sink process in its "forking" path.
"""
logger.debug("=====================")
logger.debug("Setting main channels")
logger.debug("=====================")
for i, p in enumerate(self.processes):
# Set main channels for the process
logger.debug("[{}] Setting main channels with pid: {}".format(
p.template, i))
p.set_channels(pid=i)
# If there is no parent lane, set the raw input channel from user
logger.debug("{} {} {}".format(p.parent_lane, p.input_type, p.template))
if not p.parent_lane and p.input_type:
self._update_raw_input(p)
self._update_extra_inputs(p)
self._update_secondary_channels(p)
logger.info(colored_print(
"\tChannels set for {} \u2713".format(p.template))) | python | {
"resource": ""
} |
q272788 | NextflowGenerator._set_init_process | test | def _set_init_process(self):
"""Sets the main raw inputs and secondary inputs on the init process
This method will fetch the :class:`flowcraft.process.Init` process
instance and sets the raw input (
:func:`flowcraft.process.Init.set_raw_inputs`) for
that process. This will handle the connection of the user parameters
with channels that are then consumed in the pipeline.
"""
logger.debug("========================")
logger.debug("Setting secondary inputs")
logger.debug("========================")
# Get init process
init_process = self.processes[0]
logger.debug("Setting main raw inputs: "
"{}".format(self.main_raw_inputs))
init_process.set_raw_inputs(self.main_raw_inputs)
logger.debug("Setting extra inputs: {}".format(self.extra_inputs))
init_process.set_extra_inputs(self.extra_inputs) | python | {
"resource": ""
} |
q272789 | NextflowGenerator._set_secondary_channels | test | def _set_secondary_channels(self):
"""Sets the secondary channels for the pipeline
This will iterate over the
:py:attr:`NextflowGenerator.secondary_channels` dictionary that is
populated when executing
:func:`~NextflowGenerator._update_secondary_channels` method.
"""
logger.debug("==========================")
logger.debug("Setting secondary channels")
logger.debug("==========================")
logger.debug("Setting secondary channels: {}".format(
self.secondary_channels))
for source, lanes in self.secondary_channels.items():
for vals in lanes.values():
if not vals["end"]:
logger.debug("[{}] No secondary links to setup".format(
vals["p"].template))
continue
logger.debug("[{}] Setting secondary links for "
"source {}: {}".format(vals["p"].template,
source,
vals["end"]))
vals["p"].set_secondary_channel(source, vals["end"]) | python | {
"resource": ""
} |
q272790 | NextflowGenerator._set_status_channels | test | def _set_status_channels(self):
"""Compiles all status channels for the status compiler process
"""
status_inst = pc.StatusCompiler(template="status_compiler")
report_inst = pc.ReportCompiler(template="report_compiler")
# Compile status channels from pipeline process
status_channels = []
for p in [p for p in self.processes]:
if not any([isinstance(p, x) for x in self.skip_class]):
status_channels.extend(p.status_strs)
if not status_channels:
logger.debug("No status channels found. Skipping status compiler"
"process")
return
logger.debug("Setting status channels: {}".format(status_channels))
# Check for duplicate channels. Raise exception if found.
if len(status_channels) != len(set(status_channels)):
raise eh.ProcessError(
"Duplicate status channels detected. Please ensure that "
"the 'status_channels' attributes of each process are "
"unique. Here are the status channels:\n\n{}".format(
", ".join(status_channels)
))
status_inst.set_compiler_channels(status_channels)
report_channels = ["REPORT_{}".format(x[len("STATUS_"):]) for x in
status_channels]
report_inst.set_compiler_channels(report_channels)
self.processes.extend([status_inst, report_inst]) | python | {
"resource": ""
} |
q272791 | NextflowGenerator._get_resources_string | test | def _get_resources_string(res_dict, pid):
""" Returns the nextflow resources string from a dictionary object
If the dictionary has at least one of the resource directives, these
will be compiled for each process in the dictionary and returned
as a string ready for injection in the nextflow config file template.
This dictionary should be::
dict = {"processA": {"cpus": 1, "memory": "4GB"},
"processB": {"cpus": 2}}
Parameters
----------
res_dict : dict
Dictionary with the resources for processes.
pid : int
Unique identifier of the process
Returns
-------
str
nextflow config string
"""
config_str = ""
ignore_directives = ["container", "version"]
for p, directives in res_dict.items():
for d, val in directives.items():
if d in ignore_directives:
continue
config_str += '\n\t${}_{}.{} = {}'.format(p, pid, d, val)
return config_str | python | {
"resource": ""
} |
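A sketch of the configuration string produced from the docstring example; the pid value is arbitrary, the method is assumed to be callable as a static helper, and the resulting lines are shown as comments::

res_dict = {"processA": {"cpus": 1, "memory": "4GB"},
            "processB": {"cpus": 2}}

# With pid=1 one '$process_pid.directive = value' line is emitted per
# non-ignored directive:
#   $processA_1.cpus = 1
#   $processA_1.memory = 4GB
#   $processB_1.cpus = 2
config_str = NextflowGenerator._get_resources_string(res_dict, 1)
print(config_str)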
q272792 | NextflowGenerator._get_container_string | test | def _get_container_string(cont_dict, pid):
""" Returns the nextflow containers string from a dictionary object
If the dictionary has at least one of the container directives, these
will be compiled for each process in the dictionary and returned
as a string ready for injection in the nextflow config file template.
This dictionary should be::
dict = {"processA": {"container": "asd", "version": "1.0.0"},
"processB": {"container": "dsd"}}
Parameters
----------
cont_dict : dict
Dictionary with the containers for processes.
pid : int
Unique identifier of the process
Returns
-------
str
nextflow config string
"""
config_str = ""
for p, directives in cont_dict.items():
container = ""
if "container" in directives:
container += directives["container"]
if "version" in directives:
container += ":{}".format(directives["version"])
else:
container += ":latest"
if container:
config_str += '\n\t${}_{}.container = "{}"'.format(p, pid, container)
return config_str | python | {
"resource": ""
} |
q272793 | NextflowGenerator._get_params_string | test | def _get_params_string(self):
"""Returns the nextflow params string from a dictionary object.
The params dict should be a set of key:value pairs with the
parameter name, and the default parameter value::
self.params = {
"genomeSize": 2.1,
"minCoverage": 15
}
The values are then added to the string as they are. For instance,
a ``2.1`` float will appear as ``param = 2.1`` and a
``"'teste'" string will appear as ``param = 'teste'`` (Note the
string).
Returns
-------
str
Nextflow params configuration string
"""
params_str = ""
for p in self.processes:
logger.debug("[{}] Adding parameters: {}\n".format(
p.template, p.params)
)
# Add an header with the template name to structure the params
# configuration
if p.params and p.template != "init":
p.set_param_id("_{}".format(p.pid))
params_str += "\n\t/*"
params_str += "\n\tComponent '{}_{}'\n".format(p.template,
p.pid)
params_str += "\t{}\n".format("-" * (len(p.template) + len(p.pid) + 12))
params_str += "\t*/\n"
for param, val in p.params.items():
if p.template == "init":
param_id = param
else:
param_id = "{}_{}".format(param, p.pid)
params_str += "\t{} = {}\n".format(param_id, val["default"])
return params_str | python | {
"resource": ""
} |
q272794 | NextflowGenerator._get_merged_params_string | test | def _get_merged_params_string(self):
"""Returns the merged nextflow params string from a dictionary object.
The params dict should be a set of key:value pairs with the
parameter name, and the default parameter value::
self.params = {
"genomeSize": 2.1,
"minCoverage": 15
}
The values are then added to the string as they are. For instance,
a ``2.1`` float will appear as ``param = 2.1`` and a
``"'teste'"`` string will appear as ``param = 'teste'`` (note the
string).
Identical parameters in multiple processes will be merged into the same
param.
Returns
-------
str
Nextflow params configuration string
"""
params_temp = {}
for p in self.processes:
logger.debug("[{}] Adding parameters: {}".format(p.template,
p.params))
for param, val in p.params.items():
params_temp[param] = val["default"]
config_str = "\n\t" + "\n\t".join([
"{} = {}".format(param, val) for param, val in params_temp.items()
])
return config_str | python | {
"resource": ""
} |
q272795 | NextflowGenerator._get_manifest_string | test | def _get_manifest_string(self):
"""Returns the nextflow manifest config string to include in the
config file from the information on the pipeline.
Returns
-------
str
Nextflow manifest configuration string
"""
config_str = ""
config_str += '\n\tname = "{}"'.format(self.pipeline_name)
config_str += '\n\tmainScript = "{}"'.format(self.nf_file)
return config_str | python | {
"resource": ""
} |
q272796 | NextflowGenerator._set_configurations | test | def _set_configurations(self):
"""This method will iterate over all process in the pipeline and
populate the nextflow configuration files with the directives
of each process in the pipeline.
"""
logger.debug("======================")
logger.debug("Setting configurations")
logger.debug("======================")
resources = ""
containers = ""
params = ""
manifest = ""
if self.merge_params:
params += self._get_merged_params_string()
help_list = self._get_merged_params_help()
else:
params += self._get_params_string()
help_list = self._get_params_help()
for p in self.processes:
# Skip processes with the directives attribute populated
if not p.directives:
continue
logger.debug("[{}] Adding directives: {}".format(
p.template, p.directives))
resources += self._get_resources_string(p.directives, p.pid)
containers += self._get_container_string(p.directives, p.pid)
manifest = self._get_manifest_string()
self.resources = self._render_config("resources.config", {
"process_info": resources
})
self.containers = self._render_config("containers.config", {
"container_info": containers
})
self.params = self._render_config("params.config", {
"params_info": params
})
self.manifest = self._render_config("manifest.config", {
"manifest_info": manifest
})
self.help = self._render_config("Helper.groovy", {
"nf_file": basename(self.nf_file),
"help_list": help_list,
"version": __version__,
"pipeline_name": " ".join([x.upper() for x in self.pipeline_name])
})
self.user_config = self._render_config("user.config", {}) | python | {
"resource": ""
} |
q272797 | NextflowGenerator.dag_to_file | test | def dag_to_file(self, dict_viz, output_file=".treeDag.json"):
"""Writes dag to output file
Parameters
----------
dict_viz: dict
Tree like dictionary that is used to export tree data of processes
to html file and here for the dotfile .treeDag.json
"""
outfile_dag = open(os.path.join(dirname(self.nf_file), output_file)
, "w")
outfile_dag.write(json.dumps(dict_viz))
outfile_dag.close() | python | {
"resource": ""
} |
q272798 | NextflowGenerator.render_pipeline | test | def render_pipeline(self):
"""Write pipeline attributes to json
This function writes the pipeline and their attributes to a json file,
that is intended to be read by resources/pipeline_graph.html to render
a graphical output showing the DAG.
"""
dict_viz = {
"name": "root",
"children": []
}
last_of_us = {}
f_tree = self._fork_tree if self._fork_tree else {1: [1]}
for x, (k, v) in enumerate(f_tree.items()):
for p in self.processes[1:]:
if x == 0 and p.lane not in [k] + v:
continue
if x > 0 and p.lane not in v:
continue
if not p.parent_lane:
lst = dict_viz["children"]
else:
lst = last_of_us[p.parent_lane]
tooltip = {
"name": "{}_{}".format(p.template, p.pid),
"process": {
"pid": p.pid,
"input": p.input_type,
"output": p.output_type if p.output_type else "None",
"lane": p.lane,
},
"children": []
}
dir_var = ""
for k2, v2 in p.directives.items():
dir_var += k2
for d in v2:
try:
# Remove quotes from string directives
directive = v2[d].replace("'", "").replace('"', '') \
if isinstance(v2[d], str) else v2[d]
dir_var += "{}: {}".format(d, directive)
except KeyError:
pass
if dir_var:
tooltip["process"]["directives"] = dir_var
else:
tooltip["process"]["directives"] = "N/A"
lst.append(tooltip)
last_of_us[p.lane] = lst[-1]["children"]
# write to file dict_viz
self.dag_to_file(dict_viz)
# Write tree forking information for dotfile
with open(os.path.join(dirname(self.nf_file),
".forkTree.json"), "w") as fh:
fh.write(json.dumps(self._fork_tree))
# send with jinja to html resource
return self._render_config("pipeline_graph.html", {"data": dict_viz}) | python | {
"resource": ""
} |
q272799 | NextflowGenerator.write_configs | test | def write_configs(self, project_root):
"""Wrapper method that writes all configuration files to the pipeline
directory
"""
# Write resources config
with open(join(project_root, "resources.config"), "w") as fh:
fh.write(self.resources)
# Write containers config
with open(join(project_root, "containers.config"), "w") as fh:
fh.write(self.containers)
# Write containers config
with open(join(project_root, "params.config"), "w") as fh:
fh.write(self.params)
# Write manifest config
with open(join(project_root, "manifest.config"), "w") as fh:
fh.write(self.manifest)
# Write user config if not present in the project directory
if not exists(join(project_root, "user.config")):
with open(join(project_root, "user.config"), "w") as fh:
fh.write(self.user_config)
lib_dir = join(project_root, "lib")
if not exists(lib_dir):
os.makedirs(lib_dir)
with open(join(lib_dir, "Helper.groovy"), "w") as fh:
fh.write(self.help)
# Generate the pipeline DAG
pipeline_to_json = self.render_pipeline()
with open(splitext(self.nf_file)[0] + ".html", "w") as fh:
fh.write(pipeline_to_json) | python | {
"resource": ""
} |