_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3
values | text stringlengths 75 19.8k | language stringclasses 1
value | meta_information dict |
|---|---|---|---|---|---|
def export_params(self):
    """Write the params of every pipeline component as JSON to stdout.

    Builds a ``{template_name: params_dict}`` mapping from the pipeline
    processes and dumps it as a single JSON string. The first process is
    the init process and carries no component params, so it is skipped.
    """
    exported = {proc.template: proc.params for proc in self.processes[1:]}
    # Emit the JSON payload directly on stdout for downstream consumers
    sys.stdout.write(json.dumps(exported))
def export_directives(self):
    """Write the directives of every pipeline component as JSON to stdout.

    Builds a ``{template_name: directives_dict}`` mapping from the
    pipeline processes (skipping the first, init, process) and dumps it
    as a single JSON string on stdout.
    """
    exported = {proc.template: proc.directives
                for proc in self.processes[1:]}
    # Emit the JSON payload directly on stdout for downstream consumers
    sys.stdout.write(json.dumps(exported))
def fetch_docker_tags(self):
    """
    Export all dockerhub tags associated with each component given by
    the -t flag.

    Queries the Docker Hub v2 API once per distinct repo/version pair and
    prints a three-column table (component, container, tags) to stdout,
    marking the component's default version with a ``*``.
    """
    # dict to store the already parsed components (useful when forks are
    # given to the pipeline string via -t flag)
    dict_of_parsed = {}
    # fetches terminal width and subtracts 3 because we always add a
    # new line character and we want a space at the beginning and at the
    # end of each line
    terminal_width = shutil.get_terminal_size().columns - 3
    # first header
    center_string = " Selected container tags "
    # starts a list with the headers; each entry is a 3-element row that
    # is later formatted into the three table columns
    tags_list = [
        [
            "=" * int(terminal_width / 4),
            "{0}{1}{0}".format(
                "=" * int(((terminal_width/2 - len(center_string)) / 2)),
                center_string),
            "{}\n".format("=" * int(terminal_width / 4))
        ],
        ["component", "container", "tags"],
        [
            "=" * int(terminal_width / 4),
            "=" * int(terminal_width / 2),
            "=" * int(terminal_width / 4)
        ]
    ]
    # Skip first init process and iterate through the others
    for p in self.processes[1:]:
        template = p.template
        # if component has already been printed then skip and don't print
        # again
        if template in dict_of_parsed:
            continue
        # starts a list of containers for the current process in
        # dict_of_parsed, in which each container will be added to this
        # list once it gets parsed
        dict_of_parsed[template] = {
            "container": []
        }
        # fetch repo name from directives of each component.
        for directives in p.directives.values():
            try:
                repo = directives["container"]
                default_version = directives["version"]
            except KeyError:
                # adds the default container if container key isn't present
                # this happens for instance in integrity_coverage
                repo = "flowcraft/flowcraft_base"
                default_version = "1.0.0-1"
            # checks if repo_version already exists in list of the
            # containers for the current component being queried
            # (repo and version are concatenated without a separator,
            # which is fine for a pure membership key)
            repo_version = repo + default_version
            if repo_version not in dict_of_parsed[template]["container"]:
                # make the request to docker hub
                r = requests.get(
                    "https://hub.docker.com/v2/repositories/{}/tags/"
                    .format(repo)
                )
                # checks the status code of the request, if it is 200 then
                # parses docker hub entry, otherwise retrieve no tags but
                # alerts the user
                # NOTE(review): any status other than 404 is treated as a
                # successful response; a 5xx would make the json parsing
                # fail here — confirm this is intended
                if r.status_code != 404:
                    # parse response content to dict and fetch results key
                    r_content = json.loads(r.content)["results"]
                    for version in r_content:
                        # flag the component's default version with a '*'
                        printed_version = (version["name"] + "*") \
                            if version["name"] == default_version \
                            else version["name"]
                        tags_list.append([template, repo, printed_version])
                else:
                    tags_list.append([template, repo, "No DockerHub tags"])
                dict_of_parsed[template]["container"].append(repo_version)
    # iterate through each entry in tags_list and print the list of tags
    # for each component. Each entry (excluding the headers) contains
    # 3 elements (component name, container and tag version)
    for x, entry in enumerate(tags_list):
        # adds different color to the header in the first list and
        # alternates row colors (different background) for readability
        color = "blue_bold" if x < 3 else \
            ("white" if x % 2 != 0 else "0;37;40m")
        # generates a small list with the terminal width for each column,
        # this will be given to string formatting as the 3, 4 and 5 element
        final_width = [
            int(terminal_width/4),
            int(terminal_width/2),
            int(terminal_width/4)
        ]
        # writes the string to the stdout
        sys.stdout.write(
            colored_print("\n {0: <{3}} {1: ^{4}} {2: >{5}}".format(
                *entry, *final_width), color)
        )
    # assures that the entire line gets the same color
    sys.stdout.write("\n{0: >{1}}\n".format("(* = default)",
                                            terminal_width + 3))
def build(self):
    """Main pipeline builder

    This method is responsible for building the
    :py:attr:`NextflowGenerator.template` attribute that will contain
    the nextflow code of the pipeline.

    First it builds the header, then sets the main channels, the
    secondary inputs, secondary channels and finally the
    status channels. When the pipeline is built, it writes the code
    to a nextflow file.
    """
    logger.info(colored_print(
        "\tSuccessfully connected {} process(es) with {} "
        "fork(s) across {} lane(s) \u2713".format(
            len(self.processes[1:]), len(self._fork_tree), self.lanes)))
    # Generate regular nextflow header that sets up the shebang, imports
    # and all possible initial channels
    self._build_header()
    self._set_channels()
    self._set_init_process()
    self._set_secondary_channels()
    logger.info(colored_print(
        "\tSuccessfully set {} secondary channel(s) \u2713".format(
            len(self.secondary_channels))))
    self._set_compiler_channels()
    self._set_configurations()
    logger.info(colored_print(
        "\tFinished configurations \u2713"))
    # Concatenate the nextflow code of every process into the template
    for p in self.processes:
        self.template += "\n{}".format(p.template_str)
    self._build_footer()
    project_root = dirname(self.nf_file)
    # Write configs
    self.write_configs(project_root)
    # Write pipeline file
    with open(self.nf_file, "w") as fh:
        fh.write(self.template)
    logger.info(colored_print(
        "\tPipeline written into {} \u2713".format(self.nf_file)))
def set_kmers(kmer_opt, max_read_len):
    """Returns a kmer list based on the provided kmer option and max read len.

    Parameters
    ----------
    kmer_opt : str
        The k-mer option. Can be either ``'auto'``, ``'default'`` or a
        sequence of space separated integers, ``'23, 45, 67'``.
    max_read_len : int
        The maximum read length of the current sample.

    Returns
    -------
    kmers : list
        List of k-mer values that will be provided to Spades. An empty
        list means SPAdes picks the k-mers itself.
    """
    logger.debug("Kmer option set to: {}".format(kmer_opt))

    # Automatic selection: pick a k-mer range suited to the read length
    if kmer_opt == "auto":
        if max_read_len >= 175:
            kmers = [55, 77, 99, 113, 127]
        else:
            kmers = [21, 33, 55, 67, 77]
        logger.debug("Kmer range automatically selected based on max read"
                     "length of {}: {}".format(max_read_len, kmers))
        return kmers

    # Manual selection: two or more whitespace-separated values
    manual_kmers = kmer_opt.split()
    if len(manual_kmers) > 1:
        logger.debug("Kmer range manually set to: {}".format(manual_kmers))
        return manual_kmers

    # Anything else (e.g. 'default'): defer the choice to SPAdes
    logger.debug("Kmer range set to empty (will be automatically "
                 "determined by SPAdes")
    return []
def main(sample_id, fastq_pair, max_len, kmer, clear):
    """Main executor of the spades template.

    Runs metaSPAdes on a FastQ pair, writes the exit status to the
    ``.status`` file and renames the resulting assembly.

    Parameters
    ----------
    sample_id : str
        Sample Identification string.
    fastq_pair : list
        Two element list containing the paired FastQ files.
    max_len : int
        Maximum read length. This value is determined in
        :py:class:`templates.integrity_coverage`
    kmer : str
        Can be either ``'auto'``, ``'default'`` or a
        sequence of space separated integers, ``'23, 45, 67'``.
    clear : str
        When the string ``"true"``, the input FastQ files are removed
        after a successful assembly.
    """
    logger.info("Starting spades")
    logger.info("Setting SPAdes kmers")
    kmers = set_kmers(kmer, max_len)
    logger.info("SPAdes kmers set to: {}".format(kmers))
    # '$task.cpus' is substituted by nextflow before this template runs
    cli = [
        "metaspades.py",
        "--only-assembler",
        "--threads",
        "$task.cpus",
        "-o",
        "."
    ]
    # Add kmers, if any were specified
    if kmers:
        cli += ["-k {}".format(",".join([str(x) for x in kmers]))]
    # Add FastQ files
    cli += [
        "-1",
        fastq_pair[0],
        "-2",
        fastq_pair[1]
    ]
    logger.debug("Running metaSPAdes subprocess with command: {}".format(cli))
    p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate()
    # Attempt to decode STDERR output from bytes. If unsuccessful, coerce to
    # string
    try:
        stderr = stderr.decode("utf8")
        stdout = stdout.decode("utf8")
    except (UnicodeDecodeError, AttributeError):
        stderr = str(stderr)
        stdout = str(stdout)
    logger.info("Finished metaSPAdes subprocess with STDOUT:\\n"
                "======================================\\n{}".format(stdout))
    logger.info("Fished metaSPAdes subprocesswith STDERR:\\n"
                "======================================\\n{}".format(stderr))
    logger.info("Finished metaSPAdes with return code: {}".format(
        p.returncode))
    # Report the run outcome through the .status channel file
    with open(".status", "w") as fh:
        if p.returncode != 0:
            fh.write("error")
            return
        else:
            fh.write("pass")
    # Change the default contigs.fasta assembly name to a more informative one
    if "_trim." in fastq_pair[0]:
        sample_id += "_trim"
    assembly_file = "{}_metaspades.fasta".format(
        sample_id)
    os.rename("contigs.fasta", assembly_file)
    logger.info("Setting main assembly file to: {}".format(assembly_file))
    # Remove input fastq files when clear option is specified.
    # Only remove temporary input when the expected output exists.
    if clear == "true" and os.path.exists(assembly_file):
        clean_up(fastq_pair)
def _get_report_id(self):
    """Returns a hash of the reports JSON file

    In watch mode the id is derived from the pipeline file content plus
    the working directory, hostname and MAC address, so the same pipeline
    run in the same place always maps to the same id. Otherwise the id is
    read from the report's nextflow metadata.

    Returns
    -------
    str
        The report identifier.

    Raises
    ------
    eh.ReportError
        When the report JSON lacks the 'scriptId'/'sessionId' metadata.
    """
    if self.watch:
        # Searches for the first occurence of the nextflow pipeline
        # file name in the .nextflow.log file
        pipeline_path = get_nextflow_filepath(self.log_file)
        # Get hash from the entire pipeline file, read in chunks so large
        # files do not need to fit in memory
        pipeline_hash = hashlib.md5()
        with open(pipeline_path, "rb") as fh:
            for chunk in iter(lambda: fh.read(4096), b""):
                pipeline_hash.update(chunk)
        # Get hash from the current working dir and hostname
        workdir = os.getcwd().encode("utf8")
        hostname = socket.gethostname().encode("utf8")
        hardware_addr = str(uuid.getnode()).encode("utf8")
        dir_hash = hashlib.md5(workdir + hostname + hardware_addr)
        return pipeline_hash.hexdigest() + dir_hash.hexdigest()
    else:
        with open(self.report_file) as fh:
            report_json = json.loads(fh.read())
        metadata = report_json["data"]["results"][0]["nfMetadata"]
        try:
            report_id = metadata["scriptId"] + metadata["sessionId"]
        except KeyError:
            raise eh.ReportError("Incomplete or corrupt report JSON file "
                                 "missing the 'scriptId' and/or 'sessionId' "
                                 "metadata information")
        return report_id
def update_trace_watch(self):
    """Parses the nextflow trace file and retrieves the path of report JSON
    files that have not been sent to the service yet.

    New 'report' task entries found in the trace are appended to
    ``self.report_queue`` and ``self.send`` is flagged so the caller
    knows a payload is pending.
    """
    # Check the size stamp of the tracefile. Only proceed with the parsing
    # if it changed from the previous size.
    size_stamp = os.path.getsize(self.trace_file)
    self.trace_retry = 0
    if size_stamp and size_stamp == self.trace_sizestamp:
        return
    else:
        logger.debug("Updating trace size stamp to: {}".format(size_stamp))
        self.trace_sizestamp = size_stamp
    with open(self.trace_file) as fh:
        # Skip potential empty lines at the start of file
        header = next(fh).strip()
        while not header:
            header = next(fh).strip()
        # Get header mappings before parsing the file
        hm = self._header_mapping(header)
        for line in fh:
            # Skip empty lines
            if line.strip() == "":
                continue
            fields = line.strip().split("\t")
            # Skip if task ID was already processed
            if fields[hm["task_id"]] in self.stored_ids:
                continue
            # Only 'report' processes produce report JSONs to broadcast
            if fields[hm["process"]] == "report":
                self.report_queue.append(
                    self._expand_path(fields[hm["hash"]])
                )
                self.send = True
            # Add the processed trace line to the stored ids. It will be
            # skipped in future parsers
            self.stored_ids.append(fields[hm["task_id"]])
def update_log_watch(self):
    """Parses nextflow log file and updates the run status.

    The log is only re-processed when its size differs from the size
    recorded on the previous check; an unchanged (non-empty) log makes
    this call a no-op.
    """
    current_size = os.path.getsize(self.log_file)
    self.trace_retry = 0
    # Nothing new to parse when the (non-zero) size did not change
    if current_size and current_size == self.log_sizestamp:
        return
    logger.debug("Updating log size stamp to: {}".format(current_size))
    self.log_sizestamp = current_size
    self._update_pipeline_status()
def _send_live_report(self, report_id):
    """Sends a PUT request with the report JSON files currently in the
    report_queue attribute.

    Reports are sent in batches of at most ``buffer_size`` JSON files.
    When the queue is empty a status-only payload is still sent, so the
    service sees pipeline status changes. Exits the program when the
    server cannot be reached.

    Parameters
    ----------
    report_id : str
        Hash of the report JSON as retrieved from :func:`~_get_report_hash`
    """
    # Determines the maximum number of reports sent at the same time in
    # the same payload
    buffer_size = 100
    logger.debug("Report buffer size set to: {}".format(buffer_size))
    for i in range(0, len(self.report_queue), buffer_size):
        # Reset the report compilation batch
        reports_compilation = []
        # Iterate over report JSON batches determined by buffer_size
        for report in self.report_queue[i: i + buffer_size]:
            try:
                # Each queue entry is a work dir expected to contain one
                # report JSON file
                report_file = [x for x in os.listdir(report)
                               if x.endswith(".json")][0]
            except IndexError:
                # No JSON produced (yet) in this work dir; skip it
                continue
            with open(join(report, report_file)) as fh:
                reports_compilation.append(json.loads(fh.read()))
        logger.debug("Payload sent with size: {}".format(
            asizeof(json.dumps(reports_compilation))
        ))
        logger.debug("status: {}".format(self.status_info))
        try:
            requests.put(
                self.broadcast_address,
                json={"run_id": report_id,
                      "report_json": reports_compilation,
                      "status": self.status_info}
            )
        except requests.exceptions.ConnectionError:
            logger.error(colored_print(
                "ERROR: Could not establish connection with server. The server"
                " may be down or there is a problem with your internet "
                "connection.", "red_bold"))
            sys.exit(1)
    # When there is no change in the report queue, but there is a change
    # in the run status of the pipeline
    if not self.report_queue:
        logger.debug("status: {}".format(self.status_info))
        try:
            requests.put(
                self.broadcast_address,
                json={"run_id": report_id,
                      "report_json": [],
                      "status": self.status_info}
            )
        except requests.exceptions.ConnectionError:
            logger.error(colored_print(
                "ERROR: Could not establish connection with server. The"
                " server may be down or there is a problem with your "
                "internet connection.", "red_bold"))
            sys.exit(1)
    # Reset the report queue after sending the request
    self.report_queue = []
def _init_live_reports(self, report_id):
    """Sends a POST request to initialize the live reports.

    Reads the optional ``.metadata.json`` file from the current working
    directory (missing or malformed files simply yield an empty metadata
    payload) and POSTs it with the current run status. Exits the program
    when the server cannot be reached.

    Parameters
    ----------
    report_id : str
        Hash of the report JSON as retrieved from :func:`~_get_report_hash`
    """
    logger.debug("Sending initial POST request to {} to start report live"
                 " update".format(self.broadcast_address))
    # The metadata file is optional. The previous bare ``except`` also
    # swallowed SystemExit/KeyboardInterrupt; catch only the errors that
    # opening/parsing the file can actually raise (json decoding errors
    # are ValueError subclasses).
    try:
        with open(".metadata.json") as fh:
            metadata = [json.load(fh)]
    except (OSError, ValueError):
        metadata = []
    start_json = {
        "data": {"results": metadata}
    }
    try:
        requests.post(
            self.broadcast_address,
            json={"run_id": report_id, "report_json": start_json,
                  "status": self.status_info}
        )
    except requests.exceptions.ConnectionError:
        logger.error(colored_print(
            "ERROR: Could not establish connection with server. The server"
            " may be down or there is a problem with your internet "
            "connection.", "red_bold"))
        sys.exit(1)
def _close_connection(self, report_id):
    """Sends a delete request for the report JSON hash.

    A non-202 response is logged as an error but does not abort the
    program; an unreachable server does.

    Parameters
    ----------
    report_id : str
        Hash of the report JSON as retrieved from :func:`~_get_report_hash`
    """
    logger.debug(
        "Closing connection and sending DELETE request to {}".format(
            self.broadcast_address))
    try:
        r = requests.delete(self.broadcast_address,
                            json={"run_id": report_id})
        # 202 (Accepted) is the expected response for the delete
        if r.status_code != 202:
            logger.error(colored_print(
                "ERROR: There was a problem sending data to the server"
                "with reason: {}".format(r.reason)))
    except requests.exceptions.ConnectionError:
        logger.error(colored_print(
            "ERROR: Could not establish connection with server. The server"
            " may be down or there is a problem with your internet "
            "connection.", "red_bold"))
        sys.exit(1)
def convert_adatpers(adapter_fasta):
    """Generates an adapter file for FastQC from a fasta file.

    The provided adapters file is assumed to be a simple fasta file with
    the adapter's name as header and the corresponding sequence on the
    next line. Each header/sequence pair is written as one row of
    ``fastqc_adapters.tab`` (the row separator uses the literal escape
    sequences consumed by the nextflow template).

    Parameters
    ----------
    adapter_fasta : str
        Path to Fasta file with adapter sequences.

    Returns
    -------
    adapter_out : str or None
        The path to the reformatted adapter file. Returns ``None`` if the
        adapters file does not exist or the path is incorrect.
    """
    adapter_out = "fastqc_adapters.tab"
    logger.debug("Setting output adapters file to: {}".format(adapter_out))
    try:
        with open(adapter_fasta) as fasta_fh, \
                open(adapter_out, "w") as out_fh:
            for line in fasta_fh:
                # Only header lines drive the parsing; the sequence is
                # pulled from the line immediately after each header
                if not line.startswith(">"):
                    continue
                head = line[1:].strip()
                sequence = next(fasta_fh).strip()
                out_fh.write("{}\\t{}\\n".format(head, sequence))
        logger.info("Converted adapters file")
        return adapter_out
    # If an invalid adapters file is provided, return None.
    except FileNotFoundError:
        logger.warning("Could not find the provided adapters file: {}".format(
            adapter_fasta))
        return
def main(fastq_pair, adapter_file, cpus):
    """ Main executor of the fastq template.

    Runs FastQC on a FastQ pair, writes pass/fail to the ``.status``
    channel file and renames the per-pair FastQC outputs to stable names.

    Parameters
    ----------
    fastq_pair : list
        Two element list containing the paired FastQ files.
    adapter_file : str
        Path to adapters file.
    cpus : int or str
        Number of cpu's that will be used by FastQC.
    """
    logger.info("Starting fastqc")
    # If an adapter file was provided, convert it to FastQC format
    if os.path.exists(adapter_file):
        logger.info("Adapters file provided: {}".format(adapter_file))
        adapters = convert_adatpers(adapter_file)
    else:
        logger.info("Adapters file '{}' not provided or does not "
                    "exist".format(adapter_file))
        adapters = None
    # Setting command line for FastQC
    cli = [
        "fastqc",
        "--extract",
        "--nogroup",
        "--format",
        "fastq",
        "--threads",
        str(cpus)
    ]
    # Add adapters file to command line, if it exists
    if adapters:
        cli += ["--adapters", "{}".format(adapters)]
    # Add FastQ files at the end of command line
    cli += fastq_pair
    logger.debug("Running fastqc subprocess with command: {}".format(cli))
    p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE, shell=False)
    stdout, stderr = p.communicate()
    # Attempt to decode STDERR output from bytes. If unsuccessful, coerce to
    # string
    try:
        stderr = stderr.decode("utf8")
    except (UnicodeDecodeError, AttributeError):
        stderr = str(stderr)
    logger.info("Finished fastqc subprocess with STDOUT:\\n"
                "======================================\\n{}".format(stdout))
    logger.info("Fished fastqc subprocesswith STDERR:\\n"
                "======================================\\n{}".format(stderr))
    logger.info("Finished fastqc with return code: {}".format(
        p.returncode))
    logger.info("Checking if FastQC output was correctly generated")
    # Check if the FastQC output was correctly generated.
    with open(".status", "w") as status_fh:
        for fastq in fastq_pair:
            fpath = join(fastq.rsplit(".", 2)[0] + "_fastqc",
                         "fastqc_data.txt")
            logger.debug("Checking path: {}".format(fpath))
            # If the FastQC output does not exist, pass the STDERR to
            # the output status channel and exit
            if not exists(fpath):
                logger.warning("Path does not exist: {}".format(fpath))
                status_fh.write("fail")
                return
            logger.debug("Found path: {}".format(fpath))
        # If the output directories exist, write 'pass' to the output status
        # channel
        status_fh.write("pass")
    logger.info("Retrieving relevant FastQC output files")
    # Both FastQC have been correctly executed. Get the relevant FastQC
    # output files for the output channel
    for i, fastq in enumerate(fastq_pair):
        # Get results for each pair
        fastqc_dir = fastq.rsplit(".", 2)[0] + "_fastqc"
        summary_file = join(fastqc_dir, "summary.txt")
        logger.debug("Retrieving summary file: {}".format(summary_file))
        fastqc_data_file = join(fastqc_dir, "fastqc_data.txt")
        logger.debug("Retrieving data file: {}".format(fastqc_data_file))
        # Rename output files to a file name that is easier to handle in the
        # output channel
        os.rename(fastqc_data_file, "pair_{}_data".format(i + 1))
        os.rename(summary_file, "pair_{}_summary".format(i + 1))
def send_to_output(master_dict, mash_output, sample_id, assembly_file):
    """Write mash dist results to a JSON file and to ``.report.json``.

    Dumps ``master_dict`` to ``<prefix>.json`` (only when it has entries,
    otherwise that file is not created) and always writes a
    ``.report.json`` summary containing a table row with the hit count
    and plot data mapping each contig to the accessions it matched.

    Parameters
    ----------
    master_dict : dict
        Maps each reference accession to
        ``[identity, shared_hash_percentage, contig_name]`` for the
        current query against the patlas database.
    mash_output : str
        The name/path of the mash dist output txt file; its first
        dot-separated token is reused as the prefix of the dumped JSON.
    sample_id : str
        The name of the sample being parsed to the .report.json file.
    assembly_file : str
        Path of the assembly file the report refers to.
    """
    plot_dict = {}
    # create the raw hits file only if master_dict is populated
    if master_dict:
        out_name = "{}.json".format("".join(mash_output.split(".")[0]))
        with open(out_name, "w") as out_fh:
            out_fh.write(json.dumps(master_dict))
        # invert the mapping so contigs become the keys of the plot data
        for accession, entry in master_dict.items():
            plot_dict.setdefault(entry[2], []).append(accession)
        number_hits = len(master_dict)
    else:
        number_hits = 0
    json_dic = {
        "tableRow": [{
            "sample": sample_id,
            "data": [{
                "header": "Mash Dist",
                "table": "plasmids",
                "patlas_mashdist": master_dict,
                "value": number_hits
            }]
        }],
        "plotData": [{
            "sample": sample_id,
            "data": {
                "patlasMashDistXrange": plot_dict
            },
            "assemblyFile": assembly_file
        }]
    }
    with open(".report.json", "w") as json_report:
        json_report.write(json.dumps(json_dic, separators=(",", ":")))
def main(mash_output, hash_cutoff, sample_id, assembly_file):
    """
    Main function that allows to dump a mash dist txt file to a json file.

    Parses each tab-separated mash dist line (reference, query, distance,
    p-value, shared hashes), keeps hits whose shared-hash fraction exceeds
    ``hash_cutoff`` and forwards them to :func:`send_to_output`.

    Parameters
    ----------
    mash_output: str
        A string with the input file.
    hash_cutoff: str
        the percentage cutoff for the percentage of shared hashes between
        query and plasmid in database that is allowed for the plasmid to
        be reported to the results outputs
    sample_id: str
        The name of the sample.
    assembly_file: str
        Path of the assembly file passed through to the report.
    """
    master_dict = {}
    # A context manager guarantees the input handle is closed (the
    # previous version opened the file and never closed it).
    with open(mash_output, "r") as input_f:
        for line in input_f:
            tab_split = line.split("\t")
            current_seq = tab_split[1].strip()
            # accession is the first three '_'-separated tokens of the ref
            ref_accession = "_".join(tab_split[0].strip().split("_")[0:3])
            mash_dist = tab_split[2].strip()
            hashes_list = tab_split[-1].strip().split("/")
            # creates a percentage of the shared hashes between the sample
            # and the reference
            perc_hashes = float(hashes_list[0]) / float(hashes_list[1])
            # if ref_accession already in dict, i.e., if the same accession
            # number matches more than one contig, accumulate contig names
            if ref_accession in master_dict:
                current_seq += ", {}".format(master_dict[ref_accession][-1])
            # assures that only the hashes with a given shared percentage
            # are reported to json file
            if perc_hashes > float(hash_cutoff):
                master_dict[ref_accession] = [
                    round(1 - float(mash_dist), 2),
                    round(perc_hashes, 2),
                    current_seq
                ]
    send_to_output(master_dict, mash_output, sample_id, assembly_file)
def build_versions(self):
    """Writes versions JSON for a template file.

    Creates the ``.versions`` JSON file from the metadata and version
    functions present in a given template script. The template's own
    entry is taken from the ``__version__``, ``__template__`` and
    ``__build__`` attributes of the template scope — all three must be
    present for that entry to be written. In addition, every object in
    the scope whose name starts with ``__get_version`` is called and is
    expected to return a dict of the form::

        {
            "program": <program_name>,
            "version": <version>,
            "build": <build>
        }

    All collected entries are dumped into ``.versions``.
    """
    collected = []

    version = self.context.get("__version__", None)
    program = self.context.get("__template__", None)
    build = self.context.get("__build__", None)

    # The template's own version entry requires all three attributes
    if version and program and build:
        if self.logger:
            self.logger.debug("Adding template version: {}; {}; "
                              "{}".format(program, version, build))
        collected.append({
            "program": program,
            "version": version,
            "build": build
        })

    # Gather versions of additional software via __get_version* callables
    for name, obj in self.context.items():
        if not name.startswith("__get_version"):
            continue
        info = obj()
        collected.append(info)
        if self.logger:
            self.logger.debug("Found additional software version"
                              "{}".format(info))

    with open(".versions", "w") as fh:
        fh.write(json.dumps(collected, separators=(",", ":")))
q272817 | main | test | def main(mash_output, sample_id):
'''
converts top results from mash screen txt output to json format
Parameters
----------
mash_output: str
this is a string that stores the path to this file, i.e, the name of
the file
sample_id: str
sample name
'''
logger.info("Reading file : {}".format(mash_output))
read_mash_output = open(mash_output)
dic = {}
median_list = []
filtered_dic = {}
logger.info("Generating dictionary and list to pre-process the final json")
for line in read_mash_output:
tab_split = line.split("\t")
identity = tab_split[0]
# shared_hashes = tab_split[1]
median_multiplicity = tab_split[2]
# p_value = tab_split[3]
query_id = tab_split[4]
# query-comment should not exist here and it is irrelevant
# here identity is what in fact interests to report to json but
# median_multiplicity also is important since it gives an rough
# estimation of the coverage depth for each plasmid.
# Plasmids should have higher coverage depth due to their increased
# copy number in relation to the chromosome.
dic[query_id] = [identity, median_multiplicity]
median_list.append(float(median_multiplicity))
output_json = open(" ".join(mash_output.split(".")[:-1]) + ".json", "w")
# median cutoff is twice the median of all median_multiplicity values
# reported by mash screen. In the case of plasmids, since the database
# has 9k entries and reads shouldn't have that many sequences it seems ok...
if len(median_list) > 0:
# this statement assures that median_list has indeed any entries
median_cutoff = median(median_list)
logger.info("Generating final json to dump to a file")
for k, v in dic.items():
# estimated copy number
copy_number = int(float(v[1]) / median_cutoff)
# assure that plasmid as at least twice the median coverage depth
if float(v[1]) > median_cutoff:
filtered_dic["_".join(k.split("_")[0:3])] = [
round(float(v[0]),2),
copy_number
]
logger.info(
"Exported dictionary has {} entries".format(len(filtered_dic)))
else:
# if no entries were found raise an error
logger.error("No matches were found using mash screen for the queried reads")
output_json.write(json.dumps(filtered_dic))
output_json.close()
json_dic = {
"tableRow": [{
"sample": sample_id,
"data": [{
"header": "Mash Screen",
"table": "plasmids",
"patlas_mashscreen": filtered_dic,
"value": len(filtered_dic)
}]
}],
}
with open(".report.json", "w") as json_report:
json_report.write(json.dumps(json_dic, separators=(",", ":"))) | python | {
"resource": ""
} |
def colored_print(msg, color_label="white_bold"):
    """
    Wraps a message in ANSI escape codes for colored terminal output.

    Parameters
    ----------
    msg: str
        The actual text to be printed.
    color_label: str
        Either a key of the module-level ``COLORS`` mapping or a raw ANSI
        color code (e.g. ``"0;37;40m"``), which is then used verbatim.

    Returns
    -------
    str
        The message wrapped in the corresponding escape sequence.
    """
    # Strip non-ASCII characters when stdout cannot represent UTF-8
    if sys.stdout.encoding != "UTF-8":
        msg = "".join(char for char in msg if ord(char) < 128)
    # Known labels resolve through COLORS; anything else is assumed to
    # already be a valid ANSI code
    try:
        color_code = COLORS[color_label]
    except KeyError:
        color_code = color_label
    return "\x1b[{}{}\x1b[0m".format(color_code, msg)
def procs_dict_parser(procs_dict):
    """
    This function handles the dictionary of attributes of each Process class
    to print to stdout lists of all the components or the components which
    the user specifies in the -t flag.

    Parameters
    ----------
    procs_dict: dict
        A dictionary with the class attributes for all the components (or
        components that are used by the -t flag), that allow to create
        both the short_list and detailed_list. Dictionary example:
        {"abyss": {'input_type': 'fastq', 'output_type': 'fasta',
        'dependencies': [], 'directives': {'abyss': {'cpus': 4,
        'memory': '{ 5.GB * task.attempt }', 'container': 'flowcraft/abyss',
        'version': '2.1.1', 'scratch': 'true'}}}
    """
    logger.info(colored_print(
        "\n===== L I S T O F P R O C E S S E S =====\n", "green_bold"))
    # Sort to print alphabetically ordered list of processes to ease reading
    procs_dict_ordered = {k: procs_dict[k] for k in sorted(procs_dict)}
    for template, dict_proc_info in procs_dict_ordered.items():
        template_str = "=> {}".format(template)
        logger.info(colored_print(template_str, "blue_bold"))
        # Print each collected attribute of the component
        for info in dict_proc_info:
            info_str = "{}:".format(info)
            if isinstance(dict_proc_info[info], list):
                # list attributes (dependencies, conflicts) are joined;
                # an empty list prints as "None"
                if not dict_proc_info[info]:
                    arg_msg = "None"
                else:
                    arg_msg = ", ".join(dict_proc_info[info])
            elif info == "directives":
                # this is used for the "directives", which is a dict
                if not dict_proc_info[info]:
                    # if dict is empty then add None to the message
                    arg_msg = "None"
                else:
                    # otherwise fetch all template names within a component
                    # and all the directives for each template to a list
                    list_msg = ["\n {}: {}".format(
                        templt,
                        " , ".join(["{}: {}".format(dr, val)
                                    for dr, val in drs.items()]))
                        for templt, drs in dict_proc_info[info].items()
                    ]
                    # write list to a str
                    arg_msg = "".join(list_msg)
            else:
                # scalar attributes (e.g. input_type) are printed as-is
                arg_msg = dict_proc_info[info]
            logger.info(" {} {}".format(
                colored_print(info_str, "white_underline"), arg_msg
            ))
def proc_collector(process_map, args, pipeline_string):
    """
    Function that collects all processes available and stores a dictionary
    of the required arguments of each process class to be passed to
    procs_dict_parser.

    When either listing flag is set, this prints the process list via
    :func:`procs_dict_parser` and exits the program with status 0.

    Parameters
    ----------
    process_map: dict
        The dictionary with the Processes currently available in flowcraft
        and their corresponding classes as values
    args: argparse.Namespace
        The arguments passed through argparser that will be access to check
        the type of list to be printed
    pipeline_string: str
        the pipeline string
    """
    arguments_list = []
    # prints a detailed list of the process class arguments
    if args.detailed_list:
        # list of attributes to be passed to proc_collector
        arguments_list += [
            "input_type",
            "output_type",
            "description",
            "dependencies",
            "conflicts",
            "directives"
        ]
    # prints a short list with each process and the corresponding description
    if args.short_list:
        arguments_list += [
            "description"
        ]
    if arguments_list:
        # dict to store only the required entries
        procs_dict = {}
        # loops between all process_map Processes
        for name, cls in process_map.items():
            # instantiates each Process class
            cls_inst = cls(template=name)
            # checks if recipe is provided; when it is, only components
            # mentioned in the pipeline string are listed
            if pipeline_string:
                if name not in pipeline_string:
                    continue
            # keep only the attributes selected by the listing flags
            d = {arg_key: vars(cls_inst)[arg_key] for arg_key in
                 vars(cls_inst) if arg_key in arguments_list}
            procs_dict[name] = d
        procs_dict_parser(procs_dict)
        sys.exit(0)
def guess_file_compression(file_path, magic_dict=None):
    """Guess the compression format of a file from its binary signature.

    Reads the first bytes of ``file_path`` and matches them against the
    known magic numbers in ``magic_dict`` (falling back to the module
    level :py:data:`MAGIC_DICT`, which covers gzip, bzip2 and zip).

    Parameters
    ----------
    file_path : str
        Path to the file to inspect.
    magic_dict : dict, optional
        Maps binary signatures to compression format names. Falls back
        to :py:data:`MAGIC_DICT` when ``None``.

    Returns
    -------
    str or None
        The detected compression format, or ``None`` when no signature
        matches.
    """
    signatures = magic_dict if magic_dict else MAGIC_DICT

    # Read just enough bytes to cover the longest known signature.
    longest = max(len(sig) for sig in signatures)
    with open(file_path, "rb") as handle:
        header = handle.read(longest)

    logger.debug("Binary signature start: {}".format(header))

    for signature, compression in signatures.items():
        if header.startswith(signature):
            return compression
    return None
"resource": ""
} |
def get_qual_range(qual_str):
    """Return the Unicode code-point range of a quality string.

    The code of each character is obtained with the :py:func:`ord`
    built-in.

    Parameters
    ----------
    qual_str : str
        Arbitrary (non-empty) string.

    Returns
    -------
    tuple
        ``(min_code, max_code)`` of the characters' Unicode code points.
    """
    codes = [ord(char) for char in qual_str]
    return min(codes), max(codes)
"resource": ""
} |
def get_encodings_in_range(rmin, rmax):
    """Return the quality encodings compatible with a code-point range.

    Looks up the module level :py:data:`RANGES` dictionary, which maps an
    encoding name to ``[phred, (min_code, max_code)]``, and collects every
    encoding whose range fully contains ``[rmin, rmax]``.

    Parameters
    ----------
    rmin : int
        Minimum Unicode code point in range.
    rmax : int
        Maximum Unicode code point in range.

    Returns
    -------
    valid_encodings : list
        Names of every compatible encoding.
    valid_phred : list
        The phred score associated with each compatible encoding.
    """
    matching_encodings = []
    matching_phreds = []

    for name, (phred, (low, high)) in RANGES.items():
        # The observed range must fit entirely inside the encoding range.
        if low <= rmin and rmax <= high:
            matching_encodings.append(name)
            matching_phreds.append(phred)

    return matching_encodings, matching_phreds
"resource": ""
} |
def parse_coverage_table(coverage_file):
    """Parses a file with coverage information into objects.

    This function parses a TSV file containing the coverage for each
    contig in a given assembly (one ``<contig>\t<coverage>`` pair per
    line) and builds an ``OrderedDict`` mapping each contig name to its
    coverage, while accumulating the total coverage of the assembly.

    Parameters
    ----------
    coverage_file : str
        Path to TSV file containing the coverage results.

    Returns
    -------
    coverage_dict : OrderedDict
        Maps each contig name to ``{"cov": <int coverage>}``.
    total_cov : int
        Sum of coverage values across all contigs.
    """
    # Stores the correspondence between a contig and the corresponding coverage
    # e.g.: {"contig_1": {"cov": 424} }
    coverage_dict = OrderedDict()
    # Stores the total coverage
    total_cov = 0

    with open(coverage_file) as fh:
        for line in fh:
            # Get contig and coverage
            contig, cov = line.strip().split()
            coverage_dict[contig] = {"cov": int(cov)}
            # Add total coverage
            total_cov += int(cov)
            logger.debug("Processing contig '{}' with coverage '{}'"
                         "".format(contig, cov))

    return coverage_dict, total_cov
"resource": ""
} |
def filter_assembly(assembly_file, minimum_coverage, coverage_info,
                    output_file):
    """Write a coverage-filtered copy of an assembly FASTA file.

    Copies to ``output_file`` only the contigs of ``assembly_file`` whose
    coverage (looked up in ``coverage_info``) is at least
    ``minimum_coverage``.

    Parameters
    ----------
    assembly_file : str
        Path to the original assembly file.
    minimum_coverage : int or float
        Minimum coverage required for a contig to be kept.
    coverage_info : OrderedDict or dict
        Maps each contig header to ``{"cov": <coverage>}``.
    output_file : str
        Path where the filtered assembly file will be generated.
    """
    with open(assembly_file) as src, open(output_file, "w") as dest:
        # Tracks whether the contig currently being read passed the
        # coverage filter; sequence lines are copied only while True.
        keep_contig = False
        for line in src:
            if line.startswith(">"):
                header = line.strip()[1:]
                keep_contig = coverage_info[header]["cov"] >= minimum_coverage
                if keep_contig:
                    dest.write(line)
            elif keep_contig:
                dest.write(line)
"resource": ""
} |
def filter_bam(coverage_info, bam_file, min_coverage, output_bam):
    """Uses Samtools to filter a BAM file according to minimum coverage.

    Keeps in the BAM file only the reads mapped to contigs whose coverage
    is at least ``min_coverage``, mirroring the filter applied to the
    assembly in :py:func:`filter_assembly`. When the filtering succeeds,
    an index for the new BAM file is also created.

    Parameters
    ----------
    coverage_info : OrderedDict or dict
        Maps each contig to ``{"cov": <coverage>}``.
    bam_file : str
        Path to the BAM file.
    min_coverage : int
        Minimum coverage required for a contig to pass the filter.
    output_bam : str
        Path to the generated filtered BAM file.
    """
    # Contigs that pass the coverage filter and will be kept in the BAM.
    contig_list = [x for x, vals in coverage_info.items()
                   if vals["cov"] >= min_coverage]

    cli = [
        "samtools",
        "view",
        "-bh",
        "-F",
        "4",
        "-o",
        output_bam,
        "-@",
        "1",
        bam_file,
    ]
    cli += contig_list

    returncode = _run_samtools(cli, "view")

    if not returncode:
        # Filtering succeeded: create the accompanying BAM index.
        cli = [
            "samtools",
            "index",
            output_bam
        ]
        _run_samtools(cli, "index")


def _run_samtools(cli, task):
    """Run a samtools command, log its output and return its exit code.

    Factors out the subprocess handling that was previously duplicated
    (and fixes the "Runnig"/"Fished"/"subprocesswith" typos in the log
    messages).

    Parameters
    ----------
    cli : list
        Command line to execute.
    task : str
        Samtools sub-command name; used only in log messages.

    Returns
    -------
    int
        The subprocess return code.
    """
    logger.debug("Running samtools {} subprocess with command: {}".format(
        task, cli))

    p = subprocess.Popen(cli, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate()

    # Attempt to decode STDERR/STDOUT output from bytes. If unsuccessful,
    # coerce to string.
    try:
        stderr = stderr.decode("utf8")
        stdout = stdout.decode("utf8")
    except (UnicodeDecodeError, AttributeError):
        stderr = str(stderr)
        stdout = str(stdout)

    logger.info("Finished samtools {} subprocess with STDOUT:\\n"
                "======================================\\n{}".format(
                    task, stdout))
    logger.info("Finished samtools {} subprocess with STDERR:\\n"
                "======================================\\n{}".format(
                    task, stderr))
    logger.info("Finished samtools {} with return code: {}".format(
        task, p.returncode))

    return p.returncode
"resource": ""
} |
def evaluate_min_coverage(coverage_opt, assembly_coverage, assembly_size):
    """ Evaluates the minimum coverage threshold from the value provided in
    the coverage_opt.

    Parameters
    ----------
    coverage_opt : str or int or float
        If set to "auto", the threshold is derived from the assembly's
        mean coverage (30% of ``assembly_coverage / assembly_size``),
        with a floor of 10. Any other value is used directly (cast to
        int).
    assembly_coverage : int or float
        Sum of the per-contig coverage values of the assembly, as
        retrieved by :py:func:`parse_coverage_table`.
    assembly_size : int
        The size of the genome assembly, as retrieved by
        :py:func:`get_assembly_size`.

    Returns
    -------
    x: int or float
        Minimum coverage threshold.
    """
    if coverage_opt == "auto":
        # Threshold at 30% of the assembly's mean coverage (total
        # coverage divided by assembly size).
        min_coverage = (assembly_coverage / assembly_size) * .3
        logger.info("Minimum assembly coverage automatically set to: "
                    "{}".format(min_coverage))
        # Enforce a floor of 10 on the automatic threshold.
        if min_coverage < 10:
            logger.info("Minimum assembly coverage cannot be set to lower"
                        " that 10. Setting to 10")
            min_coverage = 10
    else:
        min_coverage = int(coverage_opt)
        logger.info("Minimum assembly coverage manually set to: {}".format(
            min_coverage))

    return min_coverage
"resource": ""
} |
def get_assembly_size(assembly_file):
    """Compute the total assembly length and the length of each contig.

    Parameters
    ----------
    assembly_file : str
        Path to a FASTA assembly file.

    Returns
    -------
    assembly_size : int
        Size of the assembly in nucleotides.
    contig_size : dict
        Length of each contig (contig name as key and length as value).
    """
    total_length = 0
    per_contig = {}
    header = ""

    with open(assembly_file) as fasta:
        for line in fasta:
            stripped = line.strip()
            # Skip empty lines anywhere in the file.
            if not stripped:
                continue
            if line.startswith(">"):
                header = stripped[1:]
                per_contig[header] = 0
            else:
                total_length += len(stripped)
                per_contig[header] += len(stripped)

    return total_length, per_contig
"resource": ""
} |
def main(sample_id, assembly_file, coverage_file, coverage_bp_file, bam_file,
         opts, gsize):
    """Main executor of the process_assembly_mapping template.

    Filters a genome assembly (and its BAM file) by a minimum coverage
    threshold, falling back to copying the unfiltered files when the
    filter would shrink the assembly below acceptable limits. Always
    writes "pass" to the ``.status`` file at the end.

    Parameters
    ----------
    sample_id : str
        Sample Identification string.
    assembly_file : str
        Path to assembly file in Fasta format.
    coverage_file : str
        Path to TSV file with coverage information for each contig.
    coverage_bp_file : str
        Path to TSV file with coverage information for each base.
    bam_file : str
        Path to BAM file.
    opts : list
        Two elements: [minimum assembly coverage option, maximum number
        of contigs].
    gsize : int
        Expected genome size.
    """
    min_assembly_coverage, max_contigs = opts

    logger.info("Starting assembly mapping processing")

    # Get coverage info, total size and total coverage from the assembly
    logger.info("Parsing coverage table")
    coverage_info, a_cov = parse_coverage_table(coverage_file)
    a_size, contig_size = get_assembly_size(assembly_file)
    logger.info("Assembly processed with a total size of '{}' and coverage"
                " of '{}'".format(a_size, a_cov))

    # Get number of assembled bp after filters
    # NOTE(review): get_coverage_from_file is defined elsewhere in this
    # module; presumably it parses the per-base coverage TSV -- confirm.
    logger.info("Parsing coverage per bp table")
    coverage_bp_data = get_coverage_from_file(coverage_bp_file)

    # Assess the minimum assembly coverage
    min_coverage = evaluate_min_coverage(min_assembly_coverage, a_cov, a_size)

    # Check if filtering the assembly using the provided min_coverage will
    # reduce the final bp number to less than 80% of the estimated genome
    # size.
    # If the check below passes with True, then the filtered assembly
    # is above the 80% genome size threshold.
    filtered_assembly = "{}_filt.fasta".format(
        os.path.splitext(assembly_file)[0])
    filtered_bam = "filtered.bam"
    logger.info("Checking filtered assembly")
    if check_filtered_assembly(coverage_info, coverage_bp_data, min_coverage,
                               gsize, contig_size, int(max_contigs),
                               sample_id):
        # Filter assembly contigs based on the minimum coverage.
        logger.info("Filtered assembly passed minimum size threshold")
        logger.info("Writting filtered assembly")
        filter_assembly(assembly_file, min_coverage, coverage_info,
                        filtered_assembly)
        logger.info("Filtering BAM file according to saved contigs")
        filter_bam(coverage_info, bam_file, min_coverage, filtered_bam)
    # Could not filter the assembly as it would drop below acceptable
    # length levels. Copy the original assembly to the output assembly file
    # for compliance with the output channel
    else:
        shutil.copy(assembly_file, filtered_assembly)
        shutil.copy(bam_file, filtered_bam)
        shutil.copy(bam_file + ".bai", filtered_bam + ".bai")

    with open(".status", "w") as status_fh:
        status_fh.write("pass")
"resource": ""
} |
def convert_camel_case(name):
    """Convert a CamelCase identifier into snake_case.

    Parameters
    ----------
    name : str
        An arbitrary string that may be CamelCase.

    Returns
    -------
    str
        The input string converted into snake_case.
    """
    # First pass: insert "_" before an uppercase letter that starts a new
    # word (e.g. "HTTPServer" -> "HTTP_Server").
    spaced = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
    # Second pass: break between a lowercase/digit and an uppercase
    # letter, then lowercase everything.
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', spaced).lower()
"resource": ""
} |
def collect_process_map():
    """Collects Process classes and return dict mapping templates to classes

    This function crawls through the components module and retrieves all
    classes defined there whose name is not "Process" (note: the filter
    below does not actually check Process inheritance, only that the
    object is a class). Then, it converts the name of the classes (which
    should be CamelCase) to snake_case, which is used as the template
    name.

    Returns
    -------
    dict
        Dictionary mapping the template name (snake_case) to the
        corresponding process class.
    """
    process_map = {}

    # Import every module inside the components package so their classes
    # become discoverable.
    prefix = "{}.".format(components.__name__)
    for importer, modname, _ in pkgutil.iter_modules(components.__path__,
                                                     prefix):
        # NOTE(review): find_module/load_module are deprecated in favour
        # of the importlib APIs; kept here for older Python support.
        _module = importer.find_module(modname).load_module(modname)

        _component_classes = [
            cls for cls in _module.__dict__.values() if
            isinstance(cls, type) and cls.__name__ != "Process"
        ]

        for cls in _component_classes:
            process_map[convert_camel_case(cls.__name__)] = cls

    return process_map
"resource": ""
} |
def main(newick):
    """Main executor of the process_newick template.

    Reads a newick tree file, re-roots the tree at its midpoint and
    writes the normalized tree string into the ``.report.json`` file.
    Writes "pass" to the ``.status`` file on success.

    Parameters
    ----------
    newick : str
        path to the newick file.
    """
    logger.info("Starting newick file processing")

    # Use a context manager so the tree file handle is closed instead of
    # leaked (also drops a leftover debug print of the input path).
    with open(newick) as tree_fh:
        tree = dendropy.Tree.get(file=tree_fh, schema="newick")
    tree.reroot_at_midpoint()

    # Normalize the newick string for the report: strip the rooting tag
    # and make the labels whitespace- and quote-free.
    to_write = tree.as_string("newick").strip().replace(
        "[&R] ", '').replace(' ', '_').replace("'", "")

    with open(".report.json", "w") as json_report:
        json_dic = {
            "treeData": [{
                "trees": [
                    to_write
                ]
            }],
        }
        json_report.write(json.dumps(json_dic, separators=(",", ":")))

    with open(".status", "w") as status_fh:
        status_fh.write("pass")
"resource": ""
} |
def quickhull(sample):
    """ Find data points on the convex hull of a supplied data set

    Args:
        sample: data points as column vectors n x d
                    n - number samples
                    d - data dimension (should be two)

    Returns:
        a k x d matrix containing the convex hull data points
    """
    def joined(left, right):
        # Concatenate two hull chains, dropping the duplicated joint.
        return np.concatenate((left, right[1:]))

    def dome(points, base):
        # Recursively expand the hull on one side of the base segment.
        head, tail = base
        # Signed distance of every point to the (head, tail) segment.
        dists = np.dot(points - head, np.dot(((0, -1), (1, 0)), (tail - head)))
        outer = np.repeat(points, dists > 0, axis=0)

        if len(outer):
            pivot = points[np.argmax(dists)]
            return joined(dome(outer, np.concatenate(([head], [pivot]))),
                          dome(outer, np.concatenate(([pivot], [tail]))))
        return base

    if len(sample) > 2:
        xs = sample[:, 0]
        # Base segment between the leftmost and rightmost points.
        base = np.take(sample, [np.argmin(xs), np.argmax(xs)], axis=0)
        return joined(dome(sample, base),
                      dome(sample, base[::-1]))
    return sample
"resource": ""
} |
def _map_w_to_data(self):
    """ Return data points that are most similar to basis vectors W

    Replaces each basis vector in ``self.W`` by its closest data sample
    (column of ``self.data``), storing the result in ``self.Wmapped``
    and the chosen column indices in ``self._Wmapped_index``.
    """
    # assign W to the next best data sample
    # NOTE(review): ``vq`` is assumed to return one column index per
    # basis vector (pymf-style vq, not scipy's, which returns a
    # (codes, distances) tuple) -- confirm against the module imports.
    self._Wmapped_index = vq(self.data, self.W)
    self.Wmapped = np.zeros(self.W.shape)

    # do not directly assign, i.e. Wdist = self.data[:,sel]
    # as self might be unsorted (in non ascending order)
    # -> sorting sel would screw the matching to W if
    # self.data is stored as a hdf5 table (see h5py)
    for i, s in enumerate(self._Wmapped_index):
        self.Wmapped[:,i] = self.data[:,s]
"resource": ""
} |
def median_filter(X, M=8):
    """Median filter along the first axis of the feature matrix X.

    Each column of ``X`` is median-filtered in place with a window of
    size ``M``; the (modified) matrix is also returned.
    """
    n_cols = X.shape[1]
    for col in range(n_cols):
        X[:, col] = filters.median_filter(X[:, col], size=M)
    return X
"resource": ""
} |
def compute_gaussian_krnl(M):
    """Creates a gaussian kernel following Foote's paper."""
    # Outer product of a 1-D gaussian window with itself gives a 2-D
    # gaussian bump.
    window = signal.gaussian(M, M // 3., sym=True)
    kernel = np.outer(window, window)

    # Negate the off-diagonal quadrants to obtain Foote's checkerboard
    # kernel.
    half = M // 2
    kernel[half:, :half] = -kernel[half:, :half]
    kernel[:half, half:] = -kernel[:half, half:]
    return kernel
"resource": ""
} |
def compute_ssm(X, metric="seuclidean"):
    """Computes the self-similarity matrix of X."""
    # Pairwise distances between rows, normalized to [0, 1] and turned
    # into similarities.
    dists = distance.squareform(distance.pdist(X, metric=metric))
    dists /= dists.max()
    return 1 - dists
"resource": ""
} |
def compute_nc(X, G):
    """Computes the novelty curve from the self-similarity matrix X and
    the gaussian kernel G.

    Parameters
    ----------
    X : np.ndarray
        Self-similarity matrix (N x N).
    G : np.ndarray
        Checkerboard gaussian kernel (M x M, with M <= N).

    Returns
    -------
    nc : np.ndarray
        Novelty curve of length N, normalized to the [0, 1] range.
    """
    N = X.shape[0]
    M = G.shape[0]
    nc = np.zeros(N)

    # Correlate the checkerboard kernel along the main diagonal of the
    # self-similarity matrix.
    for i in range(M // 2, N - M // 2 + 1):
        nc[i] = np.sum(X[i - M // 2:i + M // 2, i - M // 2:i + M // 2] * G)

    # Normalize to [0, 1]. The previous code did `nc += nc.min()`, which
    # for a negative minimum pushes the curve further below zero instead
    # of shifting it up (compare the structural-features compute_nc,
    # which shifts by abs(min)).
    nc -= nc.min()
    nc /= nc.max()

    return nc
"resource": ""
} |
def gaussian_filter(X, M=8, axis=0):
    """Gaussian filter along the first axis of the feature matrix X.

    ``axis=0`` smooths each row, ``axis=1`` smooths each column; the
    matrix is modified in place and also returned.
    """
    sigma = M / 2.
    # Indexing X.shape first keeps the same failure mode as the original
    # loop bound for an out-of-range axis.
    n_items = X.shape[axis]
    if axis == 1:
        for col in range(n_items):
            X[:, col] = filters.gaussian_filter(X[:, col], sigma=sigma)
    elif axis == 0:
        for row in range(n_items):
            X[row, :] = filters.gaussian_filter(X[row, :], sigma=sigma)
    return X
"resource": ""
} |
def compute_nc(X):
    """Computes the novelty curve from the structural features."""
    num_frames = X.shape[0]
    novelty = np.zeros(num_frames)

    # Euclidean distance between each pair of consecutive feature
    # vectors; the final element is left at zero.
    for frame, (cur, nxt) in enumerate(zip(X[:-1], X[1:])):
        novelty[frame] = distance.euclidean(cur, nxt)

    # Shift to non-negative values and scale so the maximum is 1.
    novelty += np.abs(novelty.min())
    novelty /= float(novelty.max())

    return novelty
"resource": ""
} |
def circular_shift(X):
    """Circularly shift the square matrix X to obtain a time-lag matrix."""
    N = X.shape[0]
    lag_matrix = np.zeros(X.shape)
    # Row i of the output collects the diagonal that starts at vertical
    # offset i, wrapping around the matrix.
    for row in range(N):
        lag_matrix[row, :] = np.asarray(
            [X[(row + col) % N, col] for col in range(N)])
    return lag_matrix
"resource": ""
} |
def embedded_space(X, m, tau=1):
    """Time-delay embedding with m dimensions and tau delays.

    Stacks ``m`` consecutive frames of ``X`` into each output row.

    NOTE(review): ``tau`` is accepted but never used (the delay is
    effectively 1). For non-integer ``m``, the number of copied values
    (``int(m) * d + int((m % 1) * d)``) may differ from the allocated
    row width ``ceil(m * d)``, which would raise a broadcast error --
    confirm the callers only use values where the two agree.
    """
    N = X.shape[0] - int(np.ceil(m))
    Y = np.zeros((N, int(np.ceil(X.shape[1] * m))))
    for i in range(N):
        # Remainder of columns to copy from the last, partial frame
        # when m is fractional.
        rem = int((m % 1) * X.shape[1])
        Y[i, :] = np.concatenate((X[i:i + int(m), :].flatten(),
                                  X[i + int(m), :rem]))
    return Y
"resource": ""
} |
def _plot_formatting(title, est_file, algo_ids, last_bound, N, output_file):
    """Formats the plot with the correct axis labels, title, ticks, and
    so on.

    When ``title`` is None, the base name of ``est_file`` is used
    instead. When ``output_file`` is not None, the figure is saved there
    before being shown.
    """
    import matplotlib.pyplot as plt
    if title is None:
        title = os.path.basename(est_file).split(".")[0]
    plt.title(title)
    # One y-tick per algorithm row, centered within each row.
    plt.yticks(np.arange(0, 1, 1 / float(N)) + 1 / (float(N) * 2))
    plt.gcf().subplots_adjust(bottom=0.22)
    plt.gca().set_yticklabels(algo_ids)
    plt.xlabel("Time (seconds)")
    plt.xlim((0, last_bound))
    plt.tight_layout()
    if output_file is not None:
        plt.savefig(output_file)
    plt.show()
"resource": ""
} |
def plot_boundaries(all_boundaries, est_file, algo_ids=None, title=None,
                    output_file=None):
    """Plots all the boundaries.

    Parameters
    ----------
    all_boundaries: list
        A list of np.arrays containing the times of the boundaries, one array
        for each algorithm.
    est_file: str
        Path to the estimated file (JSON file)
    algo_ids : list
        List of algorithm ids to read boundaries from.
        If None, all algorithm ids are read.
    title : str
        Title of the plot. If None, the name of the file is printed instead.
    output_file : str, optional
        When given, the figure is also saved to this path.
    """
    import matplotlib.pyplot as plt
    N = len(all_boundaries)  # Number of lists of boundaries
    if algo_ids is None:
        algo_ids = io.get_algo_ids(est_file)

    # Translate ids
    # NOTE(review): this rewrites the caller's algo_ids list in place.
    for i, algo_id in enumerate(algo_ids):
        algo_ids[i] = translate_ids[algo_id]
    algo_ids = ["GT"] + algo_ids

    figsize = (6, 4)
    plt.figure(1, figsize=figsize, dpi=120, facecolor='w', edgecolor='k')
    # One horizontal band per algorithm; ground truth (first row) drawn
    # in green, estimations in blue.
    for i, boundaries in enumerate(all_boundaries):
        color = "b"
        if i == 0:
            color = "g"
        for b in boundaries:
            plt.axvline(b, i / float(N), (i + 1) / float(N), color=color)
        plt.axhline(i / float(N), color="k", linewidth=1)

    # Format plot
    _plot_formatting(title, est_file, algo_ids, all_boundaries[0][-1], N,
                     output_file)
"resource": ""
} |
def plot_labels(all_labels, gt_times, est_file, algo_ids=None, title=None,
                output_file=None):
    """Plots all the labels.

    Parameters
    ----------
    all_labels: list
        A list of np.arrays containing the labels of the boundaries, one array
        for each algorithm.
    gt_times: np.array
        Array with the ground truth boundaries.
    est_file: str
        Path to the estimated file (JSON file)
    algo_ids : list
        List of algorithm ids to read labels from.
        If None, all algorithm ids are read.
    title : str
        Title of the plot. If None, the name of the file is printed instead.
    output_file : str, optional
        When given, the figure is also saved to this path.
    """
    import matplotlib.pyplot as plt
    N = len(all_labels)  # Number of lists of labels
    if algo_ids is None:
        algo_ids = io.get_algo_ids(est_file)

    # Translate ids
    # NOTE(review): this rewrites the caller's algo_ids list in place.
    for i, algo_id in enumerate(algo_ids):
        algo_ids[i] = translate_ids[algo_id]
    algo_ids = ["GT"] + algo_ids

    # Index the labels to normalize them
    for i, labels in enumerate(all_labels):
        all_labels[i] = mir_eval.util.index_labels(labels)[0]

    # Get color map
    cm = plt.get_cmap('gist_rainbow')
    max_label = max(max(labels) for labels in all_labels)

    # To intervals
    gt_inters = utils.times_to_intervals(gt_times)

    # Plot labels: one colored band per label, one row per algorithm.
    figsize = (6, 4)
    plt.figure(1, figsize=figsize, dpi=120, facecolor='w', edgecolor='k')
    for i, labels in enumerate(all_labels):
        for label, inter in zip(labels, gt_inters):
            plt.axvspan(inter[0], inter[1], ymin=i / float(N),
                        ymax=(i + 1) / float(N), alpha=0.6,
                        color=cm(label / float(max_label)))
        plt.axhline(i / float(N), color="k", linewidth=1)

    # Draw the boundary lines
    for bound in gt_times:
        plt.axvline(bound, color="g")

    # Format plot
    _plot_formatting(title, est_file, algo_ids, gt_times[-1], N,
                     output_file)
"resource": ""
} |
def plot_one_track(file_struct, est_times, est_labels, boundaries_id, labels_id,
                   title=None):
    """Plots the results of one track, with ground truth if it exists.

    Draws one row with the reference segmentation (when the JAMS
    reference can be read) and one row with the estimation identified by
    ``boundaries_id`` (plus ``labels_id`` when given).
    """
    import matplotlib.pyplot as plt
    # Set up the boundaries id
    bid_lid = boundaries_id
    if labels_id is not None:
        bid_lid += " + " + labels_id
    try:
        # Read file
        jam = jams.load(file_struct.ref_file)
        ann = jam.search(namespace='segment_.*')[0]
        ref_inters, ref_labels = ann.to_interval_values()

        # To times
        ref_times = utils.intervals_to_times(ref_inters)
        all_boundaries = [ref_times, est_times]
        all_labels = [ref_labels, est_labels]
        algo_ids = ["GT", bid_lid]
    # NOTE(review): bare "except:" also swallows KeyboardInterrupt;
    # consider narrowing to Exception.
    except:
        logging.warning("No references found in %s. Not plotting groundtruth"
                        % file_struct.ref_file)
        all_boundaries = [est_times]
        all_labels = [est_labels]
        algo_ids = [bid_lid]

    N = len(all_boundaries)

    # Index the labels to normalize them
    for i, labels in enumerate(all_labels):
        all_labels[i] = mir_eval.util.index_labels(labels)[0]

    # Get color map
    cm = plt.get_cmap('gist_rainbow')
    max_label = max(max(labels) for labels in all_labels)

    figsize = (8, 4)
    plt.figure(1, figsize=figsize, dpi=120, facecolor='w', edgecolor='k')
    # One row per segmentation: green boundary lines for the ground
    # truth, blue for the estimation; label bands when labels_id is set.
    for i, boundaries in enumerate(all_boundaries):
        color = "b"
        if i == 0:
            color = "g"
        for b in boundaries:
            plt.axvline(b, i / float(N), (i + 1) / float(N), color=color)
        if labels_id is not None:
            labels = all_labels[i]
            inters = utils.times_to_intervals(boundaries)
            for label, inter in zip(labels, inters):
                plt.axvspan(inter[0], inter[1], ymin=i / float(N),
                            ymax=(i + 1) / float(N), alpha=0.6,
                            color=cm(label / float(max_label)))
        plt.axhline(i / float(N), color="k", linewidth=1)

    # Format plot
    _plot_formatting(title, os.path.basename(file_struct.audio_file), algo_ids,
                     all_boundaries[0][-1], N, None)
"resource": ""
} |
def plot_tree(T, res=None, title=None, cmap_id="Pastel2"):
    """Plots a given tree, containing hierarchical segmentation.

    Parameters
    ----------
    T: mir_eval.segment.tree
        A tree object containing the hierarchical segmentation.
    res: float
        Frame-rate resolution of the tree (None to use seconds).
    title: str
        Title for the plot. `None` for no title.
    cmap_id: str
        Color Map ID
    """
    import matplotlib.pyplot as plt

    def round_time(t, res=0.1):
        # Snap a time value down to the given resolution grid.
        v = int(t / float(res)) * res
        return v

    # Get color map
    cmap = plt.get_cmap(cmap_id)

    # Get segments by level, skipping the artificial "root" level.
    level_bounds = []
    for level in T.levels:
        if level == "root":
            continue
        segments = T.get_segments_in_level(level)
        level_bounds.append(segments)

    # Plot axvspans for each segment: one horizontal band per level,
    # colored by (normalized) segment label.
    B = float(len(level_bounds))
    for i, segments in enumerate(level_bounds):
        labels = utils.segment_labels_to_floats(segments)
        for segment, label in zip(segments, labels):
            if res is None:
                start = segment.start
                end = segment.end
                xlabel = "Time (seconds)"
            else:
                start = int(round_time(segment.start, res=res) / res)
                end = int(round_time(segment.end, res=res) / res)
                xlabel = "Time (frames)"
            plt.axvspan(start, end,
                        ymax=(len(level_bounds) - i) / B,
                        ymin=(len(level_bounds) - i - 1) / B,
                        facecolor=cmap(label))

    # Plot labels: one y-tick per level, top level first.
    L = float(len(T.levels) - 1)
    plt.yticks(np.linspace(0, (L - 1) / L, num=L) + 1 / L / 2.,
               T.levels[1:][::-1])
    plt.xlabel(xlabel)
    if title is not None:
        plt.title(title)
    plt.gca().set_xlim([0, end])
"resource": ""
} |
def get_feat_segments(F, bound_idxs):
    """Slice the feature matrix into segments at the given boundaries.

    Parameters
    ----------
    F: np.ndarray
        Matrix containing the features, one feature vector per row.
    bound_idxs: np.ndarray
        Array with boundary indeces.

    Returns
    -------
    feat_segments: list
        List of segments, one for each boundary interval.
    """
    # Make sure bound_idxs are not empty
    assert len(bound_idxs) > 0, "Boundaries can't be empty"

    # Make sure that boundaries are sorted
    bound_idxs = np.sort(bound_idxs)

    # Make sure we're not out of bounds
    assert bound_idxs[0] >= 0 and bound_idxs[-1] < F.shape[0], \
        "Boundaries are not correct for the given feature dimensions."

    # One segment per consecutive pair of boundaries.
    return [F[start:end, :]
            for start, end in zip(bound_idxs[:-1], bound_idxs[1:])]
"resource": ""
} |
def feat_segments_to_2dfmc_max(feat_segments, offset=4):
    """From a list of feature segments, return a list of 2D-Fourier Magnitude
    Coefs using the maximum segment size as main size and zero pad the rest.

    Parameters
    ----------
    feat_segments: list
        List of segments, one for each boundary interval.
    offset: int >= 0
        Number of frames to ignore from beginning and end of each segment.

    Returns
    -------
    fmcs: np.ndarray
        Tensor containing the 2D-FMC matrices, one matrix per segment.
        An empty list when no segments are given.
    """
    if len(feat_segments) == 0:
        return []

    # Get maximum segment size
    max_len = max([feat_segment.shape[0] for feat_segment in feat_segments])

    fmcs = []
    for feat_segment in feat_segments:
        # Zero pad if needed
        X = np.zeros((max_len, feat_segment.shape[1]))

        # Remove a set of frames in the beginning an end of the segment
        if feat_segment.shape[0] <= offset or offset == 0:
            X[:feat_segment.shape[0], :] = feat_segment
        else:
            X[:feat_segment.shape[0] - offset, :] = \
                feat_segment[offset // 2:-offset // 2, :]

        # Compute the 2D-FMC. The previous bare "except:" also swallowed
        # KeyboardInterrupt/SystemExit; catch Exception explicitly and
        # fall back to a zero vector of the expected length.
        try:
            fmcs.append(utils2d.compute_ffmc2d(X))
        except Exception:
            logging.warning("Couldn't compute the 2D Fourier Transform")
            fmcs.append(np.zeros((X.shape[0] * X.shape[1]) // 2 + 1))

    return np.asarray(fmcs)
"resource": ""
} |
def compute_similarity(F, bound_idxs, dirichlet=False, xmeans=False, k=5,
                       offset=4):
    """Main function to compute the segment similarity of file file_struct.

    Parameters
    ----------
    F: np.ndarray
        Matrix containing one feature vector per row.
    bound_idxs: np.ndarray
        Array with the indeces of the segment boundaries.
    dirichlet: boolean
        Whether to use the dirichlet estimator of the number of unique labels.
    xmeans: boolean
        Whether to use the xmeans estimator of the number of unique labels.
    k: int > 0
        If the other two predictors are `False`, use fixed number of labels.
    offset: int >= 0
        Number of frames to ignore from beginning and end of each segment.

    Returns
    -------
    labels_est: np.ndarray
        Estimated labels, containing integer identifiers.
    """
    # Get the feature segments
    feat_segments = get_feat_segments(F, bound_idxs)

    # Get the 2D-FMCs segments
    fmcs = feat_segments_to_2dfmc_max(feat_segments, offset)
    # No FMCs could be computed: give every interval its own label.
    if len(fmcs) == 0:
        return np.arange(len(bound_idxs) - 1)

    # Compute the labels using kmeans
    if dirichlet:
        k_init = np.min([fmcs.shape[0], k])
        # Only compute the dirichlet method if the fmc shape is small enough
        if fmcs.shape[1] > 500:
            labels_est = compute_labels_kmeans(fmcs, k=k)
        else:
            # NOTE(review): mixture.DPGMM is deprecated/removed in recent
            # scikit-learn (replaced by BayesianGaussianMixture).
            dpgmm = mixture.DPGMM(n_components=k_init, covariance_type='full')
            # dpgmm = mixture.VBGMM(n_components=k_init, covariance_type='full')
            dpgmm.fit(fmcs)
            # When xmeans is False, the else branch below re-runs kmeans
            # with this estimated k, discarding dpgmm's own labels.
            k = len(dpgmm.means_)
            labels_est = dpgmm.predict(fmcs)
            # print("Estimated with Dirichlet Process:", k)

    if xmeans:
        xm = XMeans(fmcs, plot=False)
        k = xm.estimate_K_knee(th=0.01, maxK=8)
        labels_est = compute_labels_kmeans(fmcs, k=k)
        # print("Estimated with Xmeans:", k)
    else:
        labels_est = compute_labels_kmeans(fmcs, k=k)

    return labels_est
"resource": ""
} |
def fit(self, X, Y):
    '''Fit the OLDA model

    Parameters
    ----------
    X : array-like, shape [n_samples]
        Training data: each example is an n_features-by-* data array

    Y : array-like, shape [n_samples]
        Training labels: each label is an array of change-points
        (eg, a list of segment boundaries)

    Returns
    -------
    self : object
    '''
    # Forget the scatter statistics from any previous fit.
    self.scatter_ordinal_ = None
    self.scatter_within_ = None
    # A full fit is just a partial fit starting from scratch;
    # partial_fit returns self.
    return self.partial_fit(X, Y)
"resource": ""
} |
def partial_fit(self, X, Y):
    '''Partial-fit the OLDA model

    Accumulates the within-segment and ordinal scatter matrices over the
    supplied examples and recomputes the OLDA components by solving the
    generalized eigenvalue problem between them.

    Parameters
    ----------
    X : array-like, shape [n_samples]
        Training data: each example is an n_features-by-* data array

    Y : array-like, shape [n_samples]
        Training labels: each label is an array of change-points
        (eg, a list of segment boundaries)

    Returns
    -------
    self : object
    '''
    # `itertools.izip` does not exist on Python 3; the builtin `zip`
    # yields the same pairs (eagerly on Python 2, lazily on Python 3).
    for (xi, yi) in zip(X, Y):

        prev_mean = None
        prev_length = None

        if self.scatter_within_ is None:
            # First round: initialize
            d, n = xi.shape

            # Ensure the boundary list starts at 0 and ends at n.
            if yi[0] > 0:
                yi = np.concatenate([np.array([0]), yi])
            if yi[-1] < n:
                yi = np.concatenate([yi, np.array([n])])

            # Regularized within-class scatter; the ordinal scatter grows
            # into a (d, d) matrix through broadcasting below.
            self.scatter_within_ = self.sigma * np.eye(d)
            self.scatter_ordinal_ = np.zeros(d)

        # iterate over segments
        for (seg_start, seg_end) in zip(yi[:-1], yi[1:]):

            seg_length = seg_end - seg_start

            # Segments shorter than 2 frames have no covariance.
            if seg_length < 2:
                continue

            seg_mean = np.mean(xi[:, seg_start:seg_end], axis=1,
                               keepdims=True)

            seg_cov = np.cov(xi[:, seg_start:seg_end])
            self.scatter_within_ = self.scatter_within_ + \
                seg_length * seg_cov

            if prev_mean is not None:
                # Scatter of each segment mean around the weighted mean
                # of the current and previous segments.
                diff_ord = seg_mean - (prev_length * prev_mean +
                                       seg_length * seg_mean) / \
                    (prev_length + seg_length)
                self.scatter_ordinal_ = self.scatter_ordinal_ + \
                    seg_length * np.dot(diff_ord, diff_ord.T)
                diff_ord = prev_mean - (prev_length * prev_mean +
                                        seg_length * seg_mean) / \
                    (prev_length + seg_length)
                self.scatter_ordinal_ = self.scatter_ordinal_ + \
                    prev_length * np.dot(diff_ord, diff_ord.T)

            prev_mean = seg_mean
            prev_length = seg_length

    # Generalized eigendecomposition of ordinal vs within scatter.
    e_vals, e_vecs = scipy.linalg.eig(self.scatter_ordinal_,
                                      self.scatter_within_)
    self.e_vals_ = e_vals
    self.e_vecs_ = e_vecs
    self.components_ = e_vecs.T
    return self
"resource": ""
} |
def read_references(audio_path, annotator_id=0):
    """Reads the boundary times and the labels.

    Parameters
    ----------
    audio_path : str
        Path to the audio file
    annotator_id : int
        Index of the annotation to read from the JAMS reference file.

    Returns
    -------
    ref_times : list
        List of boundary times
    ref_labels : list
        List of labels

    Raises
    ------
    IOError: if `audio_path` doesn't exist.
    """
    # The dataset root sits two directory levels above the audio file.
    dataset_dir = os.path.dirname(os.path.dirname(audio_path))

    # Build the path to the JAMS reference file and load the requested
    # annotation.
    jam_path = os.path.join(dataset_dir, ds_config.references_dir,
                            os.path.basename(audio_path)[:-4] +
                            ds_config.references_ext)
    jam = jams.load(jam_path, validate=False)
    ann = jam.search(namespace='segment_.*')[annotator_id]
    ref_inters, ref_labels = ann.to_interval_values()

    # Intervals to times
    ref_times = utils.intervals_to_times(ref_inters)

    return ref_times, ref_labels
"resource": ""
} |
def find_estimation(jam, boundaries_id, labels_id, params):
    """Finds the correct estimation from all the estimations contained in a
    JAMS file given the specified arguments.

    Parameters
    ----------
    jam : jams.JAMS
        JAMS object.
    boundaries_id : str
        Identifier of the algorithm used to compute the boundaries.
    labels_id : str
        Identifier of the algorithm used to compute the labels.
    params : dict
        Additional search parameters. E.g. {"feature" : "pcp"}.
        Must contain a boolean "hier" entry selecting flat vs
        hierarchical annotations.

    Returns
    -------
    ann : jams.Annotation
        Found estimation.
        `None` if it couldn't be found.
    """
    # Use handy JAMS search interface
    namespace = "multi_segment" if params["hier"] else "segment_open"

    # TODO: This is a workaround to issue in JAMS. Should be
    # resolved in JAMS 0.2.3, but for now, this works too.
    # Match labels_id as a regex, or accept annotations without one.
    ann = jam.search(namespace=namespace).\
        search(**{"Sandbox.boundaries_id": boundaries_id}).\
        search(**{"Sandbox.labels_id": lambda x:
                  (isinstance(x, six.string_types) and
                   re.match(labels_id, x) is not None) or x is None})

    # Filter by each extra parameter; non-string values are compared by
    # equality through a callable.
    # NOTE(review): the lambda captures `val` by reference; this is safe
    # only as long as jam.search evaluates it within the same iteration.
    for key, val in zip(params.keys(), params.values()):
        if isinstance(val, six.string_types):
            ann = ann.search(**{"Sandbox.%s" % key: val})
        else:
            ann = ann.search(**{"Sandbox.%s" % key: lambda x: x == val})

    # Check estimations found
    if len(ann) > 1:
        logging.warning("More than one estimation with same parameters.")

    if len(ann) > 0:
        ann = ann[0]

    # If we couldn't find anything, let's return None
    if not ann:
        ann = None

    return ann
"resource": ""
} |
def save_estimations(file_struct, times, labels, boundaries_id, labels_id,
                     **params):
    """Saves the segment estimations in a JAMS file.

    Parameters
    ----------
    file_struct : FileStruct
        Object with the different file paths of the current file.
    times : np.array or list
        Estimated boundary times.
        If `list`, estimated hierarchical boundaries.
    labels : np.array(N, 2)
        Estimated labels (None in case we are only storing boundary
        evaluations).
    boundaries_id : str
        Boundary algorithm identifier.
    labels_id : str
        Labels algorithm identifier.
    params : dict
        Dictionary with additional parameters for both algorithms.
    """
    # Remove features if they exist (not serializable / not part of config)
    params.pop("features", None)

    # Get duration of the track from the features file
    dur = get_duration(file_struct.features_file)

    # Convert to intervals and sanity check.
    # A numpy array means a flat segmentation; a list means one array of
    # boundary times per hierarchy level.
    if 'numpy' in str(type(times)):
        # Flat check
        inters = utils.times_to_intervals(times)
        assert len(inters) == len(labels), "Number of boundary intervals " \
            "(%d) and labels (%d) do not match" % (len(inters), len(labels))
        # Put into lists to simplify the writing process later
        inters = [inters]
        labels = [labels]
    else:
        # Hierarchical check: validate each level separately
        inters = []
        for level in range(len(times)):
            est_inters = utils.times_to_intervals(times[level])
            inters.append(est_inters)
            assert len(inters[level]) == len(labels[level]), \
                "Number of boundary intervals (%d) and labels (%d) do not " \
                "match in level %d" % (len(inters[level]), len(labels[level]),
                                       level)

    # Create new estimation
    namespace = "multi_segment" if params["hier"] else "segment_open"
    ann = jams.Annotation(namespace=namespace)

    # Find estimation in file; reuse/overwrite it when the same algorithm
    # configuration was already stored.
    if os.path.isfile(file_struct.est_file):
        jam = jams.load(file_struct.est_file, validate=False)
        curr_ann = find_estimation(jam, boundaries_id, labels_id, params)
        if curr_ann is not None:
            curr_ann.data = ann.data  # cleanup all data
            ann = curr_ann  # This will overwrite the existing estimation
        else:
            jam.annotations.append(ann)
    else:
        # Create new JAMS if it doesn't exist
        jam = jams.JAMS()
        jam.file_metadata.duration = dur
        jam.annotations.append(ann)

    # Save metadata and parameters in the annotation sandbox
    ann.annotation_metadata.version = msaf.__version__
    ann.annotation_metadata.data_source = "MSAF"

    sandbox = {}
    sandbox["boundaries_id"] = boundaries_id
    sandbox["labels_id"] = labels_id
    sandbox["timestamp"] = \
        datetime.datetime.today().strftime("%Y/%m/%d %H:%M:%S")
    for key in params:
        sandbox[key] = params[key]
    ann.sandbox = sandbox

    # Save actual data, one observation per segment (per level if
    # hierarchical)
    for i, (level_inters, level_labels) in enumerate(zip(inters, labels)):
        for bound_inter, label in zip(level_inters, level_labels):
            dur = float(bound_inter[1]) - float(bound_inter[0])
            # Map the integer label to an uppercase letter ('A', 'B', ...)
            label = chr(int(label) + 65)
            if params["hier"]:
                value = {"label": label, "level": i}
            else:
                value = label
            ann.append(time=bound_inter[0], duration=dur,
                       value=value)

    # Write results
    jam.save(file_struct.est_file)
"resource": ""
} |
def get_all_boundary_algorithms():
    """Gets all the possible boundary algorithms in MSAF.

    Returns
    -------
    algo_ids : list
        List of all the IDs of boundary algorithms (strings).
    """
    algo_ids = []
    for name in msaf.algorithms.__all__:
        # getattr is safer and clearer than building a string for eval()
        module = getattr(msaf.algorithms, name)
        if module.is_boundary_type:
            algo_ids.append(module.algo_id)
    return algo_ids
"resource": ""
} |
def get_configuration(feature, annot_beats, framesync, boundaries_id,
                      labels_id):
    """Gets the configuration dictionary from the current parameters of the
    algorithms to be evaluated.

    Parameters
    ----------
    feature : str
        Feature identifier (e.g. "pcp").
    annot_beats : bool
        Whether annotated beats are used.
    framesync : bool
        Whether frame-synchronous features are used.
    boundaries_id : str
        Boundary algorithm identifier ("gt" for ground truth).
    labels_id : str or None
        Label algorithm identifier.

    Returns
    -------
    config : dict
        Merged configuration of both algorithms.
    """
    config = {}
    config["annot_beats"] = annot_beats
    config["feature"] = feature
    config["framesync"] = framesync
    bound_config = {}
    if boundaries_id != "gt":
        # getattr avoids constructing a string for eval()
        bound_config = getattr(msaf.algorithms, boundaries_id).config
        config.update(bound_config)
    if labels_id is not None:
        label_config = getattr(msaf.algorithms, labels_id).config

        # Make sure we don't have parameter name duplicates
        if labels_id != boundaries_id:
            overlap = set(bound_config.keys()). \
                intersection(set(label_config.keys()))
            assert len(overlap) == 0, \
                "Parameter %s must not exist both in %s and %s algorithms" % \
                (overlap, boundaries_id, labels_id)
        config.update(label_config)
    return config
"resource": ""
} |
def get_dataset_files(in_path):
    """Gets the files of the given dataset."""
    # Collect every audio file with a recognized extension
    audio_files = []
    for ext in ds_config.audio_exts:
        pattern = os.path.join(in_path, ds_config.audio_dir, "*" + ext)
        audio_files.extend(glob.glob(pattern))

    # Make sure the working directories exist
    for sub_dir in (ds_config.features_dir, ds_config.estimations_dir,
                    ds_config.references_dir):
        utils.ensure_dir(os.path.join(in_path, sub_dir))

    # Wrap each audio file in a FileStruct, sorted by audio file name
    return sorted((FileStruct(audio_file) for audio_file in audio_files),
                  key=lambda file_struct: file_struct.audio_file)
"resource": ""
} |
def read_hier_references(jams_file, annotation_id=0, exclude_levels=None):
    """Reads hierarchical references from a jams file.

    Parameters
    ----------
    jams_file : str
        Path to the jams file.
    annotation_id : int > 0
        Identifier of the annotator to read from.
    exclude_levels: list or None
        List of levels to exclude. Empty list or `None` to include all
        levels.

    Returns
    -------
    hier_bounds : list
        List of the segment boundary times in seconds for each level.
    hier_labels : list
        List of the segment labels for each level.
    hier_levels : list
        List of strings for the level identifiers.
    """
    # Avoid the mutable default argument pitfall
    if exclude_levels is None:
        exclude_levels = []

    hier_bounds = []
    hier_labels = []
    hier_levels = []
    jam = jams.load(jams_file)

    namespaces = ["segment_salami_upper", "segment_salami_function",
                  "segment_open", "segment_tut", "segment_salami_lower"]

    # Remove levels if needed
    for exclude in exclude_levels:
        if exclude in namespaces:
            namespaces.remove(exclude)

    # Build hierarchy references
    for ns in namespaces:
        ann = jam.search(namespace=ns)
        if not ann:
            continue
        ref_inters, ref_labels = ann[annotation_id].to_interval_values()
        hier_bounds.append(utils.intervals_to_times(ref_inters))
        hier_labels.append(ref_labels)
        hier_levels.append(ns)

    return hier_bounds, hier_labels, hier_levels
"resource": ""
} |
def get_duration(features_file):
    """Reads the duration of a given features file.

    Parameters
    ----------
    features_file: str
        Path to the JSON file containing the features.

    Returns
    -------
    dur: float
        Duration of the analyzed file.
    """
    with open(features_file) as fh:
        data = json.load(fh)
    return float(data["globals"]["dur"])
"resource": ""
} |
def write_mirex(times, labels, out_file):
    """Writes results to file using the standard MIREX format.

    Parameters
    ----------
    times: np.array
        Times in seconds of the boundaries.
    labels: np.array
        Labels associated to the segments defined by the boundaries.
    out_file: str
        Output file path to save the results.
    """
    inters = msaf.utils.times_to_intervals(times)
    assert len(inters) == len(labels)

    # One "<start>\t<end>\t<label>" row per segment, newline-separated
    rows = ["%.3f\t%.3f\t%s" % (inter[0], inter[1], label)
            for inter, label in zip(inters, labels)]
    with open(out_file, "w") as fh:
        fh.write("\n".join(rows))
"resource": ""
} |
def _get_dataset_file(self, dir, ext):
    """Gets the desired dataset file.

    Builds the path `<ds_path>/<dir>/<audio basename with extension ext>`.
    Uses `os.path.splitext` instead of `str.replace`, which would replace
    *every* occurrence of the extension substring in the file name.
    """
    base = os.path.splitext(os.path.basename(self.audio_file))[0]
    return os.path.join(self.ds_path, dir, base + ext)
"resource": ""
} |
def align_segmentation(beat_times, song):
    '''Load a ground-truth segmentation, and align times to the nearest
    detected beats.

    Arguments:
        beat_times -- array
        song -- path to the audio file

    Returns:
        segment_beats -- array
            beat-aligned segment boundaries
        segment_times -- array
            true segment times
        segment_labels -- array
            list of segment labels
    '''
    try:
        segment_times, segment_labels = msaf.io.read_references(song)
    except Exception:
        # Explicit (instead of a bare `except:`): references are missing
        # or unreadable, so there is nothing to align.
        return None, None, None
    segment_times = np.asarray(segment_times)

    # Map to intervals
    segment_intervals = msaf.utils.times_to_intervals(segment_times)

    # Map beats to intervals. In Python 3, `zip` returns an iterator, so it
    # must be materialized before converting it to a numpy array.
    beat_intervals = np.asarray(list(zip(beat_times[:-1], beat_times[1:])))

    # Map beats to segments
    beat_segment_ids = librosa.util.match_intervals(beat_intervals,
                                                    segment_intervals)

    segment_beats = []
    segment_times_out = []
    segment_labels_out = []
    for i in range(segment_times.shape[0]):
        hits = np.argwhere(beat_segment_ids == i)
        if len(hits) > 0 and i < len(segment_intervals) and \
                i < len(segment_labels):
            segment_beats.extend(hits[0])
            segment_times_out.append(segment_intervals[i, :])
            segment_labels_out.append(segment_labels[i])

    # Pull out the segment start times
    segment_beats = list(segment_beats)
    # NOTE(review): the per-segment times gathered above are discarded and
    # the raw reference times returned instead — kept as-is to preserve the
    # original behavior.
    segment_times_out = segment_times

    return segment_beats, segment_times_out, segment_labels_out
"resource": ""
} |
def estimate_beats(self):
    """Estimates the beats using librosa.

    Returns
    -------
    times: np.array
        Times of estimated beats in seconds.
    frames: np.array
        Frame indeces of estimated beats.
    """
    # The beat tracker runs on the percussive component; compute the
    # harmonic-percussive separation lazily if it is not cached yet.
    if self._audio_percussive is None:
        self._audio_harmonic, self._audio_percussive = self.compute_HPSS()

    # Track beats and convert the frame indices to seconds
    tempo, frames = librosa.beat.beat_track(
        y=self._audio_percussive, sr=self.sr, hop_length=self.hop_length)
    times = librosa.frames_to_time(frames, sr=self.sr,
                                   hop_length=self.hop_length)

    # TODO: Is this really necessary?
    # Drop a spurious beat at time zero, if present.
    if len(times) > 0 and times[0] == 0:
        times, frames = times[1:], frames[1:]

    return times, frames
"resource": ""
} |
def read_ann_beats(self):
    """Reads the annotated beats if available.

    Returns
    -------
    times: np.array
        Times of annotated beats in seconds (`None` if unavailable).
    frames: np.array
        Frame indeces of annotated beats (`None` if unavailable).
    """
    times, frames = None, None

    # No reference file -> nothing to read
    if not os.path.isfile(self.file_struct.ref_file):
        return times, frames

    try:
        jam = jams.load(self.file_struct.ref_file)
    except TypeError:
        logging.warning(
            "Can't read JAMS file %s. Maybe it's not "
            "compatible with current JAMS version?" %
            self.file_struct.ref_file)
        return times, frames

    # If beat annotations exist, extract their times and frame indices
    beat_annot = jam.search(namespace="beat.*")
    if len(beat_annot) > 0:
        beats_inters, _ = beat_annot[0].to_interval_values()
        times = beats_inters[:, 0]
        frames = librosa.time_to_frames(times, sr=self.sr,
                                        hop_length=self.hop_length)
    return times, frames
"resource": ""
} |
def compute_beat_sync_features(self, beat_frames, beat_times, pad):
    """Make the features beat-synchronous.

    Parameters
    ----------
    beat_frames: np.array
        The frame indeces of the beat positions.
    beat_times: np.array
        The time points of the beat positions (in seconds).
    pad: boolean
        If `True`, `beat_frames` is padded to span the full range.

    Returns
    -------
    beatsync_feats: np.array
        The beat-synchronized features.
        `None` if the beat_frames was `None`.
    beatsync_times: np.array
        The beat-synchronized times.
        `None` if the beat_frames was `None`.
    """
    if beat_frames is None:
        return None, None

    # Aggregate frame-synchronous features between consecutive beats
    synced_feats = librosa.util.utils.sync(self._framesync_features.T,
                                           beat_frames, pad=pad).T

    # Copy the beat times; if padding produced an extra segment, append
    # the time of the last frame so features and times stay aligned.
    synced_times = np.copy(beat_times)
    if synced_times.shape[0] != synced_feats.shape[0]:
        synced_times = np.concatenate((synced_times,
                                       [self._framesync_times[-1]]))
    return synced_feats, synced_times
"resource": ""
} |
def read_features(self, tol=1e-3):
    """Reads the features from a file and stores them in the current
    object.

    Parameters
    ----------
    tol: float
        Tolerance level to detect duration of audio.

    Raises
    ------
    WrongFeaturesFormatError
        If the JSON file lacks the expected keys.
    FeaturesNotFound
        If the stored global parameters do not match the current ones.
    NoFeaturesFileError
        If the features file does not exist.
    FeatureParamsError
        If the file has no entry for this feature id / parameters.
    """
    try:
        # Read JSON file
        with open(self.file_struct.features_file) as f:
            feats = json.load(f)

        # Store duration
        if self.dur is None:
            self.dur = float(feats["globals"]["dur"])

        # Check that we have the correct global parameters
        assert(np.isclose(
            self.dur, float(feats["globals"]["dur"]), rtol=tol))
        assert(self.sr == int(feats["globals"]["sample_rate"]))
        assert(self.hop_length == int(feats["globals"]["hop_length"]))
        assert(os.path.basename(self.file_struct.audio_file) ==
               os.path.basename(feats["globals"]["audio_file"]))

        # Check for specific features params
        feat_params_err = FeatureParamsError(
            "Couldn't find features for %s id in file %s" %
            (self.get_id(), self.file_struct.features_file))
        if self.get_id() not in feats.keys():
            raise feat_params_err
        for param_name in self.get_param_names():
            value = getattr(self, param_name)
            # Parameters were serialized as strings (or function names),
            # so compare against the stringified current value.
            if hasattr(value, '__call__'):
                # Special case of functions
                if value.__name__ != \
                        feats[self.get_id()]["params"][param_name]:
                    raise feat_params_err
            else:
                if str(value) != \
                        feats[self.get_id()]["params"][param_name]:
                    raise feat_params_err

        # Store actual features
        self._est_beats_times = np.array(feats["est_beats"])
        self._est_beatsync_times = np.array(feats["est_beatsync_times"])
        self._est_beats_frames = librosa.core.time_to_frames(
            self._est_beats_times, sr=self.sr, hop_length=self.hop_length)
        self._framesync_features = \
            np.array(feats[self.get_id()]["framesync"])
        self._est_beatsync_features = \
            np.array(feats[self.get_id()]["est_beatsync"])

        # Read annotated beats if available
        if "ann_beats" in feats.keys():
            self._ann_beats_times = np.array(feats["ann_beats"])
            self._ann_beatsync_times = np.array(feats["ann_beatsync_times"])
            self._ann_beats_frames = librosa.core.time_to_frames(
                self._ann_beats_times, sr=self.sr,
                hop_length=self.hop_length)
            self._ann_beatsync_features = \
                np.array(feats[self.get_id()]["ann_beatsync"])
    except KeyError:
        raise WrongFeaturesFormatError(
            "The features file %s is not correctly formatted" %
            self.file_struct.features_file)
    except AssertionError:
        raise FeaturesNotFound(
            "The features for the given parameters were not found in "
            "features file %s" % self.file_struct.features_file)
    except IOError:
        raise NoFeaturesFileError("Could not find features file %s",
                                  self.file_struct.features_file)
"resource": ""
} |
def write_features(self):
    """Saves features to file.

    Three cases are handled by probing the existing file via
    `read_features`:
    - file missing/invalid: a brand-new JSON document is created;
    - file valid but lacking this feature id: the existing document is
      loaded and extended;
    - file valid and complete: only this feature's section is refreshed.
    """
    out_json = collections.OrderedDict()
    try:
        # Only save the necessary information
        self.read_features()
    except (WrongFeaturesFormatError, FeaturesNotFound,
            NoFeaturesFileError):
        # We need to create the file or overwite it

        # Metadata
        out_json = collections.OrderedDict({"metadata": {
            "versions": {"librosa": librosa.__version__,
                         "msaf": msaf.__version__,
                         "numpy": np.__version__},
            "timestamp": datetime.datetime.today().strftime(
                "%Y/%m/%d %H:%M:%S")}})

        # Global parameters
        out_json["globals"] = {
            "dur": self.dur,
            "sample_rate": self.sr,
            "hop_length": self.hop_length,
            "audio_file": self.file_struct.audio_file
        }

        # Beats
        out_json["est_beats"] = self._est_beats_times.tolist()
        out_json["est_beatsync_times"] = self._est_beatsync_times.tolist()
        if self._ann_beats_times is not None:
            out_json["ann_beats"] = self._ann_beats_times.tolist()
            out_json["ann_beatsync_times"] = self._ann_beatsync_times.tolist()
    except FeatureParamsError:
        # We have other features in the file, simply add these ones
        with open(self.file_struct.features_file) as f:
            out_json = json.load(f)
    finally:
        # Specific parameters of the current features (always written,
        # regardless of which branch above ran)
        out_json[self.get_id()] = {}
        out_json[self.get_id()]["params"] = {}
        for param_name in self.get_param_names():
            value = getattr(self, param_name)
            # Check for special case of functions
            if hasattr(value, '__call__'):
                value = value.__name__
            else:
                value = str(value)
            out_json[self.get_id()]["params"][param_name] = value

        # Actual features
        out_json[self.get_id()]["framesync"] = \
            self._framesync_features.tolist()
        out_json[self.get_id()]["est_beatsync"] = \
            self._est_beatsync_features.tolist()
        if self._ann_beatsync_features is not None:
            out_json[self.get_id()]["ann_beatsync"] = \
                self._ann_beatsync_features.tolist()

        # Save it
        with open(self.file_struct.features_file, "w") as f:
            json.dump(out_json, f, indent=2)
"resource": ""
} |
def get_param_names(self):
    """Returns the parameter names for these features, avoiding
    the global parameters."""
    param_names = []
    for name in vars(self):
        # Skip private attributes and the shared global parameters
        if name.startswith('_') or name in self._global_param_names:
            continue
        param_names.append(name)
    return param_names
"resource": ""
} |
def _compute_framesync_times(self):
    """Computes the framesync times based on the framesync features."""
    # Pass sr/hop_length as keywords, consistent with the other librosa
    # calls in this class (newer librosa versions make them keyword-only).
    self._framesync_times = librosa.core.frames_to_time(
        np.arange(self._framesync_features.shape[0]), sr=self.sr,
        hop_length=self.hop_length)
"resource": ""
} |
def frame_times(self):
    """This getter returns the frame times, for the corresponding type of
    features."""
    # Touch `features` so that all time arrays below are populated
    self.features

    if self.feat_type is FeatureTypes.framesync:
        self._compute_framesync_times()
        return self._framesync_times
    if self.feat_type is FeatureTypes.est_beatsync:
        return self._est_beatsync_times
    if self.feat_type is FeatureTypes.ann_beatsync:
        return self._ann_beatsync_times
    return None
"resource": ""
} |
def features(self):
    """This getter will compute the actual features if they haven't
    been computed yet.

    Returns
    -------
    features: np.array
        The actual features. Each row corresponds to a feature vector.

    Raises
    ------
    NoAudioFileError
        If features must be (re)computed but the audio file is missing.
    FeatureTypeNotFound
        If `self.feat_type` is invalid or requires unavailable
        annotated beats.
    """
    # Compute features if needed
    if self._features is None:
        try:
            # Fast path: load previously computed features from disk
            self.read_features()
        except (NoFeaturesFileError, FeaturesNotFound,
                WrongFeaturesFormatError, FeatureParamsError) as e:
            try:
                # Fall back to computing from audio and caching to disk
                self._compute_all_features()
                self.write_features()
            except IOError:
                # Audio is missing: tailor the error message to whether
                # a features file existed with other parameters
                if isinstance(e, FeaturesNotFound) or \
                        isinstance(e, FeatureParamsError):
                    msg = "Computation of the features is needed for " \
                        "current parameters but no audio file was found." \
                        "Please, change your parameters or add the audio" \
                        " file in %s"
                else:
                    msg = "Couldn't find audio file in %s"
                raise NoAudioFileError(msg % self.file_struct.audio_file)

    # Choose features based on type
    if self.feat_type is FeatureTypes.framesync:
        self._features = self._framesync_features
    elif self.feat_type is FeatureTypes.est_beatsync:
        self._features = self._est_beatsync_features
    elif self.feat_type is FeatureTypes.ann_beatsync:
        if self._ann_beatsync_features is None:
            raise FeatureTypeNotFound(
                "Feature type %s is not valid because no annotated beats "
                "were found" % self.feat_type)
        self._features = self._ann_beatsync_features
    else:
        raise FeatureTypeNotFound("Feature type %s is not valid." %
                                  self.feat_type)

    return self._features
"resource": ""
} |
def select_features(cls, features_id, file_struct, annot_beats, framesync):
    """Selects the features from the given parameters.

    Parameters
    ----------
    features_id: str
        The identifier of the features (it must be a key inside the
        `features_registry`)
    file_struct: msaf.io.FileStruct
        The file struct containing the files to extract the features from
    annot_beats: boolean
        Whether to use annotated (`True`) or estimated (`False`) beats
    framesync: boolean
        Whether to use framesync (`True`) or beatsync (`False`) features

    Returns
    -------
    features: obj
        The actual features object that inherits from `msaf.Features`
    """
    # Map the two booleans onto a concrete feature type; annotated beats
    # combined with framesync features is not a valid combination.
    if annot_beats and framesync:
        raise FeatureTypeNotFound("Type of features not valid.")
    if framesync:
        feat_type = FeatureTypes.framesync
    elif annot_beats:
        feat_type = FeatureTypes.ann_beatsync
    else:
        feat_type = FeatureTypes.est_beatsync

    # Select features with default parameters
    if features_id not in features_registry.keys():
        raise FeaturesNotFound(
            "The features '%s' are invalid (valid features are %s)"
            % (features_id, features_registry.keys()))

    return features_registry[features_id](file_struct, feat_type)
"resource": ""
} |
def _preprocess(self, valid_features=("pcp", "tonnetz", "mfcc",
                                      "cqt", "tempogram")):
    """This method obtains the actual features.

    Parameters
    ----------
    valid_features: iterable of str
        Feature identifiers accepted by the calling algorithm. A tuple
        default avoids the shared mutable default argument pitfall.

    Returns
    -------
    F: np.array
        The selected feature matrix.

    Raises
    ------
    RuntimeError
        If the requested feature is not valid or not supported.
    """
    # Use specific feature
    if self.feature_str not in valid_features:
        # Error message typo fixed: "in not" -> "is not"
        raise RuntimeError("Feature %s is not valid for algorithm: %s "
                           "(valid features are %s)." %
                           (self.feature_str, __name__, valid_features))
    try:
        F = self.features.features
    except KeyError:
        raise RuntimeError("Feature %s is not supported by MSAF" %
                           (self.feature_str))
    return F
"resource": ""
} |
def _postprocess(self, est_idxs, est_labels):
    """Post processes the estimations from the algorithm, removing empty
    segments and making sure the lengths of the boundaries and labels
    match.

    Parameters
    ----------
    est_idxs: array-like
        Estimated boundary frame indeces.
    est_labels: array-like
        Estimated segment labels (one fewer than boundaries).

    Returns
    -------
    est_idxs: np.array of int
        Post-processed boundary indeces.
    est_labels: array-like
        Post-processed labels.
    """
    # Make sure we are using the previously input bounds, if any:
    # re-map the estimated labels onto the externally supplied boundaries.
    if self.in_bound_idxs is not None:
        F = self._preprocess()
        est_labels = U.synchronize_labels(self.in_bound_idxs, est_idxs,
                                          est_labels, F.shape[0])
        est_idxs = self.in_bound_idxs

    # Remove empty segments if needed
    est_idxs, est_labels = U.remove_empty_segments(est_idxs, est_labels)

    assert len(est_idxs) - 1 == len(est_labels), "Number of boundaries " \
        "(%d) and number of labels(%d) don't match" % (len(est_idxs),
                                                       len(est_labels))

    # Make sure the indeces are integers
    est_idxs = np.asarray(est_idxs, dtype=int)
    return est_idxs, est_labels
"resource": ""
} |
def main():
    """Main function to sweep parameters of a certain algorithm.

    Parses the command line arguments, runs the selected boundary/label
    algorithms over the input dataset, and logs the elapsed time.
    """
    parser = argparse.ArgumentParser(
        description="Runs the speficied algorithm(s) on the MSAF "
        "formatted dataset.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("in_path",
                        action="store",
                        help="Input dataset")
    parser.add_argument("-f",
                        action="store",
                        dest="feature",
                        default="pcp",
                        type=str,
                        help="Type of features",
                        choices=["pcp", "tonnetz", "mfcc", "cqt", "tempogram"])
    parser.add_argument("-b",
                        action="store_true",
                        dest="annot_beats",
                        help="Use annotated beats",
                        default=False)
    parser.add_argument("-fs",
                        action="store_true",
                        dest="framesync",
                        help="Use frame-synchronous features",
                        default=False)
    parser.add_argument("-bid",
                        action="store",
                        help="Boundary algorithm identifier",
                        dest="boundaries_id",
                        default="gt",
                        choices=["gt"] +
                        io.get_all_boundary_algorithms())
    parser.add_argument("-lid",
                        action="store",
                        help="Label algorithm identifier",
                        dest="labels_id",
                        default=None,
                        choices=io.get_all_label_algorithms())
    parser.add_argument("-j",
                        action="store",
                        dest="n_jobs",
                        default=4,
                        type=int,
                        help="The number of threads to use")
    args = parser.parse_args()
    start_time = time.time()

    # Run the algorithm(s)
    process(args.in_path, annot_beats=args.annot_beats, feature=args.feature,
            framesync=args.framesync, boundaries_id=args.boundaries_id,
            labels_id=args.labels_id, n_jobs=args.n_jobs)

    # Done!
    logging.info("Done! Took %.2f seconds." % (time.time() - start_time))
"resource": ""
} |
def print_results(results):
    """Print all the results.

    Parameters
    ----------
    results: pd.DataFrame
        Dataframe with all the results
    """
    if not len(results):
        logging.warning("No results to print!")
        return
    # Log the column-wise averages of all evaluated metrics
    logging.info("Results:\n%s" % results.mean())
"resource": ""
} |
def compute_gt_results(est_file, ref_file, boundaries_id, labels_id, config,
                       bins=251, annotator_id=0):
    """Computes the results by using the ground truth dataset identified by
    the annotator parameter.

    Parameters
    ----------
    est_file : str
        Path to the JAMS file with the estimations.
    ref_file : str
        Path to the JAMS file with the references.
    boundaries_id : str
        Boundary algorithm identifier.
    labels_id : str
        Label algorithm identifier.
    config : dict
        Algorithm configuration; `config["hier"]` selects hierarchical
        vs. flat evaluation.
    bins : int
        Number of bins for the information gain metric.
    annotator_id : int
        Index of the annotator to evaluate against.

    Return
    ------
    results : dict
        Dictionary of the results (see function compute_results).
    """
    if config["hier"]:
        ref_times, ref_labels, ref_levels = \
            msaf.io.read_hier_references(
                ref_file, annotation_id=annotator_id,
                exclude_levels=["segment_salami_function"])
    else:
        jam = jams.load(ref_file, validate=False)
        ann = jam.search(namespace='segment_.*')[annotator_id]
        ref_inter, ref_labels = ann.to_interval_values()

    # Read estimations with correct configuration
    est_inter, est_labels = io.read_estimations(est_file, boundaries_id,
                                                labels_id, **config)

    # Compute the results and return
    logging.info("Evaluating %s" % os.path.basename(est_file))
    if config["hier"]:
        # Hierarchical
        assert len(est_inter) == len(est_labels), "Same number of levels " \
            "are required in the boundaries and labels for the hierarchical " \
            "evaluation."
        est_times = []
        est_labels = []

        # Sort based on how many segments per level
        est_inter = sorted(est_inter, key=lambda level: len(level))
        for inter in est_inter:
            est_times.append(msaf.utils.intervals_to_times(inter))
            # Add fake labels (hierarchical eval does not use labels --yet--)
            est_labels.append(np.ones(len(est_times[-1]) - 1) * -1)

        # Align the times
        utils.align_end_hierarchies(est_times, ref_times, thres=1)

        # To intervals
        est_hier = [utils.times_to_intervals(times) for times in est_times]
        ref_hier = [utils.times_to_intervals(times) for times in ref_times]

        # Compute evaluations: T-measures at 10 and 15 second windows
        res = {}
        res["t_recall10"], res["t_precision10"], res["t_measure10"] = \
            mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=10)
        res["t_recall15"], res["t_precision15"], res["t_measure15"] = \
            mir_eval.hierarchy.tmeasure(ref_hier, est_hier, window=15)

        # Track id is the file name without the ".jams" extension
        res["track_id"] = os.path.basename(est_file)[:-5]
        return res
    else:
        # Flat
        return compute_results(ref_inter, est_inter, ref_labels, est_labels,
                               bins, est_file)
"resource": ""
} |
def compute_information_gain(ann_inter, est_inter, est_file, bins):
    """Computes the information gain of the est_file from the annotated
    intervals and the estimated intervals."""
    # Convert both interval sets to boundary times before scoring
    return mir_eval.beat.information_gain(
        utils.intervals_to_times(ann_inter),
        utils.intervals_to_times(est_inter),
        bins=bins)
"resource": ""
} |
def process_track(file_struct, boundaries_id, labels_id, config,
                  annotator_id=0):
    """Processes a single track.

    Parameters
    ----------
    file_struct : object (FileStruct) or str
        File struct or full path of the audio file to be evaluated.
    boundaries_id : str
        Identifier of the boundaries algorithm.
    labels_id : str
        Identifier of the labels algorithm.
    config : dict
        Configuration of the algorithms to be evaluated.
    annotator_id : int
        Number identifiying the annotator.

    Returns
    -------
    one_res : dict
        Dictionary of the results (see function compute_results).
    """
    # Accept either a path or an already-built FileStruct
    if isinstance(file_struct, six.string_types):
        file_struct = io.FileStruct(file_struct)

    est_file = file_struct.est_file
    ref_file = file_struct.ref_file

    # Sanity check: estimation and reference must refer to the same track
    est_base = os.path.basename(est_file)[:-4]
    ref_base = os.path.basename(ref_file)[:-4]
    assert est_base == ref_base, \
        "File names are different %s --- %s" % (est_base, ref_base)

    if not os.path.isfile(ref_file):
        raise NoReferencesError("Reference file %s does not exist. You must "
                                "have annotated references to run "
                                "evaluations." % ref_file)

    return compute_gt_results(est_file, ref_file, boundaries_id, labels_id,
                              config, annotator_id=annotator_id)
"resource": ""
} |
def get_results_file_name(boundaries_id, labels_id, config,
                          annotator_id):
    """Based on the config and the dataset, get the file name to store the
    results."""
    utils.ensure_dir(msaf.config.results_dir)

    # Encode the algorithm ids, annotator and every config parameter
    # (sorted case-insensitively) into the file name
    parts = ["_boundsE%s_labelsE%s" % (boundaries_id, labels_id),
             "_annotatorE%d" % (annotator_id)]
    for key in sorted(config.keys(), key=str.lower):
        parts.append("_%sE%s" % (key, str(config[key]).replace("/", "_")))
    file_name = os.path.join(msaf.config.results_dir, "results") + \
        "".join(parts)

    # Truncate so the final path never exceeds the usual 255-char limit
    max_len = 255 - len(msaf.config.results_ext)
    if len(file_name) > max_len:
        file_name = file_name[:max_len]
    return file_name + msaf.config.results_ext
"resource": ""
} |
def process(in_path, boundaries_id=msaf.config.default_bound_id,
            labels_id=msaf.config.default_label_id, annot_beats=False,
            framesync=False, feature="pcp", hier=False, save=False,
            out_file=None, n_jobs=4, annotator_id=0, config=None):
    """Main process to evaluate algorithms' results.

    Parameters
    ----------
    in_path : str
        Path to the dataset root folder.
    boundaries_id : str
        Boundaries algorithm identifier (e.g. siplca, cnmf)
    labels_id : str
        Labels algorithm identifier (e.g. siplca, cnmf)
    annot_beats : boolean
        Whether to use the annotated beats or not.
    framesync: str
        Whether to use framesync features or not (default: False -> beatsync)
    feature: str
        String representing the feature to be used (e.g. pcp, mfcc, tonnetz)
    hier : bool
        Whether to compute a hierarchical or flat segmentation.
    save: boolean
        Whether to save the results into the `out_file` csv file.
    out_file: str
        Path to the csv file to save the results (if `None` and `save = True`
        it will save the results in the default file name obtained by
        calling `get_results_file_name`).
    n_jobs: int
        Number of processes to run in parallel. Only available in collection
        mode.
    annotator_id : int
        Number identifiying the annotator.
    config: dict
        Dictionary containing custom configuration parameters for the
        algorithms.  If None, the default parameters are used.

    Return
    ------
    results : pd.DataFrame
        DataFrame containing the evaluations for each file.
    """
    # Set up configuration based on algorithms parameters
    if config is None:
        config = io.get_configuration(feature, annot_beats, framesync,
                                      boundaries_id, labels_id)

    # Hierarchical segmentation
    config["hier"] = hier

    # Remove actual features
    config.pop("features", None)

    # Get out file in case we want to save results
    if out_file is None:
        out_file = get_results_file_name(boundaries_id, labels_id, config,
                                         annotator_id)

    # If out_file already exists, read and return them
    if os.path.exists(out_file):
        logging.warning("Results already exists, reading from file %s" %
                        out_file)
        results = pd.read_csv(out_file)
        print_results(results)
        return results

    # Perform actual evaluations
    if os.path.isfile(in_path):
        # Single File mode
        evals = [process_track(in_path, boundaries_id, labels_id, config,
                               annotator_id=annotator_id)]
    else:
        # Collection mode
        # Get files
        file_structs = io.get_dataset_files(in_path)

        # Evaluate in parallel
        logging.info("Evaluating %d tracks..." % len(file_structs))
        evals = Parallel(n_jobs=n_jobs)(delayed(process_track)(
            file_struct, boundaries_id, labels_id, config,
            annotator_id=annotator_id) for file_struct in file_structs[:])

    # Aggregate evaluations in pandas format. Build the DataFrame in one
    # shot from the evaluation dicts: appending row by row with
    # `DataFrame.append` is quadratic and was removed in pandas 2.0.
    results = pd.DataFrame([e for e in evals if e != []])
    logging.info("%d tracks analyzed" % len(results))

    # Print results
    print_results(results)

    # Save all results
    if save:
        logging.info("Writing results in %s" % out_file)
        results.to_csv(out_file)

    return results
"resource": ""
} |
def AddConfigVar(name, doc, configparam, root=config):
    """Add a new variable to msaf.config

    Parameters
    ----------
    name: str
        String of the form "[section0.[section1.[etc]]]option", containing the
        full name for this configuration variable.
    doc: str
        What does this variable specify?
    configparam: `ConfigParam`
        An object for getting and setting this configuration parameter.
    root: object
        Used for recursive calls -- do not provide an argument for this
        parameter.
    """
    # This method also performs some of the work of initializing ConfigParam
    # instances
    if root is config:
        # only set the name in the first call, not the recursive ones
        configparam.fullname = name
    sections = name.split('.')
    if len(sections) > 1:
        # set up a subobject for the leading section, then recurse on the
        # remainder of the dotted name
        if not hasattr(root, sections[0]):
            # every internal node in the config tree is an instance of its own
            # unique class
            class SubObj(object):
                _i_am_a_config_class = True
            setattr(root.__class__, sections[0], SubObj())
        newroot = getattr(root, sections[0])
        if (not getattr(newroot, '_i_am_a_config_class', False) or
                isinstance(newroot, type)):
            raise TypeError(
                'Internal config nodes must be config class instances',
                newroot)
        return AddConfigVar('.'.join(sections[1:]), doc, configparam,
                            root=newroot)
    else:
        if hasattr(root, name):
            raise AttributeError('This name is already taken',
                                 configparam.fullname)
        configparam.doc = doc
        # Trigger a read of the value from config files and env vars
        # This allow to filter wrong value from the user.
        if not callable(configparam.default):
            configparam.__get__(root, type(root), delete_key=True)
        else:
            # We do not want to evaluate now the default value
            # when it is a callable.
            try:
                fetch_val_for_key(configparam.fullname)
                # The user provided a value, filter it now.
                configparam.__get__(root, type(root), delete_key=True)
            except KeyError:
                pass
        # Install the descriptor on the class and keep a global record of it
        setattr(root.__class__, sections[0], configparam)
        _config_var_list.append(configparam)
"resource": ""
} |
def compute_all_features(file_struct, framesync):
    """Compute every registered feature type for the given file."""
    for feature_id in msaf.features_registry:
        logging.info("Computing %s for file %s" %
                     (feature_id, file_struct.audio_file))
        extractor = Features.select_features(
            feature_id, file_struct, False, framesync)
        # Accessing the property triggers the (possibly cached) computation.
        extractor.features
"resource": ""
} |
def process(in_path, out_file, n_jobs, framesync):
    """Compute the features for the selected dataset or single file.

    In single-file mode nothing is returned; in collection mode the
    list of per-file results from the parallel run is returned.
    """
    if os.path.isfile(in_path):
        # Single file mode: get (if they exist) or compute the features.
        file_struct = msaf.io.FileStruct(in_path)
        file_struct.features_file = out_file
        compute_all_features(file_struct, framesync)
        return None
    # Collection mode: gather the dataset files and process them in parallel.
    file_structs = msaf.io.get_dataset_files(in_path)
    return Parallel(n_jobs=n_jobs)(
        delayed(compute_all_features)(fs, framesync) for fs in file_structs)
"resource": ""
} |
def gaussian_cost(X):
    """Return the average log-likelihood of data under a standard normal.

    `X` is a (d, n) array: d dimensions, n samples (columns). Returns 0
    when there are fewer than two samples (variance is undefined).
    """
    d, n = X.shape
    if n < 2:
        return 0
    # Unbiased per-dimension variance across the n samples.
    sample_var = np.var(X, axis=1, ddof=1)
    return (-0.5 * d * n * np.log(2. * np.pi)
            - 0.5 * (n - 1.) * np.sum(sample_var))
"resource": ""
} |
def lognormalize(F, floor=0.1, min_db=-80):
    """Log-normalize features so that each vector lies within [min_db, 0]."""
    assert min_db < 0
    # Bring the features into [~floor, 1] before taking the logarithm.
    scaled = min_max_normalize(F, floor=floor)
    # log10 of values in (0, 1] is <= 0; scale the result to [min_db, 0].
    return np.abs(min_db) * np.log10(scaled)
"resource": ""
} |
def min_max_normalize(F, floor=0.001):
    """Normalize features so that each column is scaled into (0, 1].

    Parameters
    ----------
    F: np.array
        Feature matrix to normalize (columns are scaled by their maximum).
    floor: float
        Small positive offset added after subtracting the global minimum,
        so no value is exactly zero.

    Returns
    -------
    np.array
        The normalized features. The input array is left untouched.
    """
    # Fix: build a shifted copy instead of mutating the caller's array in
    # place (the original used ``F += ...``, clobbering the input argument).
    F = F - F.min() + floor
    return F / F.max(axis=0)
"resource": ""
} |
def normalize(X, norm_type, floor=0.0, min_db=-80):
    """Normalize the given matrix of features.

    Parameters
    ----------
    X: np.array
        Each row represents a feature vector.
    norm_type: {"min_max", "log", np.inf, -np.inf, 0, float > 0, None}
        - `"min_max"`: Min/max scaling is performed
        - `"log"`: Logarithmic scaling is performed
        - `np.inf`: Maximum absolute value
        - `-np.inf`: Minimum absolute value
        - `0`: Number of non-zeros
        - float: Corresponding l_p norm.
        - None : No normalization is performed

    Returns
    -------
    norm_X: np.array
        Normalized `X` according to the input parameters.
    """
    if isinstance(norm_type, six.string_types):
        # Named normalization schemes are handled by the local helpers.
        if norm_type == "min_max":
            return min_max_normalize(X, floor=floor)
        elif norm_type == "log":
            return lognormalize(X, floor=floor, min_db=min_db)
    # Everything else (l_p norms, None) is delegated to librosa.
    return librosa.util.normalize(X, norm=norm_type, axis=1)
"resource": ""
} |
def get_time_frames(dur, anal):
    """Return the frame timestamps as a numpy array spanning [0, dur]."""
    # One evenly-spaced timestamp per analysis frame.
    return np.linspace(0, dur, num=get_num_frames(dur, anal))
"resource": ""
} |
def remove_empty_segments(times, labels):
    """Drop zero-length segments from a boundary/label description."""
    assert len(times) - 1 == len(labels)
    intervals = times_to_intervals(times)
    # Keep only the segments with strictly positive duration.
    kept = [(seg, lab) for seg, lab in zip(intervals, labels)
            if seg[0] < seg[1]]
    new_intervals = [seg for seg, _ in kept]
    new_labels = [lab for _, lab in kept]
    return intervals_to_times(np.asarray(new_intervals)), new_labels
"resource": ""
} |
def sonify_clicks(audio, clicks, out_file, fs, offset=0):
    """Sonifies the estimated times into the output file.

    Parameters
    ----------
    audio: np.array
        Audio samples of the input track.
    clicks: np.array
        Click positions in seconds.
    out_file: str
        Path to the output file.
    fs: int
        Sample rate.
    offset: float
        Offset of the clicks with respect to the audio.
    """
    times = clicks + offset
    # Synthesize a 1 kHz, 100 ms click with an exponential decay. (Only its
    # length is used to size the buffer; mir_eval renders the actual clicks.)
    n = np.arange(fs * .1)
    click = np.sin(2 * np.pi * n * 1000 / (1. * fs)) * np.exp(-n / (fs * .01))
    length = int(times.max() * fs + click.shape[0] + 1)
    audio_clicks = mir_eval.sonify.clicks(times, fs, length=length)
    # Mix the original audio with the synthesized clicks.
    out_audio = np.zeros(max(len(audio), len(audio_clicks)))
    out_audio[:len(audio)] = audio
    out_audio[:len(audio_clicks)] += audio_clicks
    # Write the mixed signal to disk.
    scipy.io.wavfile.write(out_file, fs, out_audio)
"resource": ""
} |
def synchronize_labels(new_bound_idxs, old_bound_idxs, old_labels, N):
    """Synchronizes the labels from the old_bound_idxs to the new_bound_idxs.

    Parameters
    ----------
    new_bound_idxs: np.array
        New indeces to synchronize with.
    old_bound_idxs: np.array
        Old indeces, same shape as labels + 1.
    old_labels: np.array
        Labels associated to the old_bound_idxs.
    N: int
        Total number of frames.

    Returns
    -------
    new_labels: np.array
        New labels, synchronized to the new boundary indeces.
    """
    assert len(old_bound_idxs) - 1 == len(old_labels)
    # Expand the old labels into one value per frame.
    frame_labels = np.zeros(N)
    for start, end, label in zip(old_bound_idxs[:-1], old_bound_idxs[1:],
                                 old_labels):
        frame_labels[start:end] = label
    # Each new segment gets the median of the frame-wise labels it spans.
    new_labels = np.zeros(len(new_bound_idxs) - 1)
    for i, (start, end) in enumerate(zip(new_bound_idxs[:-1],
                                         new_bound_idxs[1:])):
        new_labels[i] = np.median(frame_labels[start:end])
    return new_labels
"resource": ""
} |
def process_segmentation_level(est_idxs, est_labels, N, frame_times, dur):
    """Processes a level of segmentation, and converts it into times.

    Parameters
    ----------
    est_idxs: np.array
        Estimated boundaries in frame indeces.
    est_labels: np.array
        Estimated labels.
    N: int
        Number of frames in the whole track.
    frame_times: np.array
        Time stamp for each frame.
    dur: float
        Duration of the audio track.

    Returns
    -------
    est_times: np.array
        Estimated segment boundaries in seconds.
    est_labels: np.array
        Estimated labels for each segment.
    """
    assert est_idxs[0] == 0 and est_idxs[-1] == N - 1
    assert len(est_idxs) - 1 == len(est_labels)
    # Pad with the track limits and tag both ends with a fresh silence label.
    est_times = np.concatenate(([0], frame_times[est_idxs], [dur]))
    silence_label = np.max(est_labels) + 1
    est_labels = np.concatenate(([silence_label], est_labels,
                                 [silence_label]))
    # Drop any zero-length segments the padding may have introduced.
    est_times, est_labels = remove_empty_segments(est_times, est_labels)
    # Sanity check: boundaries must span exactly [0, dur].
    assert np.allclose([est_times[0]], [0]) and \
        np.allclose([est_times[-1]], [dur])
    return est_times, est_labels
"resource": ""
} |
def align_end_hierarchies(hier1, hier2, thres=0.5):
    """Align the end of the hierarchies such that they end at the same exact
    second as long they have the same duration within a certain threshold.

    Parameters
    ----------
    hier1: list
        List containing hierarchical segment boundaries.
    hier2: list
        List containing hierarchical segment boundaries.
    thres: float > 0
        Threshold to decide whether two values are the same.
    """
    # Sanity-check: every level of a hierarchy must end at the same time.
    dur_h1 = hier1[0][-1]
    for level in hier1:
        assert level[-1] == dur_h1, "hier1 is not correctly " \
            "formatted {} {}".format(level[-1], dur_h1)
    dur_h2 = hier2[0][-1]
    for level in hier2:
        assert level[-1] == dur_h2, "hier2 is not correctly formatted"
    # Durations differ too much: leave both hierarchies untouched.
    if abs(dur_h1 - dur_h2) > thres:
        return
    # Snap the end of every level of hier1 onto hier2's duration.
    for level in hier1:
        level[-1] = dur_h2
"resource": ""
} |
def _distance(self, idx):
    """Compute the distance of data point `idx` to all other samples.

    `idx == -1` measures distances to the origin instead of to a column
    of the data matrix.
    """
    # Sparse data is handled in a single pass; dense data is sliced into
    # 50k-column chunks to bound memory usage.
    if scipy.sparse.issparse(self.data):
        step = self.data.shape[1]
    else:
        step = 50000
    d = np.zeros((self.data.shape[1]))
    if idx == -1:
        # Reference vector is the origin (sparse if the data is sparse).
        vec = np.zeros((self.data.shape[0], 1))
        if scipy.sparse.issparse(self.data):
            vec = scipy.sparse.csc_matrix(vec)
    else:
        vec = self.data[:, idx:idx + 1]
    self._logger.info('compute distance to node ' + str(idx))
    # Fill the distance vector chunk by chunk, logging progress as we go.
    for start in range(0, self.data.shape[1], step):
        end = min(start + step, self.data.shape[1])
        d[start:end] = self._distfunc(self.data[:, start:end], vec)
        self._logger.info('completed:' +
                          str(end / (self.data.shape[1] / 100.0)) + "%")
    return d
"resource": ""
} |
def estimate_K_knee(self, th=.015, maxK=12):
    """Estimate K by sweeping k-means over candidate values of K and
    picking the knee of the BIC curve."""
    # Clamp the largest candidate K to the number of samples, never below 2.
    if self.X.shape[0] < maxK:
        maxK = self.X.shape[0]
    if maxK < 2:
        maxK = 2
    K = np.arange(1, maxK)
    # Compute the BIC score for every candidate K.
    bics = []
    for k in K:
        means, labels = self.run_kmeans(self.X, k)
        bics.append(self.compute_bic(self.X, means, labels, K=k,
                                     R=self.X.shape[0]))
    diff_bics = np.diff(bics)
    finalK = K[-1]
    if len(bics) == 1:
        finalK = 2
    else:
        # Shift both curves so their minimum is 0 before thresholding.
        bics = np.asarray(bics)
        bics -= bics.min()
        diff_bics -= diff_bics.min()
        # The first K (> 1) whose BIC increment drops below the threshold
        # is taken as the knee.
        for i in range(len(K[:-1])):
            if diff_bics[i] < th and K[i] != 1:
                finalK = K[i]
                break
    if self.plot:
        # Visualize the BIC curve and the 2-D scatter of the data.
        plt.subplot(2, 1, 1)
        plt.plot(K, bics, label="BIC")
        plt.plot(K[:-1], diff_bics, label="BIC diff")
        plt.legend(loc=2)
        plt.subplot(2, 1, 2)
        plt.scatter(self.X[:, 0], self.X[:, 1])
        plt.show()
    return finalK
"resource": ""
} |
def get_clustered_data(self, X, labels, label_index):
    """Return the data points whose learned label equals `label_index`."""
    idxs = np.argwhere(labels == label_index)
    # X[idxs] has shape (k, 1, dim); squeeze the singleton axis away.
    subset = X[idxs]
    return subset.reshape((subset.shape[0], subset.shape[-1]))
"resource": ""
} |
def run_kmeans(self, X, K):
    """Run k-means on whitened data; return the means and the labels."""
    # Whitening scales each feature to unit variance before clustering.
    whitened = vq.whiten(X)
    centroids, _ = vq.kmeans(whitened, K, iter=100)
    # Assign every whitened observation to its closest centroid.
    assignments, _ = vq.vq(whitened, centroids)
    return centroids, assignments
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.