id int32 0 252k | repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 51 19.8k | code_tokens list | docstring stringlengths 3 17.3k | docstring_tokens list | sha stringlengths 40 40 | url stringlengths 87 242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
21,400 | dgovil/PySignal | PySignal.py | Signal.emit | def emit(self, *args, **kwargs):
"""
Calls all the connected slots with the provided args and kwargs unless block is activated
"""
if self._block:
return
for slot in self._slots:
if not slot:
continue
elif isinstance(slot, partial):
slot()
elif isinstance(slot, weakref.WeakKeyDictionary):
# For class methods, get the class object and call the method accordingly.
for obj, method in slot.items():
method(obj, *args, **kwargs)
elif isinstance(slot, weakref.ref):
# If it's a weakref, call the ref to get the instance and then call the func
# Don't wrap in try/except so we don't risk masking exceptions from the actual func call
if (slot() is not None):
slot()(*args, **kwargs)
else:
# Else call it in a standard way. Should be just lambdas at this point
slot(*args, **kwargs) | python | def emit(self, *args, **kwargs):
if self._block:
return
for slot in self._slots:
if not slot:
continue
elif isinstance(slot, partial):
slot()
elif isinstance(slot, weakref.WeakKeyDictionary):
# For class methods, get the class object and call the method accordingly.
for obj, method in slot.items():
method(obj, *args, **kwargs)
elif isinstance(slot, weakref.ref):
# If it's a weakref, call the ref to get the instance and then call the func
# Don't wrap in try/except so we don't risk masking exceptions from the actual func call
if (slot() is not None):
slot()(*args, **kwargs)
else:
# Else call it in a standard way. Should be just lambdas at this point
slot(*args, **kwargs) | [
"def",
"emit",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"self",
".",
"_block",
":",
"return",
"for",
"slot",
"in",
"self",
".",
"_slots",
":",
"if",
"not",
"slot",
":",
"continue",
"elif",
"isinstance",
"(",
"slot",
... | Calls all the connected slots with the provided args and kwargs unless block is activated | [
"Calls",
"all",
"the",
"connected",
"slots",
"with",
"the",
"provided",
"args",
"and",
"kwargs",
"unless",
"block",
"is",
"activated"
] | 72f4ced949f81e5438bd8f15247ef7890e8cc5ff | https://github.com/dgovil/PySignal/blob/72f4ced949f81e5438bd8f15247ef7890e8cc5ff/PySignal.py#L25-L49 |
21,401 | dgovil/PySignal | PySignal.py | Signal.connect | def connect(self, slot):
"""
Connects the signal to any callable object
"""
if not callable(slot):
raise ValueError("Connection to non-callable '%s' object failed" % slot.__class__.__name__)
if (isinstance(slot, partial) or '<' in slot.__name__):
# If it's a partial or a lambda. The '<' check is the only py2 and py3 compatible way I could find
if slot not in self._slots:
self._slots.append(slot)
elif inspect.ismethod(slot):
# Check if it's an instance method and store it with the instance as the key
slotSelf = slot.__self__
slotDict = weakref.WeakKeyDictionary()
slotDict[slotSelf] = slot.__func__
if slotDict not in self._slots:
self._slots.append(slotDict)
else:
# If it's just a function then just store it as a weakref.
newSlotRef = weakref.ref(slot)
if newSlotRef not in self._slots:
self._slots.append(newSlotRef) | python | def connect(self, slot):
if not callable(slot):
raise ValueError("Connection to non-callable '%s' object failed" % slot.__class__.__name__)
if (isinstance(slot, partial) or '<' in slot.__name__):
# If it's a partial or a lambda. The '<' check is the only py2 and py3 compatible way I could find
if slot not in self._slots:
self._slots.append(slot)
elif inspect.ismethod(slot):
# Check if it's an instance method and store it with the instance as the key
slotSelf = slot.__self__
slotDict = weakref.WeakKeyDictionary()
slotDict[slotSelf] = slot.__func__
if slotDict not in self._slots:
self._slots.append(slotDict)
else:
# If it's just a function then just store it as a weakref.
newSlotRef = weakref.ref(slot)
if newSlotRef not in self._slots:
self._slots.append(newSlotRef) | [
"def",
"connect",
"(",
"self",
",",
"slot",
")",
":",
"if",
"not",
"callable",
"(",
"slot",
")",
":",
"raise",
"ValueError",
"(",
"\"Connection to non-callable '%s' object failed\"",
"%",
"slot",
".",
"__class__",
".",
"__name__",
")",
"if",
"(",
"isinstance",... | Connects the signal to any callable object | [
"Connects",
"the",
"signal",
"to",
"any",
"callable",
"object"
] | 72f4ced949f81e5438bd8f15247ef7890e8cc5ff | https://github.com/dgovil/PySignal/blob/72f4ced949f81e5438bd8f15247ef7890e8cc5ff/PySignal.py#L51-L73 |
21,402 | dgovil/PySignal | PySignal.py | Signal.disconnect | def disconnect(self, slot):
"""
Disconnects the slot from the signal
"""
if not callable(slot):
return
if inspect.ismethod(slot):
# If it's a method, then find it by its instance
slotSelf = slot.__self__
for s in self._slots:
if isinstance(s, weakref.WeakKeyDictionary) and (slotSelf in s) and (s[slotSelf] is slot.__func__):
self._slots.remove(s)
break
elif isinstance(slot, partial) or '<' in slot.__name__:
# If it's a partial or lambda, try to remove directly
try:
self._slots.remove(slot)
except ValueError:
pass
else:
# It's probably a function, so try to remove by weakref
try:
self._slots.remove(weakref.ref(slot))
except ValueError:
pass | python | def disconnect(self, slot):
if not callable(slot):
return
if inspect.ismethod(slot):
# If it's a method, then find it by its instance
slotSelf = slot.__self__
for s in self._slots:
if isinstance(s, weakref.WeakKeyDictionary) and (slotSelf in s) and (s[slotSelf] is slot.__func__):
self._slots.remove(s)
break
elif isinstance(slot, partial) or '<' in slot.__name__:
# If it's a partial or lambda, try to remove directly
try:
self._slots.remove(slot)
except ValueError:
pass
else:
# It's probably a function, so try to remove by weakref
try:
self._slots.remove(weakref.ref(slot))
except ValueError:
pass | [
"def",
"disconnect",
"(",
"self",
",",
"slot",
")",
":",
"if",
"not",
"callable",
"(",
"slot",
")",
":",
"return",
"if",
"inspect",
".",
"ismethod",
"(",
"slot",
")",
":",
"# If it's a method, then find it by its instance",
"slotSelf",
"=",
"slot",
".",
"__s... | Disconnects the slot from the signal | [
"Disconnects",
"the",
"slot",
"from",
"the",
"signal"
] | 72f4ced949f81e5438bd8f15247ef7890e8cc5ff | https://github.com/dgovil/PySignal/blob/72f4ced949f81e5438bd8f15247ef7890e8cc5ff/PySignal.py#L75-L100 |
21,403 | dgovil/PySignal | PySignal.py | SignalFactory.block | def block(self, signals=None, isBlocked=True):
"""
Sets the block on any provided signals, or to all signals
:param signals: defaults to all signals. Accepts either a single string or a list of strings
:param isBlocked: the state to set the signal to
"""
if signals:
try:
if isinstance(signals, basestring):
signals = [signals]
except NameError:
if isinstance(signals, str):
signals = [signals]
signals = signals or self.keys()
for signal in signals:
if signal not in self:
raise RuntimeError("Could not find signal matching %s" % signal)
self[signal].block(isBlocked) | python | def block(self, signals=None, isBlocked=True):
if signals:
try:
if isinstance(signals, basestring):
signals = [signals]
except NameError:
if isinstance(signals, str):
signals = [signals]
signals = signals or self.keys()
for signal in signals:
if signal not in self:
raise RuntimeError("Could not find signal matching %s" % signal)
self[signal].block(isBlocked) | [
"def",
"block",
"(",
"self",
",",
"signals",
"=",
"None",
",",
"isBlocked",
"=",
"True",
")",
":",
"if",
"signals",
":",
"try",
":",
"if",
"isinstance",
"(",
"signals",
",",
"basestring",
")",
":",
"signals",
"=",
"[",
"signals",
"]",
"except",
"Name... | Sets the block on any provided signals, or to all signals
:param signals: defaults to all signals. Accepts either a single string or a list of strings
:param isBlocked: the state to set the signal to | [
"Sets",
"the",
"block",
"on",
"any",
"provided",
"signals",
"or",
"to",
"all",
"signals"
] | 72f4ced949f81e5438bd8f15247ef7890e8cc5ff | https://github.com/dgovil/PySignal/blob/72f4ced949f81e5438bd8f15247ef7890e8cc5ff/PySignal.py#L167-L187 |
21,404 | craffel/mir_eval | mir_eval/io.py | _open | def _open(file_or_str, **kwargs):
'''Either open a file handle, or use an existing file-like object.
This will behave as the `open` function if `file_or_str` is a string.
If `file_or_str` has the `read` attribute, it will return `file_or_str`.
Otherwise, an `IOError` is raised.
'''
if hasattr(file_or_str, 'read'):
yield file_or_str
elif isinstance(file_or_str, six.string_types):
with open(file_or_str, **kwargs) as file_desc:
yield file_desc
else:
raise IOError('Invalid file-or-str object: {}'.format(file_or_str)) | python | def _open(file_or_str, **kwargs):
'''Either open a file handle, or use an existing file-like object.
This will behave as the `open` function if `file_or_str` is a string.
If `file_or_str` has the `read` attribute, it will return `file_or_str`.
Otherwise, an `IOError` is raised.
'''
if hasattr(file_or_str, 'read'):
yield file_or_str
elif isinstance(file_or_str, six.string_types):
with open(file_or_str, **kwargs) as file_desc:
yield file_desc
else:
raise IOError('Invalid file-or-str object: {}'.format(file_or_str)) | [
"def",
"_open",
"(",
"file_or_str",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"hasattr",
"(",
"file_or_str",
",",
"'read'",
")",
":",
"yield",
"file_or_str",
"elif",
"isinstance",
"(",
"file_or_str",
",",
"six",
".",
"string_types",
")",
":",
"with",
"ope... | Either open a file handle, or use an existing file-like object.
This will behave as the `open` function if `file_or_str` is a string.
If `file_or_str` has the `read` attribute, it will return `file_or_str`.
Otherwise, an `IOError` is raised. | [
"Either",
"open",
"a",
"file",
"handle",
"or",
"use",
"an",
"existing",
"file",
"-",
"like",
"object",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L18-L33 |
21,405 | craffel/mir_eval | mir_eval/io.py | load_delimited | def load_delimited(filename, converters, delimiter=r'\s+'):
r"""Utility function for loading in data from an annotation file where columns
are delimited. The number of columns is inferred from the length of
the provided converters list.
Examples
--------
>>> # Load in a one-column list of event times (floats)
>>> load_delimited('events.txt', [float])
>>> # Load in a list of labeled events, separated by commas
>>> load_delimited('labeled_events.csv', [float, str], ',')
Parameters
----------
filename : str
Path to the annotation file
converters : list of functions
Each entry in column ``n`` of the file will be cast by the function
``converters[n]``.
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
columns : tuple of lists
Each list in this tuple corresponds to values in one of the columns
in the file.
"""
# Initialize list of empty lists
n_columns = len(converters)
columns = tuple(list() for _ in range(n_columns))
# Create re object for splitting lines
splitter = re.compile(delimiter)
# Note: we do io manually here for two reasons.
# 1. The csv module has difficulties with unicode, which may lead
# to failures on certain annotation strings
#
# 2. numpy's text loader does not handle non-numeric data
#
with _open(filename, mode='r') as input_file:
for row, line in enumerate(input_file, 1):
# Split each line using the supplied delimiter
data = splitter.split(line.strip(), n_columns - 1)
# Throw a helpful error if we got an unexpected # of columns
if n_columns != len(data):
raise ValueError('Expected {} columns, got {} at '
'{}:{:d}:\n\t{}'.format(n_columns, len(data),
filename, row, line))
for value, column, converter in zip(data, columns, converters):
# Try converting the value, throw a helpful error on failure
try:
converted_value = converter(value)
except:
raise ValueError("Couldn't convert value {} using {} "
"found at {}:{:d}:\n\t{}".format(
value, converter.__name__, filename,
row, line))
column.append(converted_value)
# Sane output
if n_columns == 1:
return columns[0]
else:
return columns | python | def load_delimited(filename, converters, delimiter=r'\s+'):
r"""Utility function for loading in data from an annotation file where columns
are delimited. The number of columns is inferred from the length of
the provided converters list.
Examples
--------
>>> # Load in a one-column list of event times (floats)
>>> load_delimited('events.txt', [float])
>>> # Load in a list of labeled events, separated by commas
>>> load_delimited('labeled_events.csv', [float, str], ',')
Parameters
----------
filename : str
Path to the annotation file
converters : list of functions
Each entry in column ``n`` of the file will be cast by the function
``converters[n]``.
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
columns : tuple of lists
Each list in this tuple corresponds to values in one of the columns
in the file.
"""
# Initialize list of empty lists
n_columns = len(converters)
columns = tuple(list() for _ in range(n_columns))
# Create re object for splitting lines
splitter = re.compile(delimiter)
# Note: we do io manually here for two reasons.
# 1. The csv module has difficulties with unicode, which may lead
# to failures on certain annotation strings
#
# 2. numpy's text loader does not handle non-numeric data
#
with _open(filename, mode='r') as input_file:
for row, line in enumerate(input_file, 1):
# Split each line using the supplied delimiter
data = splitter.split(line.strip(), n_columns - 1)
# Throw a helpful error if we got an unexpected # of columns
if n_columns != len(data):
raise ValueError('Expected {} columns, got {} at '
'{}:{:d}:\n\t{}'.format(n_columns, len(data),
filename, row, line))
for value, column, converter in zip(data, columns, converters):
# Try converting the value, throw a helpful error on failure
try:
converted_value = converter(value)
except:
raise ValueError("Couldn't convert value {} using {} "
"found at {}:{:d}:\n\t{}".format(
value, converter.__name__, filename,
row, line))
column.append(converted_value)
# Sane output
if n_columns == 1:
return columns[0]
else:
return columns | [
"def",
"load_delimited",
"(",
"filename",
",",
"converters",
",",
"delimiter",
"=",
"r'\\s+'",
")",
":",
"# Initialize list of empty lists",
"n_columns",
"=",
"len",
"(",
"converters",
")",
"columns",
"=",
"tuple",
"(",
"list",
"(",
")",
"for",
"_",
"in",
"r... | r"""Utility function for loading in data from an annotation file where columns
are delimited. The number of columns is inferred from the length of
the provided converters list.
Examples
--------
>>> # Load in a one-column list of event times (floats)
>>> load_delimited('events.txt', [float])
>>> # Load in a list of labeled events, separated by commas
>>> load_delimited('labeled_events.csv', [float, str], ',')
Parameters
----------
filename : str
Path to the annotation file
converters : list of functions
Each entry in column ``n`` of the file will be cast by the function
``converters[n]``.
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
columns : tuple of lists
Each list in this tuple corresponds to values in one of the columns
in the file. | [
"r",
"Utility",
"function",
"for",
"loading",
"in",
"data",
"from",
"an",
"annotation",
"file",
"where",
"columns",
"are",
"delimited",
".",
"The",
"number",
"of",
"columns",
"is",
"inferred",
"from",
"the",
"length",
"of",
"the",
"provided",
"converters",
"... | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L36-L105 |
21,406 | craffel/mir_eval | mir_eval/io.py | load_events | def load_events(filename, delimiter=r'\s+'):
r"""Import time-stamp events from an annotation file. The file should
consist of a single column of numeric values corresponding to the event
times. This is primarily useful for processing events which lack duration,
such as beats or onsets.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
event_times : np.ndarray
array of event times (float)
"""
# Use our universal function to load in the events
events = load_delimited(filename, [float], delimiter)
events = np.array(events)
# Validate them, but throw a warning in place of an error
try:
util.validate_events(events)
except ValueError as error:
warnings.warn(error.args[0])
return events | python | def load_events(filename, delimiter=r'\s+'):
r"""Import time-stamp events from an annotation file. The file should
consist of a single column of numeric values corresponding to the event
times. This is primarily useful for processing events which lack duration,
such as beats or onsets.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
event_times : np.ndarray
array of event times (float)
"""
# Use our universal function to load in the events
events = load_delimited(filename, [float], delimiter)
events = np.array(events)
# Validate them, but throw a warning in place of an error
try:
util.validate_events(events)
except ValueError as error:
warnings.warn(error.args[0])
return events | [
"def",
"load_events",
"(",
"filename",
",",
"delimiter",
"=",
"r'\\s+'",
")",
":",
"# Use our universal function to load in the events",
"events",
"=",
"load_delimited",
"(",
"filename",
",",
"[",
"float",
"]",
",",
"delimiter",
")",
"events",
"=",
"np",
".",
"a... | r"""Import time-stamp events from an annotation file. The file should
consist of a single column of numeric values corresponding to the event
times. This is primarily useful for processing events which lack duration,
such as beats or onsets.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
event_times : np.ndarray
array of event times (float) | [
"r",
"Import",
"time",
"-",
"stamp",
"events",
"from",
"an",
"annotation",
"file",
".",
"The",
"file",
"should",
"consist",
"of",
"a",
"single",
"column",
"of",
"numeric",
"values",
"corresponding",
"to",
"the",
"event",
"times",
".",
"This",
"is",
"primar... | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L108-L137 |
21,407 | craffel/mir_eval | mir_eval/io.py | load_labeled_events | def load_labeled_events(filename, delimiter=r'\s+'):
r"""Import labeled time-stamp events from an annotation file. The file should
consist of two columns; the first having numeric values corresponding to
the event times and the second having string labels for each event. This
is primarily useful for processing labeled events which lack duration, such
as beats with metric beat number or onsets with an instrument label.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
event_times : np.ndarray
array of event times (float)
labels : list of str
list of labels
"""
# Use our universal function to load in the events
events, labels = load_delimited(filename, [float, str], delimiter)
events = np.array(events)
# Validate them, but throw a warning in place of an error
try:
util.validate_events(events)
except ValueError as error:
warnings.warn(error.args[0])
return events, labels | python | def load_labeled_events(filename, delimiter=r'\s+'):
r"""Import labeled time-stamp events from an annotation file. The file should
consist of two columns; the first having numeric values corresponding to
the event times and the second having string labels for each event. This
is primarily useful for processing labeled events which lack duration, such
as beats with metric beat number or onsets with an instrument label.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
event_times : np.ndarray
array of event times (float)
labels : list of str
list of labels
"""
# Use our universal function to load in the events
events, labels = load_delimited(filename, [float, str], delimiter)
events = np.array(events)
# Validate them, but throw a warning in place of an error
try:
util.validate_events(events)
except ValueError as error:
warnings.warn(error.args[0])
return events, labels | [
"def",
"load_labeled_events",
"(",
"filename",
",",
"delimiter",
"=",
"r'\\s+'",
")",
":",
"# Use our universal function to load in the events",
"events",
",",
"labels",
"=",
"load_delimited",
"(",
"filename",
",",
"[",
"float",
",",
"str",
"]",
",",
"delimiter",
... | r"""Import labeled time-stamp events from an annotation file. The file should
consist of two columns; the first having numeric values corresponding to
the event times and the second having string labels for each event. This
is primarily useful for processing labeled events which lack duration, such
as beats with metric beat number or onsets with an instrument label.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
event_times : np.ndarray
array of event times (float)
labels : list of str
list of labels | [
"r",
"Import",
"labeled",
"time",
"-",
"stamp",
"events",
"from",
"an",
"annotation",
"file",
".",
"The",
"file",
"should",
"consist",
"of",
"two",
"columns",
";",
"the",
"first",
"having",
"numeric",
"values",
"corresponding",
"to",
"the",
"event",
"times",... | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L140-L172 |
21,408 | craffel/mir_eval | mir_eval/io.py | load_time_series | def load_time_series(filename, delimiter=r'\s+'):
r"""Import a time series from an annotation file. The file should consist of
two columns of numeric values corresponding to the time and value of each
sample of the time series.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
times : np.ndarray
array of timestamps (float)
values : np.ndarray
array of corresponding numeric values (float)
"""
# Use our universal function to load in the events
times, values = load_delimited(filename, [float, float], delimiter)
times = np.array(times)
values = np.array(values)
return times, values | python | def load_time_series(filename, delimiter=r'\s+'):
r"""Import a time series from an annotation file. The file should consist of
two columns of numeric values corresponding to the time and value of each
sample of the time series.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
times : np.ndarray
array of timestamps (float)
values : np.ndarray
array of corresponding numeric values (float)
"""
# Use our universal function to load in the events
times, values = load_delimited(filename, [float, float], delimiter)
times = np.array(times)
values = np.array(values)
return times, values | [
"def",
"load_time_series",
"(",
"filename",
",",
"delimiter",
"=",
"r'\\s+'",
")",
":",
"# Use our universal function to load in the events",
"times",
",",
"values",
"=",
"load_delimited",
"(",
"filename",
",",
"[",
"float",
",",
"float",
"]",
",",
"delimiter",
")... | r"""Import a time series from an annotation file. The file should consist of
two columns of numeric values corresponding to the time and value of each
sample of the time series.
Parameters
----------
filename : str
Path to the annotation file
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
Returns
-------
times : np.ndarray
array of timestamps (float)
values : np.ndarray
array of corresponding numeric values (float) | [
"r",
"Import",
"a",
"time",
"series",
"from",
"an",
"annotation",
"file",
".",
"The",
"file",
"should",
"consist",
"of",
"two",
"columns",
"of",
"numeric",
"values",
"corresponding",
"to",
"the",
"time",
"and",
"value",
"of",
"each",
"sample",
"of",
"the",... | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L245-L271 |
21,409 | craffel/mir_eval | mir_eval/io.py | load_wav | def load_wav(path, mono=True):
"""Loads a .wav file as a numpy array using ``scipy.io.wavfile``.
Parameters
----------
path : str
Path to a .wav file
mono : bool
If the provided .wav has more than one channel, it will be
converted to mono if ``mono=True``. (Default value = True)
Returns
-------
audio_data : np.ndarray
Array of audio samples, normalized to the range [-1., 1.]
fs : int
Sampling rate of the audio data
"""
fs, audio_data = scipy.io.wavfile.read(path)
# Make float in range [-1, 1]
if audio_data.dtype == 'int8':
audio_data = audio_data/float(2**8)
elif audio_data.dtype == 'int16':
audio_data = audio_data/float(2**16)
elif audio_data.dtype == 'int32':
audio_data = audio_data/float(2**24)
else:
raise ValueError('Got unexpected .wav data type '
'{}'.format(audio_data.dtype))
# Optionally convert to mono
if mono and audio_data.ndim != 1:
audio_data = audio_data.mean(axis=1)
return audio_data, fs | python | def load_wav(path, mono=True):
fs, audio_data = scipy.io.wavfile.read(path)
# Make float in range [-1, 1]
if audio_data.dtype == 'int8':
audio_data = audio_data/float(2**8)
elif audio_data.dtype == 'int16':
audio_data = audio_data/float(2**16)
elif audio_data.dtype == 'int32':
audio_data = audio_data/float(2**24)
else:
raise ValueError('Got unexpected .wav data type '
'{}'.format(audio_data.dtype))
# Optionally convert to mono
if mono and audio_data.ndim != 1:
audio_data = audio_data.mean(axis=1)
return audio_data, fs | [
"def",
"load_wav",
"(",
"path",
",",
"mono",
"=",
"True",
")",
":",
"fs",
",",
"audio_data",
"=",
"scipy",
".",
"io",
".",
"wavfile",
".",
"read",
"(",
"path",
")",
"# Make float in range [-1, 1]",
"if",
"audio_data",
".",
"dtype",
"==",
"'int8'",
":",
... | Loads a .wav file as a numpy array using ``scipy.io.wavfile``.
Parameters
----------
path : str
Path to a .wav file
mono : bool
If the provided .wav has more than one channel, it will be
converted to mono if ``mono=True``. (Default value = True)
Returns
-------
audio_data : np.ndarray
Array of audio samples, normalized to the range [-1., 1.]
fs : int
Sampling rate of the audio data | [
"Loads",
"a",
".",
"wav",
"file",
"as",
"a",
"numpy",
"array",
"using",
"scipy",
".",
"io",
".",
"wavfile",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L353-L387 |
21,410 | craffel/mir_eval | mir_eval/io.py | load_ragged_time_series | def load_ragged_time_series(filename, dtype=float, delimiter=r'\s+',
header=False):
r"""Utility function for loading in data from a delimited time series
annotation file with a variable number of columns.
Assumes that column 0 contains time stamps and columns 1 through n contain
values. n may be variable from time stamp to time stamp.
Examples
--------
>>> # Load a ragged list of tab-delimited multi-f0 midi notes
>>> times, vals = load_ragged_time_series('multif0.txt', dtype=int,
delimiter='\t')
>>> # Load a raggled list of space delimited multi-f0 values with a header
>>> times, vals = load_ragged_time_series('labeled_events.csv',
header=True)
Parameters
----------
filename : str
Path to the annotation file
dtype : function
Data type to apply to values columns.
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
header : bool
Indicates whether a header row is present or not.
By default, assumes no header is present.
Returns
-------
times : np.ndarray
array of timestamps (float)
values : list of np.ndarray
list of arrays of corresponding values
"""
# Initialize empty lists
times = []
values = []
# Create re object for splitting lines
splitter = re.compile(delimiter)
if header:
start_row = 1
else:
start_row = 0
with _open(filename, mode='r') as input_file:
for row, line in enumerate(input_file, start_row):
# Split each line using the supplied delimiter
data = splitter.split(line.strip())
try:
converted_time = float(data[0])
except (TypeError, ValueError) as exe:
six.raise_from(ValueError("Couldn't convert value {} using {} "
"found at {}:{:d}:\n\t{}".format(
data[0], float.__name__,
filename, row, line)), exe)
times.append(converted_time)
# cast values to a numpy array. time stamps with no values are cast
# to an empty array.
try:
converted_value = np.array(data[1:], dtype=dtype)
except (TypeError, ValueError) as exe:
six.raise_from(ValueError("Couldn't convert value {} using {} "
"found at {}:{:d}:\n\t{}".format(
data[1:], dtype.__name__,
filename, row, line)), exe)
values.append(converted_value)
return np.array(times), values | python | def load_ragged_time_series(filename, dtype=float, delimiter=r'\s+',
header=False):
r"""Utility function for loading in data from a delimited time series
annotation file with a variable number of columns.
Assumes that column 0 contains time stamps and columns 1 through n contain
values. n may be variable from time stamp to time stamp.
Examples
--------
>>> # Load a ragged list of tab-delimited multi-f0 midi notes
>>> times, vals = load_ragged_time_series('multif0.txt', dtype=int,
delimiter='\t')
>>> # Load a raggled list of space delimited multi-f0 values with a header
>>> times, vals = load_ragged_time_series('labeled_events.csv',
header=True)
Parameters
----------
filename : str
Path to the annotation file
dtype : function
Data type to apply to values columns.
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
header : bool
Indicates whether a header row is present or not.
By default, assumes no header is present.
Returns
-------
times : np.ndarray
array of timestamps (float)
values : list of np.ndarray
list of arrays of corresponding values
"""
# Initialize empty lists
times = []
values = []
# Create re object for splitting lines
splitter = re.compile(delimiter)
if header:
start_row = 1
else:
start_row = 0
with _open(filename, mode='r') as input_file:
for row, line in enumerate(input_file, start_row):
# Split each line using the supplied delimiter
data = splitter.split(line.strip())
try:
converted_time = float(data[0])
except (TypeError, ValueError) as exe:
six.raise_from(ValueError("Couldn't convert value {} using {} "
"found at {}:{:d}:\n\t{}".format(
data[0], float.__name__,
filename, row, line)), exe)
times.append(converted_time)
# cast values to a numpy array. time stamps with no values are cast
# to an empty array.
try:
converted_value = np.array(data[1:], dtype=dtype)
except (TypeError, ValueError) as exe:
six.raise_from(ValueError("Couldn't convert value {} using {} "
"found at {}:{:d}:\n\t{}".format(
data[1:], dtype.__name__,
filename, row, line)), exe)
values.append(converted_value)
return np.array(times), values | [
"def",
"load_ragged_time_series",
"(",
"filename",
",",
"dtype",
"=",
"float",
",",
"delimiter",
"=",
"r'\\s+'",
",",
"header",
"=",
"False",
")",
":",
"# Initialize empty lists",
"times",
"=",
"[",
"]",
"values",
"=",
"[",
"]",
"# Create re object for splitting... | r"""Utility function for loading in data from a delimited time series
annotation file with a variable number of columns.
Assumes that column 0 contains time stamps and columns 1 through n contain
values. n may be variable from time stamp to time stamp.
Examples
--------
>>> # Load a ragged list of tab-delimited multi-f0 midi notes
>>> times, vals = load_ragged_time_series('multif0.txt', dtype=int,
delimiter='\t')
>>> # Load a raggled list of space delimited multi-f0 values with a header
>>> times, vals = load_ragged_time_series('labeled_events.csv',
header=True)
Parameters
----------
filename : str
Path to the annotation file
dtype : function
Data type to apply to values columns.
delimiter : str
Separator regular expression.
By default, lines will be split by any amount of whitespace.
header : bool
Indicates whether a header row is present or not.
By default, assumes no header is present.
Returns
-------
times : np.ndarray
array of timestamps (float)
values : list of np.ndarray
list of arrays of corresponding values | [
"r",
"Utility",
"function",
"for",
"loading",
"in",
"data",
"from",
"a",
"delimited",
"time",
"series",
"annotation",
"file",
"with",
"a",
"variable",
"number",
"of",
"columns",
".",
"Assumes",
"that",
"column",
"0",
"contains",
"time",
"stamps",
"and",
"col... | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/io.py#L511-L583 |
21,411 | craffel/mir_eval | mir_eval/chord.py | pitch_class_to_semitone | def pitch_class_to_semitone(pitch_class):
r'''Convert a pitch class to semitone.
Parameters
----------
pitch_class : str
Spelling of a given pitch class, e.g. 'C#', 'Gbb'
Returns
-------
semitone : int
Semitone value of the pitch class.
'''
semitone = 0
for idx, char in enumerate(pitch_class):
if char == '#' and idx > 0:
semitone += 1
elif char == 'b' and idx > 0:
semitone -= 1
elif idx == 0:
semitone = PITCH_CLASSES.get(char)
else:
raise InvalidChordException(
"Pitch class improperly formed: %s" % pitch_class)
return semitone % 12 | python | def pitch_class_to_semitone(pitch_class):
r'''Convert a pitch class to semitone.
Parameters
----------
pitch_class : str
Spelling of a given pitch class, e.g. 'C#', 'Gbb'
Returns
-------
semitone : int
Semitone value of the pitch class.
'''
semitone = 0
for idx, char in enumerate(pitch_class):
if char == '#' and idx > 0:
semitone += 1
elif char == 'b' and idx > 0:
semitone -= 1
elif idx == 0:
semitone = PITCH_CLASSES.get(char)
else:
raise InvalidChordException(
"Pitch class improperly formed: %s" % pitch_class)
return semitone % 12 | [
"def",
"pitch_class_to_semitone",
"(",
"pitch_class",
")",
":",
"semitone",
"=",
"0",
"for",
"idx",
",",
"char",
"in",
"enumerate",
"(",
"pitch_class",
")",
":",
"if",
"char",
"==",
"'#'",
"and",
"idx",
">",
"0",
":",
"semitone",
"+=",
"1",
"elif",
"ch... | r'''Convert a pitch class to semitone.
Parameters
----------
pitch_class : str
Spelling of a given pitch class, e.g. 'C#', 'Gbb'
Returns
-------
semitone : int
Semitone value of the pitch class. | [
"r",
"Convert",
"a",
"pitch",
"class",
"to",
"semitone",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L143-L168 |
21,412 | craffel/mir_eval | mir_eval/chord.py | scale_degree_to_semitone | def scale_degree_to_semitone(scale_degree):
r"""Convert a scale degree to semitone.
Parameters
----------
scale degree : str
Spelling of a relative scale degree, e.g. 'b3', '7', '#5'
Returns
-------
semitone : int
Relative semitone of the scale degree, wrapped to a single octave
Raises
------
InvalidChordException if `scale_degree` is invalid.
"""
semitone = 0
offset = 0
if scale_degree.startswith("#"):
offset = scale_degree.count("#")
scale_degree = scale_degree.strip("#")
elif scale_degree.startswith('b'):
offset = -1 * scale_degree.count("b")
scale_degree = scale_degree.strip("b")
semitone = SCALE_DEGREES.get(scale_degree, None)
if semitone is None:
raise InvalidChordException(
"Scale degree improperly formed: {}, expected one of {}."
.format(scale_degree, list(SCALE_DEGREES.keys())))
return semitone + offset | python | def scale_degree_to_semitone(scale_degree):
r"""Convert a scale degree to semitone.
Parameters
----------
scale degree : str
Spelling of a relative scale degree, e.g. 'b3', '7', '#5'
Returns
-------
semitone : int
Relative semitone of the scale degree, wrapped to a single octave
Raises
------
InvalidChordException if `scale_degree` is invalid.
"""
semitone = 0
offset = 0
if scale_degree.startswith("#"):
offset = scale_degree.count("#")
scale_degree = scale_degree.strip("#")
elif scale_degree.startswith('b'):
offset = -1 * scale_degree.count("b")
scale_degree = scale_degree.strip("b")
semitone = SCALE_DEGREES.get(scale_degree, None)
if semitone is None:
raise InvalidChordException(
"Scale degree improperly formed: {}, expected one of {}."
.format(scale_degree, list(SCALE_DEGREES.keys())))
return semitone + offset | [
"def",
"scale_degree_to_semitone",
"(",
"scale_degree",
")",
":",
"semitone",
"=",
"0",
"offset",
"=",
"0",
"if",
"scale_degree",
".",
"startswith",
"(",
"\"#\"",
")",
":",
"offset",
"=",
"scale_degree",
".",
"count",
"(",
"\"#\"",
")",
"scale_degree",
"=",
... | r"""Convert a scale degree to semitone.
Parameters
----------
scale degree : str
Spelling of a relative scale degree, e.g. 'b3', '7', '#5'
Returns
-------
semitone : int
Relative semitone of the scale degree, wrapped to a single octave
Raises
------
InvalidChordException if `scale_degree` is invalid. | [
"r",
"Convert",
"a",
"scale",
"degree",
"to",
"semitone",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L175-L206 |
21,413 | craffel/mir_eval | mir_eval/chord.py | scale_degree_to_bitmap | def scale_degree_to_bitmap(scale_degree, modulo=False, length=BITMAP_LENGTH):
"""Create a bitmap representation of a scale degree.
Note that values in the bitmap may be negative, indicating that the
semitone is to be removed.
Parameters
----------
scale_degree : str
Spelling of a relative scale degree, e.g. 'b3', '7', '#5'
modulo : bool, default=True
If a scale degree exceeds the length of the bit-vector, modulo the
scale degree back into the bit-vector; otherwise it is discarded.
length : int, default=12
Length of the bit-vector to produce
Returns
-------
bitmap : np.ndarray, in [-1, 0, 1], len=`length`
Bitmap representation of this scale degree.
"""
sign = 1
if scale_degree.startswith("*"):
sign = -1
scale_degree = scale_degree.strip("*")
edit_map = [0] * length
sd_idx = scale_degree_to_semitone(scale_degree)
if sd_idx < length or modulo:
edit_map[sd_idx % length] = sign
return np.array(edit_map) | python | def scale_degree_to_bitmap(scale_degree, modulo=False, length=BITMAP_LENGTH):
sign = 1
if scale_degree.startswith("*"):
sign = -1
scale_degree = scale_degree.strip("*")
edit_map = [0] * length
sd_idx = scale_degree_to_semitone(scale_degree)
if sd_idx < length or modulo:
edit_map[sd_idx % length] = sign
return np.array(edit_map) | [
"def",
"scale_degree_to_bitmap",
"(",
"scale_degree",
",",
"modulo",
"=",
"False",
",",
"length",
"=",
"BITMAP_LENGTH",
")",
":",
"sign",
"=",
"1",
"if",
"scale_degree",
".",
"startswith",
"(",
"\"*\"",
")",
":",
"sign",
"=",
"-",
"1",
"scale_degree",
"=",... | Create a bitmap representation of a scale degree.
Note that values in the bitmap may be negative, indicating that the
semitone is to be removed.
Parameters
----------
scale_degree : str
Spelling of a relative scale degree, e.g. 'b3', '7', '#5'
modulo : bool, default=True
If a scale degree exceeds the length of the bit-vector, modulo the
scale degree back into the bit-vector; otherwise it is discarded.
length : int, default=12
Length of the bit-vector to produce
Returns
-------
bitmap : np.ndarray, in [-1, 0, 1], len=`length`
Bitmap representation of this scale degree. | [
"Create",
"a",
"bitmap",
"representation",
"of",
"a",
"scale",
"degree",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L209-L238 |
21,414 | craffel/mir_eval | mir_eval/chord.py | quality_to_bitmap | def quality_to_bitmap(quality):
"""Return the bitmap for a given quality.
Parameters
----------
quality : str
Chord quality name.
Returns
-------
bitmap : np.ndarray
Bitmap representation of this quality (12-dim).
"""
if quality not in QUALITIES:
raise InvalidChordException(
"Unsupported chord quality shorthand: '%s' "
"Did you mean to reduce extended chords?" % quality)
return np.array(QUALITIES[quality]) | python | def quality_to_bitmap(quality):
if quality not in QUALITIES:
raise InvalidChordException(
"Unsupported chord quality shorthand: '%s' "
"Did you mean to reduce extended chords?" % quality)
return np.array(QUALITIES[quality]) | [
"def",
"quality_to_bitmap",
"(",
"quality",
")",
":",
"if",
"quality",
"not",
"in",
"QUALITIES",
":",
"raise",
"InvalidChordException",
"(",
"\"Unsupported chord quality shorthand: '%s' \"",
"\"Did you mean to reduce extended chords?\"",
"%",
"quality",
")",
"return",
"np",... | Return the bitmap for a given quality.
Parameters
----------
quality : str
Chord quality name.
Returns
-------
bitmap : np.ndarray
Bitmap representation of this quality (12-dim). | [
"Return",
"the",
"bitmap",
"for",
"a",
"given",
"quality",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L276-L294 |
21,415 | craffel/mir_eval | mir_eval/chord.py | validate_chord_label | def validate_chord_label(chord_label):
"""Test for well-formedness of a chord label.
Parameters
----------
chord : str
Chord label to validate.
"""
# This monster regexp is pulled from the JAMS chord namespace,
# which is in turn derived from the context-free grammar of
# Harte et al., 2005.
pattern = re.compile(r'''^((N|X)|(([A-G](b*|#*))((:(maj|min|dim|aug|1|5|sus2|sus4|maj6|min6|7|maj7|min7|dim7|hdim7|minmaj7|aug7|9|maj9|min9|11|maj11|min11|13|maj13|min13)(\((\*?((b*|#*)([1-9]|1[0-3]?))(,\*?((b*|#*)([1-9]|1[0-3]?)))*)\))?)|(:\((\*?((b*|#*)([1-9]|1[0-3]?))(,\*?((b*|#*)([1-9]|1[0-3]?)))*)\)))?((/((b*|#*)([1-9]|1[0-3]?)))?)?))$''') # nopep8
if not pattern.match(chord_label):
raise InvalidChordException('Invalid chord label: '
'{}'.format(chord_label))
pass | python | def validate_chord_label(chord_label):
# This monster regexp is pulled from the JAMS chord namespace,
# which is in turn derived from the context-free grammar of
# Harte et al., 2005.
pattern = re.compile(r'''^((N|X)|(([A-G](b*|#*))((:(maj|min|dim|aug|1|5|sus2|sus4|maj6|min6|7|maj7|min7|dim7|hdim7|minmaj7|aug7|9|maj9|min9|11|maj11|min11|13|maj13|min13)(\((\*?((b*|#*)([1-9]|1[0-3]?))(,\*?((b*|#*)([1-9]|1[0-3]?)))*)\))?)|(:\((\*?((b*|#*)([1-9]|1[0-3]?))(,\*?((b*|#*)([1-9]|1[0-3]?)))*)\)))?((/((b*|#*)([1-9]|1[0-3]?)))?)?))$''') # nopep8
if not pattern.match(chord_label):
raise InvalidChordException('Invalid chord label: '
'{}'.format(chord_label))
pass | [
"def",
"validate_chord_label",
"(",
"chord_label",
")",
":",
"# This monster regexp is pulled from the JAMS chord namespace,",
"# which is in turn derived from the context-free grammar of",
"# Harte et al., 2005.",
"pattern",
"=",
"re",
".",
"compile",
"(",
"r'''^((N|X)|(([A-G](b*|#*))... | Test for well-formedness of a chord label.
Parameters
----------
chord : str
Chord label to validate. | [
"Test",
"for",
"well",
"-",
"formedness",
"of",
"a",
"chord",
"label",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L338-L357 |
21,416 | craffel/mir_eval | mir_eval/chord.py | join | def join(chord_root, quality='', extensions=None, bass=''):
r"""Join the parts of a chord into a complete chord label.
Parameters
----------
chord_root : str
Root pitch class of the chord, e.g. 'C', 'Eb'
quality : str
Quality of the chord, e.g. 'maj', 'hdim7'
(Default value = '')
extensions : list
Any added or absent scaled degrees for this chord, e.g. ['4', '\*3']
(Default value = None)
bass : str
Scale degree of the bass note, e.g. '5'.
(Default value = '')
Returns
-------
chord_label : str
A complete chord label.
"""
chord_label = chord_root
if quality or extensions:
chord_label += ":%s" % quality
if extensions:
chord_label += "(%s)" % ",".join(extensions)
if bass and bass != '1':
chord_label += "/%s" % bass
validate_chord_label(chord_label)
return chord_label | python | def join(chord_root, quality='', extensions=None, bass=''):
r"""Join the parts of a chord into a complete chord label.
Parameters
----------
chord_root : str
Root pitch class of the chord, e.g. 'C', 'Eb'
quality : str
Quality of the chord, e.g. 'maj', 'hdim7'
(Default value = '')
extensions : list
Any added or absent scaled degrees for this chord, e.g. ['4', '\*3']
(Default value = None)
bass : str
Scale degree of the bass note, e.g. '5'.
(Default value = '')
Returns
-------
chord_label : str
A complete chord label.
"""
chord_label = chord_root
if quality or extensions:
chord_label += ":%s" % quality
if extensions:
chord_label += "(%s)" % ",".join(extensions)
if bass and bass != '1':
chord_label += "/%s" % bass
validate_chord_label(chord_label)
return chord_label | [
"def",
"join",
"(",
"chord_root",
",",
"quality",
"=",
"''",
",",
"extensions",
"=",
"None",
",",
"bass",
"=",
"''",
")",
":",
"chord_label",
"=",
"chord_root",
"if",
"quality",
"or",
"extensions",
":",
"chord_label",
"+=",
"\":%s\"",
"%",
"quality",
"if... | r"""Join the parts of a chord into a complete chord label.
Parameters
----------
chord_root : str
Root pitch class of the chord, e.g. 'C', 'Eb'
quality : str
Quality of the chord, e.g. 'maj', 'hdim7'
(Default value = '')
extensions : list
Any added or absent scaled degrees for this chord, e.g. ['4', '\*3']
(Default value = None)
bass : str
Scale degree of the bass note, e.g. '5'.
(Default value = '')
Returns
-------
chord_label : str
A complete chord label. | [
"r",
"Join",
"the",
"parts",
"of",
"a",
"chord",
"into",
"a",
"complete",
"chord",
"label",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L434-L465 |
21,417 | craffel/mir_eval | mir_eval/chord.py | encode | def encode(chord_label, reduce_extended_chords=False,
strict_bass_intervals=False):
"""Translate a chord label to numerical representations for evaluation.
Parameters
----------
chord_label : str
Chord label to encode.
reduce_extended_chords : bool
Whether to map the upper voicings of extended chords (9's, 11's, 13's)
to semitone extensions.
(Default value = False)
strict_bass_intervals : bool
Whether to require that the bass scale degree is present in the chord.
(Default value = False)
Returns
-------
root_number : int
Absolute semitone of the chord's root.
semitone_bitmap : np.ndarray, dtype=int
12-dim vector of relative semitones in the chord spelling.
bass_number : int
Relative semitone of the chord's bass note, e.g. 0=root, 7=fifth, etc.
"""
if chord_label == NO_CHORD:
return NO_CHORD_ENCODED
if chord_label == X_CHORD:
return X_CHORD_ENCODED
chord_root, quality, scale_degrees, bass = split(
chord_label, reduce_extended_chords=reduce_extended_chords)
root_number = pitch_class_to_semitone(chord_root)
bass_number = scale_degree_to_semitone(bass) % 12
semitone_bitmap = quality_to_bitmap(quality)
semitone_bitmap[0] = 1
for scale_degree in scale_degrees:
semitone_bitmap += scale_degree_to_bitmap(scale_degree,
reduce_extended_chords)
semitone_bitmap = (semitone_bitmap > 0).astype(np.int)
if not semitone_bitmap[bass_number] and strict_bass_intervals:
raise InvalidChordException(
"Given bass scale degree is absent from this chord: "
"%s" % chord_label, chord_label)
else:
semitone_bitmap[bass_number] = 1
return root_number, semitone_bitmap, bass_number | python | def encode(chord_label, reduce_extended_chords=False,
strict_bass_intervals=False):
if chord_label == NO_CHORD:
return NO_CHORD_ENCODED
if chord_label == X_CHORD:
return X_CHORD_ENCODED
chord_root, quality, scale_degrees, bass = split(
chord_label, reduce_extended_chords=reduce_extended_chords)
root_number = pitch_class_to_semitone(chord_root)
bass_number = scale_degree_to_semitone(bass) % 12
semitone_bitmap = quality_to_bitmap(quality)
semitone_bitmap[0] = 1
for scale_degree in scale_degrees:
semitone_bitmap += scale_degree_to_bitmap(scale_degree,
reduce_extended_chords)
semitone_bitmap = (semitone_bitmap > 0).astype(np.int)
if not semitone_bitmap[bass_number] and strict_bass_intervals:
raise InvalidChordException(
"Given bass scale degree is absent from this chord: "
"%s" % chord_label, chord_label)
else:
semitone_bitmap[bass_number] = 1
return root_number, semitone_bitmap, bass_number | [
"def",
"encode",
"(",
"chord_label",
",",
"reduce_extended_chords",
"=",
"False",
",",
"strict_bass_intervals",
"=",
"False",
")",
":",
"if",
"chord_label",
"==",
"NO_CHORD",
":",
"return",
"NO_CHORD_ENCODED",
"if",
"chord_label",
"==",
"X_CHORD",
":",
"return",
... | Translate a chord label to numerical representations for evaluation.
Parameters
----------
chord_label : str
Chord label to encode.
reduce_extended_chords : bool
Whether to map the upper voicings of extended chords (9's, 11's, 13's)
to semitone extensions.
(Default value = False)
strict_bass_intervals : bool
Whether to require that the bass scale degree is present in the chord.
(Default value = False)
Returns
-------
root_number : int
Absolute semitone of the chord's root.
semitone_bitmap : np.ndarray, dtype=int
12-dim vector of relative semitones in the chord spelling.
bass_number : int
Relative semitone of the chord's bass note, e.g. 0=root, 7=fifth, etc. | [
"Translate",
"a",
"chord",
"label",
"to",
"numerical",
"representations",
"for",
"evaluation",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L469-L520 |
21,418 | craffel/mir_eval | mir_eval/chord.py | encode_many | def encode_many(chord_labels, reduce_extended_chords=False):
"""Translate a set of chord labels to numerical representations for sane
evaluation.
Parameters
----------
chord_labels : list
Set of chord labels to encode.
reduce_extended_chords : bool
Whether to map the upper voicings of extended chords (9's, 11's, 13's)
to semitone extensions.
(Default value = False)
Returns
-------
root_number : np.ndarray, dtype=int
Absolute semitone of the chord's root.
interval_bitmap : np.ndarray, dtype=int
12-dim vector of relative semitones in the given chord quality.
bass_number : np.ndarray, dtype=int
Relative semitones of the chord's bass notes.
"""
num_items = len(chord_labels)
roots, basses = np.zeros([2, num_items], dtype=np.int)
semitones = np.zeros([num_items, 12], dtype=np.int)
local_cache = dict()
for i, label in enumerate(chord_labels):
result = local_cache.get(label, None)
if result is None:
result = encode(label, reduce_extended_chords)
local_cache[label] = result
roots[i], semitones[i], basses[i] = result
return roots, semitones, basses | python | def encode_many(chord_labels, reduce_extended_chords=False):
num_items = len(chord_labels)
roots, basses = np.zeros([2, num_items], dtype=np.int)
semitones = np.zeros([num_items, 12], dtype=np.int)
local_cache = dict()
for i, label in enumerate(chord_labels):
result = local_cache.get(label, None)
if result is None:
result = encode(label, reduce_extended_chords)
local_cache[label] = result
roots[i], semitones[i], basses[i] = result
return roots, semitones, basses | [
"def",
"encode_many",
"(",
"chord_labels",
",",
"reduce_extended_chords",
"=",
"False",
")",
":",
"num_items",
"=",
"len",
"(",
"chord_labels",
")",
"roots",
",",
"basses",
"=",
"np",
".",
"zeros",
"(",
"[",
"2",
",",
"num_items",
"]",
",",
"dtype",
"=",... | Translate a set of chord labels to numerical representations for sane
evaluation.
Parameters
----------
chord_labels : list
Set of chord labels to encode.
reduce_extended_chords : bool
Whether to map the upper voicings of extended chords (9's, 11's, 13's)
to semitone extensions.
(Default value = False)
Returns
-------
root_number : np.ndarray, dtype=int
Absolute semitone of the chord's root.
interval_bitmap : np.ndarray, dtype=int
12-dim vector of relative semitones in the given chord quality.
bass_number : np.ndarray, dtype=int
Relative semitones of the chord's bass notes. | [
"Translate",
"a",
"set",
"of",
"chord",
"labels",
"to",
"numerical",
"representations",
"for",
"sane",
"evaluation",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L523-L556 |
21,419 | craffel/mir_eval | mir_eval/chord.py | rotate_bitmap_to_root | def rotate_bitmap_to_root(bitmap, chord_root):
"""Circularly shift a relative bitmap to its asbolute pitch classes.
For clarity, the best explanation is an example. Given 'G:Maj', the root
and quality map are as follows::
root=5
quality=[1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0] # Relative chord shape
After rotating to the root, the resulting bitmap becomes::
abs_quality = [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1] # G, B, and D
Parameters
----------
bitmap : np.ndarray, shape=(12,)
Bitmap of active notes, relative to the given root.
chord_root : int
Absolute pitch class number.
Returns
-------
bitmap : np.ndarray, shape=(12,)
Absolute bitmap of active pitch classes.
"""
bitmap = np.asarray(bitmap)
assert bitmap.ndim == 1, "Currently only 1D bitmaps are supported."
idxs = list(np.nonzero(bitmap))
idxs[-1] = (idxs[-1] + chord_root) % 12
abs_bitmap = np.zeros_like(bitmap)
abs_bitmap[tuple(idxs)] = 1
return abs_bitmap | python | def rotate_bitmap_to_root(bitmap, chord_root):
bitmap = np.asarray(bitmap)
assert bitmap.ndim == 1, "Currently only 1D bitmaps are supported."
idxs = list(np.nonzero(bitmap))
idxs[-1] = (idxs[-1] + chord_root) % 12
abs_bitmap = np.zeros_like(bitmap)
abs_bitmap[tuple(idxs)] = 1
return abs_bitmap | [
"def",
"rotate_bitmap_to_root",
"(",
"bitmap",
",",
"chord_root",
")",
":",
"bitmap",
"=",
"np",
".",
"asarray",
"(",
"bitmap",
")",
"assert",
"bitmap",
".",
"ndim",
"==",
"1",
",",
"\"Currently only 1D bitmaps are supported.\"",
"idxs",
"=",
"list",
"(",
"np"... | Circularly shift a relative bitmap to its asbolute pitch classes.
For clarity, the best explanation is an example. Given 'G:Maj', the root
and quality map are as follows::
root=5
quality=[1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0] # Relative chord shape
After rotating to the root, the resulting bitmap becomes::
abs_quality = [0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1] # G, B, and D
Parameters
----------
bitmap : np.ndarray, shape=(12,)
Bitmap of active notes, relative to the given root.
chord_root : int
Absolute pitch class number.
Returns
-------
bitmap : np.ndarray, shape=(12,)
Absolute bitmap of active pitch classes. | [
"Circularly",
"shift",
"a",
"relative",
"bitmap",
"to",
"its",
"asbolute",
"pitch",
"classes",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L559-L591 |
21,420 | craffel/mir_eval | mir_eval/chord.py | rotate_bitmaps_to_roots | def rotate_bitmaps_to_roots(bitmaps, roots):
"""Circularly shift a relative bitmaps to asbolute pitch classes.
See :func:`rotate_bitmap_to_root` for more information.
Parameters
----------
bitmap : np.ndarray, shape=(N, 12)
Bitmap of active notes, relative to the given root.
root : np.ndarray, shape=(N,)
Absolute pitch class number.
Returns
-------
bitmap : np.ndarray, shape=(N, 12)
Absolute bitmaps of active pitch classes.
"""
abs_bitmaps = []
for bitmap, chord_root in zip(bitmaps, roots):
abs_bitmaps.append(rotate_bitmap_to_root(bitmap, chord_root))
return np.asarray(abs_bitmaps) | python | def rotate_bitmaps_to_roots(bitmaps, roots):
abs_bitmaps = []
for bitmap, chord_root in zip(bitmaps, roots):
abs_bitmaps.append(rotate_bitmap_to_root(bitmap, chord_root))
return np.asarray(abs_bitmaps) | [
"def",
"rotate_bitmaps_to_roots",
"(",
"bitmaps",
",",
"roots",
")",
":",
"abs_bitmaps",
"=",
"[",
"]",
"for",
"bitmap",
",",
"chord_root",
"in",
"zip",
"(",
"bitmaps",
",",
"roots",
")",
":",
"abs_bitmaps",
".",
"append",
"(",
"rotate_bitmap_to_root",
"(",
... | Circularly shift a relative bitmaps to asbolute pitch classes.
See :func:`rotate_bitmap_to_root` for more information.
Parameters
----------
bitmap : np.ndarray, shape=(N, 12)
Bitmap of active notes, relative to the given root.
root : np.ndarray, shape=(N,)
Absolute pitch class number.
Returns
-------
bitmap : np.ndarray, shape=(N, 12)
Absolute bitmaps of active pitch classes. | [
"Circularly",
"shift",
"a",
"relative",
"bitmaps",
"to",
"asbolute",
"pitch",
"classes",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L594-L615 |
21,421 | craffel/mir_eval | mir_eval/chord.py | validate | def validate(reference_labels, estimated_labels):
"""Checks that the input annotations to a comparison function look like
valid chord labels.
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
"""
N = len(reference_labels)
M = len(estimated_labels)
if N != M:
raise ValueError(
"Chord comparison received different length lists: "
"len(reference)=%d\tlen(estimates)=%d" % (N, M))
for labels in [reference_labels, estimated_labels]:
for chord_label in labels:
validate_chord_label(chord_label)
# When either label list is empty, warn the user
if len(reference_labels) == 0:
warnings.warn('Reference labels are empty')
if len(estimated_labels) == 0:
warnings.warn('Estimated labels are empty') | python | def validate(reference_labels, estimated_labels):
N = len(reference_labels)
M = len(estimated_labels)
if N != M:
raise ValueError(
"Chord comparison received different length lists: "
"len(reference)=%d\tlen(estimates)=%d" % (N, M))
for labels in [reference_labels, estimated_labels]:
for chord_label in labels:
validate_chord_label(chord_label)
# When either label list is empty, warn the user
if len(reference_labels) == 0:
warnings.warn('Reference labels are empty')
if len(estimated_labels) == 0:
warnings.warn('Estimated labels are empty') | [
"def",
"validate",
"(",
"reference_labels",
",",
"estimated_labels",
")",
":",
"N",
"=",
"len",
"(",
"reference_labels",
")",
"M",
"=",
"len",
"(",
"estimated_labels",
")",
"if",
"N",
"!=",
"M",
":",
"raise",
"ValueError",
"(",
"\"Chord comparison received dif... | Checks that the input annotations to a comparison function look like
valid chord labels.
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against. | [
"Checks",
"that",
"the",
"input",
"annotations",
"to",
"a",
"comparison",
"function",
"look",
"like",
"valid",
"chord",
"labels",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L619-L644 |
21,422 | craffel/mir_eval | mir_eval/chord.py | weighted_accuracy | def weighted_accuracy(comparisons, weights):
"""Compute the weighted accuracy of a list of chord comparisons.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> # Here, we're using the "thirds" function to compare labels
>>> # but any of the comparison functions would work.
>>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
comparisons : np.ndarray
List of chord comparison scores, in [0, 1] or -1
weights : np.ndarray
Weights (not necessarily normalized) for each comparison.
This can be a list of interval durations
Returns
-------
score : float
Weighted accuracy
"""
N = len(comparisons)
# There should be as many weights as comparisons
if weights.shape[0] != N:
raise ValueError('weights and comparisons should be of the same'
' length. len(weights) = {} but len(comparisons)'
' = {}'.format(weights.shape[0], N))
if (weights < 0).any():
raise ValueError('Weights should all be positive.')
if np.sum(weights) == 0:
warnings.warn('No nonzero weights, returning 0')
return 0
# Find all comparison scores which are valid
valid_idx = (comparisons >= 0)
# If no comparable chords were provided, warn and return 0
if valid_idx.sum() == 0:
warnings.warn("No reference chords were comparable "
"to estimated chords, returning 0.")
return 0
# Remove any uncomparable labels
comparisons = comparisons[valid_idx]
weights = weights[valid_idx]
# Normalize the weights
total_weight = float(np.sum(weights))
normalized_weights = np.asarray(weights, dtype=float)/total_weight
# Score is the sum of all weighted comparisons
return np.sum(comparisons*normalized_weights) | python | def weighted_accuracy(comparisons, weights):
N = len(comparisons)
# There should be as many weights as comparisons
if weights.shape[0] != N:
raise ValueError('weights and comparisons should be of the same'
' length. len(weights) = {} but len(comparisons)'
' = {}'.format(weights.shape[0], N))
if (weights < 0).any():
raise ValueError('Weights should all be positive.')
if np.sum(weights) == 0:
warnings.warn('No nonzero weights, returning 0')
return 0
# Find all comparison scores which are valid
valid_idx = (comparisons >= 0)
# If no comparable chords were provided, warn and return 0
if valid_idx.sum() == 0:
warnings.warn("No reference chords were comparable "
"to estimated chords, returning 0.")
return 0
# Remove any uncomparable labels
comparisons = comparisons[valid_idx]
weights = weights[valid_idx]
# Normalize the weights
total_weight = float(np.sum(weights))
normalized_weights = np.asarray(weights, dtype=float)/total_weight
# Score is the sum of all weighted comparisons
return np.sum(comparisons*normalized_weights) | [
"def",
"weighted_accuracy",
"(",
"comparisons",
",",
"weights",
")",
":",
"N",
"=",
"len",
"(",
"comparisons",
")",
"# There should be as many weights as comparisons",
"if",
"weights",
".",
"shape",
"[",
"0",
"]",
"!=",
"N",
":",
"raise",
"ValueError",
"(",
"'... | Compute the weighted accuracy of a list of chord comparisons.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> # Here, we're using the "thirds" function to compare labels
>>> # but any of the comparison functions would work.
>>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
comparisons : np.ndarray
List of chord comparison scores, in [0, 1] or -1
weights : np.ndarray
Weights (not necessarily normalized) for each comparison.
This can be a list of interval durations
Returns
-------
score : float
Weighted accuracy | [
"Compute",
"the",
"weighted",
"accuracy",
"of",
"a",
"list",
"of",
"chord",
"comparisons",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L647-L709 |
21,423 | craffel/mir_eval | mir_eval/chord.py | thirds | def thirds(reference_labels, estimated_labels):
"""Compare chords along root & third relationships.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0]
"""
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
est_roots, est_semitones = encode_many(estimated_labels, False)[:2]
eq_roots = ref_roots == est_roots
eq_thirds = ref_semitones[:, 3] == est_semitones[:, 3]
comparison_scores = (eq_roots * eq_thirds).astype(np.float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores | python | def thirds(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
est_roots, est_semitones = encode_many(estimated_labels, False)[:2]
eq_roots = ref_roots == est_roots
eq_thirds = ref_semitones[:, 3] == est_semitones[:, 3]
comparison_scores = (eq_roots * eq_thirds).astype(np.float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores | [
"def",
"thirds",
"(",
"reference_labels",
",",
"estimated_labels",
")",
":",
"validate",
"(",
"reference_labels",
",",
"estimated_labels",
")",
"ref_roots",
",",
"ref_semitones",
"=",
"encode_many",
"(",
"reference_labels",
",",
"False",
")",
"[",
":",
"2",
"]",... | Compare chords along root & third relationships.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.thirds(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0] | [
"Compare",
"chords",
"along",
"root",
"&",
"third",
"relationships",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L712-L756 |
21,424 | craffel/mir_eval | mir_eval/chord.py | thirds_inv | def thirds_inv(reference_labels, estimated_labels):
"""Score chords along root, third, & bass relationships.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.thirds_inv(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0]
"""
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones, ref_bass = encode_many(reference_labels, False)
est_roots, est_semitones, est_bass = encode_many(estimated_labels, False)
eq_root = ref_roots == est_roots
eq_bass = ref_bass == est_bass
eq_third = ref_semitones[:, 3] == est_semitones[:, 3]
comparison_scores = (eq_root * eq_third * eq_bass).astype(np.float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores | python | def thirds_inv(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones, ref_bass = encode_many(reference_labels, False)
est_roots, est_semitones, est_bass = encode_many(estimated_labels, False)
eq_root = ref_roots == est_roots
eq_bass = ref_bass == est_bass
eq_third = ref_semitones[:, 3] == est_semitones[:, 3]
comparison_scores = (eq_root * eq_third * eq_bass).astype(np.float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores | [
"def",
"thirds_inv",
"(",
"reference_labels",
",",
"estimated_labels",
")",
":",
"validate",
"(",
"reference_labels",
",",
"estimated_labels",
")",
"ref_roots",
",",
"ref_semitones",
",",
"ref_bass",
"=",
"encode_many",
"(",
"reference_labels",
",",
"False",
")",
... | Score chords along root, third, & bass relationships.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.thirds_inv(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0] | [
"Score",
"chords",
"along",
"root",
"third",
"&",
"bass",
"relationships",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L759-L804 |
21,425 | craffel/mir_eval | mir_eval/chord.py | root | def root(reference_labels, estimated_labels):
"""Compare chords according to roots.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.root(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of
gamut.
"""
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
est_roots = encode_many(estimated_labels, False)[0]
comparison_scores = (ref_roots == est_roots).astype(np.float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores | python | def root(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
ref_roots, ref_semitones = encode_many(reference_labels, False)[:2]
est_roots = encode_many(estimated_labels, False)[0]
comparison_scores = (ref_roots == est_roots).astype(np.float)
# Ignore 'X' chords
comparison_scores[np.any(ref_semitones < 0, axis=1)] = -1.0
return comparison_scores | [
"def",
"root",
"(",
"reference_labels",
",",
"estimated_labels",
")",
":",
"validate",
"(",
"reference_labels",
",",
"estimated_labels",
")",
"ref_roots",
",",
"ref_semitones",
"=",
"encode_many",
"(",
"reference_labels",
",",
"False",
")",
"[",
":",
"2",
"]",
... | Compare chords according to roots.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.root(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0], or -1 if the comparison is out of
gamut. | [
"Compare",
"chords",
"according",
"to",
"roots",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L999-L1042 |
21,426 | craffel/mir_eval | mir_eval/chord.py | mirex | def mirex(reference_labels, estimated_labels):
"""Compare chords along MIREX rules.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.mirex(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0]
"""
validate(reference_labels, estimated_labels)
# TODO(?): Should this be an argument?
min_intersection = 3
ref_data = encode_many(reference_labels, False)
ref_chroma = rotate_bitmaps_to_roots(ref_data[1], ref_data[0])
est_data = encode_many(estimated_labels, False)
est_chroma = rotate_bitmaps_to_roots(est_data[1], est_data[0])
eq_chroma = (ref_chroma * est_chroma).sum(axis=-1)
# Chroma matching for set bits
comparison_scores = (eq_chroma >= min_intersection).astype(np.float)
# No-chord matching; match -1 roots, SKIP_CHORDS dropped next
no_root = np.logical_and(ref_data[0] == -1, est_data[0] == -1)
comparison_scores[no_root] = 1.0
# Skip chords where the number of active semitones `n` is
# 0 < n < `min_intersection`.
ref_semitone_count = (ref_data[1] > 0).sum(axis=1)
skip_idx = np.logical_and(ref_semitone_count > 0,
ref_semitone_count < min_intersection)
# Also ignore 'X' chords.
np.logical_or(skip_idx, np.any(ref_data[1] < 0, axis=1), skip_idx)
comparison_scores[skip_idx] = -1.0
return comparison_scores | python | def mirex(reference_labels, estimated_labels):
validate(reference_labels, estimated_labels)
# TODO(?): Should this be an argument?
min_intersection = 3
ref_data = encode_many(reference_labels, False)
ref_chroma = rotate_bitmaps_to_roots(ref_data[1], ref_data[0])
est_data = encode_many(estimated_labels, False)
est_chroma = rotate_bitmaps_to_roots(est_data[1], est_data[0])
eq_chroma = (ref_chroma * est_chroma).sum(axis=-1)
# Chroma matching for set bits
comparison_scores = (eq_chroma >= min_intersection).astype(np.float)
# No-chord matching; match -1 roots, SKIP_CHORDS dropped next
no_root = np.logical_and(ref_data[0] == -1, est_data[0] == -1)
comparison_scores[no_root] = 1.0
# Skip chords where the number of active semitones `n` is
# 0 < n < `min_intersection`.
ref_semitone_count = (ref_data[1] > 0).sum(axis=1)
skip_idx = np.logical_and(ref_semitone_count > 0,
ref_semitone_count < min_intersection)
# Also ignore 'X' chords.
np.logical_or(skip_idx, np.any(ref_data[1] < 0, axis=1), skip_idx)
comparison_scores[skip_idx] = -1.0
return comparison_scores | [
"def",
"mirex",
"(",
"reference_labels",
",",
"estimated_labels",
")",
":",
"validate",
"(",
"reference_labels",
",",
"estimated_labels",
")",
"# TODO(?): Should this be an argument?",
"min_intersection",
"=",
"3",
"ref_data",
"=",
"encode_many",
"(",
"reference_labels",
... | Compare chords along MIREX rules.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> est_intervals, est_labels = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, ref_intervals.min(),
... ref_intervals.max(), mir_eval.chord.NO_CHORD,
... mir_eval.chord.NO_CHORD)
>>> (intervals,
... ref_labels,
... est_labels) = mir_eval.util.merge_labeled_intervals(
... ref_intervals, ref_labels, est_intervals, est_labels)
>>> durations = mir_eval.util.intervals_to_durations(intervals)
>>> comparisons = mir_eval.chord.mirex(ref_labels, est_labels)
>>> score = mir_eval.chord.weighted_accuracy(comparisons, durations)
Parameters
----------
reference_labels : list, len=n
Reference chord labels to score against.
estimated_labels : list, len=n
Estimated chord labels to score against.
Returns
-------
comparison_scores : np.ndarray, shape=(n,), dtype=float
Comparison scores, in [0.0, 1.0] | [
"Compare",
"chords",
"along",
"MIREX",
"rules",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L1045-L1104 |
21,427 | craffel/mir_eval | mir_eval/chord.py | seg | def seg(reference_intervals, estimated_intervals):
"""Compute the MIREX 'MeanSeg' score.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> score = mir_eval.chord.seg(ref_intervals, est_intervals)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2), dtype=float
Reference chord intervals to score against.
estimated_intervals : np.ndarray, shape=(m, 2), dtype=float
Estimated chord intervals to score against.
Returns
-------
segmentation score : float
Comparison score, in [0.0, 1.0], where 1.0 means perfect segmentation.
"""
return min(underseg(reference_intervals, estimated_intervals),
overseg(reference_intervals, estimated_intervals)) | python | def seg(reference_intervals, estimated_intervals):
return min(underseg(reference_intervals, estimated_intervals),
overseg(reference_intervals, estimated_intervals)) | [
"def",
"seg",
"(",
"reference_intervals",
",",
"estimated_intervals",
")",
":",
"return",
"min",
"(",
"underseg",
"(",
"reference_intervals",
",",
"estimated_intervals",
")",
",",
"overseg",
"(",
"reference_intervals",
",",
"estimated_intervals",
")",
")"
] | Compute the MIREX 'MeanSeg' score.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> score = mir_eval.chord.seg(ref_intervals, est_intervals)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2), dtype=float
Reference chord intervals to score against.
estimated_intervals : np.ndarray, shape=(m, 2), dtype=float
Estimated chord intervals to score against.
Returns
-------
segmentation score : float
Comparison score, in [0.0, 1.0], where 1.0 means perfect segmentation. | [
"Compute",
"the",
"MIREX",
"MeanSeg",
"score",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L1455-L1480 |
21,428 | craffel/mir_eval | mir_eval/chord.py | merge_chord_intervals | def merge_chord_intervals(intervals, labels):
"""
Merge consecutive chord intervals if they represent the same chord.
Parameters
----------
intervals : np.ndarray, shape=(n, 2), dtype=float
Chord intervals to be merged, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
labels : list, shape=(n,)
Chord labels to be merged, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
Returns
-------
merged_ivs : np.ndarray, shape=(k, 2), dtype=float
Merged chord intervals, k <= n
"""
roots, semitones, basses = encode_many(labels, True)
merged_ivs = []
prev_rt = None
prev_st = None
prev_ba = None
for s, e, rt, st, ba in zip(intervals[:, 0], intervals[:, 1],
roots, semitones, basses):
if rt != prev_rt or (st != prev_st).any() or ba != prev_ba:
prev_rt, prev_st, prev_ba = rt, st, ba
merged_ivs.append([s, e])
else:
merged_ivs[-1][-1] = e
return np.array(merged_ivs) | python | def merge_chord_intervals(intervals, labels):
roots, semitones, basses = encode_many(labels, True)
merged_ivs = []
prev_rt = None
prev_st = None
prev_ba = None
for s, e, rt, st, ba in zip(intervals[:, 0], intervals[:, 1],
roots, semitones, basses):
if rt != prev_rt or (st != prev_st).any() or ba != prev_ba:
prev_rt, prev_st, prev_ba = rt, st, ba
merged_ivs.append([s, e])
else:
merged_ivs[-1][-1] = e
return np.array(merged_ivs) | [
"def",
"merge_chord_intervals",
"(",
"intervals",
",",
"labels",
")",
":",
"roots",
",",
"semitones",
",",
"basses",
"=",
"encode_many",
"(",
"labels",
",",
"True",
")",
"merged_ivs",
"=",
"[",
"]",
"prev_rt",
"=",
"None",
"prev_st",
"=",
"None",
"prev_ba"... | Merge consecutive chord intervals if they represent the same chord.
Parameters
----------
intervals : np.ndarray, shape=(n, 2), dtype=float
Chord intervals to be merged, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
labels : list, shape=(n,)
Chord labels to be merged, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
Returns
-------
merged_ivs : np.ndarray, shape=(k, 2), dtype=float
Merged chord intervals, k <= n | [
"Merge",
"consecutive",
"chord",
"intervals",
"if",
"they",
"represent",
"the",
"same",
"chord",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L1483-L1514 |
21,429 | craffel/mir_eval | mir_eval/chord.py | evaluate | def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
"""Computes weighted accuracy for all comparison functions for the given
reference and estimated annotations.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> scores = mir_eval.chord.evaluate(ref_intervals, ref_labels,
... est_intervals, est_labels)
Parameters
----------
ref_intervals : np.ndarray, shape=(n, 2)
Reference chord intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
ref_labels : list, shape=(n,)
reference chord labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
est_intervals : np.ndarray, shape=(m, 2)
estimated chord intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
est_labels : list, shape=(m,)
estimated chord labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
"""
# Append or crop estimated intervals so their span is the same as reference
est_intervals, est_labels = util.adjust_intervals(
est_intervals, est_labels, ref_intervals.min(), ref_intervals.max(),
NO_CHORD, NO_CHORD)
# use merged intervals for segmentation evaluation
merged_ref_intervals = merge_chord_intervals(ref_intervals, ref_labels)
merged_est_intervals = merge_chord_intervals(est_intervals, est_labels)
# Adjust the labels so that they span the same intervals
intervals, ref_labels, est_labels = util.merge_labeled_intervals(
ref_intervals, ref_labels, est_intervals, est_labels)
# Convert intervals to durations (used as weights)
durations = util.intervals_to_durations(intervals)
# Store scores for each comparison function
scores = collections.OrderedDict()
scores['thirds'] = weighted_accuracy(thirds(ref_labels, est_labels),
durations)
scores['thirds_inv'] = weighted_accuracy(thirds_inv(ref_labels,
est_labels), durations)
scores['triads'] = weighted_accuracy(triads(ref_labels, est_labels),
durations)
scores['triads_inv'] = weighted_accuracy(triads_inv(ref_labels,
est_labels), durations)
scores['tetrads'] = weighted_accuracy(tetrads(ref_labels, est_labels),
durations)
scores['tetrads_inv'] = weighted_accuracy(tetrads_inv(ref_labels,
est_labels),
durations)
scores['root'] = weighted_accuracy(root(ref_labels, est_labels), durations)
scores['mirex'] = weighted_accuracy(mirex(ref_labels, est_labels),
durations)
scores['majmin'] = weighted_accuracy(majmin(ref_labels, est_labels),
durations)
scores['majmin_inv'] = weighted_accuracy(majmin_inv(ref_labels,
est_labels), durations)
scores['sevenths'] = weighted_accuracy(sevenths(ref_labels, est_labels),
durations)
scores['sevenths_inv'] = weighted_accuracy(sevenths_inv(ref_labels,
est_labels),
durations)
scores['underseg'] = underseg(merged_ref_intervals, merged_est_intervals)
scores['overseg'] = overseg(merged_ref_intervals, merged_est_intervals)
scores['seg'] = min(scores['overseg'], scores['underseg'])
return scores | python | def evaluate(ref_intervals, ref_labels, est_intervals, est_labels, **kwargs):
# Append or crop estimated intervals so their span is the same as reference
est_intervals, est_labels = util.adjust_intervals(
est_intervals, est_labels, ref_intervals.min(), ref_intervals.max(),
NO_CHORD, NO_CHORD)
# use merged intervals for segmentation evaluation
merged_ref_intervals = merge_chord_intervals(ref_intervals, ref_labels)
merged_est_intervals = merge_chord_intervals(est_intervals, est_labels)
# Adjust the labels so that they span the same intervals
intervals, ref_labels, est_labels = util.merge_labeled_intervals(
ref_intervals, ref_labels, est_intervals, est_labels)
# Convert intervals to durations (used as weights)
durations = util.intervals_to_durations(intervals)
# Store scores for each comparison function
scores = collections.OrderedDict()
scores['thirds'] = weighted_accuracy(thirds(ref_labels, est_labels),
durations)
scores['thirds_inv'] = weighted_accuracy(thirds_inv(ref_labels,
est_labels), durations)
scores['triads'] = weighted_accuracy(triads(ref_labels, est_labels),
durations)
scores['triads_inv'] = weighted_accuracy(triads_inv(ref_labels,
est_labels), durations)
scores['tetrads'] = weighted_accuracy(tetrads(ref_labels, est_labels),
durations)
scores['tetrads_inv'] = weighted_accuracy(tetrads_inv(ref_labels,
est_labels),
durations)
scores['root'] = weighted_accuracy(root(ref_labels, est_labels), durations)
scores['mirex'] = weighted_accuracy(mirex(ref_labels, est_labels),
durations)
scores['majmin'] = weighted_accuracy(majmin(ref_labels, est_labels),
durations)
scores['majmin_inv'] = weighted_accuracy(majmin_inv(ref_labels,
est_labels), durations)
scores['sevenths'] = weighted_accuracy(sevenths(ref_labels, est_labels),
durations)
scores['sevenths_inv'] = weighted_accuracy(sevenths_inv(ref_labels,
est_labels),
durations)
scores['underseg'] = underseg(merged_ref_intervals, merged_est_intervals)
scores['overseg'] = overseg(merged_ref_intervals, merged_est_intervals)
scores['seg'] = min(scores['overseg'], scores['underseg'])
return scores | [
"def",
"evaluate",
"(",
"ref_intervals",
",",
"ref_labels",
",",
"est_intervals",
",",
"est_labels",
",",
"*",
"*",
"kwargs",
")",
":",
"# Append or crop estimated intervals so their span is the same as reference",
"est_intervals",
",",
"est_labels",
"=",
"util",
".",
"... | Computes weighted accuracy for all comparison functions for the given
reference and estimated annotations.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> scores = mir_eval.chord.evaluate(ref_intervals, ref_labels,
... est_intervals, est_labels)
Parameters
----------
ref_intervals : np.ndarray, shape=(n, 2)
Reference chord intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
ref_labels : list, shape=(n,)
reference chord labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
est_intervals : np.ndarray, shape=(m, 2)
estimated chord intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
est_labels : list, shape=(m,)
estimated chord labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved. | [
"Computes",
"weighted",
"accuracy",
"for",
"all",
"comparison",
"functions",
"for",
"the",
"given",
"reference",
"and",
"estimated",
"annotations",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/chord.py#L1517-L1604 |
21,430 | craffel/mir_eval | mir_eval/pattern.py | _n_onset_midi | def _n_onset_midi(patterns):
"""Computes the number of onset_midi objects in a pattern
Parameters
----------
patterns :
A list of patterns using the format returned by
:func:`mir_eval.io.load_patterns()`
Returns
-------
n_onsets : int
Number of onsets within the pattern.
"""
return len([o_m for pat in patterns for occ in pat for o_m in occ]) | python | def _n_onset_midi(patterns):
return len([o_m for pat in patterns for occ in pat for o_m in occ]) | [
"def",
"_n_onset_midi",
"(",
"patterns",
")",
":",
"return",
"len",
"(",
"[",
"o_m",
"for",
"pat",
"in",
"patterns",
"for",
"occ",
"in",
"pat",
"for",
"o_m",
"in",
"occ",
"]",
")"
] | Computes the number of onset_midi objects in a pattern
Parameters
----------
patterns :
A list of patterns using the format returned by
:func:`mir_eval.io.load_patterns()`
Returns
-------
n_onsets : int
Number of onsets within the pattern. | [
"Computes",
"the",
"number",
"of",
"onset_midi",
"objects",
"in",
"a",
"pattern"
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L64-L79 |
21,431 | craffel/mir_eval | mir_eval/pattern.py | validate | def validate(reference_patterns, estimated_patterns):
"""Checks that the input annotations to a metric look like valid pattern
lists, and throws helpful errors if not.
Parameters
----------
reference_patterns : list
The reference patterns using the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
Returns
-------
"""
# Warn if pattern lists are empty
if _n_onset_midi(reference_patterns) == 0:
warnings.warn('Reference patterns are empty.')
if _n_onset_midi(estimated_patterns) == 0:
warnings.warn('Estimated patterns are empty.')
for patterns in [reference_patterns, estimated_patterns]:
for pattern in patterns:
if len(pattern) <= 0:
raise ValueError("Each pattern must contain at least one "
"occurrence.")
for occurrence in pattern:
for onset_midi in occurrence:
if len(onset_midi) != 2:
raise ValueError("The (onset, midi) tuple must "
"contain exactly 2 elements.") | python | def validate(reference_patterns, estimated_patterns):
# Warn if pattern lists are empty
if _n_onset_midi(reference_patterns) == 0:
warnings.warn('Reference patterns are empty.')
if _n_onset_midi(estimated_patterns) == 0:
warnings.warn('Estimated patterns are empty.')
for patterns in [reference_patterns, estimated_patterns]:
for pattern in patterns:
if len(pattern) <= 0:
raise ValueError("Each pattern must contain at least one "
"occurrence.")
for occurrence in pattern:
for onset_midi in occurrence:
if len(onset_midi) != 2:
raise ValueError("The (onset, midi) tuple must "
"contain exactly 2 elements.") | [
"def",
"validate",
"(",
"reference_patterns",
",",
"estimated_patterns",
")",
":",
"# Warn if pattern lists are empty",
"if",
"_n_onset_midi",
"(",
"reference_patterns",
")",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"'Reference patterns are empty.'",
")",
"if",
"... | Checks that the input annotations to a metric look like valid pattern
lists, and throws helpful errors if not.
Parameters
----------
reference_patterns : list
The reference patterns using the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
Returns
------- | [
"Checks",
"that",
"the",
"input",
"annotations",
"to",
"a",
"metric",
"look",
"like",
"valid",
"pattern",
"lists",
"and",
"throws",
"helpful",
"errors",
"if",
"not",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L82-L112 |
21,432 | craffel/mir_eval | mir_eval/pattern.py | _occurrence_intersection | def _occurrence_intersection(occ_P, occ_Q):
"""Computes the intersection between two occurrences.
Parameters
----------
occ_P : list of tuples
(onset, midi) pairs representing the reference occurrence.
occ_Q : list
second list of (onset, midi) tuples
Returns
-------
S : set
Set of the intersection between occ_P and occ_Q.
"""
set_P = set([tuple(onset_midi) for onset_midi in occ_P])
set_Q = set([tuple(onset_midi) for onset_midi in occ_Q])
return set_P & set_Q | python | def _occurrence_intersection(occ_P, occ_Q):
set_P = set([tuple(onset_midi) for onset_midi in occ_P])
set_Q = set([tuple(onset_midi) for onset_midi in occ_Q])
return set_P & set_Q | [
"def",
"_occurrence_intersection",
"(",
"occ_P",
",",
"occ_Q",
")",
":",
"set_P",
"=",
"set",
"(",
"[",
"tuple",
"(",
"onset_midi",
")",
"for",
"onset_midi",
"in",
"occ_P",
"]",
")",
"set_Q",
"=",
"set",
"(",
"[",
"tuple",
"(",
"onset_midi",
")",
"for"... | Computes the intersection between two occurrences.
Parameters
----------
occ_P : list of tuples
(onset, midi) pairs representing the reference occurrence.
occ_Q : list
second list of (onset, midi) tuples
Returns
-------
S : set
Set of the intersection between occ_P and occ_Q. | [
"Computes",
"the",
"intersection",
"between",
"two",
"occurrences",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L115-L133 |
21,433 | craffel/mir_eval | mir_eval/pattern.py | _compute_score_matrix | def _compute_score_matrix(P, Q, similarity_metric="cardinality_score"):
"""Computes the score matrix between the patterns P and Q.
Parameters
----------
P : list
Pattern containing a list of occurrences.
Q : list
Pattern containing a list of occurrences.
similarity_metric : str
A string representing the metric to be used
when computing the similarity matrix. Accepted values:
- "cardinality_score":
Count of the intersection between occurrences.
(Default value = "cardinality_score")
Returns
-------
sm : np.array
The score matrix between P and Q using the similarity_metric.
"""
sm = np.zeros((len(P), len(Q))) # The score matrix
for iP, occ_P in enumerate(P):
for iQ, occ_Q in enumerate(Q):
if similarity_metric == "cardinality_score":
denom = float(np.max([len(occ_P), len(occ_Q)]))
# Compute the score
sm[iP, iQ] = len(_occurrence_intersection(occ_P, occ_Q)) / \
denom
# TODO: More scores: 'normalised matching socre'
else:
raise ValueError("The similarity metric (%s) can only be: "
"'cardinality_score'.")
return sm | python | def _compute_score_matrix(P, Q, similarity_metric="cardinality_score"):
sm = np.zeros((len(P), len(Q))) # The score matrix
for iP, occ_P in enumerate(P):
for iQ, occ_Q in enumerate(Q):
if similarity_metric == "cardinality_score":
denom = float(np.max([len(occ_P), len(occ_Q)]))
# Compute the score
sm[iP, iQ] = len(_occurrence_intersection(occ_P, occ_Q)) / \
denom
# TODO: More scores: 'normalised matching socre'
else:
raise ValueError("The similarity metric (%s) can only be: "
"'cardinality_score'.")
return sm | [
"def",
"_compute_score_matrix",
"(",
"P",
",",
"Q",
",",
"similarity_metric",
"=",
"\"cardinality_score\"",
")",
":",
"sm",
"=",
"np",
".",
"zeros",
"(",
"(",
"len",
"(",
"P",
")",
",",
"len",
"(",
"Q",
")",
")",
")",
"# The score matrix",
"for",
"iP",... | Computes the score matrix between the patterns P and Q.
Parameters
----------
P : list
Pattern containing a list of occurrences.
Q : list
Pattern containing a list of occurrences.
similarity_metric : str
A string representing the metric to be used
when computing the similarity matrix. Accepted values:
- "cardinality_score":
Count of the intersection between occurrences.
(Default value = "cardinality_score")
Returns
-------
sm : np.array
The score matrix between P and Q using the similarity_metric. | [
"Computes",
"the",
"score",
"matrix",
"between",
"the",
"patterns",
"P",
"and",
"Q",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L136-L170 |
21,434 | craffel/mir_eval | mir_eval/pattern.py | standard_FPR | def standard_FPR(reference_patterns, estimated_patterns, tol=1e-5):
"""Standard F1 Score, Precision and Recall.
This metric checks if the prototype patterns of the reference match
possible translated patterns in the prototype patterns of the estimations.
Since the sizes of these prototypes must be equal, this metric is quite
restictive and it tends to be 0 in most of 2013 MIREX results.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> F, P, R = mir_eval.pattern.standard_FPR(ref_patterns, est_patterns)
Parameters
----------
reference_patterns : list
The reference patterns using the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
tol : float
Tolerance level when comparing reference against estimation.
Default parameter is the one found in the original matlab code by
Tom Collins used for MIREX 2013.
(Default value = 1e-5)
Returns
-------
f_measure : float
The standard F1 Score
precision : float
The standard Precision
recall : float
The standard Recall
"""
validate(reference_patterns, estimated_patterns)
nP = len(reference_patterns) # Number of patterns in the reference
nQ = len(estimated_patterns) # Number of patterns in the estimation
k = 0 # Number of patterns that match
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Find matches of the prototype patterns
for ref_pattern in reference_patterns:
P = np.asarray(ref_pattern[0]) # Get reference prototype
for est_pattern in estimated_patterns:
Q = np.asarray(est_pattern[0]) # Get estimation prototype
if len(P) != len(Q):
continue
# Check transposition given a certain tolerance
if (len(P) == len(Q) == 1 or
np.max(np.abs(np.diff(P - Q, axis=0))) < tol):
k += 1
break
# Compute the standard measures
precision = k / float(nQ)
recall = k / float(nP)
f_measure = util.f_measure(precision, recall)
return f_measure, precision, recall | python | def standard_FPR(reference_patterns, estimated_patterns, tol=1e-5):
validate(reference_patterns, estimated_patterns)
nP = len(reference_patterns) # Number of patterns in the reference
nQ = len(estimated_patterns) # Number of patterns in the estimation
k = 0 # Number of patterns that match
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Find matches of the prototype patterns
for ref_pattern in reference_patterns:
P = np.asarray(ref_pattern[0]) # Get reference prototype
for est_pattern in estimated_patterns:
Q = np.asarray(est_pattern[0]) # Get estimation prototype
if len(P) != len(Q):
continue
# Check transposition given a certain tolerance
if (len(P) == len(Q) == 1 or
np.max(np.abs(np.diff(P - Q, axis=0))) < tol):
k += 1
break
# Compute the standard measures
precision = k / float(nQ)
recall = k / float(nP)
f_measure = util.f_measure(precision, recall)
return f_measure, precision, recall | [
"def",
"standard_FPR",
"(",
"reference_patterns",
",",
"estimated_patterns",
",",
"tol",
"=",
"1e-5",
")",
":",
"validate",
"(",
"reference_patterns",
",",
"estimated_patterns",
")",
"nP",
"=",
"len",
"(",
"reference_patterns",
")",
"# Number of patterns in the refere... | Standard F1 Score, Precision and Recall.
This metric checks if the prototype patterns of the reference match
possible translated patterns in the prototype patterns of the estimations.
Since the sizes of these prototypes must be equal, this metric is quite
restictive and it tends to be 0 in most of 2013 MIREX results.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> F, P, R = mir_eval.pattern.standard_FPR(ref_patterns, est_patterns)
Parameters
----------
reference_patterns : list
The reference patterns using the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
tol : float
Tolerance level when comparing reference against estimation.
Default parameter is the one found in the original matlab code by
Tom Collins used for MIREX 2013.
(Default value = 1e-5)
Returns
-------
f_measure : float
The standard F1 Score
precision : float
The standard Precision
recall : float
The standard Recall | [
"Standard",
"F1",
"Score",
"Precision",
"and",
"Recall",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L173-L239 |
21,435 | craffel/mir_eval | mir_eval/pattern.py | three_layer_FPR | def three_layer_FPR(reference_patterns, estimated_patterns):
"""Three Layer F1 Score, Precision and Recall. As described by Meridith.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> F, P, R = mir_eval.pattern.three_layer_FPR(ref_patterns,
... est_patterns)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
Returns
-------
f_measure : float
The three-layer F1 Score
precision : float
The three-layer Precision
recall : float
The three-layer Recall
"""
validate(reference_patterns, estimated_patterns)
def compute_first_layer_PR(ref_occs, est_occs):
"""Computes the first layer Precision and Recall values given the
set of occurrences in the reference and the set of occurrences in the
estimation.
Parameters
----------
ref_occs :
est_occs :
Returns
-------
"""
# Find the length of the intersection between reference and estimation
s = len(_occurrence_intersection(ref_occs, est_occs))
# Compute the first layer scores
precision = s / float(len(ref_occs))
recall = s / float(len(est_occs))
return precision, recall
def compute_second_layer_PR(ref_pattern, est_pattern):
"""Computes the second layer Precision and Recall values given the
set of occurrences in the reference and the set of occurrences in the
estimation.
Parameters
----------
ref_pattern :
est_pattern :
Returns
-------
"""
# Compute the first layer scores
F_1 = compute_layer(ref_pattern, est_pattern)
# Compute the second layer scores
precision = np.mean(np.max(F_1, axis=0))
recall = np.mean(np.max(F_1, axis=1))
return precision, recall
def compute_layer(ref_elements, est_elements, layer=1):
"""Computes the F-measure matrix for a given layer. The reference and
estimated elements can be either patters or occurrences, depending
on the layer.
For layer 1, the elements must be occurrences.
For layer 2, the elements must be patterns.
Parameters
----------
ref_elements :
est_elements :
layer :
(Default value = 1)
Returns
-------
"""
if layer != 1 and layer != 2:
raise ValueError("Layer (%d) must be an integer between 1 and 2"
% layer)
nP = len(ref_elements) # Number of elements in reference
nQ = len(est_elements) # Number of elements in estimation
F = np.zeros((nP, nQ)) # F-measure matrix for the given layer
for iP in range(nP):
for iQ in range(nQ):
if layer == 1:
func = compute_first_layer_PR
elif layer == 2:
func = compute_second_layer_PR
# Compute layer scores
precision, recall = func(ref_elements[iP], est_elements[iQ])
F[iP, iQ] = util.f_measure(precision, recall)
return F
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Compute the second layer (it includes the first layer)
F_2 = compute_layer(reference_patterns, estimated_patterns, layer=2)
# Compute the final scores (third layer)
precision_3 = np.mean(np.max(F_2, axis=0))
recall_3 = np.mean(np.max(F_2, axis=1))
f_measure_3 = util.f_measure(precision_3, recall_3)
return f_measure_3, precision_3, recall_3 | python | def three_layer_FPR(reference_patterns, estimated_patterns):
validate(reference_patterns, estimated_patterns)
def compute_first_layer_PR(ref_occs, est_occs):
"""Computes the first layer Precision and Recall values given the
set of occurrences in the reference and the set of occurrences in the
estimation.
Parameters
----------
ref_occs :
est_occs :
Returns
-------
"""
# Find the length of the intersection between reference and estimation
s = len(_occurrence_intersection(ref_occs, est_occs))
# Compute the first layer scores
precision = s / float(len(ref_occs))
recall = s / float(len(est_occs))
return precision, recall
def compute_second_layer_PR(ref_pattern, est_pattern):
"""Computes the second layer Precision and Recall values given the
set of occurrences in the reference and the set of occurrences in the
estimation.
Parameters
----------
ref_pattern :
est_pattern :
Returns
-------
"""
# Compute the first layer scores
F_1 = compute_layer(ref_pattern, est_pattern)
# Compute the second layer scores
precision = np.mean(np.max(F_1, axis=0))
recall = np.mean(np.max(F_1, axis=1))
return precision, recall
def compute_layer(ref_elements, est_elements, layer=1):
"""Computes the F-measure matrix for a given layer. The reference and
estimated elements can be either patters or occurrences, depending
on the layer.
For layer 1, the elements must be occurrences.
For layer 2, the elements must be patterns.
Parameters
----------
ref_elements :
est_elements :
layer :
(Default value = 1)
Returns
-------
"""
if layer != 1 and layer != 2:
raise ValueError("Layer (%d) must be an integer between 1 and 2"
% layer)
nP = len(ref_elements) # Number of elements in reference
nQ = len(est_elements) # Number of elements in estimation
F = np.zeros((nP, nQ)) # F-measure matrix for the given layer
for iP in range(nP):
for iQ in range(nQ):
if layer == 1:
func = compute_first_layer_PR
elif layer == 2:
func = compute_second_layer_PR
# Compute layer scores
precision, recall = func(ref_elements[iP], est_elements[iQ])
F[iP, iQ] = util.f_measure(precision, recall)
return F
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Compute the second layer (it includes the first layer)
F_2 = compute_layer(reference_patterns, estimated_patterns, layer=2)
# Compute the final scores (third layer)
precision_3 = np.mean(np.max(F_2, axis=0))
recall_3 = np.mean(np.max(F_2, axis=1))
f_measure_3 = util.f_measure(precision_3, recall_3)
return f_measure_3, precision_3, recall_3 | [
"def",
"three_layer_FPR",
"(",
"reference_patterns",
",",
"estimated_patterns",
")",
":",
"validate",
"(",
"reference_patterns",
",",
"estimated_patterns",
")",
"def",
"compute_first_layer_PR",
"(",
"ref_occs",
",",
"est_occs",
")",
":",
"\"\"\"Computes the first layer Pr... | Three Layer F1 Score, Precision and Recall. As described by Meridith.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> F, P, R = mir_eval.pattern.three_layer_FPR(ref_patterns,
... est_patterns)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
Returns
-------
f_measure : float
The three-layer F1 Score
precision : float
The three-layer Precision
recall : float
The three-layer Recall | [
"Three",
"Layer",
"F1",
"Score",
"Precision",
"and",
"Recall",
".",
"As",
"described",
"by",
"Meridith",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L390-L520 |
21,436 | craffel/mir_eval | mir_eval/pattern.py | first_n_three_layer_P | def first_n_three_layer_P(reference_patterns, estimated_patterns, n=5):
"""First n three-layer precision.
This metric is basically the same as the three-layer FPR but it is only
applied to the first n estimated patterns, and it only returns the
precision. In MIREX and typically, n = 5.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> P = mir_eval.pattern.first_n_three_layer_P(ref_patterns,
... est_patterns, n=5)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
n : int
Number of patterns to consider from the estimated results, in
the order they appear in the matrix
(Default value = 5)
Returns
-------
precision : float
The first n three-layer Precision
"""
validate(reference_patterns, estimated_patterns)
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Get only the first n patterns from the estimated results
fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]
# Compute the three-layer scores for the first n estimated patterns
F, P, R = three_layer_FPR(reference_patterns, fn_est_patterns)
return P | python | def first_n_three_layer_P(reference_patterns, estimated_patterns, n=5):
validate(reference_patterns, estimated_patterns)
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Get only the first n patterns from the estimated results
fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]
# Compute the three-layer scores for the first n estimated patterns
F, P, R = three_layer_FPR(reference_patterns, fn_est_patterns)
return P | [
"def",
"first_n_three_layer_P",
"(",
"reference_patterns",
",",
"estimated_patterns",
",",
"n",
"=",
"5",
")",
":",
"validate",
"(",
"reference_patterns",
",",
"estimated_patterns",
")",
"# If no patterns were provided, metric is zero",
"if",
"_n_onset_midi",
"(",
"refere... | First n three-layer precision.
This metric is basically the same as the three-layer FPR but it is only
applied to the first n estimated patterns, and it only returns the
precision. In MIREX and typically, n = 5.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> P = mir_eval.pattern.first_n_three_layer_P(ref_patterns,
... est_patterns, n=5)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
n : int
Number of patterns to consider from the estimated results, in
the order they appear in the matrix
(Default value = 5)
Returns
-------
precision : float
The first n three-layer Precision | [
"First",
"n",
"three",
"-",
"layer",
"precision",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L523-L568 |
21,437 | craffel/mir_eval | mir_eval/pattern.py | first_n_target_proportion_R | def first_n_target_proportion_R(reference_patterns, estimated_patterns, n=5):
"""First n target proportion establishment recall metric.
This metric is similar is similar to the establishment FPR score, but it
only takes into account the first n estimated patterns and it only
outputs the Recall value of it.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> R = mir_eval.pattern.first_n_target_proportion_R(
... ref_patterns, est_patterns, n=5)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
n : int
Number of patterns to consider from the estimated results, in
the order they appear in the matrix.
(Default value = 5)
Returns
-------
recall : float
The first n target proportion Recall.
"""
validate(reference_patterns, estimated_patterns)
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Get only the first n patterns from the estimated results
fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]
F, P, R = establishment_FPR(reference_patterns, fn_est_patterns)
return R | python | def first_n_target_proportion_R(reference_patterns, estimated_patterns, n=5):
validate(reference_patterns, estimated_patterns)
# If no patterns were provided, metric is zero
if _n_onset_midi(reference_patterns) == 0 or \
_n_onset_midi(estimated_patterns) == 0:
return 0., 0., 0.
# Get only the first n patterns from the estimated results
fn_est_patterns = estimated_patterns[:min(len(estimated_patterns), n)]
F, P, R = establishment_FPR(reference_patterns, fn_est_patterns)
return R | [
"def",
"first_n_target_proportion_R",
"(",
"reference_patterns",
",",
"estimated_patterns",
",",
"n",
"=",
"5",
")",
":",
"validate",
"(",
"reference_patterns",
",",
"estimated_patterns",
")",
"# If no patterns were provided, metric is zero",
"if",
"_n_onset_midi",
"(",
"... | First n target proportion establishment recall metric.
This metric is similar is similar to the establishment FPR score, but it
only takes into account the first n estimated patterns and it only
outputs the Recall value of it.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> R = mir_eval.pattern.first_n_target_proportion_R(
... ref_patterns, est_patterns, n=5)
Parameters
----------
reference_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
estimated_patterns : list
The estimated patterns in the same format
n : int
Number of patterns to consider from the estimated results, in
the order they appear in the matrix.
(Default value = 5)
Returns
-------
recall : float
The first n target proportion Recall. | [
"First",
"n",
"target",
"proportion",
"establishment",
"recall",
"metric",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L571-L614 |
21,438 | craffel/mir_eval | mir_eval/pattern.py | evaluate | def evaluate(ref_patterns, est_patterns, **kwargs):
"""Load data and perform the evaluation.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> scores = mir_eval.pattern.evaluate(ref_patterns, est_patterns)
Parameters
----------
ref_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
est_patterns : list
The estimated patterns in the same format
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
"""
# Compute all the metrics
scores = collections.OrderedDict()
# Standard scores
scores['F'], scores['P'], scores['R'] = \
util.filter_kwargs(standard_FPR, ref_patterns, est_patterns, **kwargs)
# Establishment scores
scores['F_est'], scores['P_est'], scores['R_est'] = \
util.filter_kwargs(establishment_FPR, ref_patterns, est_patterns,
**kwargs)
# Occurrence scores
# Force these values for thresh
kwargs['thresh'] = .5
scores['F_occ.5'], scores['P_occ.5'], scores['R_occ.5'] = \
util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns,
**kwargs)
kwargs['thresh'] = .75
scores['F_occ.75'], scores['P_occ.75'], scores['R_occ.75'] = \
util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns,
**kwargs)
# Three-layer scores
scores['F_3'], scores['P_3'], scores['R_3'] = \
util.filter_kwargs(three_layer_FPR, ref_patterns, est_patterns,
**kwargs)
# First Five Patterns scores
# Set default value of n
if 'n' not in kwargs:
kwargs['n'] = 5
scores['FFP'] = util.filter_kwargs(first_n_three_layer_P, ref_patterns,
est_patterns, **kwargs)
scores['FFTP_est'] = \
util.filter_kwargs(first_n_target_proportion_R, ref_patterns,
est_patterns, **kwargs)
return scores | python | def evaluate(ref_patterns, est_patterns, **kwargs):
# Compute all the metrics
scores = collections.OrderedDict()
# Standard scores
scores['F'], scores['P'], scores['R'] = \
util.filter_kwargs(standard_FPR, ref_patterns, est_patterns, **kwargs)
# Establishment scores
scores['F_est'], scores['P_est'], scores['R_est'] = \
util.filter_kwargs(establishment_FPR, ref_patterns, est_patterns,
**kwargs)
# Occurrence scores
# Force these values for thresh
kwargs['thresh'] = .5
scores['F_occ.5'], scores['P_occ.5'], scores['R_occ.5'] = \
util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns,
**kwargs)
kwargs['thresh'] = .75
scores['F_occ.75'], scores['P_occ.75'], scores['R_occ.75'] = \
util.filter_kwargs(occurrence_FPR, ref_patterns, est_patterns,
**kwargs)
# Three-layer scores
scores['F_3'], scores['P_3'], scores['R_3'] = \
util.filter_kwargs(three_layer_FPR, ref_patterns, est_patterns,
**kwargs)
# First Five Patterns scores
# Set default value of n
if 'n' not in kwargs:
kwargs['n'] = 5
scores['FFP'] = util.filter_kwargs(first_n_three_layer_P, ref_patterns,
est_patterns, **kwargs)
scores['FFTP_est'] = \
util.filter_kwargs(first_n_target_proportion_R, ref_patterns,
est_patterns, **kwargs)
return scores | [
"def",
"evaluate",
"(",
"ref_patterns",
",",
"est_patterns",
",",
"*",
"*",
"kwargs",
")",
":",
"# Compute all the metrics",
"scores",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"# Standard scores",
"scores",
"[",
"'F'",
"]",
",",
"scores",
"[",
"'P'",
... | Load data and perform the evaluation.
Examples
--------
>>> ref_patterns = mir_eval.io.load_patterns("ref_pattern.txt")
>>> est_patterns = mir_eval.io.load_patterns("est_pattern.txt")
>>> scores = mir_eval.pattern.evaluate(ref_patterns, est_patterns)
Parameters
----------
ref_patterns : list
The reference patterns in the format returned by
:func:`mir_eval.io.load_patterns()`
est_patterns : list
The estimated patterns in the same format
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved. | [
"Load",
"data",
"and",
"perform",
"the",
"evaluation",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/pattern.py#L617-L683 |
21,439 | craffel/mir_eval | mir_eval/transcription_velocity.py | validate | def validate(ref_intervals, ref_pitches, ref_velocities, est_intervals,
est_pitches, est_velocities):
"""Checks that the input annotations have valid time intervals, pitches,
and velocities, and throws helpful errors if not.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
ref_pitches : np.ndarray, shape=(n,)
Array of reference pitch values in Hertz
ref_velocities : np.ndarray, shape=(n,)
Array of MIDI velocities (i.e. between 0 and 127) of reference notes
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
est_pitches : np.ndarray, shape=(m,)
Array of estimated pitch values in Hertz
est_velocities : np.ndarray, shape=(m,)
Array of MIDI velocities (i.e. between 0 and 127) of estimated notes
"""
transcription.validate(ref_intervals, ref_pitches, est_intervals,
est_pitches)
# Check that velocities have the same length as intervals/pitches
if not ref_velocities.shape[0] == ref_pitches.shape[0]:
raise ValueError('Reference velocities must have the same length as '
'pitches and intervals.')
if not est_velocities.shape[0] == est_pitches.shape[0]:
raise ValueError('Estimated velocities must have the same length as '
'pitches and intervals.')
# Check that the velocities are positive
if ref_velocities.size > 0 and np.min(ref_velocities) < 0:
raise ValueError('Reference velocities must be positive.')
if est_velocities.size > 0 and np.min(est_velocities) < 0:
raise ValueError('Estimated velocities must be positive.') | python | def validate(ref_intervals, ref_pitches, ref_velocities, est_intervals,
est_pitches, est_velocities):
transcription.validate(ref_intervals, ref_pitches, est_intervals,
est_pitches)
# Check that velocities have the same length as intervals/pitches
if not ref_velocities.shape[0] == ref_pitches.shape[0]:
raise ValueError('Reference velocities must have the same length as '
'pitches and intervals.')
if not est_velocities.shape[0] == est_pitches.shape[0]:
raise ValueError('Estimated velocities must have the same length as '
'pitches and intervals.')
# Check that the velocities are positive
if ref_velocities.size > 0 and np.min(ref_velocities) < 0:
raise ValueError('Reference velocities must be positive.')
if est_velocities.size > 0 and np.min(est_velocities) < 0:
raise ValueError('Estimated velocities must be positive.') | [
"def",
"validate",
"(",
"ref_intervals",
",",
"ref_pitches",
",",
"ref_velocities",
",",
"est_intervals",
",",
"est_pitches",
",",
"est_velocities",
")",
":",
"transcription",
".",
"validate",
"(",
"ref_intervals",
",",
"ref_pitches",
",",
"est_intervals",
",",
"e... | Checks that the input annotations have valid time intervals, pitches,
and velocities, and throws helpful errors if not.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
ref_pitches : np.ndarray, shape=(n,)
Array of reference pitch values in Hertz
ref_velocities : np.ndarray, shape=(n,)
Array of MIDI velocities (i.e. between 0 and 127) of reference notes
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
est_pitches : np.ndarray, shape=(m,)
Array of estimated pitch values in Hertz
est_velocities : np.ndarray, shape=(m,)
Array of MIDI velocities (i.e. between 0 and 127) of estimated notes | [
"Checks",
"that",
"the",
"input",
"annotations",
"have",
"valid",
"time",
"intervals",
"pitches",
"and",
"velocities",
"and",
"throws",
"helpful",
"errors",
"if",
"not",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/transcription_velocity.py#L62-L95 |
21,440 | craffel/mir_eval | mir_eval/transcription_velocity.py | match_notes | def match_notes(
ref_intervals, ref_pitches, ref_velocities, est_intervals, est_pitches,
est_velocities, onset_tolerance=0.05, pitch_tolerance=50.0,
offset_ratio=0.2, offset_min_tolerance=0.05, strict=False,
velocity_tolerance=0.1):
"""Match notes, taking note velocity into consideration.
This function first calls :func:`mir_eval.transcription.match_notes` to
match notes according to the supplied intervals, pitches, onset, offset,
and pitch tolerances. The velocities of the matched notes are then used to
estimate a slope and intercept which can rescale the estimated velocities
so that they are as close as possible (in L2 sense) to their matched
reference velocities. Velocities are then normalized to the range [0, 1]. A
estimated note is then further only considered correct if its velocity is
within ``velocity_tolerance`` of its matched (according to pitch and
timing) reference note.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
ref_pitches : np.ndarray, shape=(n,)
Array of reference pitch values in Hertz
ref_velocities : np.ndarray, shape=(n,)
Array of MIDI velocities (i.e. between 0 and 127) of reference notes
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
est_pitches : np.ndarray, shape=(m,)
Array of estimated pitch values in Hertz
est_velocities : np.ndarray, shape=(m,)
Array of MIDI velocities (i.e. between 0 and 127) of estimated notes
onset_tolerance : float > 0
The tolerance for an estimated note's onset deviating from the
reference note's onset, in seconds. Default is 0.05 (50 ms).
pitch_tolerance : float > 0
The tolerance for an estimated note's pitch deviating from the
reference note's pitch, in cents. Default is 50.0 (50 cents).
offset_ratio : float > 0 or None
The ratio of the reference note's duration used to define the
offset_tolerance. Default is 0.2 (20%), meaning the
``offset_tolerance`` will equal the ``ref_duration * 0.2``, or 0.05 (50
ms), whichever is greater. If ``offset_ratio`` is set to ``None``,
offsets are ignored in the matching.
offset_min_tolerance : float > 0
The minimum tolerance for offset matching. See offset_ratio description
for an explanation of how the offset tolerance is determined. Note:
this parameter only influences the results if ``offset_ratio`` is not
``None``.
strict : bool
If ``strict=False`` (the default), threshold checks for onset, offset,
and pitch matching are performed using ``<=`` (less than or equal). If
``strict=True``, the threshold checks are performed using ``<`` (less
than).
velocity_tolerance : float > 0
Estimated notes are considered correct if, after rescaling and
normalization to [0, 1], they are within ``velocity_tolerance`` of a
matched reference note.
Returns
-------
matching : list of tuples
A list of matched reference and estimated notes.
``matching[i] == (i, j)`` where reference note ``i`` matches estimated
note ``j``.
"""
# Compute note matching as usual using standard transcription function
matching = transcription.match_notes(
ref_intervals, ref_pitches, est_intervals, est_pitches,
onset_tolerance, pitch_tolerance, offset_ratio, offset_min_tolerance,
strict)
# Rescale reference velocities to the range [0, 1]
min_velocity, max_velocity = np.min(ref_velocities), np.max(ref_velocities)
# Make the smallest possible range 1 to avoid divide by zero
velocity_range = max(1, max_velocity - min_velocity)
ref_velocities = (ref_velocities - min_velocity)/float(velocity_range)
# Convert matching list-of-tuples to array for fancy indexing
matching = np.array(matching)
# When there is no matching, return an empty list
if matching.size == 0:
return []
# Grab velocities for matched notes
ref_matched_velocities = ref_velocities[matching[:, 0]]
est_matched_velocities = est_velocities[matching[:, 1]]
# Find slope and intercept of line which produces best least-squares fit
# between matched est and ref velocities
slope, intercept = np.linalg.lstsq(
np.vstack([est_matched_velocities,
np.ones(len(est_matched_velocities))]).T,
ref_matched_velocities)[0]
# Re-scale est velocities to match ref
est_matched_velocities = slope*est_matched_velocities + intercept
# Compute the absolute error of (rescaled) estimated velocities vs.
# normalized reference velocities. Error will be in [0, 1]
velocity_diff = np.abs(est_matched_velocities - ref_matched_velocities)
# Check whether each error is within the provided tolerance
velocity_within_tolerance = (velocity_diff < velocity_tolerance)
# Only keep matches whose velocity was within the provided tolerance
matching = matching[velocity_within_tolerance]
# Convert back to list-of-tuple format
matching = [tuple(_) for _ in matching]
return matching | python | def match_notes(
ref_intervals, ref_pitches, ref_velocities, est_intervals, est_pitches,
est_velocities, onset_tolerance=0.05, pitch_tolerance=50.0,
offset_ratio=0.2, offset_min_tolerance=0.05, strict=False,
velocity_tolerance=0.1):
# Compute note matching as usual using standard transcription function
matching = transcription.match_notes(
ref_intervals, ref_pitches, est_intervals, est_pitches,
onset_tolerance, pitch_tolerance, offset_ratio, offset_min_tolerance,
strict)
# Rescale reference velocities to the range [0, 1]
min_velocity, max_velocity = np.min(ref_velocities), np.max(ref_velocities)
# Make the smallest possible range 1 to avoid divide by zero
velocity_range = max(1, max_velocity - min_velocity)
ref_velocities = (ref_velocities - min_velocity)/float(velocity_range)
# Convert matching list-of-tuples to array for fancy indexing
matching = np.array(matching)
# When there is no matching, return an empty list
if matching.size == 0:
return []
# Grab velocities for matched notes
ref_matched_velocities = ref_velocities[matching[:, 0]]
est_matched_velocities = est_velocities[matching[:, 1]]
# Find slope and intercept of line which produces best least-squares fit
# between matched est and ref velocities
slope, intercept = np.linalg.lstsq(
np.vstack([est_matched_velocities,
np.ones(len(est_matched_velocities))]).T,
ref_matched_velocities)[0]
# Re-scale est velocities to match ref
est_matched_velocities = slope*est_matched_velocities + intercept
# Compute the absolute error of (rescaled) estimated velocities vs.
# normalized reference velocities. Error will be in [0, 1]
velocity_diff = np.abs(est_matched_velocities - ref_matched_velocities)
# Check whether each error is within the provided tolerance
velocity_within_tolerance = (velocity_diff < velocity_tolerance)
# Only keep matches whose velocity was within the provided tolerance
matching = matching[velocity_within_tolerance]
# Convert back to list-of-tuple format
matching = [tuple(_) for _ in matching]
return matching | [
"def",
"match_notes",
"(",
"ref_intervals",
",",
"ref_pitches",
",",
"ref_velocities",
",",
"est_intervals",
",",
"est_pitches",
",",
"est_velocities",
",",
"onset_tolerance",
"=",
"0.05",
",",
"pitch_tolerance",
"=",
"50.0",
",",
"offset_ratio",
"=",
"0.2",
",",
... | Match notes, taking note velocity into consideration.
This function first calls :func:`mir_eval.transcription.match_notes` to
match notes according to the supplied intervals, pitches, onset, offset,
and pitch tolerances. The velocities of the matched notes are then used to
estimate a slope and intercept which can rescale the estimated velocities
so that they are as close as possible (in L2 sense) to their matched
reference velocities. Velocities are then normalized to the range [0, 1]. A
estimated note is then further only considered correct if its velocity is
within ``velocity_tolerance`` of its matched (according to pitch and
timing) reference note.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
ref_pitches : np.ndarray, shape=(n,)
Array of reference pitch values in Hertz
ref_velocities : np.ndarray, shape=(n,)
Array of MIDI velocities (i.e. between 0 and 127) of reference notes
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
est_pitches : np.ndarray, shape=(m,)
Array of estimated pitch values in Hertz
est_velocities : np.ndarray, shape=(m,)
Array of MIDI velocities (i.e. between 0 and 127) of estimated notes
onset_tolerance : float > 0
The tolerance for an estimated note's onset deviating from the
reference note's onset, in seconds. Default is 0.05 (50 ms).
pitch_tolerance : float > 0
The tolerance for an estimated note's pitch deviating from the
reference note's pitch, in cents. Default is 50.0 (50 cents).
offset_ratio : float > 0 or None
The ratio of the reference note's duration used to define the
offset_tolerance. Default is 0.2 (20%), meaning the
``offset_tolerance`` will equal the ``ref_duration * 0.2``, or 0.05 (50
ms), whichever is greater. If ``offset_ratio`` is set to ``None``,
offsets are ignored in the matching.
offset_min_tolerance : float > 0
The minimum tolerance for offset matching. See offset_ratio description
for an explanation of how the offset tolerance is determined. Note:
this parameter only influences the results if ``offset_ratio`` is not
``None``.
strict : bool
If ``strict=False`` (the default), threshold checks for onset, offset,
and pitch matching are performed using ``<=`` (less than or equal). If
``strict=True``, the threshold checks are performed using ``<`` (less
than).
velocity_tolerance : float > 0
Estimated notes are considered correct if, after rescaling and
normalization to [0, 1], they are within ``velocity_tolerance`` of a
matched reference note.
Returns
-------
matching : list of tuples
A list of matched reference and estimated notes.
``matching[i] == (i, j)`` where reference note ``i`` matches estimated
note ``j``. | [
"Match",
"notes",
"taking",
"note",
"velocity",
"into",
"consideration",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/transcription_velocity.py#L98-L201 |
21,441 | craffel/mir_eval | mir_eval/beat.py | validate | def validate(reference_beats, estimated_beats):
"""Checks that the input annotations to a metric look like valid beat time
arrays, and throws helpful errors if not.
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
estimated beat times, in seconds
"""
# If reference or estimated beats are empty,
# warn because metric will be 0
if reference_beats.size == 0:
warnings.warn("Reference beats are empty.")
if estimated_beats.size == 0:
warnings.warn("Estimated beats are empty.")
for beats in [reference_beats, estimated_beats]:
util.validate_events(beats, MAX_TIME) | python | def validate(reference_beats, estimated_beats):
# If reference or estimated beats are empty,
# warn because metric will be 0
if reference_beats.size == 0:
warnings.warn("Reference beats are empty.")
if estimated_beats.size == 0:
warnings.warn("Estimated beats are empty.")
for beats in [reference_beats, estimated_beats]:
util.validate_events(beats, MAX_TIME) | [
"def",
"validate",
"(",
"reference_beats",
",",
"estimated_beats",
")",
":",
"# If reference or estimated beats are empty,",
"# warn because metric will be 0",
"if",
"reference_beats",
".",
"size",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Reference beats are empty.\... | Checks that the input annotations to a metric look like valid beat time
arrays, and throws helpful errors if not.
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
estimated beat times, in seconds | [
"Checks",
"that",
"the",
"input",
"annotations",
"to",
"a",
"metric",
"look",
"like",
"valid",
"beat",
"time",
"arrays",
"and",
"throws",
"helpful",
"errors",
"if",
"not",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L77-L95 |
21,442 | craffel/mir_eval | mir_eval/beat.py | _get_reference_beat_variations | def _get_reference_beat_variations(reference_beats):
"""Return metric variations of the reference beats
Parameters
----------
reference_beats : np.ndarray
beat locations in seconds
Returns
-------
reference_beats : np.ndarray
Original beat locations
off_beat : np.ndarray
180 degrees out of phase from the original beat locations
double : np.ndarray
Beats at 2x the original tempo
half_odd : np.ndarray
Half tempo, odd beats
half_even : np.ndarray
Half tempo, even beats
"""
# Create annotations at twice the metric level
interpolated_indices = np.arange(0, reference_beats.shape[0]-.5, .5)
original_indices = np.arange(0, reference_beats.shape[0])
double_reference_beats = np.interp(interpolated_indices,
original_indices,
reference_beats)
# Return metric variations:
# True, off-beat, double tempo, half tempo odd, and half tempo even
return (reference_beats,
double_reference_beats[1::2],
double_reference_beats,
reference_beats[::2],
reference_beats[1::2]) | python | def _get_reference_beat_variations(reference_beats):
# Create annotations at twice the metric level
interpolated_indices = np.arange(0, reference_beats.shape[0]-.5, .5)
original_indices = np.arange(0, reference_beats.shape[0])
double_reference_beats = np.interp(interpolated_indices,
original_indices,
reference_beats)
# Return metric variations:
# True, off-beat, double tempo, half tempo odd, and half tempo even
return (reference_beats,
double_reference_beats[1::2],
double_reference_beats,
reference_beats[::2],
reference_beats[1::2]) | [
"def",
"_get_reference_beat_variations",
"(",
"reference_beats",
")",
":",
"# Create annotations at twice the metric level",
"interpolated_indices",
"=",
"np",
".",
"arange",
"(",
"0",
",",
"reference_beats",
".",
"shape",
"[",
"0",
"]",
"-",
".5",
",",
".5",
")",
... | Return metric variations of the reference beats
Parameters
----------
reference_beats : np.ndarray
beat locations in seconds
Returns
-------
reference_beats : np.ndarray
Original beat locations
off_beat : np.ndarray
180 degrees out of phase from the original beat locations
double : np.ndarray
Beats at 2x the original tempo
half_odd : np.ndarray
Half tempo, odd beats
half_even : np.ndarray
Half tempo, even beats | [
"Return",
"metric",
"variations",
"of",
"the",
"reference",
"beats"
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L98-L133 |
21,443 | craffel/mir_eval | mir_eval/beat.py | f_measure | def f_measure(reference_beats,
estimated_beats,
f_measure_threshold=0.07):
"""Compute the F-measure of correct vs incorrectly predicted beats.
"Correctness" is determined over a small window.
Examples
--------
>>> reference_beats = mir_eval.io.load_events('reference.txt')
>>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
>>> estimated_beats = mir_eval.io.load_events('estimated.txt')
>>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
>>> f_measure = mir_eval.beat.f_measure(reference_beats,
estimated_beats)
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
estimated beat times, in seconds
f_measure_threshold : float
Window size, in seconds
(Default value = 0.07)
Returns
-------
f_score : float
The computed F-measure score
"""
validate(reference_beats, estimated_beats)
# When estimated beats are empty, no beats are correct; metric is 0
if estimated_beats.size == 0 or reference_beats.size == 0:
return 0.
# Compute the best-case matching between reference and estimated locations
matching = util.match_events(reference_beats,
estimated_beats,
f_measure_threshold)
precision = float(len(matching))/len(estimated_beats)
recall = float(len(matching))/len(reference_beats)
return util.f_measure(precision, recall) | python | def f_measure(reference_beats,
estimated_beats,
f_measure_threshold=0.07):
validate(reference_beats, estimated_beats)
# When estimated beats are empty, no beats are correct; metric is 0
if estimated_beats.size == 0 or reference_beats.size == 0:
return 0.
# Compute the best-case matching between reference and estimated locations
matching = util.match_events(reference_beats,
estimated_beats,
f_measure_threshold)
precision = float(len(matching))/len(estimated_beats)
recall = float(len(matching))/len(reference_beats)
return util.f_measure(precision, recall) | [
"def",
"f_measure",
"(",
"reference_beats",
",",
"estimated_beats",
",",
"f_measure_threshold",
"=",
"0.07",
")",
":",
"validate",
"(",
"reference_beats",
",",
"estimated_beats",
")",
"# When estimated beats are empty, no beats are correct; metric is 0",
"if",
"estimated_beat... | Compute the F-measure of correct vs incorrectly predicted beats.
"Correctness" is determined over a small window.
Examples
--------
>>> reference_beats = mir_eval.io.load_events('reference.txt')
>>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
>>> estimated_beats = mir_eval.io.load_events('estimated.txt')
>>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
>>> f_measure = mir_eval.beat.f_measure(reference_beats,
estimated_beats)
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
estimated beat times, in seconds
f_measure_threshold : float
Window size, in seconds
(Default value = 0.07)
Returns
-------
f_score : float
The computed F-measure score | [
"Compute",
"the",
"F",
"-",
"measure",
"of",
"correct",
"vs",
"incorrectly",
"predicted",
"beats",
".",
"Correctness",
"is",
"determined",
"over",
"a",
"small",
"window",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L136-L178 |
21,444 | craffel/mir_eval | mir_eval/beat.py | cemgil | def cemgil(reference_beats,
estimated_beats,
cemgil_sigma=0.04):
"""Cemgil's score, computes a gaussian error of each estimated beat.
Compares against the original beat times and all metrical variations.
Examples
--------
>>> reference_beats = mir_eval.io.load_events('reference.txt')
>>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
>>> estimated_beats = mir_eval.io.load_events('estimated.txt')
>>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
>>> cemgil_score, cemgil_max = mir_eval.beat.cemgil(reference_beats,
estimated_beats)
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
query beat times, in seconds
cemgil_sigma : float
Sigma parameter of gaussian error windows
(Default value = 0.04)
Returns
-------
cemgil_score : float
Cemgil's score for the original reference beats
cemgil_max : float
The best Cemgil score for all metrical variations
"""
validate(reference_beats, estimated_beats)
# When estimated beats are empty, no beats are correct; metric is 0
if estimated_beats.size == 0 or reference_beats.size == 0:
return 0., 0.
# We'll compute Cemgil's accuracy for each variation
accuracies = []
for reference_beats in _get_reference_beat_variations(reference_beats):
accuracy = 0
# Cycle through beats
for beat in reference_beats:
# Find the error for the closest beat to the reference beat
beat_diff = np.min(np.abs(beat - estimated_beats))
# Add gaussian error into the accuracy
accuracy += np.exp(-(beat_diff**2)/(2.0*cemgil_sigma**2))
# Normalize the accuracy
accuracy /= .5*(estimated_beats.shape[0] + reference_beats.shape[0])
# Add it to our list of accuracy scores
accuracies.append(accuracy)
# Return raw accuracy with non-varied annotations
# and maximal accuracy across all variations
return accuracies[0], np.max(accuracies) | python | def cemgil(reference_beats,
estimated_beats,
cemgil_sigma=0.04):
validate(reference_beats, estimated_beats)
# When estimated beats are empty, no beats are correct; metric is 0
if estimated_beats.size == 0 or reference_beats.size == 0:
return 0., 0.
# We'll compute Cemgil's accuracy for each variation
accuracies = []
for reference_beats in _get_reference_beat_variations(reference_beats):
accuracy = 0
# Cycle through beats
for beat in reference_beats:
# Find the error for the closest beat to the reference beat
beat_diff = np.min(np.abs(beat - estimated_beats))
# Add gaussian error into the accuracy
accuracy += np.exp(-(beat_diff**2)/(2.0*cemgil_sigma**2))
# Normalize the accuracy
accuracy /= .5*(estimated_beats.shape[0] + reference_beats.shape[0])
# Add it to our list of accuracy scores
accuracies.append(accuracy)
# Return raw accuracy with non-varied annotations
# and maximal accuracy across all variations
return accuracies[0], np.max(accuracies) | [
"def",
"cemgil",
"(",
"reference_beats",
",",
"estimated_beats",
",",
"cemgil_sigma",
"=",
"0.04",
")",
":",
"validate",
"(",
"reference_beats",
",",
"estimated_beats",
")",
"# When estimated beats are empty, no beats are correct; metric is 0",
"if",
"estimated_beats",
".",... | Cemgil's score, computes a gaussian error of each estimated beat.
Compares against the original beat times and all metrical variations.
Examples
--------
>>> reference_beats = mir_eval.io.load_events('reference.txt')
>>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
>>> estimated_beats = mir_eval.io.load_events('estimated.txt')
>>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
>>> cemgil_score, cemgil_max = mir_eval.beat.cemgil(reference_beats,
estimated_beats)
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
query beat times, in seconds
cemgil_sigma : float
Sigma parameter of gaussian error windows
(Default value = 0.04)
Returns
-------
cemgil_score : float
Cemgil's score for the original reference beats
cemgil_max : float
The best Cemgil score for all metrical variations | [
"Cemgil",
"s",
"score",
"computes",
"a",
"gaussian",
"error",
"of",
"each",
"estimated",
"beat",
".",
"Compares",
"against",
"the",
"original",
"beat",
"times",
"and",
"all",
"metrical",
"variations",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L181-L233 |
21,445 | craffel/mir_eval | mir_eval/beat.py | goto | def goto(reference_beats,
estimated_beats,
goto_threshold=0.35,
goto_mu=0.2,
goto_sigma=0.2):
"""Calculate Goto's score, a binary 1 or 0 depending on some specific
heuristic criteria
Examples
--------
>>> reference_beats = mir_eval.io.load_events('reference.txt')
>>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
>>> estimated_beats = mir_eval.io.load_events('estimated.txt')
>>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
>>> goto_score = mir_eval.beat.goto(reference_beats, estimated_beats)
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
query beat times, in seconds
goto_threshold : float
Threshold of beat error for a beat to be "correct"
(Default value = 0.35)
goto_mu : float
The mean of the beat errors in the continuously correct
track must be less than this
(Default value = 0.2)
goto_sigma : float
The std of the beat errors in the continuously correct track must
be less than this
(Default value = 0.2)
Returns
-------
goto_score : float
Either 1.0 or 0.0 if some specific criteria are met
"""
validate(reference_beats, estimated_beats)
# When estimated beats are empty, no beats are correct; metric is 0
if estimated_beats.size == 0 or reference_beats.size == 0:
return 0.
# Error for each beat
beat_error = np.ones(reference_beats.shape[0])
# Flag for whether the reference and estimated beats are paired
paired = np.zeros(reference_beats.shape[0])
# Keep track of Goto's three criteria
goto_criteria = 0
for n in range(1, reference_beats.shape[0]-1):
# Get previous inner-reference-beat-interval
previous_interval = 0.5*(reference_beats[n] - reference_beats[n-1])
# Window start - in the middle of the current beat and the previous
window_min = reference_beats[n] - previous_interval
# Next inter-reference-beat-interval
next_interval = 0.5*(reference_beats[n+1] - reference_beats[n])
# Window end - in the middle of the current beat and the next
window_max = reference_beats[n] + next_interval
# Get estimated beats in the window
beats_in_window = np.logical_and((estimated_beats >= window_min),
(estimated_beats < window_max))
# False negative/positive
if beats_in_window.sum() == 0 or beats_in_window.sum() > 1:
paired[n] = 0
beat_error[n] = 1
else:
# Single beat is paired!
paired[n] = 1
# Get offset of the estimated beat and the reference beat
offset = estimated_beats[beats_in_window] - reference_beats[n]
# Scale by previous or next interval
if offset < 0:
beat_error[n] = offset/previous_interval
else:
beat_error[n] = offset/next_interval
# Get indices of incorrect beats
incorrect_beats = np.flatnonzero(np.abs(beat_error) > goto_threshold)
# All beats are correct (first and last will be 0 so always correct)
if incorrect_beats.shape[0] < 3:
# Get the track of correct beats
track = beat_error[incorrect_beats[0] + 1:incorrect_beats[-1] - 1]
goto_criteria = 1
else:
# Get the track of maximal length
track_len = np.max(np.diff(incorrect_beats))
track_start = np.flatnonzero(np.diff(incorrect_beats) == track_len)[0]
# Is the track length at least 25% of the song?
if track_len - 1 > .25*(reference_beats.shape[0] - 2):
goto_criteria = 1
start_beat = incorrect_beats[track_start]
end_beat = incorrect_beats[track_start + 1]
track = beat_error[start_beat:end_beat + 1]
# If we have a track
if goto_criteria:
# Are mean and std of the track less than the required thresholds?
if np.mean(np.abs(track)) < goto_mu \
and np.std(track, ddof=1) < goto_sigma:
goto_criteria = 3
# If all criteria are met, score is 100%!
return 1.0*(goto_criteria == 3) | python | def goto(reference_beats,
estimated_beats,
goto_threshold=0.35,
goto_mu=0.2,
goto_sigma=0.2):
validate(reference_beats, estimated_beats)
# When estimated beats are empty, no beats are correct; metric is 0
if estimated_beats.size == 0 or reference_beats.size == 0:
return 0.
# Error for each beat
beat_error = np.ones(reference_beats.shape[0])
# Flag for whether the reference and estimated beats are paired
paired = np.zeros(reference_beats.shape[0])
# Keep track of Goto's three criteria
goto_criteria = 0
for n in range(1, reference_beats.shape[0]-1):
# Get previous inner-reference-beat-interval
previous_interval = 0.5*(reference_beats[n] - reference_beats[n-1])
# Window start - in the middle of the current beat and the previous
window_min = reference_beats[n] - previous_interval
# Next inter-reference-beat-interval
next_interval = 0.5*(reference_beats[n+1] - reference_beats[n])
# Window end - in the middle of the current beat and the next
window_max = reference_beats[n] + next_interval
# Get estimated beats in the window
beats_in_window = np.logical_and((estimated_beats >= window_min),
(estimated_beats < window_max))
# False negative/positive
if beats_in_window.sum() == 0 or beats_in_window.sum() > 1:
paired[n] = 0
beat_error[n] = 1
else:
# Single beat is paired!
paired[n] = 1
# Get offset of the estimated beat and the reference beat
offset = estimated_beats[beats_in_window] - reference_beats[n]
# Scale by previous or next interval
if offset < 0:
beat_error[n] = offset/previous_interval
else:
beat_error[n] = offset/next_interval
# Get indices of incorrect beats
incorrect_beats = np.flatnonzero(np.abs(beat_error) > goto_threshold)
# All beats are correct (first and last will be 0 so always correct)
if incorrect_beats.shape[0] < 3:
# Get the track of correct beats
track = beat_error[incorrect_beats[0] + 1:incorrect_beats[-1] - 1]
goto_criteria = 1
else:
# Get the track of maximal length
track_len = np.max(np.diff(incorrect_beats))
track_start = np.flatnonzero(np.diff(incorrect_beats) == track_len)[0]
# Is the track length at least 25% of the song?
if track_len - 1 > .25*(reference_beats.shape[0] - 2):
goto_criteria = 1
start_beat = incorrect_beats[track_start]
end_beat = incorrect_beats[track_start + 1]
track = beat_error[start_beat:end_beat + 1]
# If we have a track
if goto_criteria:
# Are mean and std of the track less than the required thresholds?
if np.mean(np.abs(track)) < goto_mu \
and np.std(track, ddof=1) < goto_sigma:
goto_criteria = 3
# If all criteria are met, score is 100%!
return 1.0*(goto_criteria == 3) | [
"def",
"goto",
"(",
"reference_beats",
",",
"estimated_beats",
",",
"goto_threshold",
"=",
"0.35",
",",
"goto_mu",
"=",
"0.2",
",",
"goto_sigma",
"=",
"0.2",
")",
":",
"validate",
"(",
"reference_beats",
",",
"estimated_beats",
")",
"# When estimated beats are emp... | Calculate Goto's score, a binary 1 or 0 depending on some specific
heuristic criteria
Examples
--------
>>> reference_beats = mir_eval.io.load_events('reference.txt')
>>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
>>> estimated_beats = mir_eval.io.load_events('estimated.txt')
>>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
>>> goto_score = mir_eval.beat.goto(reference_beats, estimated_beats)
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
query beat times, in seconds
goto_threshold : float
Threshold of beat error for a beat to be "correct"
(Default value = 0.35)
goto_mu : float
The mean of the beat errors in the continuously correct
track must be less than this
(Default value = 0.2)
goto_sigma : float
The std of the beat errors in the continuously correct track must
be less than this
(Default value = 0.2)
Returns
-------
goto_score : float
Either 1.0 or 0.0 if some specific criteria are met | [
"Calculate",
"Goto",
"s",
"score",
"a",
"binary",
"1",
"or",
"0",
"depending",
"on",
"some",
"specific",
"heuristic",
"criteria"
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L236-L335 |
21,446 | craffel/mir_eval | mir_eval/beat.py | p_score | def p_score(reference_beats,
estimated_beats,
p_score_threshold=0.2):
"""Get McKinney's P-score.
Based on the autocorrelation of the reference and estimated beats
Examples
--------
>>> reference_beats = mir_eval.io.load_events('reference.txt')
>>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
>>> estimated_beats = mir_eval.io.load_events('estimated.txt')
>>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
>>> p_score = mir_eval.beat.p_score(reference_beats, estimated_beats)
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
query beat times, in seconds
p_score_threshold : float
Window size will be
``p_score_threshold*np.median(inter_annotation_intervals)``,
(Default value = 0.2)
Returns
-------
correlation : float
McKinney's P-score
"""
validate(reference_beats, estimated_beats)
# Warn when only one beat is provided for either estimated or reference,
# report a warning
if reference_beats.size == 1:
warnings.warn("Only one reference beat was provided, so beat intervals"
" cannot be computed.")
if estimated_beats.size == 1:
warnings.warn("Only one estimated beat was provided, so beat intervals"
" cannot be computed.")
# When estimated or reference beats have <= 1 beats, can't compute the
# metric, so return 0
if estimated_beats.size <= 1 or reference_beats.size <= 1:
return 0.
# Quantize beats to 10ms
sampling_rate = int(1.0/0.010)
# Shift beats so that the minimum in either sequence is zero
offset = min(estimated_beats.min(), reference_beats.min())
estimated_beats = np.array(estimated_beats - offset)
reference_beats = np.array(reference_beats - offset)
# Get the largest time index
end_point = np.int(np.ceil(np.max([np.max(estimated_beats),
np.max(reference_beats)])))
# Make impulse trains with impulses at beat locations
reference_train = np.zeros(end_point*sampling_rate + 1)
beat_indices = np.ceil(reference_beats*sampling_rate).astype(np.int)
reference_train[beat_indices] = 1.0
estimated_train = np.zeros(end_point*sampling_rate + 1)
beat_indices = np.ceil(estimated_beats*sampling_rate).astype(np.int)
estimated_train[beat_indices] = 1.0
# Window size to take the correlation over
# defined as .2*median(inter-annotation-intervals)
annotation_intervals = np.diff(np.flatnonzero(reference_train))
win_size = int(np.round(p_score_threshold*np.median(annotation_intervals)))
# Get full correlation
train_correlation = np.correlate(reference_train, estimated_train, 'full')
# Get the middle element - note we are rounding down on purpose here
middle_lag = train_correlation.shape[0]//2
# Truncate to only valid lags (those corresponding to the window)
start = middle_lag - win_size
end = middle_lag + win_size + 1
train_correlation = train_correlation[start:end]
# Compute and return the P-score
n_beats = np.max([estimated_beats.shape[0], reference_beats.shape[0]])
return np.sum(train_correlation)/n_beats | python | def p_score(reference_beats,
estimated_beats,
p_score_threshold=0.2):
validate(reference_beats, estimated_beats)
# Warn when only one beat is provided for either estimated or reference,
# report a warning
if reference_beats.size == 1:
warnings.warn("Only one reference beat was provided, so beat intervals"
" cannot be computed.")
if estimated_beats.size == 1:
warnings.warn("Only one estimated beat was provided, so beat intervals"
" cannot be computed.")
# When estimated or reference beats have <= 1 beats, can't compute the
# metric, so return 0
if estimated_beats.size <= 1 or reference_beats.size <= 1:
return 0.
# Quantize beats to 10ms
sampling_rate = int(1.0/0.010)
# Shift beats so that the minimum in either sequence is zero
offset = min(estimated_beats.min(), reference_beats.min())
estimated_beats = np.array(estimated_beats - offset)
reference_beats = np.array(reference_beats - offset)
# Get the largest time index
end_point = np.int(np.ceil(np.max([np.max(estimated_beats),
np.max(reference_beats)])))
# Make impulse trains with impulses at beat locations
reference_train = np.zeros(end_point*sampling_rate + 1)
beat_indices = np.ceil(reference_beats*sampling_rate).astype(np.int)
reference_train[beat_indices] = 1.0
estimated_train = np.zeros(end_point*sampling_rate + 1)
beat_indices = np.ceil(estimated_beats*sampling_rate).astype(np.int)
estimated_train[beat_indices] = 1.0
# Window size to take the correlation over
# defined as .2*median(inter-annotation-intervals)
annotation_intervals = np.diff(np.flatnonzero(reference_train))
win_size = int(np.round(p_score_threshold*np.median(annotation_intervals)))
# Get full correlation
train_correlation = np.correlate(reference_train, estimated_train, 'full')
# Get the middle element - note we are rounding down on purpose here
middle_lag = train_correlation.shape[0]//2
# Truncate to only valid lags (those corresponding to the window)
start = middle_lag - win_size
end = middle_lag + win_size + 1
train_correlation = train_correlation[start:end]
# Compute and return the P-score
n_beats = np.max([estimated_beats.shape[0], reference_beats.shape[0]])
return np.sum(train_correlation)/n_beats | [
"def",
"p_score",
"(",
"reference_beats",
",",
"estimated_beats",
",",
"p_score_threshold",
"=",
"0.2",
")",
":",
"validate",
"(",
"reference_beats",
",",
"estimated_beats",
")",
"# Warn when only one beat is provided for either estimated or reference,",
"# report a warning",
... | Get McKinney's P-score.
Based on the autocorrelation of the reference and estimated beats
Examples
--------
>>> reference_beats = mir_eval.io.load_events('reference.txt')
>>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
>>> estimated_beats = mir_eval.io.load_events('estimated.txt')
>>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
>>> p_score = mir_eval.beat.p_score(reference_beats, estimated_beats)
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
query beat times, in seconds
p_score_threshold : float
Window size will be
``p_score_threshold*np.median(inter_annotation_intervals)``,
(Default value = 0.2)
Returns
-------
correlation : float
McKinney's P-score | [
"Get",
"McKinney",
"s",
"P",
"-",
"score",
".",
"Based",
"on",
"the",
"autocorrelation",
"of",
"the",
"reference",
"and",
"estimated",
"beats"
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L338-L412 |
21,447 | craffel/mir_eval | mir_eval/beat.py | information_gain | def information_gain(reference_beats,
estimated_beats,
bins=41):
"""Get the information gain - K-L divergence of the beat error histogram
to a uniform histogram
Examples
--------
>>> reference_beats = mir_eval.io.load_events('reference.txt')
>>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
>>> estimated_beats = mir_eval.io.load_events('estimated.txt')
>>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
>>> information_gain = mir_eval.beat.information_gain(reference_beats,
estimated_beats)
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
query beat times, in seconds
bins : int
Number of bins in the beat error histogram
(Default value = 41)
Returns
-------
information_gain_score : float
Entropy of beat error histogram
"""
validate(reference_beats, estimated_beats)
# If an even number of bins is provided,
# there will be no bin centered at zero, so warn the user.
if not bins % 2:
warnings.warn("bins parameter is even, "
"so there will not be a bin centered at zero.")
# Warn when only one beat is provided for either estimated or reference,
# report a warning
if reference_beats.size == 1:
warnings.warn("Only one reference beat was provided, so beat intervals"
" cannot be computed.")
if estimated_beats.size == 1:
warnings.warn("Only one estimated beat was provided, so beat intervals"
" cannot be computed.")
# When estimated or reference beats have <= 1 beats, can't compute the
# metric, so return 0
if estimated_beats.size <= 1 or reference_beats.size <= 1:
return 0.
# Get entropy for reference beats->estimated beats
# and estimated beats->reference beats
forward_entropy = _get_entropy(reference_beats, estimated_beats, bins)
backward_entropy = _get_entropy(estimated_beats, reference_beats, bins)
# Pick the larger of the entropies
norm = np.log2(bins)
if forward_entropy > backward_entropy:
# Note that the beat evaluation toolbox does not normalize
information_gain_score = (norm - forward_entropy)/norm
else:
information_gain_score = (norm - backward_entropy)/norm
return information_gain_score | python | def information_gain(reference_beats,
estimated_beats,
bins=41):
validate(reference_beats, estimated_beats)
# If an even number of bins is provided,
# there will be no bin centered at zero, so warn the user.
if not bins % 2:
warnings.warn("bins parameter is even, "
"so there will not be a bin centered at zero.")
# Warn when only one beat is provided for either estimated or reference,
# report a warning
if reference_beats.size == 1:
warnings.warn("Only one reference beat was provided, so beat intervals"
" cannot be computed.")
if estimated_beats.size == 1:
warnings.warn("Only one estimated beat was provided, so beat intervals"
" cannot be computed.")
# When estimated or reference beats have <= 1 beats, can't compute the
# metric, so return 0
if estimated_beats.size <= 1 or reference_beats.size <= 1:
return 0.
# Get entropy for reference beats->estimated beats
# and estimated beats->reference beats
forward_entropy = _get_entropy(reference_beats, estimated_beats, bins)
backward_entropy = _get_entropy(estimated_beats, reference_beats, bins)
# Pick the larger of the entropies
norm = np.log2(bins)
if forward_entropy > backward_entropy:
# Note that the beat evaluation toolbox does not normalize
information_gain_score = (norm - forward_entropy)/norm
else:
information_gain_score = (norm - backward_entropy)/norm
return information_gain_score | [
"def",
"information_gain",
"(",
"reference_beats",
",",
"estimated_beats",
",",
"bins",
"=",
"41",
")",
":",
"validate",
"(",
"reference_beats",
",",
"estimated_beats",
")",
"# If an even number of bins is provided,",
"# there will be no bin centered at zero, so warn the user."... | Get the information gain - K-L divergence of the beat error histogram
to a uniform histogram
Examples
--------
>>> reference_beats = mir_eval.io.load_events('reference.txt')
>>> reference_beats = mir_eval.beat.trim_beats(reference_beats)
>>> estimated_beats = mir_eval.io.load_events('estimated.txt')
>>> estimated_beats = mir_eval.beat.trim_beats(estimated_beats)
>>> information_gain = mir_eval.beat.information_gain(reference_beats,
estimated_beats)
Parameters
----------
reference_beats : np.ndarray
reference beat times, in seconds
estimated_beats : np.ndarray
query beat times, in seconds
bins : int
Number of bins in the beat error histogram
(Default value = 41)
Returns
-------
information_gain_score : float
Entropy of beat error histogram | [
"Get",
"the",
"information",
"gain",
"-",
"K",
"-",
"L",
"divergence",
"of",
"the",
"beat",
"error",
"histogram",
"to",
"a",
"uniform",
"histogram"
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/beat.py#L580-L639 |
21,448 | craffel/mir_eval | mir_eval/util.py | index_labels | def index_labels(labels, case_sensitive=False):
"""Convert a list of string identifiers into numerical indices.
Parameters
----------
labels : list of strings, shape=(n,)
A list of annotations, e.g., segment or chord labels from an
annotation file.
case_sensitive : bool
Set to True to enable case-sensitive label indexing
(Default value = False)
Returns
-------
indices : list, shape=(n,)
Numerical representation of ``labels``
index_to_label : dict
Mapping to convert numerical indices back to labels.
``labels[i] == index_to_label[indices[i]]``
"""
label_to_index = {}
index_to_label = {}
# If we're not case-sensitive,
if not case_sensitive:
labels = [str(s).lower() for s in labels]
# First, build the unique label mapping
for index, s in enumerate(sorted(set(labels))):
label_to_index[s] = index
index_to_label[index] = s
# Remap the labels to indices
indices = [label_to_index[s] for s in labels]
# Return the converted labels, and the inverse mapping
return indices, index_to_label | python | def index_labels(labels, case_sensitive=False):
label_to_index = {}
index_to_label = {}
# If we're not case-sensitive,
if not case_sensitive:
labels = [str(s).lower() for s in labels]
# First, build the unique label mapping
for index, s in enumerate(sorted(set(labels))):
label_to_index[s] = index
index_to_label[index] = s
# Remap the labels to indices
indices = [label_to_index[s] for s in labels]
# Return the converted labels, and the inverse mapping
return indices, index_to_label | [
"def",
"index_labels",
"(",
"labels",
",",
"case_sensitive",
"=",
"False",
")",
":",
"label_to_index",
"=",
"{",
"}",
"index_to_label",
"=",
"{",
"}",
"# If we're not case-sensitive,",
"if",
"not",
"case_sensitive",
":",
"labels",
"=",
"[",
"str",
"(",
"s",
... | Convert a list of string identifiers into numerical indices.
Parameters
----------
labels : list of strings, shape=(n,)
A list of annotations, e.g., segment or chord labels from an
annotation file.
case_sensitive : bool
Set to True to enable case-sensitive label indexing
(Default value = False)
Returns
-------
indices : list, shape=(n,)
Numerical representation of ``labels``
index_to_label : dict
Mapping to convert numerical indices back to labels.
``labels[i] == index_to_label[indices[i]]`` | [
"Convert",
"a",
"list",
"of",
"string",
"identifiers",
"into",
"numerical",
"indices",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L13-L52 |
21,449 | craffel/mir_eval | mir_eval/util.py | intervals_to_samples | def intervals_to_samples(intervals, labels, offset=0, sample_size=0.1,
fill_value=None):
"""Convert an array of labeled time intervals to annotated samples.
Parameters
----------
intervals : np.ndarray, shape=(n, d)
An array of time intervals, as returned by
:func:`mir_eval.io.load_intervals()` or
:func:`mir_eval.io.load_labeled_intervals()`.
The ``i`` th interval spans time ``intervals[i, 0]`` to
``intervals[i, 1]``.
labels : list, shape=(n,)
The annotation for each interval
offset : float > 0
Phase offset of the sampled time grid (in seconds)
(Default value = 0)
sample_size : float > 0
duration of each sample to be generated (in seconds)
(Default value = 0.1)
fill_value : type(labels[0])
Object to use for the label with out-of-range time points.
(Default value = None)
Returns
-------
sample_times : list
list of sample times
sample_labels : list
array of labels for each generated sample
Notes
-----
Intervals will be rounded down to the nearest multiple
of ``sample_size``.
"""
# Round intervals to the sample size
num_samples = int(np.floor(intervals.max() / sample_size))
sample_indices = np.arange(num_samples, dtype=np.float32)
sample_times = (sample_indices*sample_size + offset).tolist()
sampled_labels = interpolate_intervals(
intervals, labels, sample_times, fill_value)
return sample_times, sampled_labels | python | def intervals_to_samples(intervals, labels, offset=0, sample_size=0.1,
fill_value=None):
# Round intervals to the sample size
num_samples = int(np.floor(intervals.max() / sample_size))
sample_indices = np.arange(num_samples, dtype=np.float32)
sample_times = (sample_indices*sample_size + offset).tolist()
sampled_labels = interpolate_intervals(
intervals, labels, sample_times, fill_value)
return sample_times, sampled_labels | [
"def",
"intervals_to_samples",
"(",
"intervals",
",",
"labels",
",",
"offset",
"=",
"0",
",",
"sample_size",
"=",
"0.1",
",",
"fill_value",
"=",
"None",
")",
":",
"# Round intervals to the sample size",
"num_samples",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
... | Convert an array of labeled time intervals to annotated samples.
Parameters
----------
intervals : np.ndarray, shape=(n, d)
An array of time intervals, as returned by
:func:`mir_eval.io.load_intervals()` or
:func:`mir_eval.io.load_labeled_intervals()`.
The ``i`` th interval spans time ``intervals[i, 0]`` to
``intervals[i, 1]``.
labels : list, shape=(n,)
The annotation for each interval
offset : float > 0
Phase offset of the sampled time grid (in seconds)
(Default value = 0)
sample_size : float > 0
duration of each sample to be generated (in seconds)
(Default value = 0.1)
fill_value : type(labels[0])
Object to use for the label with out-of-range time points.
(Default value = None)
Returns
-------
sample_times : list
list of sample times
sample_labels : list
array of labels for each generated sample
Notes
-----
Intervals will be rounded down to the nearest multiple
of ``sample_size``. | [
"Convert",
"an",
"array",
"of",
"labeled",
"time",
"intervals",
"to",
"annotated",
"samples",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L76-L126 |
21,450 | craffel/mir_eval | mir_eval/util.py | interpolate_intervals | def interpolate_intervals(intervals, labels, time_points, fill_value=None):
"""Assign labels to a set of points in time given a set of intervals.
Time points that do not lie within an interval are mapped to `fill_value`.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
An array of time intervals, as returned by
:func:`mir_eval.io.load_intervals()`.
The ``i`` th interval spans time ``intervals[i, 0]`` to
``intervals[i, 1]``.
Intervals are assumed to be disjoint.
labels : list, shape=(n,)
The annotation for each interval
time_points : array_like, shape=(m,)
Points in time to assign labels. These must be in
non-decreasing order.
fill_value : type(labels[0])
Object to use for the label with out-of-range time points.
(Default value = None)
Returns
-------
aligned_labels : list
Labels corresponding to the given time points.
Raises
------
ValueError
If `time_points` is not in non-decreasing order.
"""
# Verify that time_points is sorted
time_points = np.asarray(time_points)
if np.any(time_points[1:] < time_points[:-1]):
raise ValueError('time_points must be in non-decreasing order')
aligned_labels = [fill_value] * len(time_points)
starts = np.searchsorted(time_points, intervals[:, 0], side='left')
ends = np.searchsorted(time_points, intervals[:, 1], side='right')
for (start, end, lab) in zip(starts, ends, labels):
aligned_labels[start:end] = [lab] * (end - start)
return aligned_labels | python | def interpolate_intervals(intervals, labels, time_points, fill_value=None):
# Verify that time_points is sorted
time_points = np.asarray(time_points)
if np.any(time_points[1:] < time_points[:-1]):
raise ValueError('time_points must be in non-decreasing order')
aligned_labels = [fill_value] * len(time_points)
starts = np.searchsorted(time_points, intervals[:, 0], side='left')
ends = np.searchsorted(time_points, intervals[:, 1], side='right')
for (start, end, lab) in zip(starts, ends, labels):
aligned_labels[start:end] = [lab] * (end - start)
return aligned_labels | [
"def",
"interpolate_intervals",
"(",
"intervals",
",",
"labels",
",",
"time_points",
",",
"fill_value",
"=",
"None",
")",
":",
"# Verify that time_points is sorted",
"time_points",
"=",
"np",
".",
"asarray",
"(",
"time_points",
")",
"if",
"np",
".",
"any",
"(",
... | Assign labels to a set of points in time given a set of intervals.
Time points that do not lie within an interval are mapped to `fill_value`.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
An array of time intervals, as returned by
:func:`mir_eval.io.load_intervals()`.
The ``i`` th interval spans time ``intervals[i, 0]`` to
``intervals[i, 1]``.
Intervals are assumed to be disjoint.
labels : list, shape=(n,)
The annotation for each interval
time_points : array_like, shape=(m,)
Points in time to assign labels. These must be in
non-decreasing order.
fill_value : type(labels[0])
Object to use for the label with out-of-range time points.
(Default value = None)
Returns
-------
aligned_labels : list
Labels corresponding to the given time points.
Raises
------
ValueError
If `time_points` is not in non-decreasing order. | [
"Assign",
"labels",
"to",
"a",
"set",
"of",
"points",
"in",
"time",
"given",
"a",
"set",
"of",
"intervals",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L129-L180 |
21,451 | craffel/mir_eval | mir_eval/util.py | sort_labeled_intervals | def sort_labeled_intervals(intervals, labels=None):
'''Sort intervals, and optionally, their corresponding labels
according to start time.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
The input intervals
labels : list, optional
Labels for each interval
Returns
-------
intervals_sorted or (intervals_sorted, labels_sorted)
Labels are only returned if provided as input
'''
idx = np.argsort(intervals[:, 0])
intervals_sorted = intervals[idx]
if labels is None:
return intervals_sorted
else:
return intervals_sorted, [labels[_] for _ in idx] | python | def sort_labeled_intervals(intervals, labels=None):
'''Sort intervals, and optionally, their corresponding labels
according to start time.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
The input intervals
labels : list, optional
Labels for each interval
Returns
-------
intervals_sorted or (intervals_sorted, labels_sorted)
Labels are only returned if provided as input
'''
idx = np.argsort(intervals[:, 0])
intervals_sorted = intervals[idx]
if labels is None:
return intervals_sorted
else:
return intervals_sorted, [labels[_] for _ in idx] | [
"def",
"sort_labeled_intervals",
"(",
"intervals",
",",
"labels",
"=",
"None",
")",
":",
"idx",
"=",
"np",
".",
"argsort",
"(",
"intervals",
"[",
":",
",",
"0",
"]",
")",
"intervals_sorted",
"=",
"intervals",
"[",
"idx",
"]",
"if",
"labels",
"is",
"Non... | Sort intervals, and optionally, their corresponding labels
according to start time.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
The input intervals
labels : list, optional
Labels for each interval
Returns
-------
intervals_sorted or (intervals_sorted, labels_sorted)
Labels are only returned if provided as input | [
"Sort",
"intervals",
"and",
"optionally",
"their",
"corresponding",
"labels",
"according",
"to",
"start",
"time",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L183-L208 |
21,452 | craffel/mir_eval | mir_eval/util.py | f_measure | def f_measure(precision, recall, beta=1.0):
"""Compute the f-measure from precision and recall scores.
Parameters
----------
precision : float in (0, 1]
Precision
recall : float in (0, 1]
Recall
beta : float > 0
Weighting factor for f-measure
(Default value = 1.0)
Returns
-------
f_measure : float
The weighted f-measure
"""
if precision == 0 and recall == 0:
return 0.0
return (1 + beta**2)*precision*recall/((beta**2)*precision + recall) | python | def f_measure(precision, recall, beta=1.0):
if precision == 0 and recall == 0:
return 0.0
return (1 + beta**2)*precision*recall/((beta**2)*precision + recall) | [
"def",
"f_measure",
"(",
"precision",
",",
"recall",
",",
"beta",
"=",
"1.0",
")",
":",
"if",
"precision",
"==",
"0",
"and",
"recall",
"==",
"0",
":",
"return",
"0.0",
"return",
"(",
"1",
"+",
"beta",
"**",
"2",
")",
"*",
"precision",
"*",
"recall"... | Compute the f-measure from precision and recall scores.
Parameters
----------
precision : float in (0, 1]
Precision
recall : float in (0, 1]
Recall
beta : float > 0
Weighting factor for f-measure
(Default value = 1.0)
Returns
-------
f_measure : float
The weighted f-measure | [
"Compute",
"the",
"f",
"-",
"measure",
"from",
"precision",
"and",
"recall",
"scores",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L211-L234 |
21,453 | craffel/mir_eval | mir_eval/util.py | intervals_to_boundaries | def intervals_to_boundaries(intervals, q=5):
"""Convert interval times into boundaries.
Parameters
----------
intervals : np.ndarray, shape=(n_events, 2)
Array of interval start and end-times
q : int
Number of decimals to round to. (Default value = 5)
Returns
-------
boundaries : np.ndarray
Interval boundary times, including the end of the final interval
"""
return np.unique(np.ravel(np.round(intervals, decimals=q))) | python | def intervals_to_boundaries(intervals, q=5):
return np.unique(np.ravel(np.round(intervals, decimals=q))) | [
"def",
"intervals_to_boundaries",
"(",
"intervals",
",",
"q",
"=",
"5",
")",
":",
"return",
"np",
".",
"unique",
"(",
"np",
".",
"ravel",
"(",
"np",
".",
"round",
"(",
"intervals",
",",
"decimals",
"=",
"q",
")",
")",
")"
] | Convert interval times into boundaries.
Parameters
----------
intervals : np.ndarray, shape=(n_events, 2)
Array of interval start and end-times
q : int
Number of decimals to round to. (Default value = 5)
Returns
-------
boundaries : np.ndarray
Interval boundary times, including the end of the final interval | [
"Convert",
"interval",
"times",
"into",
"boundaries",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L237-L254 |
21,454 | craffel/mir_eval | mir_eval/util.py | boundaries_to_intervals | def boundaries_to_intervals(boundaries):
"""Convert an array of event times into intervals
Parameters
----------
boundaries : list-like
List-like of event times. These are assumed to be unique
timestamps in ascending order.
Returns
-------
intervals : np.ndarray, shape=(n_intervals, 2)
Start and end time for each interval
"""
if not np.allclose(boundaries, np.unique(boundaries)):
raise ValueError('Boundary times are not unique or not ascending.')
intervals = np.asarray(list(zip(boundaries[:-1], boundaries[1:])))
return intervals | python | def boundaries_to_intervals(boundaries):
if not np.allclose(boundaries, np.unique(boundaries)):
raise ValueError('Boundary times are not unique or not ascending.')
intervals = np.asarray(list(zip(boundaries[:-1], boundaries[1:])))
return intervals | [
"def",
"boundaries_to_intervals",
"(",
"boundaries",
")",
":",
"if",
"not",
"np",
".",
"allclose",
"(",
"boundaries",
",",
"np",
".",
"unique",
"(",
"boundaries",
")",
")",
":",
"raise",
"ValueError",
"(",
"'Boundary times are not unique or not ascending.'",
")",
... | Convert an array of event times into intervals
Parameters
----------
boundaries : list-like
List-like of event times. These are assumed to be unique
timestamps in ascending order.
Returns
-------
intervals : np.ndarray, shape=(n_intervals, 2)
Start and end time for each interval | [
"Convert",
"an",
"array",
"of",
"event",
"times",
"into",
"intervals"
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L257-L277 |
21,455 | craffel/mir_eval | mir_eval/util.py | merge_labeled_intervals | def merge_labeled_intervals(x_intervals, x_labels, y_intervals, y_labels):
r"""Merge the time intervals of two sequences.
Parameters
----------
x_intervals : np.ndarray
Array of interval times (seconds)
x_labels : list or None
List of labels
y_intervals : np.ndarray
Array of interval times (seconds)
y_labels : list or None
List of labels
Returns
-------
new_intervals : np.ndarray
New interval times of the merged sequences.
new_x_labels : list
New labels for the sequence ``x``
new_y_labels : list
New labels for the sequence ``y``
"""
align_check = [x_intervals[0, 0] == y_intervals[0, 0],
x_intervals[-1, 1] == y_intervals[-1, 1]]
if False in align_check:
raise ValueError(
"Time intervals do not align; did you mean to call "
"'adjust_intervals()' first?")
time_boundaries = np.unique(
np.concatenate([x_intervals, y_intervals], axis=0))
output_intervals = np.array(
[time_boundaries[:-1], time_boundaries[1:]]).T
x_labels_out, y_labels_out = [], []
x_label_range = np.arange(len(x_labels))
y_label_range = np.arange(len(y_labels))
for t0, _ in output_intervals:
x_idx = x_label_range[(t0 >= x_intervals[:, 0])]
x_labels_out.append(x_labels[x_idx[-1]])
y_idx = y_label_range[(t0 >= y_intervals[:, 0])]
y_labels_out.append(y_labels[y_idx[-1]])
return output_intervals, x_labels_out, y_labels_out | python | def merge_labeled_intervals(x_intervals, x_labels, y_intervals, y_labels):
r"""Merge the time intervals of two sequences.
Parameters
----------
x_intervals : np.ndarray
Array of interval times (seconds)
x_labels : list or None
List of labels
y_intervals : np.ndarray
Array of interval times (seconds)
y_labels : list or None
List of labels
Returns
-------
new_intervals : np.ndarray
New interval times of the merged sequences.
new_x_labels : list
New labels for the sequence ``x``
new_y_labels : list
New labels for the sequence ``y``
"""
align_check = [x_intervals[0, 0] == y_intervals[0, 0],
x_intervals[-1, 1] == y_intervals[-1, 1]]
if False in align_check:
raise ValueError(
"Time intervals do not align; did you mean to call "
"'adjust_intervals()' first?")
time_boundaries = np.unique(
np.concatenate([x_intervals, y_intervals], axis=0))
output_intervals = np.array(
[time_boundaries[:-1], time_boundaries[1:]]).T
x_labels_out, y_labels_out = [], []
x_label_range = np.arange(len(x_labels))
y_label_range = np.arange(len(y_labels))
for t0, _ in output_intervals:
x_idx = x_label_range[(t0 >= x_intervals[:, 0])]
x_labels_out.append(x_labels[x_idx[-1]])
y_idx = y_label_range[(t0 >= y_intervals[:, 0])]
y_labels_out.append(y_labels[y_idx[-1]])
return output_intervals, x_labels_out, y_labels_out | [
"def",
"merge_labeled_intervals",
"(",
"x_intervals",
",",
"x_labels",
",",
"y_intervals",
",",
"y_labels",
")",
":",
"align_check",
"=",
"[",
"x_intervals",
"[",
"0",
",",
"0",
"]",
"==",
"y_intervals",
"[",
"0",
",",
"0",
"]",
",",
"x_intervals",
"[",
... | r"""Merge the time intervals of two sequences.
Parameters
----------
x_intervals : np.ndarray
Array of interval times (seconds)
x_labels : list or None
List of labels
y_intervals : np.ndarray
Array of interval times (seconds)
y_labels : list or None
List of labels
Returns
-------
new_intervals : np.ndarray
New interval times of the merged sequences.
new_x_labels : list
New labels for the sequence ``x``
new_y_labels : list
New labels for the sequence ``y`` | [
"r",
"Merge",
"the",
"time",
"intervals",
"of",
"two",
"sequences",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L501-L544 |
21,456 | craffel/mir_eval | mir_eval/util.py | match_events | def match_events(ref, est, window, distance=None):
"""Compute a maximum matching between reference and estimated event times,
subject to a window constraint.
Given two lists of event times ``ref`` and ``est``, we seek the largest set
of correspondences ``(ref[i], est[j])`` such that
``distance(ref[i], est[j]) <= window``, and each
``ref[i]`` and ``est[j]`` is matched at most once.
This is useful for computing precision/recall metrics in beat tracking,
onset detection, and segmentation.
Parameters
----------
ref : np.ndarray, shape=(n,)
Array of reference values
est : np.ndarray, shape=(m,)
Array of estimated values
window : float > 0
Size of the window.
distance : function
function that computes the outer distance of ref and est.
By default uses ``|ref[i] - est[j]|``
Returns
-------
matching : list of tuples
A list of matched reference and event numbers.
``matching[i] == (i, j)`` where ``ref[i]`` matches ``est[j]``.
"""
if distance is not None:
# Compute the indices of feasible pairings
hits = np.where(distance(ref, est) <= window)
else:
hits = _fast_hit_windows(ref, est, window)
# Construct the graph input
G = {}
for ref_i, est_i in zip(*hits):
if est_i not in G:
G[est_i] = []
G[est_i].append(ref_i)
# Compute the maximum matching
matching = sorted(_bipartite_match(G).items())
return matching | python | def match_events(ref, est, window, distance=None):
if distance is not None:
# Compute the indices of feasible pairings
hits = np.where(distance(ref, est) <= window)
else:
hits = _fast_hit_windows(ref, est, window)
# Construct the graph input
G = {}
for ref_i, est_i in zip(*hits):
if est_i not in G:
G[est_i] = []
G[est_i].append(ref_i)
# Compute the maximum matching
matching = sorted(_bipartite_match(G).items())
return matching | [
"def",
"match_events",
"(",
"ref",
",",
"est",
",",
"window",
",",
"distance",
"=",
"None",
")",
":",
"if",
"distance",
"is",
"not",
"None",
":",
"# Compute the indices of feasible pairings",
"hits",
"=",
"np",
".",
"where",
"(",
"distance",
"(",
"ref",
",... | Compute a maximum matching between reference and estimated event times,
subject to a window constraint.
Given two lists of event times ``ref`` and ``est``, we seek the largest set
of correspondences ``(ref[i], est[j])`` such that
``distance(ref[i], est[j]) <= window``, and each
``ref[i]`` and ``est[j]`` is matched at most once.
This is useful for computing precision/recall metrics in beat tracking,
onset detection, and segmentation.
Parameters
----------
ref : np.ndarray, shape=(n,)
Array of reference values
est : np.ndarray, shape=(m,)
Array of estimated values
window : float > 0
Size of the window.
distance : function
function that computes the outer distance of ref and est.
By default uses ``|ref[i] - est[j]|``
Returns
-------
matching : list of tuples
A list of matched reference and event numbers.
``matching[i] == (i, j)`` where ``ref[i]`` matches ``est[j]``. | [
"Compute",
"a",
"maximum",
"matching",
"between",
"reference",
"and",
"estimated",
"event",
"times",
"subject",
"to",
"a",
"window",
"constraint",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L663-L710 |
21,457 | craffel/mir_eval | mir_eval/util.py | _fast_hit_windows | def _fast_hit_windows(ref, est, window):
'''Fast calculation of windowed hits for time events.
Given two lists of event times ``ref`` and ``est``, and a
tolerance window, computes a list of pairings
``(i, j)`` where ``|ref[i] - est[j]| <= window``.
This is equivalent to, but more efficient than the following:
>>> hit_ref, hit_est = np.where(np.abs(np.subtract.outer(ref, est))
... <= window)
Parameters
----------
ref : np.ndarray, shape=(n,)
Array of reference values
est : np.ndarray, shape=(m,)
Array of estimated values
window : float >= 0
Size of the tolerance window
Returns
-------
hit_ref : np.ndarray
hit_est : np.ndarray
indices such that ``|hit_ref[i] - hit_est[i]| <= window``
'''
ref = np.asarray(ref)
est = np.asarray(est)
ref_idx = np.argsort(ref)
ref_sorted = ref[ref_idx]
left_idx = np.searchsorted(ref_sorted, est - window, side='left')
right_idx = np.searchsorted(ref_sorted, est + window, side='right')
hit_ref, hit_est = [], []
for j, (start, end) in enumerate(zip(left_idx, right_idx)):
hit_ref.extend(ref_idx[start:end])
hit_est.extend([j] * (end - start))
return hit_ref, hit_est | python | def _fast_hit_windows(ref, est, window):
'''Fast calculation of windowed hits for time events.
Given two lists of event times ``ref`` and ``est``, and a
tolerance window, computes a list of pairings
``(i, j)`` where ``|ref[i] - est[j]| <= window``.
This is equivalent to, but more efficient than the following:
>>> hit_ref, hit_est = np.where(np.abs(np.subtract.outer(ref, est))
... <= window)
Parameters
----------
ref : np.ndarray, shape=(n,)
Array of reference values
est : np.ndarray, shape=(m,)
Array of estimated values
window : float >= 0
Size of the tolerance window
Returns
-------
hit_ref : np.ndarray
hit_est : np.ndarray
indices such that ``|hit_ref[i] - hit_est[i]| <= window``
'''
ref = np.asarray(ref)
est = np.asarray(est)
ref_idx = np.argsort(ref)
ref_sorted = ref[ref_idx]
left_idx = np.searchsorted(ref_sorted, est - window, side='left')
right_idx = np.searchsorted(ref_sorted, est + window, side='right')
hit_ref, hit_est = [], []
for j, (start, end) in enumerate(zip(left_idx, right_idx)):
hit_ref.extend(ref_idx[start:end])
hit_est.extend([j] * (end - start))
return hit_ref, hit_est | [
"def",
"_fast_hit_windows",
"(",
"ref",
",",
"est",
",",
"window",
")",
":",
"ref",
"=",
"np",
".",
"asarray",
"(",
"ref",
")",
"est",
"=",
"np",
".",
"asarray",
"(",
"est",
")",
"ref_idx",
"=",
"np",
".",
"argsort",
"(",
"ref",
")",
"ref_sorted",
... | Fast calculation of windowed hits for time events.
Given two lists of event times ``ref`` and ``est``, and a
tolerance window, computes a list of pairings
``(i, j)`` where ``|ref[i] - est[j]| <= window``.
This is equivalent to, but more efficient than the following:
>>> hit_ref, hit_est = np.where(np.abs(np.subtract.outer(ref, est))
... <= window)
Parameters
----------
ref : np.ndarray, shape=(n,)
Array of reference values
est : np.ndarray, shape=(m,)
Array of estimated values
window : float >= 0
Size of the tolerance window
Returns
-------
hit_ref : np.ndarray
hit_est : np.ndarray
indices such that ``|hit_ref[i] - hit_est[i]| <= window`` | [
"Fast",
"calculation",
"of",
"windowed",
"hits",
"for",
"time",
"events",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L713-L755 |
21,458 | craffel/mir_eval | mir_eval/util.py | validate_events | def validate_events(events, max_time=30000.):
"""Checks that a 1-d event location ndarray is well-formed, and raises
errors if not.
Parameters
----------
events : np.ndarray, shape=(n,)
Array of event times
max_time : float
If an event is found above this time, a ValueError will be raised.
(Default value = 30000.)
"""
# Make sure no event times are huge
if (events > max_time).any():
raise ValueError('An event at time {} was found which is greater than '
'the maximum allowable time of max_time = {} (did you'
' supply event times in '
'seconds?)'.format(events.max(), max_time))
# Make sure event locations are 1-d np ndarrays
if events.ndim != 1:
raise ValueError('Event times should be 1-d numpy ndarray, '
'but shape={}'.format(events.shape))
# Make sure event times are increasing
if (np.diff(events) < 0).any():
raise ValueError('Events should be in increasing order.') | python | def validate_events(events, max_time=30000.):
# Make sure no event times are huge
if (events > max_time).any():
raise ValueError('An event at time {} was found which is greater than '
'the maximum allowable time of max_time = {} (did you'
' supply event times in '
'seconds?)'.format(events.max(), max_time))
# Make sure event locations are 1-d np ndarrays
if events.ndim != 1:
raise ValueError('Event times should be 1-d numpy ndarray, '
'but shape={}'.format(events.shape))
# Make sure event times are increasing
if (np.diff(events) < 0).any():
raise ValueError('Events should be in increasing order.') | [
"def",
"validate_events",
"(",
"events",
",",
"max_time",
"=",
"30000.",
")",
":",
"# Make sure no event times are huge",
"if",
"(",
"events",
">",
"max_time",
")",
".",
"any",
"(",
")",
":",
"raise",
"ValueError",
"(",
"'An event at time {} was found which is great... | Checks that a 1-d event location ndarray is well-formed, and raises
errors if not.
Parameters
----------
events : np.ndarray, shape=(n,)
Array of event times
max_time : float
If an event is found above this time, a ValueError will be raised.
(Default value = 30000.) | [
"Checks",
"that",
"a",
"1",
"-",
"d",
"event",
"location",
"ndarray",
"is",
"well",
"-",
"formed",
"and",
"raises",
"errors",
"if",
"not",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L783-L808 |
21,459 | craffel/mir_eval | mir_eval/util.py | validate_frequencies | def validate_frequencies(frequencies, max_freq, min_freq,
allow_negatives=False):
"""Checks that a 1-d frequency ndarray is well-formed, and raises
errors if not.
Parameters
----------
frequencies : np.ndarray, shape=(n,)
Array of frequency values
max_freq : float
If a frequency is found above this pitch, a ValueError will be raised.
(Default value = 5000.)
min_freq : float
If a frequency is found below this pitch, a ValueError will be raised.
(Default value = 20.)
allow_negatives : bool
Whether or not to allow negative frequency values.
"""
# If flag is true, map frequencies to their absolute value.
if allow_negatives:
frequencies = np.abs(frequencies)
# Make sure no frequency values are huge
if (np.abs(frequencies) > max_freq).any():
raise ValueError('A frequency of {} was found which is greater than '
'the maximum allowable value of max_freq = {} (did '
'you supply frequency values in '
'Hz?)'.format(frequencies.max(), max_freq))
# Make sure no frequency values are tiny
if (np.abs(frequencies) < min_freq).any():
raise ValueError('A frequency of {} was found which is less than the '
'minimum allowable value of min_freq = {} (did you '
'supply frequency values in '
'Hz?)'.format(frequencies.min(), min_freq))
# Make sure frequency values are 1-d np ndarrays
if frequencies.ndim != 1:
raise ValueError('Frequencies should be 1-d numpy ndarray, '
'but shape={}'.format(frequencies.shape)) | python | def validate_frequencies(frequencies, max_freq, min_freq,
allow_negatives=False):
# If flag is true, map frequencies to their absolute value.
if allow_negatives:
frequencies = np.abs(frequencies)
# Make sure no frequency values are huge
if (np.abs(frequencies) > max_freq).any():
raise ValueError('A frequency of {} was found which is greater than '
'the maximum allowable value of max_freq = {} (did '
'you supply frequency values in '
'Hz?)'.format(frequencies.max(), max_freq))
# Make sure no frequency values are tiny
if (np.abs(frequencies) < min_freq).any():
raise ValueError('A frequency of {} was found which is less than the '
'minimum allowable value of min_freq = {} (did you '
'supply frequency values in '
'Hz?)'.format(frequencies.min(), min_freq))
# Make sure frequency values are 1-d np ndarrays
if frequencies.ndim != 1:
raise ValueError('Frequencies should be 1-d numpy ndarray, '
'but shape={}'.format(frequencies.shape)) | [
"def",
"validate_frequencies",
"(",
"frequencies",
",",
"max_freq",
",",
"min_freq",
",",
"allow_negatives",
"=",
"False",
")",
":",
"# If flag is true, map frequencies to their absolute value.",
"if",
"allow_negatives",
":",
"frequencies",
"=",
"np",
".",
"abs",
"(",
... | Checks that a 1-d frequency ndarray is well-formed, and raises
errors if not.
Parameters
----------
frequencies : np.ndarray, shape=(n,)
Array of frequency values
max_freq : float
If a frequency is found above this pitch, a ValueError will be raised.
(Default value = 5000.)
min_freq : float
If a frequency is found below this pitch, a ValueError will be raised.
(Default value = 20.)
allow_negatives : bool
Whether or not to allow negative frequency values. | [
"Checks",
"that",
"a",
"1",
"-",
"d",
"frequency",
"ndarray",
"is",
"well",
"-",
"formed",
"and",
"raises",
"errors",
"if",
"not",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L811-L847 |
21,460 | craffel/mir_eval | mir_eval/util.py | intervals_to_durations | def intervals_to_durations(intervals):
"""Converts an array of n intervals to their n durations.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
An array of time intervals, as returned by
:func:`mir_eval.io.load_intervals()`.
The ``i`` th interval spans time ``intervals[i, 0]`` to
``intervals[i, 1]``.
Returns
-------
durations : np.ndarray, shape=(n,)
Array of the duration of each interval.
"""
validate_intervals(intervals)
return np.abs(np.diff(intervals, axis=-1)).flatten() | python | def intervals_to_durations(intervals):
validate_intervals(intervals)
return np.abs(np.diff(intervals, axis=-1)).flatten() | [
"def",
"intervals_to_durations",
"(",
"intervals",
")",
":",
"validate_intervals",
"(",
"intervals",
")",
"return",
"np",
".",
"abs",
"(",
"np",
".",
"diff",
"(",
"intervals",
",",
"axis",
"=",
"-",
"1",
")",
")",
".",
"flatten",
"(",
")"
] | Converts an array of n intervals to their n durations.
Parameters
----------
intervals : np.ndarray, shape=(n, 2)
An array of time intervals, as returned by
:func:`mir_eval.io.load_intervals()`.
The ``i`` th interval spans time ``intervals[i, 0]`` to
``intervals[i, 1]``.
Returns
-------
durations : np.ndarray, shape=(n,)
Array of the duration of each interval. | [
"Converts",
"an",
"array",
"of",
"n",
"intervals",
"to",
"their",
"n",
"durations",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/util.py#L907-L925 |
21,461 | craffel/mir_eval | mir_eval/separation.py | validate | def validate(reference_sources, estimated_sources):
"""Checks that the input data to a metric are valid, and throws helpful
errors if not.
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources
"""
if reference_sources.shape != estimated_sources.shape:
raise ValueError('The shape of estimated sources and the true '
'sources should match. reference_sources.shape '
'= {}, estimated_sources.shape '
'= {}'.format(reference_sources.shape,
estimated_sources.shape))
if reference_sources.ndim > 3 or estimated_sources.ndim > 3:
raise ValueError('The number of dimensions is too high (must be less '
'than 3). reference_sources.ndim = {}, '
'estimated_sources.ndim '
'= {}'.format(reference_sources.ndim,
estimated_sources.ndim))
if reference_sources.size == 0:
warnings.warn("reference_sources is empty, should be of size "
"(nsrc, nsample). sdr, sir, sar, and perm will all "
"be empty np.ndarrays")
elif _any_source_silent(reference_sources):
raise ValueError('All the reference sources should be non-silent (not '
'all-zeros), but at least one of the reference '
'sources is all 0s, which introduces ambiguity to the'
' evaluation. (Otherwise we can add infinitely many '
'all-zero sources.)')
if estimated_sources.size == 0:
warnings.warn("estimated_sources is empty, should be of size "
"(nsrc, nsample). sdr, sir, sar, and perm will all "
"be empty np.ndarrays")
elif _any_source_silent(estimated_sources):
raise ValueError('All the estimated sources should be non-silent (not '
'all-zeros), but at least one of the estimated '
'sources is all 0s. Since we require each reference '
'source to be non-silent, having a silent estimated '
'source will result in an underdetermined system.')
if (estimated_sources.shape[0] > MAX_SOURCES or
reference_sources.shape[0] > MAX_SOURCES):
raise ValueError('The supplied matrices should be of shape (nsrc,'
' nsampl) but reference_sources.shape[0] = {} and '
'estimated_sources.shape[0] = {} which is greater '
'than mir_eval.separation.MAX_SOURCES = {}. To '
'override this check, set '
'mir_eval.separation.MAX_SOURCES to a '
'larger value.'.format(reference_sources.shape[0],
estimated_sources.shape[0],
MAX_SOURCES)) | python | def validate(reference_sources, estimated_sources):
if reference_sources.shape != estimated_sources.shape:
raise ValueError('The shape of estimated sources and the true '
'sources should match. reference_sources.shape '
'= {}, estimated_sources.shape '
'= {}'.format(reference_sources.shape,
estimated_sources.shape))
if reference_sources.ndim > 3 or estimated_sources.ndim > 3:
raise ValueError('The number of dimensions is too high (must be less '
'than 3). reference_sources.ndim = {}, '
'estimated_sources.ndim '
'= {}'.format(reference_sources.ndim,
estimated_sources.ndim))
if reference_sources.size == 0:
warnings.warn("reference_sources is empty, should be of size "
"(nsrc, nsample). sdr, sir, sar, and perm will all "
"be empty np.ndarrays")
elif _any_source_silent(reference_sources):
raise ValueError('All the reference sources should be non-silent (not '
'all-zeros), but at least one of the reference '
'sources is all 0s, which introduces ambiguity to the'
' evaluation. (Otherwise we can add infinitely many '
'all-zero sources.)')
if estimated_sources.size == 0:
warnings.warn("estimated_sources is empty, should be of size "
"(nsrc, nsample). sdr, sir, sar, and perm will all "
"be empty np.ndarrays")
elif _any_source_silent(estimated_sources):
raise ValueError('All the estimated sources should be non-silent (not '
'all-zeros), but at least one of the estimated '
'sources is all 0s. Since we require each reference '
'source to be non-silent, having a silent estimated '
'source will result in an underdetermined system.')
if (estimated_sources.shape[0] > MAX_SOURCES or
reference_sources.shape[0] > MAX_SOURCES):
raise ValueError('The supplied matrices should be of shape (nsrc,'
' nsampl) but reference_sources.shape[0] = {} and '
'estimated_sources.shape[0] = {} which is greater '
'than mir_eval.separation.MAX_SOURCES = {}. To '
'override this check, set '
'mir_eval.separation.MAX_SOURCES to a '
'larger value.'.format(reference_sources.shape[0],
estimated_sources.shape[0],
MAX_SOURCES)) | [
"def",
"validate",
"(",
"reference_sources",
",",
"estimated_sources",
")",
":",
"if",
"reference_sources",
".",
"shape",
"!=",
"estimated_sources",
".",
"shape",
":",
"raise",
"ValueError",
"(",
"'The shape of estimated sources and the true '",
"'sources should match. ref... | Checks that the input data to a metric are valid, and throws helpful
errors if not.
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources | [
"Checks",
"that",
"the",
"input",
"data",
"to",
"a",
"metric",
"are",
"valid",
"and",
"throws",
"helpful",
"errors",
"if",
"not",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L62-L121 |
21,462 | craffel/mir_eval | mir_eval/separation.py | _any_source_silent | def _any_source_silent(sources):
"""Returns true if the parameter sources has any silent first dimensions"""
return np.any(np.all(np.sum(
sources, axis=tuple(range(2, sources.ndim))) == 0, axis=1)) | python | def _any_source_silent(sources):
return np.any(np.all(np.sum(
sources, axis=tuple(range(2, sources.ndim))) == 0, axis=1)) | [
"def",
"_any_source_silent",
"(",
"sources",
")",
":",
"return",
"np",
".",
"any",
"(",
"np",
".",
"all",
"(",
"np",
".",
"sum",
"(",
"sources",
",",
"axis",
"=",
"tuple",
"(",
"range",
"(",
"2",
",",
"sources",
".",
"ndim",
")",
")",
")",
"==",
... | Returns true if the parameter sources has any silent first dimensions | [
"Returns",
"true",
"if",
"the",
"parameter",
"sources",
"has",
"any",
"silent",
"first",
"dimensions"
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L124-L127 |
21,463 | craffel/mir_eval | mir_eval/separation.py | bss_eval_sources | def bss_eval_sources(reference_sources, estimated_sources,
compute_permutation=True):
"""
Ordering and measurement of the separation quality for estimated source
signals in terms of filtered true source, interference and artifacts.
The decomposition allows a time-invariant filter distortion of length
512, as described in Section III.B of [#vincent2006performance]_.
Passing ``False`` for ``compute_permutation`` will improve the computation
performance of the evaluation; however, it is not always appropriate and
is not the way that the BSS_EVAL Matlab toolbox computes bss_eval_sources.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, sir, sar,
... perm) = mir_eval.separation.bss_eval_sources(reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources (must have same shape as
estimated_sources)
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources (must have same shape as
reference_sources)
compute_permutation : bool, optional
compute permutation of estimate/source combinations (True by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc,)
vector of Signal to Distortion Ratios (SDR)
sir : np.ndarray, shape=(nsrc,)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc,)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc,)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number ``perm[j]`` corresponds to
true source number ``j``). Note: ``perm`` will be ``[0, 1, ...,
nsrc-1]`` if ``compute_permutation`` is ``False``.
References
----------
.. [#] Emmanuel Vincent, Shoko Araki, Fabian J. Theis, Guido Nolte, Pau
Bofill, Hiroshi Sawada, Alexey Ozerov, B. Vikrham Gowreesunker, Dominik
Lutter and Ngoc Q.K. Duong, "The Signal Separation Evaluation Campaign
(2007-2010): Achievements and remaining challenges", Signal Processing,
92, pp. 1928-1936, 2012.
"""
# make sure the input is of shape (nsrc, nsampl)
if estimated_sources.ndim == 1:
estimated_sources = estimated_sources[np.newaxis, :]
if reference_sources.ndim == 1:
reference_sources = reference_sources[np.newaxis, :]
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = estimated_sources.shape[0]
# does user desire permutations?
if compute_permutation:
# compute criteria for all possible pair matches
sdr = np.empty((nsrc, nsrc))
sir = np.empty((nsrc, nsrc))
sar = np.empty((nsrc, nsrc))
for jest in range(nsrc):
for jtrue in range(nsrc):
s_true, e_spat, e_interf, e_artif = \
_bss_decomp_mtifilt(reference_sources,
estimated_sources[jest],
jtrue, 512)
sdr[jest, jtrue], sir[jest, jtrue], sar[jest, jtrue] = \
_bss_source_crit(s_true, e_spat, e_interf, e_artif)
# select the best ordering
perms = list(itertools.permutations(list(range(nsrc))))
mean_sir = np.empty(len(perms))
dum = np.arange(nsrc)
for (i, perm) in enumerate(perms):
mean_sir[i] = np.mean(sir[perm, dum])
popt = perms[np.argmax(mean_sir)]
idx = (popt, dum)
return (sdr[idx], sir[idx], sar[idx], np.asarray(popt))
else:
# compute criteria for only the simple correspondence
# (estimate 1 is estimate corresponding to reference source 1, etc.)
sdr = np.empty(nsrc)
sir = np.empty(nsrc)
sar = np.empty(nsrc)
for j in range(nsrc):
s_true, e_spat, e_interf, e_artif = \
_bss_decomp_mtifilt(reference_sources,
estimated_sources[j],
j, 512)
sdr[j], sir[j], sar[j] = \
_bss_source_crit(s_true, e_spat, e_interf, e_artif)
# return the default permutation for compatibility
popt = np.arange(nsrc)
return (sdr, sir, sar, popt) | python | def bss_eval_sources(reference_sources, estimated_sources,
compute_permutation=True):
# make sure the input is of shape (nsrc, nsampl)
if estimated_sources.ndim == 1:
estimated_sources = estimated_sources[np.newaxis, :]
if reference_sources.ndim == 1:
reference_sources = reference_sources[np.newaxis, :]
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = estimated_sources.shape[0]
# does user desire permutations?
if compute_permutation:
# compute criteria for all possible pair matches
sdr = np.empty((nsrc, nsrc))
sir = np.empty((nsrc, nsrc))
sar = np.empty((nsrc, nsrc))
for jest in range(nsrc):
for jtrue in range(nsrc):
s_true, e_spat, e_interf, e_artif = \
_bss_decomp_mtifilt(reference_sources,
estimated_sources[jest],
jtrue, 512)
sdr[jest, jtrue], sir[jest, jtrue], sar[jest, jtrue] = \
_bss_source_crit(s_true, e_spat, e_interf, e_artif)
# select the best ordering
perms = list(itertools.permutations(list(range(nsrc))))
mean_sir = np.empty(len(perms))
dum = np.arange(nsrc)
for (i, perm) in enumerate(perms):
mean_sir[i] = np.mean(sir[perm, dum])
popt = perms[np.argmax(mean_sir)]
idx = (popt, dum)
return (sdr[idx], sir[idx], sar[idx], np.asarray(popt))
else:
# compute criteria for only the simple correspondence
# (estimate 1 is estimate corresponding to reference source 1, etc.)
sdr = np.empty(nsrc)
sir = np.empty(nsrc)
sar = np.empty(nsrc)
for j in range(nsrc):
s_true, e_spat, e_interf, e_artif = \
_bss_decomp_mtifilt(reference_sources,
estimated_sources[j],
j, 512)
sdr[j], sir[j], sar[j] = \
_bss_source_crit(s_true, e_spat, e_interf, e_artif)
# return the default permutation for compatibility
popt = np.arange(nsrc)
return (sdr, sir, sar, popt) | [
"def",
"bss_eval_sources",
"(",
"reference_sources",
",",
"estimated_sources",
",",
"compute_permutation",
"=",
"True",
")",
":",
"# make sure the input is of shape (nsrc, nsampl)",
"if",
"estimated_sources",
".",
"ndim",
"==",
"1",
":",
"estimated_sources",
"=",
"estimat... | Ordering and measurement of the separation quality for estimated source
signals in terms of filtered true source, interference and artifacts.
The decomposition allows a time-invariant filter distortion of length
512, as described in Section III.B of [#vincent2006performance]_.
Passing ``False`` for ``compute_permutation`` will improve the computation
performance of the evaluation; however, it is not always appropriate and
is not the way that the BSS_EVAL Matlab toolbox computes bss_eval_sources.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, sir, sar,
... perm) = mir_eval.separation.bss_eval_sources(reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources (must have same shape as
estimated_sources)
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources (must have same shape as
reference_sources)
compute_permutation : bool, optional
compute permutation of estimate/source combinations (True by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc,)
vector of Signal to Distortion Ratios (SDR)
sir : np.ndarray, shape=(nsrc,)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc,)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc,)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number ``perm[j]`` corresponds to
true source number ``j``). Note: ``perm`` will be ``[0, 1, ...,
nsrc-1]`` if ``compute_permutation`` is ``False``.
References
----------
.. [#] Emmanuel Vincent, Shoko Araki, Fabian J. Theis, Guido Nolte, Pau
Bofill, Hiroshi Sawada, Alexey Ozerov, B. Vikrham Gowreesunker, Dominik
Lutter and Ngoc Q.K. Duong, "The Signal Separation Evaluation Campaign
(2007-2010): Achievements and remaining challenges", Signal Processing,
92, pp. 1928-1936, 2012. | [
"Ordering",
"and",
"measurement",
"of",
"the",
"separation",
"quality",
"for",
"estimated",
"source",
"signals",
"in",
"terms",
"of",
"filtered",
"true",
"source",
"interference",
"and",
"artifacts",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L130-L241 |
21,464 | craffel/mir_eval | mir_eval/separation.py | bss_eval_sources_framewise | def bss_eval_sources_framewise(reference_sources, estimated_sources,
window=30*44100, hop=15*44100,
compute_permutation=False):
"""Framewise computation of bss_eval_sources
Please be aware that this function does not compute permutations (by
default) on the possible relations between reference_sources and
estimated_sources due to the dangers of a changing permutation. Therefore
(by default), it assumes that ``reference_sources[i]`` corresponds to
``estimated_sources[i]``. To enable computing permutations please set
``compute_permutation`` to be ``True`` and check that the returned ``perm``
is identical for all windows.
NOTE: if ``reference_sources`` and ``estimated_sources`` would be evaluated
using only a single window or are shorter than the window length, the
result of :func:`mir_eval.separation.bss_eval_sources` called on
``reference_sources`` and ``estimated_sources`` (with the
``compute_permutation`` parameter passed to
:func:`mir_eval.separation.bss_eval_sources`) is returned.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, sir, sar,
... perm) = mir_eval.separation.bss_eval_sources_framewise(
reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources (must have the same shape as
``estimated_sources``)
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources (must have the same shape as
``reference_sources``)
window : int, optional
Window length for framewise evaluation (default value is 30s at a
sample rate of 44.1kHz)
hop : int, optional
Hop size for framewise evaluation (default value is 15s at a
sample rate of 44.1kHz)
compute_permutation : bool, optional
compute permutation of estimate/source combinations for all windows
(False by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc, nframes)
vector of Signal to Distortion Ratios (SDR)
sir : np.ndarray, shape=(nsrc, nframes)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc, nframes)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc, nframes)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number ``perm[j]`` corresponds to
true source number ``j``). Note: ``perm`` will be ``range(nsrc)`` for
all windows if ``compute_permutation`` is ``False``
"""
# make sure the input is of shape (nsrc, nsampl)
if estimated_sources.ndim == 1:
estimated_sources = estimated_sources[np.newaxis, :]
if reference_sources.ndim == 1:
reference_sources = reference_sources[np.newaxis, :]
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = reference_sources.shape[0]
nwin = int(
np.floor((reference_sources.shape[1] - window + hop) / hop)
)
# if fewer than 2 windows would be evaluated, return the sources result
if nwin < 2:
result = bss_eval_sources(reference_sources,
estimated_sources,
compute_permutation)
return [np.expand_dims(score, -1) for score in result]
# compute the criteria across all windows
sdr = np.empty((nsrc, nwin))
sir = np.empty((nsrc, nwin))
sar = np.empty((nsrc, nwin))
perm = np.empty((nsrc, nwin))
# k iterates across all the windows
for k in range(nwin):
win_slice = slice(k * hop, k * hop + window)
ref_slice = reference_sources[:, win_slice]
est_slice = estimated_sources[:, win_slice]
# check for a silent frame
if (not _any_source_silent(ref_slice) and
not _any_source_silent(est_slice)):
sdr[:, k], sir[:, k], sar[:, k], perm[:, k] = bss_eval_sources(
ref_slice, est_slice, compute_permutation
)
else:
# if we have a silent frame set results as np.nan
sdr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
return sdr, sir, sar, perm | python | def bss_eval_sources_framewise(reference_sources, estimated_sources,
window=30*44100, hop=15*44100,
compute_permutation=False):
# make sure the input is of shape (nsrc, nsampl)
if estimated_sources.ndim == 1:
estimated_sources = estimated_sources[np.newaxis, :]
if reference_sources.ndim == 1:
reference_sources = reference_sources[np.newaxis, :]
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = reference_sources.shape[0]
nwin = int(
np.floor((reference_sources.shape[1] - window + hop) / hop)
)
# if fewer than 2 windows would be evaluated, return the sources result
if nwin < 2:
result = bss_eval_sources(reference_sources,
estimated_sources,
compute_permutation)
return [np.expand_dims(score, -1) for score in result]
# compute the criteria across all windows
sdr = np.empty((nsrc, nwin))
sir = np.empty((nsrc, nwin))
sar = np.empty((nsrc, nwin))
perm = np.empty((nsrc, nwin))
# k iterates across all the windows
for k in range(nwin):
win_slice = slice(k * hop, k * hop + window)
ref_slice = reference_sources[:, win_slice]
est_slice = estimated_sources[:, win_slice]
# check for a silent frame
if (not _any_source_silent(ref_slice) and
not _any_source_silent(est_slice)):
sdr[:, k], sir[:, k], sar[:, k], perm[:, k] = bss_eval_sources(
ref_slice, est_slice, compute_permutation
)
else:
# if we have a silent frame set results as np.nan
sdr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
return sdr, sir, sar, perm | [
"def",
"bss_eval_sources_framewise",
"(",
"reference_sources",
",",
"estimated_sources",
",",
"window",
"=",
"30",
"*",
"44100",
",",
"hop",
"=",
"15",
"*",
"44100",
",",
"compute_permutation",
"=",
"False",
")",
":",
"# make sure the input is of shape (nsrc, nsampl)"... | Framewise computation of bss_eval_sources
Please be aware that this function does not compute permutations (by
default) on the possible relations between reference_sources and
estimated_sources due to the dangers of a changing permutation. Therefore
(by default), it assumes that ``reference_sources[i]`` corresponds to
``estimated_sources[i]``. To enable computing permutations please set
``compute_permutation`` to be ``True`` and check that the returned ``perm``
is identical for all windows.
NOTE: if ``reference_sources`` and ``estimated_sources`` would be evaluated
using only a single window or are shorter than the window length, the
result of :func:`mir_eval.separation.bss_eval_sources` called on
``reference_sources`` and ``estimated_sources`` (with the
``compute_permutation`` parameter passed to
:func:`mir_eval.separation.bss_eval_sources`) is returned.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, sir, sar,
... perm) = mir_eval.separation.bss_eval_sources_framewise(
reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing true sources (must have the same shape as
``estimated_sources``)
estimated_sources : np.ndarray, shape=(nsrc, nsampl)
matrix containing estimated sources (must have the same shape as
``reference_sources``)
window : int, optional
Window length for framewise evaluation (default value is 30s at a
sample rate of 44.1kHz)
hop : int, optional
Hop size for framewise evaluation (default value is 15s at a
sample rate of 44.1kHz)
compute_permutation : bool, optional
compute permutation of estimate/source combinations for all windows
(False by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc, nframes)
vector of Signal to Distortion Ratios (SDR)
sir : np.ndarray, shape=(nsrc, nframes)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc, nframes)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc, nframes)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number ``perm[j]`` corresponds to
true source number ``j``). Note: ``perm`` will be ``range(nsrc)`` for
all windows if ``compute_permutation`` is ``False`` | [
"Framewise",
"computation",
"of",
"bss_eval_sources"
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L244-L353 |
21,465 | craffel/mir_eval | mir_eval/separation.py | bss_eval_images_framewise | def bss_eval_images_framewise(reference_sources, estimated_sources,
window=30*44100, hop=15*44100,
compute_permutation=False):
"""Framewise computation of bss_eval_images
Please be aware that this function does not compute permutations (by
default) on the possible relations between ``reference_sources`` and
``estimated_sources`` due to the dangers of a changing permutation.
Therefore (by default), it assumes that ``reference_sources[i]``
corresponds to ``estimated_sources[i]``. To enable computing permutations
please set ``compute_permutation`` to be ``True`` and check that the
returned ``perm`` is identical for all windows.
NOTE: if ``reference_sources`` and ``estimated_sources`` would be evaluated
using only a single window or are shorter than the window length, the
result of ``bss_eval_images`` called on ``reference_sources`` and
``estimated_sources`` (with the ``compute_permutation`` parameter passed to
``bss_eval_images``) is returned
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, isr, sir, sar,
... perm) = mir_eval.separation.bss_eval_images_framewise(
reference_sources,
... estimated_sources,
window,
.... hop)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing true sources (must have the same shape as
``estimated_sources``)
estimated_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing estimated sources (must have the same shape as
``reference_sources``)
window : int
Window length for framewise evaluation
hop : int
Hop size for framewise evaluation
compute_permutation : bool, optional
compute permutation of estimate/source combinations for all windows
(False by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc, nframes)
vector of Signal to Distortion Ratios (SDR)
isr : np.ndarray, shape=(nsrc, nframes)
vector of source Image to Spatial distortion Ratios (ISR)
sir : np.ndarray, shape=(nsrc, nframes)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc, nframes)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc, nframes)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number perm[j] corresponds to
true source number j)
Note: perm will be range(nsrc) for all windows if compute_permutation
is False
"""
# make sure the input has 3 dimensions
# assuming input is in shape (nsampl) or (nsrc, nsampl)
estimated_sources = np.atleast_3d(estimated_sources)
reference_sources = np.atleast_3d(reference_sources)
# we will ensure input doesn't have more than 3 dimensions in validate
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = reference_sources.shape[0]
nwin = int(
np.floor((reference_sources.shape[1] - window + hop) / hop)
)
# if fewer than 2 windows would be evaluated, return the images result
if nwin < 2:
result = bss_eval_images(reference_sources,
estimated_sources,
compute_permutation)
return [np.expand_dims(score, -1) for score in result]
# compute the criteria across all windows
sdr = np.empty((nsrc, nwin))
isr = np.empty((nsrc, nwin))
sir = np.empty((nsrc, nwin))
sar = np.empty((nsrc, nwin))
perm = np.empty((nsrc, nwin))
# k iterates across all the windows
for k in range(nwin):
win_slice = slice(k * hop, k * hop + window)
ref_slice = reference_sources[:, win_slice, :]
est_slice = estimated_sources[:, win_slice, :]
# check for a silent frame
if (not _any_source_silent(ref_slice) and
not _any_source_silent(est_slice)):
sdr[:, k], isr[:, k], sir[:, k], sar[:, k], perm[:, k] = \
bss_eval_images(
ref_slice, est_slice, compute_permutation
)
else:
# if we have a silent frame set results as np.nan
sdr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
return sdr, isr, sir, sar, perm | python | def bss_eval_images_framewise(reference_sources, estimated_sources,
window=30*44100, hop=15*44100,
compute_permutation=False):
# make sure the input has 3 dimensions
# assuming input is in shape (nsampl) or (nsrc, nsampl)
estimated_sources = np.atleast_3d(estimated_sources)
reference_sources = np.atleast_3d(reference_sources)
# we will ensure input doesn't have more than 3 dimensions in validate
validate(reference_sources, estimated_sources)
# If empty matrices were supplied, return empty lists (special case)
if reference_sources.size == 0 or estimated_sources.size == 0:
return np.array([]), np.array([]), np.array([]), np.array([])
nsrc = reference_sources.shape[0]
nwin = int(
np.floor((reference_sources.shape[1] - window + hop) / hop)
)
# if fewer than 2 windows would be evaluated, return the images result
if nwin < 2:
result = bss_eval_images(reference_sources,
estimated_sources,
compute_permutation)
return [np.expand_dims(score, -1) for score in result]
# compute the criteria across all windows
sdr = np.empty((nsrc, nwin))
isr = np.empty((nsrc, nwin))
sir = np.empty((nsrc, nwin))
sar = np.empty((nsrc, nwin))
perm = np.empty((nsrc, nwin))
# k iterates across all the windows
for k in range(nwin):
win_slice = slice(k * hop, k * hop + window)
ref_slice = reference_sources[:, win_slice, :]
est_slice = estimated_sources[:, win_slice, :]
# check for a silent frame
if (not _any_source_silent(ref_slice) and
not _any_source_silent(est_slice)):
sdr[:, k], isr[:, k], sir[:, k], sar[:, k], perm[:, k] = \
bss_eval_images(
ref_slice, est_slice, compute_permutation
)
else:
# if we have a silent frame set results as np.nan
sdr[:, k] = sir[:, k] = sar[:, k] = perm[:, k] = np.nan
return sdr, isr, sir, sar, perm | [
"def",
"bss_eval_images_framewise",
"(",
"reference_sources",
",",
"estimated_sources",
",",
"window",
"=",
"30",
"*",
"44100",
",",
"hop",
"=",
"15",
"*",
"44100",
",",
"compute_permutation",
"=",
"False",
")",
":",
"# make sure the input has 3 dimensions",
"# assu... | Framewise computation of bss_eval_images
Please be aware that this function does not compute permutations (by
default) on the possible relations between ``reference_sources`` and
``estimated_sources`` due to the dangers of a changing permutation.
Therefore (by default), it assumes that ``reference_sources[i]``
corresponds to ``estimated_sources[i]``. To enable computing permutations
please set ``compute_permutation`` to be ``True`` and check that the
returned ``perm`` is identical for all windows.
NOTE: if ``reference_sources`` and ``estimated_sources`` would be evaluated
using only a single window or are shorter than the window length, the
result of ``bss_eval_images`` called on ``reference_sources`` and
``estimated_sources`` (with the ``compute_permutation`` parameter passed to
``bss_eval_images``) is returned
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated
>>> # source
>>> (sdr, isr, sir, sar,
... perm) = mir_eval.separation.bss_eval_images_framewise(
reference_sources,
... estimated_sources,
window,
.... hop)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing true sources (must have the same shape as
``estimated_sources``)
estimated_sources : np.ndarray, shape=(nsrc, nsampl, nchan)
matrix containing estimated sources (must have the same shape as
``reference_sources``)
window : int
Window length for framewise evaluation
hop : int
Hop size for framewise evaluation
compute_permutation : bool, optional
compute permutation of estimate/source combinations for all windows
(False by default)
Returns
-------
sdr : np.ndarray, shape=(nsrc, nframes)
vector of Signal to Distortion Ratios (SDR)
isr : np.ndarray, shape=(nsrc, nframes)
vector of source Image to Spatial distortion Ratios (ISR)
sir : np.ndarray, shape=(nsrc, nframes)
vector of Source to Interference Ratios (SIR)
sar : np.ndarray, shape=(nsrc, nframes)
vector of Sources to Artifacts Ratios (SAR)
perm : np.ndarray, shape=(nsrc, nframes)
vector containing the best ordering of estimated sources in
the mean SIR sense (estimated source number perm[j] corresponds to
true source number j)
Note: perm will be range(nsrc) for all windows if compute_permutation
is False | [
"Framewise",
"computation",
"of",
"bss_eval_images"
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L493-L606 |
21,466 | craffel/mir_eval | mir_eval/separation.py | _project | def _project(reference_sources, estimated_source, flen):
"""Least-squares projection of estimated source on the subspace spanned by
delayed versions of reference sources, with delays between 0 and flen-1
"""
nsrc = reference_sources.shape[0]
nsampl = reference_sources.shape[1]
# computing coefficients of least squares problem via FFT ##
# zero padding and FFT of input data
reference_sources = np.hstack((reference_sources,
np.zeros((nsrc, flen - 1))))
estimated_source = np.hstack((estimated_source, np.zeros(flen - 1)))
n_fft = int(2**np.ceil(np.log2(nsampl + flen - 1.)))
sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=1)
sef = scipy.fftpack.fft(estimated_source, n=n_fft)
# inner products between delayed versions of reference_sources
G = np.zeros((nsrc * flen, nsrc * flen))
for i in range(nsrc):
for j in range(nsrc):
ssf = sf[i] * np.conj(sf[j])
ssf = np.real(scipy.fftpack.ifft(ssf))
ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
r=ssf[:flen])
G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
# inner products between estimated_source and delayed versions of
# reference_sources
D = np.zeros(nsrc * flen)
for i in range(nsrc):
ssef = sf[i] * np.conj(sef)
ssef = np.real(scipy.fftpack.ifft(ssef))
D[i * flen: (i+1) * flen] = np.hstack((ssef[0], ssef[-1:-flen:-1]))
# Computing projection
# Distortion filters
try:
C = np.linalg.solve(G, D).reshape(flen, nsrc, order='F')
except np.linalg.linalg.LinAlgError:
C = np.linalg.lstsq(G, D)[0].reshape(flen, nsrc, order='F')
# Filtering
sproj = np.zeros(nsampl + flen - 1)
for i in range(nsrc):
sproj += fftconvolve(C[:, i], reference_sources[i])[:nsampl + flen - 1]
return sproj | python | def _project(reference_sources, estimated_source, flen):
nsrc = reference_sources.shape[0]
nsampl = reference_sources.shape[1]
# computing coefficients of least squares problem via FFT ##
# zero padding and FFT of input data
reference_sources = np.hstack((reference_sources,
np.zeros((nsrc, flen - 1))))
estimated_source = np.hstack((estimated_source, np.zeros(flen - 1)))
n_fft = int(2**np.ceil(np.log2(nsampl + flen - 1.)))
sf = scipy.fftpack.fft(reference_sources, n=n_fft, axis=1)
sef = scipy.fftpack.fft(estimated_source, n=n_fft)
# inner products between delayed versions of reference_sources
G = np.zeros((nsrc * flen, nsrc * flen))
for i in range(nsrc):
for j in range(nsrc):
ssf = sf[i] * np.conj(sf[j])
ssf = np.real(scipy.fftpack.ifft(ssf))
ss = toeplitz(np.hstack((ssf[0], ssf[-1:-flen:-1])),
r=ssf[:flen])
G[i * flen: (i+1) * flen, j * flen: (j+1) * flen] = ss
G[j * flen: (j+1) * flen, i * flen: (i+1) * flen] = ss.T
# inner products between estimated_source and delayed versions of
# reference_sources
D = np.zeros(nsrc * flen)
for i in range(nsrc):
ssef = sf[i] * np.conj(sef)
ssef = np.real(scipy.fftpack.ifft(ssef))
D[i * flen: (i+1) * flen] = np.hstack((ssef[0], ssef[-1:-flen:-1]))
# Computing projection
# Distortion filters
try:
C = np.linalg.solve(G, D).reshape(flen, nsrc, order='F')
except np.linalg.linalg.LinAlgError:
C = np.linalg.lstsq(G, D)[0].reshape(flen, nsrc, order='F')
# Filtering
sproj = np.zeros(nsampl + flen - 1)
for i in range(nsrc):
sproj += fftconvolve(C[:, i], reference_sources[i])[:nsampl + flen - 1]
return sproj | [
"def",
"_project",
"(",
"reference_sources",
",",
"estimated_source",
",",
"flen",
")",
":",
"nsrc",
"=",
"reference_sources",
".",
"shape",
"[",
"0",
"]",
"nsampl",
"=",
"reference_sources",
".",
"shape",
"[",
"1",
"]",
"# computing coefficients of least squares ... | Least-squares projection of estimated source on the subspace spanned by
delayed versions of reference sources, with delays between 0 and flen-1 | [
"Least",
"-",
"squares",
"projection",
"of",
"estimated",
"source",
"on",
"the",
"subspace",
"spanned",
"by",
"delayed",
"versions",
"of",
"reference",
"sources",
"with",
"delays",
"between",
"0",
"and",
"flen",
"-",
"1"
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L679-L722 |
21,467 | craffel/mir_eval | mir_eval/separation.py | _bss_image_crit | def _bss_image_crit(s_true, e_spat, e_interf, e_artif):
"""Measurement of the separation quality for a given image in terms of
filtered true source, spatial error, interference and artifacts.
"""
# energy ratios
sdr = _safe_db(np.sum(s_true**2), np.sum((e_spat+e_interf+e_artif)**2))
isr = _safe_db(np.sum(s_true**2), np.sum(e_spat**2))
sir = _safe_db(np.sum((s_true+e_spat)**2), np.sum(e_interf**2))
sar = _safe_db(np.sum((s_true+e_spat+e_interf)**2), np.sum(e_artif**2))
return (sdr, isr, sir, sar) | python | def _bss_image_crit(s_true, e_spat, e_interf, e_artif):
# energy ratios
sdr = _safe_db(np.sum(s_true**2), np.sum((e_spat+e_interf+e_artif)**2))
isr = _safe_db(np.sum(s_true**2), np.sum(e_spat**2))
sir = _safe_db(np.sum((s_true+e_spat)**2), np.sum(e_interf**2))
sar = _safe_db(np.sum((s_true+e_spat+e_interf)**2), np.sum(e_artif**2))
return (sdr, isr, sir, sar) | [
"def",
"_bss_image_crit",
"(",
"s_true",
",",
"e_spat",
",",
"e_interf",
",",
"e_artif",
")",
":",
"# energy ratios",
"sdr",
"=",
"_safe_db",
"(",
"np",
".",
"sum",
"(",
"s_true",
"**",
"2",
")",
",",
"np",
".",
"sum",
"(",
"(",
"e_spat",
"+",
"e_int... | Measurement of the separation quality for a given image in terms of
filtered true source, spatial error, interference and artifacts. | [
"Measurement",
"of",
"the",
"separation",
"quality",
"for",
"a",
"given",
"image",
"in",
"terms",
"of",
"filtered",
"true",
"source",
"spatial",
"error",
"interference",
"and",
"artifacts",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L815-L824 |
21,468 | craffel/mir_eval | mir_eval/separation.py | _safe_db | def _safe_db(num, den):
"""Properly handle the potential +Inf db SIR, instead of raising a
RuntimeWarning. Only denominator is checked because the numerator can never
be 0.
"""
if den == 0:
return np.Inf
return 10 * np.log10(num / den) | python | def _safe_db(num, den):
if den == 0:
return np.Inf
return 10 * np.log10(num / den) | [
"def",
"_safe_db",
"(",
"num",
",",
"den",
")",
":",
"if",
"den",
"==",
"0",
":",
"return",
"np",
".",
"Inf",
"return",
"10",
"*",
"np",
".",
"log10",
"(",
"num",
"/",
"den",
")"
] | Properly handle the potential +Inf db SIR, instead of raising a
RuntimeWarning. Only denominator is checked because the numerator can never
be 0. | [
"Properly",
"handle",
"the",
"potential",
"+",
"Inf",
"db",
"SIR",
"instead",
"of",
"raising",
"a",
"RuntimeWarning",
".",
"Only",
"denominator",
"is",
"checked",
"because",
"the",
"numerator",
"can",
"never",
"be",
"0",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L827-L834 |
21,469 | craffel/mir_eval | mir_eval/separation.py | evaluate | def evaluate(reference_sources, estimated_sources, **kwargs):
"""Compute all metrics for the given reference and estimated signals.
NOTE: This will always compute :func:`mir_eval.separation.bss_eval_images`
for any valid input and will additionally compute
:func:`mir_eval.separation.bss_eval_sources` for valid input with fewer
than 3 dimensions.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated source
>>> scores = mir_eval.separation.evaluate(reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
matrix containing true sources
estimated_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
matrix containing estimated sources
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved.
"""
# Compute all the metrics
scores = collections.OrderedDict()
sdr, isr, sir, sar, perm = util.filter_kwargs(
bss_eval_images,
reference_sources,
estimated_sources,
**kwargs
)
scores['Images - Source to Distortion'] = sdr.tolist()
scores['Images - Image to Spatial'] = isr.tolist()
scores['Images - Source to Interference'] = sir.tolist()
scores['Images - Source to Artifact'] = sar.tolist()
scores['Images - Source permutation'] = perm.tolist()
sdr, isr, sir, sar, perm = util.filter_kwargs(
bss_eval_images_framewise,
reference_sources,
estimated_sources,
**kwargs
)
scores['Images Frames - Source to Distortion'] = sdr.tolist()
scores['Images Frames - Image to Spatial'] = isr.tolist()
scores['Images Frames - Source to Interference'] = sir.tolist()
scores['Images Frames - Source to Artifact'] = sar.tolist()
scores['Images Frames - Source permutation'] = perm.tolist()
# Verify we can compute sources on this input
if reference_sources.ndim < 3 and estimated_sources.ndim < 3:
sdr, sir, sar, perm = util.filter_kwargs(
bss_eval_sources_framewise,
reference_sources,
estimated_sources,
**kwargs
)
scores['Sources Frames - Source to Distortion'] = sdr.tolist()
scores['Sources Frames - Source to Interference'] = sir.tolist()
scores['Sources Frames - Source to Artifact'] = sar.tolist()
scores['Sources Frames - Source permutation'] = perm.tolist()
sdr, sir, sar, perm = util.filter_kwargs(
bss_eval_sources,
reference_sources,
estimated_sources,
**kwargs
)
scores['Sources - Source to Distortion'] = sdr.tolist()
scores['Sources - Source to Interference'] = sir.tolist()
scores['Sources - Source to Artifact'] = sar.tolist()
scores['Sources - Source permutation'] = perm.tolist()
return scores | python | def evaluate(reference_sources, estimated_sources, **kwargs):
# Compute all the metrics
scores = collections.OrderedDict()
sdr, isr, sir, sar, perm = util.filter_kwargs(
bss_eval_images,
reference_sources,
estimated_sources,
**kwargs
)
scores['Images - Source to Distortion'] = sdr.tolist()
scores['Images - Image to Spatial'] = isr.tolist()
scores['Images - Source to Interference'] = sir.tolist()
scores['Images - Source to Artifact'] = sar.tolist()
scores['Images - Source permutation'] = perm.tolist()
sdr, isr, sir, sar, perm = util.filter_kwargs(
bss_eval_images_framewise,
reference_sources,
estimated_sources,
**kwargs
)
scores['Images Frames - Source to Distortion'] = sdr.tolist()
scores['Images Frames - Image to Spatial'] = isr.tolist()
scores['Images Frames - Source to Interference'] = sir.tolist()
scores['Images Frames - Source to Artifact'] = sar.tolist()
scores['Images Frames - Source permutation'] = perm.tolist()
# Verify we can compute sources on this input
if reference_sources.ndim < 3 and estimated_sources.ndim < 3:
sdr, sir, sar, perm = util.filter_kwargs(
bss_eval_sources_framewise,
reference_sources,
estimated_sources,
**kwargs
)
scores['Sources Frames - Source to Distortion'] = sdr.tolist()
scores['Sources Frames - Source to Interference'] = sir.tolist()
scores['Sources Frames - Source to Artifact'] = sar.tolist()
scores['Sources Frames - Source permutation'] = perm.tolist()
sdr, sir, sar, perm = util.filter_kwargs(
bss_eval_sources,
reference_sources,
estimated_sources,
**kwargs
)
scores['Sources - Source to Distortion'] = sdr.tolist()
scores['Sources - Source to Interference'] = sir.tolist()
scores['Sources - Source to Artifact'] = sar.tolist()
scores['Sources - Source permutation'] = perm.tolist()
return scores | [
"def",
"evaluate",
"(",
"reference_sources",
",",
"estimated_sources",
",",
"*",
"*",
"kwargs",
")",
":",
"# Compute all the metrics",
"scores",
"=",
"collections",
".",
"OrderedDict",
"(",
")",
"sdr",
",",
"isr",
",",
"sir",
",",
"sar",
",",
"perm",
"=",
... | Compute all metrics for the given reference and estimated signals.
NOTE: This will always compute :func:`mir_eval.separation.bss_eval_images`
for any valid input and will additionally compute
:func:`mir_eval.separation.bss_eval_sources` for valid input with fewer
than 3 dimensions.
Examples
--------
>>> # reference_sources[n] should be an ndarray of samples of the
>>> # n'th reference source
>>> # estimated_sources[n] should be the same for the n'th estimated source
>>> scores = mir_eval.separation.evaluate(reference_sources,
... estimated_sources)
Parameters
----------
reference_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
matrix containing true sources
estimated_sources : np.ndarray, shape=(nsrc, nsampl[, nchan])
matrix containing estimated sources
kwargs
Additional keyword arguments which will be passed to the
appropriate metric or preprocessing functions.
Returns
-------
scores : dict
Dictionary of scores, where the key is the metric name (str) and
the value is the (float) score achieved. | [
"Compute",
"all",
"metrics",
"for",
"the",
"given",
"reference",
"and",
"estimated",
"signals",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/separation.py#L837-L921 |
21,470 | craffel/mir_eval | mir_eval/sonify.py | clicks | def clicks(times, fs, click=None, length=None):
"""Returns a signal with the signal 'click' placed at each specified time
Parameters
----------
times : np.ndarray
times to place clicks, in seconds
fs : int
desired sampling rate of the output signal
click : np.ndarray
click signal, defaults to a 1 kHz blip
length : int
desired number of samples in the output signal,
defaults to ``times.max()*fs + click.shape[0] + 1``
Returns
-------
click_signal : np.ndarray
Synthesized click signal
"""
# Create default click signal
if click is None:
# 1 kHz tone, 100ms
click = np.sin(2*np.pi*np.arange(fs*.1)*1000/(1.*fs))
# Exponential decay
click *= np.exp(-np.arange(fs*.1)/(fs*.01))
# Set default length
if length is None:
length = int(times.max()*fs + click.shape[0] + 1)
# Pre-allocate click signal
click_signal = np.zeros(length)
# Place clicks
for time in times:
# Compute the boundaries of the click
start = int(time*fs)
end = start + click.shape[0]
# Make sure we don't try to output past the end of the signal
if start >= length:
break
if end >= length:
click_signal[start:] = click[:length - start]
break
# Normally, just add a click here
click_signal[start:end] = click
return click_signal | python | def clicks(times, fs, click=None, length=None):
# Create default click signal
if click is None:
# 1 kHz tone, 100ms
click = np.sin(2*np.pi*np.arange(fs*.1)*1000/(1.*fs))
# Exponential decay
click *= np.exp(-np.arange(fs*.1)/(fs*.01))
# Set default length
if length is None:
length = int(times.max()*fs + click.shape[0] + 1)
# Pre-allocate click signal
click_signal = np.zeros(length)
# Place clicks
for time in times:
# Compute the boundaries of the click
start = int(time*fs)
end = start + click.shape[0]
# Make sure we don't try to output past the end of the signal
if start >= length:
break
if end >= length:
click_signal[start:] = click[:length - start]
break
# Normally, just add a click here
click_signal[start:end] = click
return click_signal | [
"def",
"clicks",
"(",
"times",
",",
"fs",
",",
"click",
"=",
"None",
",",
"length",
"=",
"None",
")",
":",
"# Create default click signal",
"if",
"click",
"is",
"None",
":",
"# 1 kHz tone, 100ms",
"click",
"=",
"np",
".",
"sin",
"(",
"2",
"*",
"np",
".... | Returns a signal with the signal 'click' placed at each specified time
Parameters
----------
times : np.ndarray
times to place clicks, in seconds
fs : int
desired sampling rate of the output signal
click : np.ndarray
click signal, defaults to a 1 kHz blip
length : int
desired number of samples in the output signal,
defaults to ``times.max()*fs + click.shape[0] + 1``
Returns
-------
click_signal : np.ndarray
Synthesized click signal | [
"Returns",
"a",
"signal",
"with",
"the",
"signal",
"click",
"placed",
"at",
"each",
"specified",
"time"
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/sonify.py#L14-L60 |
21,471 | craffel/mir_eval | mir_eval/sonify.py | time_frequency | def time_frequency(gram, frequencies, times, fs, function=np.sin, length=None,
n_dec=1):
"""Reverse synthesis of a time-frequency representation of a signal
Parameters
----------
gram : np.ndarray
``gram[n, m]`` is the magnitude of ``frequencies[n]``
from ``times[m]`` to ``times[m + 1]``
Non-positive magnitudes are interpreted as silence.
frequencies : np.ndarray
array of size ``gram.shape[0]`` denoting the frequency of
each row of gram
times : np.ndarray, shape= ``(gram.shape[1],)`` or ``(gram.shape[1], 2)``
Either the start time of each column in the gram,
or the time interval corresponding to each column.
fs : int
desired sampling rate of the output signal
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``times[-1]*fs``
n_dec : int
the number of decimals used to approximate each sonfied frequency.
Defaults to 1 decimal place. Higher precision will be slower.
Returns
-------
output : np.ndarray
synthesized version of the piano roll
"""
# Default value for length
if times.ndim == 1:
# Convert to intervals
times = util.boundaries_to_intervals(times)
if length is None:
length = int(times[-1, 1] * fs)
times, _ = util.adjust_intervals(times, t_max=length)
# Truncate times so that the shape matches gram
n_times = gram.shape[1]
times = times[:n_times]
def _fast_synthesize(frequency):
"""A faster way to synthesize a signal.
Generate one cycle, and simulate arbitrary repetitions
using array indexing tricks.
"""
# hack so that we can ensure an integer number of periods and samples
# rounds frequency to 1st decimal, s.t. 10 * frequency will be an int
frequency = np.round(frequency, n_dec)
# Generate 10*frequency periods at this frequency
# Equivalent to n_samples = int(n_periods * fs / frequency)
# n_periods = 10*frequency is the smallest integer that guarantees
# that n_samples will be an integer, since assuming 10*frequency
# is an integer
n_samples = int(10.0**n_dec * fs)
short_signal = function(2.0 * np.pi * np.arange(n_samples) *
frequency / fs)
# Calculate the number of loops we need to fill the duration
n_repeats = int(np.ceil(length/float(short_signal.shape[0])))
# Simulate tiling the short buffer by using stride tricks
long_signal = as_strided(short_signal,
shape=(n_repeats, len(short_signal)),
strides=(0, short_signal.itemsize))
# Use a flatiter to simulate a long 1D buffer
return long_signal.flat
def _const_interpolator(value):
"""Return a function that returns `value`
no matter the input.
"""
def __interpolator(x):
return value
return __interpolator
# Threshold the tfgram to remove non-positive values
gram = np.maximum(gram, 0)
# Pre-allocate output signal
output = np.zeros(length)
time_centers = np.mean(times, axis=1) * float(fs)
for n, frequency in enumerate(frequencies):
# Get a waveform of length samples at this frequency
wave = _fast_synthesize(frequency)
# Interpolate the values in gram over the time grid
if len(time_centers) > 1:
gram_interpolator = interp1d(
time_centers, gram[n, :],
kind='linear', bounds_error=False,
fill_value=0.0)
# If only one time point, create constant interpolator
else:
gram_interpolator = _const_interpolator(gram[n, 0])
# Scale each time interval by the piano roll magnitude
for m, (start, end) in enumerate((times * fs).astype(int)):
# Clip the timings to make sure the indices are valid
start, end = max(start, 0), min(end, length)
# add to waveform
output[start:end] += (
wave[start:end] * gram_interpolator(np.arange(start, end)))
# Normalize, but only if there's non-zero values
norm = np.abs(output).max()
if norm >= np.finfo(output.dtype).tiny:
output /= norm
return output | python | def time_frequency(gram, frequencies, times, fs, function=np.sin, length=None,
n_dec=1):
# Default value for length
if times.ndim == 1:
# Convert to intervals
times = util.boundaries_to_intervals(times)
if length is None:
length = int(times[-1, 1] * fs)
times, _ = util.adjust_intervals(times, t_max=length)
# Truncate times so that the shape matches gram
n_times = gram.shape[1]
times = times[:n_times]
def _fast_synthesize(frequency):
"""A faster way to synthesize a signal.
Generate one cycle, and simulate arbitrary repetitions
using array indexing tricks.
"""
# hack so that we can ensure an integer number of periods and samples
# rounds frequency to 1st decimal, s.t. 10 * frequency will be an int
frequency = np.round(frequency, n_dec)
# Generate 10*frequency periods at this frequency
# Equivalent to n_samples = int(n_periods * fs / frequency)
# n_periods = 10*frequency is the smallest integer that guarantees
# that n_samples will be an integer, since assuming 10*frequency
# is an integer
n_samples = int(10.0**n_dec * fs)
short_signal = function(2.0 * np.pi * np.arange(n_samples) *
frequency / fs)
# Calculate the number of loops we need to fill the duration
n_repeats = int(np.ceil(length/float(short_signal.shape[0])))
# Simulate tiling the short buffer by using stride tricks
long_signal = as_strided(short_signal,
shape=(n_repeats, len(short_signal)),
strides=(0, short_signal.itemsize))
# Use a flatiter to simulate a long 1D buffer
return long_signal.flat
def _const_interpolator(value):
"""Return a function that returns `value`
no matter the input.
"""
def __interpolator(x):
return value
return __interpolator
# Threshold the tfgram to remove non-positive values
gram = np.maximum(gram, 0)
# Pre-allocate output signal
output = np.zeros(length)
time_centers = np.mean(times, axis=1) * float(fs)
for n, frequency in enumerate(frequencies):
# Get a waveform of length samples at this frequency
wave = _fast_synthesize(frequency)
# Interpolate the values in gram over the time grid
if len(time_centers) > 1:
gram_interpolator = interp1d(
time_centers, gram[n, :],
kind='linear', bounds_error=False,
fill_value=0.0)
# If only one time point, create constant interpolator
else:
gram_interpolator = _const_interpolator(gram[n, 0])
# Scale each time interval by the piano roll magnitude
for m, (start, end) in enumerate((times * fs).astype(int)):
# Clip the timings to make sure the indices are valid
start, end = max(start, 0), min(end, length)
# add to waveform
output[start:end] += (
wave[start:end] * gram_interpolator(np.arange(start, end)))
# Normalize, but only if there's non-zero values
norm = np.abs(output).max()
if norm >= np.finfo(output.dtype).tiny:
output /= norm
return output | [
"def",
"time_frequency",
"(",
"gram",
",",
"frequencies",
",",
"times",
",",
"fs",
",",
"function",
"=",
"np",
".",
"sin",
",",
"length",
"=",
"None",
",",
"n_dec",
"=",
"1",
")",
":",
"# Default value for length",
"if",
"times",
".",
"ndim",
"==",
"1"... | Reverse synthesis of a time-frequency representation of a signal
Parameters
----------
gram : np.ndarray
``gram[n, m]`` is the magnitude of ``frequencies[n]``
from ``times[m]`` to ``times[m + 1]``
Non-positive magnitudes are interpreted as silence.
frequencies : np.ndarray
array of size ``gram.shape[0]`` denoting the frequency of
each row of gram
times : np.ndarray, shape= ``(gram.shape[1],)`` or ``(gram.shape[1], 2)``
Either the start time of each column in the gram,
or the time interval corresponding to each column.
fs : int
desired sampling rate of the output signal
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``times[-1]*fs``
n_dec : int
the number of decimals used to approximate each sonified frequency.
Defaults to 1 decimal place. Higher precision will be slower.
Returns
-------
output : np.ndarray
synthesized version of the piano roll | [
"Reverse",
"synthesis",
"of",
"a",
"time",
"-",
"frequency",
"representation",
"of",
"a",
"signal"
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/sonify.py#L63-L184 |
def pitch_contour(times, frequencies, fs, amplitudes=None, function=np.sin,
                  length=None, kind='linear'):
    '''Sonify a pitch contour.

    Parameters
    ----------
    times : np.ndarray
        time indices for each frequency measurement, in seconds
    frequencies : np.ndarray
        frequency measurements, in Hz.
        Non-positive measurements will be interpreted as un-voiced samples.
    fs : int
        desired sampling rate of the output signal
    amplitudes : np.ndarray
        amplitude measurements, nonnegative
        defaults to ``np.ones((length,))``
    function : function
        function to use to synthesize notes, should be :math:`2\pi`-periodic
    length : int
        desired number of samples in the output signal,
        defaults to ``max(times)*fs``
    kind : str
        Interpolation mode for the frequency and amplitude values.
        See: ``scipy.interpolate.interp1d`` for valid settings.

    Returns
    -------
    output : np.ndarray
        synthesized version of the pitch contour
    '''
    fs = float(fs)

    # Default the output length to cover the last annotated time
    if length is None:
        length = int(times.max() * fs)

    # Clip negative frequencies to zero.  Since wave(0) == 0, the
    # corresponding instants come out silent (un-voiced).
    frequencies = np.maximum(frequencies, 0.0)

    sample_grid = np.arange(length)
    sample_times = times * fs

    # Interpolate per-sample angular increments onto the sample grid;
    # samples outside the annotated range get zero (silence).
    freq_interpolator = interp1d(sample_times, 2 * np.pi * frequencies / fs,
                                 kind=kind, fill_value=0.0,
                                 bounds_error=False, copy=False)
    phase_increments = freq_interpolator(sample_grid)

    # Interpolate the amplitude envelope, defaulting to unit gain
    if amplitudes is None:
        envelope = np.ones((length, ))
    else:
        amp_interpolator = interp1d(sample_times, amplitudes, kind=kind,
                                    fill_value=0.0, bounds_error=False,
                                    copy=False)
        envelope = amp_interpolator(sample_grid)

    # Integrate frequency into phase and synthesize the waveform
    return envelope * function(np.cumsum(phase_increments))
length=None, kind='linear'):
'''Sonify a pitch contour.
Parameters
----------
times : np.ndarray
time indices for each frequency measurement, in seconds
frequencies : np.ndarray
frequency measurements, in Hz.
Non-positive measurements will be interpreted as un-voiced samples.
fs : int
desired sampling rate of the output signal
amplitudes : np.ndarray
amplitude measurements, nonnegative
defaults to ``np.ones((length,))``
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``max(times)*fs``
kind : str
Interpolation mode for the frequency and amplitude values.
See: ``scipy.interpolate.interp1d`` for valid settings.
Returns
-------
output : np.ndarray
synthesized version of the pitch contour
'''
fs = float(fs)
if length is None:
length = int(times.max() * fs)
# Squash the negative frequencies.
# wave(0) = 0, so clipping here will un-voice the corresponding instants
frequencies = np.maximum(frequencies, 0.0)
# Build a frequency interpolator
f_interp = interp1d(times * fs, 2 * np.pi * frequencies / fs, kind=kind,
fill_value=0.0, bounds_error=False, copy=False)
# Estimate frequency at sample points
f_est = f_interp(np.arange(length))
if amplitudes is None:
a_est = np.ones((length, ))
else:
# build an amplitude interpolator
a_interp = interp1d(
times * fs, amplitudes, kind=kind,
fill_value=0.0, bounds_error=False, copy=False)
a_est = a_interp(np.arange(length))
# Sonify the waveform
return a_est * function(np.cumsum(f_est)) | [
"def",
"pitch_contour",
"(",
"times",
",",
"frequencies",
",",
"fs",
",",
"amplitudes",
"=",
"None",
",",
"function",
"=",
"np",
".",
"sin",
",",
"length",
"=",
"None",
",",
"kind",
"=",
"'linear'",
")",
":",
"fs",
"=",
"float",
"(",
"fs",
")",
"if... | Sonify a pitch contour.
Parameters
----------
times : np.ndarray
time indices for each frequency measurement, in seconds
frequencies : np.ndarray
frequency measurements, in Hz.
Non-positive measurements will be interpreted as un-voiced samples.
fs : int
desired sampling rate of the output signal
amplitudes : np.ndarray
amplitude measurements, nonnegative
defaults to ``np.ones((length,))``
function : function
function to use to synthesize notes, should be :math:`2\pi`-periodic
length : int
desired number of samples in the output signal,
defaults to ``max(times)*fs``
kind : str
Interpolation mode for the frequency and amplitude values.
See: ``scipy.interpolate.interp1d`` for valid settings.
Returns
-------
output : np.ndarray
synthesized version of the pitch contour | [
"Sonify",
"a",
"pitch",
"contour",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/sonify.py#L187-L250 |
def chords(chord_labels, intervals, fs, **kwargs):
    """Synthesizes chord labels

    Parameters
    ----------
    chord_labels : list of str
        List of chord label strings.
    intervals : np.ndarray, shape=(len(chord_labels), 2)
        Start and end times of each chord label
    fs : int
        Sampling rate to synthesize at
    kwargs
        Additional keyword arguments to pass to
        :func:`mir_eval.sonify.time_frequency`

    Returns
    -------
    output : np.ndarray
        Synthesized chord labels
    """
    util.validate_intervals(intervals)

    # Encode each label as a root pitch class plus an interval bitmap,
    # then rotate each bitmap by its root to get an absolute chroma vector
    roots, interval_bitmaps, _ = chord.encode_many(chord_labels)
    chroma_vectors = [np.roll(bitmap, root)
                      for bitmap, root in zip(interval_bitmaps, roots)]
    chromagram = np.array(chroma_vectors).T

    # Delegate the actual synthesis to the chroma sonifier
    return chroma(chromagram, intervals, fs, **kwargs)
util.validate_intervals(intervals)
# Convert from labels to chroma
roots, interval_bitmaps, _ = chord.encode_many(chord_labels)
chromagram = np.array([np.roll(interval_bitmap, root)
for (interval_bitmap, root)
in zip(interval_bitmaps, roots)]).T
return chroma(chromagram, intervals, fs, **kwargs) | [
"def",
"chords",
"(",
"chord_labels",
",",
"intervals",
",",
"fs",
",",
"*",
"*",
"kwargs",
")",
":",
"util",
".",
"validate_intervals",
"(",
"intervals",
")",
"# Convert from labels to chroma",
"roots",
",",
"interval_bitmaps",
",",
"_",
"=",
"chord",
".",
... | Synthesizes chord labels
Parameters
----------
chord_labels : list of str
List of chord label strings.
intervals : np.ndarray, shape=(len(chord_labels), 2)
Start and end times of each chord label
fs : int
Sampling rate to synthesize at
kwargs
Additional keyword arguments to pass to
:func:`mir_eval.sonify.time_frequency`
Returns
-------
output : np.ndarray
Synthesized chord labels | [
"Synthesizes",
"chord",
"labels"
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/sonify.py#L300-L329 |
def validate(reference_onsets, estimated_onsets):
    """Checks that the input annotations to a metric look like valid onset time
    arrays, and throws helpful errors if not.

    Parameters
    ----------
    reference_onsets : np.ndarray
        reference onset locations, in seconds
    estimated_onsets : np.ndarray
        estimated onset locations, in seconds
    """
    # Empty annotations are legal but force the metric to 0, so warn
    if reference_onsets.size == 0:
        warnings.warn("Reference onsets are empty.")
    if estimated_onsets.size == 0:
        warnings.warn("Estimated onsets are empty.")
    # Both arrays must be valid event sequences within the allowed range
    util.validate_events(reference_onsets, MAX_TIME)
    util.validate_events(estimated_onsets, MAX_TIME)
# If reference or estimated onsets are empty, warn because metric will be 0
if reference_onsets.size == 0:
warnings.warn("Reference onsets are empty.")
if estimated_onsets.size == 0:
warnings.warn("Estimated onsets are empty.")
for onsets in [reference_onsets, estimated_onsets]:
util.validate_events(onsets, MAX_TIME) | [
"def",
"validate",
"(",
"reference_onsets",
",",
"estimated_onsets",
")",
":",
"# If reference or estimated onsets are empty, warn because metric will be 0",
"if",
"reference_onsets",
".",
"size",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Reference onsets are empty.\""... | Checks that the input annotations to a metric look like valid onset time
arrays, and throws helpful errors if not.
Parameters
----------
reference_onsets : np.ndarray
reference onset locations, in seconds
estimated_onsets : np.ndarray
estimated onset locations, in seconds | [
"Checks",
"that",
"the",
"input",
"annotations",
"to",
"a",
"metric",
"look",
"like",
"valid",
"onset",
"time",
"arrays",
"and",
"throws",
"helpful",
"errors",
"if",
"not",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/onset.py#L35-L53 |
def f_measure(reference_onsets, estimated_onsets, window=.05):
    """Compute the F-measure of correct vs incorrectly predicted onsets.
    "Correctness" is determined over a small window.

    Examples
    --------
    >>> reference_onsets = mir_eval.io.load_events('reference.txt')
    >>> estimated_onsets = mir_eval.io.load_events('estimated.txt')
    >>> F, P, R = mir_eval.onset.f_measure(reference_onsets,
    ...                                    estimated_onsets)

    Parameters
    ----------
    reference_onsets : np.ndarray
        reference onset locations, in seconds
    estimated_onsets : np.ndarray
        estimated onset locations, in seconds
    window : float
        Window size, in seconds
        (Default value = .05)

    Returns
    -------
    f_measure : float
        2*precision*recall/(precision + recall)
    precision : float
        (# true positives)/(# true positives + # false positives)
    recall : float
        (# true positives)/(# true positives + # false negatives)
    """
    validate(reference_onsets, estimated_onsets)
    # With no events on either side there is nothing to match
    if reference_onsets.size == 0 or estimated_onsets.size == 0:
        return 0., 0., 0.
    # Find the largest feasible set of (reference, estimate) pairs whose
    # onsets lie within `window` seconds of one another
    matching = util.match_events(reference_onsets, estimated_onsets, window)
    n_hits = float(len(matching))
    precision = n_hits / len(estimated_onsets)
    recall = n_hits / len(reference_onsets)
    return util.f_measure(precision, recall), precision, recall
validate(reference_onsets, estimated_onsets)
# If either list is empty, return 0s
if reference_onsets.size == 0 or estimated_onsets.size == 0:
return 0., 0., 0.
# Compute the best-case matching between reference and estimated onset
# locations
matching = util.match_events(reference_onsets, estimated_onsets, window)
precision = float(len(matching))/len(estimated_onsets)
recall = float(len(matching))/len(reference_onsets)
# Compute F-measure and return all statistics
return util.f_measure(precision, recall), precision, recall | [
"def",
"f_measure",
"(",
"reference_onsets",
",",
"estimated_onsets",
",",
"window",
"=",
".05",
")",
":",
"validate",
"(",
"reference_onsets",
",",
"estimated_onsets",
")",
"# If either list is empty, return 0s",
"if",
"reference_onsets",
".",
"size",
"==",
"0",
"o... | Compute the F-measure of correct vs incorrectly predicted onsets.
"Correctness" is determined over a small window.
Examples
--------
>>> reference_onsets = mir_eval.io.load_events('reference.txt')
>>> estimated_onsets = mir_eval.io.load_events('estimated.txt')
>>> F, P, R = mir_eval.onset.f_measure(reference_onsets,
... estimated_onsets)
Parameters
----------
reference_onsets : np.ndarray
reference onset locations, in seconds
estimated_onsets : np.ndarray
estimated onset locations, in seconds
window : float
Window size, in seconds
(Default value = .05)
Returns
-------
f_measure : float
2*precision*recall/(precision + recall)
precision : float
(# true positives)/(# true positives + # false positives)
recall : float
(# true positives)/(# true positives + # false negatives) | [
"Compute",
"the",
"F",
"-",
"measure",
"of",
"correct",
"vs",
"incorrectly",
"predicted",
"onsets",
".",
"Corectness",
"is",
"determined",
"over",
"a",
"small",
"window",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/onset.py#L56-L98 |
def validate(ref_intervals, ref_pitches, est_intervals, est_pitches):
    """Checks that the input annotations to a metric look like time intervals
    and a pitch list, and throws helpful errors if not.

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n,2)
        Array of reference notes time intervals (onset and offset times)
    ref_pitches : np.ndarray, shape=(n,)
        Array of reference pitch values in Hertz
    est_intervals : np.ndarray, shape=(m,2)
        Array of estimated notes time intervals (onset and offset times)
    est_pitches : np.ndarray, shape=(m,)
        Array of estimated pitch values in Hertz
    """
    # The interval arrays must be well-formed on their own
    validate_intervals(ref_intervals, est_intervals)
    # Each note interval needs exactly one corresponding pitch
    if ref_intervals.shape[0] != ref_pitches.shape[0]:
        raise ValueError('Reference intervals and pitches have different '
                         'lengths.')
    if est_intervals.shape[0] != est_pitches.shape[0]:
        raise ValueError('Estimated intervals and pitches have different '
                         'lengths.')
    # Pitches are frequencies in Hz, so they must be strictly positive
    if ref_pitches.size > 0 and np.min(ref_pitches) <= 0:
        raise ValueError("Reference contains at least one non-positive pitch "
                         "value")
    if est_pitches.size > 0 and np.min(est_pitches) <= 0:
        raise ValueError("Estimate contains at least one non-positive pitch "
                         "value")
# Validate intervals
validate_intervals(ref_intervals, est_intervals)
# Make sure intervals and pitches match in length
if not ref_intervals.shape[0] == ref_pitches.shape[0]:
raise ValueError('Reference intervals and pitches have different '
'lengths.')
if not est_intervals.shape[0] == est_pitches.shape[0]:
raise ValueError('Estimated intervals and pitches have different '
'lengths.')
# Make sure all pitch values are positive
if ref_pitches.size > 0 and np.min(ref_pitches) <= 0:
raise ValueError("Reference contains at least one non-positive pitch "
"value")
if est_pitches.size > 0 and np.min(est_pitches) <= 0:
raise ValueError("Estimate contains at least one non-positive pitch "
"value") | [
"def",
"validate",
"(",
"ref_intervals",
",",
"ref_pitches",
",",
"est_intervals",
",",
"est_pitches",
")",
":",
"# Validate intervals",
"validate_intervals",
"(",
"ref_intervals",
",",
"est_intervals",
")",
"# Make sure intervals and pitches match in length",
"if",
"not",
... | Checks that the input annotations to a metric look like time intervals
and a pitch list, and throws helpful errors if not.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
ref_pitches : np.ndarray, shape=(n,)
Array of reference pitch values in Hertz
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
est_pitches : np.ndarray, shape=(m,)
Array of estimated pitch values in Hertz | [
"Checks",
"that",
"the",
"input",
"annotations",
"to",
"a",
"metric",
"look",
"like",
"time",
"intervals",
"and",
"a",
"pitch",
"list",
"and",
"throws",
"helpful",
"errors",
"if",
"not",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/transcription.py#L117-L149 |
def validate_intervals(ref_intervals, est_intervals):
    """Checks that the input annotations to a metric look like time intervals,
    and throws helpful errors if not.

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n,2)
        Array of reference notes time intervals (onset and offset times)
    est_intervals : np.ndarray, shape=(m,2)
        Array of estimated notes time intervals (onset and offset times)
    """
    # Empty annotations are legal but degenerate, so warn about them
    if ref_intervals.size == 0:
        warnings.warn("Reference notes are empty.")
    if est_intervals.size == 0:
        warnings.warn("Estimated notes are empty.")
    # Defer the structural checks (shape, ordering) to the shared utility
    for intervals in (ref_intervals, est_intervals):
        util.validate_intervals(intervals)
# If reference or estimated notes are empty, warn
if ref_intervals.size == 0:
warnings.warn("Reference notes are empty.")
if est_intervals.size == 0:
warnings.warn("Estimated notes are empty.")
# Validate intervals
util.validate_intervals(ref_intervals)
util.validate_intervals(est_intervals) | [
"def",
"validate_intervals",
"(",
"ref_intervals",
",",
"est_intervals",
")",
":",
"# If reference or estimated notes are empty, warn",
"if",
"ref_intervals",
".",
"size",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Reference notes are empty.\"",
")",
"if",
"est_in... | Checks that the input annotations to a metric look like time intervals,
and throws helpful errors if not.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times) | [
"Checks",
"that",
"the",
"input",
"annotations",
"to",
"a",
"metric",
"look",
"like",
"time",
"intervals",
"and",
"throws",
"helpful",
"errors",
"if",
"not",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/transcription.py#L152-L171 |
def match_note_offsets(ref_intervals, est_intervals, offset_ratio=0.2,
                       offset_min_tolerance=0.05, strict=False):
    """Compute a maximum matching between reference and estimated notes,
    only taking note offsets into account.

    Given two note sequences represented by ``ref_intervals`` and
    ``est_intervals`` (see :func:`mir_eval.io.load_valued_intervals`), we seek
    the largest set of correspondences ``(i, j)`` such that the offset of
    reference note ``i`` has to be within ``offset_tolerance`` of the offset of
    estimated note ``j``, where ``offset_tolerance`` is equal to
    ``offset_ratio`` times the reference note's duration, i.e. ``offset_ratio
    * ref_duration[i]`` where ``ref_duration[i] = ref_intervals[i, 1] -
    ref_intervals[i, 0]``. If the resulting ``offset_tolerance`` is less than
    ``offset_min_tolerance`` (50 ms by default) then ``offset_min_tolerance``
    is used instead.

    Every reference note is matched against at most one estimated note.

    Note there are separate functions :func:`match_note_onsets` and
    :func:`match_notes` for matching notes based on onsets only or based on
    onset, offset, and pitch, respectively. This is because the rules for
    matching note onsets and matching note offsets are different.

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n,2)
        Array of reference notes time intervals (onset and offset times)
    est_intervals : np.ndarray, shape=(m,2)
        Array of estimated notes time intervals (onset and offset times)
    offset_ratio : float > 0
        The ratio of the reference note's duration used to define the
        ``offset_tolerance``. Default is 0.2 (20%), meaning the
        ``offset_tolerance`` will equal the ``ref_duration * 0.2``, or 0.05 (50
        ms), whichever is greater.
    offset_min_tolerance : float > 0
        The minimum tolerance for offset matching. See ``offset_ratio``
        description for an explanation of how the offset tolerance is
        determined.
    strict : bool
        If ``strict=False`` (the default), threshold checks for offset
        matching are performed using ``<=`` (less than or equal). If
        ``strict=True``, the threshold checks are performed using ``<`` (less
        than).

    Returns
    -------
    matching : list of tuples
        A list of matched reference and estimated notes.
        ``matching[i] == (i, j)`` where reference note ``i`` matches estimated
        note ``j``.
    """
    # Strict matching excludes distances exactly at the tolerance boundary
    within_tol = np.less if strict else np.less_equal

    # Pairwise |ref_offset - est_offset| distances, rounded to a fixed
    # precision so that a distance of exactly the tolerance (e.g. 50 ms
    # with strict=False) is not rejected because of float error.
    distances = np.around(np.abs(np.subtract.outer(ref_intervals[:, 1],
                                                   est_intervals[:, 1])),
                          decimals=N_DECIMALS)

    # Per-reference tolerance: a fraction of the note's duration, but never
    # below the absolute minimum tolerance
    ref_durations = util.intervals_to_durations(ref_intervals)
    tolerances = np.maximum(offset_ratio * ref_durations,
                            offset_min_tolerance)
    hit_matrix = within_tol(distances, tolerances.reshape(-1, 1))

    # Build a bipartite graph keyed by estimated-note index, listing the
    # reference notes each estimate could be paired with
    candidates = {}
    for ref_idx, est_idx in zip(*np.where(hit_matrix)):
        candidates.setdefault(est_idx, []).append(ref_idx)

    # Maximum bipartite matching, reported as sorted (ref, est) pairs
    return sorted(util._bipartite_match(candidates).items())
offset_min_tolerance=0.05, strict=False):
# set the comparison function
if strict:
cmp_func = np.less
else:
cmp_func = np.less_equal
# check for offset matches
offset_distances = np.abs(np.subtract.outer(ref_intervals[:, 1],
est_intervals[:, 1]))
# Round distances to a target precision to avoid the situation where
# if the distance is exactly 50ms (and strict=False) it erroneously
# doesn't match the notes because of precision issues.
offset_distances = np.around(offset_distances, decimals=N_DECIMALS)
ref_durations = util.intervals_to_durations(ref_intervals)
offset_tolerances = np.maximum(offset_ratio * ref_durations,
offset_min_tolerance)
offset_hit_matrix = (
cmp_func(offset_distances, offset_tolerances.reshape(-1, 1)))
# check for hits
hits = np.where(offset_hit_matrix)
# Construct the graph input
# Flip graph so that 'matching' is a list of tuples where the first item
# in each tuple is the reference note index, and the second item is the
# estimated note index.
G = {}
for ref_i, est_i in zip(*hits):
if est_i not in G:
G[est_i] = []
G[est_i].append(ref_i)
# Compute the maximum matching
matching = sorted(util._bipartite_match(G).items())
return matching | [
"def",
"match_note_offsets",
"(",
"ref_intervals",
",",
"est_intervals",
",",
"offset_ratio",
"=",
"0.2",
",",
"offset_min_tolerance",
"=",
"0.05",
",",
"strict",
"=",
"False",
")",
":",
"# set the comparison function",
"if",
"strict",
":",
"cmp_func",
"=",
"np",
... | Compute a maximum matching between reference and estimated notes,
only taking note offsets into account.
Given two note sequences represented by ``ref_intervals`` and
``est_intervals`` (see :func:`mir_eval.io.load_valued_intervals`), we seek
the largest set of correspondences ``(i, j)`` such that the offset of
reference note ``i`` has to be within ``offset_tolerance`` of the offset of
estimated note ``j``, where ``offset_tolerance`` is equal to
``offset_ratio`` times the reference note's duration, i.e. ``offset_ratio
* ref_duration[i]`` where ``ref_duration[i] = ref_intervals[i, 1] -
ref_intervals[i, 0]``. If the resulting ``offset_tolerance`` is less than
``offset_min_tolerance`` (50 ms by default) then ``offset_min_tolerance``
is used instead.
Every reference note is matched against at most one estimated note.
Note there are separate functions :func:`match_note_onsets` and
:func:`match_notes` for matching notes based on onsets only or based on
onset, offset, and pitch, respectively. This is because the rules for
matching note onsets and matching note offsets are different.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
offset_ratio : float > 0
The ratio of the reference note's duration used to define the
``offset_tolerance``. Default is 0.2 (20%), meaning the
``offset_tolerance`` will equal the ``ref_duration * 0.2``, or 0.05 (50
ms), whichever is greater.
offset_min_tolerance : float > 0
The minimum tolerance for offset matching. See ``offset_ratio``
description for an explanation of how the offset tolerance is
determined.
strict : bool
If ``strict=False`` (the default), threshold checks for offset
matching are performed using ``<=`` (less than or equal). If
``strict=True``, the threshold checks are performed using ``<`` (less
than).
Returns
-------
matching : list of tuples
A list of matched reference and estimated notes.
``matching[i] == (i, j)`` where reference note ``i`` matches estimated
note ``j``. | [
"Compute",
"a",
"maximum",
"matching",
"between",
"reference",
"and",
"estimated",
"notes",
"only",
"taking",
"note",
"offsets",
"into",
"account",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/transcription.py#L174-L260 |
def match_note_onsets(ref_intervals, est_intervals, onset_tolerance=0.05,
                      strict=False):
    """Compute a maximum matching between reference and estimated notes,
    only taking note onsets into account.

    Given two note sequences represented by ``ref_intervals`` and
    ``est_intervals`` (see :func:`mir_eval.io.load_valued_intervals`), we seek
    the largest set of correspondences ``(i,j)`` such that the onset of
    reference note ``i`` is within ``onset_tolerance`` of the onset of
    estimated note ``j``.

    Every reference note is matched against at most one estimated note.

    Note there are separate functions :func:`match_note_offsets` and
    :func:`match_notes` for matching notes based on offsets only or based on
    onset, offset, and pitch, respectively. This is because the rules for
    matching note onsets and matching note offsets are different.

    Parameters
    ----------
    ref_intervals : np.ndarray, shape=(n,2)
        Array of reference notes time intervals (onset and offset times)
    est_intervals : np.ndarray, shape=(m,2)
        Array of estimated notes time intervals (onset and offset times)
    onset_tolerance : float > 0
        The tolerance for an estimated note's onset deviating from the
        reference note's onset, in seconds. Default is 0.05 (50 ms).
    strict : bool
        If ``strict=False`` (the default), threshold checks for onset matching
        are performed using ``<=`` (less than or equal). If ``strict=True``,
        the threshold checks are performed using ``<`` (less than).

    Returns
    -------
    matching : list of tuples
        A list of matched reference and estimated notes.
        ``matching[i] == (i, j)`` where reference note ``i`` matches estimated
        note ``j``.
    """
    # Strict matching excludes distances exactly at the tolerance boundary
    within_tol = np.less if strict else np.less_equal

    # Pairwise |ref_onset - est_onset| distances, rounded to a fixed
    # precision so that a distance of exactly the tolerance (e.g. 50 ms
    # with strict=False) is not rejected because of float error.
    distances = np.around(np.abs(np.subtract.outer(ref_intervals[:, 0],
                                                   est_intervals[:, 0])),
                          decimals=N_DECIMALS)
    hit_matrix = within_tol(distances, onset_tolerance)

    # Build a bipartite graph keyed by estimated-note index, listing the
    # reference notes each estimate could be paired with
    candidates = {}
    for ref_idx, est_idx in zip(*np.where(hit_matrix)):
        candidates.setdefault(est_idx, []).append(ref_idx)

    # Maximum bipartite matching, reported as sorted (ref, est) pairs
    return sorted(util._bipartite_match(candidates).items())
strict=False):
# set the comparison function
if strict:
cmp_func = np.less
else:
cmp_func = np.less_equal
# check for onset matches
onset_distances = np.abs(np.subtract.outer(ref_intervals[:, 0],
est_intervals[:, 0]))
# Round distances to a target precision to avoid the situation where
# if the distance is exactly 50ms (and strict=False) it erroneously
# doesn't match the notes because of precision issues.
onset_distances = np.around(onset_distances, decimals=N_DECIMALS)
onset_hit_matrix = cmp_func(onset_distances, onset_tolerance)
# find hits
hits = np.where(onset_hit_matrix)
# Construct the graph input
# Flip graph so that 'matching' is a list of tuples where the first item
# in each tuple is the reference note index, and the second item is the
# estimated note index.
G = {}
for ref_i, est_i in zip(*hits):
if est_i not in G:
G[est_i] = []
G[est_i].append(ref_i)
# Compute the maximum matching
matching = sorted(util._bipartite_match(G).items())
return matching | [
"def",
"match_note_onsets",
"(",
"ref_intervals",
",",
"est_intervals",
",",
"onset_tolerance",
"=",
"0.05",
",",
"strict",
"=",
"False",
")",
":",
"# set the comparison function",
"if",
"strict",
":",
"cmp_func",
"=",
"np",
".",
"less",
"else",
":",
"cmp_func",... | Compute a maximum matching between reference and estimated notes,
only taking note onsets into account.
Given two note sequences represented by ``ref_intervals`` and
``est_intervals`` (see :func:`mir_eval.io.load_valued_intervals`), we seek
the largest set of correspondences ``(i,j)`` such that the onset of
reference note ``i`` is within ``onset_tolerance`` of the onset of
estimated note ``j``.
Every reference note is matched against at most one estimated note.
Note there are separate functions :func:`match_note_offsets` and
:func:`match_notes` for matching notes based on offsets only or based on
onset, offset, and pitch, respectively. This is because the rules for
matching note onsets and matching note offsets are different.
Parameters
----------
ref_intervals : np.ndarray, shape=(n,2)
Array of reference notes time intervals (onset and offset times)
est_intervals : np.ndarray, shape=(m,2)
Array of estimated notes time intervals (onset and offset times)
onset_tolerance : float > 0
The tolerance for an estimated note's onset deviating from the
reference note's onset, in seconds. Default is 0.05 (50 ms).
strict : bool
If ``strict=False`` (the default), threshold checks for onset matching
are performed using ``<=`` (less than or equal). If ``strict=True``,
the threshold checks are performed using ``<`` (less than).
Returns
-------
matching : list of tuples
A list of matched reference and estimated notes.
``matching[i] == (i, j)`` where reference note ``i`` matches estimated
note ``j``. | [
"Compute",
"a",
"maximum",
"matching",
"between",
"reference",
"and",
"estimated",
"notes",
"only",
"taking",
"note",
"onsets",
"into",
"account",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/transcription.py#L263-L333 |
21,480 | craffel/mir_eval | mir_eval/melody.py | validate_voicing | def validate_voicing(ref_voicing, est_voicing):
"""Checks that voicing inputs to a metric are in the correct format.
Parameters
----------
ref_voicing : np.ndarray
Reference boolean voicing array
est_voicing : np.ndarray
Estimated boolean voicing array
"""
if ref_voicing.size == 0:
warnings.warn("Reference voicing array is empty.")
if est_voicing.size == 0:
warnings.warn("Estimated voicing array is empty.")
if ref_voicing.sum() == 0:
warnings.warn("Reference melody has no voiced frames.")
if est_voicing.sum() == 0:
warnings.warn("Estimated melody has no voiced frames.")
# Make sure they're the same length
if ref_voicing.shape[0] != est_voicing.shape[0]:
raise ValueError('Reference and estimated voicing arrays should '
'be the same length.')
for voicing in [ref_voicing, est_voicing]:
# Make sure they're (effectively) boolean
if np.logical_and(voicing != 0, voicing != 1).any():
raise ValueError('Voicing arrays must be boolean.') | python | def validate_voicing(ref_voicing, est_voicing):
if ref_voicing.size == 0:
warnings.warn("Reference voicing array is empty.")
if est_voicing.size == 0:
warnings.warn("Estimated voicing array is empty.")
if ref_voicing.sum() == 0:
warnings.warn("Reference melody has no voiced frames.")
if est_voicing.sum() == 0:
warnings.warn("Estimated melody has no voiced frames.")
# Make sure they're the same length
if ref_voicing.shape[0] != est_voicing.shape[0]:
raise ValueError('Reference and estimated voicing arrays should '
'be the same length.')
for voicing in [ref_voicing, est_voicing]:
# Make sure they're (effectively) boolean
if np.logical_and(voicing != 0, voicing != 1).any():
raise ValueError('Voicing arrays must be boolean.') | [
"def",
"validate_voicing",
"(",
"ref_voicing",
",",
"est_voicing",
")",
":",
"if",
"ref_voicing",
".",
"size",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Reference voicing array is empty.\"",
")",
"if",
"est_voicing",
".",
"size",
"==",
"0",
":",
"warnin... | Checks that voicing inputs to a metric are in the correct format.
Parameters
----------
ref_voicing : np.ndarray
Reference boolean voicing array
est_voicing : np.ndarray
Estimated boolean voicing array | [
"Checks",
"that",
"voicing",
"inputs",
"to",
"a",
"metric",
"are",
"in",
"the",
"correct",
"format",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/melody.py#L61-L87 |
21,481 | craffel/mir_eval | mir_eval/melody.py | hz2cents | def hz2cents(freq_hz, base_frequency=10.0):
"""Convert an array of frequency values in Hz to cents.
0 values are left in place.
Parameters
----------
freq_hz : np.ndarray
Array of frequencies in Hz.
base_frequency : float
Base frequency for conversion.
(Default value = 10.0)
Returns
-------
cent : np.ndarray
Array of frequencies in cents, relative to base_frequency
"""
freq_cent = np.zeros(freq_hz.shape[0])
freq_nonz_ind = np.flatnonzero(freq_hz)
normalized_frequency = np.abs(freq_hz[freq_nonz_ind])/base_frequency
freq_cent[freq_nonz_ind] = 1200*np.log2(normalized_frequency)
return freq_cent | python | def hz2cents(freq_hz, base_frequency=10.0):
freq_cent = np.zeros(freq_hz.shape[0])
freq_nonz_ind = np.flatnonzero(freq_hz)
normalized_frequency = np.abs(freq_hz[freq_nonz_ind])/base_frequency
freq_cent[freq_nonz_ind] = 1200*np.log2(normalized_frequency)
return freq_cent | [
"def",
"hz2cents",
"(",
"freq_hz",
",",
"base_frequency",
"=",
"10.0",
")",
":",
"freq_cent",
"=",
"np",
".",
"zeros",
"(",
"freq_hz",
".",
"shape",
"[",
"0",
"]",
")",
"freq_nonz_ind",
"=",
"np",
".",
"flatnonzero",
"(",
"freq_hz",
")",
"normalized_freq... | Convert an array of frequency values in Hz to cents.
0 values are left in place.
Parameters
----------
freq_hz : np.ndarray
Array of frequencies in Hz.
base_frequency : float
Base frequency for conversion.
(Default value = 10.0)
Returns
-------
cent : np.ndarray
Array of frequencies in cents, relative to base_frequency | [
"Convert",
"an",
"array",
"of",
"frequency",
"values",
"in",
"Hz",
"to",
"cents",
".",
"0",
"values",
"are",
"left",
"in",
"place",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/melody.py#L118-L141 |
21,482 | craffel/mir_eval | mir_eval/melody.py | constant_hop_timebase | def constant_hop_timebase(hop, end_time):
"""Generates a time series from 0 to ``end_time`` with times spaced ``hop``
apart
Parameters
----------
hop : float
Spacing of samples in the time series
end_time : float
Time series will span ``[0, end_time]``
Returns
-------
times : np.ndarray
Generated timebase
"""
# Compute new timebase. Rounding/linspace is to avoid float problems.
end_time = np.round(end_time, 10)
times = np.linspace(0, hop*int(np.floor(end_time/hop)),
int(np.floor(end_time/hop)) + 1)
times = np.round(times, 10)
return times | python | def constant_hop_timebase(hop, end_time):
# Compute new timebase. Rounding/linspace is to avoid float problems.
end_time = np.round(end_time, 10)
times = np.linspace(0, hop*int(np.floor(end_time/hop)),
int(np.floor(end_time/hop)) + 1)
times = np.round(times, 10)
return times | [
"def",
"constant_hop_timebase",
"(",
"hop",
",",
"end_time",
")",
":",
"# Compute new timebase. Rounding/linspace is to avoid float problems.",
"end_time",
"=",
"np",
".",
"round",
"(",
"end_time",
",",
"10",
")",
"times",
"=",
"np",
".",
"linspace",
"(",
"0",
",... | Generates a time series from 0 to ``end_time`` with times spaced ``hop``
apart
Parameters
----------
hop : float
Spacing of samples in the time series
end_time : float
Time series will span ``[0, end_time]``
Returns
-------
times : np.ndarray
Generated timebase | [
"Generates",
"a",
"time",
"series",
"from",
"0",
"to",
"end_time",
"with",
"times",
"spaced",
"hop",
"apart"
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/melody.py#L165-L187 |
21,483 | craffel/mir_eval | mir_eval/segment.py | detection | def detection(reference_intervals, estimated_intervals,
window=0.5, beta=1.0, trim=False):
"""Boundary detection hit-rate.
A hit is counted whenever an reference boundary is within ``window`` of a
estimated boundary. Note that each boundary is matched at most once: this
is achieved by computing the size of a maximal matching between reference
and estimated boundary points, subject to the window constraint.
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> # With 0.5s windowing
>>> P05, R05, F05 = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=0.5)
>>> # With 3s windowing
>>> P3, R3, F3 = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=3)
>>> # Ignoring hits for the beginning and end of track
>>> P, R, F = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=0.5,
... trim=True)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
window : float > 0
size of the window of 'correctness' around ground-truth beats
(in seconds)
(Default value = 0.5)
beta : float > 0
weighting constant for F-measure.
(Default value = 1.0)
trim : boolean
if ``True``, the first and last boundary times are ignored.
Typically, these denote start (0) and end-markers.
(Default value = False)
Returns
-------
precision : float
precision of estimated predictions
recall : float
recall of reference reference boundaries
f_measure : float
F-measure (weighted harmonic mean of ``precision`` and ``recall``)
"""
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return 0.0, 0.0, 0.0
matching = util.match_events(reference_boundaries,
estimated_boundaries,
window)
precision = float(len(matching)) / len(estimated_boundaries)
recall = float(len(matching)) / len(reference_boundaries)
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure | python | def detection(reference_intervals, estimated_intervals,
window=0.5, beta=1.0, trim=False):
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return 0.0, 0.0, 0.0
matching = util.match_events(reference_boundaries,
estimated_boundaries,
window)
precision = float(len(matching)) / len(estimated_boundaries)
recall = float(len(matching)) / len(reference_boundaries)
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure | [
"def",
"detection",
"(",
"reference_intervals",
",",
"estimated_intervals",
",",
"window",
"=",
"0.5",
",",
"beta",
"=",
"1.0",
",",
"trim",
"=",
"False",
")",
":",
"validate_boundary",
"(",
"reference_intervals",
",",
"estimated_intervals",
",",
"trim",
")",
... | Boundary detection hit-rate.
A hit is counted whenever an reference boundary is within ``window`` of a
estimated boundary. Note that each boundary is matched at most once: this
is achieved by computing the size of a maximal matching between reference
and estimated boundary points, subject to the window constraint.
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> # With 0.5s windowing
>>> P05, R05, F05 = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=0.5)
>>> # With 3s windowing
>>> P3, R3, F3 = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=3)
>>> # Ignoring hits for the beginning and end of track
>>> P, R, F = mir_eval.segment.detection(ref_intervals,
... est_intervals,
... window=0.5,
... trim=True)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
window : float > 0
size of the window of 'correctness' around ground-truth beats
(in seconds)
(Default value = 0.5)
beta : float > 0
weighting constant for F-measure.
(Default value = 1.0)
trim : boolean
if ``True``, the first and last boundary times are ignored.
Typically, these denote start (0) and end-markers.
(Default value = False)
Returns
-------
precision : float
precision of estimated predictions
recall : float
recall of reference reference boundaries
f_measure : float
F-measure (weighted harmonic mean of ``precision`` and ``recall``) | [
"Boundary",
"detection",
"hit",
"-",
"rate",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L176-L260 |
21,484 | craffel/mir_eval | mir_eval/segment.py | deviation | def deviation(reference_intervals, estimated_intervals, trim=False):
"""Compute the median deviations between reference
and estimated boundary times.
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> r_to_e, e_to_r = mir_eval.boundary.deviation(ref_intervals,
... est_intervals)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
trim : boolean
if ``True``, the first and last intervals are ignored.
Typically, these denote start (0.0) and end-of-track markers.
(Default value = False)
Returns
-------
reference_to_estimated : float
median time from each reference boundary to the
closest estimated boundary
estimated_to_reference : float
median time from each estimated boundary to the
closest reference boundary
"""
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return np.nan, np.nan
dist = np.abs(np.subtract.outer(reference_boundaries,
estimated_boundaries))
estimated_to_reference = np.median(dist.min(axis=0))
reference_to_estimated = np.median(dist.min(axis=1))
return reference_to_estimated, estimated_to_reference | python | def deviation(reference_intervals, estimated_intervals, trim=False):
validate_boundary(reference_intervals, estimated_intervals, trim)
# Convert intervals to boundaries
reference_boundaries = util.intervals_to_boundaries(reference_intervals)
estimated_boundaries = util.intervals_to_boundaries(estimated_intervals)
# Suppress the first and last intervals
if trim:
reference_boundaries = reference_boundaries[1:-1]
estimated_boundaries = estimated_boundaries[1:-1]
# If we have no boundaries, we get no score.
if len(reference_boundaries) == 0 or len(estimated_boundaries) == 0:
return np.nan, np.nan
dist = np.abs(np.subtract.outer(reference_boundaries,
estimated_boundaries))
estimated_to_reference = np.median(dist.min(axis=0))
reference_to_estimated = np.median(dist.min(axis=1))
return reference_to_estimated, estimated_to_reference | [
"def",
"deviation",
"(",
"reference_intervals",
",",
"estimated_intervals",
",",
"trim",
"=",
"False",
")",
":",
"validate_boundary",
"(",
"reference_intervals",
",",
"estimated_intervals",
",",
"trim",
")",
"# Convert intervals to boundaries",
"reference_boundaries",
"="... | Compute the median deviations between reference
and estimated boundary times.
Examples
--------
>>> ref_intervals, _ = mir_eval.io.load_labeled_intervals('ref.lab')
>>> est_intervals, _ = mir_eval.io.load_labeled_intervals('est.lab')
>>> r_to_e, e_to_r = mir_eval.boundary.deviation(ref_intervals,
... est_intervals)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_intervals` or
:func:`mir_eval.io.load_labeled_intervals`.
trim : boolean
if ``True``, the first and last intervals are ignored.
Typically, these denote start (0.0) and end-of-track markers.
(Default value = False)
Returns
-------
reference_to_estimated : float
median time from each reference boundary to the
closest estimated boundary
estimated_to_reference : float
median time from each estimated boundary to the
closest reference boundary | [
"Compute",
"the",
"median",
"deviations",
"between",
"reference",
"and",
"estimated",
"boundary",
"times",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L263-L321 |
21,485 | craffel/mir_eval | mir_eval/segment.py | pairwise | def pairwise(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1, beta=1.0):
"""Frame-clustering segmentation evaluation by pair-wise agreement.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
... ref_labels,
... t_min=0)
>>> (est_intervals,
... est_labels) = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
>>> precision, recall, f = mir_eval.structure.pairwise(ref_intervals,
... ref_labels,
... est_intervals,
... est_labels)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
frame_size : float > 0
length (in seconds) of frames for clustering
(Default value = 0.1)
beta : float > 0
beta value for F-measure
(Default value = 1.0)
Returns
-------
precision : float > 0
Precision of detecting whether frames belong in the same cluster
recall : float > 0
Recall of detecting whether frames belong in the same cluster
f : float > 0
F-measure of detecting whether frames belong in the same cluster
"""
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Build the reference label agreement matrix
agree_ref = np.equal.outer(y_ref, y_ref)
# Count the unique pairs
n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0
# Repeat for estimate
agree_est = np.equal.outer(y_est, y_est)
n_agree_est = (agree_est.sum() - len(y_est)) / 2.0
# Find where they agree
matches = np.logical_and(agree_ref, agree_est)
n_matches = (matches.sum() - len(y_ref)) / 2.0
precision = n_matches / n_agree_est
recall = n_matches / n_agree_ref
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure | python | def pairwise(reference_intervals, reference_labels,
estimated_intervals, estimated_labels,
frame_size=0.1, beta=1.0):
validate_structure(reference_intervals, reference_labels,
estimated_intervals, estimated_labels)
# Check for empty annotations. Don't need to check labels because
# validate_structure makes sure they're the same size as intervals
if reference_intervals.size == 0 or estimated_intervals.size == 0:
return 0., 0., 0.
# Generate the cluster labels
y_ref = util.intervals_to_samples(reference_intervals,
reference_labels,
sample_size=frame_size)[-1]
y_ref = util.index_labels(y_ref)[0]
# Map to index space
y_est = util.intervals_to_samples(estimated_intervals,
estimated_labels,
sample_size=frame_size)[-1]
y_est = util.index_labels(y_est)[0]
# Build the reference label agreement matrix
agree_ref = np.equal.outer(y_ref, y_ref)
# Count the unique pairs
n_agree_ref = (agree_ref.sum() - len(y_ref)) / 2.0
# Repeat for estimate
agree_est = np.equal.outer(y_est, y_est)
n_agree_est = (agree_est.sum() - len(y_est)) / 2.0
# Find where they agree
matches = np.logical_and(agree_ref, agree_est)
n_matches = (matches.sum() - len(y_ref)) / 2.0
precision = n_matches / n_agree_est
recall = n_matches / n_agree_ref
f_measure = util.f_measure(precision, recall, beta=beta)
return precision, recall, f_measure | [
"def",
"pairwise",
"(",
"reference_intervals",
",",
"reference_labels",
",",
"estimated_intervals",
",",
"estimated_labels",
",",
"frame_size",
"=",
"0.1",
",",
"beta",
"=",
"1.0",
")",
":",
"validate_structure",
"(",
"reference_intervals",
",",
"reference_labels",
... | Frame-clustering segmentation evaluation by pair-wise agreement.
Examples
--------
>>> (ref_intervals,
... ref_labels) = mir_eval.io.load_labeled_intervals('ref.lab')
>>> (est_intervals,
... est_labels) = mir_eval.io.load_labeled_intervals('est.lab')
>>> # Trim or pad the estimate to match reference timing
>>> (ref_intervals,
... ref_labels) = mir_eval.util.adjust_intervals(ref_intervals,
... ref_labels,
... t_min=0)
>>> (est_intervals,
... est_labels) = mir_eval.util.adjust_intervals(
... est_intervals, est_labels, t_min=0, t_max=ref_intervals.max())
>>> precision, recall, f = mir_eval.structure.pairwise(ref_intervals,
... ref_labels,
... est_intervals,
... est_labels)
Parameters
----------
reference_intervals : np.ndarray, shape=(n, 2)
reference segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
reference_labels : list, shape=(n,)
reference segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_intervals : np.ndarray, shape=(m, 2)
estimated segment intervals, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
estimated_labels : list, shape=(m,)
estimated segment labels, in the format returned by
:func:`mir_eval.io.load_labeled_intervals`.
frame_size : float > 0
length (in seconds) of frames for clustering
(Default value = 0.1)
beta : float > 0
beta value for F-measure
(Default value = 1.0)
Returns
-------
precision : float > 0
Precision of detecting whether frames belong in the same cluster
recall : float > 0
Recall of detecting whether frames belong in the same cluster
f : float > 0
F-measure of detecting whether frames belong in the same cluster | [
"Frame",
"-",
"clustering",
"segmentation",
"evaluation",
"by",
"pair",
"-",
"wise",
"agreement",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L324-L418 |
21,486 | craffel/mir_eval | mir_eval/segment.py | _contingency_matrix | def _contingency_matrix(reference_indices, estimated_indices):
"""Computes the contingency matrix of a true labeling vs an estimated one.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
contingency_matrix : np.ndarray
Contingency matrix, shape=(#reference indices, #estimated indices)
.. note:: Based on sklearn.metrics.cluster.contingency_matrix
"""
ref_classes, ref_class_idx = np.unique(reference_indices,
return_inverse=True)
est_classes, est_class_idx = np.unique(estimated_indices,
return_inverse=True)
n_ref_classes = ref_classes.shape[0]
n_est_classes = est_classes.shape[0]
# Using coo_matrix is faster than histogram2d
return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),
(ref_class_idx, est_class_idx)),
shape=(n_ref_classes, n_est_classes),
dtype=np.int).toarray() | python | def _contingency_matrix(reference_indices, estimated_indices):
ref_classes, ref_class_idx = np.unique(reference_indices,
return_inverse=True)
est_classes, est_class_idx = np.unique(estimated_indices,
return_inverse=True)
n_ref_classes = ref_classes.shape[0]
n_est_classes = est_classes.shape[0]
# Using coo_matrix is faster than histogram2d
return scipy.sparse.coo_matrix((np.ones(ref_class_idx.shape[0]),
(ref_class_idx, est_class_idx)),
shape=(n_ref_classes, n_est_classes),
dtype=np.int).toarray() | [
"def",
"_contingency_matrix",
"(",
"reference_indices",
",",
"estimated_indices",
")",
":",
"ref_classes",
",",
"ref_class_idx",
"=",
"np",
".",
"unique",
"(",
"reference_indices",
",",
"return_inverse",
"=",
"True",
")",
"est_classes",
",",
"est_class_idx",
"=",
... | Computes the contingency matrix of a true labeling vs an estimated one.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
contingency_matrix : np.ndarray
Contingency matrix, shape=(#reference indices, #estimated indices)
.. note:: Based on sklearn.metrics.cluster.contingency_matrix | [
"Computes",
"the",
"contingency",
"matrix",
"of",
"a",
"true",
"labeling",
"vs",
"an",
"estimated",
"one",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L516-L543 |
21,487 | craffel/mir_eval | mir_eval/segment.py | _adjusted_rand_index | def _adjusted_rand_index(reference_indices, estimated_indices):
"""Compute the Rand index, adjusted for change.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
ari : float
Adjusted Rand index
.. note:: Based on sklearn.metrics.cluster.adjusted_rand_score
"""
n_samples = len(reference_indices)
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
ref_classes.shape[0] == est_classes.shape[0] == 0 or
(ref_classes.shape[0] == est_classes.shape[0] ==
len(reference_indices))):
return 1.0
contingency = _contingency_matrix(reference_indices, estimated_indices)
# Compute the ARI using the contingency data
sum_comb_c = sum(scipy.special.comb(n_c, 2, exact=1) for n_c in
contingency.sum(axis=1))
sum_comb_k = sum(scipy.special.comb(n_k, 2, exact=1) for n_k in
contingency.sum(axis=0))
sum_comb = sum((scipy.special.comb(n_ij, 2, exact=1) for n_ij in
contingency.flatten()))
prod_comb = (sum_comb_c * sum_comb_k)/float(scipy.special.comb(n_samples,
2))
mean_comb = (sum_comb_k + sum_comb_c)/2.
return (sum_comb - prod_comb)/(mean_comb - prod_comb) | python | def _adjusted_rand_index(reference_indices, estimated_indices):
n_samples = len(reference_indices)
ref_classes = np.unique(reference_indices)
est_classes = np.unique(estimated_indices)
# Special limit cases: no clustering since the data is not split;
# or trivial clustering where each document is assigned a unique cluster.
# These are perfect matches hence return 1.0.
if (ref_classes.shape[0] == est_classes.shape[0] == 1 or
ref_classes.shape[0] == est_classes.shape[0] == 0 or
(ref_classes.shape[0] == est_classes.shape[0] ==
len(reference_indices))):
return 1.0
contingency = _contingency_matrix(reference_indices, estimated_indices)
# Compute the ARI using the contingency data
sum_comb_c = sum(scipy.special.comb(n_c, 2, exact=1) for n_c in
contingency.sum(axis=1))
sum_comb_k = sum(scipy.special.comb(n_k, 2, exact=1) for n_k in
contingency.sum(axis=0))
sum_comb = sum((scipy.special.comb(n_ij, 2, exact=1) for n_ij in
contingency.flatten()))
prod_comb = (sum_comb_c * sum_comb_k)/float(scipy.special.comb(n_samples,
2))
mean_comb = (sum_comb_k + sum_comb_c)/2.
return (sum_comb - prod_comb)/(mean_comb - prod_comb) | [
"def",
"_adjusted_rand_index",
"(",
"reference_indices",
",",
"estimated_indices",
")",
":",
"n_samples",
"=",
"len",
"(",
"reference_indices",
")",
"ref_classes",
"=",
"np",
".",
"unique",
"(",
"reference_indices",
")",
"est_classes",
"=",
"np",
".",
"unique",
... | Compute the Rand index, adjusted for change.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
Returns
-------
ari : float
Adjusted Rand index
.. note:: Based on sklearn.metrics.cluster.adjusted_rand_score | [
"Compute",
"the",
"Rand",
"index",
"adjusted",
"for",
"change",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L546-L589 |
21,488 | craffel/mir_eval | mir_eval/segment.py | _mutual_info_score | def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
"""Compute the mutual information between two sequence labelings.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
(Default value = None)
Returns
-------
mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score
"""
if contingency is None:
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum() | python | def _mutual_info_score(reference_indices, estimated_indices, contingency=None):
if contingency is None:
contingency = _contingency_matrix(reference_indices,
estimated_indices).astype(float)
contingency_sum = np.sum(contingency)
pi = np.sum(contingency, axis=1)
pj = np.sum(contingency, axis=0)
outer = np.outer(pi, pj)
nnz = contingency != 0.0
# normalized contingency
contingency_nm = contingency[nnz]
log_contingency_nm = np.log(contingency_nm)
contingency_nm /= contingency_sum
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
log_outer = -np.log(outer[nnz]) + np.log(pi.sum()) + np.log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - np.log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum() | [
"def",
"_mutual_info_score",
"(",
"reference_indices",
",",
"estimated_indices",
",",
"contingency",
"=",
"None",
")",
":",
"if",
"contingency",
"is",
"None",
":",
"contingency",
"=",
"_contingency_matrix",
"(",
"reference_indices",
",",
"estimated_indices",
")",
".... | Compute the mutual information between two sequence labelings.
Parameters
----------
reference_indices : np.ndarray
Array of reference indices
estimated_indices : np.ndarray
Array of estimated indices
contingency : np.ndarray
Pre-computed contingency matrix. If None, one will be computed.
(Default value = None)
Returns
-------
mi : float
Mutual information
.. note:: Based on sklearn.metrics.cluster.mutual_info_score | [
"Compute",
"the",
"mutual",
"information",
"between",
"two",
"sequence",
"labelings",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L663-L701 |
21,489 | craffel/mir_eval | mir_eval/segment.py | _entropy | def _entropy(labels):
"""Calculates the entropy for a labeling.
Parameters
----------
labels : list-like
List of labels.
Returns
-------
entropy : float
Entropy of the labeling.
.. note:: Based on sklearn.metrics.cluster.entropy
"""
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum))) | python | def _entropy(labels):
if len(labels) == 0:
return 1.0
label_idx = np.unique(labels, return_inverse=True)[1]
pi = np.bincount(label_idx).astype(np.float)
pi = pi[pi > 0]
pi_sum = np.sum(pi)
# log(a / b) should be calculated as log(a) - log(b) for
# possible loss of precision
return -np.sum((pi / pi_sum) * (np.log(pi) - np.log(pi_sum))) | [
"def",
"_entropy",
"(",
"labels",
")",
":",
"if",
"len",
"(",
"labels",
")",
"==",
"0",
":",
"return",
"1.0",
"label_idx",
"=",
"np",
".",
"unique",
"(",
"labels",
",",
"return_inverse",
"=",
"True",
")",
"[",
"1",
"]",
"pi",
"=",
"np",
".",
"bin... | Calculates the entropy for a labeling.
Parameters
----------
labels : list-like
List of labels.
Returns
-------
entropy : float
Entropy of the labeling.
.. note:: Based on sklearn.metrics.cluster.entropy | [
"Calculates",
"the",
"entropy",
"for",
"a",
"labeling",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/segment.py#L704-L728 |
21,490 | craffel/mir_eval | mir_eval/tempo.py | validate_tempi | def validate_tempi(tempi, reference=True):
"""Checks that there are two non-negative tempi.
For a reference value, at least one tempo has to be greater than zero.
Parameters
----------
tempi : np.ndarray
length-2 array of tempo, in bpm
reference : bool
indicates a reference value
"""
if tempi.size != 2:
raise ValueError('tempi must have exactly two values')
if not np.all(np.isfinite(tempi)) or np.any(tempi < 0):
raise ValueError('tempi={} must be non-negative numbers'.format(tempi))
if reference and np.all(tempi == 0):
raise ValueError('reference tempi={} must have one'
' value greater than zero'.format(tempi)) | python | def validate_tempi(tempi, reference=True):
if tempi.size != 2:
raise ValueError('tempi must have exactly two values')
if not np.all(np.isfinite(tempi)) or np.any(tempi < 0):
raise ValueError('tempi={} must be non-negative numbers'.format(tempi))
if reference and np.all(tempi == 0):
raise ValueError('reference tempi={} must have one'
' value greater than zero'.format(tempi)) | [
"def",
"validate_tempi",
"(",
"tempi",
",",
"reference",
"=",
"True",
")",
":",
"if",
"tempi",
".",
"size",
"!=",
"2",
":",
"raise",
"ValueError",
"(",
"'tempi must have exactly two values'",
")",
"if",
"not",
"np",
".",
"all",
"(",
"np",
".",
"isfinite",
... | Checks that there are two non-negative tempi.
For a reference value, at least one tempo has to be greater than zero.
Parameters
----------
tempi : np.ndarray
length-2 array of tempo, in bpm
reference : bool
indicates a reference value | [
"Checks",
"that",
"there",
"are",
"two",
"non",
"-",
"negative",
"tempi",
".",
"For",
"a",
"reference",
"value",
"at",
"least",
"one",
"tempo",
"has",
"to",
"be",
"greater",
"than",
"zero",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/tempo.py#L29-L51 |
21,491 | craffel/mir_eval | mir_eval/tempo.py | validate | def validate(reference_tempi, reference_weight, estimated_tempi):
"""Checks that the input annotations to a metric look like valid tempo
annotations.
Parameters
----------
reference_tempi : np.ndarray
reference tempo values, in bpm
reference_weight : float
perceptual weight of slow vs fast in reference
estimated_tempi : np.ndarray
estimated tempo values, in bpm
"""
validate_tempi(reference_tempi, reference=True)
validate_tempi(estimated_tempi, reference=False)
if reference_weight < 0 or reference_weight > 1:
raise ValueError('Reference weight must lie in range [0, 1]') | python | def validate(reference_tempi, reference_weight, estimated_tempi):
validate_tempi(reference_tempi, reference=True)
validate_tempi(estimated_tempi, reference=False)
if reference_weight < 0 or reference_weight > 1:
raise ValueError('Reference weight must lie in range [0, 1]') | [
"def",
"validate",
"(",
"reference_tempi",
",",
"reference_weight",
",",
"estimated_tempi",
")",
":",
"validate_tempi",
"(",
"reference_tempi",
",",
"reference",
"=",
"True",
")",
"validate_tempi",
"(",
"estimated_tempi",
",",
"reference",
"=",
"False",
")",
"if",... | Checks that the input annotations to a metric look like valid tempo
annotations.
Parameters
----------
reference_tempi : np.ndarray
reference tempo values, in bpm
reference_weight : float
perceptual weight of slow vs fast in reference
estimated_tempi : np.ndarray
estimated tempo values, in bpm | [
"Checks",
"that",
"the",
"input",
"annotations",
"to",
"a",
"metric",
"look",
"like",
"valid",
"tempo",
"annotations",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/tempo.py#L54-L74 |
21,492 | craffel/mir_eval | mir_eval/tempo.py | detection | def detection(reference_tempi, reference_weight, estimated_tempi, tol=0.08):
"""Compute the tempo detection accuracy metric.
Parameters
----------
reference_tempi : np.ndarray, shape=(2,)
Two non-negative reference tempi
reference_weight : float > 0
The relative strength of ``reference_tempi[0]`` vs
``reference_tempi[1]``.
estimated_tempi : np.ndarray, shape=(2,)
Two non-negative estimated tempi.
tol : float in [0, 1]:
The maximum allowable deviation from a reference tempo to
count as a hit.
``|est_t - ref_t| <= tol * ref_t``
(Default value = 0.08)
Returns
-------
p_score : float in [0, 1]
Weighted average of recalls:
``reference_weight * hits[0] + (1 - reference_weight) * hits[1]``
one_correct : bool
True if at least one reference tempo was correctly estimated
both_correct : bool
True if both reference tempi were correctly estimated
Raises
------
ValueError
If the input tempi are ill-formed
If the reference weight is not in the range [0, 1]
If ``tol < 0`` or ``tol > 1``.
"""
validate(reference_tempi, reference_weight, estimated_tempi)
if tol < 0 or tol > 1:
raise ValueError('invalid tolerance {}: must lie in the range '
'[0, 1]'.format(tol))
if tol == 0.:
warnings.warn('A tolerance of 0.0 may not '
'lead to the results you expect.')
hits = [False, False]
for i, ref_t in enumerate(reference_tempi):
if ref_t > 0:
# Compute the relative error for this reference tempo
f_ref_t = float(ref_t)
relative_error = np.min(np.abs(ref_t - estimated_tempi) / f_ref_t)
# Count the hits
hits[i] = relative_error <= tol
p_score = reference_weight * hits[0] + (1.0-reference_weight) * hits[1]
one_correct = bool(np.max(hits))
both_correct = bool(np.min(hits))
return p_score, one_correct, both_correct | python | def detection(reference_tempi, reference_weight, estimated_tempi, tol=0.08):
validate(reference_tempi, reference_weight, estimated_tempi)
if tol < 0 or tol > 1:
raise ValueError('invalid tolerance {}: must lie in the range '
'[0, 1]'.format(tol))
if tol == 0.:
warnings.warn('A tolerance of 0.0 may not '
'lead to the results you expect.')
hits = [False, False]
for i, ref_t in enumerate(reference_tempi):
if ref_t > 0:
# Compute the relative error for this reference tempo
f_ref_t = float(ref_t)
relative_error = np.min(np.abs(ref_t - estimated_tempi) / f_ref_t)
# Count the hits
hits[i] = relative_error <= tol
p_score = reference_weight * hits[0] + (1.0-reference_weight) * hits[1]
one_correct = bool(np.max(hits))
both_correct = bool(np.min(hits))
return p_score, one_correct, both_correct | [
"def",
"detection",
"(",
"reference_tempi",
",",
"reference_weight",
",",
"estimated_tempi",
",",
"tol",
"=",
"0.08",
")",
":",
"validate",
"(",
"reference_tempi",
",",
"reference_weight",
",",
"estimated_tempi",
")",
"if",
"tol",
"<",
"0",
"or",
"tol",
">",
... | Compute the tempo detection accuracy metric.
Parameters
----------
reference_tempi : np.ndarray, shape=(2,)
Two non-negative reference tempi
reference_weight : float > 0
The relative strength of ``reference_tempi[0]`` vs
``reference_tempi[1]``.
estimated_tempi : np.ndarray, shape=(2,)
Two non-negative estimated tempi.
tol : float in [0, 1]:
The maximum allowable deviation from a reference tempo to
count as a hit.
``|est_t - ref_t| <= tol * ref_t``
(Default value = 0.08)
Returns
-------
p_score : float in [0, 1]
Weighted average of recalls:
``reference_weight * hits[0] + (1 - reference_weight) * hits[1]``
one_correct : bool
True if at least one reference tempo was correctly estimated
both_correct : bool
True if both reference tempi were correctly estimated
Raises
------
ValueError
If the input tempi are ill-formed
If the reference weight is not in the range [0, 1]
If ``tol < 0`` or ``tol > 1``. | [
"Compute",
"the",
"tempo",
"detection",
"accuracy",
"metric",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/tempo.py#L77-L145 |
21,493 | craffel/mir_eval | mir_eval/multipitch.py | validate | def validate(ref_time, ref_freqs, est_time, est_freqs):
"""Checks that the time and frequency inputs are well-formed.
Parameters
----------
ref_time : np.ndarray
reference time stamps in seconds
ref_freqs : list of np.ndarray
reference frequencies in Hz
est_time : np.ndarray
estimate time stamps in seconds
est_freqs : list of np.ndarray
estimated frequencies in Hz
"""
util.validate_events(ref_time, max_time=MAX_TIME)
util.validate_events(est_time, max_time=MAX_TIME)
if ref_time.size == 0:
warnings.warn("Reference times are empty.")
if ref_time.ndim != 1:
raise ValueError("Reference times have invalid dimension")
if len(ref_freqs) == 0:
warnings.warn("Reference frequencies are empty.")
if est_time.size == 0:
warnings.warn("Estimated times are empty.")
if est_time.ndim != 1:
raise ValueError("Estimated times have invalid dimension")
if len(est_freqs) == 0:
warnings.warn("Estimated frequencies are empty.")
if ref_time.size != len(ref_freqs):
raise ValueError('Reference times and frequencies have unequal '
'lengths.')
if est_time.size != len(est_freqs):
raise ValueError('Estimate times and frequencies have unequal '
'lengths.')
for freq in ref_freqs:
util.validate_frequencies(freq, max_freq=MAX_FREQ, min_freq=MIN_FREQ,
allow_negatives=False)
for freq in est_freqs:
util.validate_frequencies(freq, max_freq=MAX_FREQ, min_freq=MIN_FREQ,
allow_negatives=False) | python | def validate(ref_time, ref_freqs, est_time, est_freqs):
util.validate_events(ref_time, max_time=MAX_TIME)
util.validate_events(est_time, max_time=MAX_TIME)
if ref_time.size == 0:
warnings.warn("Reference times are empty.")
if ref_time.ndim != 1:
raise ValueError("Reference times have invalid dimension")
if len(ref_freqs) == 0:
warnings.warn("Reference frequencies are empty.")
if est_time.size == 0:
warnings.warn("Estimated times are empty.")
if est_time.ndim != 1:
raise ValueError("Estimated times have invalid dimension")
if len(est_freqs) == 0:
warnings.warn("Estimated frequencies are empty.")
if ref_time.size != len(ref_freqs):
raise ValueError('Reference times and frequencies have unequal '
'lengths.')
if est_time.size != len(est_freqs):
raise ValueError('Estimate times and frequencies have unequal '
'lengths.')
for freq in ref_freqs:
util.validate_frequencies(freq, max_freq=MAX_FREQ, min_freq=MIN_FREQ,
allow_negatives=False)
for freq in est_freqs:
util.validate_frequencies(freq, max_freq=MAX_FREQ, min_freq=MIN_FREQ,
allow_negatives=False) | [
"def",
"validate",
"(",
"ref_time",
",",
"ref_freqs",
",",
"est_time",
",",
"est_freqs",
")",
":",
"util",
".",
"validate_events",
"(",
"ref_time",
",",
"max_time",
"=",
"MAX_TIME",
")",
"util",
".",
"validate_events",
"(",
"est_time",
",",
"max_time",
"=",
... | Checks that the time and frequency inputs are well-formed.
Parameters
----------
ref_time : np.ndarray
reference time stamps in seconds
ref_freqs : list of np.ndarray
reference frequencies in Hz
est_time : np.ndarray
estimate time stamps in seconds
est_freqs : list of np.ndarray
estimated frequencies in Hz | [
"Checks",
"that",
"the",
"time",
"and",
"frequency",
"inputs",
"are",
"well",
"-",
"formed",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/multipitch.py#L57-L101 |
21,494 | craffel/mir_eval | mir_eval/multipitch.py | resample_multipitch | def resample_multipitch(times, frequencies, target_times):
"""Resamples multipitch time series to a new timescale. Values in
``target_times`` outside the range of ``times`` return no pitch estimate.
Parameters
----------
times : np.ndarray
Array of time stamps
frequencies : list of np.ndarray
List of np.ndarrays of frequency values
target_times : np.ndarray
Array of target time stamps
Returns
-------
frequencies_resampled : list of numpy arrays
Frequency list of lists resampled to new timebase
"""
if target_times.size == 0:
return []
if times.size == 0:
return [np.array([])]*len(target_times)
n_times = len(frequencies)
# scipy's interpolate doesn't handle ragged arrays. Instead, we interpolate
# the frequency index and then map back to the frequency values.
# This only works because we're using a nearest neighbor interpolator!
frequency_index = np.arange(0, n_times)
# times are already ordered so assume_sorted=True for efficiency
# since we're interpolating the index, fill_value is set to the first index
# that is out of range. We handle this in the next line.
new_frequency_index = scipy.interpolate.interp1d(
times, frequency_index, kind='nearest', bounds_error=False,
assume_sorted=True, fill_value=n_times)(target_times)
# create array of frequencies plus additional empty element at the end for
# target time stamps that are out of the interpolation range
freq_vals = frequencies + [np.array([])]
# map interpolated indices back to frequency values
frequencies_resampled = [
freq_vals[i] for i in new_frequency_index.astype(int)]
return frequencies_resampled | python | def resample_multipitch(times, frequencies, target_times):
if target_times.size == 0:
return []
if times.size == 0:
return [np.array([])]*len(target_times)
n_times = len(frequencies)
# scipy's interpolate doesn't handle ragged arrays. Instead, we interpolate
# the frequency index and then map back to the frequency values.
# This only works because we're using a nearest neighbor interpolator!
frequency_index = np.arange(0, n_times)
# times are already ordered so assume_sorted=True for efficiency
# since we're interpolating the index, fill_value is set to the first index
# that is out of range. We handle this in the next line.
new_frequency_index = scipy.interpolate.interp1d(
times, frequency_index, kind='nearest', bounds_error=False,
assume_sorted=True, fill_value=n_times)(target_times)
# create array of frequencies plus additional empty element at the end for
# target time stamps that are out of the interpolation range
freq_vals = frequencies + [np.array([])]
# map interpolated indices back to frequency values
frequencies_resampled = [
freq_vals[i] for i in new_frequency_index.astype(int)]
return frequencies_resampled | [
"def",
"resample_multipitch",
"(",
"times",
",",
"frequencies",
",",
"target_times",
")",
":",
"if",
"target_times",
".",
"size",
"==",
"0",
":",
"return",
"[",
"]",
"if",
"times",
".",
"size",
"==",
"0",
":",
"return",
"[",
"np",
".",
"array",
"(",
... | Resamples multipitch time series to a new timescale. Values in
``target_times`` outside the range of ``times`` return no pitch estimate.
Parameters
----------
times : np.ndarray
Array of time stamps
frequencies : list of np.ndarray
List of np.ndarrays of frequency values
target_times : np.ndarray
Array of target time stamps
Returns
-------
frequencies_resampled : list of numpy arrays
Frequency list of lists resampled to new timebase | [
"Resamples",
"multipitch",
"time",
"series",
"to",
"a",
"new",
"timescale",
".",
"Values",
"in",
"target_times",
"outside",
"the",
"range",
"of",
"times",
"return",
"no",
"pitch",
"estimate",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/multipitch.py#L104-L150 |
21,495 | craffel/mir_eval | mir_eval/multipitch.py | compute_num_true_positives | def compute_num_true_positives(ref_freqs, est_freqs, window=0.5, chroma=False):
"""Compute the number of true positives in an estimate given a reference.
A frequency is correct if it is within a quartertone of the
correct frequency.
Parameters
----------
ref_freqs : list of np.ndarray
reference frequencies (MIDI)
est_freqs : list of np.ndarray
estimated frequencies (MIDI)
window : float
Window size, in semitones
chroma : bool
If True, computes distances modulo n.
If True, ``ref_freqs`` and ``est_freqs`` should be wrapped modulo n.
Returns
-------
true_positives : np.ndarray
Array the same length as ref_freqs containing the number of true
positives.
"""
n_frames = len(ref_freqs)
true_positives = np.zeros((n_frames, ))
for i, (ref_frame, est_frame) in enumerate(zip(ref_freqs, est_freqs)):
if chroma:
# match chroma-wrapped frequency events
matching = util.match_events(
ref_frame, est_frame, window,
distance=util._outer_distance_mod_n)
else:
# match frequency events within tolerance window in semitones
matching = util.match_events(ref_frame, est_frame, window)
true_positives[i] = len(matching)
return true_positives | python | def compute_num_true_positives(ref_freqs, est_freqs, window=0.5, chroma=False):
n_frames = len(ref_freqs)
true_positives = np.zeros((n_frames, ))
for i, (ref_frame, est_frame) in enumerate(zip(ref_freqs, est_freqs)):
if chroma:
# match chroma-wrapped frequency events
matching = util.match_events(
ref_frame, est_frame, window,
distance=util._outer_distance_mod_n)
else:
# match frequency events within tolerance window in semitones
matching = util.match_events(ref_frame, est_frame, window)
true_positives[i] = len(matching)
return true_positives | [
"def",
"compute_num_true_positives",
"(",
"ref_freqs",
",",
"est_freqs",
",",
"window",
"=",
"0.5",
",",
"chroma",
"=",
"False",
")",
":",
"n_frames",
"=",
"len",
"(",
"ref_freqs",
")",
"true_positives",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_frames",
",",
... | Compute the number of true positives in an estimate given a reference.
A frequency is correct if it is within a quartertone of the
correct frequency.
Parameters
----------
ref_freqs : list of np.ndarray
reference frequencies (MIDI)
est_freqs : list of np.ndarray
estimated frequencies (MIDI)
window : float
Window size, in semitones
chroma : bool
If True, computes distances modulo n.
If True, ``ref_freqs`` and ``est_freqs`` should be wrapped modulo n.
Returns
-------
true_positives : np.ndarray
Array the same length as ref_freqs containing the number of true
positives. | [
"Compute",
"the",
"number",
"of",
"true",
"positives",
"in",
"an",
"estimate",
"given",
"a",
"reference",
".",
"A",
"frequency",
"is",
"correct",
"if",
"it",
"is",
"within",
"a",
"quartertone",
"of",
"the",
"correct",
"frequency",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/multipitch.py#L204-L243 |
21,496 | craffel/mir_eval | mir_eval/multipitch.py | compute_accuracy | def compute_accuracy(true_positives, n_ref, n_est):
"""Compute accuracy metrics.
Parameters
----------
true_positives : np.ndarray
Array containing the number of true positives at each time point.
n_ref : np.ndarray
Array containing the number of reference frequencies at each time
point.
n_est : np.ndarray
Array containing the number of estimate frequencies at each time point.
Returns
-------
precision : float
``sum(true_positives)/sum(n_est)``
recall : float
``sum(true_positives)/sum(n_ref)``
acc : float
``sum(true_positives)/sum(n_est + n_ref - true_positives)``
"""
true_positive_sum = float(true_positives.sum())
n_est_sum = n_est.sum()
if n_est_sum > 0:
precision = true_positive_sum/n_est.sum()
else:
warnings.warn("Estimate frequencies are all empty.")
precision = 0.0
n_ref_sum = n_ref.sum()
if n_ref_sum > 0:
recall = true_positive_sum/n_ref.sum()
else:
warnings.warn("Reference frequencies are all empty.")
recall = 0.0
acc_denom = (n_est + n_ref - true_positives).sum()
if acc_denom > 0:
acc = true_positive_sum/acc_denom
else:
acc = 0.0
return precision, recall, acc | python | def compute_accuracy(true_positives, n_ref, n_est):
true_positive_sum = float(true_positives.sum())
n_est_sum = n_est.sum()
if n_est_sum > 0:
precision = true_positive_sum/n_est.sum()
else:
warnings.warn("Estimate frequencies are all empty.")
precision = 0.0
n_ref_sum = n_ref.sum()
if n_ref_sum > 0:
recall = true_positive_sum/n_ref.sum()
else:
warnings.warn("Reference frequencies are all empty.")
recall = 0.0
acc_denom = (n_est + n_ref - true_positives).sum()
if acc_denom > 0:
acc = true_positive_sum/acc_denom
else:
acc = 0.0
return precision, recall, acc | [
"def",
"compute_accuracy",
"(",
"true_positives",
",",
"n_ref",
",",
"n_est",
")",
":",
"true_positive_sum",
"=",
"float",
"(",
"true_positives",
".",
"sum",
"(",
")",
")",
"n_est_sum",
"=",
"n_est",
".",
"sum",
"(",
")",
"if",
"n_est_sum",
">",
"0",
":"... | Compute accuracy metrics.
Parameters
----------
true_positives : np.ndarray
Array containing the number of true positives at each time point.
n_ref : np.ndarray
Array containing the number of reference frequencies at each time
point.
n_est : np.ndarray
Array containing the number of estimate frequencies at each time point.
Returns
-------
precision : float
``sum(true_positives)/sum(n_est)``
recall : float
``sum(true_positives)/sum(n_ref)``
acc : float
``sum(true_positives)/sum(n_est + n_ref - true_positives)`` | [
"Compute",
"accuracy",
"metrics",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/multipitch.py#L246-L291 |
21,497 | craffel/mir_eval | mir_eval/multipitch.py | compute_err_score | def compute_err_score(true_positives, n_ref, n_est):
"""Compute error score metrics.
Parameters
----------
true_positives : np.ndarray
Array containing the number of true positives at each time point.
n_ref : np.ndarray
Array containing the number of reference frequencies at each time
point.
n_est : np.ndarray
Array containing the number of estimate frequencies at each time point.
Returns
-------
e_sub : float
Substitution error
e_miss : float
Miss error
e_fa : float
False alarm error
e_tot : float
Total error
"""
n_ref_sum = float(n_ref.sum())
if n_ref_sum == 0:
warnings.warn("Reference frequencies are all empty.")
return 0., 0., 0., 0.
# Substitution error
e_sub = (np.min([n_ref, n_est], axis=0) - true_positives).sum()/n_ref_sum
# compute the max of (n_ref - n_est) and 0
e_miss_numerator = n_ref - n_est
e_miss_numerator[e_miss_numerator < 0] = 0
# Miss error
e_miss = e_miss_numerator.sum()/n_ref_sum
# compute the max of (n_est - n_ref) and 0
e_fa_numerator = n_est - n_ref
e_fa_numerator[e_fa_numerator < 0] = 0
# False alarm error
e_fa = e_fa_numerator.sum()/n_ref_sum
# total error
e_tot = (np.max([n_ref, n_est], axis=0) - true_positives).sum()/n_ref_sum
return e_sub, e_miss, e_fa, e_tot | python | def compute_err_score(true_positives, n_ref, n_est):
n_ref_sum = float(n_ref.sum())
if n_ref_sum == 0:
warnings.warn("Reference frequencies are all empty.")
return 0., 0., 0., 0.
# Substitution error
e_sub = (np.min([n_ref, n_est], axis=0) - true_positives).sum()/n_ref_sum
# compute the max of (n_ref - n_est) and 0
e_miss_numerator = n_ref - n_est
e_miss_numerator[e_miss_numerator < 0] = 0
# Miss error
e_miss = e_miss_numerator.sum()/n_ref_sum
# compute the max of (n_est - n_ref) and 0
e_fa_numerator = n_est - n_ref
e_fa_numerator[e_fa_numerator < 0] = 0
# False alarm error
e_fa = e_fa_numerator.sum()/n_ref_sum
# total error
e_tot = (np.max([n_ref, n_est], axis=0) - true_positives).sum()/n_ref_sum
return e_sub, e_miss, e_fa, e_tot | [
"def",
"compute_err_score",
"(",
"true_positives",
",",
"n_ref",
",",
"n_est",
")",
":",
"n_ref_sum",
"=",
"float",
"(",
"n_ref",
".",
"sum",
"(",
")",
")",
"if",
"n_ref_sum",
"==",
"0",
":",
"warnings",
".",
"warn",
"(",
"\"Reference frequencies are all emp... | Compute error score metrics.
Parameters
----------
true_positives : np.ndarray
Array containing the number of true positives at each time point.
n_ref : np.ndarray
Array containing the number of reference frequencies at each time
point.
n_est : np.ndarray
Array containing the number of estimate frequencies at each time point.
Returns
-------
e_sub : float
Substitution error
e_miss : float
Miss error
e_fa : float
False alarm error
e_tot : float
Total error | [
"Compute",
"error",
"score",
"metrics",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/multipitch.py#L294-L343 |
21,498 | craffel/mir_eval | mir_eval/hierarchy.py | _hierarchy_bounds | def _hierarchy_bounds(intervals_hier):
'''Compute the covered time range of a hierarchical segmentation.
Parameters
----------
intervals_hier : list of ndarray
A hierarchical segmentation, encoded as a list of arrays of segment
intervals.
Returns
-------
t_min : float
t_max : float
The minimum and maximum times spanned by the annotation
'''
boundaries = list(itertools.chain(*list(itertools.chain(*intervals_hier))))
return min(boundaries), max(boundaries) | python | def _hierarchy_bounds(intervals_hier):
'''Compute the covered time range of a hierarchical segmentation.
Parameters
----------
intervals_hier : list of ndarray
A hierarchical segmentation, encoded as a list of arrays of segment
intervals.
Returns
-------
t_min : float
t_max : float
The minimum and maximum times spanned by the annotation
'''
boundaries = list(itertools.chain(*list(itertools.chain(*intervals_hier))))
return min(boundaries), max(boundaries) | [
"def",
"_hierarchy_bounds",
"(",
"intervals_hier",
")",
":",
"boundaries",
"=",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"list",
"(",
"itertools",
".",
"chain",
"(",
"*",
"intervals_hier",
")",
")",
")",
")",
"return",
"min",
"(",
"boundaries",
... | Compute the covered time range of a hierarchical segmentation.
Parameters
----------
intervals_hier : list of ndarray
A hierarchical segmentation, encoded as a list of arrays of segment
intervals.
Returns
-------
t_min : float
t_max : float
The minimum and maximum times spanned by the annotation | [
"Compute",
"the",
"covered",
"time",
"range",
"of",
"a",
"hierarchical",
"segmentation",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/hierarchy.py#L83-L100 |
21,499 | craffel/mir_eval | mir_eval/hierarchy.py | _align_intervals | def _align_intervals(int_hier, lab_hier, t_min=0.0, t_max=None):
'''Align a hierarchical annotation to span a fixed start and end time.
Parameters
----------
int_hier : list of list of intervals
lab_hier : list of list of str
Hierarchical segment annotations, encoded as a
list of list of intervals (int_hier) and list of
list of strings (lab_hier)
t_min : None or number >= 0
The minimum time value for the segmentation
t_max : None or number >= t_min
The maximum time value for the segmentation
Returns
-------
intervals_hier : list of list of intervals
labels_hier : list of list of str
`int_hier` `lab_hier` aligned to span `[t_min, t_max]`.
'''
return [list(_) for _ in zip(*[util.adjust_intervals(np.asarray(ival),
labels=lab,
t_min=t_min,
t_max=t_max)
for ival, lab in zip(int_hier, lab_hier)])] | python | def _align_intervals(int_hier, lab_hier, t_min=0.0, t_max=None):
'''Align a hierarchical annotation to span a fixed start and end time.
Parameters
----------
int_hier : list of list of intervals
lab_hier : list of list of str
Hierarchical segment annotations, encoded as a
list of list of intervals (int_hier) and list of
list of strings (lab_hier)
t_min : None or number >= 0
The minimum time value for the segmentation
t_max : None or number >= t_min
The maximum time value for the segmentation
Returns
-------
intervals_hier : list of list of intervals
labels_hier : list of list of str
`int_hier` `lab_hier` aligned to span `[t_min, t_max]`.
'''
return [list(_) for _ in zip(*[util.adjust_intervals(np.asarray(ival),
labels=lab,
t_min=t_min,
t_max=t_max)
for ival, lab in zip(int_hier, lab_hier)])] | [
"def",
"_align_intervals",
"(",
"int_hier",
",",
"lab_hier",
",",
"t_min",
"=",
"0.0",
",",
"t_max",
"=",
"None",
")",
":",
"return",
"[",
"list",
"(",
"_",
")",
"for",
"_",
"in",
"zip",
"(",
"*",
"[",
"util",
".",
"adjust_intervals",
"(",
"np",
".... | Align a hierarchical annotation to span a fixed start and end time.
Parameters
----------
int_hier : list of list of intervals
lab_hier : list of list of str
Hierarchical segment annotations, encoded as a
list of list of intervals (int_hier) and list of
list of strings (lab_hier)
t_min : None or number >= 0
The minimum time value for the segmentation
t_max : None or number >= t_min
The maximum time value for the segmentation
Returns
-------
intervals_hier : list of list of intervals
labels_hier : list of list of str
`int_hier` `lab_hier` aligned to span `[t_min, t_max]`. | [
"Align",
"a",
"hierarchical",
"annotation",
"to",
"span",
"a",
"fixed",
"start",
"and",
"end",
"time",
"."
] | f41c8dafaea04b411252a516d1965af43c7d531b | https://github.com/craffel/mir_eval/blob/f41c8dafaea04b411252a516d1965af43c7d531b/mir_eval/hierarchy.py#L103-L130 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.