repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
nussl | nussl-master/nussl/core/utils.py | """
Provides utilities for running nussl algorithms that do not belong to
any specific algorithm or that are shared between algorithms.
"""
import warnings
import numpy as np
import torch
import random
from .. import musdb
import librosa
from . import constants
import os
from contextlib import contextmanager
def seed(random_seed, set_cudnn=False):
    """
    Seeds every random state used by nussl (``torch``, ``numpy``, and
    ``random``) with the same seed for reproducibility.

    For *full* reproducibility, torch's cuDNN backend must also be put
    into deterministic mode (see
    https://pytorch.org/docs/stable/notes/randomness.html). Pass
    ``set_cudnn=True`` to do so; it defaults to False because
    deterministic cuDNN is slower.

    Args:
        random_seed (int): integer seed applied to all random generators.
        set_cudnn (bool): Whether to put cuDNN in deterministic mode and
            disable benchmark mode. Defaults to False.
    """
    # Seed every generator nussl relies on.
    for seed_fn in (torch.manual_seed, np.random.seed, random.seed):
        seed_fn(random_seed)

    if set_cudnn:
        # Trade performance for determinism in cuDNN kernels.
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def find_peak_indices(input_array, n_peaks, min_dist=None, do_min=False, threshold=0.5):
    """
    Finds the indices of the peaks of an input 1-D or 2-D numpy array.

    Can be configured to find maxima or minima, to enforce a minimum distance
    between found peaks, and to ignore values below a threshold. Used exactly
    the same as :func:`find_peak_values`.

    Notes:
        * This function only returns the indices of peaks. If you want peak
          values, use :func:`find_peak_values`.
        * ``min_dist`` can be an int or a tuple of length 2.
          If ``input_array`` is 1-D, ``min_dist`` must be an integer.
          If ``input_array`` is 2-D, ``min_dist`` can be an integer (same
          minimum distance in both dimensions) or a tuple giving a separate
          minimum distance per dimension (0th value -> first dimension,
          1st value -> second dimension).

    Args:
        input_array: a 1- or 2- dimensional numpy array that will be inspected.
        n_peaks: (int) maximum number of peaks to find
        min_dist: (int) minimum distance between peaks.
            Default value: len(input_array) / 4
        do_min: (bool) if True, finds indices at minimum value instead of maximum
        threshold: (float) the value (scaled between 0.0 and 1.0) below which
            candidates are discarded

    Returns:
        peak_indices: (list) list of the indices of the peak values

    Raises:
        ValueError: if ``input_array`` has more than 2 dimensions, is constant,
            or the threshold discards every candidate.
    """
    input_array = np.array(input_array, dtype=float)

    if input_array.ndim > 2:
        raise ValueError('Cannot find peak indices on data greater than 2 dimensions!')

    is_1d = input_array.ndim == 1
    zero_dist = zero_dist0 = zero_dist1 = None
    min_dist = len(input_array) // 4 if min_dist is None else min_dist

    if is_1d:
        zero_dist = min_dist
    else:
        if type(min_dist) is int:
            zero_dist0 = zero_dist1 = min_dist
        elif len(min_dist) == 1:
            zero_dist0 = zero_dist1 = min_dist[0]
        else:
            zero_dist0, zero_dist1 = min_dist

    # scale input_array between [0.0, 1.0]
    ptp = np.max(input_array) - np.min(input_array)
    if ptp == 0:
        # Previously this silently divided by zero, producing NaNs.
        raise ValueError('Cannot find peaks: input_array is constant.')
    input_array = (input_array - np.min(input_array)) / ptp

    # If searching for minima, reflect the scaled array so minima become
    # maxima while staying in [0.0, 1.0]. (Simply negating, as before, made
    # every value <= 0, so no candidate ever survived a positive threshold
    # and do_min=True always raised.)
    if do_min:
        input_array = 1.0 - input_array

    # throw out everything below threshold
    input_array = np.multiply(input_array, (input_array >= threshold))

    # check to make sure we didn't throw everything out
    if np.size(np.nonzero(input_array)) == 0:
        raise ValueError('Threshold set incorrectly. No peaks above threshold.')
    if np.size(np.nonzero(input_array)) < n_peaks:
        warnings.warn('Threshold set such that there will be less peaks than n_peaks.')

    def _zero_bounds(index, dist, size):
        # Clamp the inclusive window [index - dist, index + dist] to valid
        # slice bounds for an axis of length ``size``.
        return max(int(index - dist), 0), min(int(index + dist + 1), size)

    peak_indices = []
    for _ in range(n_peaks):
        # np.unravel_index for 2D indices e.g., index 5 in a 3x3 array -> (1, 2)
        # Also, wrap in list for duck typing
        cur_peak_idx = list(np.unravel_index(np.argmax(input_array), input_array.shape))

        # zero out the found peak and its surroundings so it isn't found again
        if is_1d:
            cur_peak_idx = cur_peak_idx[0]
            peak_indices.append(cur_peak_idx)
            lower, upper = _zero_bounds(cur_peak_idx, zero_dist, len(input_array))
            input_array[lower:upper] = 0
        else:
            peak_indices.append(cur_peak_idx)
            lower0, upper0 = _zero_bounds(cur_peak_idx[0], zero_dist0,
                                          input_array.shape[0])
            lower1, upper1 = _zero_bounds(cur_peak_idx[1], zero_dist1,
                                          input_array.shape[1])
            input_array[lower0:upper0, lower1:upper1] = 0

        if np.sum(input_array) == 0.0:
            break

    return peak_indices
def _set_array_zero_indices(index, zero_distance, max_len):
lower = index - zero_distance
upper = index + zero_distance + 1
lower = 0 if lower < 0 else lower
upper = max_len if upper >= max_len else upper
return int(lower), int(upper)
def complex_randn(shape):
    """
    Returns a complex-valued numpy array of standard-normal random values.

    Args:
        shape (tuple): Shape of the resultant complex numpy array.

    Returns:
        (:obj:`np.ndarray`): complex-valued array of random values with
        shape ``shape``; real and imaginary parts are drawn independently.
    """
    real_part = np.random.randn(*shape)
    imag_part = np.random.randn(*shape)
    return real_part + 1j * imag_part
def _get_axis(array, axis_num, i):
"""
Will get index 'i' along axis 'axis_num' using np.take.
Args:
array (:obj:`np.ndarray`): Array to fetch axis of.
axis_num (int): Axes to retrieve.
i (int): Index to retrieve.
Returns:
The value at index :param:`i` along axis :param:`axis_num`
"""
return np.take(array, i, axis_num)
def _slice_along_dim(data, dim, start, end):
"""
Takes a slice of data along a dim between a start and an end. Agnostic to
whether the data is a numpy array or a torch tensor.
Args:
data (np.ndarray or torch.Tensor): Data to slice.
dim (int): Dimension along which to do the slicing.
start (int): Start of the slice.
end (int): End of the slice
"""
if dim > 3:
raise ValueError("Unsupported for dim > 4")
if dim >= len(data.shape):
raise ValueError(f"dim {dim} too high for data.shape {data.shape}!")
if dim == 0:
return data[start:end, ...]
elif dim == 1:
return data[:, start:end, ...]
elif dim == 2:
return data[:, :, start:end, ...]
elif dim == 3:
return data[:, :, :, start:end, ...]
def _format(string):
""" Formats a class name correctly for checking function and class names.
Strips all non-alphanumeric chars and makes lowercase.
"""
return ''.join(list(filter(str.isalnum, string))).lower()
def musdb_track_to_audio_signals(track):
    """
    Converts a musdb track to a dictionary of AudioSignal objects.

    Args:
        track (musdb.audio_classes.MultiTrack): MultiTrack object
            containing stems that will each be turned into AudioSignal
            objects.

    Returns:
        (2-tuple): tuple containing the mixture AudioSignal and a dictionary
        of the source AudioSignals, keyed by source name.
    """
    # lazy load to prevent circular imports
    from .audio_signal import AudioSignal

    mixture = AudioSignal(audio_data_array=track.audio, sample_rate=track.rate)
    mixture.path_to_input_file = track.name

    stem_data = track.stems
    # Iterate sources in stem-id order so the dict is built deterministically.
    ordered_sources = sorted(track.sources.items(), key=lambda item: item[1].stem_id)

    sources = {}
    for name, src in ordered_sources:
        signal = AudioSignal(
            audio_data_array=stem_data[src.stem_id],
            sample_rate=track.rate
        )
        signal.path_to_input_file = f'musdb/{track.name}_{name}.wav'
        sources[name] = signal

    return mixture, sources
def audio_signals_to_musdb_track(mixture, sources_dict, targets_dict):
    """
    Converts :class:`AudioSignal` objects to a ``musdb`` :class:`MultiTrack`
    object containing the mixture, the ground truth sources, and the targets,
    for use with the ``mus_eval`` implementation of BSS-Eval and ``musdb``.

    See Also:
        * ``musdb``: `Github<https://github.com/sigsep/sigsep-mus-db>`
          and `documentation<http://musdb.readthedocs.io/>`
        * ``mus_eval``: `Github<https://github.com/sigsep/sigsep-mus-eval>`
          and `documentation<https://sigsep.github.io/sigsep-mus-eval/>`
        * :class:`BSSEvalV4` for *nussl*'s interface to BSS-Eval v4.

    Examples:
        .. code-block:: python
            :linenos:
            import nussl
            signal = nussl.AudioSignal(nussl.efz_utils.download_audio_file('HistoryRepeating.wav'))
            repet = nussl.Repet(signal)
            repet.run()
            bg, fg = repet.make_audio_signals()
            src_dict = {'vocals': fg, 'accompaniment': bg}
            target = nussl.core.constants.STEM_TARGET_DICT
            track = nussl.utils.audio_signals_to_musdb_track(signal, src_dict, target)

    Args:
        mixture (:class:`AudioSignal`): The :class:`AudioSignal` object that
            contains the mixture.
        sources_dict (dict): Maps source labels to their :class:`AudioSignal`
            objects.
        targets_dict (dict): Maps source labels (as above) to weights.

    Returns:
        (:obj:`musdb.MultiTrack`) populated as specified by inputs.
    """
    # All sources and the mixture must agree on rate/channels/length.
    verify_audio_signal_list_strict(list(sources_dict.values()) + [mixture])

    path = mixture.path_to_input_file or "None"
    fname = mixture.file_name or "None"

    track = musdb.audio_classes.MultiTrack(path=path, name=fname, is_wav=True)
    track.audio = mixture.audio_data.T
    track.rate = mixture.sample_rate

    # Stem 0 is the mixture; remaining stems follow targets_dict order.
    stems = [track.audio]
    stems.extend(
        sources_dict[name].audio_data.T
        for name in targets_dict
        if name in sources_dict
    )
    track._stems = np.array(stems)
    return track
def verify_audio_signal_list_lax(audio_signal_list):
    """
    Verifies that the input is a list of :ref:`AudioSignal` objects (or a
    single :ref:`AudioSignal`, which is wrapped in a list) and that every
    signal has data.

    Args:
        audio_signal_list (list): List of :ref:`AudioSignal` objects

    Returns:
        audio_signal_list (list): Verified list of :ref:`AudioSignal` objects.

    Raises:
        ValueError: if the input is neither an AudioSignal nor a list of
            them, or if any signal has no data.
    """
    # Lazy load to prevent a circular reference upon initialization
    from .audio_signal import AudioSignal

    if isinstance(audio_signal_list, AudioSignal):
        return [audio_signal_list]

    if not isinstance(audio_signal_list, list):
        raise ValueError(
            'audio_signal_list must be a list of or a single AudioSignal objects!')

    if any(not isinstance(signal, AudioSignal) for signal in audio_signal_list):
        raise ValueError('All input objects must be AudioSignal objects!')
    if any(not signal.has_data for signal in audio_signal_list):
        raise ValueError('All AudioSignal objects in input list must have data!')

    return audio_signal_list
def verify_audio_signal_list_strict(audio_signal_list):
    """
    Verifies that the input is a list of :ref:`AudioSignal` objects and that
    they all share the same sample rate, number of channels, and signal
    length.

    Args:
        audio_signal_list (list): List of :ref:`AudioSignal` objects

    Returns:
        audio_signal_list (list): Verified list of :ref:`AudioSignal`
        objects, all with the same sample rate, channel count, and length.

    Raises:
        ValueError: if any of the three attributes differ across signals.
    """
    audio_signal_list = verify_audio_signal_list_lax(audio_signal_list)
    reference = audio_signal_list[0]

    # Compare each attribute of every signal against the first signal.
    checks = (
        ('sample_rate',
         'All input AudioSignal objects must have the same sample rate!'),
        ('num_channels',
         'All input AudioSignal objects must have the same number of channels!'),
        ('signal_length',
         'All input AudioSignal objects must have the same signal length!'),
    )
    for attr, message in checks:
        expected = getattr(reference, attr)
        if any(getattr(signal, attr) != expected for signal in audio_signal_list):
            raise ValueError(message)

    return audio_signal_list
def visualize_gradient_flow(named_parameters, n_bins=50):
    """
    Visualize the gradient flow through the named parameters of a PyTorch model.

    Plots stacked step histograms of the gradients flowing through each
    (non-bias) layer during training. Useful for spotting vanishing or
    exploding gradients.

    Usage: Plug this function in a Trainer class after ``loss.backward()`` as
    ``visualize_gradient_flow(self.model.named_parameters())``.

    Args:
        named_parameters (generator): Generator yielding (name, parameter)
            pairs for each layer in a PyTorch model.
        n_bins (int): Number of bins to use per histogram. Defaults to 50.
    """
    import matplotlib.pyplot as plt

    entries = []
    for name, param in named_parameters:
        if not param.requires_grad or "bias" in name or param.grad is None:
            continue
        grads = param.grad.cpu().data.numpy().flatten()
        # Drop the bottom and top 10% so outliers don't dominate the plot.
        lo = np.percentile(grads, 10)
        hi = np.percentile(grads, 90)
        grads = grads[(grads >= lo) & (grads <= hi)]
        label = name.split('layers.')[-1]
        entries.append((label, grads, np.abs(grads).mean()))

    # Plot layers ordered by mean absolute gradient.
    entries.sort(key=lambda entry: entry[-1])
    hist_data = [entry[1] for entry in entries]
    hist_labels = [entry[0] for entry in entries]

    plt.hist(hist_data, len(hist_data) * n_bins, histtype='step', fill=False,
             stacked=True, label=hist_labels)
    plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2)
def visualize_spectrogram(audio_signal, ch=0, do_mono=False, x_axis='time',
                          y_axis='linear', **kwargs):
    """
    Wrapper around ``librosa.display.specshow`` for usage with AudioSignals.

    Args:
        audio_signal (AudioSignal): AudioSignal to plot
        ch (int, optional): Which channel to plot. Defaults to 0.
        do_mono (bool, optional): Make the AudioSignal mono. Defaults to False.
        x_axis (str, optional): x_axis argument to librosa.display.specshow.
            Defaults to 'time'.
        y_axis (str, optional): y_axis argument to librosa.display.specshow.
            Defaults to 'linear'.
        kwargs: Additional keyword arguments to librosa.display.specshow.
    """
    import librosa.display

    if do_mono:
        audio_signal = audio_signal.to_mono(overwrite=False)

    if y_axis == 'mel':
        # Monkey patch for https://github.com/librosa/librosa/issues/1240
        data = librosa.feature.melspectrogram(
            audio_signal.get_channel(ch), sr=audio_signal.sample_rate)
        kwargs['fmax'] = audio_signal.sample_rate / 2.
    else:
        data = np.abs(audio_signal.stft())[..., ch]

    data = librosa.amplitude_to_db(data, ref=np.max)
    librosa.display.specshow(
        data, x_axis=x_axis, y_axis=y_axis,
        sr=audio_signal.sample_rate,
        hop_length=audio_signal.stft_params.hop_length,
        **kwargs)
def visualize_waveform(audio_signal, ch=0, do_mono=False, x_axis='time', **kwargs):
    """
    Wrapper around ``librosa.display.waveplot`` for usage with AudioSignals.

    Args:
        audio_signal (AudioSignal): AudioSignal to plot
        ch (int, optional): Which channel to plot. Defaults to 0.
        do_mono (bool, optional): Make the AudioSignal mono. Defaults to False.
        x_axis (str, optional): x_axis argument to librosa.display.waveplot.
            Defaults to 'time'.
        kwargs: Additional keyword arguments to librosa.display.waveplot.
    """
    import librosa.display
    import matplotlib.pyplot as plt

    signal = audio_signal.to_mono(overwrite=False) if do_mono else audio_signal
    # waveplot expects Fortran-ordered sample data.
    samples = np.asfortranarray(signal.audio_data[ch])
    librosa.display.waveplot(samples, sr=signal.sample_rate, x_axis=x_axis, **kwargs)
    plt.ylabel('Amplitude')
def visualize_sources_as_waveform(audio_signals, ch=0, do_mono=False, x_axis='time',
                                  colors=None, alphas=None, show_legend=True, **kwargs):
    """
    Visualizes a dictionary or list of sources as overlapping waveforms with
    transparency. The labels of each source are either the key, if a
    dictionary, or the path to the input audio file, if a list.

    Args:
        audio_signals (list or dict): List or dictionary of audio signal
            objects to be plotted.
        ch (int, optional): Which channel to plot. Defaults to 0.
        do_mono (bool, optional): Make each AudioSignal mono. Defaults to False.
        x_axis (str, optional): x_axis argument to librosa.display.waveplot.
            Defaults to 'time'.
        colors (list, optional): Sequence of colors to use for each signal.
            Defaults to None, which uses the default matplotlib color cycle.
        alphas (list, optional): Sequence of alpha transparency to use for each
            signal. Defaults to None, which spreads alphas in [0.25, 0.75].
        show_legend (bool, optional): Whether to draw a legend above the plot.
            Defaults to True.
        kwargs: Additional keyword arguments to librosa.display.waveplot.
    """
    import matplotlib.pyplot as plt

    if isinstance(audio_signals, list):
        audio_signals = {
            f'{i}:{a.path_to_input_file}': a
            for i, a in enumerate(audio_signals)
        }

    # Plot louder sources first so quieter ones are drawn on top.
    sorted_keys = sorted(
        audio_signals.keys(),
        key=lambda k: audio_signals[k].rms().mean(),
        reverse=True
    )
    alphas = (
        np.linspace(0.25, .75, len(audio_signals))
        if alphas is None else alphas
    )
    colors = (
        plt.rcParams['axes.prop_cycle'].by_key()['color']
        if colors is None else colors
    )

    for i, key in enumerate(sorted_keys):
        val = audio_signals[key]
        # Cycle by the length of the color/alpha sequences themselves (not the
        # number of signals) so user-supplied sequences shorter than the
        # signal count no longer raise IndexError. This matches the indexing
        # used in visualize_sources_as_masks.
        color = colors[i % len(colors)]
        visualize_waveform(val, ch=ch, do_mono=do_mono, x_axis=x_axis,
                           alpha=alphas[i % len(alphas)],
                           label=key, color=color)
    if show_legend:
        plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2)
def visualize_sources_as_masks(audio_signals, ch=0, do_mono=False, x_axis='time',
                               y_axis='linear', db_cutoff=-60, colors=None, alphas=None,
                               alpha_amount=1.0, show_legend=True, **kwargs):
    """
    Visualizes a dictionary or list of sources as overlaid time-frequency
    masks, one color per source, with per-pixel transparency proportional to
    each source's mask value.
    The labels of each source are either the key, if a dictionary, or the
    path to the input audio file, if a list.
    Args:
        audio_signals (list or dict): List or dictionary of audio signal objects to be
          plotted.
        ch (int, optional): Which channel to plot. Defaults to 0.
        do_mono (bool, optional): Make each AudioSignal mono. Defaults to False.
        x_axis (str, optional): x_axis type passed to librosa axis helpers. Defaults to 'time'.
        y_axis (str, optional): y_axis type passed to librosa axis helpers. Defaults to 'linear'.
        db_cutoff (int, optional): dB level (relative to the mixture maximum) below
          which time-frequency bins are treated as silence and left unpainted.
          Defaults to -60.
        colors (list, optional): Sequence of colors to use for each signal.
          Defaults to None, which uses the default matplotlib color cycle.
        alphas (list, optional): NOTE(review): accepted but never read in this
          body -- transparency comes from ``mask ** alpha_amount``; confirm
          whether this parameter should be removed or wired in.
        alpha_amount (float, optional): Exponent applied to each mask to produce
          its alpha channel. Defaults to 1.0.
        show_legend (bool, optional): Whether to draw a legend of source patches.
          Defaults to True.
        kwargs: NOTE(review): accepted but not forwarded to any call in this
          body -- confirm intended destination (e.g. ``plt.imshow``).
    """
    import matplotlib.pyplot as plt
    import matplotlib
    import librosa.display
    from .. import datasets
    # Normalize list input to a dict keyed by "index:path".
    if isinstance(audio_signals, list):
        audio_signals = {
            f'{i}:{a.path_to_input_file}': a
            for i, a in enumerate(audio_signals)
        }
    if do_mono:
        for key in audio_signals:
            audio_signals[key] = audio_signals[key].to_mono()
    # Louder sources first, so quieter sources are painted on top.
    sorted_keys = sorted(
        audio_signals.keys(),
        key=lambda k: audio_signals[k].rms().mean(),
        reverse=True
    )
    # Alphabetical order fixes each source's index into `masks` below.
    source_names = sorted(list(audio_signals.keys()))
    # Reconstruct the mixture by summing the sources.
    mix = sum(audio_signals.values())
    data = {
        'mix': mix,
        'sources': audio_signals
    }
    # Transform computes 'mix_magnitude' and per-source 'source_magnitudes'.
    data = datasets.transforms.PhaseSensitiveSpectrumApproximation()(data)
    colors = (
        plt.rcParams['axes.prop_cycle'].by_key()['color']
        if colors is None else colors
    )
    # construct each image with alpha values
    # Soft mask per source: source_mag / max(mix_mag, source_mag); EPSILON
    # guards against division by zero in silent bins.
    masks = data['source_magnitudes'] / (np.maximum(
        data['mix_magnitude'][..., None], data['source_magnitudes'])
        + constants.EPSILON
    )
    legend_elements = []
    # Zero out masks wherever the mixture is below db_cutoff (silence).
    silence_mask = librosa.amplitude_to_db(np.abs(mix.stft()), ref=np.max) > db_cutoff
    masks *= silence_mask[..., None]
    # NOTE(review): these double-underscore librosa.display helpers are
    # private API and may break across librosa versions.
    y_coords = librosa.display.__mesh_coords(y_axis, None, masks.shape[0],
        sr=mix.sample_rate, hop_length=mix.stft_params.hop_length)
    x_coords = librosa.display.__mesh_coords(x_axis, None, masks.shape[1],
        sr=mix.sample_rate, hop_length=mix.stft_params.hop_length)
    extent = [x_coords.min(), x_coords.max(), y_coords.min(), y_coords.max()]
    for j, key in enumerate(sorted_keys):
        i = source_names.index(key)
        mask = masks[..., ch, i]
        color = colors[j % len(colors)]
        # White-to-color colormap for this source's mask image.
        cmap = matplotlib.colors.LinearSegmentedColormap.from_list(
            'custom', ['white', color])
        image = cmap(mask)
        # Use the mask itself (raised to alpha_amount) as the alpha channel.
        image[:, :, -1] = mask ** alpha_amount
        plt.imshow(image, origin='lower', aspect='auto',
                   interpolation='none', extent=extent)
        legend_elements.append(
            matplotlib.patches.Patch(facecolor=color, label=key))
    axes = librosa.display.__check_axes(None)
    axes.set_xlim(x_coords.min(), x_coords.max())
    axes.set_ylim(y_coords.min(), y_coords.max())
    # Set up axis scaling
    librosa.display.__scale_axes(axes, x_axis, 'x')
    librosa.display.__scale_axes(axes, y_axis, 'y')
    # Construct tickers and locators
    librosa.display.__decorate_axis(axes.xaxis, x_axis)
    librosa.display.__decorate_axis(axes.yaxis, y_axis)
    if show_legend:
        plt.legend(handles=legend_elements,
                   bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2)
@contextmanager
def _close_temp_files(tmpfiles):
"""
Utility function for creating a context and closing all temporary files
once the context is exited. For correct functionality, all temporary file
handles created inside the context must be appended to the ```tmpfiles```
list.
This function is taken wholesale from Scaper.
Args:
tmpfiles (list): List of temporary file handles
"""
def _close():
for t in tmpfiles:
try:
t.close()
os.unlink(t.name)
except:
pass
try:
yield
except:
_close()
raise
_close()
| 23,707 | 36.27673 | 99 | py |
nussl | nussl-master/nussl/core/efz_utils.py | """
The *nussl* External File Zoo (EFZ) is a server that houses all files that are too large to
bundle with *nussl* when distributing it through ``pip`` or Github. These types of files include
audio examples, benchmark files for tests, and trained neural network models.
*nussl* has built-in utilities for accessing the EFZ through its API. Here, it is possible to
see what files are available on the EFZ and download desired files. The EFZ utilities allow
for such functionality.
"""
import warnings
import json
import os
import sys
import hashlib
from six.moves.urllib_parse import urljoin
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen, Request
from six.moves.urllib.request import urlretrieve
from . import constants
def get_available_audio_files():
    """
    Returns a list of dicts containing metadata of the available audio files
    on the nussl External File Zoo (EFZ) server
    (http://nussl.ci.northwestern.edu/).

    Each entry in the list looks like:

    .. code-block:: python

        {
            u'file_length_seconds': 5.00390022675737,
            u'visible': True,
            u'file_name': u'K0140.wav',
            u'date_modified': u'2018-06-01',
            u'file_hash': u'f0d8d3c8d199d3790b0e42d1e5df50a6801f928d10f533149ed0babe61b5d7b5',
            u'file_size_bytes': 441388,
            u'file_description': u'Acoustic piano playing middle C.',
            u'audio_attributes': u'piano, middle C',
            u'file_size': u'431.0KiB',
            u'date_added': u'2018-06-01'
        }

    See Also:
        * :func:`print_available_audio_files`, prints the audio files to the console.
        * :func:`download_audio_file` to download an audio file from the EFZ.

    Returns:
        (list): A list of dicts with metadata of the available audio files on
        the nussl EFZ server.
    """
    # _download_all_metadata() raises its own errors on failure, so no
    # try block is needed here.
    metadata = _download_all_metadata(constants.NUSSL_EFZ_AUDIO_METADATA_URL)
    return metadata
def print_available_audio_files():
    """
    Prints a table to the console showing all of the available audio files on
    the nussl External File Zoo (EFZ) server
    (http://nussl.ci.northwestern.edu/).

    See Also:
        * :func:`get_available_audio_files` to get this same data as a list.
        * :func:`download_audio_file` to download an audio file from the EFZ.

    Example:
        >>> import nussl
        >>> nussl.efz_utils.print_available_audio_files()
        File Name                                Duration (sec)  Size       Description
        dev1_female3_inst_mix.wav                10.0            1.7MiB     Instantaneous mixture of three female speakers talking in a stereo field.
        K0140.wav                                5.0             431.0KiB   Acoustic piano playing middle C.

        To download one of these files insert the file name as the first
        parameter to :func:`download_audio_file`, like so:

        >>> nussl.efz_utils.download_audio_file('K0140.wav')
    """
    header = f'{"File Name":40} {"Duration (sec)":15} {"Size":10} {"Description":50}'
    print(header)
    for info in get_available_audio_files():
        row = (f'{info["file_name"]:40} {info["file_length_seconds"]:15}'
               f' {info["file_size"]:10} {info["file_description"]:50}')
        print(row)
    print('To download one of these files insert the file name '
          'as the first parameter to nussl.download_audio_file(), like so: \n'
          ' >>> nussl.efz_utils.download_audio_file(\'K0140.wav\')')
def get_available_trained_models():
    """
    Returns a list of dicts containing metadata of the available trained
    models on the nussl External File Zoo (EFZ) server
    (http://nussl.ci.northwestern.edu/).

    Each entry in the list looks like:

    .. code-block:: python

        {
            u'for_class': u'DeepClustering',
            u'visible': True,
            u'file_name': u'deep_clustering_vocals_44k_long.model',
            u'date_modified': u'2018-06-01',
            u'file_hash': u'e09034c2cb43a293ece0b121f113b8e4e1c5a247331c71f40cb9ca38227ccc2c',
            u'file_size_bytes': 94543355,
            u'file_description': u'Deep clustering for vocal separation trained on augmented DSD100.',
            u'file_size': u'90.2MiB',
            u'date_added': u'2018-06-01'
        }

    Notes:
        The ``for_class`` entry specifies which `nussl` separation class the
        given model will work with. `nussl` separation classes that require a
        model usually default to retrieving one from the EFZ server (if not
        already on the user's machine), but sometimes a non-default model is
        desired; ``for_class`` tells you which class a model is valid for.
        Loading a model into a class it is not labeled for raises an exception.

    See Also:
        * :func:`print_available_trained_models`, prints the models to the console.
        * :func:`download_trained_model` to download a trained model from the EFZ.

    Returns:
        (list): A list of dicts with metadata of the available trained models
        on the nussl EFZ server.
    """
    metadata = _download_all_metadata(constants.NUSSL_EFZ_MODEL_METADATA_URL)
    return metadata
def print_available_trained_models():
    """
    Prints a table to the console showing all of the available trained models
    on the nussl External File Zoo (EFZ) server
    (http://nussl.ci.northwestern.edu/).

    Notes:
        The ``for_class`` column specifies which `nussl` separation class the
        given model will work with. Loading a model into a class it is not
        explicitly labeled for raises an exception.

    See Also:
        * :func:`get_available_trained_models` to get this same data as a list.
        * :func:`download_trained_model` to download a trained model from the EFZ.

    Example:
        >>> import nussl
        >>> nussl.efz_utils.print_available_trained_models()
        File Name                                For Class       Size       Description
        deep_clustering_model.model              DeepClustering  48.1MiB    example Deep Clustering model
        deep_clustering_vocal_44k_long.model     DeepClustering  90.2MiB    trained DC model for vocal extraction

        To download one of these files insert the file name as the first
        parameter to download_trained_model(), like so:

        >>> nussl.efz_utils.download_trained_model('deep_clustering_model.h5')
    """
    header = f'{"File Name":40} {"For Class":15} {"Size":10} {"Description":50}'
    print(header)
    for info in get_available_trained_models():
        row = (f'{info["file_name"]:40} {info["for_class"]:15}'
               f' {info["file_size"]:10} {info["file_description"]:50}')
        print(row)
    print('To download one of these files insert the file name '
          'as the first parameter to nussl.download_trained_model, like so: \n'
          ' >>> nussl.efz_utils.download_trained_model(\'deep_clustering_model.h5\')')
def get_available_benchmark_files():
    """
    Returns a list of dicts containing metadata of the available benchmark
    files for tests on the nussl External File Zoo (EFZ) server
    (http://nussl.ci.northwestern.edu/).

    Each entry in the list looks like:

    .. code-block:: python

        {
            u'for_class': u'DuetUnitTests',
            u'visible': True, u'file_name':
            u'benchmark_atn_bins.npy',
            u'date_modified': u'2018-06-19',
            u'file_hash': u'cf7fef6f4ea9af3dbde8b9880602eeaf72507b6c78f04097c5e79d34404a8a1f',
            u'file_size_bytes': 488,
            u'file_description': u'Attenuation bins numpy array for DUET benchmark test.',
            u'file_size': u'488.0B',
            u'date_added': u'2018-06-19'
        }

    Notes:
        The ``for_class`` entry specifies which `nussl` benchmark class will
        load the corresponding benchmark file. Make sure these match exactly
        when writing tests!

    See Also:
        * :func:`print_available_benchmark_files`, prints the benchmark files
          to the console.
        * :func:`download_benchmark_file` to download a benchmark file from the EFZ.

    Returns:
        (list): A list of dicts with metadata of the available benchmark files
        on the nussl EFZ server.
    """
    metadata = _download_all_metadata(constants.NUSSL_EFZ_BENCHMARK_METADATA_URL)
    return metadata
def print_available_benchmark_files():
    """
    Prints a table to the console showing all of the available benchmark files
    on the nussl External File Zoo (EFZ) server
    (http://nussl.ci.northwestern.edu/).

    Example:
        >>> import nussl
        >>> nussl.efz_utils.print_available_benchmark_files()
        File Name                                For Class       Size       Description
        mix3_matlab_repet_foreground.mat         TestRepet       6.4MiB     Foreground matrix for Repet class benchmark test.
        benchmark_atn_bins.npy                   DuetUnitTests   488.0B     Attenuation bins numpy array for DUET benchmark test.

        To download one of these files insert the file name as the first
        parameter to nussl.download_benchmark_file, like so:

        >>> nussl.efz_utils.download_benchmark_file('example.npy')

    Notes:
        The ``for_class`` column specifies which `nussl` benchmark class will
        load the corresponding benchmark file. Make sure these match exactly
        when writing tests!

    See Also:
        * :func:`get_available_benchmark_files` to get this same data as a list.
        * :func:`download_benchmark_file` to download a benchmark file from the EFZ.
    """
    header = f'{"File Name":40} {"For Class":15} {"Size":10} {"Description":50}'
    print(header)
    for info in get_available_benchmark_files():
        row = (f'{info["file_name"]:40} {info["for_class"]:15}'
               f' {info["file_size"]:10} {info["file_description"]:50}')
        print(row)
    print('To download one of these files insert the file name '
          'as the first parameter to nussl.download_benchmark_file, like so: \n'
          ' >>> nussl.efz_utils.download_benchmark_file(\'example.npy\')')
def _download_all_metadata(url):
    """
    Downloads the json file that contains all of the metadata for a specific
    file type (audio files, benchmark files, or trained models) from the EFZ
    server. This is retrieved from one of three URLs (stored in
    nussl.constants): NUSSL_EFZ_AUDIO_METADATA_URL,
    NUSSL_EFZ_BENCHMARK_METADATA_URL, or NUSSL_EFZ_MODEL_METADATA_URL.

    Args:
        url (str): URL for the EFZ server that has metadata. One of these three:
            NUSSL_EFZ_AUDIO_METADATA_URL, NUSSL_EFZ_BENCHMARK_METADATA_URL, or
            NUSSL_EFZ_MODEL_METADATA_URL.

    Returns:
        (list): List of dicts with metadata for the desired file type.

    Raises:
        NoConnectivityError: if the metadata cannot be fetched or parsed.
    """
    request = Request(url)
    # Make sure to get the newest data
    request.add_header('Pragma', 'no-cache')
    request.add_header('Cache-Control', 'max-age=0')
    try:
        response = urlopen(request)
        return json.loads(response.read())
    except (URLError, HTTPError, ValueError) as e:
        # URLError/HTTPError cover network failures; ValueError covers a
        # malformed JSON payload (json.JSONDecodeError subclasses it).
        # The previous bare ``except`` also swallowed KeyboardInterrupt and
        # SystemExit and hid the underlying cause.
        raise NoConnectivityError("Can't connect to internet") from e
def _download_metadata_for_file(file_name, file_type):
    """
    Downloads the metadata entry for a specific file (``file_name``) on the EFZ server.

    Args:
        file_name (str): File name as specified on the EFZ server.
        file_type (str): 'Type' of file, either 'audio', 'model', or 'benchmark'.

    Returns:
        (dict) Metadata entry for the specified file.

    Raises:
        MetadataError: If ``file_type`` is not one of the known types, or if no
            metadata entry on the server matches ``file_name``.
    """
    metadata_urls = {
        'audio': constants.NUSSL_EFZ_AUDIO_METADATA_URL,
        'benchmark': constants.NUSSL_EFZ_BENCHMARK_METADATA_URL,
        'model': constants.NUSSL_EFZ_MODEL_METADATA_URL,
    }
    if file_type in metadata_urls:
        metadata_url = metadata_urls[file_type]
    else:
        # wrong file type, return
        raise MetadataError(f'Cannot find metadata of type {file_type}.')
    metadata = _download_all_metadata(metadata_url)
    for file_metadata in metadata:
        if file_metadata['file_name'] == file_name:
            return file_metadata
    # Fix: report the metadata URL that was actually searched. The original
    # always printed NUSSL_EFZ_AUDIO_METADATA_URL, which was misleading for
    # 'model' and 'benchmark' lookups.
    raise MetadataError(
        f'No matching metadata for file {file_name}'
        f' at url {metadata_url}!'
    )
def download_audio_file(audio_file_name, local_folder=None, verbose=True):
    """
    Fetches an audio file from the `nussl` External File Zoo (EFZ) server.

    The file is saved under ``local_folder`` when one is given. Otherwise
    `nussl` falls back to ``~/.nussl/`` (expanded) or ``/tmp/.nussl``. When a
    copy already exists at the destination and its hash matches the hash in
    the EFZ metadata, nothing is downloaded and the existing path is returned.

    Args:
        audio_file_name: (str) Name of the audio file to attempt to download.
        local_folder: (str) Path to local folder in which to download the file.
            If no folder is provided, `nussl` will store the file in
            `~/.nussl/` (expanded) or in `/tmp/.nussl`.
        verbose (bool): If ``True`` prints the status of the download to the
            console.

    Returns:
        (str) Full path to the requested file (whether downloaded or not).

    Example:
        >>> import nussl
        >>> piano_path = nussl.efz_utils.download_audio_file('K0140.wav')
        >>> piano_signal = nussl.AudioSignal(piano_path)
    """
    metadata = _download_metadata_for_file(audio_file_name, 'audio')
    url = urljoin(constants.NUSSL_EFZ_AUDIO_URL, audio_file_name)
    return _download_file(
        audio_file_name, url, local_folder, 'audio',
        file_hash=metadata['file_hash'], verbose=verbose)
def download_trained_model(model_name, local_folder=None, verbose=True):
    """
    Fetches a trained model from the `nussl` External File Zoo (EFZ) server.

    The file is saved under ``local_folder`` when one is given. Otherwise
    `nussl` falls back to ``~/.nussl/`` (expanded) or ``/tmp/.nussl``. When a
    copy already exists at the destination and its hash matches the hash in
    the EFZ metadata, nothing is downloaded and the existing path is returned.

    Args:
        model_name: (str) Name of the trained model to attempt to download.
        local_folder: (str) Path to local folder in which to download the file.
            If no folder is provided, `nussl` will store the file in
            `~/.nussl/` (expanded) or in `/tmp/.nussl`.
        verbose (bool): If ``True`` prints the status of the download to the
            console.

    Returns:
        (str) Full path to the requested file (whether downloaded or not).

    Example:
        >>> import nussl
        >>> model_path = nussl.efz_utils.download_trained_model('deep_clustering_model.h5')
        >>> signal = nussl.AudioSignal()
        >>> piano_signal = nussl.DeepClustering(signal, model_path=model_path)
    """
    metadata = _download_metadata_for_file(model_name, 'model')
    url = urljoin(constants.NUSSL_EFZ_MODELS_URL, model_name)
    return _download_file(
        model_name, url, local_folder, 'models',
        file_hash=metadata['file_hash'], verbose=verbose)
def download_benchmark_file(benchmark_name, local_folder=None, verbose=True):
    """
    Fetches a benchmark file from the `nussl` External File Zoo (EFZ) server.

    The file is saved under ``local_folder`` when one is given. Otherwise
    `nussl` falls back to ``~/.nussl/`` (expanded) or ``/tmp/.nussl``. When a
    copy already exists at the destination and its hash matches the hash in
    the EFZ metadata, nothing is downloaded and the existing path is returned.

    Args:
        benchmark_name: (str) Name of the benchmark file to attempt to download.
        local_folder: (str) Path to local folder in which to download the file.
            If no folder is provided, `nussl` will store the file in
            `~/.nussl/` (expanded) or in `/tmp/.nussl`.
        verbose (bool): If ``True`` prints the status of the download to the
            console.

    Returns:
        (str) Full path to the requested file (whether downloaded or not).

    Example:
        >>> import nussl
        >>> import numpy as np
        >>> stm_atn_path = nussl.efz_utils.download_benchmark_file('benchmark_sym_atn.npy')
        >>> sym_atm = np.load(stm_atn_path)
    """
    metadata = _download_metadata_for_file(benchmark_name, 'benchmark')
    url = urljoin(constants.NUSSL_EFZ_BENCHMARKS_URL, benchmark_name)
    return _download_file(
        benchmark_name, url, local_folder, 'benchmarks',
        file_hash=metadata['file_hash'], verbose=verbose)
def _download_file(file_name, url, local_folder, cache_subdir,
                   file_hash=None, cache_dir=None, verbose=True):
    """
    Downloads the specified file from ``url``, skipping the download when a
    local copy with a matching hash already exists.

    Heavily inspired by and lovingly adapted from keras' `get_file` function:
    https://github.com/fchollet/keras/blob/afbd5d34a3bdbb0916d558f96af197af1e92ce70/keras/utils/data_utils.py#L109

    Args:
        file_name: (String) name of the file located on the server
        url: (String) url of the file
        local_folder: (String) alternate folder in which to download the file
        cache_subdir: (String) subdirectory of folder in which to download file
        file_hash: (String) expected hash of downloaded file; when given, an
            existing file is re-downloaded on mismatch, and a freshly
            downloaded file that fails the check is deleted.
        cache_dir: (String) base cache directory; defaults to ``~/.nussl``
            when neither this nor ``local_folder`` is provided.
        verbose: (bool) if True, prints progress/status to the console.

    Returns:
        (String) local path to downloaded file

    Raises:
        FailedDownloadError: if the HTTP request fails.
        MismatchedHashError: if the downloaded file's hash does not match
            ``file_hash``.
    """
    # Resolve the destination directory: an explicit local_folder wins,
    # otherwise fall back to <cache_dir>/<cache_subdir>.
    if local_folder not in [None, '']:
        # local folder provided, let's create it if it doesn't exist and use it as datadir
        os.makedirs(os.path.expanduser(local_folder), exist_ok=True)
        datadir = os.path.expanduser(local_folder)
    else:
        if cache_dir is None:
            cache_dir = os.path.expanduser(os.path.join('~', '.nussl'))
        datadir_base = os.path.expanduser(cache_dir)
        datadir = os.path.join(datadir_base, cache_subdir)
        os.makedirs(datadir, exist_ok=True)

    file_path = os.path.join(datadir, file_name)

    # Decide whether a download is needed at all.
    download = False
    if os.path.exists(file_path):
        if file_hash is not None:
            # compare the provided hash with the hash of the file currently at file_path
            current_hash = _hash_file(file_path)

            # if the hashes are equal, we already have the file we need, so don't download
            if file_hash != current_hash:
                if verbose:
                    warnings.warn(
                        f'Hash for {file_path} does not match known hash. '
                        f' Downloading {file_name} from servers...'
                    )
                download = True
            elif verbose:
                print(f'Matching file found at {file_path}, skipping download.')
        else:
            # no hash to verify against; assume the cached copy is stale
            download = True
    else:
        download = True

    if download:
        if verbose:
            print(f'Saving file at {file_path}\nDownloading {file_name} from {url}')

        def _dl_progress(count, block_size, total_size):
            # urlretrieve reporthook: prints an in-place percentage meter.
            percent = int(count * block_size * 100 / total_size)

            if percent <= 100:
                sys.stdout.write(f'\r{file_name}...{percent}%')
                sys.stdout.flush()

        try:
            try:
                reporthook = _dl_progress if verbose else None
                urlretrieve(url, file_path, reporthook)
                if verbose: print()  # print a new line after the progress is done.
            except HTTPError as e:
                raise FailedDownloadError(f'URL fetch failure on {url}: {e.code} -- {e.msg}')
            except URLError as e:
                raise FailedDownloadError(f'URL fetch failure on {url}: {e.errno} -- {e.reason}')
        except (Exception, KeyboardInterrupt) as e:
            # Never leave a partial download behind, even on Ctrl-C.
            if os.path.exists(file_path):
                os.remove(file_path)
            raise e

        # check hash of received file to see if it matches the provided hash
        if file_hash is not None:
            download_hash = _hash_file(file_path)
            if file_hash != download_hash:
                # the downloaded file is not what it should be. Get rid of it.
                os.remove(file_path)
                raise MismatchedHashError(
                    f'Deleted downloaded file ({file_path}) because of a hash mismatch.'
                )
        return file_path
    else:
        return file_path
def _hash_directory(directory, ext=None):
    """
    Calculates a single SHA256 digest summarizing every child file of
    ``directory`` (searched recursively with ``os.walk()``). If ``ext`` is
    given, only files with that extension contribute to the digest.

    This function is used to verify the integrity of data sets for use with
    nussl: each data set hook hard-codes its expected directory layout, and a
    hash mismatch lets us raise a clear error when the layout is wrong.

    Args:
        directory (str): Directory within which file hashes get calculated.
            Searches recursively.
        ext (str): If provided, this function will only calculate the hash on
            files with the given extension.

    Returns:
        (str): Hex digest of the combined per-file hashes.
    """
    file_hashes = []
    for path, sub_dirs, files in os.walk(directory):
        for name in files:
            full_path = os.path.join(path, name)
            if not os.path.isfile(full_path):
                continue
            if ext is not None and os.path.splitext(name)[1] != ext:
                continue
            file_hashes.append(_hash_file(full_path))

    hasher = hashlib.sha256()
    # Sort so the result does not depend on os.walk ordering (platform agnostic).
    for hash_val in sorted(file_hashes):
        hasher.update(hash_val.encode('utf-8'))
    return hasher.hexdigest()
def _hash_file(file_path, chunk_size=65535):
"""
Args:
file_path: System path to the file to be hashed
chunk_size: size of chunks
Returns:
file_hash: the SHA256 hashed string in hex
"""
hasher = hashlib.sha256()
with open(file_path, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
########################################
# Error Classes
########################################
class NoConnectivityError(Exception):
    """
    Exception class for lack of internet connection.
    Raised when the EFZ metadata server cannot be reached.
    """
    pass
class FailedDownloadError(Exception):
    """
    Exception class for failed file downloads.
    Raised when an HTTP/URL fetch from the EFZ server fails.
    """
    pass
class MismatchedHashError(Exception):
    """
    Exception class for when a computed hash does not match a pre-computed hash.
    """
    pass
class MetadataError(Exception):
    """
    Exception class for errors with metadata, e.g. an unknown file type
    or a file name with no matching metadata entry on the EFZ server.
    """
    pass
| 25,938 | 40.30414 | 159 | py |
nussl | nussl-master/nussl/separation/base/deep_mixin.py | import torch
import yaml
import json
from ...ml import SeparationModel
from ...datasets import transforms as tfm
OMITTED_TRANSFORMS = (
tfm.GetExcerpt,
tfm.MagnitudeWeights,
tfm.SumSources,
tfm.Cache,
tfm.IndexSources,
)
class DeepMixin:
    """
    Mixin that adds the ability to load a trained ``SeparationModel`` and to
    turn ``self.audio_signal`` into the input dictionary that model expects.
    Classes using this mixin are expected to also provide ``self.audio_signal``,
    ``self.metadata``, and ``self.channel_dim``.
    """
    def load_model(self, model_path, device='cpu'):
        """
        Loads the model at specified path `model_path`. Uses GPU if
        available.

        Args:
            model_path (str): path to model saved as SeparationModel.
            device (str or torch.Device): loads model on CPU or GPU. Defaults to
                'cpu'. Note that if CUDA is not available, the model is loaded
                on CPU regardless of this argument.

        Note:
            This method returns nothing; the loaded model, its config,
            metadata, and the input transform are stored on ``self``
            (``self.model``, ``self.config``, ``self.metadata``,
            ``self.transform``).
        """
        # Force CPU whenever CUDA is unavailable, regardless of the request.
        device = device if torch.cuda.is_available() else 'cpu'
        self.device = device

        model, metadata = SeparationModel.load(model_path)
        model = model.to(device).eval()

        self.model = model
        self.config = metadata['config']
        # merge the checkpoint's metadata into whatever metadata already exists
        self.metadata.update(metadata)
        self.transform = self._get_transforms(metadata['train_dataset']['transforms'])

    @staticmethod
    def _get_transforms(loaded_tfm):
        """
        Look through the loaded transforms and omit any that are in
        `OMITTED_TRANSFORMS` (training-only transforms that make no sense
        at inference time).

        Args:
            loaded_tfm (Transform): A Transform from `nussl.datasets.transforms`.

        Returns:
            Transform: If the transform was a Compose, this returns a new Compose
                that omits the transforms listed in `OMITTED_TRANSFORMS`.
                Otherwise returns the transform itself, or None if it is omitted.
        """
        if isinstance(loaded_tfm, tfm.Compose):
            transform = []
            for _tfm in loaded_tfm.transforms:
                if not isinstance(_tfm, OMITTED_TRANSFORMS):
                    transform.append(_tfm)
            transform = tfm.Compose(transform)
        else:
            if not isinstance(loaded_tfm, OMITTED_TRANSFORMS):
                transform = loaded_tfm
            else:
                transform = None
        return transform

    def modify_input_data(self, data, **kwargs):
        """Add or modify input data to dictionary before passing
        it to the model. By default this just updates the data
        dictionary with the given keyword arguments, but can be overridden
        by classes inheriting this method to modify the data
        dictionary as needed.

        Parameters
        ----------
        data : dict,
            The data dictionary before this function is called.
        kwargs : keyword arguments, optional
            Extra items merged into ``data``.

        Returns
        -------
        dict
            The updated data dictionary.
        """
        data.update(kwargs)
        return data

    def _get_input_data_for_model(self, **kwargs):
        """
        Sets up the audio signal with the appropriate STFT parameters and runs it
        through the transform found in the metadata.

        Args:
            kwargs: Any additional data that will
                be merged with the input dictionary.

        Returns:
            dict: Data dictionary to pass into the model.
        """
        # Match the signal's sample rate and STFT parameters to what the
        # model was trained with.
        if self.metadata['sample_rate'] is not None:
            if self.audio_signal.sample_rate != self.metadata['sample_rate']:
                self.audio_signal.resample(self.metadata['sample_rate'])

        self.audio_signal.stft_params = self.metadata['stft_params']
        self.audio_signal.stft()

        data = {'mix': self.audio_signal}
        data = self.transform(data)

        for key in data:
            if torch.is_tensor(data[key]):
                # add a batch dimension and move to the model's device
                data[key] = data[key].unsqueeze(0).to(self.device).float()
                if self.metadata['num_channels'] == 1:
                    # then each channel is processed indep
                    data[key] = data[key].transpose(0, self.channel_dim)
        data = self.modify_input_data(data, **kwargs)
        self.input_data = data
        return self.input_data

    def get_metadata(self, to_str=False, **kwargs):
        """
        Gets the metadata associated with this model.

        Args:
            to_str (bool): If True, will return a string, else will return dict.

        Keyword Args:
            for_upload (bool): If True, scrubs metadata (dataset paths, full
                loss history) for uploading to EFZ. Defaults to False.
            truncate_loss (bool): If True, replaces the epoch-by-epoch loss
                history with just the final loss values. Defaults to False.

        Returns:
            (str) or (dict) containing metadata.

        Raises:
            ValueError: If no metadata is attached to this object.
        """
        for_upload = kwargs.get('for_upload', False)
        truncate_loss = kwargs.get('truncate_loss', False)

        metadata = getattr(self, 'metadata', None)
        if metadata is None:
            raise ValueError('Could not find associated metadata.')

        if for_upload:
            # remove local filesystem paths before uploading
            keys = ['train_dataset', 'val_dataset']
            for k in keys:
                if k in metadata:
                    metadata[k].pop('folder')

        if for_upload or truncate_loss:
            # keep only the final value of each tracked loss
            if 'trainer.state.epoch_history' in metadata:
                loss_history = metadata.pop('trainer.state.epoch_history')
                metadata['final_loss'] = {k: float(v[-1]) for k, v in loss_history.items()}

        if isinstance(metadata['config'], str):
            metadata['config'] = json.loads(metadata['config'])

        metadata['separation_class'] = type(self).__name__
        metadata['model_name'] = metadata['config']['name']

        if to_str:
            return yaml.dump(metadata, indent=4)
        else:
            return metadata
| 5,490 | 33.753165 | 91 | py |
nussl | nussl-master/nussl/separation/spatial/projet.py | import copy
import numpy as np
import torch
from .. import SeparationBase, SeparationException
from ... import AudioSignal
class Projet(SeparationBase):
    """
    Implements the PROJET algorithm for spatial audio separation using projections.
    This implementation uses PyTorch to speed up computation considerably. PROJET
    does the following steps:

    1. Project the complex stereo STFT onto multiple angles and delays via
       projection and delay matrix transformations.
    2. Initialize the parameters of the system to "remix" these projections along with
       PSDs of the sources such that they try to reconstruct the original stereo mixture.
    3. Find the optimal parameters via multiplicative update rules for P and for Q.
    4. Use the discovered parameters to isolate the sources via spatial cues.

    This implementation considers BOTH panning and delays when isolating sources.
    PROJET is not a masking based method, it estimates the sources directly by
    projecting the complex STFT.

    Args:
        input_audio_signal (AudioSignal): Audio signal to separate.
        num_sources (int): Number of sources to separate.
        estimates (list of AudioSignal): initial estimates for the separated sources
            if available. These will be used to initialize the update algorithm. So
            one could (for example), run FT2D on a signal and then refine the estimates
            using PROJET. Defaults to None (randomly initialize P).
        num_iterations (int, optional): Number of iterations to do for the update
            rules for P and Q. Defaults to 50.
        maximum_delay_in_samples (int, optional): Maximum delay in samples that you are
            willing to consider in the projection matrices. Defaults to 20.
        location_set_panning (int, optional): How many locations in panning you are
            willing to consider. Defaults to 30.
        location_set_delay (int, optional): How many delays you are willing to
            consider. Defaults to 17.
        projection_set_panning (int, optional): How many projections you are willing
            use in panning-space. Defaults to 10.
        projection_set_delay (int, optional): How many delays you are willing to project
            the mixture onto in panning-space. Defaults to 9.
        beta (int, optional): Beta in beta divergence. See Table 1 in [1]. Defaults to 1.
        alpha (int, optional): Power to raise each power spectral density estimate of each
            source to. Defaults to 1.
        device (str, optional): Device to use when performing update rules. 'cuda' will
            be fastest, if available. Defaults to 'cpu'.

    References:

    [1] Fitzgerald, Derry, Antoine Liutkus, and Roland Badeau.
        "Projection-based demixing of spatial audio."
        IEEE/ACM Transactions on Audio, Speech, and Language
        Processing 24.9 (2016): 1560-1572.

    [2] Fitzgerald, Derry, Antoine Liutkus, and Roland Badeau.
        "Projet—spatial audio separation using projections." 2016 IEEE International
        Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2016.
    """
    def __init__(self, input_audio_signal, num_sources, estimates=None, num_iterations=50,
                 maximum_delay_in_samples=20, location_set_panning=30, location_set_delay=17,
                 projection_set_panning=10, projection_set_delay=9, beta=1, alpha=1, device='cpu'):
        self.num_sources = num_sources
        self.alpha = alpha
        self.beta = beta
        self.num_iterations = num_iterations
        # small constant guarding against division by zero in the updates
        self.eps = 1e-8
        self.device = device

        self.projection_set_panning = projection_set_panning
        self.projection_set_delay = projection_set_delay
        self.location_set_panning = location_set_panning
        self.location_set_delay = location_set_delay
        self.maximum_delay_in_samples = maximum_delay_in_samples

        # populated in _preprocess_audio_signal / run
        self.projection_set = None
        self.inverse_projection_set = None
        self.reconstructions = None

        super().__init__(input_audio_signal=input_audio_signal)

        if estimates is None:
            self.P = None
        else:
            if len(estimates) != self.num_sources:
                raise SeparationException(
                    "Number of estimates must be equal to num_sources!")
            psds = []
            for e in estimates:
                # deep-copy so the caller's estimate signals are untouched
                _e = copy.deepcopy(e)
                _e.to_mono()
                _e.stft_params = self.audio_signal.stft_params
                psds.append(np.abs(_e.stft()))
            # NOTE(review): self.P is built here, but initialize_parameters()
            # below always draws a random P and never reads self.P, so the
            # `estimates` initialization appears unused -- confirm intent.
            self.P = np.stack(psds, axis=-1)

    def _preprocess_audio_signal(self):
        # Cache the mixture STFT (shape (F, T, I), I = num channels).
        self.stft = self.audio_signal.stft()
        F, T, I = self.stft.shape

        # fall back to CPU when CUDA is unavailable
        self.device = 'cpu' if not torch.cuda.is_available() else self.device

        # candidate source locations: pannings in [0, pi/2] crossed with delays
        pannings = np.linspace(0, np.pi / 2, self.location_set_panning)
        delays = np.linspace(
            -self.maximum_delay_in_samples,
            self.maximum_delay_in_samples,
            self.location_set_delay
        )
        self.location_set = self.create_panning_delay_set(pannings, delays, F, I)

        # projection directions: pannings in [-pi/2, 0] crossed with delays
        pannings = np.linspace(-np.pi / 2, 0, self.projection_set_panning)
        delays = np.linspace(
            -self.maximum_delay_in_samples,
            self.maximum_delay_in_samples,
            self.projection_set_delay
        )
        self.projection_set = self.create_panning_delay_set(pannings, delays, F, I)
        self.inverse_projection_set = np.linalg.pinv(self.projection_set)

        # V: magnitudes of the projected mixture; K: |projections . locations|
        self.V, self.complex_projections = self.create_projections()
        self.K = self.create_k_matrix()

    @staticmethod
    def create_panning_delay_set(pannings, delays, F, I):
        """
        Builds an (F, len(pannings), len(delays), I) array of complex steering
        vectors: channel 0 carries cos(panning), channel 1 carries sin(panning)
        multiplied by a frequency-dependent phase ramp implementing the delay.
        """
        panning_delay_set = np.zeros(
            (F, len(pannings), len(delays), I), dtype='complex'
        )
        for i, delay in enumerate(delays):
            # phase ramp over normalized frequencies [0, 0.5] for this delay
            phase_change = np.exp(
                -1j * 2 * np.pi * np.linspace(0, 0.5, F) * delay
            )
            panning_delay_set[:, :, i, 0] = np.outer(
                np.ones(F), np.cos(pannings)
            )
            panning_delay_set[:, :, i, 1] = np.outer(
                phase_change, np.sin(pannings)
            )
        return panning_delay_set

    def create_projections(self):
        """
        Projects the complex mixture STFT onto every projection direction.
        Returns (|projection|**alpha as a torch tensor, complex projections).
        """
        F = self.stft.shape[0]
        inner_dim = self.projection_set.shape[-1]
        # batched matmul over frequency: (F, P, I) @ (F, I, T) -> (F, P, T)
        V = (
            self.projection_set.reshape(F, -1, inner_dim) @
            self.stft.reshape(F, -1, inner_dim).transpose(0, 2, 1)
        )
        V = V.reshape((F,) + self.projection_set.shape[1:-1] + (-1,))
        return self._convert_to_tensor(np.abs(V) ** self.alpha), V

    def _convert_to_tensor(self, data):
        # numpy -> torch tensor on the configured device
        tensor = torch.from_numpy(data)
        return tensor.to(self.device)

    @staticmethod
    def _convert_to_numpy(data):
        # torch tensor -> numpy array on CPU
        array = data.cpu().data.numpy()
        return array

    def create_k_matrix(self):
        """
        Builds K = |projection_set . location_set|**alpha, the gain of every
        projection direction for every candidate location, as a torch tensor.
        """
        F = self.stft.shape[0]
        inner_dim = self.location_set.shape[-1]
        K = (
            self.projection_set.reshape(F, -1, inner_dim) @
            self.location_set.reshape(F, -1, inner_dim).transpose(0, 2, 1)
        )
        K = np.abs(K.reshape(
            (F,) + self.projection_set.shape[1:-1] + self.location_set.shape[1:-1])) ** self.alpha
        return self._convert_to_tensor(K)

    def initialize_parameters(self):
        """
        Randomly initializes P (per-source PSDs, (F, T, num_sources)) and
        Q (per-location mixing weights) as torch tensors.
        """
        F, T, I = self.stft.shape
        P = np.abs(np.random.randn(F, T, self.num_sources))
        Q = np.abs(np.random.randn(*self.location_set.shape[1:3], self.num_sources))
        return self._convert_to_tensor(P), self._convert_to_tensor(Q)

    def _update_sigma(self, P, Q, KQ):
        # model spectrogram: sigma = (K Q) P, reshaped back to (F, ..., T)
        F = self.stft.shape[0]
        inner_dim = KQ.shape[-1]
        sigma = (
            KQ.reshape(F, -1, inner_dim) @
            P.transpose(2, 1)
        )
        sigma = sigma.reshape(
            P.shape[0], KQ.shape[1], KQ.shape[2], P.shape[1])
        return sigma

    def _update_P(self, P, sigma, KQ):
        # multiplicative update for P under the beta divergence (Table 1, [1])
        F = self.stft.shape[0]
        temps = [
            (sigma ** (self.beta - 2)) * self.V,
            sigma ** (self.beta - 1)
        ]
        inner_dim = KQ.shape[1] * KQ.shape[2]
        # numerator and denominator of the update ratio
        P_num_denom = [
            self.eps + (
                KQ.reshape(F, inner_dim, -1).transpose(2, 1) @
                temp.reshape(F, inner_dim, -1)
            )
            for temp in temps
        ]
        P_update = (P_num_denom[0] / P_num_denom[1]).transpose(2, 1)
        return P * P_update

    def _update_Q(self, P, sigma, Q):
        # multiplicative update for Q under the beta divergence (Table 1, [1])
        F = self.stft.shape[0]
        temps = [
            (sigma ** (self.beta - 2)) * self.V,
            sigma ** (self.beta - 1)
        ]
        inner_dim = self.K.shape[1] * self.K.shape[2]
        Q_num_denom = [
            self.K.reshape(F, inner_dim, -1).transpose(2, 1) @
            (temp.reshape(F, -1, P.shape[1]) @ P)
            for temp in temps
        ]
        # sum the update statistics over frequency
        Q_num_denom = [
            x.reshape(F, *Q.shape).sum(dim=0) for x in Q_num_denom
        ]
        Q_update = Q_num_denom[0] / Q_num_denom[1]
        return Q * Q_update

    def _get_kq(self, Q):
        # KQ: gains of each projection for each source, (F, ..., num_sources)
        F = self.stft.shape[0]
        # get KQ
        inner_dim = Q.shape[0] * Q.shape[1]
        KQ = (
            self.K.reshape(-1, inner_dim) @ Q.reshape(inner_dim, -1)
        )
        KQ = KQ.reshape(
            F, self.K.shape[1], self.K.shape[2], Q.shape[-1])
        return KQ

    def _update(self, P, Q):
        # One full iteration: refresh sigma, update P, refresh sigma, update Q.
        KQ = self._get_kq(Q)
        sigma = self._update_sigma(P, Q, KQ)
        P = self._update_P(P, sigma, KQ)
        sigma = self._update_sigma(P, Q, KQ)
        Q = self._update_Q(P, sigma, Q)
        return P, Q

    def run(self):
        P, Q = self.initialize_parameters()

        # alternate multiplicative updates for P and Q
        for i in range(self.num_iterations):
            P, Q = self._update(P, Q)

        KQ = self._get_kq(Q)
        KQ = KQ.reshape(KQ.shape[0], -1, 1, KQ.shape[-1])
        sigma_j = KQ / P[:, None, ...]
        # normalize across sources so per-source weights sum to one
        sigma_j = sigma_j / (self.eps + sigma_j.sum(dim=-1)[..., None])
        sigma_j = self._convert_to_numpy(sigma_j)

        # flatten the (panning, delay) axes of the projection set
        self.projection_set = self.projection_set.reshape(
            self.projection_set.shape[0],
            self.projection_set.shape[1] * self.projection_set.shape[2],
            self.projection_set.shape[-1]
        )
        self.inverse_projection_set = np.linalg.pinv(self.projection_set)

        # weight the projected mixture by each source's soft weights...
        cf_j = (
            (self.projection_set @
             self.stft.transpose(0, 2, 1))[..., None]
            * sigma_j
        )
        shape = cf_j.shape
        # ...then invert the projection to recover per-source complex STFTs
        reconstructions = (
            self.inverse_projection_set @
            cf_j.reshape(
                cf_j.shape[0],
                cf_j.shape[1],
                -1
            )
        )
        reconstructions = reconstructions.reshape(
            shape[0], self.stft.shape[-1], -1, shape[-1]
        )
        self.reconstructions = np.swapaxes(reconstructions, 1, 2)
        # NOTE(review): this returns the pre-swap array, while
        # self.reconstructions (used by make_audio_signals) holds the
        # axis-swapped version -- confirm callers expect this.
        return reconstructions

    def make_audio_signals(self):
        """
        Turns each reconstructed complex STFT (last axis of
        ``self.reconstructions``) into an AudioSignal via inverse STFT.
        """
        estimates = []
        for j in range(self.reconstructions.shape[-1]):
            estimate_stft = self.reconstructions[..., j]
            estimate = self.audio_signal.make_copy_with_stft_data(estimate_stft)
            estimate.istft()
            estimates.append(estimate)
        return estimates
| 11,377 | 37.181208 | 99 | py |
nussl | nussl-master/nussl/separation/deep/deep_mask_estimation.py | import torch
from ..base import MaskSeparationBase, DeepMixin, SeparationException
from ... import ml
class DeepMaskEstimation(DeepMixin, MaskSeparationBase):
    """
    Separates an audio signal using the masks produced by a deep model for every
    time-frequency point. It expects that the model outputs a dictionary where one
    of the keys is 'masks'. This uses the `DeepMixin` class to load the model
    and set the audio signal's parameters to be appropriate for the model.

    Args:
        input_audio_signal: (AudioSignal`) An AudioSignal object containing the
          mixture to be separated.
        model_path (str, optional): Path to the model that will be used. Can be None,
          so that you can initialize a class and load the model later.
          Defaults to None.
        device (str, optional): Device to put the model on. Defaults to 'cpu'.
        **kwargs (dict): Keyword arguments for MaskSeparationBase.
    """
    def __init__(self, input_audio_signal, model_path=None, device='cpu',
                 **kwargs):
        super().__init__(input_audio_signal, **kwargs)

        if model_path is not None:
            self.load_model(model_path, device=device)
        # holds the raw model output after forward() runs
        self.model_output = None

        # audio channel dimension in a mask estimation model
        self.channel_dim = -1

    def forward(self, **kwargs):
        """
        Runs the loaded model on the mixture and returns the estimated masks
        as a numpy array. Also stores the raw model output in
        ``self.model_output`` (used later by :meth:`confidence`).

        Raises:
            SeparationException: If the model output has no 'mask' key.
        """
        input_data = self._get_input_data_for_model(**kwargs)
        with torch.no_grad():
            output = self.model(input_data)
            if 'mask' not in output:
                raise SeparationException(
                    "This model is not a deep mask estimation model! "
                    "Did not find 'mask' key in output dictionary.")
            masks = output['mask']
            # swap back batch and sample dims
            if self.metadata['num_channels'] == 1:
                masks = masks.transpose(0, -2)
            masks = masks.squeeze(0).transpose(0, 1)
            masks = masks.cpu().data.numpy()
        self.model_output = output
        return masks

    def run(self, masks=None):
        """
        Wraps each estimated mask in a mask object of ``self.mask_type``.
        If ``masks`` is None, the model is run first via :meth:`forward`.
        For binary masks, each TF bin is assigned to the source with the
        largest soft mask value.
        """
        self.result_masks = []

        if masks is None:
            masks = self.forward()

        for i in range(masks.shape[-1]):
            mask_data = masks[..., i]
            if self.mask_type == self.MASKS['binary']:
                # binarize: a bin belongs to source i iff i has the max value
                mask_data = masks[..., i] == masks.max(axis=-1)
            mask = self.mask_type(mask_data)
            self.result_masks.append(mask)

        return self.result_masks

    def confidence(self, approach='silhouette_confidence', num_sources=2, **kwargs):
        """
        In embedding-based separation algorithms, we can compute a confidence
        measure based on the clusterability of the embedding space. This can
        be used if the model also computes an embedding alongside the estimates
        (e.g. as in Chimera models.)

        Args:
            approach (str, optional): What approach to use for getting the confidence
              measure. Options are 'jensen_shannon_confidence', 'posterior_confidence',
              'silhouette_confidence', 'loudness_confidence', 'whitened_kmeans_confidence',
              'dpcl_classic_confidence'. Defaults to 'silhouette_confidence'.
            num_sources (int, optional): Number of sources assumed when
              measuring clusterability. Defaults to 2.
            kwargs: Keyword arguments to the function being used to compute the confidence.

        Raises:
            SeparationException: If forward() has not been run yet, or the
                model output has no 'embedding' key.
        """
        if self.model_output is None:
            raise SeparationException(
                "self.model_output is None! Did you run forward?")
        if 'embedding' not in self.model_output:
            raise SeparationException(
                "embedding not in self.model_output! Can't compute confidence.")
        features = self.model_output['embedding']
        # swap back batch and sample dims, mirroring forward()
        if self.metadata['num_channels'] == 1:
            features = features.transpose(0, -2)
        features = features.squeeze(0).transpose(0, 1)
        features = features.cpu().data.numpy()

        confidence_function = getattr(ml.confidence, approach)
        confidence = confidence_function(
            self.audio_signal, features, num_sources, **kwargs)
        return confidence
| 4,119 | 42.829787 | 91 | py |
nussl | nussl-master/nussl/separation/deep/deep_audio_estimation.py | import torch
from ..base import SeparationBase, DeepMixin, SeparationException
class DeepAudioEstimation(DeepMixin, SeparationBase):
    """
    Separates a mixture with a deep model that emits source estimates directly
    in the waveform domain. The model's output dictionary must contain an
    'audio' key. Model loading and input preparation (resampling, STFT
    parameters, transforms) are handled by `DeepMixin`.

    Args:
        input_audio_signal: (AudioSignal`) An AudioSignal object containing the
          mixture to be separated.
        model_path (str, optional): Path to the model that will be used. Can be None,
          so that you can initialize a class and load the model later.
          Defaults to None.
        device (str, optional): Device to put the model on. Defaults to 'cpu'.
        **kwargs (dict): Keyword arguments for MaskSeparationBase.
    """
    def __init__(self, input_audio_signal, model_path=None, device='cpu',
                 **kwargs):
        super().__init__(input_audio_signal, **kwargs)

        if model_path is not None:
            self.load_model(model_path, device=device)
        self.model_output = None

        # waveform models keep the audio channel on dim 1
        self.channel_dim = 1

    def forward(self, **kwargs):
        """
        Runs the model on the mixture and returns the estimated waveforms
        as a numpy array; raw model output is kept in ``self.model_output``.
        """
        data = self._get_input_data_for_model(**kwargs)
        with torch.no_grad():
            model_output = self.model(data)
        if 'audio' not in model_output:
            raise SeparationException(
                "This model is not a deep audio estimation model! "
                "Did not find 'audio' key in output dictionary.")
        waveforms = model_output['audio']

        # mono models process channels independently; swap batch/channel back
        if self.metadata['num_channels'] == 1:
            waveforms = waveforms.transpose(0, self.channel_dim)
        waveforms = waveforms.squeeze(0).cpu().data.numpy()

        self.model_output = model_output
        return waveforms

    def run(self, audio=None):
        """Stores (and, if needed, computes) the estimated waveforms."""
        self.audio = self.forward() if audio is None else audio
        return self.audio

    def make_audio_signals(self):
        """Wraps each estimated waveform in a copy of the mixture signal."""
        num_estimates = self.audio.shape[-1]
        return [
            self.audio_signal.make_copy_with_audio_data(self.audio[..., idx])
            for idx in range(num_estimates)
        ]
| 2,455 | 39.933333 | 86 | py |
nussl | nussl-master/nussl/separation/deep/deep_clustering.py | import torch
from ..base import ClusteringSeparationBase, DeepMixin, SeparationException
class DeepClustering(DeepMixin, ClusteringSeparationBase):
    """
    Deep clustering source separation: a deep model produces an embedding for
    every time-frequency point, and those embeddings are clustered into
    ``num_sources`` groups. The model's output dictionary must contain an
    'embedding' key. Model loading and input preparation are handled by
    `DeepMixin`; clustering is handled by `ClusteringSeparationBase`.

    Args:
        input_audio_signal: (AudioSignal`) An AudioSignal object containing the
          mixture to be separated.
        num_sources (int): Number of sources to cluster the features of and separate
          the mixture.
        model_path (str, optional): Path to the model that will be used. Can be None,
          so that you can initialize a class and load the model later.
          Defaults to None.
        device (str, optional): Device to put the model on. Defaults to 'cpu'.
        **kwargs (dict): Keyword arguments for ClusteringSeparationBase and the
          clustering object used for clustering (one of KMeans, GaussianMixture,
          MiniBatchKmeans).

    Raises:
        SeparationException: If 'embedding' isn't in the output of the model.
    """
    def __init__(self, input_audio_signal, num_sources, model_path=None,
                 device='cpu', **kwargs):
        super().__init__(input_audio_signal, num_sources, **kwargs)

        if model_path is not None:
            self.load_model(model_path, device=device)
        # embedding models keep the audio channel on the last dim
        self.channel_dim = -1

    def forward(self):
        return self.extract_features()

    def extract_features(self, **kwargs):
        """Runs the model and returns per-TF-bin embeddings as a numpy array."""
        data = self._get_input_data_for_model(**kwargs)
        with torch.no_grad():
            model_output = self.model(data)
            if 'embedding' not in model_output:
                raise SeparationException(
                    "This model is not a deep clustering model! "
                    "Did not find 'embedding' key in output dictionary.")
            features = model_output['embedding']
            # mono models process channels independently; swap dims back
            if self.metadata['num_channels'] == 1:
                features = features.transpose(0, -2)
            features = features.squeeze(0).transpose(0, 1)
        self._preprocess_audio_signal()
        return features.cpu().data.numpy()
| 2,524 | 44.089286 | 86 | py |
nussl | nussl-master/nussl/datasets/base_dataset.py | import warnings
from typing import Iterable
import copy
from torch.utils.data import Dataset
from .. import AudioSignal
from . import transforms as tfm
import tqdm
class BaseDataset(Dataset, Iterable):
    """
    The BaseDataset class is the starting point for all dataset hooks
    in nussl. To subclass BaseDataset, you only have to implement two
    functions:

    - ``get_items``: a function that is passed the folder and generates a
      list of items that will be processed by the next function. The
      number of items in the list will dictate len(dataset). Must return
      a list.
    - ``process_item``: this function processes a single item in the list
      generated by get_items. Must return a dictionary.

    After process_item is called, a set of Transforms can be applied to the
    output of process_item. If no transforms are defined (``self.transforms = None``),
    then the output of process_item is returned by self[i]. For implemented
    Transforms, see nussl.datasets.transforms. For example,
    PhaseSpectrumApproximation will add three new keys to the output dictionary
    of process_item:

    - mix_magnitude: the magnitude spectrogram of the mixture
    - source_magnitudes: the magnitude spectrogram of each source
    - ideal_binary_mask: the ideal binary mask for each source

    The transforms are applied in sequence using transforms.Compose.
    Not all sequences of transforms will be valid (e.g. if you pop a key in
    one transform but a later transform operates on that key, you will get
    an error).

    For examples of subclassing, see ``nussl.datasets.hooks``.

    Args:
        folder (str): location that should be processed to produce the list of files
        transform (transforms.* object, optional): A transforms to apply to the output of
            ``self.process_item``. If using transforms.Compose, each transform will be
            applied in sequence. Defaults to None.
        sample_rate (int, optional): Sample rate to use for each audio files. If
            audio file sample rate doesn't match, it will be resampled on the fly.
            If None, uses the default sample rate. Defaults to None.
        stft_params (STFTParams, optional): STFTParams object defining window_length,
            hop_length, and window_type that will be set for each AudioSignal object.
            Defaults to None (32ms window length, 8ms hop, 'hann' window).
        num_channels (int, optional): Number of channels to make each AudioSignal
            object conform to. If an audio signal in your dataset has fewer channels
            than ``num_channels``, a warning is raised, as the behavior in this case
            is undefined. Defaults to None.
        strict_sample_rate (bool, optional): Whether to raise an error when an audio
            file's sample rate does not match ``sample_rate`` (True), or to resample
            the file on the fly instead (False). Defaults to True.
        cache_populated (bool, optional): If a Cache transform is present in
            ``transform``, whether that cache has already been written and should
            be read from instead of recomputing items. Defaults to False.

    Raises:
        DataSetException: Exceptions are raised if the output of the implemented
            functions by the subclass don't match the specification.
    """

    def __init__(self, folder, transform=None, sample_rate=None, stft_params=None,
                 num_channels=None, strict_sample_rate=True, cache_populated=False):
        self.folder = folder
        self.items = self.get_items(self.folder)
        self.transform = transform
        # NOTE: assigning this property also configures any Cache transform
        # found inside ``self.transform`` (see the cache_populated setter).
        self.cache_populated = cache_populated

        self.stft_params = stft_params
        self.sample_rate = sample_rate
        self.num_channels = num_channels
        self.strict_sample_rate = strict_sample_rate

        self.metadata = {
            'name': self.__class__.__name__,
            'stft_params': stft_params,
            'sample_rate': sample_rate,
            'num_channels': num_channels,
            'folder': folder,
            # deepcopy so later mutation of the live transform doesn't
            # silently change the recorded metadata
            'transforms': copy.deepcopy(transform)
        }

        if not isinstance(self.items, list):
            raise DataSetException("Output of self.get_items must be a list!")

        # getting one item in order to set up parameters for audio
        # signals if necessary, if there are any items
        if self.items:
            self.process_item(self.items[0])

    def filter_items_by_condition(self, func):
        """
        Filter the items in the list according to a function that takes
        in both the dataset as well as the item currently be processed.
        If the item in the list passes the condition, then it is kept
        in the list. Otherwise it is taken out of the list. For example,
        a function that would get rid of an item if it is below some
        minimum number of seconds would look like this:

        .. code-block:: python

            min_length = 1  # in seconds

            # self here refers to the dataset
            def remove_short_audio(self, item):
                processed_item = self.process_item(item)
                mix_length = processed_item['mix'].signal_duration
                if mix_length < min_length:
                    return False
                return True

            dataset.items  # contains all items
            dataset.filter_items_by_condition(remove_short_audio)
            dataset.items  # contains only items longer than min length

        Args:
            func (function): A function that takes in two arguments: the dataset and
                this dataset object (self). The function must return a bool.
        """
        filtered_items = []
        n_removed = 0
        desc = f"Filtered {n_removed} items out of dataset"
        pbar = tqdm.tqdm(self.items, desc=desc)
        for item in pbar:
            check = func(self, item)
            if not isinstance(check, bool):
                raise DataSetException(
                    "Output of filter function must be True or False!"
                )
            if check:
                filtered_items.append(item)
            else:
                n_removed += 1
                pbar.set_description(f"Filtered {n_removed} items out of dataset")
        self.items = filtered_items

    @property
    def cache_populated(self):
        # Whether __getitem__ reads items back from the Cache transform (True)
        # or computes them from scratch (False).
        return self._cache_populated

    @cache_populated.setter
    def cache_populated(self, value):
        self.post_cache_transforms = []
        cache_transform = None

        transforms = (
            self.transform.transforms
            if isinstance(self.transform, tfm.Compose)
            else [self.transform])

        found_cache_transform = False
        for t in transforms:
            if isinstance(t, tfm.Cache):
                found_cache_transform = True
                cache_transform = t
            # Everything from the Cache transform onward (the Cache itself
            # included, so reading from it still happens) is what runs when
            # the cache is populated.
            if found_cache_transform:
                self.post_cache_transforms.append(t)

        if not found_cache_transform:
            # there is no cache transform
            self._cache_populated = False
        else:
            self._cache_populated = value
            cache_transform.cache_size = len(self)
            # populated cache -> read-only; unpopulated -> write mode
            cache_transform.overwrite = not value
            self.post_cache_transforms = tfm.Compose(
                self.post_cache_transforms)

    def get_items(self, folder):
        """
        This function must be implemented by whatever class inherits BaseDataset.
        It should return a list of items in the given folder, each of which is
        processed by process_items in some way to produce mixes, sources, class
        labels, etc.

        Args:
            folder (str): location that should be processed to produce the list of files.

        Returns:
            list: list of items that should be processed
        """
        raise NotImplementedError()

    def __len__(self):
        """
        Gets the length of the dataset (the number of items that will be processed).

        Returns:
            int: Length of the dataset (``len(self.items)``).
        """
        return len(self.items)

    def __getitem__(self, i):
        """
        Processes a single item in ``self.items`` using ``self.process_item``.
        The output of ``self.process_item`` is further passed through bunch of
        of transforms if they are defined in parallel. If you want to have
        a set of transforms that depend on each other, then you should compose them
        into a single transforms and then pass it into here. The output of each
        transform is added to an output dictionary which is returned by this
        function.

        Args:
            i (int): Index of the dataset to return. Indexes ``self.items``.

        Returns:
            dict: Dictionary with keys and values corresponding to the processed
                item after being put through the set of transforms (if any are
                defined).
        """
        if self.cache_populated:
            # skip recomputation: read from the cache and only apply the
            # transforms that come after (and include) the Cache transform
            data = {'index': i}
            data = self.post_cache_transforms(data)
        else:
            data = self.process_item(self.items[i])
            if not isinstance(data, dict):
                raise DataSetException(
                    "The output of process_item must be a dictionary!")
            if self.transform:
                data['index'] = i
                data = self.transform(data)
                if not isinstance(data, dict):
                    raise tfm.TransformException(
                        "The output of transform must be a dictionary!")
        return data

    def __iter__(self):
        """
        Calls ``self.__getitem__`` from ``0`` to ``self.__len__()``.
        Required when inheriting Iterable.

        Yields:
            dict: Dictionary with keys and values corresponding to the processed
                item after being put through the set of transforms (if any are
                defined).
        """
        for i in range(len(self)):
            yield self[i]

    def process_item(self, item):
        """Each file returned by get_items is processed by this function. For example,
        if each file is a json file containing the paths to the mixture and sources,
        then this function should parse the json file and load the mixture and sources
        and return them.

        Exact behavior of this functionality is determined by implementation by subclass.

        Args:
            item (object): the item that will be processed by this function. Input depends
                on implementation of ``self.get_items``.

        Returns:
            This should return a dictionary that gets processed by the transforms.
        """
        raise NotImplementedError()

    def _load_audio_file(self, path_to_audio_file, **kwargs):
        """
        Loads audio file at given path. Uses AudioSignal to load the audio data
        from disk.

        Args:
            path_to_audio_file: relative or absolute path to file to load
            kwargs: Keyword arguments to AudioSignal.

        Returns:
            AudioSignal: loaded AudioSignal object of path_to_audio_file
        """
        audio_signal = AudioSignal(path_to_audio_file, **kwargs)
        self._setup_audio_signal(audio_signal)
        return audio_signal

    def _load_audio_from_array(self, audio_data, sample_rate=None):
        """
        Loads the audio data into an AudioSignal object with the appropriate
        sample rate.

        Args:
            audio_data (np.ndarray): numpy array containing the samples containing
                the audio data.
            sample_rate (int): the sample rate at which to load the audio file.
                If None, self.sample_rate or the sample rate of the actual file is used.
                Defaults to None.

        Returns:
            AudioSignal: loaded AudioSignal object of audio_data
        """
        sample_rate = sample_rate if sample_rate else self.sample_rate
        audio_signal = AudioSignal(
            audio_data_array=audio_data, sample_rate=sample_rate)
        self._setup_audio_signal(audio_signal)
        return audio_signal

    def _setup_audio_signal(self, audio_signal):
        """
        You will want every item from a dataset to be uniform in sample rate, STFT
        parameters, and number of channels. This function takes an audio signal
        object loaded by the dataset and uses it to set the sample rate, STFT parameters,
        and the number of channels. If ``self.sample_rate``, ``self.stft_params``, and
        ``self.num_channels`` are set at construction time of the dataset, then the
        opposite happens - attributes of the AudioSignal object are set to the desired
        values.

        Args:
            audio_signal (AudioSignal): AudioSignal object to query to set the parameters
                of this dataset or to set the parameters of, according to what is in the
                dataset.
        """
        if self.sample_rate and self.sample_rate != audio_signal.sample_rate:
            if self.strict_sample_rate:
                raise DataSetException(
                    f"All audio files should have been the same sample rate already "
                    f"because self.strict_sample_rate = True. Please resample or "
                    f"turn set self.strict_sample_rate = False"
                )
            audio_signal.resample(self.sample_rate)
        else:
            # first signal seen (or matching rate): adopt its sample rate
            self.sample_rate = audio_signal.sample_rate

        # set audio signal attributes to requested values, if they exist
        if self.stft_params:
            audio_signal.stft_params = self.stft_params
        else:
            self.stft_params = audio_signal.stft_params

        if self.num_channels:
            if audio_signal.num_channels > self.num_channels:
                # pick the first ``self.num_channels`` channels
                audio_signal.audio_data = audio_signal.audio_data[:self.num_channels]
            elif audio_signal.num_channels < self.num_channels:
                warnings.warn(
                    f"AudioSignal had {audio_signal.num_channels} channels "
                    f"but self.num_channels = {self.num_channels}. Unsure "
                    f"of what to do, so warning. You might want to make sure "
                    f"your dataset is uniform!"
                )
        else:
            self.num_channels = audio_signal.num_channels
class DataSetException(Exception):
    """Raised when something goes wrong while building or reading a nussl dataset."""
| 14,293 | 38.927374 | 90 | py |
nussl | nussl-master/nussl/datasets/transforms.py | import os
import shutil
import logging
import random
from collections import OrderedDict
import torch
import zarr
import numcodecs
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from .. import utils
# Disable blosc's internal threading so zarr-backed caches behave safely
# when multiple training threads are reading/writing at the same time.
if hasattr(numcodecs, 'blosc'):
    numcodecs.blosc.use_threads = False
def compute_ideal_binary_mask(source_magnitudes):
    """
    Computes an ideal binary mask from stacked source magnitude spectrograms.

    For every time-frequency bin, the source with the largest magnitude along
    the last axis gets a 1 and every other source gets a 0. Bins where two or
    more sources tie for the maximum are zeroed out entirely, since no single
    source dominates there.

    Args:
        source_magnitudes (np.ndarray): Array whose last axis indexes sources.

    Returns:
        np.ndarray: Binary mask of the same shape as ``source_magnitudes``.
    """
    winners = source_magnitudes == np.max(
        source_magnitudes, axis=-1, keepdims=True)
    mask = winners.astype(float)
    # Split ties evenly, then threshold at 0.5: any bin where more than one
    # source achieved the maximum ends up with all-zero entries.
    mask = mask / np.sum(mask, axis=-1, keepdims=True)
    mask[mask <= .5] = 0
    return mask
# Default data-dictionary keys whose values are time-frequency representations
# produced by the transforms in this module (e.g. used by GetExcerpt to know
# which arrays must be excerpted in lockstep).
time_frequency_keys = ['mix_magnitude', 'source_magnitudes', 'ideal_binary_mask', 'weights']
class SumSources(object):
    """
    Sums sources together. Looks for sources in ``data[self.source_key]``. If
    a source belongs to a group, it is popped from the ``data[self.source_key]`` and
    summed with the other sources in the group. If there is a corresponding
    group_name in group_names, it is named that in ``data[self.source_key]``. If
    group_names are not given, then the names are constructed using the keys
    in each group (e.g. `drums+bass+other`).

    If using Scaper datasets, then there may be multiple sources with the same
    label but different counts. The Scaper dataset hook organizes the source
    dictionary as follows:

    .. code-block:: none

        data['sources'] = {
            '{label}::{count}': AudioSignal,
            '{label}::{count}': AudioSignal,
            ...
        }

    SumSources sums by source label, so the ``::count`` will be ignored and only the
    label part will be used when grouping sources.

    Example:
        >>> import nussl
        >>> tfm = nussl.datasets.transforms.SumSources(
                groupings=[['drums', 'bass', 'other']],
                group_names=['accompaniment'],
            )
        >>> # data['sources'] is a dict containing keys:
        >>> #   ['vocals', 'drums', 'bass', 'other']
        >>> data = tfm(data)
        >>> # data['sources'] is now a dict containing keys:
        >>> #   ['vocals', 'accompaniment']

    Args:
        groupings (list): a list of lists telling how to group each sources.
        group_names (list, optional): A list containing the names of each group, or None.
            Defaults to None.
        source_key (str, optional): The key to look for in the data containing the list of
            source AudioSignals. Defaults to 'sources'.

    Raises:
        TransformException: if groupings is not a list
        TransformException: if group_names is not None but
            len(groupings) != len(group_names)

    Returns:
        data: modified dictionary with summed sources
    """

    def __init__(self, groupings, group_names=None, source_key='sources'):
        if not isinstance(groupings, list):
            raise TransformException(
                f"groupings must be a list, got {type(groupings)}!")

        if group_names:
            if len(group_names) != len(groupings):
                raise TransformException(
                    f"group_names and groupings must be same length or "
                    f"group_names can be None! Got {len(group_names)} for "
                    f"len(group_names) and {len(groupings)} for len(groupings)."
                )

        self.groupings = groupings
        self.source_key = source_key
        if group_names is None:
            # default name: labels in the group joined by '+'
            group_names = ['+'.join(groupings[i]) for i in range(len(groupings))]
        self.group_names = group_names

    def __call__(self, data):
        if self.source_key not in data:
            raise TransformException(
                f"Expected {self.source_key} in dictionary "
                f"passed to this Transform!"
            )
        sources = data[self.source_key]
        # pair each key's label (the part before any '::count' suffix)
        # with the full key, snapshotting the keys before mutation below
        source_keys = [(k.split('::')[0], k) for k in list(sources.keys())]

        for i, group in enumerate(self.groupings):
            combined = []
            group_name = self.group_names[i]
            for key1 in group:
                for key2 in source_keys:
                    if key2[0] == key1:
                        # pop every source whose label matches this group entry
                        combined.append(sources[key2[1]])
                        sources.pop(key2[1])
            sources[group_name] = sum(combined)
            sources[group_name].path_to_input_file = group_name

        data[self.source_key] = sources
        if 'metadata' in data:
            if 'labels' in data['metadata']:
                # the new group names become valid labels as well
                data['metadata']['labels'].extend(self.group_names)

        return data

    def __repr__(self):
        return (
            f"{self.__class__.__name__}("
            f"groupings = {self.groupings}, "
            f"group_names = {self.group_names}, "
            f"source_key = {self.source_key}"
            f")"
        )
class LabelsToOneHot(object):
    """
    Converts the labels of the sources in a data dictionary into a one-hot
    numpy array. The column ordering of the encoding comes from
    ``data['metadata']['labels']``; the encoded rows correspond to the
    (sorted) labels of the sources found at ``data[source_key]``. The result
    is stored under ``data['one_hot_labels']``.
    """
    def __init__(self, source_key='sources'):
        self.source_key = source_key

    def __repr__(self):
        return f'{self.__class__.__name__}(source_key = {self.source_key})'

    def __call__(self, data):
        if 'metadata' not in data:
            raise TransformException(
                f"Expected metadata in data, got {list(data.keys())}")
        metadata = data['metadata']
        if 'labels' not in metadata:
            raise TransformException(
                f"Expected labels in data['metadata'], got "
                f"{list(data['metadata'].keys())}")

        # Source keys may look like 'label::count'; only the label part
        # participates in the encoding, and rows are sorted by label.
        labels = sorted(key.split('::')[0] for key in data[self.source_key])
        encoder = OneHotEncoder(categories=[metadata['labels']])
        encoded = encoder.fit_transform([[label] for label in labels])
        data['one_hot_labels'] = encoded.toarray()
        return data
class MagnitudeSpectrumApproximation(object):
    """
    Takes a dictionary and looks for two special keys, defined by the
    arguments ``mix_key`` and ``source_key``. These default to `mix` and `sources`.
    These values of these keys are used to calculate the magnitude spectrum
    approximation [1]. The input dictionary is modified to have additional
    keys:

    - mix_magnitude: The magnitude spectrogram of the mixture audio signal.
    - source_magnitudes: The magnitude spectrograms of each source spectrogram.
    - assignments: The ideal binary assignments for each time-frequency bin.

    ``data[self.source_key]`` points to a dictionary containing the source names in
    the keys and the corresponding AudioSignal in the values. The keys are sorted
    in alphabetical order and then appended to the mask. ``data[self.source_key]``
    then points to an OrderedDict instead, where the keys are in the same order
    as in ``data['source_magnitudes']`` and ``data['assignments']``.

    This transform uses the STFTParams that are attached to the AudioSignal objects
    contained in ``data[mix_key]`` and ``data[source_key]``.

    [1] Erdogan, Hakan, John R. Hershey, Shinji Watanabe, and Jonathan Le Roux.
    "Phase-sensitive and recognition-boosted speech separation using
    deep recurrent neural networks." In 2015 IEEE International Conference
    on Acoustics, Speech and Signal Processing (ICASSP), pp. 708-712. IEEE,
    2015.

    Args:
        mix_key (str, optional): The key to look for in data for the mixture AudioSignal.
            Defaults to 'mix'.
        source_key (str, optional): The key to look for in the data containing the dict of
            source AudioSignals. Defaults to 'sources'.

    Raises:
        TransformException: if the expected keys are not in the dictionary, an
            Exception is raised.

    Returns:
        data: Modified version of the input dictionary.
    """

    def __init__(self, mix_key='mix', source_key='sources'):
        self.mix_key = mix_key
        self.source_key = source_key

    def __call__(self, data):
        if self.mix_key not in data:
            raise TransformException(
                f"Expected {self.mix_key} in dictionary "
                f"passed to this Transform! Got {list(data.keys())}."
            )

        mixture = data[self.mix_key]
        # compute (and attach) the STFT so magnitude data is available
        mixture.stft()
        mix_magnitude = mixture.magnitude_spectrogram_data

        data['mix_magnitude'] = mix_magnitude

        if self.source_key not in data:
            # no sources available (e.g. at inference time); only the
            # mixture magnitude is added
            return data

        _sources = data[self.source_key]
        source_names = sorted(list(_sources.keys()))

        # re-key the sources into an OrderedDict so their order matches the
        # stacking order of source_magnitudes / ideal_binary_mask below
        sources = OrderedDict()
        for key in source_names:
            sources[key] = _sources[key]
        data[self.source_key] = sources

        source_magnitudes = []
        for key in source_names:
            s = sources[key]
            s.stft()
            source_magnitudes.append(s.magnitude_spectrogram_data)

        # sources stacked on the last axis, alphabetical by label
        source_magnitudes = np.stack(source_magnitudes, axis=-1)

        data['ideal_binary_mask'] = compute_ideal_binary_mask(source_magnitudes)
        data['source_magnitudes'] = source_magnitudes

        return data

    def __repr__(self):
        return (
            f"{self.__class__.__name__}("
            f"mix_key = {self.mix_key}, "
            f"source_key = {self.source_key}"
            f")"
        )
class MagnitudeWeights(object):
    """
    Applying time-frequency weights to the deep clustering objective results in a
    huge performance boost. This transform looks for 'mix_magnitude', which is output
    by either MagnitudeSpectrumApproximation or PhaseSensitiveSpectrumApproximation
    and puts it into the weights.

    [1] Wang, Zhong-Qiu, Jonathan Le Roux, and John R. Hershey.
    "Alternative objective functions for deep clustering." 2018 IEEE International
    Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2018.

    Args:
        mix_key (str, optional): Key of the mixture AudioSignal, used as a fallback
            to compute the magnitude when 'mix_magnitude' is absent. Defaults to 'mix'.
        mix_magnitude_key (str): Which key to look for the mix_magnitude data in.
    """
    def __init__(self, mix_key='mix', mix_magnitude_key='mix_magnitude'):
        self.mix_magnitude_key = mix_magnitude_key
        self.mix_key = mix_key

    def __repr__(self):
        return f'{self.__class__.__name__}(mix_key = {self.mix_key}, ' \
               f'mix_magnitude_key = {self.mix_magnitude_key})'

    def __call__(self, data):
        if self.mix_magnitude_key not in data and self.mix_key not in data:
            raise TransformException(
                f"Expected {self.mix_magnitude_key} or {self.mix_key} in dictionary "
                f"passed to this Transform! Got {list(data.keys())}. "
                "Either MagnitudeSpectrumApproximation or "
                "PhaseSensitiveSpectrumApproximation should be called "
                "on the data dict prior to this transform. "
            )
        elif self.mix_magnitude_key not in data:
            # fall back to computing the magnitude spectrogram from the mixture
            data[self.mix_magnitude_key] = np.abs(data[self.mix_key].stft())

        magnitude_spectrogram = data[self.mix_magnitude_key]
        # normalize by the total magnitude (1e-6 avoids division by zero on
        # silent input), then rescale by the number of time-frequency bins
        weights = magnitude_spectrogram / (np.sum(magnitude_spectrogram) + 1e-6)
        weights *= (
            magnitude_spectrogram.shape[0] * magnitude_spectrogram.shape[1]
        )
        data['weights'] = np.sqrt(weights)
        return data
class PhaseSensitiveSpectrumApproximation(object):
    """
    Takes a dictionary and looks for two special keys, defined by the
    arguments ``mix_key`` and ``source_key``. These default to `mix` and `sources`.
    These values of these keys are used to calculate the phase sensitive spectrum
    approximation [1]. The input dictionary is modified to have additional
    keys:

    - mix_magnitude: The magnitude spectrogram of the mixture audio signal.
    - source_magnitudes: The magnitude spectrograms of each source spectrogram.
    - assignments: The ideal binary assignments for each time-frequency bin.

    ``data[self.source_key]`` points to a dictionary containing the source names in
    the keys and the corresponding AudioSignal in the values. The keys are sorted
    in alphabetical order and then appended to the mask. ``data[self.source_key]``
    then points to an OrderedDict instead, where the keys are in the same order
    as in ``data['source_magnitudes']`` and ``data['assignments']``.

    This transform uses the STFTParams that are attached to the AudioSignal objects
    contained in ``data[mix_key]`` and ``data[source_key]``.

    [1] Erdogan, Hakan, John R. Hershey, Shinji Watanabe, and Jonathan Le Roux.
    "Phase-sensitive and recognition-boosted speech separation using
    deep recurrent neural networks." In 2015 IEEE International Conference
    on Acoustics, Speech and Signal Processing (ICASSP), pp. 708-712. IEEE,
    2015.

    Args:
        mix_key (str, optional): The key to look for in data for the mixture AudioSignal.
            Defaults to 'mix'.
        source_key (str, optional): The key to look for in the data containing the list of
            source AudioSignals. Defaults to 'sources'.
        range_min (float, optional): The lower end to use when truncating the source
            magnitudes in the phase sensitive spectrum approximation. Defaults to 0.0 (construct
            non-negative masks). Use -np.inf for untruncated source magnitudes.
        range_max (float, optional): The higher end of the truncated spectrum. This gets
            multiplied by the magnitude of the mixture. Use 1.0 to truncate the source
            magnitudes to `max(source_magnitudes, mix_magnitude)`. Use np.inf for untruncated
            source magnitudes (best performance for an oracle mask but may be beyond what a
            neural network is capable of masking). Defaults to 1.0.

    Raises:
        TransformException: if the expected keys are not in the dictionary, an
            Exception is raised.

    Returns:
        data: Modified version of the input dictionary.
    """

    def __init__(self, mix_key='mix', source_key='sources',
                 range_min=0.0, range_max=1.0):
        self.mix_key = mix_key
        self.source_key = source_key
        self.range_min = range_min
        self.range_max = range_max

    def __call__(self, data):
        if self.mix_key not in data:
            raise TransformException(
                f"Expected {self.mix_key} in dictionary "
                f"passed to this Transform! Got {list(data.keys())}."
            )

        mixture = data[self.mix_key]
        mix_stft = mixture.stft()
        mix_magnitude = np.abs(mix_stft)
        # mixture phase, needed for the phase-difference term below
        mix_angle = np.angle(mix_stft)
        data['mix_magnitude'] = mix_magnitude

        if self.source_key not in data:
            # no sources available (e.g. at inference time); only the
            # mixture magnitude is added
            return data

        _sources = data[self.source_key]
        source_names = sorted(list(_sources.keys()))

        # re-key the sources into an OrderedDict so their order matches the
        # stacking order of source_magnitudes / ideal_binary_mask below
        sources = OrderedDict()
        for key in source_names:
            sources[key] = _sources[key]
        data[self.source_key] = sources

        source_angles = []
        source_magnitudes = []
        for key in source_names:
            s = sources[key]
            _stft = s.stft()
            source_magnitudes.append(np.abs(_stft))
            source_angles.append(np.angle(_stft))

        # sources stacked on the last axis, alphabetical by label
        source_magnitudes = np.stack(source_magnitudes, axis=-1)
        source_angles = np.stack(source_angles, axis=-1)

        range_min = self.range_min
        # upper truncation bound scales with the mixture magnitude
        range_max = self.range_max * mix_magnitude[..., None]

        # Section 3.1: https://arxiv.org/pdf/1909.08494.pdf
        # Scale each source magnitude by the cosine of its phase difference
        # with the mixture, then clamp into [range_min, range_max].
        source_magnitudes = np.minimum(
            np.maximum(
                source_magnitudes * np.cos(source_angles - mix_angle[..., None]),
                range_min
            ),
            range_max
        )

        data['ideal_binary_mask'] = compute_ideal_binary_mask(source_magnitudes)
        data['source_magnitudes'] = source_magnitudes

        return data

    def __repr__(self):
        return (
            f"{self.__class__.__name__}("
            f"mix_key = {self.mix_key}, "
            f"source_key = {self.source_key}, "
            f"range_min = {self.range_min}, "
            f"range_max = {self.range_max}"
            f")"
        )
class IndexSources(object):
    """
    Extracts a single indexed source from an array in the data dictionary
    (usually either ``source_magnitudes`` or ``ideal_binary_mask``). Can be
    used to train single-source separation models (e.g. mix goes in, vocals
    come out).

    You need to know which slice of the array to extract. The sources are
    stacked on the last axis in alphabetical order of their source labels.
    For example, if source magnitudes has shape `(257, 400, 1, 4)` and the
    data is from MUSDB, the labels (alphabetical) are bass, drums, other,
    vocals, so:

    .. code-block:: python

        source_magnitudes[..., 0]  # bass spectrogram
        source_magnitudes[..., 1]  # drums spectrogram
        source_magnitudes[..., 2]  # other spectrogram
        source_magnitudes[..., 3]  # vocals spectrogram

    The extracted slice keeps a trailing singleton source axis, so the
    array rank does not change.

    Args:
        target_key (str): Key in the data dictionary whose value should be
            indexed along its last axis (e.g. 'source_magnitudes').
        index (int): Which source (slice of the last axis) to keep.

    Raises:
        TransformException: if ``target_key`` is missing from the data, or
            ``index`` is out of bounds for the array's last axis.
    """
    def __init__(self, target_key, index):
        self.target_key = target_key
        self.index = index

    def __call__(self, data):
        if self.target_key not in data:
            raise TransformException(
                f"Expected {self.target_key} in dictionary, got {list(data.keys())}")
        if self.index >= data[self.target_key].shape[-1]:
            # fixed error message (previously read "out of bounds bounds")
            raise TransformException(
                f"Shape of data[{self.target_key}] is {data[self.target_key].shape} "
                f"but index = {self.index} is out of bounds of last dim.")
        # Keep a trailing singleton axis so downstream code sees the same rank.
        data[self.target_key] = data[self.target_key][..., self.index, None]
        return data

    def __repr__(self):
        return f'{self.__class__.__name__}(target_key = {self.target_key}, ' \
               f'index = {self.index})'
class GetExcerpt(object):
    """
    Takes in a dictionary containing Torch tensors or numpy arrays and extracts an
    excerpt from each tensor corresponding to a spectral representation of a specified
    length in frames. Can be used to get L-length spectrograms from mixture and source
    spectrograms. If the data is shorter than the specified length, it
    is padded to the specified length. If it is longer, a random offset between
    ``(0, data_length - specified_length)`` is chosen. This function assumes that
    it is being passed data AFTER ToSeparationModel. Thus the time dimension is
    on axis=1.

    Args:
        excerpt_length (int): Specified length of transformed data in frames.
        time_dim (int): Which dimension time is on (excerpts are taken along this axis).
            Defaults to 0.
        tf_keys (list): Which keys to look at it in the data dictionary to
            take excerpts from. Defaults to the module-level ``time_frequency_keys``.
    """
    def __init__(self, excerpt_length, time_dim=0,
                 tf_keys=None):
        self.excerpt_length = excerpt_length
        self.time_dim = time_dim
        self.time_frequency_keys = tf_keys if tf_keys else time_frequency_keys

    def __repr__(self):
        return f'{self.__class__.__name__}(excerpt_length = {self.excerpt_length}), ' \
               f'time_dim = {self.time_dim}, tf_keys = {self.time_frequency_keys})'

    @staticmethod
    def _validate(data, key):
        # Excerpting only works on torch tensors and numpy arrays.
        is_tensor = torch.is_tensor(data[key])
        is_array = isinstance(data[key], np.ndarray)
        if not is_tensor and not is_array:
            raise TransformException(
                f"data[{key}] was not a torch Tensor or a numpy array!")
        return is_tensor, is_array

    def _get_offset(self, data, key):
        # Picks a random start frame when data is long enough; otherwise
        # starts at 0 and reports how much padding is needed.
        self._validate(data, key)
        data_length = data[key].shape[self.time_dim]
        if data_length >= self.excerpt_length:
            offset = random.randint(0, data_length - self.excerpt_length)
        else:
            offset = 0
        pad_amount = max(0, self.excerpt_length - data_length)
        return offset, pad_amount

    def _construct_pad_func_tuple(self, shape, pad_amount, is_tensor):
        # torch and numpy specify padding differently; build the right spec.
        if is_tensor:
            pad_func = torch.nn.functional.pad
            # torch pad tuples are given from the last dimension backwards,
            # hence the reversal after placing the pad amount
            pad_tuple = [0 for _ in range(2 * len(shape))]
            pad_tuple[2 * self.time_dim] = pad_amount
            pad_tuple = pad_tuple[::-1]
        else:
            pad_func = np.pad
            pad_tuple = [(0, 0) for _ in range(len(shape))]
            pad_tuple[self.time_dim] = (0, pad_amount)
        return pad_func, pad_tuple

    def __call__(self, data):
        # The same offset and padding are applied to every time-frequency key
        # so the excerpts stay aligned across mix, sources, masks, and weights.
        offset, pad_amount = self._get_offset(
            data, self.time_frequency_keys[0])

        for key in data:
            if key in self.time_frequency_keys:
                is_tensor, is_array = self._validate(data, key)
                if pad_amount > 0:
                    pad_func, pad_tuple = self._construct_pad_func_tuple(
                        data[key].shape, pad_amount, is_tensor)
                    data[key] = pad_func(data[key], pad_tuple)

                data[key] = utils._slice_along_dim(
                    data[key], self.time_dim, offset, offset + self.excerpt_length)
        return data
class Cache(object):
    """
    The Cache transform can be placed within a Compose transform. The data
    dictionary coming into this transform will be saved to the specified
    location using ``zarr``. Then instead of computing all of the transforms
    before the cache, one can simply read from the cache. The transforms after
    this will then be applied to the data dictionary that is read from the
    cache. A typical pipeline might look like this:

    .. code-block:: python

        dataset = datasets.Scaper('path/to/scaper/folder')
        tfm = transforms.Compose([
            transforms.PhaseSensitiveApproximation(),
            transforms.ToSeparationModel(),
            transforms.Cache('~/.nussl/cache/tag', overwrite=True),
            transforms.GetExcerpt()
        ])
        dataset[0]  # first time will write to cache then apply GetExcerpt
        dataset.cache_populated = True  # switches to reading from cache
        dataset[0]  # second time will read from cache then apply GetExcerpt
        dataset[1]  # will error out as it wasn't written to the cache!

        dataset.cache_populated = False
        for i in range(len(dataset)):
            dataset[i]  # every item will get written to cache
        dataset.cache_populated = True
        dataset[1]  # now it exists

        dataset = datasets.Scaper('path/to/scaper/folder')  # next time around
        tfm = transforms.Compose([
            transforms.PhaseSensitiveApproximation(),
            transforms.ToSeparationModel(),
            transforms.Cache('~/.nussl/cache/tag', overwrite=False),
            transforms.GetExcerpt()
        ])
        dataset.cache_populated = True
        dataset[0]  # will read from cache, which still exists from last time

    Args:
        location (str): Path on disk where the zarr-backed cache is stored.
        cache_size (int, optional): Number of items the cache array can hold.
            Defaults to 1; typically reset by the owning dataset to its length.
        overwrite (bool, optional): If True, any existing cache at ``location``
            is cleared and the cache is opened for writing; if False, an
            existing cache is opened read-only. Defaults to False.
    """
    def __init__(self, location, cache_size=1, overwrite=False):
        self.location = location
        self.cache_size = cache_size
        self.cache = None
        # assigning the property also clears (if requested) and opens the cache
        self.overwrite = overwrite

    @property
    def info(self):
        # zarr's summary of the underlying store
        return self.cache.info

    @property
    def overwrite(self):
        return self._overwrite

    @overwrite.setter
    def overwrite(self, value):
        self._overwrite = value
        self._clear_cache(self.location)
        self._open_cache(self.location)

    def _clear_cache(self, location):
        # Only remove an existing cache when we're in overwrite mode.
        if os.path.exists(location):
            if self.overwrite:
                logging.info(
                    f"Cache {location} exists and overwrite = True, clearing cache.")
                shutil.rmtree(location, ignore_errors=True)

    def _open_cache(self, location):
        if self.overwrite:
            # write mode: one pickled data dictionary per chunk
            self.cache = zarr.open(location, mode='w', shape=(self.cache_size,),
                                   chunks=(1,), dtype=object,
                                   object_codec=numcodecs.Pickle(),
                                   synchronizer=zarr.ThreadSynchronizer())
        else:
            # read mode: only open if something was previously written
            if os.path.exists(location):
                self.cache = zarr.open(location, mode='r',
                                       object_codec=numcodecs.Pickle(),
                                       synchronizer=zarr.ThreadSynchronizer())

    def __call__(self, data):
        if 'index' not in data:
            raise TransformException(
                f"Expected 'index' in dictionary, got {list(data.keys())}")
        index = data['index']
        if self.overwrite:
            # write mode: store the incoming item at its index first
            self.cache[index] = data
        # in both modes, what flows on is whatever the cache holds at index
        data = self.cache[index]
        if not isinstance(data, dict):
            raise TransformException(
                f"Reading from cache resulted in not a dictionary! "
                f"Maybe you haven't written to index {index} yet in "
                f"the cache?")
        return data
class GetAudio(object):
    """
    Pulls the raw waveform out of the mixture signal (and, when present, out
    of each source signal) and stores it under new keys: ``mix_audio`` and
    ``source_audio``. Useful when training end-to-end (waveform) models.

    Args:
        mix_key (str, optional): The key to look for in data for the mixture
            AudioSignal. Defaults to 'mix'.
        source_key (str, optional): The key to look for in the data containing
            the dict of source AudioSignals. Defaults to 'sources'.
    """
    def __init__(self, mix_key='mix', source_key='sources'):
        self.mix_key = mix_key
        self.source_key = source_key

    def __repr__(self):
        return f'{self.__class__.__name__}(mix_key = {self.mix_key}, ' \
               f'source_key = {self.source_key})'

    def __call__(self, data):
        if self.mix_key not in data:
            raise TransformException(
                f"Expected {self.mix_key} in dictionary "
                f"passed to this Transform! Got {list(data.keys())}."
            )
        data['mix_audio'] = data[self.mix_key].audio_data
        if self.source_key in data:
            sources = data[self.source_key]
            # Stack source waveforms along a trailing source axis, in sorted
            # key order so the layout is deterministic.
            ordered = [sources[name].audio_data
                       for name in sorted(sources.keys())]
            data['source_audio'] = np.stack(ordered, axis=-1)
        return data
class ToSeparationModel(object):
    """
    Takes in a dictionary containing objects and removes any objects that cannot
    be passed to SeparationModel (e.g. not a numpy array or torch Tensor).
    If these objects are passed to SeparationModel, then an error will occur. This
    class should be the last one in your list of transforms, if you're using
    this dataset in a DataLoader object for training a network. If the keys
    correspond to numpy arrays, they are converted to tensors using
    ``torch.from_numpy``. Finally, the dimensions corresponding to time and
    frequency are swapped for all the keys in swap_tf_dims, as this is how
    SeparationModel expects it.

    Example:
        .. code-block:: none

            data = {
                # 2ch spectrogram for mixture
                'mix_magnitude': torch.randn(513, 400, 2),
                # 2ch spectrogram for each source
                'source_magnitudes': torch.randn(513, 400, 2, 4)
                'mix': AudioSignal()
            }
            tfm = transforms.ToSeparationModel()
            data = tfm(data)
            data['mix_magnitude'].shape # (400, 513, 2)
            data['source_magnitudes'].shape # (400, 513, 2, 4)
            'mix' in data.keys() # False

    If this class isn't in your transforms list for the dataset, but you are
    using it in the Trainer class, then it is added automatically as the
    last transform.

    Args:
        swap_tf_dims (list, optional): Keys whose first two dimensions
            (frequency, time) should be swapped. Defaults to the module-level
            ``time_frequency_keys``.
    """
    def __init__(self, swap_tf_dims=None):
        self.swap_tf_dims = swap_tf_dims if swap_tf_dims else time_frequency_keys

    def __call__(self, data):
        for key in list(data.keys()):
            # 'index' is bookkeeping (e.g. for Cache) and is passed through
            # untouched.
            if key == 'index':
                continue
            if isinstance(data[key], np.ndarray):
                data[key] = torch.from_numpy(data[key])
            if not torch.is_tensor(data[key]):
                # Non-tensor values can't be collated by SeparationModel;
                # drop them. ``continue`` here fixes a bug where a dropped
                # key that was also listed in swap_tf_dims caused a KeyError
                # on the transpose below.
                data.pop(key)
                continue
            if key in self.swap_tf_dims:
                data[key] = data[key].transpose(1, 0)
        return data

    def __repr__(self):
        return f"{self.__class__.__name__}(swap_tf_dims = {self.swap_tf_dims})"
class Compose(object):
    """Chains several transforms, applying them in order. Inspired by the
    torchvision implementation.

    Each transform must accept a dictionary and return a dictionary; a
    ``TransformException`` is raised as soon as one returns anything else.

    Args:
        transforms (list of ``Transform`` objects): list of transforms to compose.

    Example:
        >>> transforms.Compose([
        >>>     transforms.MagnitudeSpectrumApproximation(),
        >>>     transforms.ToSeparationModel(),
        >>> ])
    """
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, data):
        for transform in self.transforms:
            data = transform(data)
            if not isinstance(data, dict):
                raise TransformException(
                    "The output of every transform must be a dictionary!")
        return data

    def __repr__(self):
        body = ''.join(f'\n    {t}' for t in self.transforms)
        return f'{self.__class__.__name__}({body}\n)'
class TransformException(Exception):
    """Raised when a nussl transform receives malformed data or fails."""
| 30,671 | 36.496333 | 94 | py |
nussl | nussl-master/nussl/ml/confidence.py | """
There are ways to measure the quality of a separated source without
requiring ground truth. These functions operate on the output of
clustering-based separation algorithms and work by analyzing
the clusterability of the feature space used to generate the
separated sources.
"""
from sklearn.metrics import silhouette_samples
import numpy as np
from .cluster import KMeans, GaussianMixture
from scipy.special import logsumexp
from .train import loss
import torch
def softmax(x, axis=None):
    """Numerically stable softmax computed via the log-sum-exp trick."""
    log_denominator = logsumexp(x, axis=axis, keepdims=True)
    return np.exp(x - log_denominator)
def jensen_shannon_divergence(gmm_p, gmm_q, n_samples=10**5):
    """
    Estimate the Jensen-Shannon (JS) divergence between two Gaussian Mixture
    Models by Monte-Carlo sampling. JS divergence (symmetrized KL) has no
    closed form for GMMs in general, so each expectation is approximated over
    samples drawn from the respective model.

    Args:
        gmm_p (GaussianMixture): A GaussianMixture class fit to some data.
        gmm_q (GaussianMixture): Another GaussianMixture class fit to some data.
        n_samples (int): Number of samples to use to estimate JS divergence.

    Returns:
        Estimated JS divergence between gmm_p and gmm_q.
    """
    def _mean_log_likelihoods(sampler):
        # Draw from ``sampler`` and score the draws under both models and
        # under the equal-weight mixture of the two.
        samples = sampler.sample(n_samples)[0]
        log_p = gmm_p.score_samples(samples)
        log_q = gmm_q.score_samples(samples)
        log_mix = np.logaddexp(log_p, log_q)
        return log_p.mean(), log_q.mean(), log_mix.mean()

    # Sample from p first, then q (matching the expectation terms below).
    p_under_p, _, mix_under_p = _mean_log_likelihoods(gmm_p)
    _, q_under_q, mix_under_q = _mean_log_likelihoods(gmm_q)
    kl_p_to_mix = p_under_p - (mix_under_p - np.log(2))
    kl_q_to_mix = q_under_q - (mix_under_q - np.log(2))
    return (kl_p_to_mix + kl_q_to_mix) / 2
def _get_loud_bins_mask(threshold, audio_signal=None, representation=None):
if representation is None:
representation = np.abs(audio_signal.stft())
threshold = np.percentile(representation, threshold)
mask = representation > threshold
return mask, representation
def jensen_shannon_confidence(audio_signal, features, num_sources, threshold=95,
                              n_samples=10**5, **kwargs):
    """
    Measures the clusterability of a feature space by comparing a
    ``num_sources``-component GMM against a single-component GMM fit to the
    same points. Only points above the loudness ``threshold`` percentile are
    used, which saves computation and focuses the measure on perceptually
    important bins.

    References:
        Seetharaman, Wichern, Le Roux, Pardo. "Bootstrapping Single-Channel
        Source Separation via Unsupervised Spatial Clustering on Stereo
        Mixtures", ICASSP 2019.
        Seetharaman. "Bootstrapping the Learning Process for Computer
        Audition", PhD thesis, Northwestern University, 2019.

    Args:
        audio_signal (AudioSignal): Used to compute the loudness mask. May be
            None if ``representation`` is passed as a keyword argument.
        features (np.ndarray): Features to cluster; same leading dimensions
            as the representation, trailing embedding dimension.
        num_sources (int): Number of sources to cluster the features into.
        threshold (int, optional): Loudness percentile below which points are
            excluded. Defaults to 95.
        n_samples (int, optional): Samples used to estimate JS divergence.
        kwargs: Forwarded to ``_get_loud_bins_mask`` (e.g. ``representation``).

    Returns:
        float: Confidence given by Jensen-Shannon divergence.
    """
    mask, _ = _get_loud_bins_mask(threshold, audio_signal, **kwargs)
    points = features[mask].reshape(-1, features.shape[-1])
    null_model = GaussianMixture(1)
    full_model = GaussianMixture(num_sources)
    null_model.fit(points)
    full_model.fit(points)
    return jensen_shannon_divergence(
        null_model, full_model, n_samples=n_samples)
def posterior_confidence(audio_signal, features, num_sources, threshold=95,
                         **kwargs):
    """
    Measures the clusterability of a feature space via the strength of each
    point's soft assignment to a cluster: the more points that sit "between"
    clusters (no strong assignment), the lower the confidence.

    References:
        Seetharaman, Wichern, Le Roux, Pardo. "Bootstrapping Single-Channel
        Source Separation via Unsupervised Spatial Clustering on Stereo
        Mixtures", ICASSP 2019.
        Seetharaman. "Bootstrapping the Learning Process for Computer
        Audition", PhD thesis, Northwestern University, 2019.

    Args:
        audio_signal (AudioSignal): Used to compute the loudness mask. May be
            None if ``representation`` is passed as a keyword argument.
        features (np.ndarray): Features to cluster; same leading dimensions
            as the representation, trailing embedding dimension.
        num_sources (int): Number of sources to cluster the features into.
        threshold (int, optional): Loudness percentile below which points are
            excluded. Defaults to 95.
        kwargs: Forwarded to ``_get_loud_bins_mask`` (e.g. ``representation``).

    Returns:
        float: Confidence given by mean rescaled posterior strength.
    """
    mask, _ = _get_loud_bins_mask(threshold, audio_signal, **kwargs)
    points = features[mask].reshape(-1, features.shape[-1])
    distances = KMeans(num_sources).fit_transform(points)
    posteriors = softmax(-distances, axis=-1)
    strongest = np.max(posteriors, axis=-1)
    # Rescale so a uniform posterior maps to 0 and a one-hot posterior to 1.
    rescaled = (num_sources * strongest - 1) / (num_sources - 1)
    return rescaled.mean()
def silhouette_confidence(audio_signal, features, num_sources, threshold=95,
                          max_points=1000, **kwargs):
    """
    Measures clusterability via the mean Silhouette Coefficient. For each
    sample, the coefficient is (b - a) / max(a, b), where a is the mean
    intra-cluster distance and b is the mean distance to the nearest other
    cluster. Only defined for 2 <= n_labels <= n_samples - 1.

    References:
        Seetharaman. "Bootstrapping the Learning Process for Computer
        Audition", PhD thesis, Northwestern University, 2019.
        Rousseeuw (1987). "Silhouettes: a Graphical Aid to the Interpretation
        and Validation of Cluster Analysis". Comput. Appl. Math. 20: 53-65.

    Args:
        audio_signal (AudioSignal): Used to compute the loudness mask. May be
            None if ``representation`` is passed as a keyword argument.
        features (np.ndarray): Features to cluster; same leading dimensions
            as the representation, trailing embedding dimension.
        num_sources (int): Number of sources to cluster the features into.
        threshold (int, optional): Loudness percentile below which points are
            excluded. Defaults to 95.
        max_points (int, optional): Maximum number of points scored, since
            the silhouette computation is costly. Defaults to 1000.
        kwargs: Forwarded to ``_get_loud_bins_mask`` (e.g. ``representation``).

    Returns:
        float: Confidence given by the mean Silhouette score.
    """
    mask, _ = _get_loud_bins_mask(threshold, audio_signal, **kwargs)
    points = features[mask].reshape(-1, features.shape[-1])
    if points.shape[0] > max_points:
        # Subsample without replacement to cap the cost.
        chosen = np.random.choice(
            np.arange(points.shape[0]), max_points,
            replace=False)
        points = points[chosen]
    labels = KMeans(num_sources).fit_predict(points)
    return silhouette_samples(points, labels).mean()
def loudness_confidence(audio_signal, features, num_sources, threshold=95,
                        **kwargs):
    """
    Measures clusterability by the relative size of the smallest cluster:
    if one cluster captures almost no points, the space is unlikely to
    contain ``num_sources`` well-separated sources.

    References:
        Seetharaman, Wichern, Le Roux, Pardo. "Bootstrapping Single-Channel
        Source Separation via Unsupervised Spatial Clustering on Stereo
        Mixtures", ICASSP 2019.
        Seetharaman. "Bootstrapping the Learning Process for Computer
        Audition", PhD thesis, Northwestern University, 2019.

    Args:
        audio_signal (AudioSignal): Used to compute the loudness mask. May be
            None if ``representation`` is passed as a keyword argument.
        features (np.ndarray): Features to cluster; same leading dimensions
            as the representation, trailing embedding dimension.
        num_sources (int): Number of sources to cluster the features into.
        threshold (int, optional): Loudness percentile below which points are
            excluded. Defaults to 95.
        kwargs: Forwarded to ``_get_loud_bins_mask`` (e.g. ``representation``).

    Returns:
        float: Confidence given by the share of the smallest cluster.
    """
    mask, _ = _get_loud_bins_mask(threshold, audio_signal, **kwargs)
    points = features[mask].reshape(-1, features.shape[-1])
    labels = KMeans(num_sources).fit_predict(points)
    counts = np.bincount(labels, minlength=num_sources).astype(float)
    counts *= (1 / counts.sum())
    return counts.min()
def whitened_kmeans_confidence(audio_signal, features, num_sources, threshold=95,
                               **kwargs):
    """
    Computes the clusterability in two steps:

    1. Cluster the feature space using KMeans into assignments.
    2. Compute the Whitened K-Means loss between the features and the assignments.

    Args:
        audio_signal (AudioSignal): AudioSignal object which will be used to compute
            the mask over which to compute the confidence measure. This can be None, if
            and only if ``representation`` is passed as a keyword argument to this
            function.
        features (np.ndarray): Numpy array containing the features to be clustered.
            Should have the same dimensions as the representation.
        num_sources (int): Number of sources to cluster the features into.
        threshold (int, optional): Threshold by loudness. Points below the threshold are
            excluded from being used in the confidence measure. Defaults to 95.
        kwargs: Keyword arguments to `_get_loud_bins_mask`. Namely, representation can
            go here as a keyword argument.

    Returns:
        float: Confidence given by whitened k-means loss.
    """
    mask, representation = _get_loud_bins_mask(threshold, audio_signal, **kwargs)
    embedding_size = features.shape[-1]
    features = features[mask].reshape(-1, embedding_size)
    weights = representation[mask].reshape(-1)
    kmeans = KMeans(num_sources)
    distances = kmeans.fit_transform(features)
    # KMeans assigns each point to its *nearest* centroid, i.e. the minimum
    # distance. (Fix: this previously compared against the maximum distance,
    # which one-hot encoded the farthest cluster instead of the assigned one.)
    assignments = (distances == distances.min(axis=-1, keepdims=True))
    loss_func = loss.WhitenedKMeansLoss()
    features = torch.from_numpy(features).unsqueeze(0).float()
    assignments = torch.from_numpy(assignments).unsqueeze(0).float()
    weights = torch.from_numpy(weights).unsqueeze(0).float()
    loss_val = loss_func(features, assignments, weights).item()
    # The whitened k-means loss is bounded above by D + C (embedding size +
    # number of clusters); normalize so confidence lies near [0, 1].
    upper_bound = embedding_size + num_sources
    confidence = 1 - (loss_val / upper_bound)
    return confidence
def dpcl_classic_confidence(audio_signal, features, num_sources, threshold=95,
                            **kwargs):
    """
    Computes the clusterability in two steps:

    1. Cluster the feature space using KMeans into assignments.
    2. Compute the classic deep clustering loss between the features and the
       assignments.

    Args:
        audio_signal (AudioSignal): AudioSignal object which will be used to compute
            the mask over which to compute the confidence measure. This can be None, if
            and only if ``representation`` is passed as a keyword argument to this
            function.
        features (np.ndarray): Numpy array containing the features to be clustered.
            Should have the same dimensions as the representation.
        num_sources (int): Number of sources to cluster the features into.
        threshold (int, optional): Threshold by loudness. Points below the threshold are
            excluded from being used in the confidence measure. Defaults to 95.
        kwargs: Keyword arguments to `_get_loud_bins_mask`. Namely, representation can
            go here as a keyword argument.

    Returns:
        float: Confidence given by deep clustering loss.
    """
    mask, representation = _get_loud_bins_mask(threshold, audio_signal, **kwargs)
    embedding_size = features.shape[-1]
    features = features[mask].reshape(-1, embedding_size)
    weights = representation[mask].reshape(-1)
    kmeans = KMeans(num_sources)
    distances = kmeans.fit_transform(features)
    # KMeans assigns each point to its *nearest* centroid, i.e. the minimum
    # distance. (Fix: this previously compared against the maximum distance,
    # which one-hot encoded the farthest cluster instead of the assigned one.)
    assignments = (distances == distances.min(axis=-1, keepdims=True))
    loss_func = loss.DeepClusteringLoss()
    features = torch.from_numpy(features).unsqueeze(0).float()
    assignments = torch.from_numpy(assignments).unsqueeze(0).float()
    weights = torch.from_numpy(weights).unsqueeze(0).float()
    loss_val = loss_func(features, assignments, weights).item()
    # Deep clustering loss is normalized to [0, 1]; invert so higher = better.
    confidence = 1 - loss_val
    return confidence
| 14,964 | 42.502907 | 89 | py |
nussl | nussl-master/nussl/ml/networks/separation_model.py | import os
import json
import inspect
import torch
from torch import nn
import numpy as np
from . import modules
from ... import __version__
import copy
def _remove_cache_from_tfms(transforms):
    """Return a deep copy of ``transforms`` with any ``Cache`` transforms removed.

    Used when preparing metadata for saving a model, so that cache transforms
    are not serialized alongside it.

    Args:
        transforms: A transform, or a ``datasets.transforms.Compose`` of
            transforms. Only ``Compose`` instances are filtered; anything
            else is returned as an untouched deep copy.

    Returns:
        A deep copy of ``transforms`` with ``Cache`` entries stripped out.
    """
    from ... import datasets
    transforms = copy.deepcopy(transforms)
    if isinstance(transforms, datasets.transforms.Compose):
        # Build a new list instead of calling ``remove`` while iterating:
        # removing during iteration skips the element following each removed
        # item, so consecutive Cache transforms were not all stripped.
        transforms.transforms = [
            t for t in transforms.transforms
            if not isinstance(t, datasets.transforms.Cache)
        ]
    return transforms
def _prep_metadata(metadata):
"""Helper function for preparing metadata before saving a model.
"""
metadata = copy.deepcopy(metadata)
if 'transforms' in metadata:
metadata['transforms'] = _remove_cache_from_tfms(metadata['transforms'])
return metadata
class SeparationModel(nn.Module):
    """
    SeparationModel takes a configuration file or dictionary that describes the model
    structure, which is some combination of MelProjection, Embedding, RecurrentStack,
    ConvolutionalStack, and other modules found in ``nussl.ml.networks.modules``.
    References:
        Hershey, J. R., Chen, Z., Le Roux, J., & Watanabe, S. (2016, March).
        Deep clustering: Discriminative embeddings for segmentation and separation.
        In Acoustics, Speech and Signal Processing (ICASSP),
        2016 IEEE International Conference on (pp. 31-35). IEEE.
        Luo, Y., Chen, Z., Hershey, J. R., Le Roux, J., & Mesgarani, N. (2017, March).
        Deep clustering and conventional networks for music separation: Stronger together.
        In Acoustics, Speech and Signal Processing (ICASSP),
        2017 IEEE International Conference on (pp. 61-65). IEEE.
    Args:
        config: (str, dict) Either a config dictionary that defines the model and its
            connections, or the path to a json file containing the dictionary. If the
            latter, the path will be loaded and used.
    Attributes:
        config: (dict) The loaded config dictionary passed in upon init.
        connections: (list) A list of strings that define the connections as given
            in `config`.
        output: (list)
    See also:
        ml.register_module to register your custom modules with SeparationModel.
    Examples:
        >>> import nussl
        >>> config = nussl.ml.networks.builders.build_recurrent_dpcl(
        >>>     num_features=512, hidden_size=300, num_layers=3, bidirectional=True,
        >>>     dropout=0.3, embedding_size=20,
        >>>     embedding_activation=['sigmoid', 'unit_norm'])
        >>>
        >>> model = SeparationModel(config)
    """
    def __init__(self, config, verbose=False):
        super(SeparationModel, self).__init__()
        # ``config`` may be a path to a JSON file or a JSON string; either way
        # it is parsed into a dict before validation.
        if type(config) is str:
            if os.path.exists(config):
                with open(config, 'r') as f:
                    config = json.load(f)
            else:
                config = json.loads(config)
        self._validate_config(config)
        module_dict = {}
        # Entries in config['modules'] without a 'class' are treated as plain
        # inputs that must be supplied to forward() rather than instantiated.
        self.input = {}
        # If a module sets 'expose_forward', forward() dispatches straight to
        # that module instead of walking the connection graph.
        self.exposed_module = None
        for module_key in config['modules']:
            module = config['modules'][module_key]
            if 'class' in module:
                # Prefer nussl's own module registry; fall back to torch.nn.
                if module['class'] in dir(modules):
                    class_func = getattr(modules, module['class'])
                    try:
                        # Snapshot the source so a saved model records exactly
                        # what code produced it.
                        module_snapshot = inspect.getsource(class_func)
                    except TypeError: # pragma: no cover
                        module_snapshot = (
                            "No module snapshot could be found. Did you define "
                            "your class in an interactive Python environment? "
                            "See https://bugs.python.org/issue12920 for more details."
                        )
                else:
                    class_func = getattr(nn, module['class'])
                    module_snapshot = f'pytorch v{torch.__version__} builtin'
                config['modules'][module_key]['module_snapshot'] = module_snapshot
                if 'args' not in module:
                    module['args'] = {}
                if 'expose_forward' in module:
                    if module['expose_forward']:
                        self.exposed_module = module_key
                module_dict[module_key] = class_func(**module['args'])
            else:
                self.input[module_key] = module_key
        self.layers = nn.ModuleDict(module_dict)
        # Also expose each layer as a direct attribute (e.g. self.embedding)
        # so forward() can look them up with getattr.
        for key in self.layers:
            setattr(self, key, self.layers[key])
        self.connections = config['connections']
        self.output_keys = config['output']
        self.config = config
        self.verbose = verbose
        self.metadata = {
            'config': config,
            'nussl_version': __version__
        }
    @staticmethod
    def _validate_config(config):
        """Check that ``config`` has exactly the expected keys with the
        expected container types; raises ValueError otherwise."""
        expected_keys = ['connections', 'modules', 'name', 'output']
        got_keys = sorted(list(config.keys()))
        if got_keys != expected_keys:
            raise ValueError(
                f"Expected keys {expected_keys}, got {got_keys}")
        if not isinstance(config['modules'], dict):
            raise ValueError("config['modules'] must be a dict!")
        if not isinstance(config['connections'], list):
            raise ValueError("config['connections'] must be a list!")
        if not isinstance(config['output'], list):
            raise ValueError("config['output'] must be a list!")
        if not isinstance(config['name'], str):
            raise ValueError("config['name'] must be a string!")
    def forward(self, data=None, **kwargs):
        """
        Runs the model by executing each connection in ``self.connections``
        in order, feeding each layer from the data dictionary and from the
        outputs of earlier layers.

        Each connection is ``[layer_name, args]`` where ``args`` is a list of
        keys (looked up in prior outputs first, then in ``data``) and/or
        dicts mapping keyword-argument names to such keys (falling back to
        the literal value if the key resolves nowhere).

        Args:
            data: (dict) a dictionary containing the input data for the model.
                Should match the input_keys in self.input. Keyword arguments
                are merged into ``data``.
        Returns:
            (dict) mapping each key in ``self.output_keys`` to the
            corresponding layer output. If ``self.exposed_module`` is set,
            that module's forward is called directly with ``data`` instead.
        """
        data = {} if data is None else data
        data.update(kwargs)
        if self.exposed_module is not None:
            layer = getattr(self, self.exposed_module)
            return layer(**data)
        if not all(name in list(data) for name in list(self.input)):
            raise ValueError(
                f'Not all keys present in data! Needs {", ".join(self.input)}')
        output = {}
        for connection in self.connections:
            layer = getattr(self, connection[0])
            input_data = []
            kwargs = {}
            if len(connection) == 2:
                for c in connection[1]:
                    if isinstance(c, dict):
                        # Keyword argument: resolve the value against prior
                        # outputs, then the data dict, then pass it literally.
                        for key, val in c.items():
                            if val in output:
                                kwargs[key] = output[val]
                            elif val in data:
                                kwargs[key] = data[val]
                            elif key in data:
                                kwargs[key] = data[key]
                            else:
                                kwargs[key] = val
                    else:
                        # Positional argument: prior outputs shadow raw data.
                        input_data.append(output[c] if c in output else data[c])
            _output = layer(*input_data, **kwargs)
            # Record the layer's result(s) under namespaced keys so later
            # connections (and the final output selection) can refer to them.
            added_keys = []
            if isinstance(_output, dict):
                for k in _output:
                    _key = f'{connection[0]}:{k}'
                    output[_key] = _output[k]
                    added_keys.append(_key)
            elif isinstance(_output, tuple):
                for i, val in enumerate(_output):
                    _key = f'{connection[0]}:{i}'
                    output[_key] = val
                    added_keys.append(_key)
            else:
                _key = connection[0]
                output[_key] = _output
                added_keys.append(_key)
            if self.verbose:
                # Debug trace: shapes and summary statistics for every tensor
                # this connection produced.
                input_shapes = []
                for d in input_data:
                    if torch.is_tensor(d):
                        input_shapes.append(tuple(d.shape))
                input_desc = ", ".join(map(str, input_shapes))
                output_desc = ", ".join(
                    [f"'{k}': {tuple(output[k].shape)}" for k in added_keys])
                stats = {}
                for k in added_keys:
                    stats[k] = {
                        'min': output[k].detach().min().item(),
                        'max': output[k].detach().max().item(),
                        'mean': output[k].detach().mean().item(),
                        'std': output[k].detach().std().item(),
                    }
                stats_desc = "\tStatistics:"
                for o in stats:
                    stats_desc += f"\n\t\t{o}"
                    for k in stats[o]:
                        stats_desc += f"\n\t\t\t{k}: {stats[o][k]:.4f}"
                print(
                    f"{connection[1]} -> {connection[0]} \n"
                    f"\tTook inputs: {input_desc} \n"
                    f"\tProduced {output_desc} \n"
                    f"{stats_desc}"
                )
        return {o: output[o] for o in self.output_keys}
    @staticmethod
    def load(location):
        """Load a model saved with :meth:`save` from ``location`` onto CPU.

        Returns a ``(model, metadata)`` tuple, where the model is rebuilt
        from the config stored in the checkpoint's metadata.
        """
        # Circular import
        from ...core.migration import SafeModelLoader
        safe_loader = SafeModelLoader()
        model_dict = safe_loader.load(location, 'cpu')
        metadata = model_dict['metadata']
        model = SeparationModel(metadata['config'])
        model.load_state_dict(model_dict['state_dict'])
        return model, metadata
    def save(self, location, metadata=None, train_data=None,
             val_data=None, trainer=None):
        """
        Saves a SeparationModel into a location into a dictionary with the
        weights and model configuration.
        Args:
            location: (str) Where you want the model saved, as a path.
            metadata: (dict) Additional metadata to save along with the model. By default,
                model config and nussl version is saved as metadata.
            train_data: (BaseDataset) Dataset used for training. Metadata will be extracted
                from this object if it is passed into the save function, and saved
                alongside the model.
            val_data: (BaseDataset) Dataset used for validation. Metadata will be extracted
                from this object if it is passed into the save function, and saved
                alongside the model.
            trainer: (ignite.Engine) Engine used for training. Metadata will be extracted
                from this object if it is passed into the save function, and saved alongside
                the model.
        Returns:
            (str): where the model was saved.
        """
        save_dict = {
            'state_dict': self.state_dict(),
            'config': json.dumps(self.config)
        }
        metadata = metadata if metadata else {}
        metadata.update(self.metadata)
        if train_data is not None:
            dataset_metadata = {
                'stft_params': train_data.stft_params,
                'sample_rate': train_data.sample_rate,
                'num_channels': train_data.num_channels,
                'train_dataset': _prep_metadata(train_data.metadata),
            }
            metadata.update(dataset_metadata)
            # val_data may be None or lack metadata; best-effort only.
            try:
                metadata['val_dataset'] = _prep_metadata(val_data.metadata)
            except: # pragma: no cover
                pass
        if trainer is not None:
            train_metadata = {
                'trainer.state_dict': {
                    'epoch': trainer.state.epoch,
                    'epoch_length': trainer.state.epoch_length,
                    'max_epochs': trainer.state.max_epochs,
                    'output': trainer.state.output,
                    'metrics': trainer.state.metrics,
                    'seed': trainer.state.seed,
                },
                'trainer.state.epoch_history': trainer.state.epoch_history,
            }
            metadata.update(train_metadata)
        save_dict = {**save_dict, 'metadata': metadata}
        torch.save(save_dict, location)
        return location
    def __repr__(self):
        # Default nn.Module repr plus a count of trainable parameters.
        output = super().__repr__()
        num_parameters = 0
        for p in self.parameters():
            if p.requires_grad:
                num_parameters += np.cumprod(p.size())[-1]
        output += '\nNumber of parameters: %d' % num_parameters
        return output
| 12,448 | 36.954268 | 92 | py |
nussl | nussl-master/nussl/ml/networks/modules/filter_bank.py | import nussl
from torch import nn
import torch
from .... import AudioSignal
class FilterBank(nn.Module):
"""
Base class for implementing short-time filter-bank style transformations
of an audio signal.
This class accepts two different tensors, as there are two modes it can
be called in:
- transform: This takes an audio signal and maps it to a spectral
representation by applying the internal filterbank.
- inverse: This takes a spectral representation and maps it back to
the audio domain.
There are two unimplemented functions in this class that must be
implemented by the subclass:
- ``get_transform_filters``: This should produce a filter bank that can be
applied to the audio signal, of shape ``(filter_length, num_filters)``. The
filter bank is applied by chunking the signal into overlapping segments
using ``nn.Unfold``, and performing a matrix multiplication of the chunks
(after permuting the dimensions appropriately) with the filter bank.
- ``get_inverse_filters``: This should produce a filter bank that maps the
spectral representation back to the audio domain. This is done by first
permuting the dimensions of the spectral representations appropriately,
then matrix multiplying the spectral representation with the filter bank.
Finally, the signal is resynthesized via overlap-add using ``nn.Fold``.
Windowing is applied to the signal, according to ``window_type``, which
can be any of the windows found in ``nussl.core.constants.ALL_WINDOWS``.
This can also be applied to multiple sources at once, if they are on the
last axis after all of the feature dimensions. If the number of features
in the data is greater than the expected number by one, then the last
dimension is assumed to be the source dimension. This dimension is then
mapped and merged with the batch dimension so the transforms can be applied
to all of the sources. Before returning the data, the source dimension is
moved back to the last dimension.
In the forward pass, keyword arguments can also be passed through. These
keyword arguments get passed through to ``apply_filter`` and ``get_filters``,
in case these should be conditioned on something during the forward pass.
Note:
The output dimensionality may not always match what is given by
something like scipy.stft (e.g. might be off by one in frames) for
some hop lengths! However, an attempt was made and for hop lengths
that are half the filter length or a quarter of the filter length, the
number of segments will match that of scipy.stft.
Args:
num_filters (int): Number of filters in both filterbanks.
filter_length (int, optional): Length of each filter. Defaults to None,
equal to num_filters.
hop_length (int, optional): Hop length between each filter.
Defaults to None, half of filter_length.
window_type (str, optional): Type of window to use. Defaults to
'sqrt_hann'.
dilation (int, optional): Dilation of nn.Unfold and nn.Fold. Could be
useful for implementing dilated convolutional frontends.
Defaults to 1.
direction (str, optional): In which direction to take the input data.
Either 'transform' or 'inverse'. Can also be set during the
forward pass. Defaults to 'transform'.
requires_grad (bool, optional): Whether to make the filterbank learnable
during backprop. Defaults to False.
"""
def __init__(self, num_filters, filter_length=None, hop_length=None,
window_type='sqrt_hann', dilation=1, direction='transform',
requires_grad=False):
super().__init__()
self.num_filters = num_filters
self.filter_length = (
num_filters
if filter_length is None
else filter_length
)
self.hop_length = (
self.filter_length // 2
if hop_length is None
else hop_length
)
self.direction = direction
self.requires_grad = requires_grad
self.dilation = dilation
self.window_type = window_type
self.output_length = None
self.register_buffer('window', self._get_window())
self.transform_filters = self.get_transform_filters()
self.inverse_filters = self.get_inverse_filters()
def _get_window(self):
window = AudioSignal.get_window(
self.window_type, self.filter_length
)
window = torch.from_numpy(window).float()
return window.reshape(1, 1, -1, 1)
def get_transform_filters(self):
raise NotImplementedError()
def get_inverse_filters(self):
raise NotImplementedError()
def get_filters(self):
filters = (
self.transform_filters
if self.direction == 'transform'
else self.inverse_filters
)
return filters
def apply_filter(self, data, **kwargs):
filters = self.get_filters(**kwargs)
data = data.transpose(-1, 2)
data = data @ filters
data = data.transpose(-1, 2)
return data
def transform(self, data, **kwargs):
ndim = data.ndim
if ndim > 3:
# move sources to the batch dimension
# then fix it later
num_sources = data.shape[-1]
data = data.permute(0, -1, 1, 2)
data = data.reshape(-1, *data.shape[2:])
self.original_length = data.shape[-1]
pad_extra = (
(-(data.shape[-1] - self.filter_length) % self.hop_length)
% self.filter_length
)
pad_tuple = (
self.filter_length // 2,
self.filter_length // 2 + pad_extra
)
data = nn.functional.pad(data, pad_tuple)
self.output_length = data.shape[-1]
num_batch, num_audio_channels, num_samples = data.shape
unfold = nn.Unfold(
kernel_size=(1, self.filter_length),
stride=(1, self.hop_length),
dilation=self.dilation,
padding=(0, 0)
)
data = data.reshape(
num_batch * num_audio_channels,
1, 1, num_samples
)
data = unfold(data)
data = data.view(
num_batch, num_audio_channels,
self.filter_length, -1
)
data = data.permute(0, 3, 2, 1)
data = data * self.window
data = self.apply_filter(data, **kwargs)
if ndim > 3:
# then we moved sources to the batch dimension
# we need to move it back before returning
data = data.reshape(
-1, num_sources, *data.shape[1:])
data = data.permute(0, 2, 3, 4, 1)
return data
    def inverse(self, data, **kwargs):
        """
        Synthesis pass: projects frames back through the inverse filter bank,
        applies synthesis windowing, overlap-adds the frames with ``nn.Fold``,
        and divides by the summed squared window to normalize the overlap.

        Relies on ``self.output_length`` / ``self.original_length`` set by the
        most recent call to ``transform``.

        Args:
            data (torch.Tensor): representation of shape (num_batch,
                sequence_length, num_features, num_audio_channels), optionally
                with a trailing source dimension (..., num_sources).

        Returns:
            torch.Tensor: reconstructed audio trimmed to the original
            (pre-padding) length.
        """
        ndim = data.ndim
        if ndim > 4:
            # move sources to the batch dimension
            # then fix it later
            num_sources = data.shape[-1]
            data = data.permute(0, -1, 1, 2, 3)
            data = data.reshape(-1, *data.shape[2:])
        data = self.apply_filter(data, **kwargs)
        # NOTE(review): rescales by the window sum, presumably undoing the
        # 1/window-sum scaling applied in a subclass's transform (see
        # STFT.apply_filter) -- confirm for other subclasses.
        data *= self.window.sum()
        # Synthesis windowing.
        data = data * self.window
        num_batch, sequence_length, num_features, num_audio_channels = (
            data.shape
        )
        data = data.permute(0, 3, 2, 1)
        data = data.reshape(-1, data.shape[2], data.shape[3])
        # Fold overlap-adds the frames back to the padded signal length.
        fold = nn.Fold(
            (1, self.output_length),
            kernel_size=(1, self.filter_length),
            stride=(1, self.hop_length),
            dilation=self.dilation,
            padding=(0, 0),
        )
        # Accumulate the squared window through the same fold so the
        # overlap-add can be normalized (avoids amplitude modulation at
        # frame overlaps); near-zero entries are clamped to 1 to avoid
        # division blow-ups.
        norm = data.new_ones(data.shape)
        norm *= self.window.view(1, -1, 1) ** 2
        data = fold(data)
        norm = fold(norm)
        norm[norm < 1e-10] = 1
        data = data / norm
        data = data.reshape(num_batch, num_audio_channels, -1)
        # Trim the centering padding, then any extra right-padding.
        boundary = self.filter_length // 2
        data = data[..., boundary:-boundary]
        data = data[..., :self.original_length]
        if ndim > 4:
            # then we moved sources to the batch dimension
            # we need to move it back before returning
            data = data.reshape(
                -1, num_sources, num_audio_channels, data.shape[-1])
            data = data.permute(0, 2, 3, 1)
        return data
def forward(self, data, direction=None, **kwargs):
if direction is not None:
self.direction = direction
if self.direction == 'transform':
func = self.transform
elif self.direction == 'inverse':
func = self.inverse
return func(data, **kwargs)
class STFT(FilterBank):
    """
    An implementation of STFT and iSTFT using nn.Unfold, nn.Fold, and matrix
    multiplication with a Fourier basis.
    The usual way to compute an STFT is to split the signal into overlapping
    chunks, multiply each chunk with a window function, and then
    apply an FFT to each chunk individually. Here, instead of taking the
    FFT with something like ``torch.fft``, we instead use the matrix
    formulation of FFT.
    To resynthesize the signal, we use inverse windowing and the pseudoinverse
    of the FFT matrix as our filterbank. We then use overlap/add and divide by
    a normalization factor to reconstruct the signal. The usual error for this
    implementation is on the order of 1e-7.
    In the `transform` direction, this class returns the magnitude and phase
    of the STFT, concatenated along a single dimension. Generally, the first
    half of this is what you would operate on (the magnitudes), while the
    second half you would keep around for reconstructing the signal later on.
    """
    def apply_filter(self, data):
        """
        Forward: project frames onto the Fourier basis, scale, and convert
        (real, imag) halves to concatenated (magnitude, phase).
        Inverse: convert (magnitude, phase) back to (real, imag) and project
        through the pseudoinverse basis.
        """
        if self.direction == 'transform':
            # Normalize by the window sum (undone in FilterBank.inverse).
            scale = torch.sqrt(1.0 / (self.window.sum() ** 2))
            data = super().apply_filter(data)
            data *= scale
            eps = 1e-8
            # First `cutoff` rows are the real part, the rest imaginary.
            cutoff = 1 + self.filter_length // 2
            real_part = data[..., :cutoff, :]
            imag_part = data[..., cutoff:, :]
            # real_part/imag_part are views of `data`; these in-place writes
            # clamp tiny values so sqrt/atan2 stay numerically stable near 0.
            real_part[real_part.abs() <= eps] = eps
            imag_part[imag_part.abs() <= eps] = eps
            magnitude = torch.sqrt(
                real_part ** 2 + imag_part ** 2)
            phase = torch.atan2(imag_part, real_part)
            data = torch.cat([magnitude, phase], dim=2)
        elif self.direction == 'inverse':
            cutoff = 1 + self.filter_length // 2
            magnitude = data[..., :cutoff, :]
            phase = data[..., cutoff:, :]
            # Polar -> rectangular before applying the inverse basis.
            data = torch.cat(
                [
                    magnitude * torch.cos(phase),
                    magnitude * torch.sin(phase)
                ],
                dim=2
            )
            data = super().apply_filter(data)
        return data
    def _get_fft_basis(self):
        # rfft of the identity yields the DFT matrix; stacking its real and
        # imaginary parts gives a real-valued analysis basis of shape
        # (filter_length, 2 * (1 + filter_length // 2)).
        fourier_basis = torch.fft.rfft(
            torch.eye(self.filter_length)
        )
        cutoff = 1 + self.filter_length // 2
        fourier_basis = torch.cat([
            torch.real(fourier_basis[:, :cutoff]),
            torch.imag(fourier_basis[:, :cutoff])
        ], dim=1)
        return fourier_basis.float()
    def get_transform_filters(self):
        # Analysis filters: the real/imag DFT basis.
        fourier_basis = self._get_fft_basis()
        return nn.Parameter(fourier_basis, requires_grad=self.requires_grad)
    def get_inverse_filters(self):
        # Synthesis filters: pseudoinverse of the analysis basis.
        fourier_basis = self._get_fft_basis()
        inverse_filters = torch.pinverse(
            fourier_basis.unsqueeze(0)).squeeze(0)
        return nn.Parameter(inverse_filters, requires_grad=self.requires_grad)
class LearnedFilterBank(FilterBank):
    """
    A filter bank whose analysis and synthesis filters are free parameters,
    initialized randomly (Xavier-normal) and intended to be learned during
    backprop. ``requires_grad`` is True here regardless of what the parent
    class init was given.
    """
    @staticmethod
    def _random_basis(rows, cols):
        # Trainable, Xavier-initialized matrix of the requested shape.
        basis = nn.Parameter(
            torch.ones(rows, cols),
            requires_grad=True
        )
        nn.init.xavier_normal_(basis)
        return basis

    def get_transform_filters(self):
        return self._random_basis(self.filter_length, self.num_filters)

    def get_inverse_filters(self):
        return self._random_basis(self.num_filters, self.filter_length)
import warnings
import torch
import torch.nn as nn
import librosa
import numpy as np
from torch.utils.checkpoint import checkpoint
class AmplitudeToDB(nn.Module):
    """
    Converts a magnitude spectrogram to a log-amplitude spectrogram in
    decibels.

    Args:
        data (torch.Tensor): Magnitude spectrogram to convert to
            log spectrogram.
        ref (float): reference value. Defaults to 1.0.
        amin (float): lowest possible value for numerical stability.
            Defaults to 1e-4.

    Returns:
        torch.Tensor: log-magnitude energy in each bin in representation.
    """
    def forward(self, data, ref=1.0, amin=1e-4):
        # Work in power (squared-magnitude) units.
        power = data ** 2
        power_floor = amin ** 2
        # Reference level in log10 power units, floored for stability.
        ref_level = np.log10(np.maximum(power_floor, ref ** 2))
        log_power = torch.log10(torch.clamp(power, min=power_floor))
        return 10.0 * (log_power - ref_level)
class ShiftAndScale(nn.Module):
    """
    Multiplies the input by a learned scalar and adds a learned bias
    (an elementwise affine transform with scalar parameters).

    Args:
        learn_scale (bool): whether the scale is trainable. Defaults to True.
        learn_shift (bool): whether the shift is trainable. Defaults to True.

    Returns:
        torch.Tensor: shifted and scaled data.
    """
    def __init__(self, learn_scale=True, learn_shift=True):
        super().__init__()
        # scale starts at 1, shift at 0, so the module is initially identity.
        self.scale = nn.Parameter(torch.ones(1), requires_grad=learn_scale)
        self.shift = nn.Parameter(torch.zeros(1), requires_grad=learn_shift)

    def forward(self, data):
        scaled = self.scale * data
        return scaled + self.shift
class BatchNorm(nn.Module):
    """
    Applies a batch norm layer. Defaults to using only 1 feature, commonly
    used at the very beginning of the network to normalize the input
    spectrogram.

    The input (nb, nt, nf, nc, ...) is processed as follows:

    1. The feature dimension (``feature_dim``) is swapped with dim 1.
    2. Data is flattened to (nb, num_features, -1).
    3. ``BatchNorm1d`` with ``num_features`` is applied.
    4. Data is reshaped and transposed back to its original layout.

    Args:
        num_features (int): num_features argument to BatchNorm1d, defaults to 1.
        feature_dim (int): which dimension the features that are being
            normalized are on. Defaults to 2.
        **kwargs (dict): additional keyword arguments passed to BatchNorm1d.

    Returns:
        torch.Tensor: input tensor with batch norm applied.
    """
    def __init__(self, num_features=1, feature_dim=2, **kwargs):
        super(BatchNorm, self).__init__()
        self.num_features = num_features
        self.feature_dim = feature_dim
        self.add_module('batch_norm', nn.BatchNorm1d(self.num_features, **kwargs))

    def forward(self, data):
        moved = data.transpose(self.feature_dim, 1)
        original_shape = moved.shape
        flattened = moved.reshape(original_shape[0], self.num_features, -1)
        normalized = self.batch_norm(flattened)
        restored = normalized.reshape(original_shape)
        return restored.transpose(self.feature_dim, 1)
class InstanceNorm(nn.Module):
    """
    Applies an instance norm layer. Defaults to using only 1 feature, commonly
    used at the very beginning of the network to normalize the input
    spectrogram.

    The input (nb, nt, nf, nc) is processed as follows:

    1. The feature dimension (``feature_dim``) is swapped with dim 1.
    2. Data is flattened to (nb, num_features, -1).
    3. ``InstanceNorm1d`` with ``num_features`` is applied.
    4. Data is reshaped and transposed back to its original layout.

    Args:
        num_features (int): num_features argument to InstanceNorm1d,
            defaults to 1.
        feature_dim (int): which dimension the features that are being
            normalized are on. Defaults to 2.
        **kwargs (dict): additional keyword arguments passed to InstanceNorm1d.

    Returns:
        torch.Tensor: input tensor with instance norm applied.
    """
    def __init__(self, num_features=1, feature_dim=2, **kwargs):
        super(InstanceNorm, self).__init__()
        self.num_features = num_features
        self.feature_dim = feature_dim
        self.add_module('instance_norm', nn.InstanceNorm1d(self.num_features, **kwargs))

    def forward(self, data):
        moved = data.transpose(self.feature_dim, 1)
        original_shape = moved.shape
        flattened = moved.reshape(original_shape[0], self.num_features, -1)
        normalized = self.instance_norm(flattened)
        restored = normalized.reshape(original_shape)
        return restored.transpose(self.feature_dim, 1)
class GroupNorm(nn.Module):
    """
    Applies a group norm layer. Defaults to 1 group, commonly used at the
    very beginning of the network to normalize each feature of the input
    spectrogram.

    The input (nb, nt, nf, nc) is processed as follows:

    1. The feature dimension (``feature_dim``) is swapped with dim 1.
    2. Data is reshaped to (nb, num_features, -1, 1).
    3. ``nn.GroupNorm`` with ``num_groups``/``num_features`` is applied.
    4. Data is reshaped and transposed back to its original layout.

    Args:
        num_features (int): num_features argument to GroupNorm.
        num_groups (int): Number of groups, defaults to 1.
        feature_dim (int): which dimension the features that are being
            normalized are on. Defaults to 2.
        **kwargs (dict): additional keyword arguments passed to nn.GroupNorm.

    Returns:
        torch.Tensor: input tensor with group norm applied.
    """
    def __init__(self, num_features, num_groups=1, feature_dim=2, **kwargs):
        super().__init__()
        self.num_features = num_features
        self.num_groups = num_groups
        self.feature_dim = feature_dim
        self.add_module('group_norm', nn.GroupNorm(
            self.num_groups, self.num_features, **kwargs))

    def forward(self, data):
        moved = data.transpose(self.feature_dim, 1)
        original_shape = moved.shape
        reshaped = moved.reshape(original_shape[0], self.num_features, -1, 1)
        normalized = self.group_norm(reshaped)
        restored = normalized.reshape(original_shape)
        return restored.transpose(self.feature_dim, 1)
class Alias(nn.Module):
    """
    Identity module: passes data through unchanged. Exists so the output of
    a model can be renamed inside a SeparationModel.
    """
    def forward(self, data):
        # Intentionally a no-op.
        return data
class LayerNorm(nn.Module):
    """
    Layer normalization over arbitrary dimensions of an audio representation.

    Unlike ``nn.LayerNorm``, the normalized dimensions do not have to be the
    trailing ones: each dimension listed in ``feature_dims`` is moved to the
    end of the tensor (via a dummy trailing axis), statistics are computed
    over those trailing dimensions, the result is scaled and shifted by the
    learned ``gamma``/``beta`` of size ``num_features`` (broadcast over the
    last dimension), and the dimensions are moved back.

    Args:
        num_features (int): size of the learned scale (``gamma``) and shift
            (``beta``) parameters.
        feature_dims (list): dimensions of the input over which statistics
            are computed, in order.
        kwargs: accepted for interface compatibility; unused.

    Returns:
        torch.Tensor: input tensor with layer norm applied.
    """
    def __init__(self, num_features, feature_dims, **kwargs):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(num_features))
        self.beta = nn.Parameter(torch.zeros(num_features))
        self.feature_dims = feature_dims

    def forward(self, data):
        # Move each feature dimension to the end of the tensor, tracking
        # the trailing axes statistics should be computed over.
        stat_dims = []
        for dim in self.feature_dims:
            data = data.unsqueeze(-1).transpose(dim, -1)
            stat_dims.append(data.ndim - 1)
        mu = data.mean(dim=stat_dims, keepdims=True)
        var = data.var(dim=stat_dims, keepdims=True)
        data = (data - mu) / ((var + 1e-8) ** 0.5)
        data = self.gamma * data + self.beta
        # Undo the dimension shuffling, squeezing off the dummy axes.
        for dim in reversed(self.feature_dims):
            data = data.transpose(-1, dim).squeeze(-1)
        return data
class MelProjection(nn.Module):
    """
    MelProjection takes as input a time-frequency representation (e.g. a spectrogram, or a mask) and outputs a mel
    projection that can be learned or fixed. The initialization uses librosa to get a mel filterbank. Direction
    controls whether it is a forward transform or the inverse transform (e.g. back to spectrogram).

    Args:
        sample_rate: (int) Sample rate of audio for computing the mel filters.
        num_frequencies: (int) Number of frequency bins in input spectrogram.
        num_mels: (int) Number of mel bins in output mel spectrogram. if num_mels < 0, this does nothing
          other than clamping the output if clamp is True.
        direction: (str) Which direction to go in (either 'forward' - to mel, or 'backward' - to frequencies).
          Defaults to 'forward'.
        clamp: (bool) Whether to clamp the output values of the transform between 0.0 and 1.0. Used for transforming
          a mask in and out of the mel-domain. Defaults to False.
        trainable: (bool) Whether the mel transform can be adjusted by the optimizer. Defaults to False.
        normalize (bool): Whether or not to divide the mel filters by the sum of each column. Defaults to True.

    Raises:
        ValueError: if ``direction`` is not 'forward' or 'backward'.
    """
    def __init__(self, sample_rate, num_frequencies, num_mels, direction='forward',
                 clamp=False, trainable=False, normalize=True):
        super(MelProjection, self).__init__()
        self.num_mels = num_mels
        if direction not in ['backward', 'forward']:
            raise ValueError("direction must be one of ['backward', 'forward']!")
        self.direction = direction
        self.clamp = clamp
        if self.num_mels > 0:
            shape = (
                (num_frequencies, num_mels)
                if self.direction == 'forward' else
                (num_mels, num_frequencies)
            )
            self.add_module('transform', nn.Linear(*shape))
            # librosa >= 0.10 made these arguments keyword-only; passing
            # them positionally raises a TypeError on modern librosa.
            mel_filters = librosa.filters.mel(
                sr=sample_rate, n_fft=2 * (num_frequencies - 1),
                n_mels=num_mels)
            if normalize:
                # Normalize each mel filter (row) so its weights sum to ~1.
                mel_filters = (mel_filters.T / (mel_filters.sum(axis=1) + 1e-8)).T
            # 'backward' uses the pseudoinverse of the (normalized) filters.
            filter_bank = (
                mel_filters
                if self.direction == 'forward'
                else np.linalg.pinv(mel_filters)
            )
            # Load the filter bank into the Linear layer, zero the bias, and
            # freeze the weights unless `trainable` is set.
            for name, param in self.transform.named_parameters():
                if 'bias' in name:
                    nn.init.constant_(param, 0.0)
                if 'weight' in name:
                    param.data = torch.from_numpy(filter_bank).float()
                param.requires_grad_(trainable)

    def forward(self, data):
        """
        Args:
            data: Representation - shape:
                (batch_size, sequence_length, num_frequencies or num_mels, num_sources)

        Returns:
            Mel-spectrogram or time-frequency representation of shape:
            (batch_size, sequence_length, num_mels or num_frequencies, num_sources).
        """
        if self.num_mels > 0:
            # Linear operates on the last dim, so swap features there first.
            data = data.transpose(2, -1)
            data = self.transform(data)
            data = data.transpose(2, -1)
        if self.clamp:
            data = data.clamp(0.0, 1.0)
        return data.contiguous()
class Embedding(nn.Module):
    """
    Maps output from an audio representation module (e.g. RecurrentStack,
    DilatedConvolutionalStack) to an embedding space. The output shape is
    (batch_size, sequence_length, num_features, embedding_size). The embeddings can
    be passed through an activation function. If activation is 'softmax' or
    'sigmoid', and embedding_size is equal to the number of sources, this module
    can be used to implement a mask inference network (or a mask inference
    head in a Chimera network setup).

    Args:
        num_features (int): Number of features being mapped for each frame.
            Either num_frequencies, or if used with MelProjection, num_mels if using
            RecurrentStack. Should be 1 if using DilatedConvolutionalStack.
        hidden_size (int): Size of output from RecurrentStack (hidden_size) or
            DilatedConvolutionalStack (num_filters). If RecurrentStack is bidirectional,
            this should be set to 2 * hidden_size.
        embedding_size (int): Dimensionality of embedding.
        activation (list of str): Activation functions to be applied. Options
            are 'sigmoid', 'tanh', 'softmax', 'relu', 'gate'. Unit normalization can be applied by
            adding 'unit_norm' to the list (e.g. ['sigmoid', unit_norm']). Alternatively,
            L1 normalization can be applied by adding 'l1_norm' to the list.
        num_audio_channels (int): Number of audio channels. Defaults to 1.
        dim_to_embed (int): Which dimension of the input to apply the embedding to.
            Defaults to -1 (the last dimension).
        bias (bool): Whether or not to place a bias on the linear layer. Defaults to
            True.
        reshape (bool): Whether to reshape the output of the linear layer to look
            like a time-frequency representation (nb, nt, nf, nc, ...). Defaults to
            True.
    """
    def __init__(self, num_features, hidden_size, embedding_size, activation,
                 num_audio_channels=1, dim_to_embed=-1, bias=True, reshape=True):
        super(Embedding, self).__init__()
        # Single projection producing all features/channels/embedding dims
        # at once; forward() optionally reshapes the flat output.
        self.add_module(
            'linear',
            nn.Linear(
                hidden_size,
                num_features * num_audio_channels * embedding_size,
                bias=bias
            )
        )
        self.num_features = num_features
        self.num_audio_channels = num_audio_channels
        self.activation = activation
        self.embedding_size = embedding_size
        self.reshape = reshape
        if 'gated_tanh' in self.activation:
            # Gated activation: tanh(Wx) * sigmoid(Vx), applied just before
            # the final linear projection in forward().
            self.embed_linear = nn.Sequential(
                nn.Linear(hidden_size, hidden_size),
                nn.Tanh()
            )
            self.embed_gate = nn.Sequential(
                nn.Linear(hidden_size, hidden_size),
                nn.Sigmoid()
            )
        if isinstance(dim_to_embed, int):
            dim_to_embed = [dim_to_embed]
        self.dim_to_embed = dim_to_embed
        # Zero bias; Xavier-initialize the projection weight.
        for name, param in self.linear.named_parameters():
            if 'bias' in name:
                nn.init.constant_(param, 0.0)
            elif 'weight' in name:
                nn.init.xavier_normal_(param)
    def forward(self, data):
        """
        Args:
            data: output from RecurrentStack or ConvolutionalStack. Shape is:
                (num_batch, ..., hidden_size or num_filters)

        Returns:
            An embedding (with an optional activation) for each point in the
            representation of shape (num_batch, ..., embedding_size).
        """
        shape = list(data.shape)
        _dims = []
        for _dim in self.dim_to_embed:
            # move each dimension to embed to end of tensor
            if _dim == -1:
                _dim = len(shape) - 1
            data = data.transpose(_dim, -1)
            _dims.append(_dim)
        # the new shape doesn't have the embedded dimensions
        shape = [
            v for i, v in enumerate(shape)
            if i not in _dims
        ]
        shape = tuple(shape)
        # Flatten all embedded dimensions into one before the projection.
        data = data.reshape(shape + (-1,))
        if 'gated_tanh' in self.activation:
            data = self.embed_linear(data) * self.embed_gate(data)
        data = self.linear(data)
        if self.reshape:
            shape = shape + (
                self.num_features, self.num_audio_channels, self.embedding_size,)
            data = data.reshape(shape)
        # At most one of the pointwise activations applies, then an
        # optional normalization over the embedding dimension.
        if 'sigmoid' in self.activation:
            data = torch.sigmoid(data)
        elif 'tanh' in self.activation:
            data = torch.tanh(data)
        elif 'relu' in self.activation:
            data = torch.relu(data)
        elif 'softmax' in self.activation:
            data = torch.softmax(data, dim=-1)
        if 'unit_norm' in self.activation:
            data = nn.functional.normalize(data, dim=-1, p=2)
        elif 'l1_norm' in self.activation:
            data = nn.functional.normalize(data, dim=-1, p=1)
        return data
class Mask(nn.Module):
    """
    Applies a mask of shape (nb, nt, nf, ns) to a representation of shape
    (nb, nt, nf).

    The representation gains a trailing source dimension (expanded to match
    the mask) and is detached before the element-wise multiply, so gradients
    flow only through the mask. The result is one masked copy of the
    representation per source, shape (nb, nt, nf, ns).
    """
    def __init__(self):
        super(Mask, self).__init__()

    def forward(self, mask, representation):
        # Broadcast the representation across the source dimension.
        expanded = representation.unsqueeze(-1).expand_as(mask)
        return expanded.detach() * mask
class Split(nn.Module):
    """
    Splits the input into a tuple of chunks of the given sizes along ``dim``
    (default: last axis). Thin wrapper around ``torch.split_with_sizes``.

    Example:

    .. code-block:: python

        data = torch.randn(100, 10)
        split = Split((3, 7), dim=-1)
        split1, split2 = split(data)

        split1  # has shape (100, 3)
        split2  # has shape (100, 7)
    """
    def __init__(self, split_sizes, dim=-1):
        super().__init__()
        self.dim = dim
        self.split_sizes = split_sizes

    def forward(self, data):
        return torch.split_with_sizes(data, self.split_sizes, dim=self.dim)
class Expand(nn.Module):
    """
    Expands a second tensor to match the dimensions of the first by
    appending singleton axes and broadcasting (wraps ``.expand_as``).

    Raises:
        ValueError: if ``tensor_a`` has fewer dimensions than ``tensor_b``.
    """
    def __init__(self):
        super().__init__()

    def forward(self, tensor_a, tensor_b):
        if tensor_a.ndim < tensor_b.ndim:
            raise ValueError("tensor_a should have more dimensions than tensor b!")
        missing = tensor_a.ndim - tensor_b.ndim
        for _ in range(missing):
            tensor_b = tensor_b.unsqueeze(-1)
        return tensor_b.expand_as(tensor_a)
class Concatenate(nn.Module):
    """
    Concatenates any number of tensors along ``dim`` (default: last axis).
    Takes the tensors as positional arguments.
    """
    def __init__(self, dim=-1):
        self.dim = dim
        super(Concatenate, self).__init__()

    def forward(self, *data):
        # `data` is the tuple of positional tensors.
        return torch.cat(data, dim=self.dim)
class RecurrentStack(nn.Module):
    """
    Creates a stack of RNNs used to process an audio sequence represented as
    (sequence_length, num_features). With bidirectional = True, hidden_size = 600,
    num_layers = 4, rnn_type='lstm', and dropout = .3, this becomes the
    audio processor used in deep clustering networks, deep attractor networks, etc.
    Note that batch_first is set to True here.

    Args:
        num_features: (int) Number of features being mapped for each frame.
            Either num_frequencies, or if used with MelProjection, num_mels.
        hidden_size: (int) Hidden size of recurrent stack for each layer.
        num_layers: (int) Number of layers in stack.
        bidirectional: (int) True makes this a BiLSTM or a BiGRU. Note that this
            doubles the hidden size.
        dropout: (float) Dropout between layers.
        rnn_type: (str) LSTM ('lstm') or GRU ('gru').
        batch_first: (bool) Passed through to the RNN. Defaults to True.
        init_forget: (bool) Zero all biases, Xavier-init all weights, then
            set the second quarter of each bias vector to 1. Defaults to True.
    """
    def __init__(self, num_features, hidden_size, num_layers, bidirectional, dropout,
                 rnn_type='lstm', batch_first=True, init_forget=True):
        super(RecurrentStack, self).__init__()
        if rnn_type not in ['lstm', 'gru']:
            raise ValueError("rnn_type must be one of ['lstm', 'gru']!")
        RNNClass = nn.LSTM if rnn_type == 'lstm' else nn.GRU
        self.add_module(
            'rnn', RNNClass(
                num_features, hidden_size, num_layers, batch_first=batch_first,
                bidirectional=bidirectional, dropout=dropout))
        if init_forget:
            # Zero every bias, Xavier-initialize every weight matrix.
            for name, param in self.rnn.named_parameters():
                if 'bias' in name:
                    nn.init.constant_(param, 0.0)
                elif 'weight' in name:
                    nn.init.xavier_normal_(param)
            # Set the second quarter of each bias to 1. For LSTMs, PyTorch's
            # bias layout is (b_ii | b_if | b_ig | b_io), so this is the
            # forget-gate bias — a common trick to help gradient flow early
            # in training. NOTE(review): for GRUs the same slice falls inside
            # the (reset | update | new) layout instead — confirm intended.
            for names in self.rnn._all_weights:
                for name in filter(lambda nm: "bias" in nm, names):
                    bias = getattr(self.rnn, name)
                    n = bias.size(0)
                    start, end = n // 4, n // 2
                    bias.data[start:end].fill_(1.)
    def forward(self, data):
        """
        Args:
            data: Audio representation to be processed. Should be of shape:
                (num_batch, sequence_length, ...).

        Returns:
            Outputs the features after processing of the RNN. Shape is:
            (num_batch, sequence_length, hidden_size or hidden_size*2 if
            bidirectional=True)
        """
        shape = data.shape
        # Collapse all trailing dims into one feature dimension.
        data = data.reshape(shape[0], shape[1], -1)
        # Keep RNN weights contiguous (avoids a warning on multi-GPU).
        self.rnn.flatten_parameters()
        data = self.rnn(data)[0]
        return data
class ConvolutionalStack2D(nn.Module):
    """
    Implements a stack of dilated convolutional layers for source separation from
    the following papers:

        Mobin, Shariq, Brian Cheung, and Bruno Olshausen. "Convolutional vs. recurrent
        neural networks for audio source separation."
        arXiv preprint arXiv:1803.08629 (2018). https://arxiv.org/pdf/1803.08629.pdf

        Yu, Fisher, Vladlen Koltun, and Thomas Funkhouser. "Dilated residual networks."
        Proceedings of the IEEE conference on computer vision and pattern recognition.
        2017. https://arxiv.org/abs/1705.09914

    Args:
        in_channels (int): Number of channels in input
        channels (list of int): Number of channels for each layer
        dilations (list of ints or int tuples): Dilation rate for each layer. If
            int, it is same in both height and width. If tuple, tuple is defined as
            (height, width).
        filter_shapes (list of ints or int tuples): Filter shape for each layer. If
            int, it is same in both height and width. If tuple, tuple is defined as
            (height, width).
        residuals (list of bool): Whether or not to keep a residual connection at
            each layer.
        batch_norm (bool): Whether to use BatchNorm or not at each layer (default: True)
        use_checkpointing (bool): Whether to use torch's checkpointing functionality
            to reduce memory usage.

    Raises:
        ValueError -- All the input lists must be the same length.
    """
    def __init__(self, in_channels, channels, dilations, filter_shapes, residuals,
                 batch_norm=True, use_checkpointing=False):
        # Every per-layer configuration list must line up with `channels`.
        for x in [dilations, filter_shapes, residuals]:
            if len(x) != len(channels):
                raise ValueError(
                    f"All lists (channels, dilations, filters, residuals) should have"
                    f"the same length!"
                )
        super().__init__()
        if any([d != 1 for d in dilations]):
            warnings.warn(
                "You specified a dilation != 1. Input size and output size are "
                "not guaranteed to be the same! This is due to the lacking of "
                "padding = 'same' in PyTorch."
            )
        self.dilations = dilations
        self.filter_shapes = filter_shapes
        self.residuals = residuals
        self.batch_norm = batch_norm
        self.padding = None
        self.channels = channels
        self.in_channels = in_channels
        self.use_checkpointing = use_checkpointing
        self.num_layers = len(channels)
        self.layers = nn.ModuleList()
        for i in range(len(channels)):
            self.layers.append(self._make_layer(i))
    def _make_layer(self, i):
        # Layer i: Conv2d (+ BatchNorm/ReLU, except on the final layer).
        convolution = nn.Conv2d(
            in_channels=self.channels[i - 1] if i > 0 else self.in_channels,
            out_channels=self.channels[i],
            kernel_size=self.filter_shapes[i],
            dilation=self.dilations[i],
            padding=self.filter_shapes[i] // 2,
        )
        if i == len(self.channels) - 1:
            # Final layer: bare convolution, no norm or nonlinearity.
            layer = convolution
            self.add_module(f'layer{i}', layer)
            return layer
        layer = nn.Sequential()
        layer.add_module('conv', convolution)
        if self.batch_norm:
            batch_norm = nn.BatchNorm2d(self.channels[i])
            layer.add_module('batch_norm', batch_norm)
        layer.add_module('relu', nn.ReLU())
        return layer
    def layer_function(self, data, layer, previous_layer, i):
        # Run one layer (optionally checkpointed to trade compute for
        # memory), adding the previous residual anchor if configured.
        if self.use_checkpointing:
            data = checkpoint(layer, data)
        else:
            data = layer(data)
        if self.residuals[i]:
            if previous_layer is not None:
                data += previous_layer
            # This layer's output becomes the next residual anchor.
            previous_layer = data
        return data, previous_layer
    def forward(self, data):
        """
        Data comes in as: [num_batch, sequence_length, num_frequencies, num_audio_channels]

        We reshape it in the forward pass so that everything works to:
        [num_batch, num_audio_channels, sequence_length, num_frequencies]

        After this input is processed, the shape is then:
        [num_batch, num_output_channels, sequence_length, num_frequencies]

        We transpose again to make the shape:
        [num_batch, sequence_length, num_frequencies, num_output_channels]

        So it can be passed to an Embedding module.
        """
        data = data.permute(0, 3, 1, 2)
        previous_layer = None
        for i in range(self.num_layers):
            data, previous_layer = self.layer_function(
                data, self.layers[i], previous_layer, i
            )
        data = data.permute(0, 2, 3, 1)
        return data
def _find_and_instantiate_module(module_name, module_args):
# Check this file first
module = None
if module_name in globals():
module = globals()[module_name](**module_args)
# Then check torch.nn
elif module_name in dir(nn):
module_func = getattr(nn, module_name)
module = module_func(**module_args)
return module
class DualPathBlock(nn.Module):
    """
    One block of dual-path processing: data arranged as
    (batch, n_chunks, chunk_size, n_features) is processed along each chunk
    ('intra'), then across chunks ('inter'). Each pass applies its
    processor, a linear projection back down to ``n_features``, a GroupNorm,
    and a residual connection.

    Args:
        n_features (int): feature size of the block's input and output.
        hidden_size (int): output size of the intra/inter processors,
            projected back to ``n_features`` by the fc layers.
        intra_processor (nn.Module or dict): processes within chunks. A dict
            of {'class': ..., 'args': ...} is instantiated via
            ``_find_and_instantiate_module``.
        inter_processor (nn.Module or dict): processes across chunks; same
            dict convention.

    Raises:
        ValueError: if a processor dict could not be instantiated.
    """
    def __init__(self, n_features, hidden_size,
                 intra_processor, inter_processor,
                 **kwargs):
        super().__init__()
        # Saving needed stuff
        self.n_features = n_features
        self.hidden_size = hidden_size
        # Projections down and up
        self.intra_fc = nn.Linear(
            hidden_size, n_features)
        self.inter_fc = nn.Linear(
            hidden_size, n_features)
        # Inter/intra chunk processors
        if isinstance(inter_processor, dict):
            inter_processor = _find_and_instantiate_module(
                inter_processor['class'], inter_processor['args'])
        if isinstance(intra_processor, dict):
            intra_processor = _find_and_instantiate_module(
                intra_processor['class'], intra_processor['args'])
        if any([intra_processor is None, inter_processor is None]):
            raise ValueError(
                f"Tried to instantiate inter/intra processor, but failed!")
        self.intra_processor = intra_processor
        self.inter_processor = inter_processor
        # Normalization + activation layers
        self.intra_norm = nn.GroupNorm(1, n_features, eps=1e-8)
        self.inter_norm = nn.GroupNorm(1, n_features, eps=1e-8)
    def apply_norm(self, data, norm_layer, shape):
        # Reshape to `shape`, normalize over the feature channel, and
        # restore the incoming (flattened) shape.
        in_shape = data.shape
        data = data.reshape(*shape)
        # data is (batch_size, n_chunks, chunk_size, n_features)
        data = data.permute(0, 3, 1, 2)
        # now (nb, nf, nch, ncs)
        data = norm_layer(data)
        data = data.permute(0, 2, 3, 1)
        data = data.reshape(*in_shape)
        return data
    def forward(self, data):
        # data comes in as (batch_size, n_chunks, chunk_size, n_features)
        batch_size, n_chunks, chunk_size, n_features = data.shape
        # Fold chunks into the batch so the intra processor sees each chunk
        # as an independent sequence.
        data = data.reshape(batch_size * n_chunks, chunk_size, data.shape[-1])
        output = data                            # Skip connection -------->--------->
        # Apply intra-chunk processor                                     # |
        data = self.intra_processor(data)                                 # |
        data = self.intra_fc(data)                                        # |
        data = self.apply_norm(                                           # |
            data, self.intra_norm, (                                      # |
                batch_size, n_chunks, chunk_size, -1                      # |
            )                                                             # |
        )                                                                 # |
        data = output + data                     # <---------------<---------<
        # Reshape so chunks are on other side
        data = data.reshape(batch_size, n_chunks, chunk_size, -1)
        data = data.transpose(2, 1)
        data = data.reshape(batch_size * chunk_size, n_chunks, -1)
        output = data                            # Skip connection -------->--------->
        # Apply inter-chunk processor                                     # |
        data = self.inter_processor(data)                                 # |
        data = self.inter_fc(data)                                        # |
        data = self.apply_norm(                                           # |
            data, self.inter_norm, (                                      # |
                batch_size, chunk_size, n_chunks, -1                      # |
            )                                                             # |
        )                                                                 # |
        data = output + data                     # <---------------<---------<
        # Reshape back to original
        data = data.reshape(batch_size, chunk_size, n_chunks, -1)
        data = data.transpose(2, 1)
        return data
class DualPath(nn.Module):
    """
    Implements a dual path processor, which takes a sequence and splits it
    into overlapping chunks. Each chunk is then processed by a stack of
    DualPathBlock layers, which process within each chunk and across chunks
    in sequence. The output of the processor then resynthesizes
    something with the input shape by doing an overlap/add procedure.

    Args:
        num_layers (int): Number of DualPathBlock layers.
        chunk_size (int): Chunk size of intra-processing.
        hop_size (int): Hop length between chunks.
        in_features (int): Feature size of the input representation.
        bottleneck_size (int): Feature size inside the blocks; the input is
            projected down to this size and back up at the end.
        skip_connection (bool, optional): Whether to put skip connections between
            each layer. Defaults to False.
        kwargs: Keyword arguments to DualPathBlock.
    """
    def __init__(self, num_layers, chunk_size, hop_size, in_features,
                 bottleneck_size, skip_connection=False, **kwargs):
        super().__init__()
        self.chunk_size = chunk_size
        self.hop_size = hop_size
        self.layers = nn.ModuleList()
        for i in range(num_layers):
            _block = DualPathBlock(n_features=bottleneck_size, **kwargs)
            self.layers.append(_block)
        self.skip_connection = skip_connection
        self.prelu = nn.PReLU()
        # Project features down to the bottleneck and back up at the end.
        self.bottleneck = nn.Linear(in_features, bottleneck_size)
        self.bottleneck_norm = nn.GroupNorm(1, in_features)
        self.inv_bottleneck = nn.Linear(
            bottleneck_size, in_features)
    def forward(self, data):
        # Fold/Unfold pair implements chunking and its overlap/add inverse,
        # sized to this particular input's sequence length.
        fold = nn.Fold(
            (1, data.shape[1]),
            (1, self.chunk_size),
            stride=(1, self.hop_size)
        )
        unfold = nn.Unfold(
            (1, self.chunk_size), stride=(1, self.hop_size)
        )
        batch_size, sequence_length, n_features, n_channels = (
            data.shape
        )
        # extract chunks
        data = data.transpose(1, 2)
        data = self.bottleneck_norm(data)
        data = data.transpose(1, -1)
        data = self.bottleneck(data)
        data = data.permute(0, 2, 3, 1)
        data = data.reshape(
            batch_size, sequence_length, 1, -1)
        data = data.transpose(3, 1)
        data = unfold(data)
        # unfold makes the data (batch_size, bottleneck_size * chunk_size, n_chunks)
        n_chunks = data.shape[-1]
        data = data.reshape(batch_size, -1, self.chunk_size, n_chunks)
        data = data.transpose(3, 1)
        # data is now (batch_size, n_chunks, chunk_size, bottleneck_size)
        # process
        output = data                            # Skip connection --->
        for layer in self.layers:                                     # |
            data = layer(output)                                      # |
            if self.skip_connection:                                  # |
                output += data                   # <----<----<
            else:
                output = data
        data = output
        data = self.prelu(data)
        # data is still (batch_size, n_chunks, chunk_size, bottleneck_size)
        data = self.inv_bottleneck(data)
        # data is now (batch_size, n_chunks, chunk_size, in_features)
        #
        # resynthesize with overlap/add
        data = data.transpose(1, 3)
        data = data.reshape(-1, n_features * self.chunk_size, n_chunks)
        data = fold(data)
        data = data.transpose(3, 1)
        data = data.reshape(
            batch_size, sequence_length, n_features, n_channels)
        return data
import torch
import torch.nn as nn
import numpy as np
import gpytorch
class GaussianMixtureTorch(nn.Module):
    """
    A batched Gaussian mixture model fit with a fixed number of EM
    iterations, implemented entirely with torch operations so it can be
    unfolded inside a neural network and backpropagated through.
    """
    def __init__(self, n_components, n_iter=5, covariance_type='diag',
                 covariance_init=1.0, reg_covar=1e-4):
        """
        Initializes a Gaussian mixture model with n_clusters.

        Args:
            n_components (int): Number of components.
            n_iter (int, optional): Number of EM iterations. Defaults to 5.
            covariance_type (str, optional): Covariance type.
                String describing the type of covariance parameters to use.
                Must be one of:

                'full'
                    each component has its own general covariance matrix (this case
                    is harder to fit in EM than the others and isn't recommended at
                    the moment)
                'tied'
                    all components share the same general covariance matrix
                'diag'
                    each component has its own diagonal covariance matrix
                'spherical'
                    each component has its own single variance

                Defaults to 'diag'.

                NOTE(review): ``_enforce_covariance_type`` applies a diagonal
                mask unconditionally, so off-diagonal entries are always
                zeroed — 'full' does not appear to be honored; confirm.
            covariance_init (float, optional): Initial covariance for all
                features and all clusters. Defaults to 1.0.
            reg_covar (float, optional): Regularization amount to add to
                covariance matrix.
        """
        self.n_components = n_components
        self.n_iter = n_iter
        self.covariance_type = covariance_type
        self.covariance_init = covariance_init
        self.reg_covar = reg_covar

        super().__init__()

    def _m_step(self, X, resp):
        """
        Takes a maximization step on the data X.

        Args:
            X (torch.Tensor): Data, shape (n_batch, n_samples, n_features)
            resp (torch.Tensor): Responsibilities each Gaussian has for
                each sample. (n_batch, n_samples, n_components)
        """
        n_batch, n_samples, n_features = X.shape
        _, _, n_components = resp.shape

        # broadcast-friendly shapes for weighted sums over the sample axis
        resp = resp.view(n_batch, n_samples, n_components, 1)
        X = X.view(n_batch, n_samples, 1, n_features)

        # update means: responsibility-weighted average of the samples
        _top = (resp * X).sum(dim=1, keepdims=True)
        _bottom = resp.sum(dim=1, keepdims=True)
        means = _top / _bottom

        # update covariance from responsibility-weighted outer products of
        # the centered data.
        # NOTE(review): diff is multiplied by resp *before* the outer
        # product, so each sample is effectively weighted by resp**2
        # (standard EM weights by resp once) — confirm this is intended.
        diff = X - means
        diff = diff * resp
        covariance = diff.permute(0, 2, 3, 1) @ diff.permute(0, 2, 1, 3)
        covariance = covariance.unsqueeze(1) / _bottom[..., None]
        # NOTE(review): clamp applies to every entry, not only the diagonal;
        # _enforce_covariance_type masks off-diagonals afterwards anyway.
        covariance = covariance.squeeze(1).clamp(min=self.reg_covar)
        covariance = self._enforce_covariance_type(covariance)

        # update prior
        # NOTE(review): prior is the raw responsibility mass per component
        # (not normalized by n_samples) and keeps its broadcast shape —
        # confirm downstream usage.
        prior = _bottom
        return means.squeeze(1), covariance, prior

    @staticmethod
    def _e_step(X, means, covariance):
        """
        Takes the expectation of X. Returns the log probability of X under each
        Gaussian in the mixture model.

        Args:
            X (torch.Tensor): Data, shape (n_batch, n_samples, n_features)
            means (torch.Tensor): Means, shape (n_batch, n_components, n_features)
            covariance (torch.Tensor): (n_batch, n_components, n_features, n_features)
        """
        n_batch, n_samples, n_features = X.shape
        _, n_components, _ = means.shape

        # reshape so each sample is broadcast against every component
        X = X.view(n_batch, n_samples, 1, n_features)
        means = means.view(n_batch, 1, n_components, n_features)
        covariance = covariance.view(
            n_batch, 1, n_components, n_features, n_features)

        mvn = gpytorch.distributions.MultivariateNormal(
            means, covariance_matrix=covariance
        )
        log_prob = mvn.log_prob(X)
        # epsilon keeps the L1 normalization below defined when every
        # probability underflows to zero
        prob = torch.exp(log_prob) + 1e-8
        resp = nn.functional.normalize(prob, p=1, dim=-1)
        return resp, log_prob

    def _enforce_covariance_type(self, covariance):
        # Zero the off-diagonal entries ('diag' behavior); see the NOTE in
        # __init__ regarding 'full'.
        n_features = covariance.shape[-1]
        diag_mask = torch.eye(n_features, device=covariance.device)
        diag_mask = diag_mask.reshape(1, 1, n_features, n_features)
        covariance = covariance * diag_mask

        if 'spherical' in self.covariance_type:
            # Broadcast the mean over the last two dims (computed over the
            # already-masked matrix, zeros included) to every entry, then
            # re-mask to keep only the diagonal.
            covariance[..., :, :] = (
                covariance.mean(dim=[-2, -1], keepdims=True)
            )
            covariance = covariance * diag_mask
        if 'tied' in self.covariance_type:
            # All components share one covariance: mean over the component axis.
            covariance[..., :, :] = covariance.mean(
                dim=1, keepdims=True
            )
        return covariance

    def init_params(self, X, means=None, covariance=None):
        """
        Initializes Gaussian parameters.

        Args:
            X (torch.Tensor): Data, shape (n_batch, n_samples, n_features)
            means (torch.Tensor): Means, shape (n_batch, n_components, n_features). Defaults
                to None.
            covariance (torch.Tensor): (n_batch, n_components, n_features, n_features)
                or (n_batch, n_components, n_features).
                Defaults to None.
        """
        if means is None:
            # Pick n_components random samples per batch item as the initial
            # means; per-batch offsets turn the indices into positions in the
            # flattened (batch * sample) dimension.
            sampled = X.new(
                X.shape[0], self.n_components).random_(0, X.shape[1])
            sampled += X.new(np.arange(0, X.shape[0])).unsqueeze(
                1).expand(-1, sampled.shape[1]) * X.shape[1]
            sampled = sampled.long()
            means = torch.index_select(
                X.view(-1, X.shape[-1]), 0, sampled.view(-1)).view(
                X.shape[0], sampled.shape[-1], -1)
        if covariance is None:
            # Constant diagonal initialization.
            covariance = X.new(
                X.shape[0], self.n_components, X.shape[-1]).fill_(
                self.covariance_init).clone()
        if len(covariance.shape) < 4:
            # Expand a (n_batch, n_components, n_features) diagonal spec to a
            # full matrix shape; the mask below keeps only the diagonal.
            covariance = covariance.unsqueeze(-1).expand(-1, -1, -1, X.shape[-1])
        covariance = self._enforce_covariance_type(covariance.clone())
        return means, covariance

    def forward(self, data, means=None, covariance=None):
        """
        Does a forward pass of the GMM.

        Args:
            data (torch.Tensor): Data, shape is (n_batch, ..., n_features)
            means (torch.Tensor): Means, shape (n_batch, n_components, n_features).
                Defaults to None.
            covariance (torch.Tensor): (n_batch, n_components, n_features, n_features)
                or (n_batch, n_components, n_features).

        Returns:
            dict: Containing keys 'resp', 'log_prob', 'means', 'covariance', 'prior'.
        """
        # Flatten all middle dims into a single sample axis, run n_iter EM
        # steps, then restore the original leading shape on the outputs.
        shape = data.shape
        data = data.view(shape[0], -1, shape[-1])
        means, covariance = self.init_params(data, means, covariance)
        resp = log_prob = prior = None
        for i in range(self.n_iter):
            resp, log_prob = self._e_step(data, means, covariance)
            means, covariance, prior = self._m_step(data, resp)
        return {
            'resp': resp.view(shape[:-1] + (-1,)),
            'log_prob': log_prob.view(shape[:-1] + (-1,)),
            'means': means,
            'covariance': covariance,
            'prior': prior
        }
| 7,016 | 36.524064 | 92 | py |
nussl | nussl-master/nussl/ml/unfold/__init__.py | """
Deep unfolding is a type of architecture where an optimization
process like clustering, non-negative matrix factorization and
other EM style algorithms (anything with update functions) are
unfolded as layers in a neural network. In practice, this means the
algorithms' update steps become operations applied directly to torch Tensors. This
submodule collects implementations that allow one to unfold different
optimization processes in a neural network.
"""
from .gaussian_mixture import GaussianMixtureTorch
| 492 | 40.083333 | 69 | py |
nussl | nussl-master/nussl/ml/train/loss.py | from itertools import permutations, combinations
import torch
import torch.nn as nn
class L1Loss(nn.L1Loss):
    # Maps keys in a nussl batch dictionary onto the argument names of
    # ``nn.L1Loss.forward`` ('estimates' -> input, 'source_magnitudes' -> target).
    DEFAULT_KEYS = {'estimates': 'input', 'source_magnitudes': 'target'}


class MSELoss(nn.MSELoss):
    # Same key mapping as L1Loss, for mean-squared-error training.
    DEFAULT_KEYS = {'estimates': 'input', 'source_magnitudes': 'target'}


class KLDivLoss(nn.KLDivLoss):
    # Same key mapping as L1Loss, for KL-divergence training.
    DEFAULT_KEYS = {'estimates': 'input', 'source_magnitudes': 'target'}
class SISDRLoss(nn.Module):
    """
    Computes the Scale-Invariant Source-to-Distortion Ratio between a batch
    of estimated and reference audio signals. Used in end-to-end networks.
    This is essentially a batch PyTorch version of the function
    ``nussl.evaluation.bss_eval.scale_bss_eval`` and can be used to compute
    SI-SDR or SNR.

    Args:
        scaling (bool, optional): Whether to use scale-invariant (True) or
          signal-to-noise ratio (False). Defaults to True.
        return_scaling (bool, optional): Whether to only return the scaling
          factor that the estimate gets scaled by relative to the reference.
          This is just for monitoring this value during training, don't actually
          train with it! Defaults to False.
        reduction (str, optional): How to reduce across the batch (either 'mean',
          'sum', or none). Defaults to 'mean'.
        zero_mean (bool, optional): Zero mean the references and estimates before
          computing the loss. Defaults to True.
        clip_min (float, optional): The minimum possible loss value. Helps network
          to not focus on making already good examples better. Defaults to None.
    """
    DEFAULT_KEYS = {'audio': 'estimates', 'source_audio': 'references'}

    def __init__(self, scaling=True, return_scaling=False, reduction='mean',
                 zero_mean=True, clip_min=None):
        super().__init__()
        self.scaling = scaling
        self.reduction = reduction
        self.zero_mean = zero_mean
        self.return_scaling = return_scaling
        self.clip_min = clip_min

    def forward(self, estimates, references):
        eps = 1e-8
        # Collapse any leading dims to (num_batch, num_samples, num_sources).
        ref_shape = references.shape
        refs = references.reshape(-1, ref_shape[-2], ref_shape[-1])
        ests = estimates.reshape(-1, ref_shape[-2], ref_shape[-1])

        # Optionally remove the DC offset along the sample axis (dim 1).
        if self.zero_mean:
            refs = refs - refs.mean(dim=1, keepdim=True)
            ests = ests - ests.mean(dim=1, keepdim=True)

        # Projection of the estimate onto the reference gives the optimal
        # per-source scaling factor.
        ref_energy = (refs ** 2).sum(dim=-2) + eps
        cross_term = (ests * refs).sum(dim=-2) + eps
        if self.scaling:
            alpha = (cross_term / ref_energy).unsqueeze(1)
        else:
            alpha = 1

        # Decompose the estimate into a target component and a residual.
        target_part = alpha * refs
        residual = ests - target_part

        target_power = (target_part ** 2).sum(dim=1)
        residual_power = (residual ** 2).sum(dim=1)
        # Negative SDR in dB so that minimizing the loss maximizes SDR.
        loss_value = -10 * torch.log10(target_power / residual_power + eps)

        if self.clip_min is not None:
            loss_value = torch.clamp(loss_value, min=self.clip_min)

        if self.reduction == 'mean':
            loss_value = loss_value.mean()
        elif self.reduction == 'sum':
            loss_value = loss_value.sum()

        # Monitoring mode: expose the scaling factor instead of the loss.
        if self.return_scaling:
            return alpha

        return loss_value
class DeepClusteringLoss(nn.Module):
    """
    Computes the deep clustering loss with weights. Equation (7) in [1].

    References:

    [1] Wang, Z. Q., Le Roux, J., & Hershey, J. R. (2018, April).
    Alternative Objective Functions for Deep Clustering.
    In Proc. IEEE International Conference on Acoustics, Speech
    and Signal Processing (ICASSP).
    """
    DEFAULT_KEYS = {
        'embedding': 'embedding',
        'ideal_binary_mask': 'assignments',
        'weights': 'weights'
    }

    def __init__(self):
        super().__init__()

    def forward(self, embedding, assignments, weights):
        num_batch = embedding.shape[0]
        emb_dim = embedding.shape[-1]
        num_sources = assignments.shape[-1]

        w = weights.view(num_batch, -1, 1)

        # Unit-normalize the embedding rows and assignment rows.
        V = embedding.reshape(num_batch, -1, emb_dim)
        V = nn.functional.normalize(V, dim=-1, p=2)
        Y = assignments.view(num_batch, -1, num_sources)
        Y = nn.functional.normalize(Y, dim=-1, p=2)

        # Normalizer from the squared weight mass (squared again per Eq. 7).
        normalizer = (((w.reshape(num_batch, -1)) ** 2).sum(dim=1) ** 2) + 1e-8

        # Apply the per-point weights before forming Gram matrices.
        Y = w * Y
        V = w * V

        def gram_frob(left, right):
            # Squared Frobenius norm of left^T @ right, per batch item.
            product = left.transpose(2, 1) @ right
            return (product ** 2).reshape(num_batch, -1).sum(dim=-1)

        # ||V^T V||_F^2 - 2 ||V^T Y||_F^2 + ||Y^T Y||_F^2, normalized.
        per_item = (gram_frob(V, V) - 2 * gram_frob(V, Y) + gram_frob(Y, Y))
        per_item = per_item / normalizer
        return per_item.mean()
class WhitenedKMeansLoss(nn.Module):
    """
    Computes the whitened K-Means loss with weights. Equation (6) in [1].

    References:

    [1] Wang, Z. Q., Le Roux, J., & Hershey, J. R. (2018, April).
    Alternative Objective Functions for Deep Clustering.
    In Proc. IEEE International Conference on Acoustics, Speech
    and Signal Processing (ICASSP).
    """
    DEFAULT_KEYS = {
        'embedding': 'embedding',
        'ideal_binary_mask': 'assignments',
        'weights': 'weights'
    }

    def __init__(self):
        super(WhitenedKMeansLoss, self).__init__()

    def forward(self, embedding, assignments, weights):
        """
        Args:
            embedding: (batch, ..., embedding_size) network embedding.
            assignments: (batch, ..., num_sources) ideal binary mask.
            weights: per-point weights, broadcastable to (batch, -1, 1).

        Returns:
            Scalar loss, averaged over the batch.
        """
        batch_size = embedding.shape[0]
        embedding_size = embedding.shape[-1]
        num_sources = assignments.shape[-1]
        weights = weights.view(batch_size, -1, 1)

        # make everything unit norm
        embedding = embedding.reshape(batch_size, -1, embedding_size)
        embedding = nn.functional.normalize(embedding, dim=-1, p=2)

        assignments = assignments.view(batch_size, -1, num_sources)
        assignments = nn.functional.normalize(assignments, dim=-1, p=2)

        # weight each point before forming the Gram matrices
        assignments = weights * assignments
        embedding = weights * embedding

        # identities regularize the matrices before inversion
        embedding_dim_identity = torch.eye(
            embedding_size, device=embedding.device).float()
        source_dim_identity = torch.eye(
            num_sources, device=embedding.device).float()

        # V^T V: (batch, D, D); V^T Y: (batch, D, C); Y^T Y: (batch, C, C)
        vTv = (embedding.transpose(2, 1) @ embedding)
        vTy = (embedding.transpose(2, 1) @ assignments)
        yTy = (assignments.transpose(2, 1) @ assignments)

        ivTv = torch.inverse(vTv + embedding_dim_identity)
        iyTy = torch.inverse(yTy + source_dim_identity)

        ivTv_vTy = ivTv @ vTy
        vTy_iyTy = vTy @ iyTy

        # tr(AB) = sum(A^T o B)
        # where o denotes element-wise product
        # this is the trace trick
        # http://andreweckford.blogspot.com/2009/09/trace-tricks.html
        # (summed over the whole batch at once)
        trace = (ivTv_vTy * vTy_iyTy).sum()
        # constant term (D + C) per batch item from Eq. (6)
        D = (embedding_size + num_sources) * batch_size
        loss = D - 2 * trace
        return loss / batch_size
class PermutationInvariantLoss(nn.Module):
    """
    Computes the Permutation Invariant Loss (PIT) [1] by permuting the estimated
    sources and the reference sources. Takes the best permutation and only backprops
    the loss from that.

    For when you're trying to match the estimates to the sources but you don't
    know the order in which your model outputs the estimates.

    References:

    [1] Yu, Dong, Morten Kolbæk, Zheng-Hua Tan, and Jesper Jensen.
    "Permutation invariant training of deep models for speaker-independent
    multi-talker speech separation." In 2017 IEEE International Conference on
    Acoustics, Speech and Signal Processing (ICASSP), pp. 241-245. IEEE, 2017.
    """
    DEFAULT_KEYS = {'estimates': 'estimates', 'source_magnitudes': 'targets'}

    def __init__(self, loss_function):
        super().__init__()
        self.loss_function = loss_function
        # The wrapped loss must stay unreduced so each permutation can be
        # scored independently.
        self.loss_function.reduction = 'none'

    def forward(self, estimates, targets):
        num_batch = estimates.shape[0]
        num_sources = estimates.shape[-1]

        flat_estimates = estimates.reshape(num_batch, -1, num_sources)
        flat_targets = targets.reshape(num_batch, -1, num_sources)

        # Score every ordering of the target sources against the estimates.
        per_permutation = []
        for ordering in permutations(range(num_sources)):
            reordered = flat_targets[..., list(ordering)]
            scores = self.loss_function(flat_estimates, reordered)
            per_permutation.append(scores.mean(dim=-1))

        # Keep only the best (lowest) permutation at each position.
        stacked = torch.stack(per_permutation, dim=-1)
        best = torch.min(stacked, dim=-1)[0]
        return torch.mean(best)
class CombinationInvariantLoss(nn.Module):
    """
    Variant on Permutation Invariant Loss where instead a combination of the
    sources output by the model are used. This way a model can output more
    sources than there are in the ground truth. A subset of the output sources
    will be compared using Permutation Invariant Loss with the ground truth
    estimates.

    For when you're trying to match the estimates to the sources but you don't
    know the order in which your model outputs the estimates AND you are
    outputting more estimates then there are sources.
    """
    DEFAULT_KEYS = {'estimates': 'estimates', 'source_magnitudes': 'targets'}

    def __init__(self, loss_function):
        super().__init__()
        self.loss_function = loss_function
        # Unreduced loss so every subset/permutation can be scored separately.
        self.loss_function.reduction = 'none'

    def forward(self, estimates, targets):
        num_batch = estimates.shape[0]
        num_target_sources = targets.shape[-1]
        num_estimate_sources = estimates.shape[-1]

        flat_estimates = estimates.reshape(num_batch, -1, num_estimate_sources)
        flat_targets = targets.reshape(num_batch, -1, num_target_sources)

        # Score every subset of the estimates under every permutation of
        # the targets, then keep the best candidate.
        candidates = []
        for subset in combinations(range(num_estimate_sources), num_target_sources):
            chosen = flat_estimates[..., list(subset)]
            for ordering in permutations(range(num_target_sources)):
                reordered = flat_targets[..., list(ordering)]
                scores = self.loss_function(chosen, reordered)
                candidates.append(scores.mean(dim=-1))

        stacked = torch.stack(candidates, dim=-1)
        best = torch.min(stacked, dim=-1)[0]
        return torch.mean(best)
| 10,749 | 35.815068 | 84 | py |
nussl | nussl-master/nussl/ml/train/trainer.py | import os
import logging
import copy
import time
from datetime import timedelta
from ignite.engine import Events, Engine, EventEnum
from ignite.handlers import Timer
from ignite.contrib.handlers import ProgressBar
from ignite.metrics import RunningAverage
from torch.utils.tensorboard import SummaryWriter
import torch
from torch import nn
import numpy as np
class ValidationEvents(EventEnum):
    """
    Custom ignite events fired by the trainer around each validation run.
    """
    # Fired at the start of validate_and_checkpoint, before validation runs.
    VALIDATION_STARTED = 'validation_started'
    # Fired after validation and checkpointing have completed.
    VALIDATION_COMPLETED = 'validation_completed'
class BackwardsEvents(EventEnum):
    """
    Custom ignite event fired during the backwards pass of training.
    """
    # Fired by TrainClosure right after loss.backward() completes.
    BACKWARDS_COMPLETED = 'backwards_completed'
def cache_dataset(dataset):
    """
    Runs through an entire dataset and caches it if nussl.datasets.transforms.Cache
    is in dataset.transform. If there is no caching, or dataset.cache_populated = True,
    then this function just iterates through the dataset and does nothing.

    This function can also take a `torch.util.data.DataLoader` object wrapped around
    a `nussl.datasets.BaseDataset` object.

    Args:
        dataset (nussl.datasets.BaseDataset): Must be a subclass of
          `nussl.datasets.BaseDataset`, or a DataLoader wrapping one.
    """
    def dummy_process(engine, data):
        # The work happens inside the dataset's transforms as items are
        # fetched; the engine itself has nothing to do.
        pass

    cache = Engine(dummy_process)
    ProgressBar().attach(cache)
    cache.run(dataset)

    # Mark the cache as populated on the *dataset*. If a DataLoader was
    # passed, set the flag on the wrapped dataset — setting it on the
    # DataLoader itself would have no effect on the underlying dataset.
    if isinstance(dataset, torch.utils.data.DataLoader):
        dataset.dataset.cache_populated = True
    else:
        dataset.cache_populated = True
def create_train_and_validation_engines(train_func, val_func=None, device='cpu'):
    """
    Helper function for creating an ignite Engine object with helpful defaults.
    This sets up an Engine that has four handlers attached to it:

    - prepare_batch: before a batch is passed to train_func or val_func, this
      function runs, moving every item in the batch (which is a dictionary) to
      the appropriate device ('cpu' or 'cuda').

    - book_keeping: sets up some dictionaries that are used for bookkeeping so one
      can easily track the epoch and iteration losses for both training and
      validation.

    - add_to_iter_history: records the iteration, epoch, and past iteration losses
      into the dictionaries set up by book_keeping.

    - clear_iter_history: resets the current iteration history of losses after moving
      the current iteration history into past iteration history.

    Args:
        train_func (func): Function that provides the closure for training for
          a single batch.
        val_func (func, optional): Function that provides the closure for
          validating a single batch. Defaults to None.
        device (str, optional): Device to move tensors to. Defaults to 'cpu'.
    """
    # Set up engines for training and validation
    trainer = Engine(train_func)
    trainer.register_events(*ValidationEvents)
    trainer.register_events(*BackwardsEvents)
    validator = None if val_func is None else Engine(val_func)

    # Before a batch starts, the items should be float and moved to the
    # correct device, for both training and validation. Checks to make
    # sure "cuda" is available if user requested cuda.
    device = device if torch.cuda.is_available() else 'cpu'
    device = torch.device(device)

    def prepare_batch(engine):
        # Cast every tensor in the batch dict to float and move to device.
        batch = engine.state.batch
        for key in batch:
            if torch.is_tensor(batch[key]):
                batch[key] = batch[key].float().to(device)
        engine.state.batch = batch

    # Set up stuff for bookkeeping as training progresses.
    def book_keeping(engine):
        # epoch_history: per-epoch summaries; iter_history: current epoch's
        # per-iteration values; past_iter_history: retained across epochs.
        engine.state.epoch_history = {}
        engine.state.iter_history = {}
        engine.state.past_iter_history = {}

    def add_to_iter_history(engine):
        for key in engine.state.output:
            if key not in engine.state.iter_history:
                engine.state.iter_history[key] = []
            if key not in engine.state.past_iter_history:
                engine.state.past_iter_history[key] = []
            engine.state.iter_history[key].append(
                engine.state.output[key]
            )
            # NOTE(review): this appends the iter_history *list object* (not
            # the scalar value) on every iteration, so past_iter_history ends
            # up holding repeated references to the same list — presumably
            # the value was intended; confirm before relying on it.
            engine.state.past_iter_history[key].append(
                engine.state.iter_history[key]
            )

    def clear_iter_history(engine):
        engine.state.iter_history = {}

    trainer.add_event_handler(
        Events.ITERATION_STARTED, prepare_batch)
    trainer.add_event_handler(
        Events.STARTED, book_keeping)
    trainer.add_event_handler(
        Events.ITERATION_COMPLETED, add_to_iter_history)
    trainer.add_event_handler(
        Events.EPOCH_STARTED, clear_iter_history)

    if validator is not None:
        validator.add_event_handler(
            Events.ITERATION_STARTED, prepare_batch)
        validator.add_event_handler(
            Events.STARTED, book_keeping)
        validator.add_event_handler(
            Events.ITERATION_COMPLETED, add_to_iter_history)
        validator.add_event_handler(
            Events.EPOCH_STARTED, clear_iter_history)

    return trainer, validator
def add_validate_and_checkpoint(output_folder, model, optimizer, train_data, trainer,
                                val_data=None, validator=None, save_by_epoch=None):
    """
    This adds the following handler to the trainer:

    - validate_and_checkpoint: this runs the validator on the validation dataset
      (``val_data``) using a defined validation process function ``val_func``.
      These are optional. If these are not provided, then no validator is run
      and the model is simply checkpointed. The model is always saved to
      ``{output_folder}/checkpoints/latest.model.pth``. If the model is also the
      one with the lowest validation loss, then it is *also* saved to
      ``{output_folder}/checkpoints/best.model.pth``. This is attached to
      ``Events.EPOCH_COMPLETED`` on the trainer. After completion, it fires a
      ``ValidationEvents.VALIDATION_COMPLETED`` event.

    Args:
        model (torch.nn.Module): Model that is being trained (typically a SeparationModel).
        optimizer (torch.optim.Optimizer): Optimizer being used to train.
        train_data (BaseDataset): dataset that is being used to train the model. This is to
          save additional metadata information alongside the model checkpoint such as the
          STFTParams, dataset folder, length, list of transforms, etc.
        trainer (ignite.Engine): Engine for trainer
        validator (ignite.Engine, optional): Engine for validation.
          Defaults to None.
        val_data (torch.utils.data.Dataset, optional): The validation data.
          Defaults to None.
        save_by_epoch (int, optional): Save the model by epoch number. If this is set to
          N, then every Nth model will be saved in the format epoch{N}.model.pth.
    """
    # When the trainer finishes an epoch, it should validate and save
    # the model.
    @trainer.on(Events.EPOCH_COMPLETED)
    def validate_and_checkpoint(trainer):
        trainer.fire_event(ValidationEvents.VALIDATION_STARTED)

        # With no validator, every epoch's checkpoint counts as "best".
        is_best = True
        if validator is not None:
            validator.run(val_data)
            # Fold the validator's per-iteration losses into the trainer's
            # epoch history under a "validation/" prefix.
            for key in validator.state.iter_history:
                _key = f"validation/{key}"
                if _key not in trainer.state.epoch_history:
                    trainer.state.epoch_history[_key] = []
                trainer.state.epoch_history[_key].append(np.mean(
                    validator.state.iter_history[key]
                ))
            # "Best" means the current epoch's validation loss is the
            # minimum seen so far.
            if 'validation/loss' in trainer.state.epoch_history:
                cur = trainer.state.epoch_history['validation/loss'][-1]
                is_best = cur == min(trainer.state.epoch_history['validation/loss'])

        # Record epoch means of the training losses as well.
        for key in trainer.state.iter_history:
            _key = f"train/{key}"
            if _key not in trainer.state.epoch_history:
                trainer.state.epoch_history[_key] = []
            trainer.state.epoch_history[_key].append(np.mean(
                trainer.state.iter_history[key]
            ))

        # Always write 'latest'; additionally write 'best' when improved.
        output_paths = [os.path.join(
            output_folder, 'checkpoints', 'latest.model.pth')]
        if is_best:
            output_paths.append(os.path.join(
                output_folder, 'checkpoints', 'best.model.pth'
            ))

        # Unwrap DataParallel so the underlying model's save() is used.
        if isinstance(model, nn.DataParallel):
            _model = model.module
        else:
            _model = model

        for _path in output_paths:
            os.makedirs(os.path.join(
                output_folder, 'checkpoints'), exist_ok=True)
            _model.save(_path, train_data=train_data, val_data=val_data,
                        trainer=trainer)
            # The optimizer state is saved next to each model checkpoint.
            torch.save(optimizer.state_dict(),
                       _path.replace('model.pth', 'optimizer.pth'))

        # Optionally also keep a checkpoint every Nth epoch.
        if save_by_epoch is not None:
            if trainer.state.epoch % save_by_epoch == 0:
                _path = output_paths[0].replace('latest', f'epoch{trainer.state.epoch}')
                _model.save(_path, train_data=train_data, val_data=val_data,
                            trainer=trainer)

        # Points at 'best' when it was saved this epoch, else 'latest'.
        trainer.state.saved_model_path = output_paths[-1]
        trainer.state.output_folder = output_folder
        trainer.fire_event(ValidationEvents.VALIDATION_COMPLETED)
def add_stdout_handler(trainer, validator=None):
    """
    This adds the following handler to the trainer engine, and also sets up
    Timers:

    - log_epoch_to_stdout: This logs the results of a model after it has trained
      for a single epoch on both the training and validation set. The output typically
      looks like this:

      .. code-block:: none

            EPOCH SUMMARY
            ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            - Epoch number: 0010 / 0010
            - Training loss:   0.583591
            - Validation loss: 0.137209
            - Epoch took: 00:00:03
            - Time since start: 00:00:32
            ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            Saving to test.
            Output @ tests/local/trainer

    Args:
        trainer (ignite.Engine): Engine for trainer
        validator (ignite.Engine, optional): Engine for validation.
          Defaults to None.
    """
    # Set up timers for overall time taken and each epoch
    overall_timer = Timer(average=False)
    overall_timer.attach(trainer,
                         start=Events.STARTED, pause=Events.COMPLETED)

    # Each epoch's timer runs until validation/checkpointing finishes, so
    # the reported "Epoch took" includes validation time.
    epoch_timer = Timer(average=False)
    epoch_timer.attach(
        trainer, start=Events.EPOCH_STARTED,
        pause=ValidationEvents.VALIDATION_COMPLETED
    )

    @trainer.on(ValidationEvents.VALIDATION_COMPLETED)
    def log_epoch_to_stdout(trainer):
        epoch_time = epoch_timer.value()
        epoch_time = timedelta(seconds=epoch_time)

        overall_time = overall_timer.value()
        overall_time = timedelta(seconds=overall_time)

        epoch_number = trainer.state.epoch
        total_epochs = trainer.state.max_epochs

        # No validator (or no loss recorded yet) -> show 'N/A'.
        # NOTE(review): bare except swallows everything here — a narrower
        # KeyError/IndexError would be safer.
        try:
            validation_loss = (
                f"{trainer.state.epoch_history['validation/loss'][-1]:04f}")
        except:
            validation_loss = 'N/A'

        train_loss = trainer.state.epoch_history['train/loss'][-1]
        saved_model_path = trainer.state.saved_model_path

        logging_str = (
            f"\n\n"
            f"EPOCH SUMMARY \n"
            f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \n"
            f"- Epoch number: {epoch_number:04d} / {total_epochs:04d} \n"
            f"- Training loss:   {train_loss:04f} \n"
            f"- Validation loss: {validation_loss} \n"
            f"- Epoch took: {epoch_time} \n"
            f"- Time since start: {overall_time} \n"
            f"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ \n"
            f"Saving to {saved_model_path}. \n"
            f"Output @ {trainer.state.output_folder} \n"
        )

        logging.info(logging_str)
def add_progress_bar_handler(*engines):
    """
    Adds a progress bar to each engine. Keeps track of a running
    average of the loss as well.

    Usage::

    .. code-block:: python

        tr_engine, val_engine = ...
        add_progress_bar_handler(tr_engine, val_engine)
    """
    def _extract_loss(output):
        # Each closure returns a dict of losses; track the combined one.
        return output['loss']

    for _engine in engines:
        avg = RunningAverage(output_transform=_extract_loss)
        avg.attach(_engine, 'avg_loss')
        ProgressBar().attach(_engine, ['avg_loss'])
def add_tensorboard_handler(tensorboard_folder, engine, every_iteration=False):
    """
    Every key in engine.state.epoch_history[-1] is logged to TensorBoard.

    Args:
        tensorboard_folder (str): Where the tensorboard logs should go.
        engine (ignite.Engine): The engine to log.
        every_iteration (bool, optional): Whether to also log the values at every
          iteration.
    """
    writer = SummaryWriter(tensorboard_folder)

    @engine.on(ValidationEvents.VALIDATION_COMPLETED)
    def log_to_tensorboard(engine):
        # Log the most recent epoch-level value of every tracked key.
        for key in engine.state.epoch_history:
            writer.add_scalar(
                key, engine.state.epoch_history[key][-1], engine.state.epoch)

    if every_iteration:
        @engine.on(Events.ITERATION_COMPLETED)
        def log_iteration_to_tensorboard(engine):
            # Log the most recent iteration-level value of every tracked key.
            for key in engine.state.iter_history:
                writer.add_scalar(
                    key, engine.state.iter_history[key][-1], engine.state.iteration)
| 13,287 | 36.325843 | 91 | py |
nussl | nussl-master/nussl/ml/train/closures.py | import copy
import torch
from . import loss
from .trainer import BackwardsEvents
class Closure(object):
    """
    Closures are used with ignite Engines to train a model given an optimizer
    and a set of loss functions. Closures perform forward passes of models given
    the input data. The loss is computed via ``self.compute_loss``.
    The forward pass is implemented via the objects ``__call__`` function.

    This closure object provides a way to define the loss functions you want to
    use to train your model as a loss dictionary that is structured as follows:

    .. code-block:: python

        loss_dictionary = {
            'LossClassName': {
                'weight': [how much to weight the loss in the sum, defaults to 1],
                'keys': [key mapping items in dictionary to arguments to loss],
                'args': [any positional arguments to the loss class],
                'kwargs': [keyword arguments to the loss class],
            }
        }

    The keys value will default to ``LossClassName.DEFAULT_KEYS``, which can be
    found in ``nussl.ml.train.loss`` within each available class. Here's an example
    of a Chimera loss combining deep clustering with permutation invariant L1 loss:

    .. code-block:: python

        loss_dictionary = {
            'DeepClusteringLoss': {
                'weight': .2,
            },
            'PermutationInvariantLoss': {
                'weight': .8,
                'args': ['L1Loss']
            }
        }

    Or if you're using permutation invariant loss but need to specify arguments to the
    loss function being wrapped by PIT, you can do this:

    .. code-block:: python

        loss_dictionary = {
            'PITLoss': {
                'class': 'PermutationInvariantLoss',
                'keys': {'audio': 'estimates', 'source_audio': 'targets'},
                'args': [{
                    'class': 'SISDRLoss',
                    'kwargs': {'scaling': False}
                }]
            }
        }

    If you have your own loss function classes you wish to use, you can pass those
    into the loss dictionary and make them discoverable by the closure by using
    `ml.register_loss.`

    Args:
        loss_dictionary (dict): Dictionary of losses described above.
        combination_approach (str): How to combine losses, if there are multiple
          losses. The default is that the losses will be combined via a weighted
          sum ('combine_by_sum'). Can also do 'combine_by_multiply'. Defaults to
          'combine_by_sum'.
        args: Positional arguments to ``combination_approach``.
        kwargs: Keyword arguments to ``combination_approach``.

    See also:
        ml.register_loss to register your loss functions with this closure.
    """

    def __init__(self, loss_dictionary, combination_approach='combine_by_sum',
                 *args, **kwargs):
        loss_dictionary = self._validate_loss_dictionary(loss_dictionary)

        self.combination_func = getattr(self, combination_approach)
        self.args = args
        self.kwargs = kwargs

        # Each entry becomes (instantiated loss, weight, key mapping, name).
        self.losses = []
        for key, val in loss_dictionary.items():
            _loss_name = val['class'] if 'class' in val else key
            loss_class = getattr(loss, _loss_name)
            weight = 1 if 'weight' not in val else val['weight']
            keys = loss_class.DEFAULT_KEYS if 'keys' not in val else val['keys']
            # deepcopy so the wrapped-loss spec in the user's dictionary is
            # not mutated when args[0] is replaced with an instance below.
            args = [] if 'args' not in val else copy.deepcopy(val['args'])
            kwargs = {} if 'kwargs' not in val else copy.deepcopy(val['kwargs'])

            # PIT/CIT wrap another loss; resolve a string or dict spec into
            # an instantiated loss object.
            if _loss_name in ['CombinationInvariantLoss', 'PermutationInvariantLoss']:
                if isinstance(args[0], str):
                    args[0] = getattr(loss, args[0])()
                elif isinstance(args[0], dict):
                    arg_class = getattr(loss, args[0]['class'])
                    args_to_loss = [] if 'args' not in args[0] else args[0]['args']
                    kwargs_to_loss = {} if 'kwargs' not in args[0] else args[0]['kwargs']
                    args[0] = arg_class(*args_to_loss, **kwargs_to_loss)

            _loss = (loss_class(*args, **kwargs), weight, keys, key)
            self.losses.append(_loss)

    @staticmethod
    def _validate_loss_dictionary(loss_dictionary):
        """
        Checks the structure of the loss dictionary, raising ClosureException
        on any malformed entry. Returns the dictionary unchanged when valid.
        """
        if not isinstance(loss_dictionary, dict):
            raise ClosureException(
                "loss_dictionary must be a dictionary specifying the "
                "class and arguments for each loss function! ")

        for key, val in loss_dictionary.items():
            # Fixed: this check must come before `'class' in val` below,
            # otherwise a non-container value raises TypeError instead of
            # the intended ClosureException.
            if not isinstance(val, dict):
                raise ClosureException(
                    "Each key in loss dictionary must point to a dict!")
            _loss = val['class'] if 'class' in val else key
            if _loss not in dir(loss):
                raise ClosureException(
                    f"Loss function {_loss} not found in loss which has {dir(loss)}")

            for val_key in val:
                if val_key not in ['weight', 'keys', 'args', 'kwargs', 'class']:
                    # Fixed: message previously omitted 'keys'/'class' and was
                    # missing its closing bracket.
                    raise ClosureException(
                        f"{key} in loss_dictionary not in "
                        f"['weight', 'keys', 'args', 'kwargs', 'class']")
                elif val_key == 'weight':
                    # Tensors are allowed so weights can be learned (see
                    # combine_by_multitask).
                    if not isinstance(val[val_key], (float, int)) and not torch.is_tensor(val[val_key]):
                        raise ClosureException(
                            "weight can only be an int, a float, or a tensor")
                elif val_key == 'args':
                    if not isinstance(val[val_key], list):
                        raise ClosureException("args must be a list")
                elif val_key == 'kwargs':
                    if not isinstance(val[val_key], dict):
                        raise ClosureException("kwargs must be a dict")
        return loss_dictionary

    def __call__(self, engine, data):
        # Subclasses (e.g. TrainClosure, ValidationClosure) implement the
        # actual forward pass.
        raise NotImplementedError()

    def combine_by_multitask(self, loss_output):
        r"""
        Implements a multitask learning objective [1] where each loss
        is weighted by a learned parameter with the following
        function:

        combined_loss = \sum_i exp(-weight_i) * loss_i + weight_i

        where i indexes each loss. The weights come from the loss
        dictionary and can point to nn.Parameter tensors that get
        learned jointly with the model.

        References:

        [1] Kendall, Alex, Yarin Gal, and Roberto Cipolla.
            "Multi-task learning using uncertainty to weigh losses
            for scene geometry and semantics." Proceedings of the
            IEEE conference on computer vision and pattern recognition. 2018.
        """
        combined_loss = 0
        for _, weight, _, name in self.losses:
            sigma = torch.exp(-weight)
            combined_loss += sigma * loss_output[name] + weight
        return combined_loss

    def combine_by_multiply(self, loss_output):
        # Product of the weighted losses.
        combined_loss = 1
        for _, weight, _, name in self.losses:
            combined_loss *= weight * loss_output[name]
        return combined_loss

    def combine_by_sum(self, loss_output):
        # Weighted sum of the losses.
        combined_loss = 0
        for _, weight, _, name in self.losses:
            # if the weight is 0, then the loss is just for
            # monitoring and we won't bother summing with it,
            # in case its shape doesnt match.
            if weight != 0:
                combined_loss += weight * loss_output[name]
        return combined_loss

    def compute_loss(self, output, target):
        """
        Computes every configured loss on the merged output/target
        dictionary and combines them under the 'loss' key.

        Args:
            output (dict): Model output dictionary.
            target (dict): Batch dictionary; merged into ``output``.

        Returns:
            dict: One entry per configured loss name, plus 'loss' with the
            combined value.
        """
        loss_output = {}
        output.update(target)
        for loss_obj, weight, keys, name in self.losses:
            kwargs = {keys[k]: output[k] for k in keys}
            loss_output[name] = loss_obj(**kwargs)
        loss_output['loss'] = self.combination_func(
            loss_output, *self.args, **self.kwargs)
        return loss_output
class TrainClosure(Closure):
    """
    This closure takes an optimization step on a SeparationModel object given a
    loss.

    Args:
        loss_dictionary (dict): Dictionary containing loss functions and specification.
        optimizer (torch Optimizer): Optimizer to use to train the model.
        model (SeparationModel): The model to be trained.
    """
    def __init__(self, loss_dictionary, optimizer, model, *args, **kwargs):
        super().__init__(loss_dictionary, *args, **kwargs)
        self.optimizer = optimizer
        self.model = model
        # Save about training metadata to model.info
        self.model.metadata['optimizer'] = {
            'name': type(optimizer).__name__,
            'params': optimizer.defaults # All of the settings are stored here.
        }
        self.model.metadata['loss_dictionary'] = loss_dictionary
    def _fire_event(self, engine, output, event):
        # Fire an ignite event only when running under an engine with state;
        # this lets the closure also be called standalone with engine=None.
        if engine is not None:
            if engine.state is not None:
                engine.state.model_output = output
                engine.fire_event(event)
    def __call__(self, engine, data):
        # One full optimization step on a single batch. Order matters:
        # the BACKWARDS_COMPLETED event fires after backward() but before
        # optimizer.step(), so handlers (e.g. gradient clipping) can modify
        # gradients before they are applied.
        self.model.train()
        self.optimizer.zero_grad()
        output = self.model(data)
        loss_ = self.compute_loss(output, data)
        loss_['loss'].backward()
        self._fire_event(engine, output, BackwardsEvents.BACKWARDS_COMPLETED)
        self.optimizer.step()
        # Detach to plain Python floats for logging.
        loss_ = {key: loss_[key].item() for key in loss_}
        return loss_
class ValidationClosure(Closure):
    """
    Closure that runs a forward pass and computes losses on a batch with
    no gradient tracking and no optimizer updates. Used for monitoring
    performance on held-out data.

    Args:
        loss_dictionary (dict): Dictionary containing loss functions and specification.
        model (SeparationModel): The model to be validated.
    """
    def __init__(self, loss_dictionary, model, *args, **kwargs):
        super().__init__(loss_dictionary, *args, **kwargs)
        self.model = model
    def __call__(self, engine, data):
        self.model.eval()
        with torch.no_grad():
            output = self.model(data)
            loss_values = self.compute_loss(output, data)
        # Detach to plain Python floats for logging.
        return {key: value.item() for key, value in loss_values.items()}
class ClosureException(Exception):
    """
    Exception class for errors when working with closures in nussl,
    e.g. a malformed loss dictionary passed to a Closure.
    """
    pass
| 10,220 | 36.577206 | 104 | py |
nussl | nussl-master/recipes/wham/chimera.py | """
This recipe trains and evaluates a mask inference model
on the clean data from the WHAM dataset with 8k. It's divided into
three big chunks: data preparation, training, and evaluation.
Final output of this script is a table of overall SAR/SDR/SIR evaluation metrics, printed with termtables.
"""
import nussl
from nussl import ml, datasets, utils, separation, evaluation
import os
import torch
import multiprocessing
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from torch import optim
import logging
import matplotlib.pyplot as plt
import shutil
import json
import tqdm
import glob
import numpy as np
import termtables
# ----------------------------------------------------
# ------------------- SETTING UP ---------------------
# ----------------------------------------------------
# seed this recipe for reproducibility
utils.seed(0)
# set up logging
logging.basicConfig(
    format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
    datefmt='%Y-%m-%d:%H:%M:%S', level=logging.INFO)
# make sure this is set to WHAM root directory
WHAM_ROOT = os.getenv("WHAM_ROOT")
CACHE_ROOT = os.getenv("CACHE_ROOT")
# use a quarter of the cores for dataloader / evaluation workers
NUM_WORKERS = multiprocessing.cpu_count() // 4
OUTPUT_DIR = os.path.expanduser('~/.nussl/recipes/wham_chimera/run14_1e-2_1e3_1')
RESULTS_DIR = os.path.join(OUTPUT_DIR, 'results')
MODEL_PATH = os.path.join(OUTPUT_DIR, 'checkpoints', 'best.model.pth')
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
BATCH_SIZE = 25
MAX_EPOCHS = 100
# set to False on the first run so the transformed dataset gets cached to disk
CACHE_POPULATED = True
LEARNING_RATE = 1e-3
PATIENCE = 5
GRAD_NORM = 1e-2
# start from a clean results / tensorboard state for this run
shutil.rmtree(os.path.join(RESULTS_DIR), ignore_errors=True)
os.makedirs(RESULTS_DIR, exist_ok=True)
shutil.rmtree(os.path.join(OUTPUT_DIR, 'tensorboard'), ignore_errors=True)
def construct_transforms(cache_location):
    # stft will be 32ms wlen, 8ms hop, sqrt-hann, at 8khz sample rate by default
    tfm = datasets.transforms.Compose([
        datasets.transforms.MagnitudeSpectrumApproximation(), # take stfts and get ibm
        datasets.transforms.MagnitudeWeights(), # get magnitude weights
        datasets.transforms.ToSeparationModel(), # convert to tensors
        datasets.transforms.Cache(cache_location), # up to here gets cached
        datasets.transforms.GetExcerpt(400) # get 400 frame excerpts (3.2 seconds)
    ])
    return tfm
def cache_dataset(_dataset):
    # run every item through the transform chain once so results land in the cache
    cache_dataloader = torch.utils.data.DataLoader(
        _dataset, num_workers=NUM_WORKERS, batch_size=BATCH_SIZE)
    ml.train.cache_dataset(cache_dataloader)
    _dataset.cache_populated = True
tfm = construct_transforms(os.path.join(CACHE_ROOT, 'tr'))
dataset = datasets.WHAM(WHAM_ROOT, split='tr', transform=tfm,
                        cache_populated=CACHE_POPULATED)
tfm = construct_transforms(os.path.join(CACHE_ROOT, 'cv'))
val_dataset = datasets.WHAM(WHAM_ROOT, split='cv', transform=tfm,
                            cache_populated=CACHE_POPULATED)
if not CACHE_POPULATED:
    # cache datasets for speed
    cache_dataset(dataset)
    cache_dataset(val_dataset)
# ----------------------------------------------------
# -------------------- TRAINING ----------------------
# ----------------------------------------------------
# reload after caching
train_sampler = torch.utils.data.sampler.RandomSampler(dataset)
val_sampler = torch.utils.data.sampler.RandomSampler(val_dataset)
dataloader = torch.utils.data.DataLoader(dataset, num_workers=NUM_WORKERS,
                                         batch_size=BATCH_SIZE, sampler=train_sampler)
val_dataloader = torch.utils.data.DataLoader(val_dataset, num_workers=NUM_WORKERS,
                                             batch_size=BATCH_SIZE, sampler=val_sampler)
n_features = dataset[0]['mix_magnitude'].shape[1]
# builds a baseline model with 4 recurrent layers, 600 hidden units, bidirectional
# and 20 dimensional embedding
config = ml.networks.builders.build_recurrent_chimera(
    n_features, 600, 4, True, 0.3, 20, ['sigmoid', 'unit_norm'],
    2, ['sigmoid'], normalization_class='BatchNorm'
)
model = ml.SeparationModel(config).to(DEVICE)
logging.info(model)
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, factor=0.5, patience=PATIENCE)
# set up the loss function: chimera trains both a mask-inference head
# (permutation-invariant L1) and a deep clustering head jointly
loss_dictionary = {
    'PermutationInvariantLoss': {'args': ['L1Loss'], 'weight': 1e3},
    'DeepClusteringLoss': {'weight': 1.0}
}
# set up closures for the forward and backward pass on one batch
train_closure = ml.train.closures.TrainClosure(
    loss_dictionary, optimizer, model)
val_closure = ml.train.closures.ValidationClosure(
    loss_dictionary, model)
# set up engines for training and validation
trainer, validator = ml.train.create_train_and_validation_engines(
    train_closure, val_closure, device=DEVICE)
# attach handlers for visualizing output and saving the model
ml.train.add_stdout_handler(trainer, validator)
ml.train.add_validate_and_checkpoint(
    OUTPUT_DIR, model, optimizer, dataset,
    trainer, val_data=val_dataloader, validator=validator)
ml.train.add_tensorboard_handler(OUTPUT_DIR, trainer)
# add a handler to set up patience
@trainer.on(ml.train.ValidationEvents.VALIDATION_COMPLETED)
def step_scheduler(trainer):
    val_loss = trainer.state.epoch_history['validation/loss'][-1]
    scheduler.step(val_loss)
# add a handler to set up gradient clipping
@trainer.on(ml.train.BackwardsEvents.BACKWARDS_COMPLETED)
def clip_gradient(trainer):
    torch.nn.utils.clip_grad_norm_(model.parameters(), GRAD_NORM)
# train the model
trainer.run(dataloader, max_epochs=MAX_EPOCHS)
# ----------------------------------------------------
# ------------------- EVALUATION ---------------------
# ----------------------------------------------------
test_dataset = datasets.WHAM(WHAM_ROOT, sample_rate=8000, split='tt')
# make a deep clustering separator with an empty audio signal initially
# this one will live on gpu and be used in a threadpool for speed
dme = separation.deep.DeepMaskEstimation(
    nussl.AudioSignal(), model_path=MODEL_PATH, device='cuda')
def forward_on_gpu(audio_signal):
    # set the audio signal of the object to this item's mix
    dme.audio_signal = audio_signal
    masks = dme.forward()
    return masks
def separate_and_evaluate(item, masks):
    # masks were computed on the shared GPU object; apply them and score here
    separator = separation.deep.DeepMaskEstimation(item['mix'])
    estimates = separator(masks)
    evaluator = evaluation.BSSEvalScale(
        list(item['sources'].values()), estimates, compute_permutation=True)
    scores = evaluator.evaluate()
    output_path = os.path.join(RESULTS_DIR, f"{item['mix'].file_name}.json")
    with open(output_path, 'w') as f:
        json.dump(scores, f)
# GPU forward pass runs serially in this thread; separation + BSSEval are
# farmed out to a thread pool (first item runs inline as a smoke test)
pool = ThreadPoolExecutor(max_workers=NUM_WORKERS)
for i, item in enumerate(tqdm.tqdm(test_dataset)):
    masks = forward_on_gpu(item['mix'])
    if i == 0:
        separate_and_evaluate(item, masks)
    else:
        pool.submit(separate_and_evaluate, item, masks)
pool.shutdown(wait=True)
json_files = glob.glob(f"{RESULTS_DIR}/*.json")
df = evaluation.aggregate_score_files(json_files)
overall = df.mean()
headers = ["", f"OVERALL (N = {df.shape[0]})", ""]
metrics = ["SAR", "SDR", "SIR"]
data = np.array(df.mean()).T
data = [metrics, data]
termtables.print(data, header=headers, padding=(0, 1), alignment="ccc")
| 7,122 | 35.716495 | 90 | py |
nussl | nussl-master/recipes/wham/evaluate_dpcl.py | """
This recipe evaluates a previously trained deep clustering model
on the clean data from the WHAM dataset with 8k. It performs only the
evaluation step; see deep_clustering.py for data preparation and training.
Final output of this script:
┌───────────────────┬────────────────────┬────────────────────┐
│ │ OVERALL (N = 6000) │ │
╞═══════════════════╪════════════════════╪════════════════════╡
│ SAR │ SDR │ SIR │
├───────────────────┼────────────────────┼────────────────────┤
│ 11.07829052874508 │ 10.737156798640111 │ 23.704177123014816 │
└───────────────────┴────────────────────┴────────────────────┘
Last run on 3/20/20.
"""
import os
import multiprocessing
import logging
import shutil
import json
import glob
from concurrent.futures import ThreadPoolExecutor
import torch
from torch import optim
import tqdm
import numpy as np
import termtables
import nussl
from nussl import ml, datasets, utils, separation, evaluation
# ----------------------------------------------------
# ------------------- SETTING UP ---------------------
# ----------------------------------------------------
# seed this recipe for reproducibility
utils.seed(0)
# set up logging
logging.basicConfig(
    format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
    datefmt='%Y-%m-%d:%H:%M:%S', level=logging.INFO)
# make sure this is set to WHAM root directory
WHAM_ROOT = os.getenv("WHAM_ROOT")
CACHE_ROOT = os.getenv("CACHE_ROOT")
NUM_WORKERS = multiprocessing.cpu_count() // 2
# points at the checkpoints of an already-trained chimera run
OUTPUT_DIR = os.path.expanduser('~/.nussl/recipes/wham_chimera/run10_1e-4_.5_.5/')
RESULTS_DIR = os.path.join(OUTPUT_DIR, 'results')
MODEL_PATH = os.path.join(OUTPUT_DIR, 'checkpoints', 'best.model.pth')
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# NOTE(review): the training hyperparameters below are unused in this
# evaluation-only script; they appear to be carried over from the
# training recipe this file was derived from.
BATCH_SIZE = 25
MAX_EPOCHS = 100
CACHE_POPULATED = True
LEARNING_RATE = 1e-3
PATIENCE = 5
GRAD_NORM = 1e-2
# start from a clean results directory
shutil.rmtree(os.path.join(RESULTS_DIR), ignore_errors=True)
os.makedirs(RESULTS_DIR, exist_ok=True)
# ----------------------------------------------------
# ------------------- EVALUATION ---------------------
# ----------------------------------------------------
test_dataset = datasets.WHAM(WHAM_ROOT, sample_rate=8000, split='tt')
# make a deep clustering separator with an empty audio signal initially
# this one will live on gpu and be used in a threadpool for speed
dpcl = separation.deep.DeepClustering(
    nussl.AudioSignal(), num_sources=2, model_path=MODEL_PATH, device='cuda')
def forward_on_gpu(audio_signal):
    # set the audio signal of the object to this item's mix
    dpcl.audio_signal = audio_signal
    features = dpcl.extract_features()
    return features
def separate_and_evaluate(item, features):
    # cluster the GPU-extracted embeddings and score the separated estimates
    separator = separation.deep.DeepClustering(item['mix'], num_sources=2)
    estimates = separator(features)
    evaluator = evaluation.BSSEvalScale(
        list(item['sources'].values()), estimates, compute_permutation=True)
    scores = evaluator.evaluate()
    output_path = os.path.join(RESULTS_DIR, f"{item['mix'].file_name}.json")
    with open(output_path, 'w') as f:
        json.dump(scores, f)
# GPU feature extraction runs serially; clustering + BSSEval run in threads
# (first item runs inline as a smoke test)
pool = ThreadPoolExecutor(max_workers=NUM_WORKERS)
for i, item in enumerate(tqdm.tqdm(test_dataset)):
    features = forward_on_gpu(item['mix'])
    if i == 0:
        separate_and_evaluate(item, features)
    else:
        pool.submit(separate_and_evaluate, item, features)
pool.shutdown(wait=True)
json_files = glob.glob(f"{RESULTS_DIR}/*.json")
df = evaluation.aggregate_score_files(json_files)
overall = df.mean()
headers = ["", f"OVERALL (N = {df.shape[0]})", ""]
metrics = ["SAR", "SDR", "SIR"]
data = np.array(df.mean()).T
data = [metrics, data]
termtables.print(data, header=headers, padding=(0, 1), alignment="ccc")
| 3,803 | 32.368421 | 90 | py |
nussl | nussl-master/recipes/wham/deep_clustering.py | """
This recipe trains and evaluates a deep clustering model
on the clean data from the WHAM dataset with 8k. It's divided into
three big chunks: data preparation, training, and evaluation.
Final output of this script:
┌───────────────────┬────────────────────┬────────────────────┐
│ │ OVERALL (N = 6000) │ │
╞═══════════════════╪════════════════════╪════════════════════╡
│ SAR │ SDR │ SIR │
├───────────────────┼────────────────────┼────────────────────┤
│ 11.07829052874508 │ 10.737156798640111 │ 23.704177123014816 │
└───────────────────┴────────────────────┴────────────────────┘
Last run on 3/20/20.
"""
import os
import multiprocessing
import logging
import shutil
import json
import glob
from concurrent.futures import ThreadPoolExecutor
import torch
from torch import optim
import tqdm
import numpy as np
import termtables
import nussl
from nussl import ml, datasets, utils, separation, evaluation
# ----------------------------------------------------
# ------------------- SETTING UP ---------------------
# ----------------------------------------------------
# seed this recipe for reproducibility
utils.seed(0)
# set up logging
logging.basicConfig(
    format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
    datefmt='%Y-%m-%d:%H:%M:%S', level=logging.INFO)
# make sure this is set to WHAM root directory
WHAM_ROOT = os.getenv("WHAM_ROOT")
CACHE_ROOT = os.getenv("CACHE_ROOT")
NUM_WORKERS = multiprocessing.cpu_count() // 2
OUTPUT_DIR = os.path.expanduser('~/.nussl/recipes/wham_dpcl/run3_1e-2')
RESULTS_DIR = os.path.join(OUTPUT_DIR, 'results')
MODEL_PATH = os.path.join(OUTPUT_DIR, 'checkpoints', 'best.model.pth')
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
BATCH_SIZE = 25
MAX_EPOCHS = 100
# set to False on the first run so the transformed dataset gets cached to disk
CACHE_POPULATED = True
LEARNING_RATE = 1e-3
PATIENCE = 5
GRAD_NORM = 1e-2
# start from a clean results / tensorboard state for this run
shutil.rmtree(os.path.join(RESULTS_DIR), ignore_errors=True)
os.makedirs(RESULTS_DIR, exist_ok=True)
shutil.rmtree(os.path.join(OUTPUT_DIR, 'tensorboard'), ignore_errors=True)
def construct_transforms(cache_location):
    # stft will be 32ms wlen, 8ms hop, sqrt-hann, at 8khz sample rate by default
    tfm = datasets.transforms.Compose([
        datasets.transforms.MagnitudeSpectrumApproximation(), # take stfts and get ibm
        datasets.transforms.MagnitudeWeights(), # get magnitude weights
        datasets.transforms.ToSeparationModel(), # convert to tensors
        datasets.transforms.Cache(cache_location), # up to here gets cached
        datasets.transforms.GetExcerpt(400) # get 400 frame excerpts (3.2 seconds)
    ])
    return tfm
def cache_dataset(_dataset):
    # run every item through the transform chain once so results land in the cache
    cache_dataloader = torch.utils.data.DataLoader(
        _dataset, num_workers=NUM_WORKERS, batch_size=BATCH_SIZE)
    ml.train.cache_dataset(cache_dataloader)
    _dataset.cache_populated = True
tfm = construct_transforms(os.path.join(CACHE_ROOT, 'tr'))
dataset = datasets.WHAM(WHAM_ROOT, split='tr', transform=tfm,
                        cache_populated=CACHE_POPULATED)
tfm = construct_transforms(os.path.join(CACHE_ROOT, 'cv'))
val_dataset = datasets.WHAM(WHAM_ROOT, split='cv', transform=tfm,
                            cache_populated=CACHE_POPULATED)
if not CACHE_POPULATED:
    # cache datasets for speed
    cache_dataset(dataset)
    cache_dataset(val_dataset)
# ----------------------------------------------------
# -------------------- TRAINING ----------------------
# ----------------------------------------------------
# reload after caching
train_sampler = torch.utils.data.sampler.RandomSampler(dataset)
val_sampler = torch.utils.data.sampler.RandomSampler(val_dataset)
dataloader = torch.utils.data.DataLoader(dataset, num_workers=NUM_WORKERS,
                                         batch_size=BATCH_SIZE, sampler=train_sampler)
val_dataloader = torch.utils.data.DataLoader(val_dataset, num_workers=NUM_WORKERS,
                                             batch_size=BATCH_SIZE, sampler=val_sampler)
n_features = dataset[0]['mix_magnitude'].shape[1]
# builds a baseline model with 4 recurrent layers, 600 hidden units, bidirectional
# and 20 dimensional embedding
config = ml.networks.builders.build_recurrent_dpcl(
    n_features, 600, 4, True, 0.3, 20, ['sigmoid', 'unit_norm'],
    normalization_class='BatchNorm')
model = ml.SeparationModel(config).to(DEVICE)
logging.info(model)
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, factor=0.5, patience=PATIENCE)
# set up the loss function: deep clustering objective only
loss_dictionary = {
    'DeepClusteringLoss': {'weight': 1.0}
}
# set up closures for the forward and backward pass on one batch
train_closure = ml.train.closures.TrainClosure(loss_dictionary, optimizer, model)
val_closure = ml.train.closures.ValidationClosure(loss_dictionary, model)
# set up engines for training and validation
trainer, validator = ml.train.create_train_and_validation_engines(train_closure, val_closure,
                                                                  device=DEVICE)
# attach handlers for visualizing output and saving the model
ml.train.add_stdout_handler(trainer, validator)
ml.train.add_validate_and_checkpoint(OUTPUT_DIR, model, optimizer, dataset,
                                     trainer, val_data=val_dataloader, validator=validator)
ml.train.add_tensorboard_handler(OUTPUT_DIR, trainer)
# add a handler to set up patience
@trainer.on(ml.train.ValidationEvents.VALIDATION_COMPLETED)
def step_scheduler(trainer):
    val_loss = trainer.state.epoch_history['validation/loss'][-1]
    scheduler.step(val_loss)
# add a handler to set up gradient clipping
@trainer.on(ml.train.BackwardsEvents.BACKWARDS_COMPLETED)
def clip_gradient(trainer):
    torch.nn.utils.clip_grad_norm_(model.parameters(), GRAD_NORM)
# train the model
trainer.run(dataloader, max_epochs=MAX_EPOCHS)
# ----------------------------------------------------
# ------------------- EVALUATION ---------------------
# ----------------------------------------------------
test_dataset = datasets.WHAM(WHAM_ROOT, sample_rate=8000, split='tt')
# make a deep clustering separator with an empty audio signal initially
# this one will live on gpu and be used in a threadpool for speed
dpcl = separation.deep.DeepClustering(
    nussl.AudioSignal(), num_sources=2, model_path=MODEL_PATH, device='cuda')
def forward_on_gpu(audio_signal):
    # set the audio signal of the object to this item's mix
    dpcl.audio_signal = audio_signal
    features = dpcl.extract_features()
    return features
def separate_and_evaluate(item, features):
    # cluster the GPU-extracted embeddings and score the separated estimates
    separator = separation.deep.DeepClustering(item['mix'], num_sources=2)
    estimates = separator(features)
    evaluator = evaluation.BSSEvalScale(
        list(item['sources'].values()), estimates, compute_permutation=True)
    scores = evaluator.evaluate()
    output_path = os.path.join(RESULTS_DIR, f"{item['mix'].file_name}.json")
    with open(output_path, 'w') as f:
        json.dump(scores, f)
# GPU feature extraction runs serially; clustering + BSSEval run in threads
# (first item runs inline as a smoke test)
pool = ThreadPoolExecutor(max_workers=NUM_WORKERS)
for i, item in enumerate(tqdm.tqdm(test_dataset)):
    features = forward_on_gpu(item['mix'])
    if i == 0:
        separate_and_evaluate(item, features)
    else:
        pool.submit(separate_and_evaluate, item, features)
pool.shutdown(wait=True)
json_files = glob.glob(f"{RESULTS_DIR}/*.json")
df = evaluation.aggregate_score_files(json_files)
overall = df.mean()
headers = ["", f"OVERALL (N = {df.shape[0]})", ""]
metrics = ["SAR", "SDR", "SIR"]
data = np.array(df.mean()).T
data = [metrics, data]
termtables.print(data, header=headers, padding=(0, 1), alignment="ccc")
| 7,718 | 35.239437 | 93 | py |
nussl | nussl-master/recipes/wham/mask_inference.py | """
This recipe trains and evaluates a mask infeerence model
on the clean data from the WHAM dataset with 8k. It's divided into
three big chunks: data preparation, training, and evaluation.
Final output of this script:
┌────────────────────┬────────────────────┬───────────────────┐
│ │ OVERALL (N = 6000) │ │
╞════════════════════╪════════════════════╪═══════════════════╡
│ SAR │ SDR │ SIR │
├────────────────────┼────────────────────┼───────────────────┤
│ 11.184634122040006 │ 10.030014257966346 │ 16.82237234679051 │
└────────────────────┴────────────────────┴───────────────────┘
Last run on 3/20/20.
"""
import nussl
from nussl import ml, datasets, utils, separation, evaluation
import os
import torch
import multiprocessing
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor
from torch import optim
import logging
import matplotlib.pyplot as plt
import shutil
import json
import tqdm
import glob
import numpy as np
import termtables
# ----------------------------------------------------
# ------------------- SETTING UP ---------------------
# ----------------------------------------------------
# seed this recipe for reproducibility
utils.seed(0)
# set up logging
logging.basicConfig(
    format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
    datefmt='%Y-%m-%d:%H:%M:%S', level=logging.INFO)
# make sure this is set to WHAM root directory
WHAM_ROOT = os.getenv("WHAM_ROOT")
CACHE_ROOT = os.getenv("CACHE_ROOT")
NUM_WORKERS = multiprocessing.cpu_count() // 4
OUTPUT_DIR = os.path.expanduser('~/.nussl/recipes/wham_mi/run2')
RESULTS_DIR = os.path.join(OUTPUT_DIR, 'results')
MODEL_PATH = os.path.join(OUTPUT_DIR, 'checkpoints', 'best.model.pth')
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
BATCH_SIZE = 25
MAX_EPOCHS = 100
# set to False on the first run so the transformed dataset gets cached to disk
CACHE_POPULATED = True
LEARNING_RATE = 1e-3
PATIENCE = 5
GRAD_NORM = 2e-5
# start from a clean results / tensorboard state for this run
shutil.rmtree(os.path.join(RESULTS_DIR), ignore_errors=True)
os.makedirs(RESULTS_DIR, exist_ok=True)
shutil.rmtree(os.path.join(OUTPUT_DIR, 'tensorboard'), ignore_errors=True)
def construct_transforms(cache_location):
    # stft will be 32ms wlen, 8ms hop, sqrt-hann, at 8khz sample rate by default
    tfm = datasets.transforms.Compose([
        datasets.transforms.MagnitudeSpectrumApproximation(), # take stfts and get ibm
        datasets.transforms.MagnitudeWeights(), # get magnitude weights
        datasets.transforms.ToSeparationModel(), # convert to tensors
        datasets.transforms.Cache(cache_location), # up to here gets cached
        datasets.transforms.GetExcerpt(400) # get 400 frame excerpts (3.2 seconds)
    ])
    return tfm
def cache_dataset(_dataset):
    # run every item through the transform chain once so results land in the cache
    cache_dataloader = torch.utils.data.DataLoader(
        _dataset, num_workers=NUM_WORKERS, batch_size=BATCH_SIZE)
    ml.train.cache_dataset(cache_dataloader)
    _dataset.cache_populated = True
tfm = construct_transforms(os.path.join(CACHE_ROOT, 'tr'))
dataset = datasets.WHAM(WHAM_ROOT, split='tr', transform=tfm,
                        cache_populated=CACHE_POPULATED)
tfm = construct_transforms(os.path.join(CACHE_ROOT, 'cv'))
val_dataset = datasets.WHAM(WHAM_ROOT, split='cv', transform=tfm,
                            cache_populated=CACHE_POPULATED)
if not CACHE_POPULATED:
    # cache datasets for speed
    cache_dataset(dataset)
    cache_dataset(val_dataset)
# ----------------------------------------------------
# -------------------- TRAINING ----------------------
# ----------------------------------------------------
# reload after caching
train_sampler = torch.utils.data.sampler.RandomSampler(dataset)
val_sampler = torch.utils.data.sampler.RandomSampler(val_dataset)
dataloader = torch.utils.data.DataLoader(dataset, num_workers=NUM_WORKERS,
                                         batch_size=BATCH_SIZE, sampler=train_sampler)
val_dataloader = torch.utils.data.DataLoader(val_dataset, num_workers=NUM_WORKERS,
                                             batch_size=BATCH_SIZE, sampler=val_sampler)
n_features = dataset[0]['mix_magnitude'].shape[1]
# builds a baseline model with 4 recurrent layers, 600 hidden units, bidirectional
config = ml.networks.builders.build_recurrent_mask_inference(
    n_features, 600, 4, True, 0.3, 2, ['sigmoid'],
    normalization_class='BatchNorm'
)
model = ml.SeparationModel(config).to(DEVICE)
logging.info(model)
optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, factor=0.5, patience=PATIENCE)
# set up the loss function: permutation-invariant L1 on the masked estimates
loss_dictionary = {
    'PermutationInvariantLoss': {'args': ['L1Loss'], 'weight': 1.0}
}
# set up closures for the forward and backward pass on one batch
train_closure = ml.train.closures.TrainClosure(
    loss_dictionary, optimizer, model)
val_closure = ml.train.closures.ValidationClosure(
    loss_dictionary, model)
# set up engines for training and validation
trainer, validator = ml.train.create_train_and_validation_engines(
    train_closure, val_closure, device=DEVICE)
# attach handlers for visualizing output and saving the model
ml.train.add_stdout_handler(trainer, validator)
ml.train.add_validate_and_checkpoint(
    OUTPUT_DIR, model, optimizer, dataset,
    trainer, val_data=val_dataloader, validator=validator)
ml.train.add_tensorboard_handler(OUTPUT_DIR, trainer)
# add a handler to set up patience
@trainer.on(ml.train.ValidationEvents.VALIDATION_COMPLETED)
def step_scheduler(trainer):
    val_loss = trainer.state.epoch_history['validation/loss'][-1]
    scheduler.step(val_loss)
# add a handler to set up gradient clipping
@trainer.on(ml.train.BackwardsEvents.BACKWARDS_COMPLETED)
def clip_gradient(trainer):
    torch.nn.utils.clip_grad_norm_(model.parameters(), GRAD_NORM)
# train the model
trainer.run(dataloader, max_epochs=MAX_EPOCHS)
# ----------------------------------------------------
# ------------------- EVALUATION ---------------------
# ----------------------------------------------------
test_dataset = datasets.WHAM(WHAM_ROOT, sample_rate=8000, split='tt')
# make a deep clustering separator with an empty audio signal initially
# this one will live on gpu and be used in a threadpool for speed
dme = separation.deep.DeepMaskEstimation(
    nussl.AudioSignal(), model_path=MODEL_PATH, device='cuda')
def forward_on_gpu(audio_signal):
    # set the audio signal of the object to this item's mix
    dme.audio_signal = audio_signal
    masks = dme.forward()
    return masks
def separate_and_evaluate(item, masks):
    # masks were computed on the shared GPU object; apply them and score here
    separator = separation.deep.DeepMaskEstimation(item['mix'])
    estimates = separator(masks)
    evaluator = evaluation.BSSEvalScale(
        list(item['sources'].values()), estimates, compute_permutation=True)
    scores = evaluator.evaluate()
    output_path = os.path.join(RESULTS_DIR, f"{item['mix'].file_name}.json")
    with open(output_path, 'w') as f:
        json.dump(scores, f)
# GPU forward pass runs serially in this thread; separation + BSSEval are
# farmed out to a thread pool (first item runs inline as a smoke test)
pool = ThreadPoolExecutor(max_workers=NUM_WORKERS)
for i, item in enumerate(tqdm.tqdm(test_dataset)):
    masks = forward_on_gpu(item['mix'])
    if i == 0:
        separate_and_evaluate(item, masks)
    else:
        pool.submit(separate_and_evaluate, item, masks)
pool.shutdown(wait=True)
json_files = glob.glob(f"{RESULTS_DIR}/*.json")
df = evaluation.aggregate_score_files(json_files)
# Add the metrics summary to the model and save it for future use.
model = evaluation.associate_metrics(model, df, test_dataset)
model.save()
overall = df.mean()
headers = ["", f"OVERALL (N = {df.shape[0]})", ""]
metrics = ["SAR", "SDR", "SIR"]
data = np.array(df.mean()).T
data = [metrics, data]
termtables.print(data, header=headers, padding=(0, 1), alignment="ccc")
| 7,759 | 35.093023 | 89 | py |
nussl | nussl-master/tests/conftest.py | import pytest
from nussl import efz_utils
import tempfile
import os
import musdb
import zipfile
import scaper
import random
import glob
import nussl
from nussl.datasets import transforms
from nussl import datasets
import numpy as np
import torch
import json
def _unzip(path_to_zip, target_path):
with zipfile.ZipFile(path_to_zip, 'r') as zip_ref:
zip_ref.extractall(target_path)
fix_dir = os.path.expanduser('~/.nussl/tests/')
os.makedirs(fix_dir, exist_ok=True)
OVERWRITE_REGRESSION_DATA = False
@pytest.fixture(scope="module")
def benchmark_audio():
audio_files = {}
keys = ['K0140.wav', 'K0149.wav', 'dev1_female3_inst_mix.wav']
with tempfile.TemporaryDirectory() as tmp_dir:
_dir = tmp_dir if fix_dir is None else fix_dir
for k in keys:
audio_files[k] = efz_utils.download_audio_file(k, _dir)
yield audio_files
@pytest.fixture(scope="module")
def musdb_tracks():
with tempfile.TemporaryDirectory() as tmp_dir:
_dir = tmp_dir if fix_dir is None else fix_dir
db = musdb.DB(root=_dir, download=True)
yield db
@pytest.fixture(scope="module")
def toy_datasets():
dataset_locations = {}
keys = ['babywsj_oW0F0H9.zip']
with tempfile.TemporaryDirectory() as tmp_dir:
_dir = tmp_dir if fix_dir is None else fix_dir
for k in keys:
target_folder = os.path.join(_dir, os.path.splitext(k)[0])
data = efz_utils.download_benchmark_file(k, _dir)
_unzip(data, target_folder)
dataset_locations[k] = target_folder
yield dataset_locations
@pytest.fixture(scope="module")
def mix_source_folder(toy_datasets):
wsj_sources = toy_datasets['babywsj_oW0F0H9.zip']
audio_files = glob.glob(
f"{wsj_sources}/**/*.wav", recursive=True)
n_sources = 2
n_mixtures = 10
with tempfile.TemporaryDirectory() as tmp_dir:
_dir = tmp_dir if fix_dir is None else fix_dir
_dir = os.path.join(_dir, 'mix_source_folder')
for i in range(n_mixtures):
sources = []
for n in range(n_sources):
path = random.choice(audio_files)
source = nussl.AudioSignal(path)
sources.append(source)
min_length = min([s.signal_length for s in sources])
for n in range(n_sources):
output_path = os.path.join(_dir, f's{n}', f'{i}.wav')
os.makedirs(os.path.dirname(output_path), exist_ok=True)
sources[n].truncate_samples(min_length)
sources[n].write_audio_to_file(output_path)
mix = sum(sources)
output_path = os.path.join(_dir, 'mix', f'{i}.wav')
os.makedirs(os.path.dirname(output_path), exist_ok=True)
mix.write_audio_to_file(output_path)
yield _dir
@pytest.fixture(scope="module")
def scaper_folder(toy_datasets):
wsj_sources = toy_datasets['babywsj_oW0F0H9.zip']
fg_path = os.path.join(
wsj_sources, 'babywsj', 'dev')
n_sources = 2
n_mixtures = 10
duration = 3
ref_db = -40
with tempfile.TemporaryDirectory() as tmp_dir:
_dir = tmp_dir if fix_dir is None else fix_dir
_dir = os.path.join(_dir, 'scaper')
os.makedirs(_dir, exist_ok=True)
for i in range(n_mixtures):
sc = scaper.Scaper(
duration, fg_path, fg_path, random_state=i)
sc.ref_db = ref_db
sc.sr = 16000
for j in range(n_sources):
sc.add_event(
label=('choose', []),
source_file=('choose', []),
source_time=('const', 0),
event_time=('const', 0),
event_duration=('const', duration),
snr=('const', 0),
pitch_shift=None,
time_stretch=None
)
audio_path = os.path.join(_dir, f'{i}.wav')
jams_path = os.path.join(_dir, f'{i}.jams')
sc.generate(audio_path, jams_path, save_isolated_events=True)
yield _dir
@pytest.fixture(scope="module")
def mix_and_sources(scaper_folder):
dataset = datasets.Scaper(scaper_folder)
item = dataset[0]
return item['mix'], item['sources']
@pytest.fixture(scope="module")
def music_mix_and_sources(musdb_tracks):
dataset = datasets.MUSDB18(
folder=musdb_tracks.root, download=False,
transform=transforms.SumSources(
[['drums', 'bass', 'other']]))
item = dataset[0]
return item['mix'], item['sources']
@pytest.fixture(scope="module")
def drum_and_vocals(musdb_tracks):
dataset = datasets.MUSDB18(
folder=musdb_tracks.root, download=False)
item = dataset[0]
return item['sources']['drums'], item['sources']['vocals']
@pytest.fixture(scope="module")
def random_noise():
def _random_noise(duration, ch, kind):
if kind == 'ones':
x = np.ones((int(ch), int(duration * 44100)))
elif kind == 'random':
x = np.random.randn(int(ch), int(duration * 44100))
signal = nussl.AudioSignal(
audio_data_array=x,
sample_rate=44100
)
signal.peak_normalize()
return signal
return _random_noise
@pytest.fixture(scope="module")
def bad_scaper_folder(toy_datasets):
    """Like scaper_folder, but WITHOUT isolated events saved (hence 'bad')."""
    source_root = os.path.join(
        toy_datasets['babywsj_oW0F0H9.zip'], 'babywsj', 'dev')
    num_sources, num_mixtures = 2, 10
    clip_duration = 3
    with tempfile.TemporaryDirectory() as tmp_dir:
        # persist into fix_dir when set so repeated runs reuse the data
        out_dir = os.path.join(
            tmp_dir if fix_dir is None else fix_dir, 'bad_scaper')
        os.makedirs(out_dir, exist_ok=True)
        for idx in range(num_mixtures):
            sc = scaper.Scaper(
                clip_duration, source_root, source_root, random_state=idx)
            sc.ref_db = -40
            sc.sr = 16000
            for _ in range(num_sources):
                sc.add_event(
                    label=('choose', []),
                    source_file=('choose', []),
                    source_time=('const', 0),
                    event_time=('const', 0),
                    event_duration=('const', clip_duration),
                    snr=('const', 0),
                    pitch_shift=None,
                    time_stretch=None,
                )
            sc.generate(
                os.path.join(out_dir, f'{idx}.wav'),
                os.path.join(out_dir, f'{idx}.jams'),
                save_isolated_events=False)
        yield out_dir
@pytest.fixture(scope="module")
def one_item(scaper_folder):
    """Yield one random dataset item with a faked batch dimension of 1."""
    pipeline = transforms.Compose([
        transforms.PhaseSensitiveSpectrumApproximation(),
        transforms.GetAudio(),
        transforms.ToSeparationModel()
    ])
    dataset = nussl.datasets.Scaper(
        scaper_folder, transform=pipeline,
        stft_params=nussl.STFTParams(window_length=512, hop_length=128))
    data = dataset[np.random.randint(len(dataset))]
    # fake a batch dimension on every tensor entry
    for key in data:
        if torch.is_tensor(data[key]):
            data[key] = data[key].unsqueeze(0)
    yield data
@pytest.fixture(scope="module")
def check_against_regression_data():
    """Factory fixture: compare metric scores with saved regression JSON.

    Creates the regression file when it is missing (or when
    OVERWRITE_REGRESSION_DATA is set); otherwise every stored metric must
    match the new scores to within ``atol``.
    """
    def check(scores, path, atol=1e-4):
        if not os.path.exists(path):
            # bootstrap: first run writes the regression data
            with open(path, 'w') as f:
                json.dump(scores, f, indent=4)
        if OVERWRITE_REGRESSION_DATA:
            with open(path, 'w') as f:
                json.dump(scores, f, indent=4)
        else:
            with open(path, 'r') as f:
                expected = json.load(f)
            for source_key in scores:
                # bookkeeping entries are not metrics
                if source_key in ('permutation', 'combination'):
                    continue
                for metric in scores[source_key]:
                    if metric in expected[source_key]:
                        assert np.allclose(
                            scores[source_key][metric],
                            expected[source_key][metric],
                            atol=atol
                        )
    return check
| 8,129 | 30.511628 | 74 | py |
nussl | nussl-master/tests/evaluation/test_evaluation.py | import nussl
import pytest
from nussl.core.masks import SoftMask, BinaryMask
import numpy as np
from nussl.evaluation.evaluation_base import AudioSignalListMismatchError
import torch
import json
import tempfile
import os
import glob
@pytest.fixture(scope='module')
def estimated_and_true_sources(musdb_tracks):
    """Build oracle and random source estimates for one random MUSDB track.

    Yields a dict with the true stems ('true'), soft-mask oracle estimates
    ('oracle'), uniform-random soft-mask estimates ('random'), the
    corresponding masks, and the stem keys in stem-id order.
    """
    # pick one track at random from the MUSDB corpus
    i = np.random.randint(len(musdb_tracks))
    track = musdb_tracks[i]
    mixture = nussl.AudioSignal(
        audio_data_array=track.audio,
        sample_rate=track.rate)
    mixture.stft()
    stems = track.stems
    oracle_sources = []
    random_sources = []
    true_sources = []
    random_masks = []
    oracle_masks = []
    keys = []
    # iterate sources in stem-id order so list positions are deterministic
    for k, v in sorted(track.sources.items(), key=lambda x: x[1].stem_id):
        true_sources.append(nussl.AudioSignal(
            audio_data_array=stems[v.stem_id],
            sample_rate=track.rate
        ))
        keys.append(k)
        # random estimate: a uniform-random soft mask applied to the mix
        mask_data = np.random.rand(*mixture.stft_data.shape)
        random_mask = SoftMask(mask_data)
        random_source = mixture.apply_mask(random_mask)
        random_source.istft(truncate_to_length=mixture.signal_length)
        random_sources.append(random_source)
        random_masks.append(random_mask)
        # oracle estimate: ratio-style mask built from the true source's STFT
        # (the 1e-8 terms avoid division by zero in silent bins)
        source_stft = true_sources[-1].stft()
        mask_data = (
            (np.abs(source_stft) + 1e-8) /
            (np.maximum(np.abs(mixture.stft_data), np.abs(source_stft)) + 1e-8)
        )
        oracle_mask = SoftMask(mask_data)
        oracle_source = mixture.apply_mask(oracle_mask)
        oracle_source.istft(truncate_to_length=mixture.signal_length)
        oracle_sources.append(oracle_source)
        oracle_masks.append(oracle_mask)
    yield {
        'oracle': oracle_sources,
        'random': random_sources,
        'true': true_sources,
        'keys': keys,
        'oracle_masks': oracle_masks,
        'random_masks': random_masks,
    }
def fake_preprocess_a(self):
    """Stub preprocess(): random reference/estimate arrays, 4 sources each."""
    shape = (44100, 2, 4)
    return np.random.rand(*shape), np.random.rand(*shape)
def fake_preprocess_b(self):
    """Stub preprocess(): 2 references against 4 estimates (mismatched counts)."""
    base = (44100, 2)
    return np.random.rand(*base, 2), np.random.rand(*base, 4)
def fake_evaluate_helper(self, references, estimates):
    """Stub evaluate_helper(): one random per-channel score dict per source."""
    n_sources = references.shape[-1]
    n_channels = references.shape[-2]
    metric_names = ('metric1', 'metric2', 'metric3')
    return [
        {name: [np.random.rand()] * n_channels for name in metric_names}
        for _ in range(n_sources)
    ]
def test_evaluation_base(estimated_and_true_sources):
    """EvaluationBase must derive source labels from paths, overrides, or defaults."""
    true_sources = estimated_and_true_sources['true']
    estimated_sources = estimated_and_true_sources['random']
    keys = estimated_and_true_sources['keys']
    default_source_labels = [f'source_{i}' for i in range(len(true_sources))]
    # no paths, no explicit labels -> generic source_i labels
    evaluator = nussl.evaluation.EvaluationBase(true_sources, estimated_sources)
    assert (
        evaluator.source_labels == default_source_labels)
    # labels fall back to each signal's input-file path
    for k, t in zip(keys, true_sources):
        t.path_to_input_file = k
    evaluator = nussl.evaluation.EvaluationBase(true_sources, estimated_sources)
    assert evaluator.source_labels == keys
    # explicit labels win outright
    source_labels = [f'mysource_{i}' for i in range(len(keys))]
    evaluator = nussl.evaluation.EvaluationBase(
        true_sources, estimated_sources, source_labels=source_labels)
    assert evaluator.source_labels == source_labels
    # too few explicit labels: pad from the file paths...
    source_labels = [f'mysource_{i}' for i in range(len(keys) - 2)]
    evaluator = nussl.evaluation.EvaluationBase(
        true_sources, estimated_sources, source_labels=source_labels)
    assert evaluator.source_labels == source_labels + keys[2:]
    # ...and from the generic defaults when paths are missing
    for k, t in zip(keys, true_sources):
        t.path_to_input_file = None
    evaluator = nussl.evaluation.EvaluationBase(
        true_sources, estimated_sources, source_labels=source_labels)
    assert evaluator.source_labels == source_labels + default_source_labels[2:]
    # a fresh evaluator has no scores yet
    assert evaluator.scores == {}
def test_evaluation_run(estimated_and_true_sources, monkeypatch):
    """Check candidate counts and evaluate() across permutation settings.

    preprocess/evaluate_helper are stubbed out so only EvaluationBase's
    combinatorial bookkeeping is exercised.
    """
    monkeypatch.setattr(
        nussl.evaluation.EvaluationBase, 'preprocess', fake_preprocess_a)
    monkeypatch.setattr(
        nussl.evaluation.EvaluationBase, 'evaluate_helper', fake_evaluate_helper)
    true_sources = estimated_and_true_sources['true']
    estimated_sources = estimated_and_true_sources['random']
    keys = estimated_and_true_sources['keys']
    for k, t in zip(keys, true_sources):
        t.path_to_input_file = k
    # no permutation: one combination, one ordering
    evaluator = nussl.evaluation.EvaluationBase(true_sources, estimated_sources)
    candidates = evaluator.get_candidates()
    assert len(candidates[0]) == 1
    assert len(candidates[1]) == 1
    evaluator.evaluate()
    check_scores(evaluator)
    evaluator = nussl.evaluation.EvaluationBase(true_sources, estimated_sources,
                                                compute_permutation=True)
    candidates = evaluator.get_candidates()
    # 4 matched sources with permutation: 1 combination, 4! = 24 orderings
    # NOTE(review): an older comment here said "4 choose 2", which does not
    # match the asserted counts below.
    assert len(candidates[0]) == 1
    assert len(candidates[1]) == 24
    evaluator.evaluate()
    check_scores(evaluator)
    evaluator = nussl.evaluation.EvaluationBase(true_sources, estimated_sources,
                                                compute_permutation=True,
                                                best_permutation_key='metric2')
    candidates = evaluator.get_candidates()
    # should be 1 * 4! = 24; best_permutation_key only changes the tiebreak
    assert len(candidates[0]) == 1
    assert len(candidates[1]) == 24
    evaluator.evaluate()
    check_scores(evaluator)
    # mismatched counts (2 references vs 4 estimates): combinations appear
    monkeypatch.setattr(
        nussl.evaluation.EvaluationBase, 'preprocess', fake_preprocess_b)
    evaluator = nussl.evaluation.EvaluationBase(true_sources[:2], estimated_sources,
                                                compute_permutation=True)
    candidates = evaluator.get_candidates()
    # (4 choose 2) = 6 combinations, 2! = 2 orderings each
    assert len(candidates[0]) == 6
    assert len(candidates[1]) == 2
    evaluator.evaluate()
    check_scores(evaluator)
def check_scores(evaluator):
    """Assert the evaluator produced a well-formed scores dictionary."""
    assert evaluator.scores is not None
    assert isinstance(evaluator.scores, dict)
    # bookkeeping keys are always present...
    for required in ('combination', 'permutation'):
        assert required in evaluator.scores.keys()
    # ...alongside one entry per labeled source
    for label in evaluator.source_labels:
        assert label in evaluator.scores
def test_bss_evaluation_base(estimated_and_true_sources, monkeypatch):
    """preprocess() should stack signals into (samples, channels, sources) arrays."""
    monkeypatch.setattr(
        nussl.evaluation.EvaluationBase, 'evaluate_helper', fake_evaluate_helper)
    true_sources = estimated_and_true_sources['true']
    estimated_sources = estimated_and_true_sources['random']
    for key, source in zip(estimated_and_true_sources['keys'], true_sources):
        source.path_to_input_file = key
    evaluator = nussl.evaluation.BSSEvaluationBase(
        true_sources, estimated_sources)
    references, estimates = evaluator.preprocess()
    expected_shape = (
        true_sources[0].signal_length,
        true_sources[0].num_channels,
        len(true_sources),
    )
    assert references.shape == expected_shape
    assert estimates.shape == expected_shape
def test_bss_eval_v4(estimated_and_true_sources):
    """BSSEvalV4 should score oracle estimates above random ones on SDR/SIR."""
    true_sources = estimated_and_true_sources['true']
    estimated_sources = estimated_and_true_sources['random']
    keys = estimated_and_true_sources['keys']
    for k, t in zip(keys, true_sources):
        t.path_to_input_file = k
    # random estimates first
    evaluator = nussl.evaluation.BSSEvalV4(
        true_sources, estimated_sources)
    references, estimates = evaluator.preprocess()
    scores = evaluator.evaluate_helper(references, estimates)
    assert isinstance(scores, list)
    random_scores = evaluator.evaluate()
    check_scores(evaluator)
    # then the oracle (ratio-mask) estimates
    estimated_sources = estimated_and_true_sources['oracle']
    evaluator = nussl.evaluation.BSSEvalV4(
        true_sources, estimated_sources)
    oracle_scores = evaluator.evaluate()
    # the oracle score should beat the random score by a lot on average
    # for SDR and SIR
    for key in evaluator.source_labels:
        for metric in ['SDR', 'SIR']:
            _oracle = oracle_scores[key][metric]
            _random = random_scores[key][metric]
            assert np.alltrue(_oracle > _random)
def test_scale_bss_eval(estimated_and_true_sources):
    """scale_bss_eval should rate oracle estimates above random ones (channel 0)."""
    true_sources = estimated_and_true_sources['true']
    estimated_sources = estimated_and_true_sources['oracle']
    random_sources = estimated_and_true_sources['random']
    evaluator = nussl.evaluation.BSSEvalScale(
        true_sources, estimated_sources)
    references, estimates = evaluator.preprocess()
    # keep only the first channel; score the first source (index 0)
    _references = references[:, 0, :]
    _estimates = estimates[:, 0, :]
    # remove the per-channel mean from the summed mixture
    mixture = sum(true_sources).audio_data.T
    mixture -= mixture.mean(axis=0)
    tSISDR, tSISIR, tSISAR, tSDSDR, tSNR, tSRR, tSDRi, tSDSDRi, tSNRi, _, _, _ = (
        nussl.evaluation.scale_bss_eval(
            _references, _estimates[..., 0], mixture[..., 0], 0,
        )
    )
    # same computation with the random estimates
    evaluator = nussl.evaluation.BSSEvalScale(
        true_sources, random_sources)
    references, estimates = evaluator.preprocess()
    _references = references[:, 0, :]
    _estimates = estimates[:, 0, :]
    # NOTE(review): this unpack reuses tSDRi/tSDSDRi/tSNRi, clobbering the
    # oracle values above — presumably rSDRi/rSDSDRi/rSNRi were intended.
    # Harmless today because those names are never asserted on.
    rSISDR, rSISIR, rSISAR, rSDSDR, rSNR, rSRR, tSDRi, tSDSDRi, tSNRi, _, _, _ = (
        nussl.evaluation.scale_bss_eval(
            _references, _estimates[..., 0], mixture[..., 0], 0,
        )
    )
    # the oracle estimate must dominate the random one on every scale metric
    assert tSISDR > rSISDR
    assert tSISIR > rSISIR
    assert tSDSDR > rSDSDR
    assert tSNR > rSNR
def test_bss_eval_scale(estimated_and_true_sources):
    """End-to-end BSSEvalScale with and without SIR/SAR computation."""
    compute_sir_sar = [True, False]
    for _compute_sir_sar in compute_sir_sar:
        with tempfile.TemporaryDirectory() as tmpdir:
            true_sources = estimated_and_true_sources['true']
            estimated_sources = estimated_and_true_sources['random']
            keys = estimated_and_true_sources['keys']
            for k, t in zip(keys, true_sources):
                t.path_to_input_file = k
            # random estimates first
            evaluator = nussl.evaluation.BSSEvalScale(
                true_sources, estimated_sources,
                compute_sir_sar=_compute_sir_sar)
            references, estimates = evaluator.preprocess()
            scores = evaluator.evaluate_helper(
                references, estimates,
                compute_sir_sar=_compute_sir_sar)
            assert isinstance(scores, list)
            random_scores = evaluator.evaluate()
            check_scores(evaluator)
            # then the oracle (ratio-mask) estimates
            estimated_sources = estimated_and_true_sources['oracle']
            evaluator = nussl.evaluation.BSSEvalScale(
                true_sources, estimated_sources,
                compute_sir_sar=_compute_sir_sar)
            oracle_scores = evaluator.evaluate()
            # the oracle score should beat the random score by a lot on average
            # for SDR and SIR
            for key in evaluator.source_labels:
                for metric in ['SI-SDR', 'SNR', 'SD-SDR']:
                    _oracle = oracle_scores[key][metric]
                    _random = random_scores[key][metric]
                    assert np.alltrue(_oracle > _random)
            # round-trip through JSON and exercise the aggregation helpers
            save_scores(tmpdir, oracle_scores, f'oracle.json')
            save_scores(tmpdir, random_scores, f'random.json')
            check_aggregate(tmpdir)
            check_associate_metrics(tmpdir)
def save_scores(directory, scores, name):
    """Write an evaluation-scores dict to ``directory`` as JSON.

    ``name`` may be given with or without a ``.json`` suffix; exactly one
    suffix is used either way. (Callers pass e.g. ``'oracle.json'``, which
    previously produced files named ``oracle.json.json``.)
    """
    filename = name if name.endswith('.json') else f'{name}.json'
    with open(os.path.join(directory, filename), 'w') as f:
        json.dump(scores, f)
def check_aggregate(directory):
    """Aggregate every score JSON in ``directory`` and sanity-check report cards."""
    score_files = glob.glob(f"{directory}/*.json")
    df = nussl.evaluation.aggregate_score_files(score_files)
    # result is discarded — presumably a coverage/smoke instantiation
    nussl.separation.deep.DeepMaskEstimation(nussl.AudioSignal())
    per_source = nussl.evaluation.report_card(df, 'Testing notes', decimals=5)
    assert 'Testing notes' in per_source
    overall = nussl.evaluation.report_card(
        df, 'Testing notes', report_each_source=False)
    print(overall)
    # the overall-only card should be shorter than the per-source one
    assert len(overall) < len(per_source)
def check_associate_metrics(directory):
    """Attach aggregated metrics to a model via associate_metrics and verify them."""
    json_files = glob.glob(f"{directory}/*.json")
    df = nussl.evaluation.aggregate_score_files(json_files)
    n_sources = 2
    duration = 3
    sample_rate = 44100
    min_freq, max_freq = 110, 1000
    def make_mix(dataset, i):
        # item function for OnTheFly: a mixture of random sine tones
        sources = {}
        freqs = []
        for i in range(n_sources):  # NOTE(review): shadows the outer item index i
            freq = np.random.randint(min_freq, max_freq)
            freqs.append(freq)
            dt = 1 / sample_rate
            source_data = np.arange(0.0, duration, dt)
            source_data = np.sin(2 * np.pi * freq * source_data)
            source_signal = dataset._load_audio_from_array(
                audio_data=source_data, sample_rate=sample_rate)
            # scale so the sum of sources stays in range
            sources[f'sine{i}'] = source_signal * 1 / n_sources
        mix = sum(sources.values())
        output = {
            'mix': mix,
            'sources': sources,
            'metadata': {
                'frequencies': freqs
            }
        }
        return output
    dataset = nussl.datasets.OnTheFly(make_mix, 10)
    n_features = 257
    mi_config = nussl.ml.networks.builders.build_recurrent_mask_inference(
        n_features, 50, 1, False, 0.0, 2, 'sigmoid',
    )
    model = nussl.ml.SeparationModel(mi_config)
    model = nussl.evaluation.associate_metrics(model, df, dataset)
    # per-metric summary stats only; source/file breakdowns are dropped
    assert 'evaluation' in model.metadata.keys()
    assert 'source' not in model.metadata['evaluation'].keys()
    assert 'file' not in model.metadata['evaluation'].keys()
    stats_keys = ['mean', 'median', 'std']
    for metric in model.metadata['evaluation'].values():
        assert all(s in metric.keys() for s in stats_keys)
    # dataset provenance is recorded as well
    assert 'test_dataset' in model.metadata.keys()
    sm_keys = ['name', 'stft_params', 'sample_rate',
               'num_channels', 'folder', 'transforms']
    assert all(k in model.metadata['test_dataset'].keys() for k in sm_keys)
def test_eval_permutation(estimated_and_true_sources):
    """Evaluators with compute_permutation=True should recover reversed orderings."""
    true_sources = estimated_and_true_sources['true'][:2]
    estimated_sources = estimated_and_true_sources['oracle'][:2]
    keys = estimated_and_true_sources['keys']
    for k, t in zip(keys, true_sources):
        t.path_to_input_file = k
    # two sources, estimates reversed -> permutation (1, 0)
    evaluator = nussl.evaluation.BSSEvalV4(
        true_sources, estimated_sources[::-1],
        compute_permutation=True)
    scores = evaluator.evaluate()
    assert scores['permutation'] == (1, 0)
    # all four sources, reversed -> permutation (3, 2, 1, 0)
    true_sources = estimated_and_true_sources['true']
    estimated_sources = estimated_and_true_sources['oracle']
    evaluator = nussl.evaluation.BSSEvalScale(
        true_sources, estimated_sources[::-1],
        compute_permutation=True)
    scores = evaluator.evaluate()
    assert scores['permutation'] == (3, 2, 1, 0)
    oracle_masks = estimated_and_true_sources['oracle_masks']
    estimated_masks = estimated_and_true_sources['oracle_masks'][::-1]
    oracle_masks = [o.mask_to_binary() for o in oracle_masks]
    estimated_masks = [r.mask_to_binary() for r in estimated_masks]
    # NOTE(review): the PrecisionRecallFScore instance below is constructed but
    # never assigned, so the evaluate() that follows re-runs the BSSEvalScale
    # evaluator from above — presumably `evaluator =` was intended here.
    # Confirm before changing: PrecisionRecallFScore is not constructed with
    # compute_permutation=True, so a naive fix may break the assertion.
    nussl.evaluation.PrecisionRecallFScore(
        oracle_masks, estimated_masks)
    scores = evaluator.evaluate()
    assert scores['permutation'] == (3, 2, 1, 0)
def test_eval_precision_recall_fscore(estimated_and_true_sources):
    """PrecisionRecallFScore requires binary masks of matching shapes."""
    oracle_masks = estimated_and_true_sources['oracle_masks']
    random_masks = estimated_and_true_sources['random_masks']
    # soft masks are rejected with a ValueError
    pytest.raises(ValueError,
                  nussl.evaluation.PrecisionRecallFScore, oracle_masks, random_masks
                  )
    random_extra_mask = [BinaryMask(np.random.rand(100, 10, 2) > .5)]
    oracle_masks = [o.mask_to_binary() for o in oracle_masks]
    random_masks = [r.mask_to_binary() for r in random_masks]
    # an extra mask with a mismatched shape is rejected too
    pytest.raises(ValueError,
                  nussl.evaluation.PrecisionRecallFScore,
                  oracle_masks + random_extra_mask,
                  random_masks + random_extra_mask
                  )
    # a single mask pair flattens to (time*freq, channels, 1)
    evaluator = nussl.evaluation.PrecisionRecallFScore(
        oracle_masks[0], random_masks[0])
    references, estimates = evaluator.preprocess()
    shape = (
        oracle_masks[0].mask.shape[0] * oracle_masks[0].mask.shape[1],
        oracle_masks[0].num_channels, 1)
    assert references.shape == shape
    assert estimates.shape == shape
    # a list of masks stacks along the last axis
    evaluator = nussl.evaluation.PrecisionRecallFScore(
        oracle_masks, random_masks)
    references, estimates = evaluator.preprocess()
    shape = (
        oracle_masks[0].mask.shape[0] * oracle_masks[0].mask.shape[1],
        oracle_masks[0].num_channels, len(oracle_masks))
    assert references.shape == shape
    assert estimates.shape == shape
    scores = evaluator.evaluate()
    check_scores(evaluator)
| 16,758 | 33.412731 | 84 | py |
nussl | nussl-master/tests/core/test_utils.py | import nussl
import numpy as np
from nussl.separation.base import MaskSeparationBase, SeparationBase
from nussl.core.masks import BinaryMask, SoftMask, MaskBase
import pytest
import torch
import random
import matplotlib.pyplot as plt
import os
import tempfile
def test_utils_seed():
    """nussl.utils.seed must make torch/numpy/random draws reproducible."""
    seeds = [0, 123, 666, 15, 2]

    def _draw():
        # one sample from each RNG that seed() is supposed to control
        return (
            torch.randn(100, 10),
            np.random.rand(100, 10),
            random.randint(10, 10000),
        )

    # run once with cudnn untouched, once with determinstic cudnn
    for set_cudnn in (False, True):
        for seed in seeds:
            nussl.utils.seed(seed, set_cudnn=set_cudnn)
            first = _draw()
            nussl.utils.seed(seed, set_cudnn=set_cudnn)
            second = _draw()
            # same seed -> identical draws
            for a, b in zip(first, second):
                assert np.allclose(a, b)
        # a different seed must produce different draws
        nussl.utils.seed(10, set_cudnn=set_cudnn)
        third = _draw()
        for a, b in zip(first, third):
            assert not np.allclose(a, b)
def test_utils_find_peak_indices():
    """find_peak_indices on 1-D/2-D arrays, plus its error and warning paths."""
    array = np.arange(0, 100)
    peak = nussl.utils.find_peak_indices(array, 1)[0]
    # the max of an ascending ramp is its last index
    assert peak == 99
    array = np.arange(0, 100).reshape(10, 10)
    # min_dist may be a scalar, a 1-tuple, or a 2-tuple
    peak = nussl.utils.find_peak_indices(array, 3, min_dist=0)
    assert peak == [[9, 9], [9, 8], [9, 7]]
    peak = nussl.utils.find_peak_indices(array, 3, min_dist=(0,))
    assert peak == [[9, 9], [9, 8], [9, 7]]
    peak = nussl.utils.find_peak_indices(array, 3, min_dist=(0, 0))
    assert peak == [[9, 9], [9, 8], [9, 7]]
    # peaks are positional, so subtracting the mean must not change them
    peak = nussl.utils.find_peak_indices(array - np.mean(array), 3, min_dist=0)
    assert peak == [[9, 9], [9, 8], [9, 7]]
    # a threshold above 1 is invalid
    pytest.raises(
        ValueError, nussl.utils.find_peak_indices, array, 10, threshold=1.1)
    # asking for more peaks than exist only warns
    pytest.warns(
        UserWarning, nussl.utils.find_peak_indices, array, 1000, threshold=1.0)
    # only 1-D and 2-D inputs are supported
    pytest.raises(
        ValueError, nussl.utils.find_peak_indices, np.ones((10, 10, 10)), 3, min_dist=0)
def test_utils_complex_randn():
    """complex_randn returns a complex128 matrix of the requested shape."""
    shape = (100, 100)
    mat = nussl.utils.complex_randn(shape)
    assert mat.shape == shape
    assert mat.dtype == np.complex128
def test_utils_audio_signal_list(benchmark_audio):
    """verify_audio_signal_list_{strict,lax} accept/reject the right lists."""
    path = benchmark_audio['dev1_female3_inst_mix.wav']
    signals = [nussl.AudioSignal(path) for i in range(3)]
    # identical signals pass both the strict and lax checks, unchanged
    assert signals == nussl.utils.verify_audio_signal_list_strict(signals)
    assert signals == nussl.utils.verify_audio_signal_list_lax(signals)
    # a bare AudioSignal is wrapped into a one-element list
    assert [signals[0]] == nussl.utils.verify_audio_signal_list_lax(signals[0])
    dur = signals[0].signal_duration
    # differing durations fail the strict check.
    # NOTE(review): the doubled `for i ... for i ...` makes 30 signals, not 3;
    # probably a leftover edit, though it does not change what is asserted.
    signals = [
        nussl.AudioSignal(
            path,
            duration=np.random.rand() * dur) for i in range(3)
        for i in range(10)
    ]
    pytest.raises(
        ValueError, nussl.utils.verify_audio_signal_list_strict, signals)
    # differing sample rates fail the strict check
    signals = [nussl.AudioSignal(path) for i in range(3)]
    signals[-1].resample(8000)
    pytest.raises(
        ValueError, nussl.utils.verify_audio_signal_list_strict, signals)
    # differing channel counts fail the strict check
    signals = [nussl.AudioSignal(path) for i in range(3)]
    signals[-1].to_mono()
    pytest.raises(
        ValueError, nussl.utils.verify_audio_signal_list_strict, signals)
    # non-AudioSignal entries fail even the lax check
    signals = [nussl.AudioSignal(path) for i in range(3)]
    signals[-1] = [0, 1, 2]
    pytest.raises(
        ValueError, nussl.utils.verify_audio_signal_list_lax, signals)
    # empty AudioSignals fail the lax check
    signals = [nussl.AudioSignal(path) for i in range(3)]
    signals[-1] = nussl.AudioSignal()
    pytest.raises(
        ValueError, nussl.utils.verify_audio_signal_list_lax, signals)
    # so does anything that is not a list or a signal at all
    pytest.raises(
        ValueError, nussl.utils.verify_audio_signal_list_lax, {'test': 'garbage'})
def test_utils_audio_signals_to_musdb_track(musdb_tracks):
    """Rebuilding a musdb track from AudioSignals preserves (or shapes) its stems."""
    track = musdb_tracks[0]
    mixture = nussl.AudioSignal(
        audio_data_array=track.audio,
        sample_rate=track.rate)
    mixture.stft()
    stems = track.stems
    true_sources = {}
    fake_sources = {}
    for k, v in sorted(track.sources.items(), key=lambda x: x[1].stem_id):
        true_sources[k] = nussl.AudioSignal(
            audio_data_array=stems[v.stem_id],
            sample_rate=track.rate
        )
        # fake source: the mixture under a uniform-random soft mask
        mask_data = np.random.rand(*mixture.stft_data.shape)
        soft_mask = SoftMask(mask_data)
        _source = mixture.apply_mask(soft_mask)
        _source.istft(truncate_to_length=mixture.signal_length)
        fake_sources[k] = _source
    separated_track = nussl.utils.audio_signals_to_musdb_track(
        mixture, fake_sources, nussl.constants.STEM_TARGET_DICT
    )
    reconstructed_track = nussl.utils.audio_signals_to_musdb_track(
        mixture, true_sources, nussl.constants.STEM_TARGET_DICT
    )
    # true sources round-trip exactly; fake ones must at least keep the shape
    assert np.allclose(track.stems, reconstructed_track.stems)
    assert track.stems.shape == separated_track.stems.shape
def test_utils_musdb_track_to_audio_signals(musdb_tracks):
    """Converting a musdb track yields signals matching its audio, rate, and stems."""
    track = musdb_tracks[0]
    stems = track.stems
    mixture, sources = nussl.utils.musdb_track_to_audio_signals(track)
    # mixture matches the track audio (transposed to channels-first)
    assert np.allclose(mixture.audio_data, track.audio.T)
    assert mixture.sample_rate == track.rate
    # each source matches its stem, keeps the rate, and records its name
    for name, info in sorted(track.sources.items(), key=lambda x: x[1].stem_id):
        source = sources[name]
        assert np.allclose(source.audio_data, stems[info.stem_id].T)
        assert source.sample_rate == track.rate
        assert name in source.path_to_input_file
def test_utils_format():
    """_format('0123~5aBc') should normalize to '01235abc'."""
    formatted = nussl.utils._format('0123~5aBc')
    assert formatted == '01235abc'
def test_utils_get_axis():
    """_get_axis(mat, axis, 0) drops the selected axis of a 3-D array."""
    mat = np.random.rand(100, 10, 1)
    expected_shapes = {0: (10, 1), 1: (100, 1), 2: (100, 10)}
    for axis, shape in expected_shapes.items():
        assert nussl.utils._get_axis(mat, axis, 0).shape == shape
def test_utils_slice_along_dim():
    """_slice_along_dim slices numpy and torch tensors along dims 0-3 only."""
    data = [
        np.random.rand(10, 10, 10, 10, 10),
        torch.rand(10, 10, 10, 10, 10)
    ]
    for _data in data:
        dims = range(len(_data.shape))
        for d in dims:
            # random [start, end) range within the dim (possibly empty)
            _first = np.random.randint(_data.shape[d])
            _second = np.random.randint(_data.shape[d])
            start = min(_first, _second)
            end = max(_first, _second)
            if d > 3:
                # dims beyond 3 are unsupported
                pytest.raises(ValueError,
                              nussl.utils._slice_along_dim,
                              _data, d, start, end)
            else:
                sliced_data = nussl.utils._slice_along_dim(
                    _data, d, start, end)
                # only the sliced dim shrinks; all others are untouched
                expected_shape = list(_data.shape)
                expected_shape[d] = end - start
                expected_shape = tuple(expected_shape)
                assert sliced_data.shape == expected_shape
    # slicing a dim the array does not have is also an error
    data = np.random.rand(10, 10)
    pytest.raises(ValueError, nussl.utils._slice_along_dim,
                  data, 2, 0, 10)
# All figures produced by the visualization tests land here; create it up
# front so plt.savefig never fails on a missing directory.
PLOT_DIRECTORY = 'tests/utils/plots'
os.makedirs(PLOT_DIRECTORY, exist_ok=True)
def test_utils_visualize_spectrogram(music_mix_and_sources):
    """Smoke-test visualize_spectrogram variants and save the figure.

    Draws the stereo spectrogram, a mono-collapsed one, and a mel-axis one
    on a single 3-row grid, then writes the result to PLOT_DIRECTORY.
    """
    mix, sources = music_mix_and_sources
    plt.figure(figsize=(10, 9))
    # Use a consistent 3-row grid. The original used 211/212 for the first
    # two axes and 313 for the third, which made the third subplot overlap
    # the second instead of landing below it.
    plt.subplot(311)
    nussl.utils.visualize_spectrogram(mix)
    plt.subplot(312)
    nussl.utils.visualize_spectrogram(mix, do_mono=True)
    plt.subplot(313)
    nussl.utils.visualize_spectrogram(mix, y_axis='mel')
    OUTPUT = os.path.join(PLOT_DIRECTORY, 'viz_spectrogram.png')
    plt.tight_layout()
    plt.savefig(OUTPUT)
def test_utils_visualize_waveplot(music_mix_and_sources):
    """Smoke-test visualize_waveform (stereo and mono) and save the figure."""
    mix, _ = music_mix_and_sources
    plt.figure(figsize=(10, 6))
    plt.subplot(211)
    nussl.utils.visualize_waveform(mix)
    plt.subplot(212)
    nussl.utils.visualize_waveform(mix, do_mono=True)
    plt.tight_layout()
    plt.savefig(os.path.join(PLOT_DIRECTORY, 'viz_waveform.png'))
def test_utils_visualize_sources(music_mix_and_sources):
    """Smoke-test the mask/waveform source visualizers with dict and list inputs."""
    mix, sources = music_mix_and_sources
    # dict of sources, default colors, legend on
    colors = None
    plt.figure(figsize=(10, 6))
    plt.subplot(211)
    nussl.utils.visualize_sources_as_masks(
        sources, db_cutoff=-70, alpha_amount=2.0,
        y_axis='mel', colors=colors)
    plt.subplot(212)
    nussl.utils.visualize_sources_as_waveform(
        sources, colors=colors, show_legend=True)
    OUTPUT = os.path.join(PLOT_DIRECTORY, 'viz_sources_dict.png')
    plt.tight_layout()
    plt.savefig(OUTPUT)
    # list of sources, explicit colors, mono, legend off
    sources = list(sources.values())
    colors = ['blue', 'red']
    plt.figure(figsize=(10, 6))
    plt.subplot(211)
    nussl.utils.visualize_sources_as_masks(
        sources, db_cutoff=-70, alpha_amount=2.0,
        y_axis='mel', do_mono=True, colors=colors)
    plt.subplot(212)
    nussl.utils.visualize_sources_as_waveform(
        sources, do_mono=True, colors=colors,
        show_legend=False)
    OUTPUT = os.path.join(PLOT_DIRECTORY, 'viz_sources_list.png')
    plt.tight_layout()
    plt.savefig(OUTPUT)
def test_close_temp_files():
    '''
    Create a bunch of temp files and then make sure they've been closed and
    deleted. This test is taken wholesale from Scaper.
    '''
    # With delete=True
    tmpfiles = []
    with nussl.utils._close_temp_files(tmpfiles):
        for _ in range(5):
            tmpfiles.append(
                tempfile.NamedTemporaryFile(suffix='.wav', delete=True))
    for tf in tmpfiles:
        assert tf.file.closed
        assert not os.path.isfile(tf.name)
    # With delete=False: the context manager must remove them itself
    tmpfiles = []
    with nussl.utils._close_temp_files(tmpfiles):
        for _ in range(5):
            tmpfiles.append(
                tempfile.NamedTemporaryFile(suffix='.wav', delete=False))
    for tf in tmpfiles:
        assert tf.file.closed
        assert not os.path.isfile(tf.name)
    # with an exception before exiting: cleanup must still happen on unwind,
    # and the exception must propagate out of the context manager
    try:
        tmpfiles = []
        with nussl.utils._close_temp_files(tmpfiles):
            tmpfiles.append(
                tempfile.NamedTemporaryFile(suffix='.wav', delete=True))
            raise ValueError
    except ValueError:
        for tf in tmpfiles:
            assert tf.file.closed
            assert not os.path.isfile(tf.name)
    else:
        assert False, 'Exception was not reraised.'
| 10,429 | 29.408163 | 88 | py |
nussl | nussl-master/tests/separation/test_deep.py | from nussl.separation.base import DeepMixin, SeparationException
from nussl.separation.base.deep_mixin import OMITTED_TRANSFORMS
from nussl import datasets, ml, separation, evaluation
import nussl
import torch
from torch import optim
import tempfile
import pytest
import os
import numpy as np
# Local folder used to persist trainer output between runs (instead of a tempdir).
fix_dir = 'tests/local/trainer'
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
# On CPU the models cannot realistically overfit, so shrink the epoch length
# and relax the SDR threshold; on GPU run the real overfitting check.
EPOCH_LENGTH = 500 if DEVICE == 'cuda' else 1  # use 1 for quick testing, 500 for actual testing
SDR_CUTOFF = 5 if DEVICE == 'cuda' else -20  # use -10 for quick testing, 5 for actual testing
@pytest.fixture(scope="module")
def overfit_model(scaper_folder):
    """Train a small chimera model to overfit a single Scaper item.

    Yields (path to the best checkpoint, the processed training item) so
    downstream tests can check the model separates its own training data.
    """
    nussl.utils.seed(0)  # deterministic training for reproducible tests
    tfms = datasets.transforms.Compose([
        datasets.transforms.PhaseSensitiveSpectrumApproximation(),
        datasets.transforms.MagnitudeWeights(),
        datasets.transforms.ToSeparationModel(),
        datasets.transforms.GetExcerpt(100)
    ])
    dataset = datasets.Scaper(
        scaper_folder, transform=tfms)
    # restrict the dataset to one item so the model can memorize it
    dataset.items = [dataset.items[5]]
    dataloader = torch.utils.data.DataLoader(dataset)
    n_features = dataset[0]['mix_magnitude'].shape[1]
    config = ml.networks.builders.build_recurrent_chimera(
        n_features, 50, 1, True, 0.3, 20, 'sigmoid', 2, 'sigmoid',
        normalization_class='InstanceNorm'
    )
    model = ml.SeparationModel(config)
    model = model.to(DEVICE)
    optimizer = optim.Adam(model.parameters(), lr=1e-2)
    # chimera training: deep clustering head plus a PIT mask head
    loss_dictionary = {
        'DeepClusteringLoss': {
            'weight': 0.2
        },
        'PermutationInvariantLoss': {
            'args': ['L1Loss'],
            'weight': 0.8
        }
    }
    train_closure = ml.train.closures.TrainClosure(
        loss_dictionary, optimizer, model)
    trainer, _ = ml.train.create_train_and_validation_engines(
        train_closure, device=DEVICE
    )
    with tempfile.TemporaryDirectory() as tmpdir:
        # fix_dir (when set) keeps checkpoints around between test runs
        _dir = fix_dir if fix_dir else tmpdir
        ml.train.add_stdout_handler(trainer)
        ml.train.add_validate_and_checkpoint(
            _dir, model, optimizer, dataset, trainer)
        trainer.run(dataloader, max_epochs=1, epoch_length=EPOCH_LENGTH)
        model_path = os.path.join(
            trainer.state.output_folder, 'checkpoints', 'best.model.pth')
        yield model_path, dataset.process_item(dataset.items[0])
def test_deep_mixin(overfit_model):
    """Exercise DeepMixin: transform handling, model input prep, resampling."""
    model_path, item = overfit_model
    deep_mixin = separation.deep.DeepClustering(
        item['mix'], 2, model_path, mask_type='binary')
    deep_mixin.load_model(model_path)
    deep_mixin.audio_signal = item['mix']
    deep_mixin.channel_dim = -1
    # train-time-only transforms must have been stripped at load time
    assert not isinstance(deep_mixin.transform, OMITTED_TRANSFORMS)
    if isinstance(deep_mixin.transform, datasets.transforms.Compose):
        for t in deep_mixin.transform.transforms:
            assert not isinstance(t, OMITTED_TRANSFORMS)
    mix_item = {'mix': item['mix']}  # NOTE(review): unused — leftover?
    deep_mixin._get_input_data_for_model()
    # prepared input records the signal's STFT parameters
    assert deep_mixin.metadata['stft_params'] == deep_mixin.audio_signal.stft_params
    # each tensor carries the channel count on its leading dimension
    for key, val in deep_mixin.input_data.items():
        if torch.is_tensor(val):
            assert val.shape[0] == deep_mixin.metadata['num_channels']
    output = deep_mixin.model(deep_mixin.input_data)
    output_tfm = deep_mixin._get_transforms(
        datasets.transforms.MagnitudeWeights())
    output_tfm = deep_mixin._get_transforms(
        datasets.transforms.MagnitudeSpectrumApproximation())
    assert isinstance(
        output_tfm, datasets.transforms.MagnitudeSpectrumApproximation)
    # a mismatched sample rate is fixed up to match the model's metadata
    item['mix'].resample(8000)
    deep_mixin.audio_signal = item['mix']
    deep_mixin._get_input_data_for_model()
    assert deep_mixin.audio_signal.sample_rate == deep_mixin.metadata['sample_rate']
    # extra kwargs are forwarded into the model's input dictionary
    dummy_data = {'one_hot': np.random.rand(100)}
    input_data = deep_mixin._get_input_data_for_model(**dummy_data)
    assert 'one_hot' in input_data
def test_deep_mixin_metadata(overfit_model):
    """get_metadata should validate, format, and redact model metadata."""
    model_path, item = overfit_model
    deep_mixin = separation.deep.DeepClustering(
        item['mix'], 2, model_path, mask_type='binary')
    deep_mixin.load_model(model_path)
    saved_metadata = deep_mixin.metadata
    # missing metadata is an error
    deep_mixin.metadata = None
    with pytest.raises(ValueError):
        deep_mixin.get_metadata()
    deep_mixin.metadata = saved_metadata
    # plain and stringified forms
    assert type(deep_mixin.get_metadata()) == dict
    assert type(deep_mixin.get_metadata(to_str=True)) == str
    # the upload form strips local folders and the training history
    uploadable = deep_mixin.get_metadata(for_upload=True)
    for dataset_key in ['train_dataset', 'val_dataset']:
        assert 'folder' not in uploadable[dataset_key]
    assert 'trainer.state.epoch_history' not in uploadable
    print(deep_mixin.get_metadata(to_str=True))
def test_separation_deep_clustering(overfit_model):
    """An overfit DPCL model should separate its training item above SDR_CUTOFF."""
    model_path, item = overfit_model
    dpcl = separation.deep.DeepClustering(
        item['mix'], 2, model_path, mask_type='binary')
    dpcl.forward()  # calls extract_features, for coverage
    item['mix'].write_audio_to_file('tests/local/dpcl_mix.wav')
    sources = item['sources']
    estimates = dpcl()
    # write estimates out for manual listening/debugging
    for i, e in enumerate(estimates):
        e.write_audio_to_file(f'tests/local/dpcl_overfit{i}.wav')
    evaluator = evaluation.BSSEvalScale(
        list(sources.values()), estimates, compute_permutation=True)
    scores = evaluator.evaluate()
    for key in evaluator.source_labels:
        for metric in ['SI-SDR', 'SI-SIR']:
            _score = scores[key][metric]
            for val in _score:
                assert val > SDR_CUTOFF
    # with no model outputs, feature extraction must fail cleanly
    dpcl.model.output_keys = []
    pytest.raises(SeparationException, dpcl.extract_features)
def test_separation_deep_mask_estimation(overfit_model):
    """DeepMaskEstimation should separate the overfit item for both mask types."""
    model_path, item = overfit_model
    for mask_type in ['soft', 'binary']:
        dme = separation.deep.DeepMaskEstimation(
            item['mix'], model_path, mask_type=mask_type)
        # confidence requires the separator to have been run first
        pytest.raises(SeparationException, dme.confidence)
        item['mix'].write_audio_to_file('tests/local/dme_mix.wav')
        sources = item['sources']
        estimates = dme()
        # write estimates out for manual listening/debugging
        for i, e in enumerate(estimates):
            e.write_audio_to_file(f'tests/local/dme_overfit{i}.wav')
        evaluator = evaluation.BSSEvalScale(
            list(sources.values()), estimates, compute_permutation=True)
        scores = evaluator.evaluate()
        for key in evaluator.source_labels:
            for metric in ['SI-SDR', 'SI-SIR']:
                _score = scores[key][metric]
                for val in _score:
                    assert val > SDR_CUTOFF
        confidence = dme.confidence()
        # with only 'mask' left in output_keys, confidence() must fail...
        dme.model.output_keys = ['mask']
        dme()
        pytest.raises(SeparationException, dme.confidence)
        # ...and with no outputs at all, run() itself must fail
        dme.model.output_keys = []
        pytest.raises(SeparationException, dme.run)
@pytest.fixture(scope="module")
def overfit_audio_model(scaper_folder):
    """Train a small end-to-end (waveform) model to overfit one Scaper item.

    Yields (path to the best checkpoint, the processed training item).
    """
    nussl.utils.seed(0)  # deterministic training for reproducible tests
    tfms = datasets.transforms.Compose([
        datasets.transforms.GetAudio(),
        datasets.transforms.ToSeparationModel(),
        datasets.transforms.GetExcerpt(
            32000, time_dim=1, tf_keys=['mix_audio', 'source_audio'])
    ])
    dataset = datasets.Scaper(
        scaper_folder, transform=tfms)
    # restrict the dataset to one item so the model can memorize it
    dataset.items = [dataset.items[5]]
    dataloader = torch.utils.data.DataLoader(
        dataset, batch_size=1)
    config = ml.networks.builders.build_recurrent_end_to_end(
        256, 256, 64, 'sqrt_hann', 50, 2,
        True, 0.3, 2, 'sigmoid', num_audio_channels=1,
        mask_complex=False, rnn_type='lstm',
        mix_key='mix_audio')
    model = ml.SeparationModel(config)
    model = model.to(DEVICE)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    # time-domain training: permutation-invariant SI-SDR on the waveforms
    loss_dictionary = {
        'PermutationInvariantLoss': {
            'args': ['SISDRLoss'],
            'weight': 1.0,
            'keys': {'audio': 'estimates', 'source_audio': 'targets'}
        }
    }
    train_closure = ml.train.closures.TrainClosure(
        loss_dictionary, optimizer, model)
    trainer, _ = ml.train.create_train_and_validation_engines(
        train_closure, device=DEVICE
    )
    with tempfile.TemporaryDirectory() as tmpdir:
        # fix_dir (when set) keeps checkpoints around between test runs
        _dir = os.path.join(fix_dir, 'dae') if fix_dir else tmpdir
        ml.train.add_stdout_handler(trainer)
        ml.train.add_validate_and_checkpoint(
            _dir, model, optimizer, dataset, trainer)
        ml.train.add_progress_bar_handler(trainer)
        trainer.run(dataloader, max_epochs=1, epoch_length=EPOCH_LENGTH)
        model_path = os.path.join(
            trainer.state.output_folder, 'checkpoints', 'best.model.pth')
        yield model_path, dataset.process_item(dataset.items[0])
def test_separation_deep_audio_estimation(overfit_audio_model):
    """DeepAudioEstimation should separate the overfit item above a score cutoff."""
    model_path, item = overfit_audio_model
    dae = separation.deep.DeepAudioEstimation(item['mix'], model_path)
    # Write audio to disk for manual inspection of the separation quality.
    item['mix'].write_audio_to_file('tests/local/dae_mix.wav')
    sources = item['sources']
    estimates = dae()
    for i, e in enumerate(estimates):
        e.write_audio_to_file(f'tests/local/dae_overfit{i}.wav')
    evaluator = evaluation.BSSEvalScale(
        list(sources.values()), estimates, compute_permutation=True)
    scores = evaluator.evaluate()
    # Every SI-SDR / SI-SIR value must clear the cutoff since the model
    # was overfit on exactly this item.
    for key in evaluator.source_labels:
        for metric in ['SI-SDR', 'SI-SIR']:
            _score = scores[key][metric]
            for val in _score:
                assert val > SDR_CUTOFF
    # A model with no output keys cannot produce estimates: running it
    # must raise.
    dae.model.output_keys = []
    pytest.raises(SeparationException, dae.run)
| 9,458 | 32.661922 | 96 | py |
nussl | nussl-master/tests/datasets/test_base_dataset.py | import pytest
from nussl.datasets import BaseDataset, transforms
from nussl.datasets.base_dataset import DataSetException
import nussl
from nussl import STFTParams
import numpy as np
import soundfile as sf
import itertools
import tempfile
import os
import torch
class BadTransform(object):
    """Deliberately broken transform: calling it never returns a dict."""

    def __init__(self, fake=None):
        # Stored only so the constructor accepts the usual keyword argument.
        self.fake = fake

    def __call__(self, data):
        # Violates the transform contract, which requires a dictionary.
        result = 'not a dictionary'
        return result
class BadDataset(BaseDataset):
    """Dataset that intentionally violates the BaseDataset contract."""

    def get_items(self, folder):
        # Contract: must return a list. Return a dict instead.
        return {'anything': 'not a list'}

    def process_item(self, item):
        # Contract: must return a dict. Return a string instead.
        return 'not a dictionary'
def dummy_process_item(self, item):
    """Stand-in ``process_item``: load the file and reuse it as the only source."""
    signal = self._load_audio_file(item)
    return {
        'mix': signal,
        'sources': {'key': signal}
    }
def dummy_process_item_by_audio(self, item):
    """Stand-in ``process_item`` that loads via a raw array rather than a path."""
    samples, rate = sf.read(item)
    signal = self._load_audio_from_array(samples, rate)
    return {
        'mix': signal,
        'sources': {'key': signal}
    }
def initialize_bad_dataset_and_run():
    """Build a BadDataset with a BadTransform and fetch its first item."""
    dataset = BadDataset('test', transform=BadTransform())
    _ = dataset[0]
def test_dataset_base(benchmark_audio, monkeypatch):
    """Exercise BaseDataset's contract checks, transforms, and DataLoader use."""
    keys = [benchmark_audio[k] for k in benchmark_audio]
    def dummy_get(self, folder):
        return keys
    # Each patch below fixes one contract violation at a time; the next
    # violation in line should then raise.
    pytest.raises(DataSetException, initialize_bad_dataset_and_run)
    monkeypatch.setattr(BadDataset, 'get_items', dummy_get)
    pytest.raises(DataSetException, initialize_bad_dataset_and_run)
    monkeypatch.setattr(BadDataset, 'process_item', dummy_process_item)
    # get_items/process_item are now valid, so the bad transform is what fails.
    pytest.raises(transforms.TransformException, initialize_bad_dataset_and_run)
    monkeypatch.setattr(BaseDataset, 'get_items', dummy_get)
    monkeypatch.setattr(BaseDataset, 'process_item', dummy_process_item)
    _dataset = BaseDataset('test')
    assert len(_dataset) == len(keys)
    audio_signal = nussl.AudioSignal(keys[0])
    assert _dataset[0]['mix'] == audio_signal
    _dataset = BaseDataset('test', transform=BadTransform())
    pytest.raises(transforms.TransformException, _dataset.__getitem__, 0)
    # With a real transform, spectral keys appear in the output dict.
    psa = transforms.MagnitudeSpectrumApproximation()
    _dataset = BaseDataset('test', transform=psa)
    output = _dataset[0]
    assert 'source_magnitudes' in output
    assert 'mix_magnitude' in output
    assert 'ideal_binary_mask' in output
    # Same checks, but loading audio from an array instead of a file path.
    monkeypatch.setattr(
        BaseDataset, 'process_item', dummy_process_item_by_audio)
    psa = transforms.MagnitudeSpectrumApproximation()
    _dataset = BaseDataset('test', transform=psa)
    output = _dataset[0]
    assert 'source_magnitudes' in output
    assert 'mix_magnitude' in output
    assert 'ideal_binary_mask' in output
    _dataset.transform = transforms.Compose([
        transforms.MagnitudeSpectrumApproximation(),
        transforms.ToSeparationModel()
    ])
    # Dataset must also work through a multi-worker DataLoader, with
    # batches matching direct indexing (shuffle=False keeps order).
    dataloader = torch.utils.data.DataLoader(_dataset, shuffle=False, num_workers=8)
    assert len(list(dataloader)) == len(_dataset)
    for idx, batch in enumerate(dataloader):
        assert torch.allclose(batch['mix_magnitude'][0], _dataset[idx]['mix_magnitude'])
def test_dataset_base_filter(benchmark_audio, monkeypatch):
    """filter_items_by_condition keeps only items the predicate accepts."""
    keys = [benchmark_audio[k] for k in benchmark_audio]
    def dummy_get(self, folder):
        return keys
    monkeypatch.setattr(BaseDataset, 'get_items', dummy_get)
    monkeypatch.setattr(BaseDataset, 'process_item', dummy_process_item)
    _dataset = BaseDataset('test')
    min_length = 7  # in seconds
    # self here refers to the dataset
    def remove_short_audio(self, item):
        processed_item = self.process_item(item)
        mix_length = processed_item['mix'].signal_duration
        if mix_length < min_length:
            return False
        return True
    _dataset.filter_items_by_condition(remove_short_audio)
    # Everything that survives the filter must be at least min_length long.
    for item in _dataset:
        assert item['mix'].signal_duration >= min_length
    # A predicate that does not return a bool must raise.
    def bad_filter_func(self, item):
        return 'not a bool!'
    pytest.raises(
        DataSetException, _dataset.filter_items_by_condition, bad_filter_func)
def test_dataset_base_audio_signal_params(benchmark_audio, monkeypatch):
    """Sweep stft/sample-rate/channel options and check they are enforced."""
    keys = [benchmark_audio[k] for k in benchmark_audio]
    def dummy_get(self, folder):
        return keys
    monkeypatch.setattr(BaseDataset, 'get_items', dummy_get)
    monkeypatch.setattr(
        BaseDataset, 'process_item', dummy_process_item_by_audio)
    stft_params = [
        STFTParams(
            window_length=256,
            hop_length=32,
            window_type='triang'),
        None
    ]
    sample_rates = [4000, None]
    num_channels = [1, 2, None]
    strict_sample_rate = [False, True]
    # Cartesian product over every combination of constructor options.
    product = itertools.product(
        stft_params, sample_rates, num_channels, strict_sample_rate)
    def _get_outputs(dset):
        # Materialize every item of the dataset into a list.
        outputs = []
        for i in range(len(dset)):
            outputs.append(dset[i])
        return outputs
    for s, sr, nc, s_sr in product:
        if s_sr and sr is not None:
            # Asking for a strict sample rate while also forcing a resample
            # is contradictory, so construction must fail.
            pytest.raises(
                DataSetException, BaseDataset, 'test', stft_params=s,
                sample_rate=sr, num_channels=nc, strict_sample_rate=s_sr)
            continue
        _dataset = BaseDataset(
            'test', stft_params=s,
            sample_rate=sr, num_channels=nc,
            strict_sample_rate=s_sr)
        outputs = _get_outputs(_dataset)
        # they should all have the same sample rate and stft
        _srs = []
        _stfts = []
        for i, o in enumerate(outputs):
            if sr:
                assert o['mix'].sample_rate == sr
            if s:
                assert o['mix'].stft_params == s
            if nc:
                if o['mix'].num_channels < nc:
                    # Too few channels to satisfy the request: a warning
                    # is expected instead of a hard failure.
                    assert pytest.warns(UserWarning, _dataset.__getitem__, i)
                else:
                    assert o['mix'].num_channels == nc
            _srs.append(o['mix'].sample_rate)
            _stfts.append(o['mix'].stft_params)
        # All items must agree with the first one on rate and STFT settings.
        for _sr, _stft in zip(_srs, _stfts):
            assert _sr == _srs[0]
            assert _stft == _stfts[0]
def test_dataset_base_with_caching(benchmark_audio, monkeypatch):
    """Cache transform: items read back from cache must equal fresh ones.

    Three scenarios: a bare Cache transform, Cache at the end of a Compose,
    and Cache followed by a post-cache transform (GetExcerpt).
    """
    keys = [benchmark_audio[k] for k in benchmark_audio]
    def dummy_get(self, folder):
        return keys
    monkeypatch.setattr(BaseDataset, 'get_items', dummy_get)
    monkeypatch.setattr(
        BaseDataset, 'process_item', dummy_process_item_by_audio)
    # Scenario 1: Cache as the only transform.
    with tempfile.TemporaryDirectory() as tmpdir:
        tfm = transforms.Cache(
            os.path.join(tmpdir, 'cache'), overwrite=True)
        _dataset = BaseDataset('test', transform=tfm, cache_populated=False)
        assert tfm.cache_size == len(_dataset)
        _data_a = _dataset[0]
        _dataset.cache_populated = True
        pytest.raises(transforms.TransformException,
                      _dataset.__getitem__, 1)  # haven't written to this yet!
        assert len(_dataset.post_cache_transforms.transforms) == 1
        _data_b = _dataset[0]
        # Item 0 was written before the switch, so cached == fresh.
        for key in _data_a:
            assert _data_a[key] == _data_b[key]
        _dataset.cache_populated = False
        outputs_a = []
        outputs_b = []
        for i in range(len(_dataset)):
            outputs_a.append(_dataset[i])
        _dataset.cache_populated = True
        for i in range(len(_dataset)):
            outputs_b.append(_dataset[i])
        for _data_a, _data_b in zip(outputs_a, outputs_b):
            for key in _data_a:
                assert _data_a[key] == _data_b[key]
    # Scenario 2: Cache at the end of a Compose pipeline (tensor outputs).
    with tempfile.TemporaryDirectory() as tmpdir:
        tfm = transforms.Compose([
            transforms.MagnitudeSpectrumApproximation(),
            transforms.ToSeparationModel(),
            transforms.Cache(
                os.path.join(tmpdir, 'cache'), overwrite=True),
        ])
        _dataset = BaseDataset('test', transform=tfm, cache_populated=False)
        assert tfm.transforms[-1].cache_size == len(_dataset)
        _data_a = _dataset[0]
        _dataset.cache_populated = True
        pytest.raises(transforms.TransformException,
                      _dataset.__getitem__, 1)  # haven't written to this yet!
        assert len(_dataset.post_cache_transforms.transforms) == 1
        _data_b = _dataset[0]
        for key in _data_a:
            if torch.is_tensor(_data_a[key]):
                assert torch.allclose(_data_a[key], _data_b[key])
            else:
                assert _data_a[key] == _data_b[key]
        _dataset.cache_populated = False
        outputs_a = []
        outputs_b = []
        for i in range(len(_dataset)):
            outputs_a.append(_dataset[i])
        _dataset.cache_populated = True
        for i in range(len(_dataset)):
            outputs_b.append(_dataset[i])
        for _data_a, _data_b in zip(outputs_a, outputs_b):
            for key in _data_a:
                if torch.is_tensor(_data_a[key]):
                    assert torch.allclose(_data_a[key], _data_b[key])
                else:
                    assert _data_a[key] == _data_b[key]
    # Scenario 3: a post-cache transform (GetExcerpt) must still run on
    # cached items, so every tensor comes out with the excerpt length L.
    for L in [100, 400, 1000]:
        with tempfile.TemporaryDirectory() as tmpdir:
            tfm = transforms.Compose([
                transforms.MagnitudeSpectrumApproximation(),
                transforms.ToSeparationModel(),
                transforms.Cache(
                    os.path.join(tmpdir, 'cache'), overwrite=True),
                transforms.GetExcerpt(L)
            ])
            _dataset = BaseDataset('test', transform=tfm, cache_populated=False)
            assert tfm.transforms[-2].cache_size == len(_dataset)
            assert len(_dataset.post_cache_transforms.transforms) == 2
            # First pass populates the cache.
            for i in range(len(_dataset)):
                _ = _dataset[i]
            _dataset.cache_populated = True
            outputs = []
            for i in range(len(_dataset)):
                outputs.append(_dataset[i])
            for _output in outputs:
                for key, val in _output.items():
                    if torch.is_tensor(val):
                        assert val.shape[0] == L
| 10,049 | 30.40625 | 88 | py |
nussl | nussl-master/tests/datasets/test_transforms.py | import pytest
from nussl.datasets import transforms
from nussl.datasets.transforms import TransformException
import nussl
from nussl import STFTParams, evaluation
import numpy as np
from nussl.core.masks import BinaryMask, SoftMask
import itertools
import copy
import torch
import tempfile
import os
stft_tol = 1e-6
def separate_and_evaluate(mix, sources, mask_data):
    """Apply per-source soft masks to ``mix`` and score against ``sources``.

    Args:
        mix: AudioSignal of the mixture (STFT already computed).
        sources: dict mapping source name -> ground-truth AudioSignal.
        mask_data: np.ndarray of masks; last axis indexes sources.

    Returns:
        dict of BSSEvalScale scores.
    """
    estimates = []
    mask_data = normalize_masks(mask_data)
    for i in range(mask_data.shape[-1]):
        mask = SoftMask(mask_data[..., i])
        estimate = mix.apply_mask(mask)
        estimate.istft()
        estimates.append(estimate)
    # Normalized masks partition the mixture, so the estimates must sum
    # back to the mix (up to STFT round-trip error).
    assert np.allclose(
        sum(estimates).audio_data, mix.audio_data, atol=stft_tol)
    sources = [sources[k] for k in sources]
    evaluator = evaluation.BSSEvalScale(
        sources, estimates)
    scores = evaluator.evaluate()
    return scores
def normalize_masks(mask_data):
    """Normalize masks so they sum to one across the source (last) axis.

    Args:
        mask_data (np.ndarray): mask array whose last axis indexes sources.

    Returns:
        np.ndarray: masks normalized along the last axis.
    """
    # Bug fix: the epsilon belongs inside the denominator, where it guards
    # against 0/0 at bins where every mask is zero. The previous code
    # (`mask_data / np.sum(...) + 1e-8`) divided first and then added the
    # epsilon to the result, which could produce NaNs on all-zero bins and
    # left the masks summing to 1 + n_sources * 1e-8 rather than ~1.
    return mask_data / (np.sum(mask_data, axis=-1, keepdims=True) + 1e-8)
def test_transform_msa_psa(musdb_tracks):
    """MSA and PSA transforms: output keys, error handling, and that PSA
    masks clearly beat all-ones mixture masks in SI metrics."""
    track = musdb_tracks[10]
    mix, sources = nussl.utils.musdb_track_to_audio_signals(track)
    data = {
        'mix': mix,
        'sources': sources
    }
    msa = transforms.MagnitudeSpectrumApproximation()
    assert isinstance(str(msa), str)
    psa = transforms.PhaseSensitiveSpectrumApproximation()
    assert isinstance(str(psa), str)
    assert msa.__class__.__name__ in str(msa)
    assert psa.__class__.__name__ in str(psa)
    # 'sources' must map names to AudioSignals; a bare string should raise.
    pytest.raises(TransformException, psa, {'sources': 'blah'})
    pytest.raises(TransformException, msa, {'sources': 'blah'})
    # Both transforms must work with a mix-only dict too.
    _data = {'mix': mix}
    output = msa(_data)
    assert np.allclose(output['mix_magnitude'], np.abs(mix.stft()))
    output = msa(data)
    assert np.allclose(output['mix_magnitude'], np.abs(mix.stft()))
    # The transform sorts source keys in place.
    assert list(data['sources'].keys()) == sorted(list(sources.keys()))
    masks = []
    estimates = []
    # Baseline: all-ones masks (every estimate is just the mixture).
    shape = mix.stft_data.shape + (len(sources),)
    mix_masks = np.ones(shape)
    mix_scores = separate_and_evaluate(mix, data['sources'], mix_masks)
    ibm_scores = separate_and_evaluate(
        mix, data['sources'], data['ideal_binary_mask'])
    # Soft masks from MSA magnitudes (epsilon avoids 0/0 in the ratio).
    output['source_magnitudes'] += 1e-8
    mask_data = (
        output['source_magnitudes'] /
        np.maximum(
            output['mix_magnitude'][..., None],
            output['source_magnitudes'])
    )
    msa_scores = separate_and_evaluate(mix, data['sources'], mask_data)
    _data = {'mix': mix}
    output = psa(_data)
    assert np.allclose(output['mix_magnitude'], np.abs(mix.stft()))
    output = psa(data)
    assert np.allclose(output['mix_magnitude'], np.abs(mix.stft()))
    assert list(data['sources'].keys()) == sorted(list(sources.keys()))
    output['source_magnitudes'] += 1e-8
    mask_data = (
        output['source_magnitudes'] /
        np.maximum(
            output['mix_magnitude'][..., None],
            output['source_magnitudes'])
    )
    psa_scores = separate_and_evaluate(mix, data['sources'], mask_data)
    # PSA-derived masks should beat the mixture baseline by >10 dB on
    # average in the scale-invariant metrics.
    for key in msa_scores:
        if key in ['SI-SDR', 'SI-SIR', 'SI-SAR']:
            diff = np.array(psa_scores[key]) - np.array(mix_scores[key])
            assert diff.mean() > 10
def test_transform_sum_sources(musdb_tracks):
    """SumSources: grouped sources are removed and replaced by their sum."""
    track = musdb_tracks[10]
    mix, sources = nussl.utils.musdb_track_to_audio_signals(track)
    data = {
        'mix': mix,
        'sources': sources
    }
    # Try every 3-way grouping of the available sources.
    groups = itertools.combinations(data['sources'].keys(), 3)
    tfm = None
    for group in groups:
        _data = copy.deepcopy(data)
        tfm = transforms.SumSources([group])
        assert isinstance(str(tfm), str)
        _data = tfm(_data)
        # Originals are gone; the joined key holds their sum.
        for g in group:
            assert g not in _data['sources']
        assert '+'.join(group) in _data['sources']
        summed_sources = sum([sources[k] for k in group])
        assert np.allclose(
            _data['sources']['+'.join(group)].audio_data,
            summed_sources.audio_data
        )
    # Error cases: missing 'sources' key, non-list groups argument, and
    # mismatched group_names length.
    pytest.raises(TransformException, tfm, {'no_key'})
    pytest.raises(TransformException,
                  transforms.SumSources, 'test')
    pytest.raises(TransformException,
                  transforms.SumSources,
                  [['vocals', 'test'], ['test2', 'test3']],
                  ['mygroup']
                  )
def test_transform_compose(musdb_tracks):
    """Compose: chains transforms, surfaces bad ones, and its masks separate."""
    track = musdb_tracks[10]
    mix, sources = nussl.utils.musdb_track_to_audio_signals(track)
    data = {
        'mix': mix,
        'sources': sources,
        'metadata': {
            'labels': ['bass', 'drums', 'other', 'vocals']
        }
    }
    # A transform that does not return a dict must raise through Compose.
    class _BadTransform(object):
        def __call__(self, data):
            return 'not a dictionary'
    com = transforms.Compose([_BadTransform()])
    pytest.raises(TransformException, com, data)
    msa = transforms.MagnitudeSpectrumApproximation()
    tfm = transforms.SumSources(
        [['other', 'drums', 'bass']],
        group_names=['accompaniment']
    )
    assert isinstance(str(tfm), str)
    com = transforms.Compose([tfm, msa])
    # Compose's repr should mention every contained transform.
    assert msa.__class__.__name__ in str(com)
    assert tfm.__class__.__name__ in str(com)
    data = com(data)
    assert np.allclose(data['mix_magnitude'], np.abs(mix.stft()))
    # SumSources appends the new group name to the metadata labels.
    assert data['metadata']['labels'] == [
        'bass', 'drums', 'other', 'vocals', 'accompaniment']
    mask_data = (
        data['source_magnitudes'] /
        np.maximum(
            data['mix_magnitude'][..., None],
            data['source_magnitudes'])
    )
    msa_scores = separate_and_evaluate(mix, data['sources'], mask_data)
    # Baseline: all-ones masks (estimates are just copies of the mix).
    shape = mix.stft_data.shape + (len(sources),)
    mask_data = np.ones(shape)
    mix_scores = separate_and_evaluate(mix, data['sources'], mask_data)
    # MSA masks should beat the baseline by >10 dB in the SI metrics.
    for key in msa_scores:
        if key in ['SI-SDR', 'SI-SIR', 'SI-SAR']:
            diff = np.array(msa_scores[key]) - np.array(mix_scores[key])
            assert diff.mean() > 10
def test_transform_to_separation_model(musdb_tracks):
    """ToSeparationModel: keeps tensor keys, drops raw signals/metadata,
    and transposes arrays to (time, frequency, ...)."""
    track = musdb_tracks[10]
    mix, sources = nussl.utils.musdb_track_to_audio_signals(track)
    data = {
        'mix': mix,
        'sources': sources,
        'metadata': {'labels': []}
    }
    msa = transforms.MagnitudeSpectrumApproximation()
    tdl = transforms.ToSeparationModel()
    assert tdl.__class__.__name__ in str(tdl)
    com = transforms.Compose([msa, tdl])
    data = com(data)
    accepted_keys = ['mix_magnitude', 'source_magnitudes']
    rejected_keys = ['mix', 'sources', 'metadata']
    for a in accepted_keys:
        assert a in data
    for r in rejected_keys:
        assert r not in data
    # Everything left is a tensor, with time and frequency axes swapped
    # relative to the STFT layout (stft() is frequency x time).
    for key in data:
        assert torch.is_tensor(data[key])
        assert data[key].shape[0] == mix.stft().shape[1]
        assert data[key].shape[1] == mix.stft().shape[0]
def test_transform_get_excerpt(musdb_tracks):
    """GetExcerpt: crops/pads tensors or arrays to a fixed length, on both
    time-frequency keys and raw audio keys."""
    track = musdb_tracks[10]
    mix, sources = nussl.utils.musdb_track_to_audio_signals(track)
    msa = transforms.MagnitudeSpectrumApproximation()
    tdl = transforms.ToSeparationModel()
    excerpt_lengths = [400, 1000, 2000]
    for excerpt_length in excerpt_lengths:
        # Tensor path: GetExcerpt inside the Compose pipeline.
        data = {
            'mix': mix,
            'sources': sources,
            'metadata': {'labels': []}
        }
        exc = transforms.GetExcerpt(excerpt_length=excerpt_length)
        assert isinstance(str(exc), str)
        com = transforms.Compose([msa, tdl, exc])
        data = com(data)
        for key in data:
            assert torch.is_tensor(data[key])
            assert data[key].shape[0] == excerpt_length
            assert data[key].shape[1] == mix.stft().shape[0]
        # Sources should still (approximately) sum to the mix magnitude.
        assert torch.mean((data['source_magnitudes'].sum(dim=-1) -
                           data['mix_magnitude']) ** 2).item() < 1e-5
        # Numpy path: convert to arrays first, then apply GetExcerpt alone.
        data = {
            'mix': mix,
            'sources': sources,
            'metadata': {'labels': []}
        }
        exc = transforms.GetExcerpt(excerpt_length=excerpt_length)
        assert isinstance(str(exc), str)
        com = transforms.Compose([msa, tdl])
        data = com(data)
        for key in data:
            data[key] = data[key].cpu().data.numpy()
        data = exc(data)
        for key in data:
            assert data[key].shape[0] == excerpt_length
            assert data[key].shape[1] == mix.stft().shape[0]
        assert np.mean((data['source_magnitudes'].sum(axis=-1) -
                        data['mix_magnitude']) ** 2) < 1e-5
    # Non-array input must raise.
    data = {
        'mix_magnitude': 'not an array or tensor'
    }
    pytest.raises(TransformException, exc, data)
    # Audio path: excerpt raw waveforms along the sample axis (time_dim=1).
    excerpt_lengths = [1009, 16000, 612140]
    ga = transforms.GetAudio()
    for excerpt_length in excerpt_lengths:
        data = {
            'mix': sum(sources.values()),
            'sources': sources,
            'metadata': {'labels': []}
        }
        exc = transforms.GetExcerpt(
            excerpt_length=excerpt_length,
            tf_keys = ['mix_audio', 'source_audio'],
            time_dim=1,
        )
        com = transforms.Compose([ga, tdl, exc])
        data = com(data)
        for key in data:
            assert torch.is_tensor(data[key])
            assert data[key].shape[1] == excerpt_length
        assert torch.allclose(
            data['source_audio'].sum(dim=-1), data['mix_audio'], atol=1e-3)
def test_transform_cache(musdb_tracks):
    """Cache transform: write once, then read back identical items by index."""
    track = musdb_tracks[10]
    mix, sources = nussl.utils.musdb_track_to_audio_signals(track)
    data = {
        'mix': mix,
        'sources': sources,
        'metadata': {'labels': sorted(list(sources.keys()))},
        'index': 0
    }
    with tempfile.TemporaryDirectory() as tmpdir:
        tfm = transforms.Cache(
            os.path.join(tmpdir, 'cache'), cache_size=2, overwrite=True)
        _data_a = tfm(data)
        _info_a = tfm.info
        # Switch to read mode: lookups go through the cache now.
        tfm.overwrite = False
        _data_b = tfm({'index': 0})
        # Missing index key, and an index never written, must both raise.
        pytest.raises(TransformException, tfm, {})
        pytest.raises(TransformException, tfm, {'index': 1})
        for key in _data_a:
            assert _data_a[key] == _data_b[key]
        # Same round-trip, but with Cache at the end of a Compose so
        # tensors are what get cached.
        com = transforms.Compose([
            transforms.MagnitudeSpectrumApproximation(),
            transforms.ToSeparationModel(),
            transforms.Cache(
                os.path.join(tmpdir, 'cache'),
                overwrite=True),
        ])
        _data_a = com(data)
        com.transforms[-1].overwrite = False
        _data_b = com.transforms[-1]({'index': 0})
        for key in _data_a:
            if torch.is_tensor(_data_a[key]):
                assert torch.allclose(_data_a[key], _data_b[key])
            else:
                assert _data_a[key] == _data_b[key]
def test_transforms_labels_to_one_hot(mix_source_folder, scaper_folder):
    """LabelsToOneHot: one-hot matrix tracks present sources and labels."""
    dataset = nussl.datasets.MixSourceFolder(mix_source_folder)
    item = dataset[0]
    tfm = transforms.LabelsToOneHot()
    assert isinstance(str(tfm), str)
    # Two sources, two labels -> identity matrix.
    one_hots = tfm(item)['one_hot_labels']
    assert np.allclose(one_hots, np.eye(2))
    # Removing one source drops its row from the encoding.
    item['sources'].pop('s0')
    one_hots = tfm(item)['one_hot_labels']
    assert np.allclose(one_hots, np.array([0, 1]))
    dataset = nussl.datasets.Scaper(scaper_folder)
    item = dataset[0]
    one_hots = tfm(item)['one_hot_labels']
    assert one_hots.shape[-1] == len(item['metadata']['labels'])
    # Missing labels, then missing metadata entirely, must both raise.
    item['metadata'].pop('labels')
    pytest.raises(TransformException, tfm, item)
    item.pop('metadata')
    pytest.raises(TransformException, tfm, item)
def test_transforms_magnitude_weights(mix_source_folder):
    """MagnitudeWeights: same weights whether computed from the mix signal
    directly or from a precomputed mix magnitude."""
    dataset = nussl.datasets.MixSourceFolder(mix_source_folder)
    item = dataset[0]
    tfm = transforms.MagnitudeWeights()
    assert isinstance(str(tfm), str)
    # Without a mix or magnitude available, the transform must raise.
    pytest.raises(TransformException, tfm, {'sources': []})
    item_from_mix = tfm(item)
    msa = transforms.MagnitudeSpectrumApproximation()
    item = tfm(msa(item))
    assert item['weights'].shape == item['mix_magnitude'].shape
    assert np.allclose(item_from_mix['weights'], item['weights'])
def test_transforms_index_sources(mix_source_folder):
    """IndexSources: keeps only the selected source slice of the magnitudes."""
    dataset = nussl.datasets.MixSourceFolder(mix_source_folder)
    item = dataset[0]
    index = 1
    tfm = transforms.IndexSources('source_magnitudes', index)
    assert isinstance(str(tfm), str)
    # Missing key, then an array with too few sources for the index.
    pytest.raises(TransformException, tfm, {'sources': []})
    pytest.raises(TransformException, tfm,
                  {'source_magnitudes': np.random.randn(100, 100, 1)})
    msa = transforms.MagnitudeSpectrumApproximation()
    # Keep an untouched copy to compare the selected slice against.
    msa_output = copy.deepcopy(msa(item))
    item = tfm(msa(item))
    assert (
        np.allclose(
            item['source_magnitudes'],
            msa_output['source_magnitudes'][..., index, None])
    )
def test_transform_get_audio(mix_source_folder):
    """GetAudio: exposes the raw audio of the mix and of each source.

    Checks that 'mix_audio' matches the mix signal's samples, that
    'source_audio' stacks each source's samples on the last axis in
    sorted-key order, and that the transform also works when only a mix
    (no sources) is present.
    """
    dataset = nussl.datasets.MixSourceFolder(mix_source_folder)
    item = dataset[0]
    # (Removed an unused local `index = 1` left over from a copy of the
    # IndexSources test above.)
    tfm = transforms.GetAudio()
    assert isinstance(str(tfm), str)
    # A data dict without a 'mix' must raise.
    pytest.raises(TransformException, tfm, {'sources': []})
    ga_output = tfm(item)
    assert np.allclose(
        ga_output['mix_audio'], item['mix'].audio_data)
    # Sources are stacked on the last axis in sorted-key order.
    source_names = sorted(list(item['sources'].keys()))
    for i, key in enumerate(source_names):
        assert np.allclose(
            ga_output['source_audio'][..., i], item['sources'][key].audio_data)
    # Mix-only input: drop the source entries and run again.
    item.pop('sources')
    item.pop('source_audio')
    ga_output = tfm(item)
    assert np.allclose(
        ga_output['mix_audio'], item['mix'].audio_data)
| 13,537 | 28.239741 | 79 | py |
nussl | nussl-master/tests/ml/test_gaussian_mixture.py | from nussl.ml.unfold import GaussianMixtureTorch
import torch
import numpy as np
from torch import nn
from sklearn.metrics import adjusted_mutual_info_score
from sklearn import mixture, cluster
def test_ml_gaussian_mixture():
    """GaussianMixtureTorch: perfect clustering with known parameters, and
    at least competitive with sklearn's GMM from random initialization."""
    # Three well-separated 2-D Gaussian components (means 0, 10, 20 apart,
    # small covariances), batched with leading singleton dims.
    loc = torch.randn(1, 1, 3, 2)
    cov = torch.eye(2).view(1, 1, 1, 2, 2)
    cov = cov.repeat(1, 1, 3, 1, 1)
    for i in range(loc.shape[2]):
        loc[:, :, i, :] += (i * 10)
        cov[:, :, i, :, :] *= .1
    n_components = 3
    covariance_types = ['spherical', 'diag', 'tied', 'full']
    for covariance_type in covariance_types:
        mv = torch.distributions.MultivariateNormal(loc, covariance_matrix=cov)
        # 10 batches of 3000 samples; ground-truth labels come from the
        # highest per-component log-likelihood.
        X = mv.sample((10, 1000)).view(10, 3000, 1, -1)
        labels = mv.log_prob(X).cpu().numpy().reshape(10, -1, n_components)
        labels = np.argmax(labels, axis=-1)
        X = X.view(10, 3000, -1)
        gmm = GaussianMixtureTorch(
            n_components=n_components, covariance_type=covariance_type)
        _loc = loc.view(1, 3, 2).expand(10, -1, -1)
        _cov = cov.view(1, 3, 2, 2).expand(10, -1, -1, -1)
        means, covariances = gmm.init_params(X, _loc, _cov)
        # with known means and covariances, ami should be perfect
        resp, prob = gmm._e_step(X, means, covariances)
        predictions = resp.cpu().numpy().reshape(10, -1, n_components)
        predictions = np.argmax(predictions, axis=-1)
        for nb in range(predictions.shape[0]):
            ami = adjusted_mutual_info_score(labels[nb], predictions[nb])
            assert np.allclose(ami, 1.0)
        # with random init, we compare ami with sklearn impl.
        # covariance_type = 'full' has some issues, i think due to init.
        if covariance_type != 'full':
            means, covariances = gmm.init_params(X)
            # Run 50 EM iterations, checking shapes and that the
            # responsibilities stay normalized each step.
            for i in range(50):
                assert (means.shape == (X.shape[0], n_components, X.shape[-1]))
                assert (covariances.shape == (
                    X.shape[0], n_components, X.shape[-1], X.shape[-1]))
                resp, prob = gmm._e_step(X, means, covariances)
                assert torch.allclose(
                    resp.sum(dim=-1, keepdims=True), torch.ones_like(resp))
                means, covariances, prior = gmm._m_step(X, resp)
            resp, prob = gmm._e_step(X, means, covariances)
            predictions = resp.cpu().numpy().reshape(10, -1, n_components)
            predictions = np.argmax(predictions, axis=-1)
            comps = []
            for nb in range(predictions.shape[0]):
                nussl_ami = adjusted_mutual_info_score(labels[nb], predictions[nb])
                sklearn_gmm = mixture.GaussianMixture(
                    n_components=n_components, covariance_type=covariance_type
                )
                npX = X[nb].cpu().numpy().reshape(-1, 2)
                sklearn_gmm.fit(npX)
                sklearn_predictions = sklearn_gmm.predict(npX)
                sklearn_ami = adjusted_mutual_info_score(
                    labels[nb].reshape(-1), sklearn_predictions)
                comps.append(nussl_ami >= sklearn_ami)
            # Both fits are randomly initialized, so only require that we
            # match or beat sklearn on at least 70% of the batches.
            assert sum(comps) >= len(comps) * .7
        # The forward pass must expose responsibilities and log-probs with
        # a trailing component axis.
        forward_pass = gmm(X)
        assert forward_pass['resp'].shape == (X.shape[:-1] + (n_components,))
        assert forward_pass['log_prob'].shape == (X.shape[:-1] + (n_components,))
| 3,347 | 37.045455 | 83 | py |
nussl | nussl-master/tests/ml/test_trainer.py | from nussl import ml, datasets
import tempfile
from torch import optim
import numpy as np
import logging
import os
import torch
# uncomment if you want to see the trainer/engine logs
logging.basicConfig(
    format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
    datefmt='%Y-%m-%d:%H:%M:%S',
    level=logging.INFO
)
# Directory where these tests write checkpoints/tensorboard output; if set
# to a falsy value, each test falls back to a TemporaryDirectory instead.
fix_dir = 'tests/local/trainer'
def test_create_engine(mix_source_folder):
    """End-to-end trainer test: run, checkpoint, then resume from checkpoint."""
    # load dataset with transforms
    tfms = datasets.transforms.Compose([
        datasets.transforms.PhaseSensitiveSpectrumApproximation(),
        datasets.transforms.ToSeparationModel(),
        datasets.transforms.Cache(os.path.join(fix_dir, 'cache'))])
    dataset = datasets.MixSourceFolder(
        mix_source_folder, transform=tfms)
    # create the model, based on the first item in the dataset
    # second bit of the shape is the number of features
    n_features = dataset[0]['mix_magnitude'].shape[1]
    mi_config = ml.networks.builders.build_recurrent_mask_inference(
        n_features, 50, 2, True, 0.3, 2, 'softmax',
    )
    model = ml.SeparationModel(mi_config)
    # create optimizer
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    # dummy function for processing a batch through the model
    # (loss decreases monotonically so 'best' checkpoints keep updating)
    def train_batch(engine, data):
        loss = -engine.state.iteration
        return {'loss': loss}
    # building the training and validation engines and running them
    # the validation engine runs within the training engine run
    with tempfile.TemporaryDirectory() as tmpdir:
        _dir = fix_dir if fix_dir else tmpdir
        # _dir = tmpdir
        trainer, validator = ml.train.create_train_and_validation_engines(
            train_batch, train_batch
        )
        # add handlers to engine
        ml.train.add_stdout_handler(trainer, validator)
        ml.train.add_validate_and_checkpoint(_dir, model, optimizer, dataset,
                                             trainer, dataset, validator,
                                             save_by_epoch=1)
        ml.train.add_tensorboard_handler(_dir, trainer, every_iteration=True)
        ml.train.add_progress_bar_handler(trainer)
        # run engine
        trainer.run(dataset, max_epochs=3)
        # latest/best checkpoints for both model and optimizer must exist,
        # plus one per-epoch checkpoint (save_by_epoch=1).
        assert os.path.exists(trainer.state.output_folder)
        assert os.path.exists(os.path.join(
            trainer.state.output_folder, 'checkpoints', 'latest.model.pth'))
        assert os.path.exists(os.path.join(
            trainer.state.output_folder, 'checkpoints', 'best.model.pth'))
        assert os.path.exists(os.path.join(
            trainer.state.output_folder, 'checkpoints', 'latest.optimizer.pth'))
        assert os.path.exists(os.path.join(
            trainer.state.output_folder, 'checkpoints', 'best.optimizer.pth'))
        for i in range(1, 4):
            assert os.path.exists(os.path.join(
                trainer.state.output_folder, 'checkpoints',
                f'epoch{i}.model.pth')
            )
        assert len(trainer.state.epoch_history['train/loss']) == 3
        assert len(trainer.state.iter_history['loss']) == 10
        # try resuming
        model_path = os.path.join(
            trainer.state.output_folder, 'checkpoints', 'latest.model.pth')
        optimizer_path = os.path.join(
            trainer.state.output_folder, 'checkpoints', 'latest.optimizer.pth')
        opt_state_dict = torch.load(
            optimizer_path, map_location=lambda storage, loc: storage)
        state_dict = torch.load(
            model_path, map_location=lambda storage, loc: storage)
        optimizer.load_state_dict(opt_state_dict)
        model.load_state_dict(state_dict['state_dict'])
        # make sure the cache got removed in saved transforms bc it's not a portable
        # transform
        for t in state_dict['metadata']['train_dataset']['transforms'].transforms:
            assert not isinstance(t, datasets.transforms.Cache)
        new_trainer, new_validator = (
            ml.train.create_train_and_validation_engines(train_batch)
        )
        # add handlers to engine
        ml.train.add_stdout_handler(new_trainer)
        ml.train.add_validate_and_checkpoint(
            trainer.state.output_folder, model, optimizer, dataset,
            new_trainer)
        ml.train.add_tensorboard_handler(
            trainer.state.output_folder, new_trainer)
        # Restoring the saved trainer state should land on the same epoch.
        new_trainer.load_state_dict(state_dict['metadata']['trainer.state_dict'])
        assert new_trainer.state.epoch == trainer.state.epoch
        new_trainer.run(dataset, max_epochs=3)
def test_trainer_data_parallel(mix_source_folder):
    """Same trainer flow as above, but with the model wrapped in DataParallel."""
    # load dataset with transforms
    tfms = datasets.transforms.Compose([
        datasets.transforms.PhaseSensitiveSpectrumApproximation(),
        datasets.transforms.ToSeparationModel()])
    dataset = datasets.MixSourceFolder(
        mix_source_folder, transform=tfms)
    # create the model, based on the first item in the dataset
    # second bit of the shape is the number of features
    n_features = dataset[0]['mix_magnitude'].shape[1]
    mi_config = ml.networks.builders.build_recurrent_mask_inference(
        n_features, 50, 2, True, 0.3, 2, 'softmax',
    )
    model = ml.SeparationModel(mi_config)
    # Checkpointing must unwrap the DataParallel container correctly.
    model = torch.nn.DataParallel(model)
    # create optimizer
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    # dummy function for processing a batch through the model
    def train_batch(engine, data):
        loss = np.random.rand()
        return {'loss': loss}
    # building the training and validation engines and running them
    # the validation engine runs within the training engine run
    with tempfile.TemporaryDirectory() as tmpdir:
        _dir = fix_dir if fix_dir else tmpdir
        trainer, validator = ml.train.create_train_and_validation_engines(
            train_batch, train_batch
        )
        # add handlers to engine
        ml.train.add_stdout_handler(trainer, validator)
        ml.train.add_validate_and_checkpoint(_dir, model, optimizer, dataset,
                                             trainer, dataset, validator)
        ml.train.add_tensorboard_handler(_dir, trainer)
        # run engine
        trainer.run(dataset, max_epochs=3)
        assert os.path.exists(trainer.state.output_folder)
        assert os.path.exists(os.path.join(
            trainer.state.output_folder, 'checkpoints', 'latest.model.pth'))
        assert os.path.exists(os.path.join(
            trainer.state.output_folder, 'checkpoints', 'best.model.pth'))
        assert os.path.exists(os.path.join(
            trainer.state.output_folder, 'checkpoints', 'latest.optimizer.pth'))
        assert os.path.exists(os.path.join(
            trainer.state.output_folder, 'checkpoints', 'best.optimizer.pth'))
        assert len(trainer.state.epoch_history['train/loss']) == 3
        assert len(trainer.state.iter_history['loss']) == 10
def test_cache_dataset(mix_source_folder):
    """cache_dataset: populates the cache, and cached items equal fresh ones."""
    with tempfile.TemporaryDirectory() as tmpdir:
        tfms = datasets.transforms.Compose([
            datasets.transforms.PhaseSensitiveSpectrumApproximation(),
            datasets.transforms.ToSeparationModel(),
        ])
        chc = datasets.transforms.Cache(
            os.path.join(tmpdir, 'cache'), overwrite=True)
        # no cache
        dataset = datasets.MixSourceFolder(
            mix_source_folder,
            transform=tfms)
        outputs_a = []
        for i in range(len(dataset)):
            outputs_a.append(dataset[i])
        # now add a cache
        tfms.transforms.append(chc)
        dataset = datasets.MixSourceFolder(
            mix_source_folder,
            transform=tfms,
            cache_populated=False)
        assert (
            tfms.transforms[-1].cache.nchunks_initialized == 0)
        # cache_dataset iterates the whole dataset, filling every chunk.
        ml.train.cache_dataset(dataset)
        assert (
            tfms.transforms[-1].cache.nchunks_initialized == len(dataset))
        # now make sure the cached stuff matches
        dataset.cache_populated = True
        outputs_b = []
        for i in range(len(dataset)):
            outputs_b.append(dataset[i])
        for _data_a, _data_b in zip(outputs_a, outputs_b):
            for key in _data_a:
                if torch.is_tensor(_data_a[key]):
                    assert torch.allclose(_data_a[key], _data_b[key])
                else:
                    assert _data_a[key] == _data_b[key]
def test_cache_dataset_with_dataloader(mix_source_folder):
    """cache_dataset must also fill the cache when fed a DataLoader,
    even with shuffling and batching."""
    with tempfile.TemporaryDirectory() as tmpdir:
        tfms = datasets.transforms.Compose([
            datasets.transforms.PhaseSensitiveSpectrumApproximation(),
            datasets.transforms.ToSeparationModel(),
            datasets.transforms.Cache(
                os.path.join(tmpdir, 'cache'), overwrite=True),
            datasets.transforms.GetExcerpt(400)
        ])
        dataset = datasets.MixSourceFolder(
            mix_source_folder,
            transform=tfms,
            cache_populated=False)
        dataloader = torch.utils.data.DataLoader(
            dataset, shuffle=True, batch_size=2)
        ml.train.cache_dataset(dataloader)
        # Cache sits at index -2 (before GetExcerpt); every item cached.
        assert (
            tfms.transforms[-2].cache.nchunks_initialized == len(dataset))
nussl | nussl-master/tests/ml/test_separation_model.py | import nussl
import torch
from torch import nn
from nussl.ml.networks import SeparationModel, modules, builders
from nussl import datasets
import pytest
import json
import tempfile
import copy
# Shared model configurations used by the SeparationModel tests below.
# n_features corresponds to the STFT frequency dimension the models expect.
n_features = 257
# Mask-inference model: estimates per-source masks over the magnitude STFT.
mi_config = builders.build_recurrent_mask_inference(
    n_features, 50, 2, True, 0.3, 2, 'softmax',
)
# Deep clustering model: projects TF bins into a 20-dim embedding space.
dpcl_config = builders.build_recurrent_dpcl(
    n_features, 50, 2, False, 0.3, 20, ['sigmoid', 'unit_norm']
)
# Chimera model: joint mask-inference + deep-clustering heads.
chimera_config = builders.build_recurrent_chimera(
    n_features, 50, 2, True, 0.3, 20, ['sigmoid', 'unit_norm'],
    2, 'softmax',
)
# Open-Unmix-style model with an auxiliary embedding head.
open_unmix_like_config = builders.build_open_unmix_like(
    n_features, 50, 2, True, .4, 2, 1, add_embedding=True,
    embedding_size=20, embedding_activation=['sigmoid', 'unit_norm'],
)
# End-to-end (waveform-in/waveform-out) models: real and complex masking.
end_to_end_real_config = builders.build_recurrent_end_to_end(
    512, 512, 128, 'sqrt_hann', 50, 2,
    True, 0.3, 2, 'softmax', num_audio_channels=1,
    mask_complex=False, rnn_type='lstm',
    mix_key='mix_audio')
dual_path_recurrent_config = builders.build_dual_path_recurrent_end_to_end(
    64, 16, 8, 60, 30, 50, 2, True, 25, 2, 'sigmoid',
)
end_to_end_complex_config = builders.build_recurrent_end_to_end(
    512, 512, 128, 'sqrt_hann', 50, 2,
    True, 0.3, 2, 'softmax', num_audio_channels=1,
    mask_complex=True, rnn_type='lstm',
    mix_key='mix_audio')
# DPCL variant that unfolds a differentiable GMM over the embeddings and
# turns the responsibilities into masked estimates.
gmm_unfold_config = copy.deepcopy(dpcl_config)
gmm_unfold_config['modules']['mask'] = {
    'class': 'GaussianMixtureTorch',
    'args': {
        'n_components': 2
    }
}
gmm_unfold_config['modules']['estimates'] = {'class': 'Mask',}
gmm_unfold_config['connections'].extend([
    ['mask', ['embedding', {'means': 'init_means'}]],
    ['estimates', ['mask:resp', 'mix_magnitude',]]
])
gmm_unfold_config['output'].append('estimates')
# DPCL variant that appends a plain torch.nn.Linear on top of the embedding,
# checking that raw torch modules can be registered in a config.
add_torch_module_config = copy.deepcopy(dpcl_config)
add_torch_module_config['modules']['mask'] = {
    'class': 'Linear',
    'args': {
        'in_features': 20,
        'out_features': 2
    }
}
add_torch_module_config['connections'].extend(
    [['mask', ['embedding']]]
)
add_torch_module_config['output'].append('mask')
# Mask-inference variant with a Split module: 100 + 157 = 257 = n_features,
# exercising the 'module:key' output-addressing syntax (split:0, split:1).
split_config = copy.deepcopy(mi_config)
split_config['modules']['split'] = {
    'class': 'Split',
    'args': {
        'split_sizes': (100, 157),
        'dim': 2
    }
}
split_config['connections'].extend([
    ['split', ['estimates',]],
])
split_config['output'].append('split:0')
split_config['output'].append('split:1')
class MyModule(nn.Module):
    """Trivial module used to exercise custom-module registration.

    Returns ``data`` unchanged, or its negation when ``flip`` is True;
    any extra keyword arguments are ignored.
    """
    def __init__(self):
        super().__init__()
    def forward(self, data=None, flip=False, **kwargs):
        return -data if flip else data
def test_separation_model_init():
    """SeparationModel must reject malformed configuration dictionaries."""
    # Each entry below violates exactly one structural requirement.
    invalid_configs = [
        # not even the right top-level keys
        {'not': {'the right keys'}},
        # 'modules' must be a dict
        {'name': 'BadModel', 'modules': ['should be a dict'],
         'connections': [], 'output': []},
        # 'connections' must be a list
        {'name': 'BadModel', 'modules': mi_config['modules'],
         'connections': {'should be a list'}, 'output': []},
        # 'name' key is required
        {'modules': mi_config['modules'],
         'connections': mi_config['connections'], 'output': []},
        # 'name' must be a string
        {'name': 12345, 'modules': mi_config['modules'],
         'connections': mi_config['connections'], 'output': []},
        # 'output' must be a list
        {'name': 'BadModel', 'modules': mi_config['modules'],
         'connections': mi_config['connections'],
         'output': {'should be a list'}},
    ]
    for bad_config in invalid_configs:
        pytest.raises(ValueError, SeparationModel, bad_config)
def test_separation_model_mask_inference(one_item):
    """Mask-inference model: softmax masks make estimates sum to the mix."""
    n_features = one_item['mix_magnitude'].shape[2]
    with tempfile.NamedTemporaryFile(suffix='.json', delete=True) as tmp:
        with open(tmp.name, 'w') as f:
            json.dump(mi_config, f)
        # A SeparationModel can be built from a dict, a JSON file path,
        # or a JSON string — all three must behave the same.
        configs = [mi_config, tmp.name, json.dumps(mi_config)]
        for config in configs:
            model = SeparationModel(config)
            bad_item = copy.deepcopy(one_item)
            bad_item.pop('mix_magnitude')
            # A batch missing a required input key must raise.
            pytest.raises(ValueError, model, bad_item)
            output = model(one_item)
            assert (
                    output['estimates'].shape == (
                one_item['mix_magnitude'].shape + (2,))
            )
            # Softmax masks partition the mixture across the 2 sources.
            assert (
                torch.allclose(
                    output['estimates'].sum(dim=-1),
                    one_item['mix_magnitude']))
def test_separation_model_dpcl(one_item):
    """Deep-clustering model emits a 20-dim embedding per TF bin."""
    n_features = one_item['mix_magnitude'].shape[2]
    # dpcl network
    with tempfile.NamedTemporaryFile(suffix='.json', delete=True) as tmp:
        with open(tmp.name, 'w') as f:
            json.dump(dpcl_config, f)
        # dict, JSON file path, and JSON string must all be accepted.
        configs = [dpcl_config, tmp.name, json.dumps(dpcl_config)]
        for config in configs:
            model = SeparationModel(config)
            output = model(one_item)
            assert (
                    output['embedding'].shape == (
                one_item['mix_magnitude'].shape + (20,)))
def test_separation_end_to_end(one_item):
    """End-to-end (waveform) models output audio shaped like the sources."""
    # Both the real-masking and complex-masking variants are checked.
    for c in [end_to_end_real_config, end_to_end_complex_config]:
        with tempfile.NamedTemporaryFile(suffix='.json', delete=True) as tmp:
            with open(tmp.name, 'w') as f:
                json.dump(c, f)
            configs = [
                c,
                tmp.name,
                json.dumps(c)
            ]
            for config in configs:
                model = SeparationModel(config)
                output = model(one_item)
                assert (
                        output['audio'].shape == one_item['source_audio'].shape
                )
def test_separation_dprnn(one_item):
    """Dual-path RNN model outputs audio shaped like the sources."""
    # dprnn network
    with tempfile.NamedTemporaryFile(suffix='.json', delete=True) as tmp:
        with open(tmp.name, 'w') as f:
            json.dump(dual_path_recurrent_config, f)
        configs = [
            dual_path_recurrent_config,
            tmp.name,
            json.dumps(dual_path_recurrent_config)
        ]
        for config in configs:
            # verbose=True also exercises the debug-printing code path.
            model = SeparationModel(config, verbose=True)
            output = model(one_item)
            assert (
                    output['audio'].shape == one_item['source_audio'].shape
            )
def test_separation_model_chimera(one_item):
    """Chimera model emits both mask-based estimates and an embedding."""
    n_features = one_item['mix_magnitude'].shape[2]
    with tempfile.NamedTemporaryFile(suffix='.json', delete=True) as tmp:
        with open(tmp.name, 'w') as f:
            json.dump(chimera_config, f)
        configs = [chimera_config, tmp.name, json.dumps(chimera_config)]
        for config in configs:
            model = SeparationModel(config)
            output = model(one_item)
            assert (
                    output['estimates'].shape == (
                one_item['mix_magnitude'].shape + (2,))
            )
            # Softmax masks partition the mixture.
            assert (
                torch.allclose(
                    output['estimates'].sum(dim=-1),
                    one_item['mix_magnitude']))
            assert (
                    output['embedding'].shape == (
                one_item['mix_magnitude'].shape + (20,)))
def test_separation_model_open_unmix_like(one_item):
    """Open-Unmix-like model emits estimates, masks, and an embedding."""
    n_features = one_item['mix_magnitude'].shape[2]
    with tempfile.NamedTemporaryFile(suffix='.json', delete=True) as tmp:
        with open(tmp.name, 'w') as f:
            json.dump(open_unmix_like_config, f)
        configs = [
            open_unmix_like_config,
            tmp.name,
            json.dumps(open_unmix_like_config)
        ]
        for config in configs:
            model = SeparationModel(config)
            output = model(one_item)
            assert (
                    output['estimates'].shape == (
                one_item['mix_magnitude'].shape + (2,))
            )
            assert (
                    output['mask'].shape == (
                one_item['mix_magnitude'].shape + (2,))
            )
            assert (
                    output['embedding'].shape == (
                one_item['mix_magnitude'].shape + (20,)))
def test_separation_model_gmm_unfold(one_item):
    """GMM-unfold model consumes externally supplied initial means."""
    n_features = one_item['mix_magnitude'].shape[2]
    with tempfile.NamedTemporaryFile(suffix='.json', delete=True) as tmp:
        with open(tmp.name, 'w') as f:
            json.dump(gmm_unfold_config, f)
        configs = [gmm_unfold_config, tmp.name, json.dumps(gmm_unfold_config)]
        for config in configs:
            model = SeparationModel(config)
            # The GMM is seeded from 'init_means' in the batch: one
            # (2 components x 20-dim) mean matrix per item.
            one_item['init_means'] = torch.randn(
                one_item['mix_magnitude'].shape[0], 2, 20
            ).to(one_item['mix_magnitude'].device)
            output = model(one_item)
            assert (
                    output['estimates'].shape == (
                one_item['mix_magnitude'].shape + (2,))
            )
            # Masks built from GMM responsibilities sum to the mixture.
            assert (
                torch.allclose(
                    output['estimates'].sum(dim=-1),
                    one_item['mix_magnitude']))
            assert (
                    output['embedding'].shape == (
                one_item['mix_magnitude'].shape + (20,)))
def test_separation_model_split(one_item):
    """Split module divides the estimates into 100- and 157-frame chunks."""
    n_features = one_item['mix_magnitude'].shape[2]
    with tempfile.NamedTemporaryFile(suffix='.json', delete=True) as tmp:
        with open(tmp.name, 'w') as f:
            json.dump(split_config, f)
        configs = [split_config, tmp.name, json.dumps(split_config)]
        for config in configs:
            model = SeparationModel(config)
            output = model(one_item)
            assert (
                    output['estimates'].shape == (
                one_item['mix_magnitude'].shape + (2,))
            )
            assert (
                torch.allclose(
                    output['estimates'].sum(dim=-1),
                    one_item['mix_magnitude']))
            # Outputs of a multi-output module are addressed as 'name:i'.
            assert (
                    output['split:0'].shape[2] == 100)
            assert (
                    output['split:1'].shape[2] == 157)
def test_separation_model_add_torch(one_item):
    """A plain torch.nn.Linear can serve as a module in a model config."""
    n_features = one_item['mix_magnitude'].shape[2]
    with tempfile.NamedTemporaryFile(suffix='.json', delete=True) as tmp:
        with open(tmp.name, 'w') as f:
            json.dump(add_torch_module_config, f)
        configs = [
            add_torch_module_config,
            tmp.name,
            json.dumps(add_torch_module_config)
        ]
        for config in configs:
            model = SeparationModel(config)
            output = model(one_item)
            # Linear maps the 20-dim embedding to 2 mask channels.
            assert (
                    output['mask'].shape == (
                one_item['mix_magnitude'].shape + (2,))
            )
            assert (
                    output['embedding'].shape == (
                one_item['mix_magnitude'].shape + (20,)))
def test_separation_model_extra_modules(one_item):
    """A registered custom module can be wired into a model config.

    The custom ``MyModule`` passes ``mix_magnitude`` through (optionally
    negated via the ``flip`` forward kwarg supplied at call time).
    """
    # Bug fix: work on a deep copy instead of mutating the module-level
    # dpcl_config in place. The original appended a module, a connection,
    # and an output to the shared config, leaking state into every later
    # test that reads dpcl_config (e.g. test_separation_model_save_and_load)
    # and making results depend on test execution order.
    extended_config = copy.deepcopy(dpcl_config)
    with tempfile.NamedTemporaryFile(suffix='.json', delete=True) as tmp:
        extended_config['modules']['test'] = {
            'class': 'MyModule'
        }
        extended_config['connections'].append(
            ('test', ('mix_magnitude', {
                'embedding': 'embedding',
                'flip': False
            }))
        )
        extended_config['output'].append('test')
        with open(tmp.name, 'w') as f:
            json.dump(extended_config, f)
        # dict, JSON file path, and JSON string must all be accepted.
        configs = [extended_config, tmp.name, json.dumps(extended_config)]
        nussl.ml.register_module(MyModule)
        for config in configs:
            model = SeparationModel(config)
            output = model(one_item)
            assert (
                    output['embedding'].shape == (
                one_item['mix_magnitude'].shape + (20,)))
            # flip defaults to False -> pass-through.
            assert torch.allclose(
                one_item['mix_magnitude'], output['test']
            )
            model = SeparationModel(config)
            copy_one_item = copy.deepcopy(one_item)
            # Forward kwargs are routed to the custom module: flip negates.
            output = model(copy_one_item, flip=True)
            assert torch.allclose(
                one_item['mix_magnitude'], -output['test']
            )
def test_separation_model_save_and_load():
    """Saving then loading a model preserves metadata and all parameters."""
    model = SeparationModel(dpcl_config)
    tfms = datasets.transforms.Compose([
        datasets.transforms.PhaseSensitiveSpectrumApproximation(),
        datasets.transforms.ToSeparationModel(),
        datasets.transforms.Cache('tests/local/sep_model/cache')
    ])
    # Minimal stand-ins for the dataset/trainer objects that save()
    # introspects for metadata.
    class DummyData:
        def __init__(self):
            self.stft_params = None
            self.sample_rate = None
            self.num_channels = None
            self.metadata = {'transforms': tfms}
    class DummyState:
        def __init__(self):
            self.epoch = 0
            self.epoch_length = 100
            self.max_epochs = 100
            self.output = None
            self.metrics = {}
            self.seed = None
            self.epoch_history = {}
    class DummyTrainer:
        def __init__(self):
            self.state = DummyState()
    dummy_data = DummyData()
    dummy_trainer = DummyTrainer()
    with tempfile.NamedTemporaryFile(suffix='.pth', delete=True) as tmp:
        loc = model.save(tmp.name, train_data=dummy_data,
                         val_data=dummy_data, trainer=dummy_trainer)
        new_model, metadata = SeparationModel.load(tmp.name)
        # The nussl version is recorded alongside the checkpoint.
        assert metadata['nussl_version'] == nussl.__version__
        new_model_params = {}
        old_model_params = {}
        for name, param in new_model.named_parameters():
            new_model_params[name] = param
        for name, param in model.named_parameters():
            old_model_params[name] = param
        # Every loaded weight must match the original exactly.
        for key in new_model_params:
            assert torch.allclose(
                new_model_params[key],
                old_model_params[key]
            )
def test_separation_model_expose():
    """expose_forward=True forwards call kwargs straight to the module."""
    class Model(nn.Module):
        def __init__(self, x):
            super().__init__()
            self.x = x
        def forward(self, y):
            return self.x + y
    nussl.ml.register_module(Model)
    config = {
        'modules': {
            'model': {
                'class': 'Model',
                'args': {
                    'x': 10
                },
                # Arguments given to the SeparationModel call are routed
                # directly to Model.forward.
                'expose_forward': True
            }
        },
        'connections': [],
        'output': [],
        'name': 'Model',
    }
    separation_model = nussl.ml.SeparationModel(config)
    # 10 (constructor x) + 5 (forwarded y)
    assert separation_model(y=5) == 15
def test_separation_model_repr_and_verbose(one_item):
    """Smoke test: verbose construction, __repr__, and a forward pass."""
    model = SeparationModel(end_to_end_real_config, verbose=True)
    print(model)
    model(one_item)
| 14,816 | 29.054767 | 78 | py |
nussl | nussl-master/tests/ml/test_loss.py | import torch
import nussl
from nussl import ml
from torch import nn
import numpy as np
from itertools import permutations
import random
import copy
def test_register_loss():
    """ml.register_loss makes a custom loss visible in ml.train.loss."""
    class ExampleLoss(nn.Module):
        # Maps batch dict keys to forward() argument names.
        DEFAULT_KEYS = {'key1': 'arg1', 'key2': 'arg2'}
        def forward(self, arg1, arg2):
            return 0
    assert ExampleLoss.__name__ not in (dir(ml.train.loss))
    ml.register_loss(ExampleLoss)
    assert ExampleLoss.__name__ in (dir(ml.train.loss))
def test_deep_clustering_loss():
    """DPCL loss is 0 for a perfect embedding and in (0, 1] for a random one."""
    n_batch = 40
    n_time = 400
    n_freq = 129
    n_sources = 4
    n_embedding = 20
    embedding = torch.rand(n_batch, n_time, n_freq, n_embedding)
    embedding = torch.nn.functional.normalize(
        embedding, dim=-1, p=2)
    assignments = torch.rand(n_batch, n_time, n_freq, n_sources) > .5
    assignments = assignments.float()
    weights = torch.ones(n_batch, n_time, n_freq)
    LossDPCL = ml.train.loss.DeepClusteringLoss()
    # Using the assignments as their own embedding gives a perfect score.
    _loss_a = LossDPCL(assignments, assignments, weights).item()
    assert _loss_a == 0
    # A random embedding must score strictly worse, bounded above by 1.
    _loss_b = LossDPCL(embedding, assignments, weights).item()
    assert _loss_b > _loss_a
    assert _loss_b <= 1
def test_whitened_kmeans_loss():
    """Whitened k-means loss: random embedding scores worse than a perfect one."""
    n_batch = 40
    n_time = 400
    n_freq = 129
    n_sources = 4
    n_embedding = 20
    embedding = torch.rand(n_batch, n_time, n_freq, n_embedding)
    embedding = torch.nn.functional.normalize(
        embedding, dim=-1, p=2)
    assignments = torch.rand(n_batch, n_time, n_freq, n_sources) > .5
    assignments = assignments.float()
    weights = torch.ones(n_batch, n_time, n_freq)
    LossWKM = ml.train.loss.WhitenedKMeansLoss()
    # Assignments used as their own embedding form the best-case baseline.
    _loss_a = LossWKM(assignments, assignments, weights).item()
    _loss_b = LossWKM(embedding, assignments, weights).item()
    assert _loss_b > _loss_a
def test_permutation_invariant_loss_tf():
    """PIT L1 loss is unchanged under cyclic permutations of the sources."""
    n_batch = 10
    n_time = 400
    n_freq = 129
    n_sources = 4
    sources = torch.randn(n_batch, n_time, n_freq, n_sources)
    LossPIT = ml.train.loss.PermutationInvariantLoss(
        loss_function=ml.train.loss.L1Loss())
    LossL1 = ml.train.loss.L1Loss()
    # Identity loss: sources against themselves.
    _loss_a = LossL1(sources, sources).item()
    for shift in range(n_sources):
        # Rotate the source axis by `shift`; PIT should undo the rotation.
        sources_a = sources[:, :, :, shift:]
        sources_b = sources[:, :, :, :shift]
        shifted_sources = torch.cat(
            [sources_a, sources_b], dim=-1)
        _loss_b = LossPIT(shifted_sources, sources).item()
        assert np.allclose(_loss_a, _loss_b, atol=1e-3)
def test_combination_invariant_loss_tf():
    """Combination-invariant loss finds the best subset even with extra
    junk estimate channels appended."""
    n_batch = 40
    n_time = 400
    n_freq = 129
    n_sources = 2
    sources = torch.randn(n_batch, n_time, n_freq, n_sources)
    LossCPIT = ml.train.loss.CombinationInvariantLoss(
        loss_function=nn.L1Loss())
    LossL1 = nn.L1Loss()
    _loss_a = LossL1(sources, sources).item()
    for shift in range(n_sources):
        sources_a = sources[:, :, :, shift:]
        sources_b = sources[:, :, :, :shift]
        # Extra random channels should be ignored by the best combination.
        sources_c = torch.randn(n_batch, n_time, n_freq, n_sources)
        shifted_sources = torch.cat(
            [sources_a, sources_b, sources_c], dim=-1)
        _loss_b = LossCPIT(shifted_sources, sources).item()
        assert np.allclose(_loss_a, _loss_b, atol=1e-3)
def test_sdr_loss():
    """SI-SDR loss grows monotonically with the amount of added noise,
    matches the numpy reference implementation, and honors clip_min."""
    n_batch = 40
    n_samples = 16000
    n_sources = 2
    references = torch.randn(n_batch, n_samples, n_sources)
    noise_amount = [0.01, 0.05, 0.1, 0.2, 0.5, 1]
    LossSDR = ml.train.loss.SISDRLoss(zero_mean=True)
    prev_loss = -np.inf
    for n in noise_amount:
        # NOTE(review): deep-copying references onto itself is a no-op here.
        references = copy.deepcopy(references)
        estimates = references + n * torch.randn(n_batch, n_samples, n_sources)
        # More noise -> worse SI-SDR -> strictly larger loss.
        _loss = LossSDR(estimates, references).item()
        assert _loss > prev_loss
        prev_loss = _loss
    # Repeat without zero-meaning and with per-item ('none') reduction.
    references = torch.randn(n_batch, n_samples, n_sources)
    LossSDR = ml.train.loss.SISDRLoss(zero_mean=False, reduction='none')
    prev_loss = -np.inf
    for n in noise_amount:
        references = copy.deepcopy(references)
        estimates = references + n * torch.randn(n_batch, n_samples, n_sources)
        _loss = LossSDR(estimates, references)
        assert _loss.sum().item() > prev_loss
        prev_loss = _loss.sum().item()
    # Cross-check randomly sampled items against the numpy SI-SDR reference.
    for idx in range(n_batch):
        # NOTE(review): the loop variable is immediately overwritten — this
        # draws n_batch random indices rather than iterating in order.
        idx = np.random.randint(n_batch)
        _numpy_si_sdr = nussl.evaluation.scale_bss_eval(
            references.data.numpy()[idx],
            estimates.data.numpy()[idx, ..., 0],
            references.data.numpy()[idx].sum(axis=-1),
            0
        )[0]
        # The loss is negated SI-SDR.
        _torch_loss_on_one = -1 * _loss[idx][0]
        assert np.allclose(_torch_loss_on_one.item(), _numpy_si_sdr, atol=1e-3)
    # Smoke-test the return_scaling code path.
    LossSDR = ml.train.loss.SISDRLoss(
        zero_mean=False, reduction='none', return_scaling=True)
    LossSDR(estimates, references)
    # clip_min bounds every per-item loss from below, even for a perfect
    # estimate (references vs. themselves).
    clip_min = -30.0
    LossSDR = ml.train.loss.SISDRLoss(reduction='none', clip_min=clip_min)
    losses = LossSDR(references, references)
    assert all(l >= clip_min for l in losses.flatten())
def test_permutation_invariant_loss_sdr():
    """PIT over SI-SDR equals plain SI-SDR for permuted estimates."""
    n_batch = 40
    n_samples = 16000
    n_sources = 2
    references = torch.randn(n_batch, n_samples, n_sources)
    noise_amount = [0.01, 0.05, 0.1, 0.5, 1.0]
    LossPIT = ml.train.loss.PermutationInvariantLoss(
        loss_function=ml.train.loss.SISDRLoss())
    LossSDR = ml.train.loss.SISDRLoss()
    LossSumSDR = ml.train.loss.SISDRLoss(reduction='sum')
    for n in noise_amount:
        estimates = references + n * torch.randn(n_batch, n_samples, n_sources)
        _loss_a = LossSDR(estimates, references).item()
        _loss_sum_a = LossSumSDR(estimates, references).item()
        # 'sum' reduction = mean x number of (batch, source) items.
        assert np.allclose(n_batch * n_sources * _loss_a, _loss_sum_a)
        for shift in range(n_sources):
            # PIT must undo any cyclic shift of the source axis.
            sources_a = estimates[..., shift:]
            sources_b = estimates[..., :shift]
            shifted_sources = torch.cat(
                [sources_a, sources_b], dim=-1)
            _loss_b = LossPIT(shifted_sources, references).item()
            assert np.allclose(_loss_a, _loss_b, atol=1e-4)
def test_combination_invariant_loss_sdr():
    """Combination-invariant SI-SDR ignores extra junk estimate channels."""
    n_batch = 40
    n_samples = 16000
    n_sources = 2
    references = torch.randn(n_batch, n_samples, n_sources)
    noise_amount = [0.01, 0.05, 0.1, 0.5, 1.0]
    LossCPIT = ml.train.loss.CombinationInvariantLoss(
        loss_function=ml.train.loss.SISDRLoss())
    LossSDR = ml.train.loss.SISDRLoss()
    for n in noise_amount:
        estimates = references + n * torch.randn(n_batch, n_samples, n_sources)
        _loss_a = LossSDR(estimates, references).item()
        for shift in range(n_sources):
            sources_a = estimates[..., shift:]
            sources_b = estimates[..., :shift]
            # Extra random channels should not affect the best combination.
            sources_c = torch.rand_like(estimates)
            shifted_sources = torch.cat(
                [sources_a, sources_b, sources_c], dim=-1)
            _loss_b = LossCPIT(shifted_sources, references).item()
            assert np.allclose(_loss_a, _loss_b, atol=1e-4)
| 7,004 | 31.133028 | 83 | py |
nussl | nussl-master/tests/ml/test_modules.py | import torch
import nussl
from nussl.datasets import transforms
from nussl import ml
import pytest
import numpy as np
import librosa
import itertools
def test_register_module():
    """ml.register_module makes a custom nn.Module visible in ml.networks.modules."""
    class ExampleModule(torch.nn.Module):
        def forward(self, data):
            data = data * 2
            return data
    assert ExampleModule.__name__ not in (dir(ml.networks.modules))
    ml.register_module(ExampleModule)
    assert ExampleModule.__name__ in (dir(ml.networks.modules))
def test_ml_amplitude_to_db(one_item):
    """AmplitudeToDB matches librosa.amplitude_to_db with amin=1e-4, ref=1.0."""
    module = ml.networks.modules.AmplitudeToDB()
    output = module(one_item['mix_magnitude'])
    librosa_output = librosa.amplitude_to_db(
        one_item['mix_magnitude'].cpu().numpy()[0],
        amin=1e-4,
        ref=1.0,
    )
    output = output.cpu().numpy()
    assert np.allclose(output, librosa_output)
def test_shift_and_scale():
    """ShiftAndScale starts as identity; parameters apply as x*scale + shift."""
    data = torch.randn(100)
    shifter = ml.networks.modules.ShiftAndScale()
    # Default parameters leave the input unchanged.
    _data = shifter(data)
    assert torch.allclose(data, _data)
    shifter.scale.data[0] = 10
    _data = shifter(data)
    assert torch.allclose(10 * data, _data)
    shifter.shift.data[0] = -10
    _data = shifter(data)
    assert torch.allclose(10 * data - 10, _data)
def test_ml_batch_instance_norm(one_item):
    """BatchNorm/InstanceNorm preserve shape; InstanceNorm whitens the input."""
    module = ml.networks.modules.BatchNorm()
    output = module(one_item['mix_magnitude'])
    assert one_item['mix_magnitude'].shape == output.shape
    module = ml.networks.modules.InstanceNorm(eps=1e-10)
    output = module(one_item['mix_magnitude'])
    assert one_item['mix_magnitude'].shape == output.shape
    _output = output.cpu().numpy()
    # Normalized output should be ~zero-mean and ~unit-std.
    assert np.abs(np.mean(_output) - 0.0) < 1e-7
    assert np.abs(np.std(_output) - 1.0) < 1e-3
def test_ml_group_norm(one_item):
    """GroupNorm over the frequency dimension preserves the input shape."""
    shape = one_item['mix_magnitude'].shape
    module = ml.networks.modules.GroupNorm(shape[2])
    output = module(one_item['mix_magnitude'])
    assert one_item['mix_magnitude'].shape == output.shape
    # NOTE(review): the block below repeats the identical construction and
    # assertion — possibly a second variant (e.g. a different num_groups)
    # was intended here.
    module = ml.networks.modules.GroupNorm(shape[2])
    output = module(one_item['mix_magnitude'])
    assert one_item['mix_magnitude'].shape == output.shape
def test_ml_layer_norm(one_item):
    """LayerNorm preserves shape for every combination of feature dims."""
    shape = one_item['mix_magnitude'].shape
    for c in range(1, len(shape)):
        dim_combos = list(itertools.combinations(range(len(shape)), c))
        for combo in dim_combos:
            _shape = [shape[x] for x in combo]
            module = ml.networks.modules.LayerNorm(_shape[-1], feature_dims=combo)
            output = module(one_item['mix_magnitude'])
            assert one_item['mix_magnitude'].shape == output.shape
            # Reversed dim order must also work.
            module = ml.networks.modules.LayerNorm(_shape[0], feature_dims=combo[::-1])
            output = module(one_item['mix_magnitude'])
            assert one_item['mix_magnitude'].shape == output.shape
def test_ml_mel_projection(one_item):
    """MelProjection weights match normalized librosa mel filters; the
    backward direction (pseudo-inverse) approximately undoes the forward."""
    n_mels = [64, 128, 150]
    data = one_item['mix_magnitude']
    num_frequencies = data.shape[2]
    # An invalid direction must raise.
    pytest.raises(ValueError, ml.networks.modules.MelProjection,
                  16000, num_frequencies, 128, direction='neither')
    for n_mel in n_mels:
        forward = ml.networks.modules.MelProjection(
            16000, num_frequencies, n_mel, direction='forward'
        )
        backward = ml.networks.modules.MelProjection(
            16000, num_frequencies, n_mel, direction='backward'
        )
        backward_clamp = ml.networks.modules.MelProjection(
            16000, num_frequencies, n_mel, direction='backward',
            clamp=True
        )
        mel_spec = forward(data).cpu().numpy()[0, :, :, 0].T
        filters = librosa.filters.mel(
            16000, 2 * (num_frequencies - 1), n_mel)
        # Row-normalize so each mel band's weights sum to 1.
        filters = (
                filters.T / (filters.sum(axis=1) + 1e-8)).T
        assert np.allclose(
            forward.transform.weight.cpu().numpy(),
            filters
        )
        # Backward weights are the pseudo-inverse of the forward filters.
        assert np.allclose(
            backward.transform.weight.cpu().numpy(),
            np.linalg.pinv(filters)
        )
        _data = data.cpu().numpy()[0, :, :, 0]
        librosa_output = (_data @ filters.T).T
        assert mel_spec.shape[0] == n_mel
        assert np.allclose(mel_spec, librosa_output)
        # Forward-then-backward should approximately reconstruct the input.
        recon = backward(forward(data))
        _data = data.cpu().numpy()[0, :, :, 0]
        _recon = recon.cpu().numpy()[0, :, :, 0]
        assert np.mean((_data - _recon) ** 2) < 1e-7
        # clamp=True squashes the reconstruction into [0, 1] (mask range).
        mask = backward_clamp(forward(data)).cpu().numpy()
        assert mask.min() >= 0.0
        assert mask.max() <= 1.0
def test_ml_embedding(one_item):
    """Embedding module honors each activation's range and normalization."""
    data = one_item['mix_magnitude']
    num_frequencies = data.shape[2]
    rnn = ml.networks.modules.RecurrentStack(
        num_frequencies, 50, 1, bidirectional=False, dropout=0.0
    )
    activations = ['sigmoid', 'tanh', 'relu', 'softmax', ['gated_tanh', 'sigmoid']]
    embedding_sizes = [1, 5, 10, 20, 100]
    for a in activations:
        for e in embedding_sizes:
            module = ml.networks.modules.Embedding(
                num_frequencies, 50, e, a,
                dim_to_embed=-1
            )
            output = module(rnn(data))
            # Check the output range implied by each activation.
            if a == 'sigmoid':
                assert output.min() >= 0
                assert output.max() <= 1
            elif a == 'tanh':
                assert output.min() >= -1
                assert output.max() <= 1
            elif a == 'relu':
                assert output.min() >= 0
            elif a == 'softmax':
                assert output.min() >= 0
                assert output.max() <= 1
                # Softmax outputs sum to one along the embedded dim.
                _sum = torch.sum(output, dim=-1)
                assert torch.allclose(_sum, torch.ones_like(_sum))
            # Appending 'unit_norm' L2-normalizes every embedding vector.
            _a = [a, 'unit_norm']
            module = ml.networks.modules.Embedding(
                num_frequencies, 50, e, _a,
                dim_to_embed=-1
            )
            output = module(rnn(data))
            _norm = torch.norm(output, p=2, dim=-1)
            # relu sends entire vectors to zero, so their norm is 0.
            # only check nonzero norm values.
            _norm = _norm[_norm > 0]
            assert torch.allclose(_norm, torch.ones_like(_norm))
            # 'l1_norm' normalizes by the L1 norm instead.
            _a = [a, 'l1_norm']
            module = ml.networks.modules.Embedding(
                num_frequencies, 50, e, _a,
                dim_to_embed=-1
            )
            output = module(rnn(data))
            _norm = torch.norm(output, p=1, dim=-1)
            # relu sends entire vectors to zero, so their norm is 0.
            # only check nonzero norm values.
            _norm = _norm[_norm > 0]
            assert torch.allclose(_norm, torch.ones_like(_norm))
def test_ml_mask(one_item):
    """Mask module multiplies a mask with the magnitude (unsqueezed last dim)."""
    data = one_item['mix_magnitude']
    mask = torch.randn(data.shape + (4,))
    masked_data = mask * data.unsqueeze(-1)
    module = ml.networks.modules.Mask()
    output = module(mask, data)
    assert torch.allclose(output, masked_data)
    # Same check with the ideal binary mask from the dataset item.
    data = one_item['mix_magnitude']
    ibm = one_item['ideal_binary_mask']
    masked_data = ibm * data.unsqueeze(-1)
    module = ml.networks.modules.Mask()
    output = module(ibm, data)
    assert torch.allclose(output, masked_data)
def test_ml_concatenate(one_item):
    """Concatenate doubles the chosen dimension when given two copies."""
    data = one_item['mix_magnitude']
    dims = range(len(data.shape))
    for dim in dims:
        module = ml.networks.modules.Concatenate(dim=dim)
        output = module(data, data)
        assert output.shape[dim] == 2 * data.shape[dim]
def test_ml_split(one_item):
    """Split yields chunks of the requested sizes along each dimension."""
    data = one_item['mix_magnitude']
    dims = range(len(data.shape))
    for dim in dims:
        # Random split point; skip degenerate cases with an empty chunk.
        split_point = np.random.randint(data.shape[dim])
        split_sizes = (split_point, data.shape[dim] - split_point)
        if split_sizes[-1] > 0:
            module = ml.networks.modules.Split(
                split_sizes=split_sizes, dim=dim)
            output = module(data)
            assert len(output) == len(split_sizes)
            for i, o in enumerate(output):
                assert o.shape[dim] == split_sizes[i]
def test_ml_expand():
    """Expand raises the second tensor's rank to match the first's."""
    tensor_a = torch.rand(100, 10, 5)
    tensor_b = torch.rand(100, 10)
    module = ml.networks.modules.Expand()
    tensor_b = module(tensor_a, tensor_b)
    assert tensor_b.ndim == tensor_a.ndim
    # A tensor with MORE dims than the target cannot be expanded.
    bad_tensor = torch.rand(100, 10, 5, 2)
    pytest.raises(ValueError, module, tensor_a, bad_tensor)
def test_ml_alias():
    """Alias module re-exports another connection's output under a new name."""
    modules = {
        'split': {
            'class': 'Split',
            'args': {
                'split_sizes': (3, 7),
                'dim': -1
            }
        },
        # 'split_zero' is just an alias for the 'split:0' output.
        'split_zero': {
            'class': 'Alias',
        }
    }
    connections = [
        ('split', ('data',)),
        ('split_zero', ('split:0',))
    ]
    outputs = ['split:0', 'split_zero']
    config = {
        'name': 'AliasModel',
        'modules': modules,
        'connections': connections,
        'output': outputs
    }
    model = ml.SeparationModel(config)
    data = {'data': torch.randn(100, 10)}
    output = model(data)
    assert 'split_zero' in output
    # Alias output is identical to the tensor it aliases.
    assert torch.allclose(
        output['split:0'], output['split_zero']
    )
def test_ml_recurrent_stack(one_item):
    """RecurrentStack output width is hidden_size (doubled if bidirectional)."""
    data = one_item['mix_magnitude']
    num_features = data.shape[2]
    # Unknown RNN types must raise.
    pytest.raises(ValueError, ml.networks.modules.RecurrentStack,
                  1, 1, 1, True, .3, 'not_lstm_or_gru'
                  )
    rnn_types = ['gru', 'lstm']
    bidirectional = [True, False]
    num_features = [num_features]
    hidden_size = [50, 100]
    num_layers = [1, 2]
    dropout = [.3]
    products = itertools.product(
        num_features, hidden_size, num_layers, bidirectional, dropout,
        rnn_types)
    for _product in products:
        module = ml.networks.modules.RecurrentStack(*_product)
        output = module(data)
        # _product[1] is hidden_size; _product[-3] is the bidirectional flag.
        dim = 2 * _product[1] if _product[-3] else _product[1]
        assert output.shape == (data.shape[0], data.shape[1], dim)
def test_ml_conv_stack(one_item):
    """ConvolutionalStack2D: argument validation, warning path, and output
    shape with and without gradient checkpointing."""
    data = one_item['mix_magnitude']
    num_features = data.shape[2]
    in_channels = 1
    channels = [10, 32, 8, 10]
    dilations = [1, 1, 1, 1]
    filter_shapes = [7, 3, 5, 3]
    residuals = [True, False, False, True]
    batch_norm = True
    use_checkpointing = False
    # A channels list whose length disagrees with the other args raises.
    pytest.raises(ValueError, ml.networks.modules.ConvolutionalStack2D,
                  in_channels, [channels[0]], dilations, filter_shapes, residuals,
                  batch_norm=batch_norm, use_checkpointing=use_checkpointing
                  )
    # This dilation configuration is expected to emit a UserWarning.
    pytest.warns(UserWarning, ml.networks.modules.ConvolutionalStack2D,
                 in_channels, channels, [1, 2, 4, 8], filter_shapes, residuals,
                 batch_norm=batch_norm, use_checkpointing=use_checkpointing
                 )
    module = ml.networks.modules.ConvolutionalStack2D(
        in_channels, channels, dilations, filter_shapes, residuals,
        batch_norm=batch_norm, use_checkpointing=use_checkpointing
    )
    output = module(data)
    assert (output.shape == (
        data.shape[0], data.shape[1], data.shape[2], channels[-1]))
    # The checkpointed variant must yield the same output shape.
    module = ml.networks.modules.ConvolutionalStack2D(
        in_channels, channels, dilations, filter_shapes, residuals,
        batch_norm=batch_norm, use_checkpointing=True
    )
    output = module(data)
    assert (output.shape == (
        data.shape[0], data.shape[1], data.shape[2], channels[-1]))
def test_dual_path(one_item):
    """DualPath preserves the input shape with RNN or Linear intra/inter
    processors, and rejects unknown processor classes."""
    recurrent_stack = {
        'class': 'RecurrentStack',
        'args': {
            'num_features': 100,
            'hidden_size': 50,
            'num_layers': 1,
            'bidirectional': True,
            'dropout': 0.3,
            'rnn_type': 'lstm',
            'batch_first': False
        }
    }
    dual_path = ml.networks.modules.DualPath(
        2, 100, 50, 257, 100, hidden_size=100,
        intra_processor=recurrent_stack,
        inter_processor=recurrent_stack
    )
    output = dual_path(one_item['mix_magnitude'])
    assert output.shape == one_item['mix_magnitude'].shape
    # Plain Linear processors (with skip connections) also work.
    linear_layer = {
        'class': 'Linear',
        'args': {
            'in_features': 100,
            'out_features': 100,
        }
    }
    dual_path = ml.networks.modules.DualPath(
        2, 100, 50, 257, 100, hidden_size=100,
        intra_processor=linear_layer,
        inter_processor=linear_layer,
        skip_connection=True
    )
    output = dual_path(one_item['mix_magnitude'])
    assert output.shape == one_item['mix_magnitude'].shape
    # An unknown processor class name raises ValueError.
    nonexisting_layer = {
        'class': 'NoExist',
        'args': {
            'in_features': 100,
            'out_features': 100,
        }
    }
    pytest.raises(ValueError, ml.networks.modules.DualPath,
                  2, 100, 50, 257, 100, hidden_size=100,
                  intra_processor=nonexisting_layer,
                  inter_processor=nonexisting_layer
                  )
| 12,748 | 29.573141 | 87 | py |
nussl | nussl-master/tests/ml/test_filterbank.py | from nussl import ml, datasets
from nussl.core.constants import ALL_WINDOWS
import nussl
import pytest
import torch
import itertools
from scipy.signal import check_COLA
import numpy as np
def test_filter_bank(one_item, monkeypatch):
    """FilterBank base class raises when abstract; monkeypatched random
    filters let the transform/inverse round trip be shape-checked."""
    pytest.raises(
        NotImplementedError, ml.networks.modules.FilterBank, 2048)
    # Patch in random analysis/synthesis filters so the abstract base
    # class can be exercised end to end.
    def dummy_filters(self):
        num_filters = (1 + self.filter_length // 2) * 2
        random_basis = torch.randn(
            self.filter_length, num_filters)
        return random_basis.float()
    def dummy_inverse(self):
        num_filters = (1 + self.filter_length // 2) * 2
        random_basis = torch.randn(
            self.filter_length, num_filters)
        return random_basis.float().T
    monkeypatch.setattr(
        ml.networks.modules.FilterBank,
        'get_transform_filters',
        dummy_filters
    )
    monkeypatch.setattr(
        ml.networks.modules.FilterBank,
        'get_inverse_filters',
        dummy_inverse
    )
    representation = ml.networks.modules.FilterBank(
        512, hop_length=128)
    data = one_item['mix_audio']
    encoded = representation(data, 'transform')
    decoded = representation(encoded, 'inverse')
    # Half of the filter outputs should match the STFT magnitude shape.
    one_sided_shape = list(
        encoded.squeeze(0).shape)
    one_sided_shape[1] = one_sided_shape[1] // 2
    assert tuple(one_sided_shape) == tuple(one_item['mix_magnitude'].shape[1:])
    data = one_item['source_audio']
    encoded = representation(data, 'transform')
    decoded = representation(encoded, 'inverse')
    # Inverse restores the audio shape (values differ — the random
    # filters are not an invertible pair).
    assert decoded.shape == data.shape
def test_filter_bank_alignment(one_item):
    """STFT round-trip keeps random impulses at their original positions."""
    # if we construct a signal with an impulse at a random
    # offset, it should stay in the same place after the
    # stft
    win_length = 256
    hop_length = 64
    win_type = 'sqrt_hann'
    representation = ml.networks.modules.STFT(
        win_length, hop_length=hop_length, window_type=win_type)
    data = torch.zeros_like(one_item['source_audio'])
    for _ in range(20):
        # Place a unit impulse at a random sample offset.
        offset = np.random.randint(0, data.shape[-2])
        data[..., offset, 0] = 1
    encoded = representation(data, 'transform')
    decoded = representation(encoded, 'inverse')
    assert torch.allclose(decoded, data, atol=1e-6)
# Parameters for the parametrized STFT test below.
sr = nussl.constants.DEFAULT_SAMPLE_RATE
# Define my window lengths to be powers of 2, ranging from 128 to 2048 samples
win_min = 7  # 2 ** 7 = 128
win_max = 11  # 2 ** 11 = 2048
win_lengths = [2 ** i for i in range(win_min, win_max + 1)]
# Also include the default 32 ms window, rounded up to a power of two.
win_length_32ms = int(2 ** (np.ceil(np.log2(nussl.constants.DEFAULT_WIN_LEN_PARAM * sr))))
win_lengths.append(win_length_32ms)
hop_length_ratios = [0.5, .25]
window_types = ALL_WINDOWS
# NOTE(review): `signals` appears unused in this portion of the file.
signals = []
combos = itertools.product(win_lengths, hop_length_ratios, window_types)
@pytest.mark.parametrize("combo", combos)
def test_stft_module(combo, one_item):
    """STFT module round-trips audio and matches nussl's STFT magnitude.

    ``combo`` is (window_length, hop_ratio, window_type).
    """
    win_length = combo[0]
    hop_length = int(combo[0] * combo[1])
    win_type = combo[2]
    window = nussl.AudioSignal.get_window(combo[2], win_length)
    stft_params = nussl.STFTParams(
        window_length=win_length, hop_length=hop_length, window_type=win_type
    )
    representation = ml.networks.modules.STFT(
        win_length, hop_length=hop_length, window_type=win_type)
    if not check_COLA(window, win_length, win_length - hop_length):
        # NOTE(review): this branch is a no-op — `assert True` does not skip
        # the test; non-COLA configurations still run the checks below.
        # Presumably a `pytest.skip` or `return` was intended; confirm.
        assert True
    data = one_item['mix_audio']
    encoded = representation(data, 'transform')
    decoded = representation(encoded, 'inverse')
    encoded = encoded.squeeze(0).permute(1, 0, 2)
    # Round-trip reconstruction error should be tiny.
    assert (decoded - data).abs().max() < 1e-5
    audio_signal = nussl.AudioSignal(
        audio_data_array=data.squeeze(0).numpy(), sample_rate=16000, stft_params=stft_params
    )
    nussl_magnitude = np.abs(audio_signal.stft())
    _encoded = encoded.squeeze(0)
    # Keep only the one-sided (non-redundant) half of the filter outputs.
    cutoff = _encoded.shape[0] // 2
    _encoded = _encoded[:cutoff, ...]
    assert (_encoded - nussl_magnitude).abs().max() < 1e-6
def test_learned_filterbank(one_item):
    """LearnedFilterBank (trainable filters) round-trips to the input shape."""
    representation = ml.networks.modules.LearnedFilterBank(
        512, hop_length=128, requires_grad=True)
    data = one_item['mix_audio']
    encoded = representation(data, 'transform')
    decoded = representation(encoded, 'inverse')
    data = one_item['source_audio']
    encoded = representation(data, 'transform')
    decoded = representation(encoded, 'inverse')
    assert decoded.shape == data.shape
| 4,363 | 29.305556 | 92 | py |
nussl | nussl-master/tests/ml/test_confidence.py | from nussl import ml
import nussl
import torch
import numpy as np
from sklearn import datasets
import pytest
import copy
# Run these tests on the GPU when one is available; models and tensors
# below are moved to DEVICE.
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
@pytest.fixture(scope="module")
def simple_sine_data():
    """Build one sine-wave mixture item as AudioSignals and as tensors.

    Returns ``(item, tensorized)``, where ``tensorized`` has a batch
    dimension added and its tensors moved to DEVICE.
    """
    nussl.utils.seed(0)
    folder = 'ignored'
    stft_params = nussl.STFTParams(window_length=256, hop_length=64)
    tfm = nussl.datasets.transforms.Compose([
        nussl.datasets.transforms.PhaseSensitiveSpectrumApproximation(),
        nussl.datasets.transforms.MagnitudeWeights(),
    ])
    tensorize = nussl.datasets.transforms.ToSeparationModel()
    # NOTE(review): SineWaves is not defined or imported in this chunk —
    # presumably it is defined elsewhere in this file; confirm.
    sine_wave_dataset = SineWaves(
        folder, sample_rate=8000, stft_params=stft_params,
        transform=tfm, num_sources=2)
    item = sine_wave_dataset[0]
    tensorized = tensorize(copy.deepcopy(item))
    # Batch every tensor (unsqueeze) and move it to the test device.
    for key in tensorized:
        if torch.is_tensor(tensorized[key]):
            tensorized[key] = tensorized[key].to(
                DEVICE).float().unsqueeze(0).contiguous()
    return item, tensorized
@pytest.fixture(scope="module")
def simple_model(simple_sine_data):
    """Train a small embedding model for 10 steps on one fixed item.

    Returns ``(trained_model, untrained_model, item, tensor_data)`` so tests
    can compare confidence measures before and after training.
    """
    item, tensor_data = simple_sine_data
    num_features = 129  # number of frequencies in STFT
    embedding_size = 20  # how many sources to estimate
    activation = ['sigmoid', 'unit_norm']  # activation function for embedding
    num_audio_channels = 1  # number of audio channels
    modules = {
        'mix_magnitude': {},
        'log_spectrogram': {
            'class': 'AmplitudeToDB'
        },
        'normalization': {
            'class': 'BatchNorm',
        },
        'embedding': {
            'class': 'Embedding',
            'args': {
                'num_features': num_features,
                'hidden_size': num_features,
                'embedding_size': embedding_size,
                'activation': activation,
                'num_audio_channels': num_audio_channels,
                'dim_to_embed': [2, 3]  # embed the frequency dimension (2) for all audio channels (3)
            }
        },
    }
    connections = [
        ['log_spectrogram', ['mix_magnitude', ]],
        ['normalization', ['log_spectrogram', ]],
        ['embedding', ['normalization', ]],
    ]
    output = ['embedding']
    config = {
        'name': 'SimpleModel',
        'modules': modules,
        'connections': connections,
        'output': output
    }
    model = ml.SeparationModel(config).to(DEVICE)
    # Second copy of the same architecture that is never trained.
    untrained = ml.SeparationModel(config).to(DEVICE)
    loss_dictionary = {
        'EmbeddingLoss': {
            'class': 'WhitenedKMeansLoss'
        }
    }
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    closure = ml.train.closures.TrainClosure(loss_dictionary, optimizer, model)
    # A handful of gradient steps on the single fixed item.
    for i in range(10):
        loss_val = closure(None, tensor_data)
    return model, untrained, item, tensor_data
def test_js_divergence():
    """A 2-component GMM should diverge more from a 1-component fit than
    two independent 1-component fits diverge from each other."""
    n_samples = 1000
    blobs, _ = datasets.make_blobs(n_samples=n_samples, random_state=8)

    gmm_single_a = ml.cluster.GaussianMixture(1)
    gmm_single_b = ml.cluster.GaussianMixture(1)
    gmm_double = ml.cluster.GaussianMixture(2)

    # Fit in the same order the divergences are compared below.
    for gmm in (gmm_single_a, gmm_single_b, gmm_double):
        gmm.fit(blobs)

    divergence_2v1 = ml.confidence.jensen_shannon_divergence(
        gmm_single_a, gmm_double)
    divergence_1v1 = ml.confidence.jensen_shannon_divergence(
        gmm_single_a, gmm_single_b)
    assert divergence_2v1 > divergence_1v1
def test_get_loud_bins_mask(scaper_folder):
    """Threshold 0 keeps every time-frequency bin; an extreme threshold
    (100 dB above the max) keeps none — for both calling conventions."""
    dataset = nussl.datasets.Scaper(scaper_folder)
    item = dataset[0]
    magnitude = np.abs(item['mix'].stft())

    # Passing the audio signal directly.
    keep_all, _ = ml.confidence._get_loud_bins_mask(0, item['mix'])
    assert magnitude[keep_all].sum() == magnitude.sum()
    keep_none, _ = ml.confidence._get_loud_bins_mask(100, item['mix'])
    assert not magnitude[keep_none]

    # Passing a precomputed magnitude representation instead.
    keep_all, _ = ml.confidence._get_loud_bins_mask(
        0, representation=magnitude)
    assert magnitude[keep_all].sum() == magnitude.sum()
    keep_none, _ = ml.confidence._get_loud_bins_mask(
        100, representation=magnitude)
    assert not magnitude[keep_none]
def test_jensen_shannon_confidence(simple_model):
    """Jensen-Shannon confidence should be higher for the trained model's
    embedding than for the untrained model's embedding."""
    model, untrained, item, tensor_data = simple_model

    def _embed(net):
        # squeeze the batch dim, swap the first two axes, move to numpy
        emb = net(tensor_data)['embedding']
        return emb.squeeze(0).transpose(0, 1).data.cpu().numpy()

    trained_conf = ml.confidence.jensen_shannon_confidence(
        item['mix'], _embed(model), 2)
    untrained_conf = ml.confidence.jensen_shannon_confidence(
        item['mix'], _embed(untrained), 2)
    assert trained_conf > untrained_conf
def test_posterior_confidence(simple_model):
    """Posterior confidence should be higher for the trained model's
    embedding than for the untrained model's embedding."""
    model, untrained, item, tensor_data = simple_model

    def _embed(net):
        # squeeze the batch dim, swap the first two axes, move to numpy
        emb = net(tensor_data)['embedding']
        return emb.squeeze(0).transpose(0, 1).data.cpu().numpy()

    trained_conf = ml.confidence.posterior_confidence(
        item['mix'], _embed(model), 2)
    untrained_conf = ml.confidence.posterior_confidence(
        item['mix'], _embed(untrained), 2)
    assert trained_conf > untrained_conf
def test_silhouette_confidence(simple_model):
    """Silhouette confidence should be higher for the trained model's
    embedding than for the untrained model's embedding."""
    model, untrained, item, tensor_data = simple_model

    def _embed(net):
        # squeeze the batch dim, swap the first two axes, move to numpy
        emb = net(tensor_data)['embedding']
        return emb.squeeze(0).transpose(0, 1).data.cpu().numpy()

    trained_conf = ml.confidence.silhouette_confidence(
        item['mix'], _embed(model), 2)
    untrained_conf = ml.confidence.silhouette_confidence(
        item['mix'], _embed(untrained), 2)
    assert trained_conf > untrained_conf
def test_loudness_confidence(simple_model):
    """Loudness confidence should be higher for the trained model's
    embedding than for the untrained model's embedding."""
    model, untrained, item, tensor_data = simple_model

    def _embed(net):
        # squeeze the batch dim, swap the first two axes, move to numpy
        emb = net(tensor_data)['embedding']
        return emb.squeeze(0).transpose(0, 1).data.cpu().numpy()

    trained_conf = ml.confidence.loudness_confidence(
        item['mix'], _embed(model), 2)
    untrained_conf = ml.confidence.loudness_confidence(
        item['mix'], _embed(untrained), 2)
    assert trained_conf > untrained_conf
def test_whitened_kmeans_confidence(simple_model):
    """Whitened k-means confidence should be higher for the trained model's
    embedding than for the untrained model's embedding."""
    model, untrained, item, tensor_data = simple_model

    def _embed(net):
        # squeeze the batch dim, swap the first two axes, move to numpy
        emb = net(tensor_data)['embedding']
        return emb.squeeze(0).transpose(0, 1).data.cpu().numpy()

    trained_conf = ml.confidence.whitened_kmeans_confidence(
        item['mix'], _embed(model), 2)
    untrained_conf = ml.confidence.whitened_kmeans_confidence(
        item['mix'], _embed(untrained), 2)
    assert trained_conf > untrained_conf
def test_dpcl_classic_confidence(simple_model):
    """Classic deep-clustering confidence should be higher for the trained
    model's embedding than for the untrained model's embedding."""
    model, untrained, item, tensor_data = simple_model

    def _embed(net):
        # squeeze the batch dim, swap the first two axes, move to numpy
        emb = net(tensor_data)['embedding']
        return emb.squeeze(0).transpose(0, 1).data.cpu().numpy()

    trained_conf = ml.confidence.dpcl_classic_confidence(
        item['mix'], _embed(model), 2)
    untrained_conf = ml.confidence.dpcl_classic_confidence(
        item['mix'], _embed(untrained), 2)
    assert trained_conf > untrained_conf
def make_sine_wave(freq, sample_rate, duration):
    """Synthesize a pure tone at ``freq`` Hz, sampled at ``sample_rate``
    for ``duration`` seconds. Returns a 1-D numpy array."""
    times = np.arange(0.0, duration, 1 / sample_rate)
    return np.sin(2 * np.pi * freq * times)
def make_clicks(sample_rate, duration):
    """Make a random click track: 100 rectangular pulses (40 samples wide)
    at uniformly random positions in a ``duration``-second signal.

    Fix: the original sliced ``x[idx - 20:idx + 20]``; for ``idx < 20`` the
    negative start wrapped around to the end of the array, producing an
    empty slice and silently dropping clicks near the start. The start is
    now clamped to 0 (numpy already clamps ``idx + 20`` past the end).
    """
    x = np.zeros(sample_rate * duration)
    for _ in range(100):
        idx = np.random.randint(x.shape[0])
        start = max(idx - 20, 0)
        x[start:idx + 20] = 1
    return x
class SineWaves(nussl.datasets.BaseDataset):
    """Synthetic dataset: each item is a mix of random sine waves plus a
    click track, each source scaled by 1 / num_sources.

    Change from original: removed a dead ``freqs = []`` assignment in
    ``process_item`` that was immediately overwritten.
    """

    def __init__(self, *args, num_sources=3, num_frequencies=20, **kwargs):
        self.num_sources = num_sources
        # Pool of candidate frequencies shared by every item of the dataset.
        self.frequencies = np.random.choice(
            np.arange(110, 4000, 100), num_frequencies,
            replace=False)
        super().__init__(*args, **kwargs)

    def get_items(self, folder):
        # ignore folder and return a list
        # 100 items in this dataset
        items = list(range(100))
        return items

    def process_item(self, item):
        # we're ignoring items and making
        # sums of random sine waves
        sources = {}
        freqs = np.random.choice(
            self.frequencies, self.num_sources,
            replace=False)
        # num_sources - 1 sine sources; the final source is a click track.
        for i in range(self.num_sources - 1):
            freq = freqs[i]
            _data = make_sine_wave(freq, self.sample_rate, 2)
            # this is a helper function in BaseDataset for
            # making an audio signal from data
            signal = self._load_audio_from_array(_data)
            signal.path_to_input_file = f'{item}.wav'
            sources[f'sine{i}'] = signal * 1 / self.num_sources
        _data = make_clicks(self.sample_rate, 2)
        signal = self._load_audio_from_array(_data)
        signal.path_to_input_file = 'click.wav'
        sources['click'] = signal * 1 / self.num_sources
        mix = sum(sources.values())
        metadata = {
            'frequencies': freqs
        }
        output = {
            'mix': mix,
            'sources': sources,
            'metadata': metadata
        }
        return output
| 9,796 | 31.440397 | 101 | py |
nussl | nussl-master/tests/ml/test_overfit.py | from nussl import ml, datasets, evaluation
import tempfile
from torch import optim
import numpy as np
import logging
import os
import torch
from matplotlib import pyplot as plt
# Log at INFO with timestamps and file:line locations so training progress
# from the ignite engines is visible while the test runs.
logging.basicConfig(
    format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
    datefmt='%Y-%m-%d:%H:%M:%S',
    level=logging.INFO
)
# Fixed output directory for checkpoints/plots; when truthy it is used
# instead of a temporary directory so artifacts survive the test run.
fix_dir = 'tests/local/trainer'
def test_overfit_a(mix_source_folder):
    """Smoke-test the full training pipeline end to end.

    Builds a small recurrent mask-inference model, trains it for 5 epochs
    on a tiny MixSourceFolder dataset, checkpoints via the
    validate-and-checkpoint handler, reloads the best checkpoint, and
    saves loss-history plots for visual inspection.
    """
    # Cache the transformed examples to disk so repeated epochs are fast.
    tfms = datasets.transforms.Compose([
        datasets.transforms.PhaseSensitiveSpectrumApproximation(),
        datasets.transforms.ToSeparationModel(),
        datasets.transforms.Cache('~/.nussl/tests/cache', overwrite=True),
        datasets.transforms.GetExcerpt(400)
    ])
    dataset = datasets.MixSourceFolder(
        mix_source_folder, transform=tfms)
    ml.train.cache_dataset(dataset)
    dataset.cache_populated = True
    # Single batch containing the whole (tiny) dataset.
    dataloader = torch.utils.data.DataLoader(
        dataset, shuffle=True, batch_size=len(dataset), num_workers=2)
    # create the model, based on the first item in the dataset
    # second bit of the shape is the number of features
    n_features = dataset[0]['mix_magnitude'].shape[1]
    mi_config = ml.networks.builders.build_recurrent_mask_inference(
        n_features, 50, 1, False, 0.0, 2, 'sigmoid',
    )
    model = ml.SeparationModel(mi_config)
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # More steps per epoch on GPU; fewer on CPU to keep the test quick.
    if device == 'cuda':
        epoch_length = 100
    else:
        epoch_length = 10
    model = model.to(device)
    # create optimizer
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    loss_dictionary = {
        'L1Loss': {
            'weight': 1.0
        }
    }
    train_closure = ml.train.closures.TrainClosure(
        loss_dictionary, optimizer, model)
    val_closure = ml.train.closures.ValidationClosure(
        loss_dictionary, model)
    with tempfile.TemporaryDirectory() as tmpdir:
        # fix_dir is set at module level; the tmpdir fallback only applies
        # when fix_dir is falsy.
        _dir = fix_dir if fix_dir else tmpdir
        os.makedirs(os.path.join(_dir, 'plots'), exist_ok=True)
        trainer, validator = ml.train.create_train_and_validation_engines(
            train_closure, val_closure, device=device
        )
        # add handlers to engine
        ml.train.add_stdout_handler(trainer, validator)
        ml.train.add_validate_and_checkpoint(
            _dir, model, optimizer, dataset,
            trainer, val_data=dataloader, validator=validator)
        ml.train.add_tensorboard_handler(_dir, trainer)
        # run engine
        trainer.run(dataloader, max_epochs=5, epoch_length=epoch_length)
        # Reload the best checkpoint saved by the validation handler,
        # mapping storage to CPU so this works regardless of device.
        model_path = os.path.join(
            trainer.state.output_folder, 'checkpoints', 'best.model.pth')
        state_dict = torch.load(
            model_path, map_location=lambda storage, loc: storage)
        model.load_state_dict(state_dict['state_dict'])
        history = state_dict['metadata']['trainer.state.epoch_history']
        # Save one loss-history plot per tracked metric; '/' in metric names
        # is replaced so the plot filename stays valid.
        for key in history:
            plt.figure(figsize=(10, 4))
            plt.title(f"epoch:{key}")
            plt.plot(np.array(history[key]).reshape(-1, ))
            plt.savefig(os.path.join(
                trainer.state.output_folder, 'plots',
                f"epoch:{key.replace('/', ':')}.png"))
| 3,197 | 31.969072 | 89 | py |
nussl | nussl-master/tests/ml/test_closures.py | from nussl import datasets, ml
from torch import optim, nn
import torch
import numpy as np
from nussl.ml.train.closures import ClosureException
import pytest
import nussl
def test_base_closure():
    """Exercise ml.train.closures.Closure on synthetic data.

    Covers: loss combination by product and by weighted sum, validation of
    malformed loss dictionaries, determinism across re-instantiation, key
    remapping between model output and targets, and registration of a
    custom loss class.
    """
    n_batch = 40
    n_time = 400
    n_freq = 129
    n_sources = 4
    n_embedding = 20
    # Fake model output: unit-norm embeddings, uniform weights, random
    # estimates; targets are random binary masks and magnitudes.
    embedding = torch.rand(n_batch, n_time, n_freq, n_embedding)
    embedding = torch.nn.functional.normalize(
        embedding, dim=-1, p=2)
    assignments = torch.rand(n_batch, n_time, n_freq, n_sources) > .5
    assignments = assignments.float()
    weights = torch.ones(n_batch, n_time, n_freq)
    output = {
        'embedding': embedding,
        'weights': weights,
        'estimates': torch.rand(n_batch, n_time, n_freq, n_sources)
    }
    target = {
        'ideal_binary_mask': assignments,
        'source_magnitudes': torch.rand(n_batch, n_time, n_freq, n_sources)
    }
    loss_dictionary = {
        'DeepClusteringLoss': {
            'weight': .2,
        },
        'L1Loss': {
            'weight': .8
        }
    }
    # combine_by_multiply: total loss should equal the product of the
    # individually weighted losses.
    closure = ml.train.closures.Closure(
        loss_dictionary, combination_approach='combine_by_multiply')
    loss_a = closure.compute_loss(output, target)
    weighted_product = 1
    for key in loss_dictionary:
        assert key in loss_a
        weighted_product *= loss_a[key].item() * loss_dictionary[key]['weight']
    assert 'loss' in loss_a
    assert np.allclose(loss_a['loss'].item(), weighted_product, atol=1e-4)
    # Default combination: total loss is the weighted sum of the losses.
    loss_dictionary = {
        'DeepClusteringLoss': {
            'weight': .2,
        },
        'PermutationInvariantLoss': {
            'weight': .8,
            'args': ['L1Loss']
        }
    }
    closure = ml.train.closures.Closure(loss_dictionary)
    loss_a = closure.compute_loss(output, target)
    weighted_sum = 0
    for key in loss_dictionary:
        assert key in loss_a
        weighted_sum += loss_a[key].item() * loss_dictionary[key]['weight']
    assert 'loss' in loss_a
    assert np.allclose(loss_a['loss'].item(), weighted_sum, atol=1e-4)
    # checking validation
    pytest.raises(ClosureException, ml.train.closures.Closure, ['not a dict'])
    pytest.raises(ClosureException, ml.train.closures.Closure, {'no_matching_loss': {}})
    pytest.raises(ClosureException, ml.train.closures.Closure,
                  {
                      'DeepClusteringLoss': ['not a dict']
                  }
                  )
    pytest.raises(ClosureException, ml.train.closures.Closure,
                  {
                      'DeepClusteringLoss': {
                          'bad_val_key': []
                      }
                  }
                  )
    pytest.raises(ClosureException, ml.train.closures.Closure,
                  {
                      'DeepClusteringLoss': {
                          'weight': 'not a float or int'
                      }
                  }
                  )
    pytest.raises(ClosureException, ml.train.closures.Closure,
                  {
                      'DeepClusteringLoss': {
                          'weight': 1,
                          'args': {'not a list': 'woo'}
                      }
                  }
                  )
    pytest.raises(ClosureException, ml.train.closures.Closure,
                  {
                      'DeepClusteringLoss': {
                          'weight': 1,
                          'args': [],
                          'kwargs': ['not a dict']
                      }
                  }
                  )
    closure = ml.train.closures.Closure(loss_dictionary)
    # doing it twice should work
    closure = ml.train.closures.Closure(loss_dictionary)
    loss_b = closure.compute_loss(output, target)
    weighted_sum = 0
    for key in loss_dictionary:
        assert key in loss_b
        weighted_sum += loss_b[key].item() * loss_dictionary[key]['weight']
    assert 'loss' in loss_b
    assert np.allclose(loss_b['loss'].item(), weighted_sum, atol=1e-4)
    # Re-instantiating the closure should give (nearly) the same total loss.
    assert np.allclose(loss_a['loss'].item(), loss_b['loss'].item(), atol=1e-2)
    # Custom 'keys' mapping: route 'audio'/'source_audio' into the loss's
    # expected argument names.
    loss_dictionary = {
        'PITLoss': {
            'class': 'PermutationInvariantLoss',
            'keys': {'audio': 'estimates', 'source_audio': 'targets'},
            'args': [{
                'class': 'SISDRLoss',
                'kwargs': {'scaling': False}
            }]
        }
    }
    closure = ml.train.closures.Closure(loss_dictionary)
    # doing it twice should work
    closure = ml.train.closures.Closure(loss_dictionary)
    audio = torch.rand(n_batch, 44100, 2)
    source_audio = torch.rand(n_batch, 44100, 2)
    output = {
        'audio': audio,
    }
    target = {
        'source_audio': source_audio,
    }
    loss_b = closure.compute_loss(output, target)
    # A user-defined loss class can be registered and then referenced by
    # name in a loss dictionary.
    class CustomLoss:
        DEFAULT_KEYS = {}
        pass
    custom_loss_dictionary = {
        'CustomLoss': {
            'weight': .8,
        }
    }
    ml.register_loss(CustomLoss)
    closure = ml.train.closures.Closure(custom_loss_dictionary)
    assert isinstance(closure.losses[0][0], CustomLoss)
def test_multitask_combination():
    """Test combine_by_multitask: trainable per-loss weights should adapt
    so that a large-scale loss and a small-scale loss end up balanced.

    The weights are zero-initialized nn.Parameters; after optimization the
    check below compares the two losses rescaled by exp(-weight) —
    presumably the uncertainty-style multitask weighting; verify against
    the Closure implementation.
    """
    nussl.utils.seed(0)
    n_batch = 40
    n_time = 400
    n_freq = 129
    n_sources = 4
    # Two tasks at very different scales (3x vs 0.1x random data).
    output = {
        'estimates_a': 3 * torch.rand(n_batch, n_time, n_freq, n_sources),
        'estimates_b': .1 * torch.rand(n_batch, n_time, n_freq, n_sources)
    }
    target = {
        'source_magnitudes_a': 3 * torch.rand(n_batch, n_time, n_freq, n_sources),
        'source_magnitudes_b': .1 * torch.rand(n_batch, n_time, n_freq, n_sources)
    }
    # One trainable scalar weight per loss term.
    weights = nn.ParameterList([
        nn.Parameter(torch.zeros(1)) for i in range(2)
    ])
    optimizer = optim.Adam(weights.parameters(), lr=1e-1)
    loss_dictionary = {
        'BigLoss': {
            'class': 'L1Loss',
            'weight': weights[0],
            'keys': {
                'estimates_a': 'input',
                'source_magnitudes_a': 'target',
            }
        },
        'SmallLoss': {
            'class': 'L1Loss',
            'weight': weights[1],
            'keys': {
                'estimates_b': 'input',
                'source_magnitudes_b': 'target',
            }
        }
    }
    closure = ml.train.closures.Closure(
        loss_dictionary, combination_approach='combine_by_multitask')
    loss_a = closure.compute_loss(output, target)
    # Optimize only the loss weights; the data is fixed.
    for i in range(1000):
        optimizer.zero_grad()
        loss = closure.compute_loss(output, target)
        loss['loss'].backward()
        optimizer.step()
    var = []
    for p in weights.parameters():
        var.append(np.exp(-p.item()) ** 1)
    # NOTE(review): this is a signed difference, not abs(); it only bounds
    # BigLoss from above relative to SmallLoss — confirm that is intended.
    assert (
        var[0] * loss['BigLoss'] -
        var[1] * loss['SmallLoss']
    ) < 1e-2
def test_train_and_validate_closure():
    """TrainClosure should reduce every loss term over repeated steps on
    the same batch, and ValidationClosure should report losses close to
    the final training losses (no optimization step)."""
    n_batch = 5
    n_time = 100
    n_freq = 129
    n_sources = 2
    n_embedding = 20
    # Small chimera model: joint embedding + mask-inference heads.
    chimera_config = ml.networks.builders.build_recurrent_chimera(
        n_freq, 50, 2, True, 0.3, 20, ['sigmoid', 'unit_norm'],
        2, 'softmax',
    )
    model = ml.networks.SeparationModel(chimera_config)
    optimizer = optim.Adam(model.parameters(), lr=1e-2)
    # Random binary-mask targets and magnitudes for a single fixed batch.
    assignments = torch.rand(n_batch, n_time, n_freq, n_sources) > .5
    assignments = assignments.float()
    weights = torch.ones(n_batch, n_time, n_freq)
    data = {
        'mix_magnitude': torch.rand(n_batch, n_time, n_freq, 1),
        'ideal_binary_mask': assignments,
        'weights': weights,
        'source_magnitudes': torch.rand(n_batch, n_time, n_freq, 1, n_sources)
    }
    loss_dictionary = {
        'DeepClusteringLoss': {
            'weight': .2,
        },
        'L1Loss': {
            'weight': .8
        }
    }
    closure = ml.train.closures.TrainClosure(
        loss_dictionary, optimizer, model)
    # since closure expects an
    # engine within an ignite object, make a fake one
    engine = ml.train.create_train_and_validation_engines(closure)[0]
    init_loss = closure(engine, data)
    loss = None
    # 100 optimization steps on the same batch should overfit it.
    for i in range(100):
        loss = closure(engine, data)
        last_loss = loss
    for key in last_loss:
        assert last_loss[key] < init_loss[key]
    # Validation closure: no optimizer step, so losses should match the
    # last training losses within tolerance.
    closure = ml.train.closures.ValidationClosure(
        loss_dictionary, model)
    for i in range(1):
        loss = closure(engine, data)
    for key in loss:
        assert np.allclose(loss[key], last_loss[key], 1e-1)
| 8,352 | 27.030201 | 88 | py |
nussl | nussl-master/tests/ml/test_gradients.py | from nussl import ml, datasets, utils
import numpy as np
import torch
import matplotlib.pyplot as plt
import os
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
def test_gradients(mix_source_folder):
    """Check gradient correctness for several architectures.

    For each model config this verifies that (a) a batched forward pass
    matches per-item forward passes, and (b) gradients from one batched
    backward pass match gradients accumulated from per-item backward
    passes (each scaled by 1 / len(dataset)). Configs listed in
    config_has_batch_norm are exempt from the exact comparisons, since
    batch statistics couple items within a batch. Gradient-flow plots are
    saved under tests/local/ for inspection.
    """
    os.makedirs('tests/local/', exist_ok=True)
    utils.seed(0)
    tfms = datasets.transforms.Compose([
        datasets.transforms.GetAudio(),
        datasets.transforms.PhaseSensitiveSpectrumApproximation(),
        datasets.transforms.MagnitudeWeights(),
        datasets.transforms.ToSeparationModel(),
        datasets.transforms.GetExcerpt(50),
        datasets.transforms.GetExcerpt(
            3136, time_dim=1, tf_keys=['mix_audio', 'source_audio'])
    ])
    dataset = datasets.MixSourceFolder(
        mix_source_folder, transform=tfms)
    # create the model, based on the first item in the dataset
    # second bit of the shape is the number of features
    n_features = dataset[0]['mix_magnitude'].shape[1]
    # make some configs
    names = ['dpcl', 'mask_inference_l1', 'mask_inference_mse_loss', 'chimera',
             'open_unmix', 'end_to_end', 'dual_path']
    # Configs containing batch norm; exempt from exact per-item checks.
    config_has_batch_norm = ['open_unmix', 'dual_path']
    configs = [
        ml.networks.builders.build_recurrent_dpcl(
            n_features, 50, 1, True, 0.0, 20, ['sigmoid'],
            normalization_class='InstanceNorm'),
        ml.networks.builders.build_recurrent_mask_inference(
            n_features, 50, 1, True, 0.0, 2, ['softmax'],
            normalization_class='InstanceNorm'
        ),
        ml.networks.builders.build_recurrent_mask_inference(
            n_features, 50, 1, True, 0.0, 2, ['softmax'],
            normalization_class='InstanceNorm'
        ),
        ml.networks.builders.build_recurrent_chimera(
            n_features, 50, 1, True, 0.0, 20, ['sigmoid'], 2,
            ['softmax'], normalization_class='InstanceNorm'
        ),
        ml.networks.builders.build_open_unmix_like(
            n_features, 50, 1, True, .4, 2, 1, add_embedding=True,
            embedding_size=20, embedding_activation=['sigmoid', 'unit_norm'],
        ),
        ml.networks.builders.build_recurrent_end_to_end(
            256, 256, 64, 'sqrt_hann', 50, 2,
            True, 0.0, 2, 'sigmoid', num_audio_channels=1,
            mask_complex=False, rnn_type='lstm',
            mix_key='mix_audio', normalization_class='InstanceNorm'),
        ml.networks.builders.build_dual_path_recurrent_end_to_end(
            64, 16, 8, 60, 30, 50, 2, True, 25, 2, 'sigmoid',
        )
    ]
    # One loss dictionary per config, in the same order as `names`.
    loss_dictionaries = [
        {
            'DeepClusteringLoss': {
                'weight': 1.0
            }
        },
        {
            'L1Loss': {
                'weight': 1.0
            }
        },
        {
            'MSELoss': {
                'weight': 1.0
            }
        },
        {
            'DeepClusteringLoss': {
                'weight': 0.2
            },
            'PermutationInvariantLoss': {
                'args': ['L1Loss'],
                'weight': 0.8
            }
        },
        {
            'DeepClusteringLoss': {
                'weight': 0.2
            },
            'PermutationInvariantLoss': {
                'args': ['L1Loss'],
                'weight': 0.8
            }
        },
        {
            'SISDRLoss': {
                'weight': 1.0,
                'keys': {
                    'audio': 'estimates',
                    'source_audio': 'references'
                }
            }
        },
        {
            'SISDRLoss': {
                'weight': 1.0,
                'keys': {
                    'audio': 'estimates',
                    'source_audio': 'references'
                }
            }
        },
    ]

    def append_keys_to_model(name, model):
        # End-to-end models need extra output keys exposed so the losses
        # can find the audio-domain estimates.
        if name == 'end_to_end':
            model.output_keys.extend(
                ['audio', 'recurrent_stack', 'mask', 'estimates']
            )
        elif name == 'dual_path':
            model.output_keys.extend(
                ['audio', 'mixture_weights', 'dual_path', 'mask', 'estimates']
            )

    for name, config, loss_dictionary in zip(names, configs, loss_dictionaries):
        loss_closure = ml.train.closures.Closure(loss_dictionary)
        # Same seed before each model build so both copies start identical.
        utils.seed(0, set_cudnn=True)
        model_grad = ml.SeparationModel(config, verbose=True).to(DEVICE)
        append_keys_to_model(name, model_grad)
        # Concatenate every tensor in the dataset into one batch.
        all_data = {}
        for data in dataset:
            for key in data:
                if torch.is_tensor(data[key]):
                    data[key] = data[key].float().unsqueeze(0).contiguous().to(DEVICE)
                    if key not in all_data:
                        all_data[key] = data[key]
                    else:
                        all_data[key] = torch.cat([all_data[key], data[key]], dim=0)
        # do a forward pass in batched mode
        output_grad = model_grad(all_data)
        _loss = loss_closure.compute_loss(output_grad, all_data)
        # do a backward pass in batched mode
        _loss['loss'].backward()
        plt.figure(figsize=(10, 10))
        utils.visualize_gradient_flow(model_grad.named_parameters())
        plt.tight_layout()
        plt.savefig(f'tests/local/{name}:batch_gradient.png')
        # Second identical model for the accumulated-gradient pass.
        utils.seed(0, set_cudnn=True)
        model_acc = ml.SeparationModel(config).to(DEVICE)
        append_keys_to_model(name, model_acc)
        for i, data in enumerate(dataset):
            for key in data:
                if torch.is_tensor(data[key]):
                    data[key] = data[key].float().unsqueeze(0).contiguous().to(DEVICE)
            # do a forward pass on each item individually
            output_acc = model_acc(data)
            for key in output_acc:
                # make sure the forward pass in batch and forward pass individually match
                # if they don't, then items in a minibatch are talking to each other
                # somehow...
                _data_a = output_acc[key]
                _data_b = output_grad[key][i].unsqueeze(0)
                if name not in config_has_batch_norm:
                    assert torch.allclose(_data_a, _data_b, atol=1e-3)
            _loss = loss_closure.compute_loss(output_acc, data)
            # do a backward pass on each item individually
            _loss['loss'] = _loss['loss'] / len(dataset)
            _loss['loss'].backward()
        plt.figure(figsize=(10, 10))
        utils.visualize_gradient_flow(model_acc.named_parameters())
        plt.tight_layout()
        plt.savefig(f'tests/local/{name}:accumulated_gradient.png')
        # make sure the gradients match between batched and accumulated gradients
        # if they don't, then the items in a batch are talking to each other in the loss
        for param1, param2 in zip(model_grad.parameters(), model_acc.parameters()):
            assert torch.allclose(param1, param2)
            if name not in config_has_batch_norm:
                if param1.requires_grad and param2.requires_grad:
                    assert torch.allclose(
                        param1.grad.mean(), param2.grad.mean(), atol=1e-3)
| 7,122 | 35.906736 | 89 | py |
nussl | nussl-master/docs/tutorials/training.py | # ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Training deep models in *nussl*
# ==============================
#
# *nussl* has a tightly integrated deep learning pipeline for computer audition,
# with a focus on source separation. This pipeline includes:
#
# - Existing source separation architectures (Deep Clustering, Mask Inference, etc),
# - Building blocks for creating new architectures (Recurrent Stacks, Embedding spaces, Mask Layers,
# Mel Projection Layers, etc),
# - Handling data and common data sets (WSJ, MUSDB, etc),
# - Training architectures via an easy to use API powered by [PyTorch Ignite](
# https://pytorch.org/ignite/index.html),
# - Evaluating model performance (SDR, SI-SDR, etc),
# - Using the models on new audio signals for inference,
# - Storing and distributing trained models via the [External File Zoo](
# http://nussl.ci.northwestern.edu/).
#
# This tutorial will walk you through *nussl*'s model training capabilities on a simple
# synthetic dataset for illustration purposes. While *nussl* has support for a broad variety of
# models, we will focus on straight-forward mask inference networks.
# +
# Do our imports and setup for this tutorial.
import os
import json
import logging
import copy
import tempfile
import glob
import time
import shutil
from concurrent.futures import ThreadPoolExecutor
import torch
import numpy as np
import matplotlib.pyplot as plt
import tqdm
import nussl
start_time = time.time()
# seed this notebook
# (this seeds python's random, np.random, and torch.random)
nussl.utils.seed(0)
# -
# SeparationModel
# ---------------
#
# At the heart of *nussl*'s deep learning pipeline is the SeparationModel class.
# SeparationModel takes in a description of the model architecture and instantiates it.
# Model architectures are described via a dictionary. A model architecture has three
# parts: the building blocks, or *modules*, how the building blocks are wired together,
# and the outputs of the model.
#
# ### Modules ##
#
# Let's take a look how a simple architecture is described. This model will be a single
# linear layer that estimates the spectra for 3 sources for every frame in the STFT.
# +
# define the building blocks
num_features = 129 # number of frequency bins in STFT
num_sources = 3 # how many sources to estimate
mask_activation = 'sigmoid' # activation function for masks
num_audio_channels = 1 # number of audio channels
modules = {
'mix_magnitude': {},
'my_log_spec': {
'class': 'AmplitudeToDB'
},
'my_norm': {
'class': 'BatchNorm',
},
'my_mask': {
'class': 'Embedding',
'args': {
'num_features': num_features,
'hidden_size': num_features,
'embedding_size': num_sources,
'activation': mask_activation,
'num_audio_channels': num_audio_channels,
'dim_to_embed': [2, 3] # embed the frequency dimension (2) for all audio channels (3)
}
},
'my_estimates': {
'class': 'Mask',
},
}
# -
# The lines above define the building blocks, or *modules* of the SeparationModel.
# There are four building blocks:
#
# - `mix_magnitude`, the input to the model (this key is not user-definable),
# - `my_log_spec`, a "layer" that converts the spectrogram to dB space,
# - `my_norm`, a BatchNorm normalization layer, and
# - `my_mask`, which outputs the resultant mask.
#
# Each module in the dictionary has a key and a
# value. The key tells SeparationModel the user-definable name of that layer in our architecture.
# For example, `my_log_spec` will be the name of a building block. The value is
# also a dictionary with two values: `class` and `args`. `class` tells SeparationModel
# what the code for this module should be. `args` tells SeparationModel what the
# arguments to the class should be when instantiating it. Finally, if the dictionary
# that the key points to is empty, then it is assumed to be something that comes from
# the input dictionary to the model. Note that we haven't fully defined the model yet! We still
# need to determine how these modules are put together.
#
# So where does the code for each of these classes live? The code for these modules
# is in `nussl.ml.modules`. The existing modules in *nussl* are as follows:
# +
def print_existing_modules():
    """Print the public contents of nussl.ml.modules, hiding imports."""
    hidden = {'checkpoint', 'librosa', 'nn', 'np', 'torch', 'warnings'}
    print('nussl.ml.modules contents:')
    print('--------------------------')
    names = [name for name in dir(nussl.ml.modules)
             if name not in hidden and not name.startswith('__')]
    print('\n'.join(names))

print_existing_modules()
# -
# Descriptions of each of these modules and their arguments can be found in the API docs.
# In the model we have described above, we have used:
#
# 1. `AmplitudeToDB` to compute log-magnitude spectrograms from the input `mix_magnitude`.
# 2. `BatchNorm` to normalize each spectrogram input by the mean and standard
# deviation of all the data (one mean/std for the entire spectrogram, not per feature).
# 3. `Embedding` to embed each 129-dimensional frame into 3*129-dimensional space with a
# sigmoid activation.
# 4. `Mask` to take the output of the embedding and element-wise multiply it by the input
# `mix_magnitude` to generate source estimates.
#
# ### Connections ###
#
# Now we have to define the next part of SeparationModel - how the modules are wired together.
# We do this by defining the `connections` of the model.
# define the topology
connections = [
['my_log_spec', ['mix_magnitude', ]],
['my_norm', ['my_log_spec', ]],
['my_mask', ['my_norm', ]],
['my_estimates', ['my_mask', 'mix_magnitude']]
]
# `connections` is a list of lists. Each item of `connections` has two elements. The first
# element contains the name of our module (defined in `modules`). The second element
# contains the arguments that will go into the module defined in the first element.
#
# So for example, `my_log_spec`, which corresponded to the `AmplitudeToDB`
# class takes in `mix_magnitude`. In the forward pass `mix_magnitude` corresponds to
# the data in the input dictionary. The output of `my_log_spec` (a
# log-magnitude spectrogram) is passed to the module named `my_norm`, (a `BatchNorm`
# layer). This output is then passed to the `my_mask` module, which
# constructs the masks using an `Embedding` class. Finally, the source estimates
# are constructed by passing both `mix_magnitude` and `my_mask` to the `my_estimates`
# module, which uses a `Mask` class.
#
# Complex forward passes can be defined via these connections. Connections can be
# even more detailed. Modules can take in keyword arguments by making the second
# element a dictionary. If modules also output a dictionary, then specific outputs
# can be reference in the connections via `module_name:key_in_dictionary`. For
# example, `nussl.ml.modules.GaussianMixtureTorch` (which is a differentiable
# GMM unfolded on some input data) outputs a dictionary with
# the following keys: `resp, log_prob, means, covariance, prior`. If this module
# was named `gmm`, then these outputs can be used in the second element via
# `gmm:means`, `gmm:resp`, `gmm:covariance`, etc.
#
# ### Output and forward pass ###
#
# Next, models have to actually output some data to be used later on. Let's have
# this model output the keys for `my_estimates` and `my_mask` (as defined in our `modules` dict, above) by doing this:
# define the outputs
output = ['my_estimates', 'my_mask']
# You can use these outputs directly or you can use them as a part of a
# larger deep learning pipeline. SeparationModel can be, for example, a
# first step before you do something more complicated with the output
# that doesn't fit cleanly into how SeparationModels are built.
#
# ### Putting it all together ###
#
# Finally, let's put it all together in one config dictionary. The dictionary
# must have the following keys to be valid: `modules`, `connections`, and
# `output`. If these keys don't exist, then SeparationModel will throw
# an error.
# +
# put it all together
config = {
'name': 'MyGreatModel',
'modules': modules,
'connections': connections,
'output': output
}
print(json.dumps(config, indent=2))
# -
# Let's load this config into SeparationModel and print the model
# architecture:
model = nussl.ml.SeparationModel(config)
print(model)
# Now let's put some random data through it, with the expected size.
# The expected shape is: (batch_size, n_frames, n_frequencies, n_channels)
# so: batch size is 1, 400 frames, 129 frequencies, and 1 audio channel
mix_magnitude = torch.rand(1, 400, 129, 1)
model(mix_magnitude)
# Uh oh! Putting in the data directly resulted in an error. This is because
# SeparationModel expects a *dictionary*. The dictionary must contain all of the
# input keys that were defined. Here it was `mix_magnitude`. So let's try
# again:
mix_magnitude = torch.rand(1, 400, 129, 1)
data = {'mix_magnitude': mix_magnitude}
output = model(data)
# Now we have passed the data through the model. Note a few things here:
#
# 1. The tensor passed through the model had the following shape:
# `(n_batch, sequence_length, num_frequencies, num_audio_channels)`. This is
# different from how STFTs for an AudioSignal are shaped. Those are shaped as:
# `(num_frequencies, sequence_length, num_audio_channels)`. We added a batch
# dimension here, and the ordering of frequency and audio channel dimensions
# were swapped. This is because recurrent networks are a popular way to process
# spectrograms, and these expect (and operate more efficiently) when sequence
# length is right after the batch dimension.
# 2. The key in the dictionary had to match what we put in the configuration
# before.
# 3. We embedded *both* the channel dimension (3) as well as the frequency dimension (2)
# when building up the configuration.
#
# Now let's take a look at what's in the output!
output.keys()
# There are two keys as expected: `my_estimates` and `my_mask`. They both have the
# same shape as `mix_magnitude` with one addition:
output['my_estimates'].shape, output['my_mask'].shape
# The last dimension is 3! Which is the number of sources we're trying to
# separate. Let's look at the first source.
# +
i = 0
plt.figure(figsize=(5, 5))
plt.imshow(output['my_estimates'][0, ..., 0, i].T.cpu().data.numpy())
plt.title("Source")
plt.show()
plt.figure(figsize=(5, 5))
plt.imshow(output['my_mask'][0, ..., 0, i].T.cpu().data.numpy())
plt.title("Mask")
plt.show()
# -
# Not much to look at!
# ### Saving and loading a model ###
#
# Now let's save this model and load it back up.
with tempfile.NamedTemporaryFile(suffix='.pth', delete=True) as f:
loc = model.save(f.name)
reloaded_dict = torch.load(f.name)
print(reloaded_dict.keys())
new_model = nussl.ml.SeparationModel(reloaded_dict['config'])
new_model.load_state_dict(reloaded_dict['state_dict'])
print(new_model)
# When models are saved, both the config AND the weights are saved. Both of these can be easily
# loaded back into a new SeparationModel object.
# Custom modules
# --------------
#
# There's also straightforward support for *custom* modules that don't
# exist in *nussl* but rather exist in the end-user code. These can be
# registered with SeparationModel easily. Let's build a custom module
# and register it with a copy of our existing model. Let's make this
# module a lambda, which takes in some arbitrary function and runs
# it on the input. We'll call it LambdaLayer:
# +
class LambdaLayer(torch.nn.Module):
    """Wrap an arbitrary callable so it can be used as a torch Module.

    Args:
        func (callable): Function applied to the input on every forward pass.
    """
    def __init__(self, func):
        # Initialize nn.Module *first*: assigning a Module/Parameter/Tensor
        # attribute before super().__init__() raises an AttributeError, so
        # calling it up front keeps the wrapper safe for any callable.
        super().__init__()
        self.func = func
    def forward(self, data):
        # Delegate straight to the wrapped callable and return its result.
        return self.func(data)
def print_shape(x):
    """Print the ``shape`` attribute of ``x`` to stdout."""
    message = f'Shape is {x.shape}'
    print(message)
lamb = LambdaLayer(print_shape)
output = lamb(mix_magnitude)
# -
# Now let's put it into a copy of our model and update the connections so that it
# prints for every layer.
# +
# Copy our previous modules and add our new Lambda class
new_modules = copy.deepcopy(modules)
new_modules['lambda'] = {
'class': 'LambdaLayer',
'args': {
'func': print_shape
}
}
new_connections = [
['my_log_spec', ['mix_magnitude', ]],
['lambda', ['mix_magnitude', ]],
['lambda', ['my_log_spec', ]],
['my_norm', ['my_log_spec', ]],
['lambda', ['my_norm', ]],
['my_mask', ['my_norm', ]],
['lambda', ['my_mask', ]],
['my_estimates', ['my_mask', 'mix_magnitude']],
['lambda', ['my_estimates', ]]
]
new_config = {
'modules': new_modules,
'connections': new_connections,
'output': ['my_estimates', 'my_mask']
}
# -
# But right now, SeparationModel doesn't know about our LambdaLayer class! So,
# let's make it aware by registering the module with nussl:
nussl.ml.register_module(LambdaLayer)
print_existing_modules()
# Now LambdaLayer is a registered module! Let's build the SeparationModel and
# put some data through it:
verbose_model = nussl.ml.SeparationModel(new_config)
output = verbose_model(data)
# We can see the outputs of the Lambda layer recurring after each connection.
# (**Note**: that because we used a non-serializable argument (the function, ``func``)
# to the LambdaLayer, this model won't save without special handling!)
# Alright, now let's see how to use some actual audio data with our model...
# Handling data
# -------------
#
# As described in the datasets tutorial, the heart of *nussl* data handling
# is BaseDataset and its associated subclasses. We built a simple one in that
# tutorial that just produced random sine waves. Let's grab it again:
# +
def make_sine_wave(freq, sample_rate, duration):
    """Return ``duration`` seconds of a ``freq``-Hz sine wave as a numpy array."""
    # Sample times spaced one sample period apart, starting at t = 0.
    times = np.arange(0.0, duration, 1 / sample_rate)
    return np.sin(2 * np.pi * freq * times)
class SineWaves(nussl.datasets.BaseDataset):
    """Dataset hook that synthesizes mixtures of random sine tones on the fly."""
    def __init__(self, *args, num_sources=3, num_frequencies=20, **kwargs):
        # Draw the pool of candidate fundamental frequencies once, up front,
        # so every item samples from the same fixed set.
        self.num_sources = num_sources
        self.frequencies = np.random.choice(
            np.arange(110, 4000, 100), num_frequencies,
            replace=False)
        super().__init__(*args, **kwargs)
    def get_items(self, folder):
        # ``folder`` is irrelevant here; the dataset simply exposes
        # 100 synthetic items.
        return list(range(100))
    def process_item(self, item):
        # ``item`` itself is ignored beyond naming: each access builds a
        # fresh mixture from ``num_sources`` distinct pool frequencies.
        freqs = np.random.choice(
            self.frequencies, self.num_sources,
            replace=False)
        sources = {}
        for idx, freq in enumerate(freqs):
            samples = make_sine_wave(freq, self.sample_rate, 2)
            # BaseDataset helper that wraps a raw array in an audio signal.
            signal = self._load_audio_from_array(samples)
            signal.path_to_input_file = f'{item}.wav'
            sources[f'sine{idx}'] = signal * 1 / self.num_sources
        return {
            'mix': sum(sources.values()),
            'sources': sources,
            'metadata': {
                'frequencies': freqs
            }
        }
# -
# As a reminder, this dataset makes random mixtures of sine waves with fundamental frequencies
# between 110 Hz and 4000 Hz. Let's now set it up with appropriate STFT parameters that result
# in 129 frequencies in the spectrogram.
# +
nussl.utils.seed(0) # make sure this does the same thing each time
# We're not reading data, so we can 'ignore' the folder
folder = 'ignored'
stft_params = nussl.STFTParams(window_length=256, hop_length=64)
sine_wave_dataset = SineWaves(
folder, sample_rate=8000, stft_params=stft_params
)
item = sine_wave_dataset[0]
def visualize_and_embed(sources, y_axis='mel'):
    """Plot ``sources`` as masks over the mixture and embed audio players.

    Args:
        sources (dict): Maps source names to signals to visualize.
        y_axis (str): Frequency-axis scaling passed through to the visualizer.
    """
    plt.figure(figsize=(10, 4))
    plt.subplot(111)
    nussl.utils.visualize_sources_as_masks(
        sources, db_cutoff=-60, y_axis=y_axis)
    plt.tight_layout()
    plt.show()
    # Render an interactive multitrack player so each source can be heard.
    nussl.play_utils.multitrack(sources, ext='.wav')
visualize_and_embed(item['sources'])
print(item['metadata'])
# -
# Let's check the shape of the `mix` stft:
item['mix'].stft().shape
# Great! There's 129 frequencies and 251 frames and 1 audio channel. To put it into our
# model though, we need the STFT in the right shape, and we also need some training data.
# Let's use some of *nussl*'s transforms to do this. Specifically, we'll use the
# `PhaseSensitiveSpectrumApproximation` and the `ToSeparationModel` transforms. We'll
# also use the `MagnitudeWeights` transform in case we want to use deep clustering loss
# functions.
# +
folder = 'ignored'
stft_params = nussl.STFTParams(window_length=256, hop_length=64)
tfm = nussl.datasets.transforms.Compose([
nussl.datasets.transforms.PhaseSensitiveSpectrumApproximation(),
nussl.datasets.transforms.MagnitudeWeights(),
nussl.datasets.transforms.ToSeparationModel()
])
sine_wave_dataset = SineWaves(
folder, sample_rate=8000, stft_params=stft_params,
transform=tfm
)
# Let's inspect the 0th item from the dataset
item = sine_wave_dataset[0]
item.keys()
# -
# Now the item has all the keys that SeparationModel needs. The `ToSeparationModel` transform set everything up for us: it set up the dictionary from `SineWaves.process_item()` exactly as we needed it. It swapped the frequency and sequence length dimension appropriately, and made them all torch Tensors:
item['mix_magnitude'].shape
# We still need to add a batch dimension and make everything have float type
# though. So let's do that for each key, if the key is a torch Tensor:
# +
for key in item:
if torch.is_tensor(item[key]):
item[key] = item[key].unsqueeze(0).float()
item['mix_magnitude'].shape
# -
# Now we can pass this through our model:
# +
output = model(item)
i = 0
plt.figure(figsize=(5, 5))
plt.imshow(
output['my_estimates'][0, ..., 0, i].T.cpu().data.numpy(),
origin='lower')
plt.title("Source")
plt.show()
plt.figure(figsize=(5, 5))
plt.imshow(
output['my_mask'][0, ..., 0, i].T.cpu().data.numpy(),
origin='lower')
plt.title("Mask")
plt.show()
# -
# We've now seen how to use *nussl* transforms, datasets, and SeparationModel
# together to make a forward pass. But so far our model does nothing practical; let's see how to train the model so it actually does something.
# Closures and loss functions
# ---------------------------
#
# *nussl* trains models via *closures*, which define the forward and backward passes for a
# model on a single batch. Closures use *loss functions* within them, which compute the
# loss on a single batch. There are a bunch of common loss functions already in *nussl*.
# +
def print_existing_losses():
    """Print the loss classes available in ``nussl.ml.train.loss``."""
    # Skip imported helpers and dunder attributes; everything else in the
    # module namespace is a loss class worth listing.
    excluded = ['nn', 'torch', 'combinations', 'permutations']
    print('nussl.ml.train.loss contents:')
    print('-----------------------------')
    names = []
    for attr in dir(nussl.ml.train.loss):
        if attr in excluded or attr.startswith('__'):
            continue
        names.append(attr)
    print('\n'.join(names))
print_existing_losses()
# -
# In addition to standard loss functions for spectrograms, like L1 Loss and MSE, there is also an SDR loss for time series audio, as well as permutation invariant versions of
# these losses for training things like speaker separation networks. See the API docs for more details on all of these loss functions. A closure uses these loss functions in a simple way. For example, here is the code for training a model with a closure:
# +
from nussl.ml.train.closures import Closure
from nussl.ml.train import BackwardsEvents
class TrainClosure(Closure):
    """
    This closure takes an optimization step on a SeparationModel object given a
    loss.

    Args:
        loss_dictionary (dict): Dictionary containing loss functions and specification.
        optimizer (torch Optimizer): Optimizer to use to train the model.
        model (SeparationModel): The model to be trained.
    """
    def __init__(self, loss_dictionary, optimizer, model):
        # The parent Closure builds the loss functions from the dictionary.
        super().__init__(loss_dictionary)
        self.optimizer = optimizer
        self.model = model
    def __call__(self, engine, data):
        # One training step: forward pass, loss, backprop, optimizer update.
        # Returns a dict of plain-float loss values (total under 'loss').
        self.model.train()
        self.optimizer.zero_grad()
        output = self.model(data)
        # compute_loss comes from the parent Closure: it applies every loss
        # in the loss dictionary and sums them under the 'loss' key.
        loss_ = self.compute_loss(output, data)
        loss_['loss'].backward()
        # Notify any handlers attached to this event right after backprop.
        engine.fire_event(BackwardsEvents.BACKWARDS_COMPLETED)
        self.optimizer.step()
        # Detach tensors to scalars so the history is cheap to store/log.
        loss_ = {key: loss_[key].item() for key in loss_}
        return loss_
# -
# So, this closure takes some data and puts it through the model, then calls
# `self.compute_loss` on the result, fires an event on the ignite `engine`, and then steps the optimizer on the loss. This is a standard PyTorch training loop. The magic here is happening in `self.compute_loss`, which comes from the
# parent class `Closure`.
# ### Loss dictionary ###
#
# The parent class `Closure` takes a loss dictionary which defines the losses that get
# computed on the output of the model. The loss dictionary has the following format:
#
# loss_dictionary = {
# 'LossClassName': {
# 'weight': [how much to weight the loss in the sum, defaults to 1],
# 'keys': [key mapping items in dictionary to arguments to loss],
# 'args': [any positional arguments to the loss class],
# 'kwargs': [keyword arguments to the loss class],
# }
# }
#
# For example, one possible loss could be:
loss_dictionary = {
'DeepClusteringLoss': {
'weight': .2,
},
'PermutationInvariantLoss': {
'weight': .8,
'args': ['L1Loss']
}
}
# This will apply the deep clustering and a permutation invariant L1 loss to the output
# of the model. So, how does the model know what to compare? Each loss function is a
# class in *nussl*, and each class has an attribute called `DEFAULT_KEYS`. This attribute
# tells the Closure how to use the forward pass of the loss function. For example, this is
# the code for the L1 Loss:
# +
from torch import nn
class L1Loss(nn.L1Loss):
    """PyTorch L1 loss plus a key mapping for nussl's Closure machinery.

    DEFAULT_KEYS maps entries of the model/dataset dictionary onto the
    forward-pass arguments of ``nn.L1Loss``: the model's ``estimates``
    become ``input`` and the dataset's ``source_magnitudes`` become
    ``target``.
    """
    DEFAULT_KEYS = {'estimates': 'input', 'source_magnitudes': 'target'}
# -
# [L1Loss](https://pytorch.org/docs/stable/nn.html?highlight=l1%20loss#torch.nn.L1Loss)
# is defined in PyTorch and has the following example for its forward pass:
#
# >>> loss = nn.L1Loss()
# >>> input = torch.randn(3, 5, requires_grad=True)
# >>> target = torch.randn(3, 5)
# >>> output = loss(input, target)
# >>> output.backward()
#
# The arguments to the function are `input` and `target`. So the mapping from the dictionary
# provided by our dataset and model jointly is to use `my_estimates` (like we defined above) as the input and
# `source_magnitudes` (what we are trying to match) as the target. This results in
# the `DEFAULT_KEYS` you see above. Alternatively, you can pass the mapping between
# the dictionary and the arguments to the loss function directly into the loss dictionary
# like so:
loss_dictionary = {
'L1Loss': {
'weight': 1.0,
'keys': {
'my_estimates': 'input',
'source_magnitudes': 'target',
}
}
}
# Great, now let's use this loss dictionary in a Closure and see what happens.
closure = nussl.ml.train.closures.Closure(loss_dictionary)
closure.losses
# The closure was instantiated with the losses. Calling `closure.compute_loss` results
# in the following:
output = model(item)
loss_output = closure.compute_loss(output, item)
for key, val in loss_output.items():
print(key, val)
# The output is a dictionary with the `loss` item corresponding to the total
# (summed) loss and the other keys corresponding to the individual losses.
# ### Custom loss functions ###
#
# Loss functions can be registered with the Closure in the same way that
# modules are registered with SeparationModel:
# +
class MeanDifference(torch.nn.Module):
    """Toy loss: absolute difference between the means of input and target."""
    # Maps dictionary entries onto the forward() arguments for the Closure.
    DEFAULT_KEYS = {'my_estimates': 'input', 'source_magnitudes': 'target'}
    def __init__(self):
        super().__init__()
    def forward(self, input, target):
        # |mean(input) - mean(target)|
        return (input.mean() - target.mean()).abs()
nussl.ml.register_loss(MeanDifference)
print_existing_losses()
# -
# Now this loss can be used in a closure:
# +
new_loss_dictionary = {
'MeanDifference': {}
}
new_closure = nussl.ml.train.closures.Closure(new_loss_dictionary)
new_closure.losses
output = model(item)
loss_output = new_closure.compute_loss(output, item)
for key, val in loss_output.items():
print(key, val)
# -
# ### Optimizing the model ###
#
# We now have a loss. We can then put it backwards through the model and
# take a step forward on the model with an optimizer. Let's define
# an optimizer (we'll use Adam), and then use it to take a step on
# the model:
# +
optimizer = torch.optim.Adam(model.parameters(), lr=.001)
optimizer.zero_grad()
output = model(item)
loss_output = closure.compute_loss(output, item)
loss_output['loss'].backward()
optimizer.step()
print(loss_output)
# -
# Cool, we did a single step. Instead of manually defining this all above, we can
# instead use the TrainClosure from *nussl*.
train_closure = nussl.ml.train.closures.TrainClosure(
loss_dictionary, optimizer, model
)
# The `__call__` function of the closure takes an `engine` as well as the batch data.
# Since we don't currently have an `engine` object (more on that below), let's just pass `None`.
# We can run this on a batch:
train_closure(None, item)
# We can run this a bunch of times and watch the loss go down.
# +
loss_history = []
n_iter = 100
for i in range(n_iter):
loss_output = train_closure(None, item)
loss_history.append(loss_output['loss'])
# -
plt.plot(loss_history)
plt.title('Train loss')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.show()
# Note that there is also a `ValidationClosure` which does not take
# an optimization step but only computes the loss.
#
# Let's look at the model output now!
# +
output = model(item)
for i in range(output['my_estimates'].shape[-1]):
plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.imshow(
output['my_estimates'][0, ..., 0, i].T.cpu().data.numpy(),
origin='lower')
plt.title("Source")
plt.subplot(122)
plt.imshow(
output['my_mask'][0, ..., 0, i].T.cpu().data.numpy(),
origin='lower')
plt.title("Mask")
plt.show()
# -
# Hey! That looks a lot better! We've now overfit the model to a single item in the dataset. Now, let's do it at scale by using a PyTorch Ignite engines with the functionality in `nussl.ml.train`.
# Ignite Engines
# --------------
#
# *nussl* uses PyTorch Ignite to power its training functionality.
# At the heart of Ignite is the *Engine* object. An Engine contains a lot
# of functionality for iterating through a dataset and feeding data to a model.
# What makes Ignite so desirable is that we can define all of the things we
# need to train a model ahead of time, then the Ignite engine will run the code
# to train the model for us. This saves us a lot of time writing boilerplate
# code for training. *nussl* also provides a lot of boilerplate code for
# training source separation models, specifically.
#
# To use Ignite with *nussl*, the only thing we need to define is a *closure*.
# A closure defines a pass through the model for a single batch. The rest of
# the details, such as queueing up data, are taken care of by
# `torch.utils.data.DataLoader` and the engine object. All of the state
# regarding a training run, such as the epoch number, the loss history, etc,
# is kept in the engine's state at `engine.state`.
#
# *nussl* provides a helper function to build a standard engine with a lot
# of nice functionality like keeping track of
# loss history, preparing the batches properly, setting up the
# train and validation closures. This function is `create_train_and_validation_engines()`.
#
# It's also possible to attach handlers to an Engine for further
# functionality. These handlers make use of the engine's state. *nussl*
# comes with several of these:
#
# 1. `add_validate_and_checkpoint`: Adds a pass on the validation data and
# checkpoints the model based on the validation loss to either `best`
# (if this was the lowest validation loss model) or `latest`.
# 2. `add_stdout_handler`: Prints some handy information after each epoch.
# 3. `add_tensorboard_handler`: Logs loss data to tensorboard.
#
# See the API documentation for further details on these handlers.
#
# ### Putting it all together ###
#
# Let's put this all together. Let's build the dataset, model and
# optimizer, train and validation closures, and engines. Let's also
# use the GPU if it's available.
# +
# define everything as before
modules = {
'mix_magnitude': {},
'log_spec': {
'class': 'AmplitudeToDB'
},
'norm': {
'class': 'BatchNorm',
},
'mask': {
'class': 'Embedding',
'args': {
'num_features': num_features,
'hidden_size': num_features,
'embedding_size': num_sources,
'activation': mask_activation,
'num_audio_channels': num_audio_channels,
'dim_to_embed': [2, 3] # embed the frequency dimension (2) for all audio channels (3)
}
},
'estimates': {
'class': 'Mask',
},
}
connections = [
['log_spec', ['mix_magnitude', ]],
['norm', ['log_spec', ]],
['mask', ['norm', ]],
['estimates', ['mask', 'mix_magnitude']]
]
# define the outputs
output = ['estimates', 'mask']
config = {
'modules': modules,
'connections': connections,
'output': output
}
# +
BATCH_SIZE = 5
LEARNING_RATE = 1e-3
OUTPUT_FOLDER = os.path.expanduser('~/.nussl/tutorial/sinewave')
RESULTS_DIR = os.path.join(OUTPUT_FOLDER, 'results')
NUM_WORKERS = 2
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
shutil.rmtree(os.path.join(RESULTS_DIR), ignore_errors=True)
os.makedirs(RESULTS_DIR, exist_ok=True)
os.makedirs(OUTPUT_FOLDER, exist_ok=True)
# adjust logging so we see output of the handlers
logger = logging.getLogger()
logger.setLevel(logging.INFO)
# Put together data
stft_params = nussl.STFTParams(window_length=256, hop_length=64)
tfm = nussl.datasets.transforms.Compose([
nussl.datasets.transforms.PhaseSensitiveSpectrumApproximation(),
nussl.datasets.transforms.MagnitudeWeights(),
nussl.datasets.transforms.ToSeparationModel()
])
sine_wave_dataset = SineWaves(
'ignored', sample_rate=8000, stft_params=stft_params,
transform=tfm
)
dataloader = torch.utils.data.DataLoader(
sine_wave_dataset, batch_size=BATCH_SIZE
)
# Build our simple model
model = nussl.ml.SeparationModel(config).to(DEVICE)
# Build an optimizer
optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE)
# Set up loss functions and closure
# We'll use permutation invariant loss since we don't
# care what order the sine waves get output in, just that
# they are different.
loss_dictionary = {
'PermutationInvariantLoss': {
'weight': 1.0,
'args': ['L1Loss']
}
}
train_closure = nussl.ml.train.closures.TrainClosure(
loss_dictionary, optimizer, model
)
val_closure = nussl.ml.train.closures.ValidationClosure(
loss_dictionary, model
)
# Build the engine and add handlers
train_engine, val_engine = nussl.ml.train.create_train_and_validation_engines(
train_closure, val_closure, device=DEVICE
)
nussl.ml.train.add_validate_and_checkpoint(
OUTPUT_FOLDER, model, optimizer, sine_wave_dataset, train_engine,
val_data=dataloader, validator=val_engine
)
nussl.ml.train.add_stdout_handler(train_engine, val_engine)
# -
# Cool! We built an engine! (Note the distinction between using the original dataset
# object and using the dataloader object.)
#
# Now to train it, all we have to do is `run`
# the engine. Since our SineWaves dataset makes mixes "on the fly" (i.e., every time
# we get an `item`, the dataset will return a mix of random sine waves), it is
# impossible to loop through the whole dataset, and therefore there is no concept
# of an epoch. In this case, we will instead define an arbitrary `epoch_length`
# of 1000 and pass that value to `train_engine`. After one epoch, the validation
# will be run and everything will get printed by the `stdout` handler.
#
# Let's see it run:
train_engine.run(dataloader, epoch_length=1000)
# We can check out the loss over each iteration in the single epoch
# by examining the state:
plt.plot(train_engine.state.iter_history['loss'])
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Train Loss')
plt.show()
# Let's also see what got saved in the output folder:
# !tree {OUTPUT_FOLDER}
# So the models and optimizers got saved! Let's load back one of these
# models and see what's in it.
# What's in a model?
# ------------------
#
# After we're finished training the model, it will be saved by our
# `add_validate_and_checkpoint` handler. What gets saved in our model? Let's see:
saved_model = torch.load(train_engine.state.saved_model_path)
print(saved_model.keys())
# As expected, there's the `state_dict` containing the weights of
# the trained model, the `config` containing the configuration of the model.
# There also a `metadata` key in the saved model. Let's check out the metadata...
print(saved_model['metadata'].keys())
# There's a whole bunch of stuff related to training, like the folder
# it was trained on, the state dictionary of the engine used to train the
# model, the loss history for each epoch (not each iteration - that's too big).
#
# There are also keys that are related to the parameters of the AudioSignal.
# Namely, `stft_params`, `sample_rate`, and `num_channels`. These
# are used by *nussl* to prepare an AudioSignal object to be put into a
# deep learning based separation algorithm. There's also a `transforms`
# key - this is used by *nussl* to construct the input dictionary at
# inference time on an AudioSignal so that the data going into the model
# matches how it was given during training time. Let's look at each of these:
for key in saved_model['metadata']:
print(f"{key}: {saved_model['metadata'][key]}")
#
# **Importantly**, everything saved with the model makes training it *entirely reproduceable*. We have everything we need to recreate another model exactly like this if we need to.
#
# Now that we've trained our toy model, let's move on to actually using and evaluating it.
# Using and evaluating a trained model
# ------------------------------------
#
# In this tutorial, we built very simple a deep mask estimation network. There is a
# corresponding separation algorithm in *nussl* for using
# deep mask estimation networks. Let's build our dataset
# again, this time *without* transforms, so we have access to
# the actual AudioSignal objects. Then let's instantiate the
# separation algorithm and use it to separate an item from the
# dataset.
# +
tt_dataset = SineWaves(
'ignored', sample_rate=8000
)
tt_dataset.frequencies = sine_wave_dataset.frequencies
item = tt_dataset[0] # <-- This is an AugioSignal obj
MODEL_PATH = os.path.join(OUTPUT_FOLDER, 'checkpoints/best.model.pth')
separator = nussl.separation.deep.DeepMaskEstimation(
item['mix'], model_path=MODEL_PATH
)
estimates = separator()
visualize_and_embed(estimates)
# -
# ### Evaluation in parallel ###
#
# We'll usually want to run many mixtures through the model, separate,
# and get evaluation metrics like SDR, SIR, and SAR. We can do that with
# the following bit of code:
# +
# make a separator with an empty audio signal initially
# this one will live on gpu (if one exists) and be used in a
# threadpool for speed
dme = nussl.separation.deep.DeepMaskEstimation(
nussl.AudioSignal(), model_path=MODEL_PATH, device='cuda'
)
def forward_on_gpu(audio_signal):
    """Run the shared DeepMaskEstimation forward pass on one signal.

    Uses the module-level ``dme`` separator (constructed above with
    ``device='cuda'``) so only mask estimation happens here; masking and
    evaluation are done separately on CPU worker threads.
    """
    # set the audio signal of the object to this item's mix
    dme.audio_signal = audio_signal
    masks = dme.forward()
    return masks
def separate_and_evaluate(item, masks):
    """Apply precomputed masks to ``item``'s mix, score the estimates with
    BSS-Eval, and dump the scores to a JSON file in RESULTS_DIR."""
    # A fresh CPU-side separator computes the estimates from the masks.
    sep = nussl.separation.deep.DeepMaskEstimation(item['mix'])
    ests = sep(masks)
    scorer = nussl.evaluation.BSSEvalScale(
        list(item['sources'].values()), ests,
        compute_permutation=True,
        source_labels=['sine1', 'sine2', 'sine3']
    )
    results = scorer.evaluate()
    json_path = os.path.join(
        RESULTS_DIR, f"{item['mix'].file_name}.json"
    )
    with open(json_path, 'w') as fh:
        json.dump(results, fh)
pool = ThreadPoolExecutor(max_workers=NUM_WORKERS)
for i, item in enumerate(tqdm.tqdm(tt_dataset)):
masks = forward_on_gpu(item['mix'])
if i == 0:
separate_and_evaluate(item, masks)
else:
pool.submit(separate_and_evaluate, item, masks)
pool.shutdown(wait=True)
json_files = glob.glob(f"{RESULTS_DIR}/*.json")
df = nussl.evaluation.aggregate_score_files(json_files)
report_card = nussl.evaluation.report_card(
df, notes="Testing on sine waves", report_each_source=True)
print(report_card)
# -
# We parallelized the evaluation across 2 workers, kept two copies of
# the separator, one of which lives on the GPU, and the other which
# lives on the CPU. The GPU one does a forward pass in its own thread
# and then hands it to the other separator which actually computes the
# estimates and evaluates the metrics in parallel. After we're done,
# we aggregate all the results (each of which was saved to a JSON file)
# using `nussl.evaluation.aggregate_score_files` and then use the
# nussl report card at `nussl.evaluation.report_card` to view the results.
# We also now have the results as a pandas DataFrame:
df
# Finally, we can look at the structure of the output folder again,
# seeing there are now 100 entries under results corresponding to each
# item in `sine_wave_dataset`:
# !tree --filelimit 20 {OUTPUT_FOLDER}
end_time = time.time()
time_taken = end_time - start_time
print(f'Time taken: {time_taken:.4f} seconds')
| 38,430 | 32.476481 | 304 | py |
nussl | nussl-master/docs/tutorials/datasets.py | # ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Handling data in *nussl*
# ========================
#
# *nussl* comes with a bunch of different useful dataset hooks,
# along with a handy base class for datasets. Let's examine
# what the base class looks like first.
#
# BaseDataset
# -----------
#
# The BaseDataset is an abstract class that has a few useful
# functions for organizing your data. If you call it, directly,
# however, it will error out:
# +
import nussl
import numpy as np
import matplotlib.pyplot as plt
import time
start_time = time.time()
folder = 'ignored'
base_dataset = nussl.datasets.BaseDataset(folder)
# -
# For the dataset to work, two functions must be implemented:
#
# 1. `self.get_items`: A function that grabs all the items that
# you will need to process.
# 2. `self.process_item`: A function that processes a single item.
#
# Let's build a dataset that returns sums of sine wavs at random frequencies.
# +
def make_sine_wave(freq, sample_rate, duration):
    """Synthesize ``duration`` seconds of a ``freq``-Hz sine wave."""
    step = 1 / sample_rate
    # Sample instants from t = 0 up to (but excluding) ``duration``.
    t = np.arange(0.0, duration, step)
    return np.sin(2 * np.pi * freq * t)
class SineWaves(nussl.datasets.BaseDataset):
    """Dataset hook producing mixtures of three random sine tones."""
    def get_items(self, folder):
        # ``folder`` is irrelevant here; simply expose 100 synthetic items.
        return list(range(100))
    def process_item(self, item):
        # ``item`` is ignored: each access builds a fresh random mixture of
        # three sine waves with fundamentals drawn from [110, 1000) Hz.
        sources = {}
        freqs = []
        for idx in range(3):
            hz = np.random.randint(110, 1000)
            freqs.append(hz)
            samples = make_sine_wave(hz, self.sample_rate, 2)
            # BaseDataset helper that wraps a raw array in an audio signal.
            signal = self._load_audio_from_array(samples)
            sources[f'sine{idx}'] = signal * 1/3
        return {
            'mix': sum(sources.values()),
            'sources': sources,
            'metadata': {
                'frequencies': freqs
            }
        }
# -
# The primary thing to note here is the format of what is output
# by the `process_item` function. It is a dictionary and must
# always be a dictionary. The dictionary contains three keys:
# `mix`, `sources`, and `metadata`. `sources` is similarly not a list
# but a dictionary. The sum of the values of `sources` adds up to
# `mix`.
#
# Great, now let's use this dataset.
# +
folder = 'ignored'
sine_wave_dataset = SineWaves(folder, sample_rate=44100)
item = sine_wave_dataset[0]
item
# -
# We can see that getting an item from the dataset resulted in a dictionary
# containing AudioSignal objects! And the exact frequencies for each sine
# tone were saved in the metadata. Now, let's listen and visualize:
# +
def visualize_and_embed(sources, y_axis='mel'):
    """Plot ``sources`` as masks over the mixture and embed audio players.

    Args:
        sources (dict): Maps source names to signals to visualize.
        y_axis (str): Frequency-axis scaling passed through to the visualizer.
    """
    plt.figure(figsize=(10, 4))
    plt.subplot(111)
    nussl.utils.visualize_sources_as_masks(
        sources, db_cutoff=-60, y_axis=y_axis)
    plt.tight_layout()
    plt.show()
    # Render an interactive multitrack player so each source can be heard.
    nussl.play_utils.multitrack(sources, ext='.wav')
visualize_and_embed(item['sources'])
# -
# The STFT parameters were inferred from the first time we used the dataset based
# on the audio signal's sample rate and the defaults in *nussl*. To enforce a
# specific STFT parameter, we can do the following:
# +
folder = 'ignored'
stft_params = nussl.STFTParams(window_length=256, hop_length=64)
sine_wave_dataset = SineWaves(folder, sample_rate=44100, stft_params=stft_params)
item = sine_wave_dataset[0]
visualize_and_embed(item['sources'])
print('STFT shape:', item['mix'].stft().shape)
# -
# Cool! Now let's look at some of the built-in dataset hooks that
# ship with *nussl*.
#
# MUSDB18
# -------
#
# MUSDB18 is a dataset for music source separation research. The full
# dataset is available [here](https://zenodo.org/record/3338373), but
# there is a useful functionality where if you don't have, 7-second clips
# of each track will be downloaded automatically. In *nussl*, these get
# downloaded to `~/.nussl/musdb18`. Let's set up a MUSDB18 dataset
# object and visualize/listen to an item from the dataset:
# +
musdb = nussl.datasets.MUSDB18(download=True)
i = 40 #or get a random track like this: np.random.randint(len(musdb))
item = musdb[i]
mix = item['mix']
sources = item['sources']
visualize_and_embed(sources)
# -
# MixSourceFolder
# ---------------
#
# Imagine you have a dataset with the following (somewhat common) structure:
#
# data/
# mix/
# [file0].wav
# [file1].wav
# [file2].wav
# ...
# [label0]/
# [file0].wav
# [file1].wav
# [file2].wav
# ...
# [label1]/
# [file0].wav
# [file1].wav
# [file2].wav
# ...
# [label2]/
# [file0].wav
# [file1].wav
# [file2].wav
# ...
# ...
#
# This structure is how popular speech separation datasets such as `wsj0-2mix` and `WHAM`
# are organized. Each folder contains isolated sources. The mix folder contains the sum
# of all the isolated sources that have the same name. So in the above: `mix/[file0].wav`
# is constructed from `[label0]/[file0].wav`, `[label1]/[file0].wav`, `[label2]/[file0].wav`.
#
# To use this dataset, we first need to construct a folder with a structure that looks
# like this. Let's grab a zip file containing some isolated speakers from file zoo
# and make one:
# +
import os
import zipfile
import glob
import random
def _unzip(path_to_zip, target_path):
with zipfile.ZipFile(path_to_zip, 'r') as zip_ref:
zip_ref.extractall(target_path)
def toy_datasets(_dir):
    """Download and unpack the tutorial's benchmark zip file(s) into ``_dir``.

    Returns:
        dict: Maps each zip filename to the folder it was extracted into.
    """
    locations = {}
    for key in ['babywsj_oW0F0H9.zip']:
        stem, _ = os.path.splitext(key)
        destination = os.path.join(_dir, stem)
        # Fetch the archive from the nussl External File Zoo, then unpack it.
        archive = nussl.efz_utils.download_benchmark_file(key, _dir)
        _unzip(archive, destination)
        locations[key] = destination
    return locations
def make_mix_source_folder(toy_datasets, _dir):
    """Remix downloaded speech clips into a MixSourceFolder-style dataset.

    Builds 5 mixtures of 2 randomly chosen speech files each, writing the
    isolated sources to ``s0/``, ``s1/`` and their sums to ``mix/`` under
    ``_dir/mix_source_folder``.

    Args:
        toy_datasets (dict): Maps zip filenames to extracted folder paths
            (the output of the ``toy_datasets`` function above).
        _dir (str): Root folder to build the dataset under.

    Returns:
        str: Path to the created ``mix_source_folder`` directory.
    """
    wsj_sources = toy_datasets['babywsj_oW0F0H9.zip']
    audio_files = glob.glob(
        f"{wsj_sources}/**/*.wav", recursive=True)
    n_sources = 2
    n_mixtures = 5
    _dir = os.path.join(_dir, 'mix_source_folder')
    for i in range(n_mixtures):
        sources = []
        for n in range(n_sources):
            # Pick source files at random from the unpacked speech corpus.
            path = random.choice(audio_files)
            source = nussl.AudioSignal(path)
            sources.append(source)
        # Truncate everything to the shortest clip so the sources line up
        # sample-for-sample before mixing.
        min_length = min([s.signal_length for s in sources])
        for n in range(n_sources):
            output_path = os.path.join(_dir, f's{n}', f'{i}.wav')
            os.makedirs(os.path.dirname(output_path), exist_ok=True)
            sources[n].truncate_samples(min_length)
            sources[n].write_audio_to_file(output_path)
        # The mix is the plain sum of the (truncated) sources, saved under
        # the same filename so MixSourceFolder can pair them back up.
        mix = sum(sources)
        output_path = os.path.join(_dir, 'mix', f'{i}.wav')
        os.makedirs(os.path.dirname(output_path), exist_ok=True)
        mix.write_audio_to_file(output_path)
    return _dir
# -
# The functions above download a zip file containing isolated speakers, find all the
# audio files in the unzipped folder, and remix them into a MixSourceFolder-style
# dataset. Let's look at some items from this dataset:
# +
target_dir = os.path.expanduser('~/.nussl/tutorial/')
source_data = toy_datasets(target_dir)
folder = make_mix_source_folder(source_data, target_dir)
msf = nussl.datasets.MixSourceFolder(folder)
item = msf[0]
sources = item['sources']
visualize_and_embed(sources, y_axis='linear')
# -
# And let's look at the tree structure of the folder:
# !tree {folder}
# WHAM!
# -----
#
# The [WHAM! dataset](https://www.merl.com/publications/docs/TR2019-099.pdf) is built off of
# the Wall Street Journal dataset, which contains many hours of isolated speech. The folder
# structure is the same as MixSourceFolder. WHAM can be hooked into a dataset via:
#
# nussl.datasets.WHAM(folder_where_wham_is, sample_rate=[8000 or 16000])
#
# Look at the associated API documentation for more details.
# Scaper
# ------
#
# [Scaper](https://github.com/justinsalamon/scaper) is a tool for creating complex
# soundscapes containing multiple isolated sources. *nussl* has a hook for Scaper
# that can be used to create a dataset from any folder containing data generated
# by Scaper. Let's make some data using Scaper and then point `nussl.datasets.Scaper`
# at it. We'll start by downloading some data straight from the Scaper github.
# +
import scaper
import os
import zipfile
import subprocess
# Download the audio automatically
url = "https://github.com/justinsalamon/scaper/archive/v1.2.0.zip"
download_path = os.path.expanduser('~/.nussl/tutorial/scaper')
if not os.path.exists(download_path):
subprocess.run(f'wget {url}', shell=True)
subprocess.run(f'unzip v1.2.0.zip', shell=True)
os.makedirs(download_path, exist_ok=True)
subprocess.run(f'cp -r scaper-1.2.0/tests/data/audio {download_path}', shell=True)
subprocess.run(f'rm -rf scaper-1.2.0/', shell=True)
subprocess.run(f'rm -rf v1.2.0.zip', shell=True)
# -
# Here's what got downloaded:
# !tree {download_path}
# Now, let's use Scaper to make some interesting soundscapes! Note that to use
# Scaper, you'll need to have both `sox` and `ffmpeg` installed. These can't be
# packaged directly with Scaper or with *nussl*, so be sure to figure out how
# to do this on your machine.
# +
path_to_audio = os.path.join(download_path, 'audio')
output_folder = os.path.join(download_path, 'generated')
os.makedirs(output_folder, exist_ok=True)
soundscape_duration = 10.0
seed = 123
num_mixtures = 5
foreground_folder = os.path.join(path_to_audio, 'foreground')
background_folder = os.path.join(path_to_audio, 'background')
sc = scaper.Scaper(soundscape_duration,
foreground_folder,
background_folder,
random_state=seed)
sc.ref_db = -20
sc.add_background(label=('const', 'park'),
source_file=('choose', []),
source_time=('const', 0))
sc.add_event(label=('const', 'siren'),
source_file=('choose', []),
source_time=('const', 0),
event_time=('uniform', 0, 9),
event_duration=('truncnorm', 3, 1, 0.5, 5),
snr=('normal', 10, 3),
pitch_shift=('uniform', -2, 2),
time_stretch=('uniform', 0.8, 1.2))
for _ in range(2):
sc.add_event(label=('choose', []),
source_file=('choose', []),
source_time=('const', 0),
event_time=('uniform', 0, 9),
event_duration=('truncnorm', 3, 1, 0.5, 5),
snr=('normal', 10, 3),
pitch_shift=None,
time_stretch=None)
for i in range(num_mixtures):
audiofile = os.path.join(output_folder, f'{i}.wav')
jamsfile = os.path.join(output_folder, f'{i}.jams')
sc.generate(audiofile, jamsfile,
allow_repeated_label=True,
allow_repeated_source=True,
reverb=None,
disable_sox_warnings=True,
no_audio=False,
save_isolated_events=True) # this is important!
# -
# The warnings are normal - Scaper is just adjusting the bounds so that things work
# out when creating the sound scene. Let's look at what got generated by Scaper:
# !tree {output_folder}
# So, there are three things per generated mix. The `wav` file which contains the audio
# of the mixture, the `jams` file which contains information about the soundscape, and
# the `_events` folder, which contains the audio for each isolated event. Let's load
# this folder into `nussl`.
# +
sca = nussl.datasets.Scaper(output_folder)
item = sca[0]
sources = item['sources']
visualize_and_embed(sources, y_axis='mel')
# -
# For all the cool things you can do with Scaper, check out the
# [Scaper docs](https://scaper.readthedocs.io/en/latest/index.html)!
# Transforms
# ----------
#
# *nussl* also provides a "transform" API, akin to the ones found in
# `torchvision`. These transforms consume dictionaries produced by
# a dataset or by other transforms. Let's use one of them now: `SumSources`,
# to transform the output of MUSDB18 by grouping some of the sources
# together.
# +
tfm = nussl.datasets.transforms.SumSources([
['vocals', 'other'], ['drums', 'bass']
])
musdb = nussl.datasets.MUSDB18(download=True, transform=tfm)
i = 40 #or get a random track like this: np.random.randint(len(musdb))
item = musdb[i]
mix = item['mix']
sources = item['sources']
visualize_and_embed(sources)
# -
# The sources are now grouped according to the SumSources transform. If you're using datasets in
# a machine learning pipeline, then you may also want direct access to actual spectrograms
# of each source. Let's use another transform to get that, the `PhaseSensitiveSpectrumApproximation`
# transform:
# +
tfm = nussl.datasets.transforms.PhaseSensitiveSpectrumApproximation()
musdb = nussl.datasets.MUSDB18(download=True, transform=tfm)
i = 40 #or get a random track like this: np.random.randint(len(musdb))
item = musdb[i]
mix = item['mix']
sources = item['sources']
print(item.keys())
# -
# The transform added some additional keys to the dictionary:
#
# 1. `mix_magnitude`: the magnitude spectrogram of the mixture
# 2. `source_magnitudes`: the magnitude spectrograms of each source
# 3. `ideal_binary_mask`: the ideal binary mask for each source
#
# Let's take a look at some of these:
# +
num_frequencies, num_time, num_channels, num_sources = item['source_magnitudes'].shape
print(
f"Shape of 'source_magnitudes': {item['source_magnitudes'].shape}\n"
f"Number of frequencies: {num_frequencies}\n"
f"Number of frames: {num_time}\n"
f"Number of audio channels: {num_channels}\n"
f"Number of sources: {num_sources}\n"
)
keys = ['ideal_binary_mask', 'source_magnitudes']
source_names = sorted(item['sources'].keys())
for key in keys:
plt.figure(figsize=(15, 3))
for i in range(item[key].shape[-1]):
plt.subplot(141 + i)
plt.imshow(
20 * np.log10(
1e-7 + item[key][..., 0, i]),
origin='lower', aspect='auto'
)
plt.title(f'{key}: {source_names[i]}')
plt.tight_layout()
plt.show()
# -
# Note that the order of the sources in the stacked array is in sorted order of the keys
# for each source. Now, what if we want to group the two operations together by
# applying both transforms to the item? To do this, we can use the `Compose` transform:
# +
tfm = nussl.datasets.transforms.Compose([
nussl.datasets.transforms.SumSources([
['vocals', 'other'], ['drums', 'bass']]),
nussl.datasets.transforms.PhaseSensitiveSpectrumApproximation(),
])
musdb = nussl.datasets.MUSDB18(download=True, transform=tfm)
i = 40 #or get a random track like this: np.random.randint(len(musdb))
item = musdb[i]
mix = item['mix']
sources = item['sources']
# -
# This applied both transforms in sequence. First the sources are summed, then the
# spectrograms and masks of the resultant summed sources are computed.
# Here's the result:
# +
num_frequencies, num_time, num_channels, num_sources = item['source_magnitudes'].shape
print(
f"Shape of 'source_magnitudes': {item['source_magnitudes'].shape}\n"
f"Number of frequencies: {num_frequencies}\n"
f"Number of frames: {num_time}\n"
f"Number of audio channels: {num_channels}\n"
f"Number of sources: {num_sources}\n"
)
keys = ['ideal_binary_mask', 'source_magnitudes']
source_names = sorted(item['sources'].keys())
for key in keys:
plt.figure(figsize=(15, 3))
for i in range(item[key].shape[-1]):
plt.subplot(141 + i)
plt.imshow(
20 * np.log10(
1e-7 + item[key][..., 0, i]),
origin='lower', aspect='auto'
)
plt.title(f'{key}: {source_names[i]}')
plt.tight_layout()
plt.show()
# -
# Finally, all datasets just return dictionaries containing AudioSignal objects.
# Grabbing the audio data is as simple as:
plt.figure(figsize=(10, 3))
plt.plot(mix.audio_data[0])
plt.title(f'mix.audio_data, shape: {mix.audio_data.shape}')
plt.xlabel('Sample index')
plt.ylabel('Amplitude')
plt.show()
# Accessing the STFT can be done by:
mix.stft().shape
# If you so choose, you can use *nussl* datasets in your own machine learning pipeline
# instead of using *nussl* features. However, if you want to use *nussl* for training
# a deep model, read on to the next tutorial!
end_time = time.time()
time_taken = end_time - start_time
print(f'Time taken: {time_taken:.4f} seconds')
| 17,169 | 30.275046 | 100 | py |
nussl | nussl-master/docs/examples/spatial/projet.py | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.5.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PROJET
#
# Fitzgerald, Derry, Antoine Liutkus, and Roland Badeau.
# "Projection-based demixing of spatial audio."
# IEEE/ACM Transactions on Audio, Speech, and Language
# Processing 24.9 (2016): 1560-1572.
#
# Fitzgerald, Derry, Antoine Liutkus, and Roland Badeau.
# "Projet—spatial audio separation using projections."
# 2016 IEEE International Conference on Acoustics,
# Speech and Signal Processing (ICASSP). IEEE, 2016.
#
# @article{fitzgerald2016projection,
# title={Projection-based demixing of spatial audio},
# author={Fitzgerald, Derry and Liutkus, Antoine and Badeau, Roland},
# journal={IEEE/ACM Transactions on Audio, Speech, and Language Processing},
# volume={24},
# number={9},
# pages={1560--1572},
# year={2016},
# publisher={IEEE}
# }
# @inproceedings{fitzgerald2016projet,
# title={Projet—spatial audio separation using projections},
# author={Fitzgerald, Derry and Liutkus, Antoine and Badeau, Roland},
# booktitle={2016 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP)},
# pages={36--40},
# year={2016},
# organization={IEEE}
# }
# +
import nussl
import matplotlib.pyplot as plt
import time
import numpy as np
import warnings
import torch
warnings.filterwarnings("ignore")
start_time = time.time()
nussl.utils.seed(0)
def visualize_and_embed(sources):
    """Visualize separated sources (mask view + waveform view) and embed audio players.

    Args:
        sources: dict mapping source name -> nussl.AudioSignal, as produced by
            nussl separation objects in this notebook.
    """
    plt.figure(figsize=(10, 6))
    # Top panel: sources shown as soft masks on a mel spectrogram.
    plt.subplot(211)
    nussl.utils.visualize_sources_as_masks(sources,
        y_axis='mel', db_cutoff=-40, alpha_amount=2.0)
    # Bottom panel: overlaid waveforms (legend suppressed to reduce clutter).
    plt.subplot(212)
    nussl.utils.visualize_sources_as_waveform(
        sources, show_legend=False)
    plt.show()
    # Embeds an interactive multitrack audio player in the notebook output.
    nussl.play_utils.multitrack(sources)
musdb = nussl.datasets.MUSDB18(
download=True, sample_rate=16000,
strict_sample_rate = False
)
i = 39
# -
# Setting up a signal for PROJET
# +
item = musdb[i]
sources = [
item['sources']['drums'],
item['sources']['other']
]
a = nussl.mixing.pan_audio_signal(sources[0], -35)
a_delays = [np.random.randint(1, 10) for _ in range(a.num_channels)]
a = nussl.mixing.delay_audio_signal(a, a_delays)
b = nussl.mixing.pan_audio_signal(sources[1], -15)
b_delays = [np.random.randint(1, 10) for _ in range(b.num_channels)]
b = nussl.mixing.delay_audio_signal(b, b_delays)
mix = a + b
# -
# Now running PROJET
# +
DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
separator = nussl.separation.spatial.Projet(
mix, num_sources=2, device=DEVICE, num_iterations=500)
estimates = separator()
estimates = {
f'Source {i}': e for i, e in enumerate(estimates)
}
visualize_and_embed(estimates)
# -
end_time = time.time()
time_taken = end_time - start_time
print(f'Time taken: {time_taken:.4f} seconds')
| 3,113 | 25.389831 | 108 | py |
RE-Net | RE-Net-master/pretrain.py | import argparse
import numpy as np
import time
import torch
import utils
import os
from global_model import RENet_global
from sklearn.utils import shuffle
import pickle
def train(args):
    """Pretrain the global RE-Net model on one temporal-KG dataset.

    Loads the train quadruples and pre-built per-timestamp graphs, then
    trains ``RENet_global`` to predict the per-timestamp entity
    distributions (``true_prob_s`` / ``true_prob_o``), checkpointing
    whenever the epoch loss improves.

    Args:
        args: parsed argparse namespace (see ``__main__`` below).
    """
    # load data
    num_nodes, num_rels = utils.get_total_number('./data/' + args.dataset, 'stat.txt')
    train_data, train_times_origin = utils.load_quadruples('./data/' + args.dataset, 'train.txt')

    # check cuda
    use_cuda = args.gpu >= 0 and torch.cuda.is_available()
    seed = 999
    np.random.seed(seed)
    torch.manual_seed(seed)
    if use_cuda:
        torch.cuda.set_device(args.gpu)

    os.makedirs('models', exist_ok=True)
    os.makedirs('models/' + args.dataset, exist_ok=True)
    # NOTE(review): only model==3 is exercised by the default arguments.  For
    # models 0-2 the path has no '/' before the suffix (e.g. 'models/ICEWS18attn.pth'),
    # and for any other value model_state_file is never assigned -- confirm intended.
    if args.model == 0:
        model_state_file = 'models/' + args.dataset + 'attn.pth'
    elif args.model == 1:
        model_state_file = 'models/' + args.dataset + 'mean.pth'
    elif args.model == 2:
        model_state_file = 'models/' + args.dataset + 'gcn.pth'
    elif args.model == 3:
        model_state_file = 'models/' + args.dataset + '/max' + str(args.maxpool) + 'rgcn_global.pth'
        # model_graph_file = 'models/' + args.dataset + 'rgcn_graph.pth'
        model_state_file_backup = 'models/' + args.dataset + '/max' + str(args.maxpool) + 'rgcn__global_backup.pth'
        # model_graph_file_backup = 'models/' + args.dataset + 'rgcn_graph_backup.pth'

    print("start training...")
    model = RENet_global(num_nodes,
                         args.n_hidden,
                         num_rels,
                         dropout=args.dropout,
                         model=args.model,
                         seq_len=args.seq_len,
                         num_k=args.num_k, maxpool=args.maxpool)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.00001)

    if use_cuda:
        model.cuda()

    # train_times = torch.from_numpy(train_times)
    # Per-timestamp DGL graphs are precomputed offline (see data preprocessing).
    with open('./data/' + args.dataset + '/train_graphs.txt', 'rb') as f:
        graph_dict = pickle.load(f)

    # Empirical subject/object distributions per timestamp: training targets.
    true_prob_s, true_prob_o = utils.get_true_distribution(train_data, num_nodes)

    epoch = 0
    loss_small = 10000
    while True:
        model.train()
        if epoch == args.max_epochs:
            break
        epoch += 1
        loss_epoch = 0
        t0 = time.time()
        # print(graph_dict.keys())
        # print(train_times_origin)
        # Shuffle timestamps and their target distributions together each epoch.
        train_times, true_prob_s, true_prob_o = shuffle(train_times_origin, true_prob_s, true_prob_o)
        for batch_data, true_s, true_o in utils.make_batch(train_times, true_prob_s, true_prob_o, args.batch_size):
            batch_data = torch.from_numpy(batch_data)
            true_s = torch.from_numpy(true_s)
            true_o = torch.from_numpy(true_o)
            if use_cuda:
                batch_data = batch_data.cuda()
                true_s = true_s.cuda()
                true_o = true_o.cuda()
            loss = model(batch_data, true_s, true_o, graph_dict)
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm)  # clip gradients
            optimizer.step()
            optimizer.zero_grad()
            loss_epoch += loss.item()
        t3 = time.time()
        # Refresh the cached global embeddings after each epoch's updates.
        model.global_emb = model.get_global_emb(train_times_origin, graph_dict)
        print("Epoch {:04d} | Loss {:.4f} | time {:.4f}".
              format(epoch, loss_epoch / (len(train_times) / args.batch_size), t3 - t0))
        # Checkpoint on best (lowest) epoch loss seen so far.
        if loss_epoch < loss_small:
            loss_small = loss_epoch
            # if args.model == 3:
            torch.save({'state_dict': model.state_dict(), 'global_emb': model.global_emb},
                       model_state_file)
            # with open(model_graph_file, 'wb') as fp:
            #     pickle.dump(model.graph_dict, fp)
            # else:
            #     torch.save({'state_dict': model.state_dict(), 'epoch': epoch,
            #                 's_hist': model.s_hist_test, 's_cache': model.s_his_cache,
            #                 'o_hist': model.o_hist_test, 'o_cache': model.o_his_cache,
            #                 'latest_time': model.latest_time},
            #                model_state_file)
    print("training done")
if __name__ == '__main__':
    # Command-line entry point for pretraining the global RE-Net model.
    parser = argparse.ArgumentParser(description='RENet')
    parser.add_argument("--dropout", type=float, default=0.5,
                        help="dropout probability")
    parser.add_argument("--n-hidden", type=int, default=200,
                        help="number of hidden units")
    parser.add_argument("--gpu", type=int, default=0,
                        help="gpu")
    parser.add_argument("--lr", type=float, default=1e-2,
                        help="learning rate")
    parser.add_argument("-d", "--dataset", type=str, default='ICEWS18',
                        help="dataset to use")
    parser.add_argument("--grad-norm", type=float, default=1.0,
                        help="norm to clip gradient to")
    # Fixed: stray line break in the original `default=100` argument.
    parser.add_argument("--max-epochs", type=int, default=100,
                        help="maximum epochs")
    parser.add_argument("--model", type=int, default=3)
    parser.add_argument("--seq-len", type=int, default=10)
    # Fixed help-text typo: "cuttoff" -> "cutoff".
    parser.add_argument("--num-k", type=int, default=10,
                        help="cutoff position")
    parser.add_argument("--batch-size", type=int, default=1024)
    parser.add_argument("--rnn-layers", type=int, default=1)
    parser.add_argument("--maxpool", type=int, default=1)

    args = parser.parse_args()
    print(args)
    train(args)
| 5,489 | 37.93617 | 115 | py |
RE-Net | RE-Net-master/test.py | import argparse
import numpy as np
import torch
import utils
import os
from model import RENet
from global_model import RENet_global
import pickle
def test(args):
    """Evaluate trained RE-Net checkpoints on the test split of ``args.dataset``.

    Loads the local model (with cached entity histories from training) and
    the global model, replays the test quadruples in time order, and prints
    MRR, MR and Hits@{1,3,10} -- raw when ``--raw`` is set, otherwise
    filtered against all known facts in ``total_data``.
    """
    # load data
    num_nodes, num_rels = utils.get_total_number('./data/' + args.dataset, 'stat.txt')
    if args.dataset == 'icews_know':
        # NOTE(review): this branch reuses test.txt as the validation split and
        # omits it from nothing -- total_data is train+test.  Confirm intended.
        train_data, train_times = utils.load_quadruples('./data/' + args.dataset, 'train.txt')
        valid_data, valid_times = utils.load_quadruples('./data/' + args.dataset, 'test.txt')
        test_data, test_times = utils.load_quadruples('./data/' + args.dataset, 'test.txt')
        total_data, total_times = utils.load_quadruples('./data/' + args.dataset, 'train.txt', 'test.txt')
    else:
        train_data, train_times = utils.load_quadruples('./data/' + args.dataset, 'train.txt')
        valid_data, valid_times = utils.load_quadruples('./data/' + args.dataset, 'valid.txt')
        test_data, test_times = utils.load_quadruples('./data/' + args.dataset, 'test.txt')
        total_data, total_times = utils.load_quadruples('./data/' + args.dataset, 'train.txt', 'valid.txt', 'test.txt')

    # check cuda
    use_cuda = args.gpu >= 0 and torch.cuda.is_available()
    if use_cuda:
        torch.cuda.set_device(args.gpu)
        torch.cuda.manual_seed_all(999)

    model_state_file = 'models/' + args.dataset + '/rgcn.pth'
    model_graph_file = 'models/' + args.dataset + '/rgcn_graph.pth'
    model_state_global_file2 = 'models/' + args.dataset + '/max' + str(args.maxpool) + 'rgcn_global2.pth'

    model = RENet(num_nodes,
                  args.n_hidden,
                  num_rels,
                  model=args.model,
                  seq_len=args.seq_len,
                  num_k=args.num_k)
    global_model = RENet_global(num_nodes,
                                args.n_hidden,
                                num_rels,
                                model=args.model,
                                seq_len=args.seq_len,
                                num_k=args.num_k, maxpool=args.maxpool)

    if use_cuda:
        model.cuda()
        global_model.cuda()

    # Per-test-triple cached histories: index 0 = neighbor lists, 1 = timestamps.
    with open('data/' + args.dataset + '/test_history_sub.txt', 'rb') as f:
        s_history_test_data = pickle.load(f)
    with open('data/' + args.dataset + '/test_history_ob.txt', 'rb') as f:
        o_history_test_data = pickle.load(f)

    s_history_test = s_history_test_data[0]
    s_history_test_t = s_history_test_data[1]
    o_history_test = o_history_test_data[0]
    o_history_test_t = o_history_test_data[1]

    print("\nstart testing:")
    # Restore the local model plus its cached per-entity history state.
    checkpoint = torch.load(model_state_file, map_location=lambda storage, loc: storage)
    model.load_state_dict(checkpoint['state_dict'])
    model.s_hist_test = checkpoint['s_hist']
    model.s_his_cache = checkpoint['s_cache']
    model.o_hist_test = checkpoint['o_hist']
    model.o_his_cache = checkpoint['o_cache']
    model.latest_time = checkpoint['latest_time']
    if args.dataset == "icews_know":
        model.latest_time = torch.LongTensor([4344])[0]
    model.global_emb = checkpoint['global_emb']
    model.s_hist_test_t = checkpoint['s_hist_t']
    model.s_his_cache_t = checkpoint['s_cache_t']
    model.o_hist_test_t = checkpoint['o_hist_t']
    model.o_his_cache_t = checkpoint['o_cache_t']
    with open(model_graph_file, 'rb') as f:
        model.graph_dict = pickle.load(f)

    checkpoint_global = torch.load(model_state_global_file2, map_location=lambda storage, loc: storage)
    global_model.load_state_dict(checkpoint_global['state_dict'])

    print("Using best epoch: {}".format(checkpoint['epoch']))

    total_data = torch.from_numpy(total_data)
    test_data = torch.from_numpy(test_data)

    model.eval()
    global_model.eval()
    total_loss = 0
    total_ranks = np.array([])
    total_ranks_filter = np.array([])
    ranks = []

    # Trim each entity's cached history to the configured sequence length.
    for ee in range(num_nodes):
        while len(model.s_hist_test[ee]) > args.seq_len:
            model.s_hist_test[ee].pop(0)
            model.s_hist_test_t[ee].pop(0)
        while len(model.o_hist_test[ee]) > args.seq_len:
            model.o_hist_test[ee].pop(0)
            model.o_hist_test_t[ee].pop(0)

    if use_cuda:
        total_data = total_data.cuda()

    latest_time = test_times[0]
    for i in range(len(test_data)):
        batch_data = test_data[i]
        s_hist = s_history_test[i]
        o_hist = o_history_test[i]
        s_hist_t = s_history_test_t[i]
        o_hist_t = o_history_test_t[i]
        # When the timestamp advances, flush the accumulated ranks for the
        # previous timestamp into ``ranks``.
        if latest_time != batch_data[3]:
            ranks.append(total_ranks_filter)
            latest_time = batch_data[3]
            total_ranks_filter = np.array([])

        if use_cuda:
            batch_data = batch_data.cuda()

        with torch.no_grad():
            # Filtered metric
            if args.raw:
                ranks_filter, loss = model.evaluate(batch_data, (s_hist, s_hist_t), (o_hist, o_hist_t),
                                                    global_model)
            else:
                ranks_filter, loss = model.evaluate_filter(batch_data, (s_hist, s_hist_t), (o_hist, o_hist_t),
                                                           global_model, total_data)

            total_ranks_filter = np.concatenate((total_ranks_filter, ranks_filter))
            total_loss += loss.item()

    ranks.append(total_ranks_filter)

    for rank in ranks:
        total_ranks = np.concatenate((total_ranks, rank))
    mrr = np.mean(1.0 / total_ranks)
    mr = np.mean(total_ranks)
    hits = []
    for hit in [1, 3, 10]:
        avg_count = np.mean((total_ranks <= hit))
        hits.append(avg_count)
        print("Hits (filtered) @ {}: {:.6f}".format(hit, avg_count))
    print("MRR (filtered): {:.6f}".format(mrr))
    print("MR (filtered): {:.6f}".format(mr))
if __name__ == '__main__':
    # Command-line entry point for evaluating trained RE-Net checkpoints.
    parser = argparse.ArgumentParser(description='RENet')
    parser.add_argument("-d", "--dataset", type=str, default='ICEWS18',
                        help="dataset to use")
    parser.add_argument("--gpu", type=int, default=0,
                        help="gpu")
    parser.add_argument("--model", type=int, default=3)
    parser.add_argument("--n-hidden", type=int, default=200,
                        help="number of hidden units")
    parser.add_argument("--seq-len", type=int, default=10)
    # Fixed help-text typo: "cuttoff" -> "cutoff".
    parser.add_argument("--num-k", type=int, default=1000,
                        help="cutoff position")
    parser.add_argument("--maxpool", type=int, default=1)
    parser.add_argument('--raw', action='store_true')

    args = parser.parse_args()
    test(args)
| 6,522 | 36.705202 | 119 | py |
RE-Net | RE-Net-master/RGCN.py | import torch
import torch.nn as nn
import dgl.function as fn
class RGCNLayer(nn.Module):
    """Base class for relational-GCN layers.

    Handles the optional bias, self-loop transform, activation and dropout;
    subclasses implement :meth:`propagate` to define the message passing.
    """

    def __init__(self, in_feat, out_feat, bias=None, activation=None,
                 self_loop=False, dropout=0.0):
        super(RGCNLayer, self).__init__()
        self.activation = activation
        self.self_loop = self_loop

        # Bug fix: the original stored the Parameter back into ``self.bias``
        # and later evaluated ``if self.bias:`` in forward(), which raises a
        # RuntimeError for a multi-element tensor.  It also initialized the
        # 1-D bias with xavier_uniform_, which rejects tensors with fewer
        # than 2 dimensions.  Keep ``self.bias`` as the Parameter (or None),
        # zero-initialize it, and test with ``is not None`` in forward().
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_feat))
        else:
            self.bias = None

        # weight for self loop
        if self.self_loop:
            self.loop_weight = nn.Parameter(torch.Tensor(in_feat, out_feat))
            nn.init.xavier_uniform_(self.loop_weight,
                                    gain=nn.init.calculate_gain('relu'))

        if dropout:
            self.dropout = nn.Dropout(dropout)
        else:
            self.dropout = None

    # define how propagation is done in subclass
    def propagate(self, g, reverse):
        raise NotImplementedError

    def forward(self, g, reverse):
        """Run one round of propagation on ``g``; updates g.ndata['h'] in place."""
        if self.self_loop:
            loop_message = torch.mm(g.ndata['h'], self.loop_weight)
            if self.dropout is not None:
                loop_message = self.dropout(loop_message)

        self.propagate(g, reverse)

        # apply bias and activation
        node_repr = g.ndata['h']
        if self.bias is not None:
            node_repr = node_repr + self.bias
        if self.self_loop:
            node_repr = node_repr + loop_message
        if self.activation:
            node_repr = self.activation(node_repr)

        g.ndata['h'] = node_repr
        return g
class RGCNBlockLayer(RGCNLayer):
    """RGCN layer with block-diagonal decomposition of relation weights.

    Each relation's (in_feat x out_feat) transform is restricted to
    ``num_bases`` diagonal blocks of size (submat_in x submat_out), which
    reduces parameters from in*out to num_bases*submat_in*submat_out per
    relation.  Forward and reverse edges use separate relation-type ids
    ('type_s' / 'type_o' in edata).
    """

    def __init__(self, in_feat, out_feat, num_rels, num_bases, bias=None,
                 activation=None, self_loop=False, dropout=0.0):
        super(RGCNBlockLayer, self).__init__(in_feat, out_feat, bias,
                                             activation, self_loop=self_loop,
                                             dropout=dropout)
        self.num_rels = num_rels
        self.num_bases = num_bases
        assert self.num_bases > 0

        self.out_feat = out_feat
        self.submat_in = in_feat // self.num_bases
        self.submat_out = out_feat // self.num_bases

        # assuming in_feat and out_feat are both divisible by num_bases
        # if self.num_rels == 2:
        #     self.in_feat = in_feat
        #     self.weight = nn.Parameter(torch.Tensor(
        #         self.num_rels, in_feat, out_feat))
        # else:
        # One flattened block-diagonal weight per relation.
        self.weight = nn.Parameter(torch.Tensor(
            self.num_rels, self.num_bases * self.submat_in * self.submat_out))
        nn.init.xavier_uniform_(self.weight, gain=nn.init.calculate_gain('relu'))

    def msg_func(self, edges, reverse):
        """Compute per-edge messages via batched block-matrix products.

        ``reverse`` selects which relation-type id labels the edge
        ('type_o' for the reversed direction, 'type_s' otherwise).
        """
        if reverse:
            weight = self.weight.index_select(0, edges.data['type_o']).view(
                -1, self.submat_in, self.submat_out)
        else:
            weight = self.weight.index_select(0, edges.data['type_s']).view(
                -1, self.submat_in, self.submat_out)
        # Reshape source features so each block multiplies its own slice.
        node = edges.src['h'].view(-1, 1, self.submat_in)
        msg = torch.bmm(node, weight).view(-1, self.out_feat)
        return {'msg': msg}

    def propagate(self, g, reverse):
        # Sum incoming messages into 'h', then rescale by the degree norm.
        g.update_all(lambda x: self.msg_func(x, reverse), fn.sum(msg='msg', out='h'), self.apply_func)

    def apply_func(self, nodes):
        # Degree normalization ('norm' is 1/in-degree, see comp_deg_norm).
        return {'h': nodes.data['h'] * nodes.data['norm']}
| 3,494 | 35.789474 | 102 | py |
RE-Net | RE-Net-master/utils.py | import numpy as np
import os
import dgl
import torch
from collections import defaultdict
def get_total_number(inPath, fileName):
    """Read the first line of a stat file and return its first two integers.

    For RE-Net stat files this is (num_entities, num_relations).  Only the
    first line is consumed; an empty file yields None (as before).
    """
    stat_path = os.path.join(inPath, fileName)
    with open(stat_path, 'r') as handle:
        for row in handle:
            fields = row.split()
            return int(fields[0]), int(fields[1])
def _read_quadruple_file(path, quadrupleList, times):
    """Append [head, rel, tail, time] rows from one whitespace-separated file.

    Each line is ``head rel tail time``; timestamps are also collected into
    the ``times`` set.  Mutates both arguments in place.
    """
    with open(path, 'r') as fr:
        for line in fr:
            line_split = line.split()
            head = int(line_split[0])
            rel = int(line_split[1])
            tail = int(line_split[2])
            time = int(line_split[3])
            quadrupleList.append([head, rel, tail, time])
            times.add(time)


def load_quadruples(inPath, fileName, fileName2=None, fileName3=None):
    """Load temporal-KG quadruples from up to three files under ``inPath``.

    The original repeated the identical parsing loop three times; it is now
    factored into ``_read_quadruple_file``.  Behavior is unchanged: rows keep
    file order (fileName, then fileName2, then fileName3).

    Returns:
        (quadruples, times): an (N, 4) int array of [head, rel, tail, time]
        rows and a sorted 1-D array of the unique timestamps.
    """
    quadrupleList = []
    times = set()
    for name in (fileName, fileName2, fileName3):
        if name is not None:
            _read_quadruple_file(os.path.join(inPath, name), quadrupleList, times)
    times = sorted(times)
    return np.asarray(quadrupleList), np.asarray(times)
def make_batch(a, b, c, n):
    """Yield aligned slices of length ``n`` from three parallel sequences.

    The final batch may be shorter when the length is not a multiple of n.
    """
    start = 0
    total = len(a)
    while start < total:
        stop = start + n
        yield a[start:stop], b[start:stop], c[start:stop]
        start = stop
def make_batch2(a, b, c, d, e, n):
    """Yield aligned slices of length ``n`` from five parallel sequences.

    The final batch may be shorter when the length is not a multiple of n.
    """
    start = 0
    total = len(a)
    while start < total:
        stop = start + n
        yield a[start:stop], b[start:stop], c[start:stop], d[start:stop], e[start:stop]
        start = stop
def get_big_graph(data, num_rels):
    """Build a bidirectional DGL graph from (s, r, o) triples of one timestamp.

    Unique entities are relabeled to a compact 0..V-1 range; every triple adds
    both a forward and a reverse edge.  edata['type_s'] holds the relation id
    used when aggregating for subjects and edata['type_o'] for objects, with
    reverse relations offset by ``num_rels``.  ``g.ids`` maps the original
    entity id to its local node index.
    """
    src, rel, dst = data.transpose()
    # Relabel entities: uniq_v are original ids, edges are compact indices.
    uniq_v, edges = np.unique((src, dst), return_inverse=True)
    src, dst = np.reshape(edges, (2, -1))
    g = dgl.DGLGraph()
    g.add_nodes(len(uniq_v))
    # Add each edge in both directions; reverse direction gets rel + num_rels.
    src, dst = np.concatenate((src, dst)), np.concatenate((dst, src))
    rel_o = np.concatenate((rel + num_rels, rel))
    rel_s = np.concatenate((rel, rel + num_rels))
    g.add_edges(src, dst)
    norm = comp_deg_norm(g)
    g.ndata.update({'id': torch.from_numpy(uniq_v).long().view(-1, 1), 'norm': norm.view(-1, 1)})
    g.edata['type_s'] = torch.LongTensor(rel_s)
    g.edata['type_o'] = torch.LongTensor(rel_o)
    # Map original entity id -> local node index for later subgraph lookups.
    g.ids = {}
    idx = 0
    for idd in uniq_v:
        g.ids[idd] = idx
        idx += 1
    return g
def comp_deg_norm(g):
    """Return per-node normalization 1/in-degree (1.0 for isolated nodes)."""
    in_deg = g.in_degrees(range(g.number_of_nodes())).float()
    # Avoid division by zero for nodes with no incoming edges.
    in_deg[torch.nonzero(in_deg == 0).view(-1)] = 1
    norm = 1.0 / in_deg
    return norm
def get_data(s_hist, o_hist):
    """Collect unique (s, r, o) triples from per-entity histories.

    Subject histories contribute rows (i, r, o) for entity ``i``; object
    histories contribute rows (s, r, i).  Assumes each non-empty history
    entry is a tensor of [rel, neighbor] pairs -- TODO confirm against the
    callers that build these histories.
    """
    data = None
    for i, s_his in enumerate(s_hist):
        if len(s_his) != 0:
            # Prepend the subject id i to each [rel, obj] row.
            tem = torch.cat((torch.LongTensor([i]).repeat(len(s_his), 1), torch.LongTensor(s_his.cpu())), dim=1)
            if data is None:
                data = tem.cpu().numpy()
            else:
                data = np.concatenate((data, tem.cpu().numpy()), axis=0)
    for i, o_his in enumerate(o_hist):
        if len(o_his) != 0:
            # Column order here is (subject, rel, object=i): history rows are
            # [rel, subject], so column 1 becomes s and column 0 becomes r.
            tem = torch.cat((torch.LongTensor(o_his[:,1].cpu()).view(-1,1), torch.LongTensor(o_his[:,0].cpu()).view(-1,1), torch.LongTensor([i]).repeat(len(o_his), 1)), dim=1)
            if data is None:
                data = tem.cpu().numpy()
            else:
                data = np.concatenate((data, tem.cpu().numpy()), axis=0)
    # Deduplicate triples contributed by both directions.
    data = np.unique(data, axis=0)
    return data
def make_subgraph(g, nodes):
    """Extract the subgraph of ``g`` induced by the given original entity ids.

    Node/edge features are copied from the parent graph, the degree norm is
    recomputed on the subgraph, and the ``ids`` map (original entity id ->
    local node index) is rebuilt for the subgraph.
    """
    nodes = list(nodes)
    # Translate original entity ids to the parent graph's local node indices.
    relabeled_nodes = []
    for node in nodes:
        relabeled_nodes.append(g.ids[node])
    sub_g = g.subgraph(relabeled_nodes)
    # Copy parent features ('norm' is recomputed below, so it is skipped).
    sub_g.ndata.update({k: g.ndata[k][sub_g.ndata[dgl.NID]] for k in g.ndata if k != 'norm'})
    sub_g.edata.update({k: g.edata[k][sub_g.edata[dgl.EID]] for k in g.edata})
    sub_g.ids = {}
    norm = comp_deg_norm(sub_g)
    sub_g.ndata['norm'] = norm.view(-1,1)
    node_id = sub_g.ndata['id'].view(-1).tolist()
    sub_g.ids.update(zip(node_id, list(range(sub_g.number_of_nodes()))))
    return sub_g
def cuda(tensor):
    """Move a CPU tensor onto the current CUDA device; pass GPU tensors through."""
    on_cpu = tensor.device == torch.device('cpu')
    return tensor.cuda() if on_cpu else tensor
def move_dgl_to_cuda(g):
    """Move all node and edge features of DGL graph ``g`` to the GPU in place."""
    g.ndata.update({k: cuda(g.ndata[k]) for k in g.ndata})
    g.edata.update({k: cuda(g.edata[k]) for k in g.edata})
'''
Get sorted s and r to make batch for RNN (sorted by length)
'''
def get_neighs_by_t(s_hist_sorted, s_hist_t_sorted, s_tem):
    """Group, per timestamp, the neighbor entities from every subject's
    history together with the subject itself.

    Returns:
        defaultdict mapping timestamp -> set of entity ids (neighbor column 1
        of each history step, plus the subject id from ``s_tem``).
    """
    per_time = defaultdict(set)
    for idx, (hist, hist_t) in enumerate(zip(s_hist_sorted, s_hist_t_sorted)):
        for step_neighs, step_t in zip(hist, hist_t):
            bucket = per_time[step_t]
            bucket.update(step_neighs[:, 1].tolist())
            bucket.add(s_tem[idx].item())
    return per_time
def get_g_list_id(neighs_t, graph_dict):
    """Build one induced subgraph per timestamp from its neighbor set.

    Returns:
        (g_list, g_id_dict): the subgraphs in iteration order and a map from
        timestamp to its index in ``g_list``.  Each subgraph is annotated
        with ``start_id``, its node offset once the list is concatenated
        with dgl.batch (cumulative sum of previous subgraph sizes).
    """
    g_id_dict = {}
    g_list = []
    idx = 0
    for tim in neighs_t.keys():
        g_id_dict[tim] = idx
        g_list.append(make_subgraph(graph_dict[tim], neighs_t[tim]))
        if idx == 0:
            g_list[idx].start_id = 0
        else:
            g_list[idx].start_id = g_list[idx - 1].start_id + g_list[idx - 1].number_of_nodes()
        idx += 1
    return g_list, g_id_dict
def get_node_ids_to_g_id(s_hist_sorted, s_hist_t_sorted, s_tem, g_list, g_id_dict):
    """Locate each subject's node inside the batched graph, per history step.

    Returns:
        (node_ids_graph, len_s): flat lists with one entry per history step --
        the subject's batched-graph node position (local id plus the
        subgraph's ``start_id`` offset) and the step's neighbor count.
    """
    node_ids_graph = []
    len_s = []
    for idx, (hist, hist_t) in enumerate(zip(s_hist_sorted, s_hist_t_sorted)):
        for neighs, t in zip(hist, hist_t):
            len_s.append(len(neighs))
            sub_graph = g_list[g_id_dict[t]]
            # Offset the local node id by the subgraph's position in the batch.
            node_ids_graph.append(sub_graph.ids[s_tem[idx].item()] + sub_graph.start_id)
    return node_ids_graph, len_s
'''
Get sorted s and r to make batch for RNN (sorted by length)
'''
def get_sorted_s_r_embed(s_hist, s, r, ent_embeds):
    """Sort subjects by history length (for packed RNN input) and gather
    neighbor embeddings.

    Entities with empty histories are dropped.  Returns the non-zero history
    lengths, the subjects/relations reordered by that sort, the flat neighbor
    embeddings, the per-step neighbor counts, and the embeddings split per
    step.  Requires CUDA (tensors are moved with .cuda()).
    """
    s_hist_len = torch.LongTensor(list(map(len, s_hist))).cuda()
    s_len, s_idx = s_hist_len.sort(0, descending=True)
    num_non_zero = len(torch.nonzero(s_len))
    s_len_non_zero = s_len[:num_non_zero]
    # Reorder histories to match the descending-length sort.
    s_hist_sorted = []
    for idx in s_idx:
        s_hist_sorted.append(s_hist[idx.item()])
    flat_s = []
    len_s = []
    s_hist_sorted = s_hist_sorted[:num_non_zero]
    for hist in s_hist_sorted:
        for neighs in hist:
            len_s.append(len(neighs))
            for neigh in neighs:
                flat_s.append(neigh)
    # NOTE: s_tem/r_tem keep the full sorted order (zero-history entries
    # included at the tail), while the embeddings cover non-zero entries only.
    s_tem = s[s_idx]
    r_tem = r[s_idx]
    embeds = ent_embeds[torch.LongTensor(flat_s).cuda()]
    embeds_split = torch.split(embeds, len_s)
    return s_len_non_zero, s_tem, r_tem, embeds, len_s, embeds_split
def get_sorted_s_r_embed_rgcn(s_hist_data, s, r, ent_embeds, graph_dict, global_emb):
    """Sort subjects by history length and build the batched RGCN input.

    ``s_hist_data`` is (histories, history timestamps).  Entities with empty
    histories are dropped; the per-timestamp subgraphs are batched into one
    DGL graph with node features initialized from ``ent_embeds``.  Also
    gathers the global embedding for each history timestamp.  Requires CUDA.

    Returns:
        (s_len_non_zero, s_tem, r_tem, batched_graph, node_ids_graph,
         global_emb_list)
    """
    s_hist = s_hist_data[0]
    s_hist_t = s_hist_data[1]
    s_hist_len = torch.LongTensor(list(map(len, s_hist))).cuda()
    s_len, s_idx = s_hist_len.sort(0, descending=True)
    num_non_zero = len(torch.nonzero(s_len))
    s_len_non_zero = s_len[:num_non_zero]
    s_hist_sorted = []
    s_hist_t_sorted = []
    global_emb_list = []
    for i, idx in enumerate(s_idx):
        if i == num_non_zero:
            break
        s_hist_sorted.append(s_hist[idx])
        s_hist_t_sorted.append(s_hist_t[idx])
        # One global embedding row per history timestamp of this subject.
        for tt in s_hist_t[idx]:
            global_emb_list.append(global_emb[tt].view(1, ent_embeds.shape[1]).cpu())
    s_tem = s[s_idx]
    r_tem = r[s_idx]
    # Build per-timestamp subgraphs and locate each subject inside the batch.
    neighs_t = get_neighs_by_t(s_hist_sorted, s_hist_t_sorted, s_tem)
    g_list, g_id_dict = get_g_list_id(neighs_t, graph_dict)
    node_ids_graph, len_s = get_node_ids_to_g_id(s_hist_sorted, s_hist_t_sorted, s_tem, g_list, g_id_dict)
    idx = torch.cuda.current_device()
    g_list = [g.to(torch.device('cuda:'+str(idx))) for g in g_list]
    batched_graph = dgl.batch(g_list)
    batched_graph.ndata['h'] = ent_embeds[batched_graph.ndata['id']].view(-1, ent_embeds.shape[1])
    move_dgl_to_cuda(batched_graph)
    global_emb_list = torch.cat(global_emb_list, dim=0).cuda()
    return s_len_non_zero, s_tem, r_tem, batched_graph, node_ids_graph, global_emb_list
def get_s_r_embed_rgcn(s_hist_data, s, r, ent_embeds, graph_dict, global_emb):
    """Unsorted variant of :func:`get_sorted_s_r_embed_rgcn`.

    Keeps the input order (``s_idx`` is arange instead of a length sort) but
    is otherwise identical: builds the batched per-timestamp subgraphs and
    gathers the global embeddings for each history step.  Requires CUDA.

    NOTE(review): num_non_zero truncation assumes zero-length histories come
    last, which holds only for sorted input -- confirm callers rely on this.
    """
    s_hist = s_hist_data[0]
    s_hist_t = s_hist_data[1]
    s_hist_len = torch.LongTensor(list(map(len, s_hist))).cuda()
    s_idx = torch.arange(0,len(s_hist_len))
    s_len = s_hist_len
    num_non_zero = len(torch.nonzero(s_len))
    s_len_non_zero = s_len[:num_non_zero]
    s_hist_sorted = []
    s_hist_t_sorted = []
    global_emb_list = []
    for i, idx in enumerate(s_idx):
        if i == num_non_zero:
            break
        s_hist_sorted.append(s_hist[idx])
        s_hist_t_sorted.append(s_hist_t[idx])
        for tt in s_hist_t[idx]:
            global_emb_list.append(global_emb[tt].view(1, ent_embeds.shape[1]).cpu())
    s_tem = s[s_idx]
    r_tem = r[s_idx]
    neighs_t = get_neighs_by_t(s_hist_sorted, s_hist_t_sorted, s_tem)
    g_list, g_id_dict = get_g_list_id(neighs_t, graph_dict)
    node_ids_graph, len_s = get_node_ids_to_g_id(s_hist_sorted, s_hist_t_sorted, s_tem, g_list, g_id_dict)
    idx = torch.cuda.current_device()
    g_list = [g.to(torch.device('cuda:'+str(idx))) for g in g_list]
    batched_graph = dgl.batch(g_list)
    batched_graph.ndata['h'] = ent_embeds[batched_graph.ndata['id']].view(-1, ent_embeds.shape[1])
    move_dgl_to_cuda(batched_graph)
    global_emb_list = torch.cat(global_emb_list, dim=0).cuda()
    return s_len_non_zero, s_tem, r_tem, batched_graph, node_ids_graph, global_emb_list
# assuming pred and soft_targets are both Variables with shape (batchsize, num_of_classes), each row of pred is predicted logits and each row of soft_targets is a discrete distribution.
def soft_cross_entropy(pred, soft_targets):
    """Cross-entropy between predicted logits and a soft target distribution.

    Args:
        pred: (batch, num_classes) logits; cast to double and moved to GPU
            (this function requires CUDA, like the rest of the training code).
        soft_targets: (batch, num_classes) target distribution rows.

    Returns:
        Scalar tensor: mean over the batch of -sum(target * log_softmax(pred)).
    """
    # Explicit dim=1: implicit-dim LogSoftmax is deprecated, and for the 2-D
    # (batch, classes) input used here the legacy behavior is exactly dim=1.
    logsoftmax = torch.nn.LogSoftmax(dim=1)
    pred = pred.type('torch.DoubleTensor').cuda()
    return torch.mean(torch.sum(- soft_targets * logsoftmax(pred), 1))
def get_true_distribution(train_data, num_s):
    """Compute per-timestamp empirical entity distributions.

    Walks `train_data` (quadruples grouped by increasing timestamp) and, for
    every timestamp, builds the normalized frequency distribution over
    subjects and over objects of the events at that timestamp.

    Fixes over the original implementation:
      * events are flushed BEFORE counting the first triple of a new
        timestamp, so each event is attributed to its own timestamp;
      * the final timestamp's row is normalized like all the others
        (previously raw counts were appended);
      * single-timestamp and empty inputs no longer crash, and the first
        timestamp need not be 0.

    Args:
        train_data: iterable of quadruples; index 0 is the subject id,
            index 2 the object id, index 3 the timestamp.
        num_s: total number of entities (length of each distribution row).

    Returns:
        (true_prob_s, true_prob_o): arrays of shape (num_timestamps, num_s)
        whose rows each sum to 1, or (None, None) for empty input.
    """
    true_prob_s = None
    true_prob_o = None
    true_s = np.zeros(num_s)
    true_o = np.zeros(num_s)

    def _append(prob, counts):
        # Normalize accumulated counts into a distribution row and stack it.
        row = (counts / np.sum(counts)).reshape(1, num_s)
        return row if prob is None else np.concatenate((prob, row), axis=0)

    current_t = None
    for triple in train_data:
        s = triple[0]
        o = triple[2]
        t = triple[3]
        if current_t is None:
            current_t = t  # initialize from the data, not a hard-coded 0
        elif t != current_t:
            # Flush the previous timestamp before counting this triple.
            true_prob_s = _append(true_prob_s, true_s)
            true_prob_o = _append(true_prob_o, true_o)
            true_s = np.zeros(num_s)
            true_o = np.zeros(num_s)
            current_t = t
        true_s[s] += 1
        true_o[o] += 1
    if current_t is not None:
        # Flush the final timestamp (normalized, unlike the original).
        true_prob_s = _append(true_prob_s, true_s)
        true_prob_o = _append(true_prob_o, true_o)
    return true_prob_s, true_prob_o
| 11,375 | 34.003077 | 185 | py |
RE-Net | RE-Net-master/model.py | import torch.nn as nn
import numpy as np
import torch
import torch.nn.functional as F
from Aggregator import MeanAggregator, AttnAggregator, RGCNAggregator
from utils import *
import time
class RENet(nn.Module):
    """Recurrent Event Network (RE-Net) for temporal knowledge graphs.

    Each entity keeps a history of past interaction graphs; the history is
    aggregated by an RGCN (`self.aggregator`), summarized by a GRU, and
    combined with entity/relation embeddings to score candidate entities.
    Rows [0, num_rels) of `rel_embeds` serve the s->o direction, rows
    [num_rels, 2*num_rels) the reversed o->s direction.
    """

    def __init__(self, in_dim, h_dim, num_rels, dropout=0, model=0, seq_len=10, num_k=10):
        """
        Args:
            in_dim: number of entities.
            h_dim: embedding / hidden dimension.
            num_rels: number of relations (doubled internally for reverse).
            dropout: dropout probability.
            model: aggregator variant selector (forwarded to the aggregator).
            seq_len: maximum history length kept per entity.
            num_k: number of candidates sampled while rolling forward in test.
        """
        super(RENet, self).__init__()
        self.in_dim = in_dim
        self.h_dim = h_dim
        self.num_rels = num_rels
        self.model = model
        self.seq_len = seq_len
        self.num_k = num_k
        self.rel_embeds = nn.Parameter(torch.Tensor(2 * num_rels, h_dim))
        nn.init.xavier_uniform_(self.rel_embeds,
                                gain=nn.init.calculate_gain('relu'))
        self.ent_embeds = nn.Parameter(torch.Tensor(in_dim, h_dim))
        nn.init.xavier_uniform_(self.ent_embeds,
                                gain=nn.init.calculate_gain('relu'))
        self.dropout = nn.Dropout(dropout)
        # encoder consumes (neighborhood, entity, relation, global) = 4*h_dim;
        # encoder_r consumes (neighborhood, entity, global) = 3*h_dim.
        self.encoder = nn.GRU(4 * h_dim, h_dim, batch_first=True)
        self.encoder_r = nn.GRU(3 * h_dim, h_dim, batch_first=True)
        # Per-entity caches of sampled candidate probabilities/indices used
        # while advancing through test time.
        self.preds_list_s = defaultdict(lambda: torch.zeros(self.num_k))
        self.preds_ind_s = defaultdict(lambda: torch.zeros(self.num_k))
        self.preds_list_o = defaultdict(lambda: torch.zeros(self.num_k))
        self.preds_ind_o = defaultdict(lambda: torch.zeros(self.num_k))
        self.aggregator = RGCNAggregator(h_dim, dropout, in_dim, num_rels, 100, model, seq_len)
        self.linear = nn.Linear(3 * h_dim, in_dim)      # entity decoder
        self.linear_r = nn.Linear(2 * h_dim, num_rels)  # relation decoder
        self.global_emb = None

        # For recording history in inference
        self.s_hist_test = None
        self.o_hist_test = None
        self.s_hist_test_t = None
        self.o_hist_test_t = None
        self.s_his_cache = None
        self.o_his_cache = None
        self.s_his_cache_t = None
        self.o_his_cache_t = None
        self.graph_dict = None
        self.data = None
        self.global_emb = None
        self.latest_time = 0
        self.criterion = nn.CrossEntropyLoss()

    """
    Prediction function in training. 
    This should be different from testing because in testing we don't use ground-truth history.
    """
    def forward(self, triplets, s_hist, o_hist, graph_dict, subject=True):
        """Training loss for a batch of triples in one direction.

        Returns loss_entity + 0.1 * loss_relation (the relation head is an
        auxiliary objective).
        """
        # Select direction: predicting objects reads subject histories with
        # the forward relation table; otherwise the reversed setup.
        if subject:
            rel_embeds = self.rel_embeds[:self.num_rels]
            s = triplets[:, 0]
            r = triplets[:, 1]
            o = triplets[:, 2]
            hist = s_hist
            reverse = False
        else:
            rel_embeds = self.rel_embeds[self.num_rels:]
            o = triplets[:, 0]
            r = triplets[:, 1]
            s = triplets[:, 2]
            hist = o_hist
            reverse = True

        hist_len = torch.LongTensor(list(map(len, hist[0]))).cuda()
        # Sort by history length (descending) for packed-sequence encoding.
        s_len, s_idx = hist_len.sort(0, descending=True)
        s_packed_input, s_packed_input_r = self.aggregator(hist, s, r, self.ent_embeds,
                                                           rel_embeds, graph_dict, self.global_emb,
                                                           reverse=reverse)
        tt, s_h = self.encoder(s_packed_input)
        s_h = s_h.squeeze()
        # Entities with empty histories were dropped by the packed sequence;
        # pad their hidden states with zeros.
        s_h = torch.cat((s_h, torch.zeros(len(s) - len(s_h), self.h_dim).cuda()), dim=0)

        ob_pred = self.linear(
            self.dropout(torch.cat((self.ent_embeds[s[s_idx]], s_h, rel_embeds[r[s_idx]]), dim=1)))
        loss_sub = self.criterion(ob_pred, o[s_idx])

        ###### Relations
        tt, s_q = self.encoder_r(s_packed_input_r)
        s_q = s_q.squeeze()
        s_q = torch.cat((s_q, torch.zeros(len(s) - len(s_q), self.h_dim).cuda()), dim=0)
        ob_pred_r = self.linear_r(
            self.dropout(torch.cat((self.ent_embeds[s[s_idx]], s_q), dim=1)))
        loss_sub_r = self.criterion(ob_pred_r, r[s_idx])
        ######
        loss = loss_sub + 0.1 * loss_sub_r
        return loss

    def init_history(self, triples, s_history, o_history, valid_triples, s_history_valid, o_history_valid, test_triples=None, s_history_test=None, o_history_test=None):
        """Seed per-entity test-time history buffers.

        Copies the latest training histories per entity, then overlays
        validation (and optionally test) histories whose last timestamp does
        not exceed the last training timestamp seen.
        """
        s_hist = s_history[0]
        s_hist_t = s_history[1]
        o_hist = o_history[0]
        o_hist_t = o_history[1]
        self.s_hist_test = [[] for _ in range(self.in_dim)]
        self.o_hist_test = [[] for _ in range(self.in_dim)]
        self.s_hist_test_t = [[] for _ in range(self.in_dim)]
        self.o_hist_test_t = [[] for _ in range(self.in_dim)]
        self.s_his_cache = [[] for _ in range(self.in_dim)]
        self.o_his_cache = [[] for _ in range(self.in_dim)]
        self.s_his_cache_t = [None for _ in range(self.in_dim)]
        self.o_his_cache_t = [None for _ in range(self.in_dim)]

        for triple, s_his, s_his_t, o_his, o_his_t in zip(triples, s_hist, s_hist_t, o_hist, o_hist_t):
            s = triple[0]
            o = triple[2]
            # NOTE(review): last_t ends up as the timestamp of the final
            # training triple; relies on `triples` being time-ordered.
            last_t = triple[3]
            self.s_hist_test[s] = s_his.copy()
            self.s_hist_test_t[s] = s_his_t.copy()
            self.o_hist_test[o] = o_his.copy()
            self.o_hist_test_t[o] = o_his_t.copy()

        s_hist = s_history_valid[0]
        s_hist_t = s_history_valid[1]
        o_hist = o_history_valid[0]
        o_hist_t = o_history_valid[1]
        for triple, s_his, s_his_t, o_his, o_his_t in zip(valid_triples, s_hist, s_hist_t, o_hist, o_hist_t):
            s = triple[0]
            o = triple[2]
            t = triple[3]
            if len(s_his_t) != 0 and s_his_t[-1] <= last_t:
                self.s_hist_test[s] = s_his.copy()
                self.s_hist_test_t[s] = s_his_t.copy()
            if len(o_his_t) != 0 and o_his_t[-1] <= last_t:
                self.o_hist_test[o] = o_his.copy()
                self.o_hist_test_t[o] = o_his_t.copy()

        if test_triples is not None:
            s_hist = s_history_test[0]
            s_hist_t = s_history_test[1]
            o_hist = o_history_test[0]
            o_hist_t = o_history_test[1]
            for triple, s_his, s_his_t, o_his, o_his_t in zip(test_triples, s_hist, s_hist_t, o_hist, o_hist_t):
                s = triple[0]
                o = triple[2]
                t = triple[3]
                if len(s_his_t) != 0 and s_his_t[-1] <= last_t:
                    self.s_hist_test[s] = s_his.copy()
                    self.s_hist_test_t[s] = s_his_t.copy()
                if len(o_his_t) != 0 and o_his_t[-1] <= last_t:
                    self.o_hist_test[o] = o_his.copy()
                    self.o_hist_test_t[o] = o_his_t.copy()

    def pred_r_rank2(self, s, r, subject=True):
        """Score all (relation, entity) pairs for one sampled entity.

        `s` holds a single entity id repeated num_rels times and `r`
        enumerates all relations. Returns a (num_rels, in_dim) tensor of
        joint probabilities p(o | s, r) * p(r | s).
        """
        if subject:
            s_history = []
            s_history_t = []
            s_history.append(self.s_hist_test[s[0].item()].copy())
            s_history = s_history * self.num_rels
            s_history_t.append(self.s_hist_test_t[s[0].item()].copy())
            s_history_t = s_history_t * self.num_rels
            rel_embeds = self.rel_embeds[:self.num_rels]
            reverse = False
        else:
            s_history = []
            s_history_t = []
            s_history.append(self.o_hist_test[s[0].item()].copy())
            s_history = s_history * self.num_rels
            s_history_t.append(self.o_hist_test_t[s[0].item()].copy())
            s_history_t = s_history_t * self.num_rels
            rel_embeds = self.rel_embeds[self.num_rels:]
            reverse = True

        if len(s_history[0]) == 0:
            # No history: fall back to zero hidden states.
            s_h = torch.zeros(self.num_rels, self.h_dim).cuda()
            s_q = torch.zeros(self.num_rels, self.h_dim).cuda()
        else:
            s_packed_input, s_packed_input_r = self.aggregator.predict_batch((s_history, s_history_t), s, r, self.ent_embeds,
                                                                             rel_embeds, self.graph_dict, self.global_emb,
                                                                             reverse=reverse)
            if s_packed_input is None:
                s_h = torch.zeros(len(s), self.h_dim).cuda()
                s_q = torch.zeros(len(s), self.h_dim).cuda()
            else:
                tt, s_h = self.encoder(s_packed_input)
                s_h = s_h.squeeze()
                s_h = torch.cat((s_h, torch.zeros(len(s) - len(s_h), self.h_dim).cuda()), dim=0)

                ###### Relations
                tt, s_q = self.encoder_r(s_packed_input_r)
                s_q = s_q.squeeze()

        ob_pred = self.linear(torch.cat((self.ent_embeds[s], s_h, rel_embeds), dim=1))
        p_o = torch.softmax(ob_pred.view(self.num_rels, self.in_dim), dim=1)
        ob_pred_r = self.linear_r(torch.cat((self.ent_embeds[s[0]], s_q[0]), dim=0))
        p_r = torch.softmax(ob_pred_r.view(-1), dim=0)
        ob_pred_rank = p_o * p_r.view(self.num_rels, 1)
        return ob_pred_rank

    """
    Prediction function in testing
    """
    def predict(self, triplet, s_hist, o_hist, global_model):
        """Predict subject/object scores for one test triple.

        When the timestamp advances past `self.latest_time`, first rolls the
        recorded histories forward: samples likely triples at the previous
        timestamp from the global model, caches them, materializes them as a
        graph snapshot, and appends them to the per-entity histories (ground
        truth is not available at test time).
        Returns (loss, sub_pred, ob_pred).
        """
        s = triplet[0]
        r = triplet[1]
        o = triplet[2]
        t = triplet[3].cpu()
        if self.latest_time != t:
            _, sub, prob_sub = global_model.predict(self.latest_time, self.graph_dict, subject=True)
            m = torch.distributions.categorical.Categorical(prob_sub)
            subjects = m.sample(torch.Size([self.num_k]))
            prob_subjects = prob_sub[subjects]
            s_done = set()
            # BUG FIX: the loops below previously rebound `s` (and `o` in the
            # object block), clobbering the current triple's entities that are
            # used after this branch; distinct names preserve them.
            for cand_s, prob_s in zip(subjects, prob_subjects):
                if cand_s in s_done:
                    continue
                else:
                    s_done.add(cand_s)
                ss = torch.LongTensor([cand_s]).repeat(self.num_rels)
                rr = torch.arange(0, self.num_rels)
                probs = prob_s * self.pred_r_rank2(ss, rr, subject=True)
                probs, indices = torch.topk(probs.view(-1), self.num_k, sorted=False)
                self.preds_list_s[cand_s] = probs.view(-1)
                self.preds_ind_s[cand_s] = indices.view(-1)

            s_to_id = dict()
            s_num = len(self.preds_list_s.keys())
            prob_tensor = torch.zeros(s_num * self.num_k)
            idx = 0
            for i, key_s in enumerate(self.preds_list_s.keys()):
                s_to_id[idx] = key_s
                prob_tensor[i * self.num_k: (i + 1) * self.num_k] = self.preds_list_s[key_s]
                idx += 1
            _, triple_candidates = torch.topk(prob_tensor, self.num_k, sorted=False)
            indices = triple_candidates // self.num_k
            for i, cand_idx in enumerate(indices):
                ent_s = s_to_id[cand_idx.item()]
                num_r_num_s = self.preds_ind_s[ent_s][triple_candidates[i] % self.num_k]
                # Decode the flattened (relation, object) index.
                rr = num_r_num_s // self.in_dim
                o_s = num_r_num_s % self.in_dim
                self.s_his_cache[ent_s] = self.update_cache(self.s_his_cache[ent_s], rr, o_s.view(-1, 1))
                self.s_his_cache_t[ent_s] = self.latest_time.item()

            _, ob, prob_ob = global_model.predict(t, self.graph_dict, subject=False)
            # NOTE(review): this overwrites the probabilities returned by the
            # global model with a softmax over its raw logits — confirm intended.
            prob_ob = torch.softmax(ob.view(-1), dim=0)
            m = torch.distributions.categorical.Categorical(prob_ob)
            objects = m.sample(torch.Size([self.num_k]))
            prob_objects = prob_ob[objects]
            o_done = set()
            for cand_o, prob_o in zip(objects, prob_objects):
                if cand_o in o_done:
                    continue
                else:
                    o_done.add(cand_o)
                oo = torch.LongTensor([cand_o]).repeat(self.num_rels)
                rr = torch.arange(0, self.num_rels)
                probs = prob_o * self.pred_r_rank2(oo, rr, subject=False)
                probs, indices = torch.topk(probs.view(-1), self.num_k, sorted=False)
                self.preds_list_o[cand_o] = probs.view(-1)
                self.preds_ind_o[cand_o] = indices.view(-1)

            o_to_id = dict()
            o_num = len(self.preds_list_o.keys())
            prob_tensor = torch.zeros(o_num * self.num_k)
            idx = 0
            for i, key_o in enumerate(self.preds_list_o.keys()):
                o_to_id[idx] = key_o
                prob_tensor[i * self.num_k: (i + 1) * self.num_k] = self.preds_list_o[key_o]
                idx += 1
            _, triple_candidates = torch.topk(prob_tensor, self.num_k, sorted=False)
            indices = triple_candidates // self.num_k
            for i, cand_idx in enumerate(indices):
                ent_o = o_to_id[cand_idx.item()]
                num_r_num_o = self.preds_ind_o[ent_o][triple_candidates[i] % self.num_k]
                rr = num_r_num_o // self.in_dim
                s_o = num_r_num_o % self.in_dim
                self.o_his_cache[ent_o] = self.update_cache(self.o_his_cache[ent_o], rr, s_o.view(-1, 1))
                self.o_his_cache_t[ent_o] = self.latest_time.item()

            # Materialize the sampled triples as the snapshot for latest_time
            # and refresh the corresponding global embedding.
            self.data = get_data(self.s_his_cache, self.o_his_cache)
            self.graph_dict[self.latest_time.item()] = get_big_graph(self.data, self.num_rels)

            global_emb_prev_t, _, _ = global_model.predict(self.latest_time, self.graph_dict, subject=True)
            self.global_emb[self.latest_time.item()] = global_emb_prev_t

            for ee in range(self.in_dim):
                if len(self.s_his_cache[ee]) != 0:
                    # Keep at most seq_len history entries per entity.
                    while len(self.s_hist_test[ee]) >= self.seq_len:
                        self.s_hist_test[ee].pop(0)
                        self.s_hist_test_t[ee].pop(0)
                    self.s_hist_test[ee].append(self.s_his_cache[ee].cpu().numpy().copy())
                    self.s_hist_test_t[ee].append(self.s_his_cache_t[ee])
                    self.s_his_cache[ee] = []
                    self.s_his_cache_t[ee] = None
                if len(self.o_his_cache[ee]) != 0:
                    while len(self.o_hist_test[ee]) >= self.seq_len:
                        self.o_hist_test[ee].pop(0)
                        self.o_hist_test_t[ee].pop(0)
                    self.o_hist_test[ee].append(self.o_his_cache[ee].cpu().numpy().copy())
                    self.o_hist_test_t[ee].append(self.o_his_cache_t[ee])
                    self.o_his_cache[ee] = []
                    self.o_his_cache_t[ee] = None

            self.latest_time = t
            self.data = None
            self.preds_list_s = defaultdict(lambda: torch.zeros(self.num_k))
            self.preds_ind_s = defaultdict(lambda: torch.zeros(self.num_k))
            self.preds_list_o = defaultdict(lambda: torch.zeros(self.num_k))
            self.preds_ind_o = defaultdict(lambda: torch.zeros(self.num_k))

        if len(s_hist[0]) == 0 or len(self.s_hist_test[s]) == 0:
            s_h = torch.zeros(self.h_dim).cuda()
        else:
            s_history = self.s_hist_test[s]
            s_history_t = self.s_hist_test_t[s]
            inp, _ = self.aggregator.predict((s_history, s_history_t), s, r, self.ent_embeds, self.rel_embeds[:self.num_rels], self.graph_dict, self.global_emb, reverse=False)
            tt, s_h = self.encoder(inp.view(1, len(s_history), 4 * self.h_dim))
            s_h = s_h.squeeze()

        if len(o_hist[0]) == 0 or len(self.o_hist_test[o]) == 0:
            o_h = torch.zeros(self.h_dim).cuda()
        else:
            o_history = self.o_hist_test[o]
            o_history_t = self.o_hist_test_t[o]
            inp, _ = self.aggregator.predict((o_history, o_history_t), o, r, self.ent_embeds, self.rel_embeds[self.num_rels:], self.graph_dict, self.global_emb, reverse=True)
            tt, o_h = self.encoder(inp.view(1, len(o_history), 4 * self.h_dim))
            o_h = o_h.squeeze()

        ob_pred = self.linear(torch.cat((self.ent_embeds[s], s_h, self.rel_embeds[:self.num_rels][r]), dim=0))
        sub_pred = self.linear(torch.cat((self.ent_embeds[o], o_h, self.rel_embeds[self.num_rels:][r]), dim=0))
        loss_sub = self.criterion(ob_pred.view(1, -1), o.view(-1))
        loss_ob = self.criterion(sub_pred.view(1, -1), s.view(-1))

        loss = loss_sub + loss_ob
        return loss, sub_pred, ob_pred

    def evaluate(self, triplet, s_hist, o_hist, global_model):
        """Raw (unfiltered) ranks of the true subject/object for one triple."""
        s = triplet[0]
        r = triplet[1]
        o = triplet[2]
        loss, sub_pred, ob_pred = self.predict(triplet, s_hist, o_hist, global_model)
        o_label = o
        s_label = s
        # Rank = #strictly-better + half of the ties + 1.
        ob_pred_comp1 = (ob_pred > ob_pred[o_label]).data.cpu().numpy()
        ob_pred_comp2 = (ob_pred == ob_pred[o_label]).data.cpu().numpy()
        rank_ob = np.sum(ob_pred_comp1) + ((np.sum(ob_pred_comp2) - 1.0) / 2) + 1

        sub_pred_comp1 = (sub_pred > sub_pred[s_label]).data.cpu().numpy()
        sub_pred_comp2 = (sub_pred == sub_pred[s_label]).data.cpu().numpy()
        rank_sub = np.sum(sub_pred_comp1) + ((np.sum(sub_pred_comp2) - 1.0) / 2) + 1
        return np.array([rank_sub, rank_ob]), loss

    def evaluate_filter(self, triplet, s_hist, o_hist, global_model, all_triplets):
        """Filtered ranks: zero out scores of all other known true entities
        for the same (entity, relation) before ranking."""
        s = triplet[0]
        r = triplet[1]
        o = triplet[2]
        loss, sub_pred, ob_pred = self.predict(triplet, s_hist, o_hist, global_model)
        o_label = o
        s_label = s
        sub_pred = F.sigmoid(sub_pred)
        ob_pred = F.sigmoid(ob_pred)

        ground = ob_pred[o].clone()
        # Mask every known object for (s, r), then restore the ground truth.
        s_id = torch.nonzero(all_triplets[:, 0] == s).view(-1)
        idx = torch.nonzero(all_triplets[s_id, 1] == r).view(-1)
        idx = s_id[idx]
        idx = all_triplets[idx, 2]
        ob_pred[idx] = 0
        ob_pred[o_label] = ground

        ob_pred_comp1 = (ob_pred > ground).data.cpu().numpy()
        ob_pred_comp2 = (ob_pred == ground).data.cpu().numpy()
        rank_ob = np.sum(ob_pred_comp1) + ((np.sum(ob_pred_comp2) - 1.0) / 2) + 1

        ground = sub_pred[s].clone()
        # Mask every known subject for (r, o), then restore the ground truth.
        o_id = torch.nonzero(all_triplets[:, 2] == o).view(-1)
        idx = torch.nonzero(all_triplets[o_id, 1] == r).view(-1)
        idx = o_id[idx]
        idx = all_triplets[idx, 0]
        sub_pred[idx] = 0
        sub_pred[s_label] = ground

        sub_pred_comp1 = (sub_pred > ground).data.cpu().numpy()
        sub_pred_comp2 = (sub_pred == ground).data.cpu().numpy()
        rank_sub = np.sum(sub_pred_comp1) + ((np.sum(sub_pred_comp2) - 1.0) / 2) + 1
        return np.array([rank_sub, rank_ob]), loss

    def update_cache(self, s_his_cache, r, o_candidate):
        """Merge (r, o) candidate pairs into an entity's history cache,
        skipping objects already recorded under the same relation."""
        o_candidate = o_candidate % self.in_dim
        if len(s_his_cache) == 0:
            s_his_cache = torch.cat((r.view(-1, 1),
                                     o_candidate.view(-1, 1)),
                                    dim=1)
        else:
            temp = s_his_cache[torch.nonzero(s_his_cache[:, 0] == r).view(-1)]
            if len(temp) == 0:
                # First time this relation appears in the cache.
                forward = torch.cat((r.repeat(len(o_candidate), 1), o_candidate.view(-1, 1)), dim=1)
                s_his_cache = torch.cat((s_his_cache, forward), dim=0)
            else:
                # Only append objects not already cached for this relation.
                ent_list = temp[:, 1]
                tem = []
                for i in range(len(o_candidate)):
                    if o_candidate[i] not in ent_list:
                        tem.append(i)
                if len(tem) != 0:
                    forward = torch.cat((r.repeat(len(tem), 1), o_candidate[torch.LongTensor(tem)].view(-1, 1)), dim=1)
                    s_his_cache = torch.cat((s_his_cache, forward), dim=0)
        return s_his_cache
| 19,170 | 41.792411 | 175 | py |
RE-Net | RE-Net-master/Aggregator.py | import torch.nn as nn
import numpy as np
import torch
import torch.nn.functional as F
from utils import *
from RGCN import RGCNBlockLayer as RGCNLayer
class RGCNAggregator_global(nn.Module):
    """Aggregates whole-graph snapshots into per-timestamp global vectors.

    Each snapshot is run through a 2-layer RGCN and pooled over nodes
    (max or mean); the pooled vectors form a sequence fed to a GRU by the
    caller.
    """

    def __init__(self, h_dim, dropout, num_nodes, num_rels, num_bases, model, seq_len=10, maxpool=1):
        super(RGCNAggregator_global, self).__init__()
        self.h_dim = h_dim
        self.dropout = nn.Dropout(dropout)
        self.seq_len = seq_len
        self.num_rels = num_rels
        self.num_nodes = num_nodes
        self.model = model
        self.maxpool = maxpool  # 1 -> max pooling over nodes, else mean
        self.rgcn1 = RGCNLayer(self.h_dim, self.h_dim, 2 * self.num_rels, num_bases,
                               activation=F.relu, self_loop=True, dropout=dropout)
        self.rgcn2 = RGCNLayer(self.h_dim, self.h_dim, 2 * self.num_rels, num_bases,
                               activation=None, self_loop=True, dropout=dropout)

    def forward(self, t_list, ent_embeds, graph_dict, reverse):
        """Build a packed sequence of pooled snapshot embeddings.

        Args:
            t_list: timestamps, sorted descending by the caller so that the
                trailing zeros (empty entries) can be sliced off.
            ent_embeds: entity embedding table used to initialize node features.
            graph_dict: timestamp -> DGL graph snapshot.
            reverse: direction flag forwarded to the RGCN layers.
        """
        times = list(graph_dict.keys())
        # NOTE(review): assumes timestamps are evenly spaced (>= 2 snapshots)
        # — confirm against the datasets used.
        time_unit = times[1] - times[0]
        time_list = []
        len_non_zero = []
        num_non_zero = len(torch.nonzero(t_list))
        t_list = t_list[:num_non_zero]  # drop trailing zero timestamps
        for tim in t_list:
            length = int(tim // time_unit)
            if self.seq_len <= length:
                # Keep only the most recent seq_len snapshots before `tim`.
                time_list.append(torch.LongTensor(times[length - self.seq_len:length]))
                len_non_zero.append(self.seq_len)
            else:
                time_list.append(torch.LongTensor(times[:length]))
                len_non_zero.append(length)

        # Batch each unique snapshot exactly once; remember its batch index.
        unique_t = torch.unique(torch.cat(time_list))
        time_to_idx = dict()
        g_list = []
        idx = 0
        for tim in unique_t:
            time_to_idx[tim.item()] = idx
            idx += 1
            g_list.append(graph_dict[tim.item()])
        batched_graph = dgl.batch(g_list)
        batched_graph.ndata['h'] = ent_embeds[batched_graph.ndata['id']].view(-1, ent_embeds.shape[1])
        move_dgl_to_cuda(batched_graph)
        self.rgcn1(batched_graph, reverse)
        self.rgcn2(batched_graph, reverse)
        if self.maxpool == 1:
            global_info = dgl.max_nodes(batched_graph, 'h')
        else:
            global_info = dgl.mean_nodes(batched_graph, 'h')
        batched_graph.ndata.pop('h')

        # Scatter the pooled per-snapshot vectors into padded sequences.
        embed_seq_tensor = torch.zeros(len(len_non_zero), self.seq_len, self.h_dim).cuda()
        for i, times in enumerate(time_list):
            for j, t in enumerate(times):
                embed_seq_tensor[i, j, :] = global_info[time_to_idx[t.item()]]
        embed_seq_tensor = self.dropout(embed_seq_tensor)
        packed_input = torch.nn.utils.rnn.pack_padded_sequence(embed_seq_tensor,
                                                               len_non_zero,
                                                               batch_first=True)
        return packed_input

    def predict(self, t, ent_embeds, graph_dict, reverse):
        """Pooled embeddings of the (up to) seq_len snapshots preceding `t`."""
        times = list(graph_dict.keys())
        id = 0
        # Index of the first timestamp >= t (strictly earlier snapshots only).
        for tt in times:
            if tt >= t:
                break
            id += 1
        if self.seq_len <= id:
            timess = torch.LongTensor(times[id - self.seq_len:id])
        else:
            timess = torch.LongTensor(times[:id])
        g_list = []
        for tim in timess:
            move_dgl_to_cuda(graph_dict[tim.item()])
            g_list.append(graph_dict[tim.item()])
        batched_graph = dgl.batch(g_list)
        batched_graph.ndata['h'] = ent_embeds[batched_graph.ndata['id']].view(-1, ent_embeds.shape[1])
        move_dgl_to_cuda(batched_graph)
        self.rgcn1(batched_graph, reverse)
        self.rgcn2(batched_graph, reverse)
        if self.maxpool == 1:
            global_info = dgl.max_nodes(batched_graph, 'h')
        else:
            global_info = dgl.mean_nodes(batched_graph, 'h')
        batched_graph.ndata.pop('h')
        return global_info
class RGCNAggregator(nn.Module):
    """Per-entity history aggregator based on a 2-layer RGCN.

    Produces, for each entity with a non-empty history, a packed sequence of
    4*h_dim features (neighborhood embedding, entity embedding, relation
    embedding, global embedding) for the entity encoder, and a parallel
    3*h_dim sequence (without the relation) for the relation encoder.
    """

    def __init__(self, h_dim, dropout, num_nodes, num_rels, num_bases, model, seq_len=10):
        super(RGCNAggregator, self).__init__()
        self.h_dim = h_dim
        self.dropout = nn.Dropout(dropout)
        self.seq_len = seq_len
        self.num_rels = num_rels
        self.num_nodes = num_nodes
        self.model = model
        self.rgcn1 = RGCNLayer(self.h_dim, self.h_dim, 2 * self.num_rels, num_bases,
                               activation=F.relu, self_loop=True, dropout=dropout)
        self.rgcn2 = RGCNLayer(self.h_dim, self.h_dim, 2 * self.num_rels, num_bases,
                               activation=None, self_loop=True, dropout=dropout)

    def forward(self, s_hist, s, r, ent_embeds, rel_embeds, graph_dict, global_emb, reverse):
        """Training-time aggregation (uses the sorted history helper)."""
        length = 0
        for his in s_hist[0]:
            length += len(his)
        if length == 0:
            # No entity in the batch has any history.
            s_packed_input = None
        else:
            s_len_non_zero, s_tem, r_tem, g, node_ids_graph, global_emb_list = get_sorted_s_r_embed_rgcn(s_hist, s, r, ent_embeds, graph_dict, global_emb)
            if g is None:
                s_packed_input = None
            else:
                self.rgcn1(g, reverse)
                self.rgcn2(g, reverse)
                embeds_mean = g.ndata.pop('h')
                # Pick each entity's own node embedding per history step.
                embeds_mean = embeds_mean[torch.LongTensor(node_ids_graph)]
                embeds_split = torch.split(embeds_mean, s_len_non_zero.tolist())
                global_emb_list_split = torch.split(global_emb_list, s_len_non_zero.tolist())

                s_embed_seq_tensor = torch.zeros(len(s_len_non_zero), self.seq_len, 4 * self.h_dim).cuda()
                s_embed_seq_tensor_r = torch.zeros(len(s_len_non_zero), self.seq_len, 3 * self.h_dim).cuda()

                # Slow!!!
                for i, embeds in enumerate(embeds_split):
                    s_embed_seq_tensor[i, torch.arange(len(embeds)), :] = torch.cat(
                        (embeds, ent_embeds[s_tem[i]].repeat(len(embeds), 1),
                         rel_embeds[r_tem[i]].repeat(len(embeds), 1), global_emb_list_split[i]), dim=1)
                    s_embed_seq_tensor_r[i, torch.arange(len(embeds)), :] = torch.cat(
                        (embeds, ent_embeds[s_tem[i]].repeat(len(embeds), 1), global_emb_list_split[i]), dim=1)

                s_embed_seq_tensor = self.dropout(s_embed_seq_tensor)
                s_embed_seq_tensor_r = self.dropout(s_embed_seq_tensor_r)

                s_packed_input = torch.nn.utils.rnn.pack_padded_sequence(s_embed_seq_tensor,
                                                                         s_len_non_zero,
                                                                         batch_first=True)
                s_packed_input_r = torch.nn.utils.rnn.pack_padded_sequence(s_embed_seq_tensor_r,
                                                                           s_len_non_zero,
                                                                           batch_first=True)
        return s_packed_input, s_packed_input_r

    def predict_batch(self, s_hist, s, r, ent_embeds, rel_embeds, graph_dict, global_emb, reverse):
        """Inference-time aggregation for a batch (unsorted history helper)."""
        length = 0
        for his in s_hist[0]:
            length += len(his)
        if length == 0:
            s_packed_input = None
            s_packed_input_r = None
        else:
            s_len_non_zero, s_tem, r_tem, g, node_ids_graph, global_emb_list = get_s_r_embed_rgcn(s_hist, s, r, ent_embeds, graph_dict, global_emb)
            if g is None:
                s_packed_input = None
            else:
                self.rgcn1(g, reverse)
                self.rgcn2(g, reverse)
                embeds_mean = g.ndata.pop('h')
                embeds_mean = embeds_mean[torch.LongTensor(node_ids_graph)]
                embeds_split = torch.split(embeds_mean, s_len_non_zero.tolist())
                global_emb_list_split = torch.split(global_emb_list, s_len_non_zero.tolist())

                s_embed_seq_tensor = torch.zeros(len(s_len_non_zero), self.seq_len, 4 * self.h_dim).cuda()
                s_embed_seq_tensor_r = torch.zeros(len(s_len_non_zero), self.seq_len, 3 * self.h_dim).cuda()

                # Slow!!!
                for i, embeds in enumerate(embeds_split):
                    s_embed_seq_tensor[i, torch.arange(len(embeds)), :] = torch.cat(
                        (embeds, ent_embeds[s_tem[i]].repeat(len(embeds), 1),
                         rel_embeds[r_tem[i]].repeat(len(embeds), 1), global_emb_list_split[i]), dim=1)
                    s_embed_seq_tensor_r[i, torch.arange(len(embeds)), :] = torch.cat(
                        (embeds, ent_embeds[s_tem[i]].repeat(len(embeds), 1), global_emb_list_split[i]), dim=1)

                s_embed_seq_tensor = self.dropout(s_embed_seq_tensor)
                s_embed_seq_tensor_r = self.dropout(s_embed_seq_tensor_r)

                s_packed_input = torch.nn.utils.rnn.pack_padded_sequence(s_embed_seq_tensor,
                                                                         s_len_non_zero,
                                                                         batch_first=True)
                s_packed_input_r = torch.nn.utils.rnn.pack_padded_sequence(s_embed_seq_tensor_r,
                                                                           s_len_non_zero,
                                                                           batch_first=True)
        return s_packed_input, s_packed_input_r

    def predict(self, s_history, s, r, ent_embeds, rel_embeds, graph_dict, global_emb, reverse):
        """Single-entity inference: return raw (unpacked) feature rows, one
        per history step, for both the entity and relation encoders."""
        s_hist = s_history[0]
        s_hist_t = s_history[1]
        s_len_non_zero, s_tem, r_tem, g, node_ids_graph, global_emb_list = get_s_r_embed_rgcn(([s_hist], [s_hist_t]), s.view(-1, 1), r.view(-1, 1), ent_embeds,
                                                                                              graph_dict, global_emb)
        self.rgcn1(g, reverse)
        self.rgcn2(g, reverse)
        embeds_mean = g.ndata.pop('h')
        embeds = embeds_mean[torch.LongTensor(node_ids_graph)]
        inp = torch.zeros(len(s_hist), 4 * self.h_dim).cuda()
        inp[torch.arange(len(embeds)), :] = torch.cat(
            (embeds, ent_embeds[s].repeat(len(embeds), 1), rel_embeds[r].repeat(len(embeds), 1), global_emb_list), dim=1)
        inp_r = torch.zeros(len(s_hist), 3 * self.h_dim).cuda()
        inp_r[torch.arange(len(embeds)), :] = torch.cat((embeds, ent_embeds[s].repeat(len(embeds), 1), global_emb_list), dim=1)
        return inp, inp_r
class MeanAggregator(nn.Module):
    """History aggregator that mean-pools past neighbor embeddings.

    For each history step the neighbors' entity embeddings are averaged
    (optionally passed through a one-layer GCN + ReLU), concatenated with the
    entity's own embedding, and packed for a GRU encoder.
    """

    def __init__(self, h_dim, dropout, seq_len=10, gcn=False):
        super(MeanAggregator, self).__init__()
        self.h_dim = h_dim
        self.dropout = nn.Dropout(dropout)
        self.seq_len = seq_len
        self.gcn = gcn
        if gcn:
            self.gcn_layer = nn.Linear(h_dim, h_dim)

    def forward(self, s_hist, s, r, ent_embeds, rel_embeds):
        s_len_non_zero, s_tem, r_tem, embeds_stack, len_s, embeds_split = get_sorted_s_r_embed(s_hist, s, r, ent_embeds)
        # To get mean vector at each time: build a sparse selector with one
        # row per history step and ones over that step's neighbor positions,
        # then a single sparse matmul sums each step's neighbor embeddings.
        curr = 0
        rows = []
        cols = []
        for i, leng in enumerate(len_s):
            rows.extend([i] * leng)
            cols.extend(list(range(curr, curr + leng)))
            curr += leng
        rows = torch.LongTensor(rows)
        cols = torch.LongTensor(cols)
        idxes = torch.stack([rows, cols], dim=0)

        mask_tensor = torch.sparse.FloatTensor(idxes, torch.ones(len(rows)))
        mask_tensor = mask_tensor.cuda()
        embeds_sum = torch.sparse.mm(mask_tensor, embeds_stack)
        embeds_mean = embeds_sum / torch.Tensor(len_s).cuda().view(-1, 1)
        if self.gcn:
            embeds_mean = self.gcn_layer(embeds_mean)
            embeds_mean = F.relu(embeds_mean)
        embeds_split = torch.split(embeds_mean, s_len_non_zero.tolist())

        s_embed_seq_tensor = torch.zeros(len(s_len_non_zero), self.seq_len, 2 * self.h_dim).cuda()
        # Slow!!!
        for i, embeds in enumerate(embeds_split):
            s_embed_seq_tensor[i, torch.arange(len(embeds)), :] = torch.cat(
                (embeds, ent_embeds[s_tem[i]].repeat(len(embeds), 1)), dim=1)

        s_embed_seq_tensor = self.dropout(s_embed_seq_tensor)

        s_packed_input = torch.nn.utils.rnn.pack_padded_sequence(s_embed_seq_tensor,
                                                                 s_len_non_zero,
                                                                 batch_first=True)
        return s_packed_input

    def predict(self, s_history, s, r, ent_embeds, rel_embeds):
        """Single-entity variant: one 2*h_dim feature row per history step."""
        inp = torch.zeros(len(s_history), 2 * self.h_dim).cuda()
        for i, s_o in enumerate(s_history):
            tem = torch.mean(ent_embeds[s_o], dim=0)
            if self.gcn:
                tem = F.relu(self.gcn_layer(tem))
            inp[i] = torch.cat(
                (tem, ent_embeds[s]), dim=0)
        return inp
class AttnAggregator(nn.Module):
    """History aggregator with additive attention over past neighbors.

    Attention weights are computed from (neighbor, entity, relation)
    concatenations via tanh(W.) @ v, softmaxed per history step; the weighted
    neighbor sum is concatenated with the entity and relation embeddings and
    packed for a GRU encoder.
    """

    def __init__(self, h_dim, dropout, seq_len=10):
        super(AttnAggregator, self).__init__()
        self.h_dim = h_dim
        self.dropout = nn.Dropout(dropout)
        self.seq_len = seq_len
        self.attn_s = nn.Linear(3 * h_dim, h_dim)
        self.v_s = nn.Parameter(torch.Tensor(h_dim, 1))
        nn.init.xavier_uniform_(self.v_s, gain=nn.init.calculate_gain('relu'))

    def forward(self, s_hist, s, r, ent_embeds, rel_embeds):
        s_len_non_zero, s_tem, r_tem, embeds_stack, len_s, embeds_split = get_sorted_s_r_embed(s_hist, s, r, ent_embeds)

        s_embed_seq_tensor = torch.zeros(len(s_len_non_zero), self.seq_len, 3 * self.h_dim).cuda()
        curr = 0
        for i, s_l in enumerate(s_len_non_zero):
            # Make a batch, get first elements from all sequences, and get second elements from all sequences
            em = embeds_split[curr:curr + s_l]
            len_s = list(map(len, em))
            curr += s_l
            em_cat = torch.cat(em, dim=0)

            ss = ent_embeds[s_tem[i]]
            rr = rel_embeds[r_tem[i]]
            ss = ss.repeat(len(em_cat), 1)
            rr = rr.repeat(len(em_cat), 1)
            em_s_r = torch.cat((em_cat, ss, rr), dim=1)
            # Additive attention scores, softmaxed within each history step.
            weights = F.tanh(self.attn_s(em_s_r)) @ self.v_s
            weights_split = torch.split(weights, len_s)
            weights_cat = list(map(lambda x: F.softmax(x, dim=0), weights_split))
            # Weighted sum of neighbor embeddings per step.
            embeds = torch.stack(list(map(lambda x, y: torch.sum(x * y, dim=0), weights_cat, em)))

            s_embed_seq_tensor[i, torch.arange(len(embeds)), :] = torch.cat(
                (embeds, ent_embeds[s_tem[i]].repeat(len(embeds), 1),
                 rel_embeds[r_tem[i]].repeat(len(embeds), 1)), dim=1)

        s_embed_seq_tensor = self.dropout(s_embed_seq_tensor)

        s_packed_input = torch.nn.utils.rnn.pack_padded_sequence(s_embed_seq_tensor,
                                                                 s_len_non_zero,
                                                                 batch_first=True)
        return s_packed_input

    def predict(self, s_history, s, r, ent_embeds, rel_embeds):
        """Single-entity variant: one attended 3*h_dim row per history step."""
        inp = torch.zeros(len(s_history), 3 * self.h_dim).cuda()
        for i, s_s in enumerate(s_history):
            emb_s = ent_embeds[s_s]
            ss = ent_embeds[s].repeat(len(emb_s), 1)
            rr = rel_embeds[r].repeat(len(emb_s), 1)

            emb_s_r = torch.cat((emb_s, ss, rr), dim=1)
            weights = F.softmax(F.tanh(self.attn_s(emb_s_r)) @ self.v_s, dim=0)

            inp[i] = torch.cat((torch.sum(weights * emb_s, dim=0), ent_embeds[s], rel_embeds[r]), dim=0)
        return inp
| 15,591 | 41.835165 | 157 | py |
RE-Net | RE-Net-master/global_model.py | import torch.nn as nn
import numpy as np
import torch
import torch.nn.functional as F
from Aggregator import RGCNAggregator_global
from utils import *
import time
class RENet_global(nn.Module):
    """Global model of RE-Net: summarizes whole-graph snapshots over time and
    predicts which entities are active at the next timestamp.

    A per-timestamp pooled snapshot embedding (RGCNAggregator_global) is fed
    through a GRU; separate linear heads score entities as subjects and as
    objects.
    """

    def __init__(self, in_dim, h_dim, num_rels, dropout=0, model=0, seq_len=10, num_k=10, maxpool=1):
        super(RENet_global, self).__init__()
        self.in_dim = in_dim
        self.h_dim = h_dim
        self.num_rels = num_rels
        self.model = model
        self.seq_len = seq_len
        self.num_k = num_k

        self.ent_embeds = nn.Parameter(torch.Tensor(in_dim, h_dim))
        nn.init.xavier_uniform_(self.ent_embeds,
                                gain=nn.init.calculate_gain('relu'))
        self.dropout = nn.Dropout(dropout)
        self.encoder_global = nn.GRU(h_dim, h_dim, batch_first=True)
        self.aggregator = RGCNAggregator_global(h_dim, dropout, in_dim, num_rels, 100, model, seq_len, maxpool)
        self.linear_s = nn.Linear(h_dim, in_dim)  # subject-distribution head
        self.linear_o = nn.Linear(h_dim, in_dim)  # object-distribution head
        self.global_emb = None

    def forward(self, t_list, true_prob_s, true_prob_o, graph_dict, subject=True):
        """Soft-cross-entropy training loss against the empirical entity
        distribution of each timestamp in `t_list`."""
        if subject:
            reverse = False
            linear = self.linear_s
            true_prob = true_prob_o
        else:
            reverse = True
            linear = self.linear_o
            true_prob = true_prob_s
        # Sort timestamps descending so the aggregator can pack sequences.
        sorted_t, idx = t_list.sort(0, descending=True)
        packed_input = self.aggregator(sorted_t, self.ent_embeds, graph_dict, reverse=reverse)
        tt, s_q = self.encoder_global(packed_input)
        s_q = s_q.squeeze()
        # Pad dropped (zero-length) sequences with zero hidden states.
        s_q = torch.cat((s_q, torch.zeros(len(t_list) - len(s_q), self.h_dim).cuda()), dim=0)
        pred = linear(s_q)
        loss = soft_cross_entropy(pred, true_prob[idx])
        return loss

    def get_global_emb(self, t_list, graph_dict):
        """Precompute the global hidden state for every timestamp in t_list."""
        global_emb = dict()
        times = list(graph_dict.keys())
        time_unit = times[1] - times[0]

        prev_t = 0
        for t in t_list:
            if t == 0:
                continue
            emb, _, _ = self.predict(t, graph_dict)
            # NOTE(review): the embedding predicted for time t is stored under
            # the previous timestamp's key — confirm this offset is intended.
            global_emb[prev_t] = emb.detach_()
            prev_t = t
        global_emb[t_list[-1]], _, _ = self.predict(t_list[-1] + int(time_unit), graph_dict)
        global_emb[t_list[-1]].detach_()
        return global_emb

    """
    Prediction function in testing
    """
    def predict(self, t, graph_dict, subject=True):  # Predict s at time t, so <= t-1 graphs are used.
        if subject:
            linear = self.linear_s
            reverse = False
        else:
            linear = self.linear_o
            reverse = True
        rnn_inp = self.aggregator.predict(t, self.ent_embeds, graph_dict, reverse=reverse)
        tt, s_q = self.encoder_global(rnn_inp.view(1, -1, self.h_dim))
        sub = linear(s_q)
        prob_sub = torch.softmax(sub.view(-1), dim=0)
        # Returns (hidden state, raw scores, softmaxed entity probabilities).
        return s_q, sub, prob_sub

    def update_global_emb(self, t, graph_dict):
        # Intentionally a no-op placeholder.
        pass
| 3,002 | 29.333333 | 111 | py |
RE-Net | RE-Net-master/train.py | import argparse
import numpy as np
import time
import torch
import utils
import os
from model import RENet
from global_model import RENet_global
from sklearn.utils import shuffle
import pickle
def train(args):
    """Train the local RE-Net model on top of a pre-trained global model.

    Loads the dataset quadruples and precomputed history/graph pickles,
    restores the global-model checkpoint (which must already exist — it is
    produced by the separate global pre-training step), trains the local
    model, and in the second half of training periodically evaluates
    filtered metrics on the validation split, checkpointing whenever the
    filtered MRR improves.

    Args:
        args: parsed command-line namespace (see the ``__main__`` block).
    """
    # load data
    num_nodes, num_rels = utils.get_total_number('./data/' + args.dataset, 'stat.txt')
    if args.dataset == 'icews_know':
        # NOTE(review): this dataset has no separate validation split — the
        # test split is reused for validation.
        train_data, train_times = utils.load_quadruples('./data/' + args.dataset, 'train.txt')
        valid_data, valid_times = utils.load_quadruples('./data/' + args.dataset, 'test.txt')
        test_data, test_times = utils.load_quadruples('./data/' + args.dataset, 'test.txt')
        total_data, total_times = utils.load_quadruples('./data/' + args.dataset, 'train.txt', 'test.txt')
    else:
        train_data, train_times = utils.load_quadruples('./data/' + args.dataset, 'train.txt')
        valid_data, valid_times = utils.load_quadruples('./data/' + args.dataset, 'valid.txt')
        test_data, test_times = utils.load_quadruples('./data/' + args.dataset, 'test.txt')
        total_data, total_times = utils.load_quadruples('./data/' + args.dataset, 'train.txt', 'valid.txt','test.txt')
    # check cuda
    use_cuda = args.gpu >= 0 and torch.cuda.is_available()
    # Fixed seed for reproducibility of shuffling / initialization.
    seed = 999
    np.random.seed(seed)
    torch.manual_seed(seed)
    if use_cuda:
        torch.cuda.set_device(args.gpu)
    os.makedirs('models', exist_ok=True)
    os.makedirs('models/'+ args.dataset, exist_ok=True)
    model_state_file = 'models/' + args.dataset + '/rgcn.pth'
    model_graph_file = 'models/' + args.dataset + '/rgcn_graph.pth'
    model_state_global_file2 = 'models/' + args.dataset + '/max' + str(args.maxpool) + 'rgcn_global2.pth'
    model_state_global_file = 'models/' + args.dataset + '/max' + str(args.maxpool) + 'rgcn_global.pth'
    model_state_file_backup = 'models/' + args.dataset + '/rgcn_backup.pth'
    print("start training...")
    model = RENet(num_nodes,
                    args.n_hidden,
                    num_rels,
                    dropout=args.dropout,
                    model=args.model,
                    seq_len=args.seq_len,
                    num_k=args.num_k)
    global_model = RENet_global(num_nodes,
                                    args.n_hidden,
                                    num_rels,
                                    dropout=args.dropout,
                                    model=args.model,
                                    seq_len=args.seq_len,
                                    num_k=args.num_k, maxpool=args.maxpool)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=0.00001)
    # Restore the pre-trained global model; raises if the checkpoint is
    # missing (run the global pre-training step first).
    checkpoint_global = torch.load(model_state_global_file, map_location=lambda storage, loc: storage)
    global_model.load_state_dict(checkpoint_global['state_dict'])
    global_emb = checkpoint_global['global_emb']
    model.global_emb = global_emb
    if use_cuda:
        model.cuda()
        global_model.cuda()
    train_sub = '/train_history_sub.txt'
    train_ob = '/train_history_ob.txt'
    if args.dataset == 'icews_know':
        valid_sub = '/test_history_sub.txt'
        valid_ob = '/test_history_ob.txt'
    else:
        valid_sub = '/dev_history_sub.txt'
        valid_ob = '/dev_history_ob.txt'
    # History / graph pickles are produced by the dataset preprocessing step.
    with open('./data/' + args.dataset+'/train_graphs.txt', 'rb') as f:
        graph_dict = pickle.load(f)
    model.graph_dict = graph_dict
    with open('data/' + args.dataset+'/test_history_sub.txt', 'rb') as f:
        s_history_test_data = pickle.load(f)
    with open('data/' + args.dataset+'/test_history_ob.txt', 'rb') as f:
        o_history_test_data = pickle.load(f)
    s_history_test = s_history_test_data[0]
    s_history_test_t = s_history_test_data[1]
    o_history_test = o_history_test_data[0]
    o_history_test_t = o_history_test_data[1]
    with open('./data/' + args.dataset+train_sub, 'rb') as f:
        s_history_data = pickle.load(f)
    with open('./data/' + args.dataset+train_ob, 'rb') as f:
        o_history_data = pickle.load(f)
    with open('./data/' + args.dataset+valid_sub, 'rb') as f:
        s_history_valid_data = pickle.load(f)
    with open('./data/' + args.dataset+valid_ob, 'rb') as f:
        o_history_valid_data = pickle.load(f)
    valid_data = torch.from_numpy(valid_data)
    s_history = s_history_data[0]
    s_history_t = s_history_data[1]
    o_history = o_history_data[0]
    o_history_t = o_history_data[1]
    s_history_valid = s_history_valid_data[0]
    s_history_valid_t = s_history_valid_data[1]
    o_history_valid = o_history_valid_data[0]
    o_history_valid_t = o_history_valid_data[1]
    total_data = torch.from_numpy(total_data)
    if use_cuda:
        total_data = total_data.cuda()
    epoch = 0
    best_mrr = 0
    while True:
        model.train()
        if epoch == args.max_epochs:
            break
        epoch += 1
        loss_epoch = 0
        t0 = time.time()
        # Shuffle quadruples together with their aligned history lists.
        train_data_shuffle, s_history_shuffle, s_history_t_shuffle, o_history_shuffle, o_history_t_shuffle = shuffle(train_data, s_history, s_history_t,
                                                                                           o_history, o_history_t)
        for batch_data, s_hist, s_hist_t, o_hist, o_hist_t in utils.make_batch2(train_data_shuffle, s_history_shuffle, s_history_t_shuffle,
                                                                                o_history_shuffle, o_history_t_shuffle, args.batch_size):
            # break
            batch_data = torch.from_numpy(batch_data).long()
            if use_cuda:
                batch_data = batch_data.cuda()
            # Sum of the subject- and object-prediction losses.
            loss_s = model(batch_data, (s_hist, s_hist_t), (o_hist, o_hist_t), graph_dict, subject=True)
            loss_o = model(batch_data, (s_hist, s_hist_t), (o_hist, o_hist_t), graph_dict, subject=False)
            loss = loss_s + loss_o
            loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_norm)  # clip gradients
            optimizer.step()
            optimizer.zero_grad()
            loss_epoch += loss.item()
        t3 = time.time()
        print("Epoch {:04d} | Loss {:.4f} | time {:.4f}".
              format(epoch, loss_epoch/(len(train_data)/args.batch_size), t3 - t0))
        # Validation only starts in the second half of training.
        if epoch % args.valid_every == 0 and epoch >= int(args.max_epochs/2):
            model.eval()
            global_model.eval()
            total_loss = 0
            total_ranks = np.array([])
            model.init_history(train_data, (s_history, s_history_t), (o_history, o_history_t), valid_data,
                               (s_history_valid, s_history_valid_t), (o_history_valid, o_history_valid_t), test_data,
                               (s_history_test, s_history_test_t), (o_history_test, o_history_test_t))
            model.latest_time = valid_data[0][3]
            for i in range(len(valid_data)):
                batch_data = valid_data[i]
                s_hist = s_history_valid[i]
                o_hist = o_history_valid[i]
                s_hist_t = s_history_valid_t[i]
                o_hist_t = o_history_valid_t[i]
                if use_cuda:
                    batch_data = batch_data.cuda()
                with torch.no_grad():
                    # Filtered ranks: known true triples are excluded from the ranking.
                    ranks, loss = model.evaluate_filter(batch_data, (s_hist, s_hist_t), (o_hist, o_hist_t), global_model, total_data)
                    total_ranks = np.concatenate((total_ranks, ranks))
                    total_loss += loss.item()
            mrr = np.mean(1.0 / total_ranks)
            mr = np.mean(total_ranks)
            hits = []
            for hit in [1, 3, 10]:
                avg_count = np.mean((total_ranks <= hit))
                hits.append(avg_count)
                print("valid Hits (filtered) @ {}: {:.6f}".format(hit, avg_count))
            print("valid MRR (filtered): {:.6f}".format(mrr))
            print("valid MR (filtered): {:.6f}".format(mr))
            print("valid Loss: {:.6f}".format(total_loss / (len(valid_data))))
            # Checkpoint model, global model, and graph cache on MRR improvement.
            if mrr > best_mrr:
                best_mrr = mrr
                torch.save({'state_dict': model.state_dict(), 'epoch': epoch,
                            's_hist': model.s_hist_test, 's_cache': model.s_his_cache,
                            'o_hist': model.o_hist_test, 'o_cache': model.o_his_cache,
                            's_hist_t': model.s_hist_test_t, 's_cache_t': model.s_his_cache_t,
                            'o_hist_t': model.o_hist_test_t, 'o_cache_t': model.o_his_cache_t,
                            'latest_time': model.latest_time, 'global_emb': model.global_emb},
                           model_state_file)
                torch.save({'state_dict': global_model.state_dict(), 'epoch': epoch,
                            's_hist': model.s_hist_test, 's_cache': model.s_his_cache,
                            'o_hist': model.o_hist_test, 'o_cache': model.o_his_cache,
                            's_hist_t': model.s_hist_test_t, 's_cache_t': model.s_his_cache_t,
                            'o_hist_t': model.o_hist_test_t, 'o_cache_t': model.o_his_cache_t,
                            'latest_time': model.latest_time, 'global_emb': global_model.global_emb},
                           model_state_global_file2)
                with open(model_graph_file, 'wb') as fp:
                    pickle.dump(model.graph_dict, fp)
    print("training done")
# Command-line entry point: parse hyper-parameters and launch training.
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='RENet')

    # Optimisation / model hyper-parameters.
    parser.add_argument("--dropout", type=float, default=0.5, help="dropout probability")
    parser.add_argument("--n-hidden", type=int, default=200, help="number of hidden units")
    parser.add_argument("--gpu", type=int, default=0, help="gpu")
    parser.add_argument("--lr", type=float, default=1e-3, help="learning rate")
    parser.add_argument("-d", "--dataset", type=str, default='ICEWS18', help="dataset to use")
    parser.add_argument("--grad-norm", type=float, default=1.0, help="norm to clip gradient to")
    parser.add_argument("--max-epochs", type=int, default=20, help="maximum epochs")
    parser.add_argument("--model", type=int, default=0)
    parser.add_argument("--seq-len", type=int, default=10)
    parser.add_argument("--num-k", type=int, default=1000, help="cuttoff position")
    parser.add_argument("--batch-size", type=int, default=1024)
    parser.add_argument("--rnn-layers", type=int, default=1)
    parser.add_argument("--maxpool", type=int, default=1)

    # Run-mode flags.
    parser.add_argument('--backup', action='store_true')
    parser.add_argument("--valid-every", type=int, default=1)
    parser.add_argument('--valid', action='store_true')
    parser.add_argument('--raw', action='store_true')

    args = parser.parse_args()
    print(args)
    train(args)
| 10,762 | 43.292181 | 152 | py |
RE-Net | RE-Net-master/baselines/TATransE.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-12-24 01:45:03
# @Author : jimmy (jimmywangheng@qq.com)
# @Link : http://sdcs.sysu.edu.cn
# @Version : $Id$
import os
import torch
torch.multiprocessing.set_start_method("spawn")
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import time
import datetime
import random
from utils import *
from data import *
from evaluation_TATransE import *
import loss as loss
import model as model
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
else:
longTensor = torch.LongTensor
floatTensor = torch.FloatTensor
"""
The meaning of parameters:
self.dataset: Which dataset is used to train the model? Such as 'FB15k', 'WN18', etc.
self.learning_rate: Initial learning rate (lr) of the model.
self.early_stopping_round: How many times will lr decrease? If set to 0, it remains constant.
self.L1_flag: If set to True, use L1 distance as dissimilarity; else, use L2.
self.embedding_size: The embedding size of entities and relations.
self.num_batches: How many batches to train in one epoch?
self.train_times: The maximum number of epochs for training.
self.margin: The margin set for MarginLoss.
self.filter: Whether to check a generated negative sample is false negative.
self.momentum: The momentum of the optimizer.
self.optimizer: Which optimizer to use? Such as SGD, Adam, etc.
self.loss_function: Which loss function to use? Typically, we use margin loss.
self.entity_total: The number of different entities.
self.relation_total: The number of different relations.
self.batch_size: How many instances are contained in one batch?
"""
class Config(object):
    """Hyper-parameter bundle for TA-TransE training.

    Dataset-dependent counts (entity_total, relation_total, batch_size)
    start at zero and are filled in by the training script.
    """

    def __init__(self):
        # Optimisation settings.
        self.learning_rate = 0.001
        self.momentum = 0.9
        self.optimizer = optim.Adam
        self.loss_function = loss.marginLoss
        self.loss_type = 0
        self.margin = 1.0
        self.train_times = 1000
        self.early_stopping_round = 0
        # Model settings.
        self.embedding_size = 100
        self.dropout = 0
        self.L1_flag = True
        # Data / sampling settings (overwritten by the caller).
        self.dataset = None
        self.filter = True
        self.entity_total = 0
        self.relation_total = 0
        self.batch_size = 0
if __name__ == "__main__":
import argparse
argparser = argparse.ArgumentParser()
"""
The meaning of some parameters:
seed: Fix the random seed. Except for 0, which means no setting of random seed.
port: The port number used by hyperboard,
which is a demo showing training curves in real time.
You can refer to https://github.com/WarBean/hyperboard to know more.
num_processes: Number of processes used to evaluate the result.
"""
argparser.add_argument('-dr', '--dropout', type=float, default=0)
argparser.add_argument('-d', '--dataset', type=str)
argparser.add_argument('-l', '--learning_rate', type=float, default=0.001)
argparser.add_argument('-es', '--early_stopping_round', type=int, default=10)
argparser.add_argument('-L', '--L1_flag', type=int, default=1)
argparser.add_argument('-em', '--embedding_size', type=int, default=100)
# argparser.add_argument('-nb', '--num_batches', type=int, default=100)
argparser.add_argument('-bs', '--batch_size', type=int, default=512)
argparser.add_argument('-n', '--train_times', type=int, default=1000)
argparser.add_argument('-m', '--margin', type=float, default=1.0)
argparser.add_argument('-f', '--filter', type=int, default=1)
argparser.add_argument('-mo', '--momentum', type=float, default=0.9)
argparser.add_argument('-s', '--seed', type=int, default=0)
argparser.add_argument('-op', '--optimizer', type=int, default=1)
argparser.add_argument('-lo', '--loss_type', type=int, default=0)
argparser.add_argument('-p', '--port', type=int, default=5000)
argparser.add_argument('-np', '--num_processes', type=int, default=4)
argparser.add_argument('-test', '--test', type=int, default=0)
args = argparser.parse_args()
if args.seed != 0:
torch.manual_seed(args.seed)
trainTotal, trainList, trainDict, trainTimes = load_quadruples('./data/' + args.dataset + '_TA', 'train2id.txt', 'train_tem.npy')
validTotal, validList, validDict, validTimes = load_quadruples('./data/' + args.dataset + '_TA', 'valid2id.txt', 'valid_tem.npy')
quadrupleTotal, quadrupleList, tripleDict, _ = load_quadruples('./data/' + args.dataset + '_TA', 'train2id.txt', 'train_tem.npy', 'valid2id.txt', 'valid_tem.npy', 'test2id.txt', 'test_tem.npy')
config = Config()
config.dropout = args.dropout
config.dataset = args.dataset
config.learning_rate = args.learning_rate
config.early_stopping_round = args.early_stopping_round
if args.L1_flag == 1:
config.L1_flag = True
else:
config.L1_flag = False
config.embedding_size = args.embedding_size
# config.num_batches = args.num_batches
config.train_times = args.train_times
config.margin = args.margin
if args.filter == 1:
config.filter = True
else:
config.filter = False
config.momentum = args.momentum
if args.optimizer == 0:
config.optimizer = optim.SGD
elif args.optimizer == 1:
config.optimizer = optim.Adam
elif args.optimizer == 2:
config.optimizer = optim.RMSprop
if args.loss_type == 0:
config.loss_function = loss.marginLoss
config.entity_total, config.relation_total, _ = get_total_number('./data/' + args.dataset + '_TA', 'stat.txt')
# config.batch_size = trainTotal // config.num_batches
config.batch_size = args.batch_size
loss_function = config.loss_function()
filename = '_'.join(
['dropout', str(args.dropout),
'l', str(args.learning_rate),
'es', str(args.early_stopping_round),
'L', str(args.L1_flag),
'em', str(args.embedding_size),
# 'nb', str(args.num_batches),
# 'n', str(args.train_times),
'bs', str(args.batch_size),
'm', str(args.margin),
'f', str(args.filter),
'mo', str(args.momentum),
's', str(args.seed),
'op', str(args.optimizer),
'lo', str(args.loss_type),]) + '_TATransE.ckpt'
os.makedirs('./model/' + args.dataset, exist_ok=True)
path_name = os.path.join('./model/' + args.dataset, filename)
if os.path.exists(path_name):
model = torch.load(path_name)
else:
model = model.TATransEModel(config)
if USE_CUDA:
model.cuda()
loss_function.cuda()
optimizer = config.optimizer(model.parameters(), lr=config.learning_rate)
margin = autograd.Variable(floatTensor([config.margin]))
start_time = time.time()
if args.test == 0:
# trainBatchList = getBatchList(trainList, config.num_batches)
trainBatchList = getBatchList(trainList, config.batch_size)
for epoch in range(config.train_times):
model.train()
total_loss = floatTensor([0.0])
random.shuffle(trainBatchList)
for batchList in trainBatchList:
if config.filter == True:
pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch, neg_r_batch, neg_time_batch = getBatch_filter_all(batchList,
config.entity_total, tripleDict)
else:
pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch, neg_r_batch, neg_time_batch = getBatch_raw_all(batchList,
config.entity_total)
batch_entity_set = set(pos_h_batch + pos_t_batch + neg_h_batch + neg_t_batch)
batch_relation_set = set(pos_r_batch + neg_r_batch)
batch_entity_list = list(batch_entity_set)
batch_relation_list = list(batch_relation_set)
pos_h_batch = autograd.Variable(longTensor(pos_h_batch))
pos_t_batch = autograd.Variable(longTensor(pos_t_batch))
pos_r_batch = autograd.Variable(longTensor(pos_r_batch))
pos_time_batch = autograd.Variable(longTensor(pos_time_batch))
neg_h_batch = autograd.Variable(longTensor(neg_h_batch))
neg_t_batch = autograd.Variable(longTensor(neg_t_batch))
neg_r_batch = autograd.Variable(longTensor(neg_r_batch))
neg_time_batch = autograd.Variable(longTensor(neg_time_batch))
model.zero_grad()
pos, neg = model(pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch, neg_r_batch, neg_time_batch)
if args.loss_type == 0:
losses = loss_function(pos, neg, margin)
else:
labels = torch.squeeze(torch.cat([torch.ones((pos_h_batch.size()[0], 1)), torch.zeros((neg_h_batch.size()[0], 1))]))
print(labels.size())
print(torch.cat([pos, neg]).size())
losses = loss_function(torch.cat([pos, neg]), labels)
ent_embeddings = model.ent_embeddings(torch.cat([pos_h_batch, pos_t_batch, neg_h_batch, neg_t_batch]))
rseq_embeddings = model.get_rseq(torch.cat([pos_r_batch, neg_r_batch]), torch.cat([pos_time_batch, neg_time_batch]))
losses = losses + loss.normLoss(ent_embeddings) + loss.normLoss(rseq_embeddings)
losses.backward()
optimizer.step()
total_loss += losses.data
if epoch % 5 == 0:
now_time = time.time()
print(now_time - start_time)
print("Train total loss: %d %f" % (epoch, total_loss[0]))
if config.early_stopping_round > 0:
if epoch == 0:
ent_embeddings = model.ent_embeddings.weight.data.cpu().numpy()
L1_flag = model.L1_flag
filter = model.filter
batchNum = 2 * len(validList)
validBatchList = getBatchList(validList, config.batch_size)
hit1ValidSum = 0
hit3ValidSum = 0
hit10ValidSum = 0
meanrankValidSum = 0
meanrerankValidSum = 0
batchNum = 2 * len(validList)
for batchList in validBatchList:
hit1ValidSubSum, hit3ValidSubSum, hit10ValidSubSum, meanrankValidSubSum, meanrerankValidSubSum, _ = evaluation_batch(
batchList, tripleDict, model, ent_embeddings, L1_flag, filter, head=0)
hit1ValidSum += hit1ValidSubSum
hit3ValidSum += hit3ValidSubSum
hit10ValidSum += hit10ValidSubSum
meanrankValidSum += meanrankValidSubSum
meanrerankValidSum += meanrerankValidSubSum
hit1Valid = hit1ValidSum / batchNum
hit3Valid = hit3ValidSum / batchNum
hit10Valid = hit10ValidSum / batchNum
meanrankValid = meanrankValidSum / batchNum
meanrerankValid = meanrerankValidSum / batchNum
best_meanrank = meanrankValid
torch.save(model, os.path.join('./model/' + args.dataset, filename))
best_epoch = 0
meanrank_not_decrease_time = 0
lr_decrease_time = 0
# Evaluate on validation set for every 5 epochs
elif epoch % 5 == 0:
ent_embeddings = model.ent_embeddings.weight.data.cpu().numpy()
L1_flag = model.L1_flag
filter = model.filter
batchNum = 2 * len(validList)
validBatchList = getBatchList(validList, config.batch_size)
hit1ValidSum = 0
hit3ValidSum = 0
hit10ValidSum = 0
meanrankValidSum = 0
meanrerankValidSum = 0
batchNum = 2 * len(validList)
for batchList in validBatchList:
hit1ValidSubSum, hit3ValidSubSum, hit10ValidSubSum, meanrankValidSubSum, meanrerankValidSubSum, _ = evaluation_batch(
batchList, tripleDict, model, ent_embeddings, L1_flag, filter, head=0)
hit1ValidSum += hit1ValidSubSum
hit3ValidSum += hit3ValidSubSum
hit10ValidSum += hit10ValidSubSum
meanrankValidSum += meanrankValidSubSum
meanrerankValidSum += meanrerankValidSubSum
hit1Valid = hit1ValidSum / batchNum
hit3Valid = hit3ValidSum / batchNum
hit10Valid = hit10ValidSum / batchNum
meanrankValid = meanrankValidSum / batchNum
meanrerankValid = meanrerankValidSum / batchNum
now_meanrank = meanrankValid
if now_meanrank < best_meanrank:
meanrank_not_decrease_time = 0
best_meanrank = now_meanrank
torch.save(model, os.path.join('./model/' + args.dataset, filename))
else:
meanrank_not_decrease_time += 1
# If the result hasn't improved for consecutive 5 evaluations, decrease learning rate
if meanrank_not_decrease_time == 5:
lr_decrease_time += 1
if lr_decrease_time == config.early_stopping_round:
break
else:
optimizer.param_groups[0]['lr'] *= 0.5
meanrank_not_decrease_time = 0
if (epoch + 1) % 5 == 0 or epoch == 0:
torch.save(model, os.path.join('./model/' + args.dataset, filename))
model.eval()
testTotal, testList, testDict, testTimes = load_quadruples('./data/' + args.dataset + '_TA', 'test2id.txt', 'test_tem.npy')
# testBatchList = getBatchList(testList, config.num_batches)
testBatchList = getBatchList(testList, config.batch_size)
ent_embeddings = model.ent_embeddings.weight.data.cpu().numpy()
L1_flag = model.L1_flag
filter = model.filter
# hit1Test, hit3Test, hit10Test, meanrankTest, meanrerankTest= evaluation(testList, tripleDict, model, ent_embeddings, L1_flag, filter, head=0)
hit1TestSum = 0
hit3TestSum = 0
hit10TestSum = 0
meanrankTestSum = 0
meanrerankTestSum = 0
batchNum = 2*len(testList)
for batchList in testBatchList:
hit1TestSubSum, hit3TestSubSum, hit10TestSubSum, meanrankTestSubSum, meanrerankTestSubSum, batchSubNum = evaluation_batch(batchList, tripleDict, model, ent_embeddings, L1_flag, filter, head=0)
hit1TestSum += hit1TestSubSum
hit3TestSum += hit3TestSubSum
hit10TestSum += hit10TestSubSum
meanrankTestSum += meanrankTestSubSum
meanrerankTestSum += meanrerankTestSubSum
# batchNum += batchSubNum
hit1Test = hit1TestSum / batchNum
hit3Test = hit3TestSum / batchNum
hit10Test = hit10TestSum / batchNum
meanrankTest = meanrankTestSum / batchNum
meanrerankTest = meanrerankTestSum / batchNum
writeList = [filename,
'testSet', '%.6f' % hit1Test, '%.6f' % hit3Test, '%.6f' % hit10Test, '%.6f' % meanrankTest, '%.6f' % meanrerankTest]
# Write the result into file
os.makedirs('./result/', exist_ok=True)
with open(os.path.join('./result/', args.dataset + '.txt'), 'a') as fw:
fw.write('\t'.join(writeList) + '\n')
| 16,007 | 43.715084 | 200 | py |
RE-Net | RE-Net-master/baselines/evaluation_TTransE.py | import torch
import torch.autograd as autograd
from sklearn.metrics.pairwise import pairwise_distances, cosine_similarity
from data import *
from eval_lib import *
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
else:
longTensor = torch.LongTensor
floatTensor = torch.FloatTensor
# Find the rank of ground truth tail in the distance array,
# If (head, num, rel) in tripleDict,
# skip without counting.
def argwhereTail(head, tail, rel, time, array, quadrupleDict):
    """Filtered rank of the true tail within a ranked candidate ``array``.

    Counts candidates that precede ``tail`` in ``array`` and are not
    themselves valid answers (i.e. ``(head, cand, rel, time)`` absent from
    ``quadrupleDict``). If ``tail`` never appears, the count over the whole
    array is returned.
    """
    rank = 0
    for cand in array:
        if cand == tail:
            break
        if (head, cand, rel, time) not in quadrupleDict:
            rank += 1
    return rank
# Find the rank of ground truth head in the distance array,
# If (head, num, rel) in tripleDict,
# skip without counting.
def argwhereHead(head, tail, rel, time, array, quadrupleDict):
    """Filtered rank of the true head within a ranked candidate ``array``.

    Counts candidates that precede ``head`` in ``array`` and are not
    themselves valid answers (i.e. ``(cand, tail, rel, time)`` absent from
    ``quadrupleDict``). If ``head`` never appears, the count over the whole
    array is returned.
    """
    rank = 0
    for cand in array:
        if cand == head:
            break
        if (cand, tail, rel, time) not in quadrupleDict:
            rank += 1
    return rank
def evaluation_helper(testList, tripleDict, model, ent_embeddings, rel_embeddings, tem_embeddings, L1_flag, filter, head=0):
    """Rank every test quadruple in both directions under the TTransE score.

    For each (h, t, r, time) quadruple, candidate tails are ranked by
    distance to h + (r + time) and candidate heads by distance to
    t - (r + time). Returns aggregate hit@{1,3,10} counts, total rank,
    total reciprocal rank, and the number of rankings performed.

    Notes:
        - ``model`` and ``head`` are accepted but unused here.
        - ``tripleDict`` actually holds *quadruples* (h, t, r, time) used to
          filter known true answers out of the ranking.
    """
    # embeddings are numpy arrays
    headList, tailList, relList, timeList = getFourElements(testList)
    h_e = ent_embeddings[headList]
    t_e = ent_embeddings[tailList]
    r_e = rel_embeddings[relList]
    time_e = tem_embeddings[timeList]
    # TTransE translation targets: expected tail and expected head vectors.
    c_t_e = h_e + (r_e + time_e)
    c_h_e = t_e - (r_e + time_e)
    # Tail prediction: distance from each expected tail to all entities.
    if L1_flag == True:
        dist = pairwise_distances(c_t_e, ent_embeddings, metric='manhattan')
    else:
        dist = pairwise_distances(c_t_e, ent_embeddings, metric='euclidean')
    rankArrayTail = np.argsort(dist, axis=1)
    if filter == False:
        rankListTail = [int(np.argwhere(elem[1]==elem[0])) for elem in zip(tailList, rankArrayTail)]
    else:
        rankListTail = [argwhereTail(elem[0], elem[1], elem[2], elem[3], elem[4], tripleDict)
                        for elem in zip(headList, tailList, relList, timeList, rankArrayTail)]
    isHit1ListTail = [x for x in rankListTail if x < 1]
    isHit3ListTail = [x for x in rankListTail if x < 3]
    isHit10ListTail = [x for x in rankListTail if x < 10]
    # Head prediction: distance from each expected head to all entities.
    if L1_flag == True:
        dist = pairwise_distances(c_h_e, ent_embeddings, metric='manhattan')
    else:
        dist = pairwise_distances(c_h_e, ent_embeddings, metric='euclidean')
    rankArrayHead = np.argsort(dist, axis=1)
    if filter == False:
        rankListHead = [int(np.argwhere(elem[1]==elem[0])) for elem in zip(headList, rankArrayHead)]
    else:
        rankListHead = [argwhereHead(elem[0], elem[1], elem[2], elem[3], elem[4], tripleDict)
                        for elem in zip(headList, tailList, relList, timeList, rankArrayHead)]
    # Reciprocal ranks use 1-based ranks (internal ranks are 0-based).
    re_rankListHead = [1.0/(x+1) for x in rankListHead]
    re_rankListTail = [1.0/(x+1) for x in rankListTail]
    isHit1ListHead = [x for x in rankListHead if x < 1]
    isHit3ListHead = [x for x in rankListHead if x < 3]
    isHit10ListHead = [x for x in rankListHead if x < 10]
    totalRank = sum(rankListTail) + sum(rankListHead)
    totalReRank = sum(re_rankListHead) + sum(re_rankListTail)
    hit1Count = len(isHit1ListTail) + len(isHit1ListHead)
    hit3Count = len(isHit3ListTail) + len(isHit3ListHead)
    hit10Count = len(isHit10ListTail) + len(isHit10ListHead)
    tripleCount = len(rankListTail) + len(rankListHead)
    return hit1Count, hit3Count, hit10Count, totalRank, totalReRank, tripleCount
def process_data(testList, tripleDict, model, ent_embeddings, rel_embeddings, tem_embeddings, L1_flag, filter, L, head):
    """Evaluate one partition and append its aggregate-count tuple to ``L``.

    The appended tuple is (hit1, hit3, hit10, totalRank, totalReRank,
    tripleCount) as returned by ``evaluation_helper``.
    """
    stats = evaluation_helper(testList, tripleDict, model, ent_embeddings,
                              rel_embeddings, tem_embeddings, L1_flag, filter, head)
    L.append(stats)
def evaluation(testList, tripleDict, model, ent_embeddings, rel_embeddings, tem_embeddings, L1_flag, filter, k=0, head=0):
    """Evaluate link prediction over ``testList`` and print averaged metrics.

    Args:
        k: if > 0, evaluate on a random subset of size k instead of the full
            list (sampled *with* replacement when k exceeds len(testList)).
        head: when 1 or 2, metrics are averaged over single-direction
            rankings; otherwise each quadruple is counted twice (head and
            tail prediction) — presumably head selects the prediction
            direction inside the helper; confirm.

    Returns:
        (hit@1, hit@3, hit@10, mean rank, mean reciprocal rank).
    """
    # embeddings are numpy arrays
    if k > len(testList):
        testList = random.choices(testList, k=k)
    elif k > 0:
        testList = random.sample(testList, k=k)
    L = []
    process_data(testList, tripleDict, model, ent_embeddings, rel_embeddings, tem_embeddings, L1_flag, filter, L, head)
    resultList = list(L)
    # Averaging denominator: one ranking per quadruple for head in (1, 2),
    # otherwise two (head- and tail-side).
    if head == 1 or head == 2:
        hit1 = sum([elem[0] for elem in resultList]) / len(testList)
        hit3 = sum([elem[1] for elem in resultList]) / len(testList)
        hit10 = sum([elem[2] for elem in resultList]) / len(testList)
        meanrank = sum([elem[3] for elem in resultList]) / len(testList)
        meanrerank = sum([elem[4] for elem in resultList]) / len(testList)
    else:
        hit1 = sum([elem[0] for elem in resultList]) / (2 * len(testList))
        hit3 = sum([elem[1] for elem in resultList]) / (2 * len(testList))
        hit10 = sum([elem[2] for elem in resultList]) / (2 * len(testList))
        meanrank = sum([elem[3] for elem in resultList]) / (2 * len(testList))
        meanrerank = sum([elem[4] for elem in resultList]) / (2 * len(testList))
    print('Meanrank: %.6f' % meanrank)
    print('Meanrerank: %.6f' % meanrerank)
    print('Hit@1: %.6f' % hit1)
    print('Hit@3: %.6f' % hit3)
    print('Hit@10: %.6f' % hit10)
    return hit1, hit3, hit10, meanrank, meanrerank
def evaluation_batch(testList, tripleDict, model, ent_embeddings, rel_embeddings, tem_embeddings, L1_flag, filter, k=0, head=0):
    """Evaluate one batch and return raw (unaveraged) aggregate counts.

    Unlike ``evaluation``, the sums are NOT divided by the number of
    rankings; the caller accumulates across batches and divides once.

    Returns:
        (hit@1 count, hit@3 count, hit@10 count, total rank, total
        reciprocal rank, number of rankings) where the last element is
        len(testList) when ``head`` is 1 or 2, else 2 * len(testList).
    """
    # embeddings are numpy arrays
    if k > len(testList):
        testList = random.choices(testList, k=k)
    elif k > 0:
        testList = random.sample(testList, k=k)
    bucket = []
    process_data(testList, tripleDict, model, ent_embeddings, rel_embeddings, tem_embeddings, L1_flag, filter, bucket, head)
    hit1 = sum(r[0] for r in bucket)
    hit3 = sum(r[1] for r in bucket)
    hit10 = sum(r[2] for r in bucket)
    meanrank = sum(r[3] for r in bucket)
    meanrerank = sum(r[4] for r in bucket)
    denom = len(testList) if head in (1, 2) else 2 * len(testList)
    return hit1, hit3, hit10, meanrank, meanrerank, denom
| 6,469 | 36.836257 | 193 | py |
RE-Net | RE-Net-master/baselines/TADistmult.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-12-24 01:45:03
# @Author : jimmy (jimmywangheng@qq.com)
# @Link : http://sdcs.sysu.edu.cn
# @Version : $Id$
import os
import torch
torch.multiprocessing.set_start_method("spawn")
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import time
import datetime
import random
from utils import *
from data import *
from evaluation_TADistMult import *
import loss as loss
import model as model
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
else:
longTensor = torch.LongTensor
floatTensor = torch.FloatTensor
"""
The meaning of parameters:
self.dataset: Which dataset is used to train the model? Such as 'FB15k', 'WN18', etc.
self.learning_rate: Initial learning rate (lr) of the model.
self.early_stopping_round: How many times will lr decrease? If set to 0, it remains constant.
self.L1_flag: If set to True, use L1 distance as dissimilarity; else, use L2.
self.embedding_size: The embedding size of entities and relations.
self.num_batches: How many batches to train in one epoch?
self.train_times: The maximum number of epochs for training.
self.margin: The margin set for MarginLoss.
self.filter: Whether to check a generated negative sample is false negative.
self.momentum: The momentum of the optimizer.
self.optimizer: Which optimizer to use? Such as SGD, Adam, etc.
self.loss_function: Which loss function to use? Typically, we use margin loss.
self.entity_total: The number of different entities.
self.relation_total: The number of different relations.
self.batch_size: How many instances are contained in one batch?
"""
class Config(object):
    """Hyper-parameter bundle for TA-DistMult training.

    Dataset-dependent counts (entity_total, relation_total, batch_size)
    start at zero and are filled in by the training script.
    """

    def __init__(self):
        # Optimisation settings.
        self.learning_rate = 0.001
        self.momentum = 0.9
        self.optimizer = optim.Adam
        self.loss_function = loss.binaryCrossLoss
        self.loss_type = 0
        self.margin = 1.0
        self.train_times = 1000
        self.early_stopping_round = 0
        # Model settings.
        self.embedding_size = 100
        self.dropout = 0
        self.L1_flag = True
        self.tem_total = 32  # presumably the temporal-token vocabulary size — confirm
        # Data / sampling settings (overwritten by the caller).
        self.dataset = None
        self.filter = True
        self.entity_total = 0
        self.relation_total = 0
        self.batch_size = 0
if __name__ == "__main__":
    import argparse
    argparser = argparse.ArgumentParser()
    """
    The meaning of some parameters:
    seed: Fix the random seed. Except for 0, which means no setting of random seed.
    port: The port number used by hyperboard,
    which is a demo showing training curves in real time.
    You can refer to https://github.com/WarBean/hyperboard to know more.
    num_processes: Number of processes used to evaluate the result.
    """
    # Command-line hyper-parameters; defaults mirror the Config class above.
    argparser.add_argument('-dr', '--dropout', type=float, default=0)
    argparser.add_argument('-d', '--dataset', type=str)
    argparser.add_argument('-l', '--learning_rate', type=float, default=0.001)
    argparser.add_argument('-es', '--early_stopping_round', type=int, default=10)
    argparser.add_argument('-L', '--L1_flag', type=int, default=1)
    argparser.add_argument('-em', '--embedding_size', type=int, default=100)
    # argparser.add_argument('-nb', '--num_batches', type=int, default=100)
    argparser.add_argument('-bs', '--batch_size', type=int, default=512)
    argparser.add_argument('-n', '--train_times', type=int, default=1000)
    argparser.add_argument('-m', '--margin', type=float, default=1.0)
    argparser.add_argument('-f', '--filter', type=int, default=1)
    argparser.add_argument('-mo', '--momentum', type=float, default=0.9)
    argparser.add_argument('-s', '--seed', type=int, default=0)
    argparser.add_argument('-op', '--optimizer', type=int, default=1)
    argparser.add_argument('-lo', '--loss_type', type=int, default=0)
    argparser.add_argument('-p', '--port', type=int, default=5000)
    argparser.add_argument('-np', '--num_processes', type=int, default=4)
    argparser.add_argument('-lm', '--lmbda', type=float, default=0.01)
    argparser.add_argument('-test', '--test', type=int, default=0)
    args = argparser.parse_args()
# NOTE(review): the following runs at module level (outside the __main__ guard).
if args.seed != 0:
    torch.manual_seed(args.seed)
# Load quadruple splits; tripleDict over all splits enables filtered ranking.
trainTotal, trainList, trainDict, trainTimes = load_quadruples('./data/' + args.dataset + '_TA', 'train2id.txt', 'train_tem.npy')
validTotal, validList, validDict, validTimes = load_quadruples('./data/' + args.dataset + '_TA', 'valid2id.txt', 'valid_tem.npy')
quadrupleTotal, quadrupleList, tripleDict, _ = load_quadruples('./data/' + args.dataset + '_TA', 'train2id.txt', 'train_tem.npy', 'valid2id.txt', 'valid_tem.npy', 'test2id.txt', 'test_tem.npy')
config = Config()
config.dropout = args.dropout
config.dataset = args.dataset
config.learning_rate = args.learning_rate
# Temporal token vocabulary size differs per dataset.
if args.dataset == "GDELT":
    config.tem_total = 46
if args.dataset == "ICEWS18":
    config.tem_total = 32
config.early_stopping_round = args.early_stopping_round
if args.L1_flag == 1:
    config.L1_flag = True
else:
    config.L1_flag = False
config.embedding_size = args.embedding_size
# config.num_batches = args.num_batches
config.train_times = args.train_times
config.margin = args.margin
if args.filter == 1:
    config.filter = True
else:
    config.filter = False
config.momentum = args.momentum
if args.optimizer == 0:
    config.optimizer = optim.SGD
elif args.optimizer == 1:
    config.optimizer = optim.Adam
elif args.optimizer == 2:
    config.optimizer = optim.RMSprop
if args.loss_type == 0:
    config.loss_function = loss.binaryCrossLoss
config.entity_total, config.relation_total, _ = get_total_number('./data/' + args.dataset + '_TA', 'stat.txt')
# config.batch_size = trainTotal // config.num_batches
config.batch_size = args.batch_size
loss_function = config.loss_function()
# Checkpoint filename encodes the hyper-parameter setting.
filename = '_'.join(
    ['dropout', str(args.dropout),
     'l', str(args.learning_rate),
     'es', str(args.early_stopping_round),
     'L', str(args.L1_flag),
     'em', str(args.embedding_size),
     # 'nb', str(args.num_batches),
     # 'n', str(args.train_times),
     'bs', str(args.batch_size),
     'm', str(args.margin),
     'f', str(args.filter),
     'mo', str(args.momentum),
     's', str(args.seed),
     'op', str(args.optimizer),
     'lo', str(args.loss_type),
     'lmbda', str(args.lmbda)]) + '_TADistmult.ckpt'
os.makedirs('./model/' + args.dataset, exist_ok=True)
path_name = os.path.join('./model/' + args.dataset, filename)
# Resume from an existing checkpoint with the same hyper-parameters, else
# build a fresh model. NOTE: rebinding 'model' shadows the imported module.
if os.path.exists(path_name):
    model = torch.load(path_name)
else:
    model = model.TADistmultModel(config)
if USE_CUDA:
    model.cuda()
    loss_function.cuda()
optimizer = config.optimizer(model.parameters(), lr=config.learning_rate)
margin = autograd.Variable(floatTensor([config.margin]))
start_time = time.time()
if args.test == 0:
    # ---- Training loop ----
    # trainBatchList = getBatchList(trainList, config.num_batches)
    trainBatchList = getBatchList(trainList, config.batch_size)
    for epoch in range(config.train_times):
        model.train()
        total_loss = floatTensor([0.0])
        random.shuffle(trainBatchList)
        for batchList in trainBatchList:
            # Sample one negative per positive; 'filter' rejects false negatives.
            if config.filter == True:
                pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch, neg_r_batch, neg_time_batch = getBatch_filter_all(batchList,
                    config.entity_total, tripleDict)
            else:
                pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch, neg_r_batch, neg_time_batch = getBatch_raw_all(batchList,
                    config.entity_total)
            batch_entity_set = set(pos_h_batch + pos_t_batch + neg_h_batch + neg_t_batch)
            batch_relation_set = set(pos_r_batch + neg_r_batch)
            batch_entity_list = list(batch_entity_set)
            batch_relation_list = list(batch_relation_set)
            pos_h_batch = autograd.Variable(longTensor(pos_h_batch))
            pos_t_batch = autograd.Variable(longTensor(pos_t_batch))
            pos_r_batch = autograd.Variable(longTensor(pos_r_batch))
            pos_time_batch = autograd.Variable(longTensor(pos_time_batch))
            neg_h_batch = autograd.Variable(longTensor(neg_h_batch))
            neg_t_batch = autograd.Variable(longTensor(neg_t_batch))
            neg_r_batch = autograd.Variable(longTensor(neg_r_batch))
            neg_time_batch = autograd.Variable(longTensor(neg_time_batch))
            model.zero_grad()
            pos, neg = model(pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch, neg_r_batch, neg_time_batch)
            if args.loss_type == 0:
                losses = loss_function(pos, neg)
            # L2 regularization on the embeddings touched by this batch.
            ent_embeddings = model.ent_embeddings(torch.cat([pos_h_batch, pos_t_batch, neg_h_batch, neg_t_batch]))
            rseq_embeddings = model.get_rseq(torch.cat([pos_r_batch, neg_r_batch]), torch.cat([pos_time_batch, neg_time_batch]))
            losses = losses + args.lmbda * (loss.regulLoss(ent_embeddings) + loss.regulLoss(rseq_embeddings))
            losses.backward()
            optimizer.step()
            total_loss += losses.data
        if epoch % 5 == 0:
            now_time = time.time()
            print(now_time - start_time)
            print("Train total loss: %d %f" % (epoch, total_loss[0]))
        if total_loss[0] > 3000: # problem: loss explosion, dont save model
            break
        if epoch % 5 == 0:
            # Report the loss on one random validation batch.
            if config.filter == True:
                pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch, neg_r_batch, neg_time_batch = getBatch_filter_random(
                    validList,
                    config.batch_size, config.entity_total, tripleDict)
            else:
                pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch, neg_r_batch, neg_time_batch = getBatch_raw_random(
                    validList,
                    config.batch_size, config.entity_total)
            pos_h_batch = autograd.Variable(longTensor(pos_h_batch))
            pos_t_batch = autograd.Variable(longTensor(pos_t_batch))
            pos_r_batch = autograd.Variable(longTensor(pos_r_batch))
            pos_time_batch = autograd.Variable(longTensor(pos_time_batch))
            neg_h_batch = autograd.Variable(longTensor(neg_h_batch))
            neg_t_batch = autograd.Variable(longTensor(neg_t_batch))
            neg_r_batch = autograd.Variable(longTensor(neg_r_batch))
            neg_time_batch = autograd.Variable(longTensor(neg_time_batch))
            pos, neg = model(pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch,
                             neg_r_batch, neg_time_batch)
            if args.loss_type == 0:
                losses = loss_function(pos, neg)
            else:
                losses = loss_function(pos, neg)
            ent_embeddings = model.ent_embeddings(torch.cat([pos_h_batch, pos_t_batch, neg_h_batch, neg_t_batch]))
            rseq_embeddings = model.get_rseq(torch.cat([pos_r_batch, neg_r_batch]),
                                             torch.cat([pos_time_batch, neg_time_batch]))
            losses = losses + args.lmbda * (loss.regulLoss(ent_embeddings) + loss.regulLoss(rseq_embeddings))
            print("Valid batch loss: %d %f" % (epoch, losses.item()))
        if config.early_stopping_round > 0:
            # Early stopping / lr decay, driven by validation mean rank.
            if epoch == 0:
                ent_embeddings = model.ent_embeddings.weight.data.cpu().numpy()
                L1_flag = model.L1_flag
                filter = model.filter
                batchNum = 2 * len(validList)
                validBatchList = getBatchList(validList, config.batch_size)
                hit1ValidSum = 0
                hit3ValidSum = 0
                hit10ValidSum = 0
                meanrankValidSum = 0
                meanrerankValidSum = 0
                batchNum = 2 * len(validList)
                for batchList in validBatchList:
                    hit1ValidSubSum, hit3ValidSubSum, hit10ValidSubSum, meanrankValidSubSum, meanrerankValidSubSum, _ = evaluation_batch(
                        batchList, tripleDict, model, ent_embeddings, L1_flag, filter, head=0)
                    hit1ValidSum += hit1ValidSubSum
                    hit3ValidSum += hit3ValidSubSum
                    hit10ValidSum += hit10ValidSubSum
                    meanrankValidSum += meanrankValidSubSum
                    meanrerankValidSum += meanrerankValidSubSum
                hit1Valid = hit1ValidSum / batchNum
                hit3Valid = hit3ValidSum / batchNum
                hit10Valid = hit10ValidSum / batchNum
                meanrankValid = meanrankValidSum / batchNum
                meanrerankValid = meanrerankValidSum / batchNum
                best_meanrank = meanrankValid
                torch.save(model, os.path.join('./model/' + args.dataset, filename))
                best_epoch = 0
                meanrank_not_decrease_time = 0
                lr_decrease_time = 0
            # Evaluate on validation set for every 5 epochs
            elif epoch % 5 == 0:
                ent_embeddings = model.ent_embeddings.weight.data.cpu().numpy()
                L1_flag = model.L1_flag
                filter = model.filter
                batchNum = 2 * len(validList)
                validBatchList = getBatchList(validList, config.batch_size)
                hit1ValidSum = 0
                hit3ValidSum = 0
                hit10ValidSum = 0
                meanrankValidSum = 0
                meanrerankValidSum = 0
                batchNum = 2 * len(validList)
                for batchList in validBatchList:
                    hit1ValidSubSum, hit3ValidSubSum, hit10ValidSubSum, meanrankValidSubSum, meanrerankValidSubSum, _ = evaluation_batch(
                        batchList, tripleDict, model, ent_embeddings, L1_flag, filter, head=0)
                    hit1ValidSum += hit1ValidSubSum
                    hit3ValidSum += hit3ValidSubSum
                    hit10ValidSum += hit10ValidSubSum
                    meanrankValidSum += meanrankValidSubSum
                    meanrerankValidSum += meanrerankValidSubSum
                hit1Valid = hit1ValidSum / batchNum
                hit3Valid = hit3ValidSum / batchNum
                hit10Valid = hit10ValidSum / batchNum
                meanrankValid = meanrankValidSum / batchNum
                meanrerankValid = meanrerankValidSum / batchNum
                now_meanrank = meanrankValid
                if now_meanrank < best_meanrank:
                    meanrank_not_decrease_time = 0
                    best_meanrank = now_meanrank
                    torch.save(model, os.path.join('./model/' + args.dataset, filename))
                else:
                    meanrank_not_decrease_time += 1
                    # If the result hasn't improved for consecutive 5 evaluations, decrease learning rate
                    if meanrank_not_decrease_time == 5:
                        lr_decrease_time += 1
                        if lr_decrease_time == config.early_stopping_round:
                            break
                        else:
                            optimizer.param_groups[0]['lr'] *= 0.5
                        meanrank_not_decrease_time = 0
        if (epoch + 1) % 5 == 0 or epoch == 0:
            torch.save(model, os.path.join('./model/' + args.dataset, filename))
# ---- Final evaluation on the test split ----
model.eval()
testTotal, testList, testDict, testTimes = load_quadruples('./data/' + args.dataset + '_TA', 'test2id.txt', 'test_tem.npy')
# testBatchList = getBatchList(testList, config.num_batches)
testBatchList = getBatchList(testList, config.batch_size)
ent_embeddings = model.ent_embeddings.weight.data.cpu().numpy()
L1_flag = model.L1_flag
filter = model.filter
# hit1Test, hit3Test, hit10Test, meanrankTest, meanrerankTest= evaluation(testList, tripleDict, model, ent_embeddings, L1_flag, filter, head=0)
hit1TestSum = 0
hit3TestSum = 0
hit10TestSum = 0
meanrankTestSum = 0
meanrerankTestSum = 0
# Each quadruple is ranked twice (head and tail replacement).
batchNum = 2*len(testList)
for batchList in testBatchList:
    hit1TestSubSum, hit3TestSubSum, hit10TestSubSum, meanrankTestSubSum, meanrerankTestSubSum, batchSubNum = evaluation_batch(batchList, tripleDict, model, ent_embeddings, L1_flag, filter, head=0)
    hit1TestSum += hit1TestSubSum
    hit3TestSum += hit3TestSubSum
    hit10TestSum += hit10TestSubSum
    meanrankTestSum += meanrankTestSubSum
    meanrerankTestSum += meanrerankTestSubSum
    # batchNum += batchSubNum
hit1Test = hit1TestSum / batchNum
hit3Test = hit3TestSum / batchNum
hit10Test = hit10TestSum / batchNum
meanrankTest = meanrankTestSum / batchNum
meanrerankTest = meanrerankTestSum / batchNum
writeList = [filename,
             'testSet', '%.6f' % hit1Test, '%.6f' % hit3Test, '%.6f' % hit10Test, '%.6f' % meanrankTest, '%.6f' % meanrerankTest]
# Write the result into file
os.makedirs('./result/', exist_ok=True)
with open(os.path.join('./result/', args.dataset), 'a') as fw:
    fw.write('\t'.join(writeList) + '\n')
| 18,104 | 44.835443 | 200 | py |
RE-Net | RE-Net-master/baselines/loss.py | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
else:
longTensor = torch.LongTensor
floatTensor = torch.FloatTensor
class marginLoss(nn.Module):
    """Margin ranking loss: sum over the batch of max(pos - neg + margin, 0)."""

    def __init__(self):
        super(marginLoss, self).__init__()

    def forward(self, pos, neg, margin):
        # floatTensor is the CUDA tensor type when a GPU is available, so the
        # zero baseline lives on the same device as the model's scores.
        zeros = autograd.Variable(floatTensor(pos.size()).zero_())
        gap = pos - neg + margin
        return torch.sum(torch.max(gap, zeros))
def orthogonalLoss(rel_embeddings, norm_embeddings):
    """Soft orthogonality penalty between relation vectors and normal vectors.

    For each row, penalizes the squared projection of the relation embedding
    onto the normal vector, scaled by the relation's squared norm.
    """
    projection = torch.sum(norm_embeddings * rel_embeddings, dim=1, keepdim=True) ** 2
    rel_sq_norm = torch.sum(rel_embeddings ** 2, dim=1, keepdim=True)
    return torch.sum(projection / rel_sq_norm)
def normLoss(embeddings, dim=1):
    """Penalize embedding rows whose squared L2 norm exceeds 1."""
    sq_norm = torch.sum(embeddings ** 2, dim=dim, keepdim=True)
    one = autograd.Variable(floatTensor([1.0]))
    zero = autograd.Variable(floatTensor([0.0]))
    return torch.sum(torch.max(sq_norm - one, zero))
def regulLoss(embeddings):
    """Mean-squared (L2) regularization term over all embedding entries."""
    return (embeddings ** 2).mean()
class binaryCrossLoss(nn.Module):
    """BCE-with-logits: positive scores get label 1, negative scores label 0."""

    def __init__(self):
        super(binaryCrossLoss, self).__init__()

    def forward(self, pos, neg):
        ones = floatTensor(pos.shape[0])
        nn.init.ones_(ones)
        zeros = floatTensor(neg.shape[0])
        nn.init.zeros_(zeros)
        scores = torch.cat((pos, neg))
        labels = torch.cat((ones, zeros))
        return F.binary_cross_entropy_with_logits(scores, labels)
| 1,481 | 30.531915 | 142 | py |
RE-Net | RE-Net-master/baselines/model.py | import os
import math
import pickle
import numpy as np
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from LSTMLinear import LSTMModel
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
else:
longTensor = torch.LongTensor
floatTensor = torch.FloatTensor
class TTransEModel(nn.Module):
    """TTransE: translation-based temporal KG embedding model.

    A quadruple (h, r, t, tem) is scored by the translation distance
    ||h + r + tem - t|| (L1, or squared L2 when config.L1_flag is False).
    Lower scores mean more plausible facts.
    """
    def __init__(self, config):
        super(TTransEModel, self).__init__()
        self.learning_rate = config.learning_rate
        self.early_stopping_round = config.early_stopping_round
        self.L1_flag = config.L1_flag
        self.filter = config.filter
        self.embedding_size = config.embedding_size
        self.entity_total = config.entity_total
        self.relation_total = config.relation_total
        self.tem_total = config.tem_total
        self.batch_size = config.batch_size
        ent_weight = floatTensor(self.entity_total, self.embedding_size)
        rel_weight = floatTensor(self.relation_total, self.embedding_size)
        tem_weight = floatTensor(self.tem_total, self.embedding_size)
        # Xavier initialization; use the in-place underscore variant
        # (nn.init.xavier_uniform without the underscore is deprecated).
        nn.init.xavier_uniform_(ent_weight)
        nn.init.xavier_uniform_(rel_weight)
        nn.init.xavier_uniform_(tem_weight)
        self.ent_embeddings = nn.Embedding(self.entity_total, self.embedding_size)
        self.rel_embeddings = nn.Embedding(self.relation_total, self.embedding_size)
        self.tem_embeddings = nn.Embedding(self.tem_total, self.embedding_size)
        self.ent_embeddings.weight = nn.Parameter(ent_weight)
        self.rel_embeddings.weight = nn.Parameter(rel_weight)
        self.tem_embeddings.weight = nn.Parameter(tem_weight)
        # Project every embedding row onto the unit L2 sphere once at init.
        self.ent_embeddings.weight.data = F.normalize(self.ent_embeddings.weight.data, p=2, dim=1)
        self.rel_embeddings.weight.data = F.normalize(self.rel_embeddings.weight.data, p=2, dim=1)
        self.tem_embeddings.weight.data = F.normalize(self.tem_embeddings.weight.data, p=2, dim=1)

    def forward(self, pos_h, pos_t, pos_r, pos_tem, neg_h, neg_t, neg_r, neg_tem):
        """Return (pos, neg) per-sample translation scores for the two batches."""
        pos_h_e = self.ent_embeddings(pos_h)
        pos_t_e = self.ent_embeddings(pos_t)
        pos_r_e = self.rel_embeddings(pos_r)
        pos_tem_e = self.tem_embeddings(pos_tem)
        neg_h_e = self.ent_embeddings(neg_h)
        neg_t_e = self.ent_embeddings(neg_t)
        neg_r_e = self.rel_embeddings(neg_r)
        neg_tem_e = self.tem_embeddings(neg_tem)
        if self.L1_flag:
            pos = torch.sum(torch.abs(pos_h_e + pos_r_e + pos_tem_e - pos_t_e), 1)
            neg = torch.sum(torch.abs(neg_h_e + neg_r_e + neg_tem_e - neg_t_e), 1)
        else:
            pos = torch.sum((pos_h_e + pos_r_e + pos_tem_e - pos_t_e) ** 2, 1)
            neg = torch.sum((neg_h_e + neg_r_e + neg_tem_e - neg_t_e) ** 2, 1)
        return pos, neg
class TADistmultModel(nn.Module):
    """TA-DistMult: DistMult scoring with time-aware relation embeddings.

    The relation representation is produced by an LSTM over the relation
    token followed by the temporal token sequence (see get_rseq); the score
    of (h, r, t, tem) is the trilinear product sum(h * t * rseq).
    """
    def __init__(self, config):
        super(TADistmultModel, self).__init__()
        self.learning_rate = config.learning_rate
        self.early_stopping_round = config.early_stopping_round
        self.L1_flag = config.L1_flag
        self.filter = config.filter
        self.embedding_size = config.embedding_size
        self.entity_total = config.entity_total
        self.relation_total = config.relation_total
        self.tem_total = config.tem_total  # temporal token vocabulary size
        self.batch_size = config.batch_size
        self.criterion = nn.Softplus()  # not referenced in this class; kept for compatibility
        # (removed a stray `torch.nn.BCELoss()` statement that constructed a
        # loss object and immediately discarded it -- dead code)
        self.dropout = nn.Dropout(config.dropout)
        self.lstm = LSTMModel(self.embedding_size, n_layer=1)
        ent_weight = floatTensor(self.entity_total, self.embedding_size)
        rel_weight = floatTensor(self.relation_total, self.embedding_size)
        tem_weight = floatTensor(self.tem_total, self.embedding_size)
        # Xavier initialization; use the in-place underscore variant
        # (nn.init.xavier_uniform without the underscore is deprecated).
        nn.init.xavier_uniform_(ent_weight)
        nn.init.xavier_uniform_(rel_weight)
        nn.init.xavier_uniform_(tem_weight)
        self.ent_embeddings = nn.Embedding(self.entity_total, self.embedding_size)
        self.rel_embeddings = nn.Embedding(self.relation_total, self.embedding_size)
        self.tem_embeddings = nn.Embedding(self.tem_total, self.embedding_size)
        self.ent_embeddings.weight = nn.Parameter(ent_weight)
        self.rel_embeddings.weight = nn.Parameter(rel_weight)
        self.tem_embeddings.weight = nn.Parameter(tem_weight)
        # Project every embedding row onto the unit L2 sphere once at init.
        self.ent_embeddings.weight.data = F.normalize(self.ent_embeddings.weight.data, p=2, dim=1)
        self.rel_embeddings.weight.data = F.normalize(self.rel_embeddings.weight.data, p=2, dim=1)
        self.tem_embeddings.weight.data = F.normalize(self.tem_embeddings.weight.data, p=2, dim=1)

    def scoring(self, h, t, r):
        """DistMult trilinear score per row: sum_d h_d * t_d * r_d."""
        return torch.sum(h * t * r, 1, False)

    def forward(self, pos_h, pos_t, pos_r, pos_tem, neg_h, neg_t, neg_r, neg_tem):
        """Return (pos, neg) DistMult scores; higher means more plausible."""
        pos_h_e = self.ent_embeddings(pos_h)
        pos_t_e = self.ent_embeddings(pos_t)
        pos_rseq_e = self.get_rseq(pos_r, pos_tem)
        neg_h_e = self.ent_embeddings(neg_h)
        neg_t_e = self.ent_embeddings(neg_t)
        neg_rseq_e = self.get_rseq(neg_r, neg_tem)
        pos_h_e = self.dropout(pos_h_e)
        pos_t_e = self.dropout(pos_t_e)
        pos_rseq_e = self.dropout(pos_rseq_e)
        neg_h_e = self.dropout(neg_h_e)
        neg_t_e = self.dropout(neg_t_e)
        neg_rseq_e = self.dropout(neg_rseq_e)
        pos = self.scoring(pos_h_e, pos_t_e, pos_rseq_e)
        neg = self.scoring(neg_h_e, neg_t_e, neg_rseq_e)
        return pos, neg

    def get_rseq(self, r, tem):
        """Build time-aware relation embeddings.

        r:   (batch,) relation ids.
        tem: (batch, tem_len) temporal token ids.
        Feeds the sequence [relation token; temporal tokens] through the LSTM.
        NOTE(review): assumes hidden_tem[0, :, :] selects the (batch, dim)
        summary produced by LSTMModel -- confirm against LSTMLinear.
        """
        r_e = self.rel_embeddings(r)
        r_e = r_e.unsqueeze(0).transpose(0, 1)  # (batch, 1, dim)
        bs = tem.shape[0]  # batch size
        tem_len = tem.shape[1]
        tem = tem.contiguous()
        tem = tem.view(bs * tem_len)
        token_e = self.tem_embeddings(tem)
        token_e = token_e.view(bs, tem_len, self.embedding_size)
        seq_e = torch.cat((r_e, token_e), 1)  # (batch, 1 + tem_len, dim)
        hidden_tem = self.lstm(seq_e)
        hidden_tem = hidden_tem[0, :, :]
        rseq_e = hidden_tem
        return rseq_e
class TATransEModel(nn.Module):
    """TA-TransE: TransE translation scoring with time-aware relation embeddings.

    The relation representation comes from an LSTM over the relation token
    plus the temporal token sequence (see get_rseq); the quadruple is scored
    by ||h + rseq - t|| (L1, or squared L2 when config.L1_flag is False).
    """
    def __init__(self, config):
        super(TATransEModel, self).__init__()
        self.learning_rate = config.learning_rate
        self.early_stopping_round = config.early_stopping_round
        self.L1_flag = config.L1_flag
        self.filter = config.filter
        self.embedding_size = config.embedding_size
        self.entity_total = config.entity_total
        self.relation_total = config.relation_total
        # Fix: previously hard-coded to 32, ignoring config.tem_total (which
        # TADistmultModel honors); that under-sizes the temporal embedding
        # table for datasets with more temporal tokens (e.g. GDELT uses 46).
        # getattr keeps old configs without the attribute working unchanged.
        self.tem_total = getattr(config, 'tem_total', 32)
        self.batch_size = config.batch_size
        self.dropout = nn.Dropout(config.dropout)
        self.lstm = LSTMModel(self.embedding_size, n_layer=1)
        ent_weight = floatTensor(self.entity_total, self.embedding_size)
        rel_weight = floatTensor(self.relation_total, self.embedding_size)
        tem_weight = floatTensor(self.tem_total, self.embedding_size)
        # Xavier initialization; use the in-place underscore variant
        # (nn.init.xavier_uniform without the underscore is deprecated).
        nn.init.xavier_uniform_(ent_weight)
        nn.init.xavier_uniform_(rel_weight)
        nn.init.xavier_uniform_(tem_weight)
        self.ent_embeddings = nn.Embedding(self.entity_total, self.embedding_size)
        self.rel_embeddings = nn.Embedding(self.relation_total, self.embedding_size)
        self.tem_embeddings = nn.Embedding(self.tem_total, self.embedding_size)
        self.ent_embeddings.weight = nn.Parameter(ent_weight)
        self.rel_embeddings.weight = nn.Parameter(rel_weight)
        self.tem_embeddings.weight = nn.Parameter(tem_weight)
        # Project every embedding row onto the unit L2 sphere once at init.
        self.ent_embeddings.weight.data = F.normalize(self.ent_embeddings.weight.data, p=2, dim=1)
        self.rel_embeddings.weight.data = F.normalize(self.rel_embeddings.weight.data, p=2, dim=1)
        self.tem_embeddings.weight.data = F.normalize(self.tem_embeddings.weight.data, p=2, dim=1)

    def forward(self, pos_h, pos_t, pos_r, pos_tem, neg_h, neg_t, neg_r, neg_tem):
        """Return (pos, neg) per-sample translation scores; lower is better."""
        pos_h_e = self.ent_embeddings(pos_h)
        pos_t_e = self.ent_embeddings(pos_t)
        pos_rseq_e = self.get_rseq(pos_r, pos_tem)
        neg_h_e = self.ent_embeddings(neg_h)
        neg_t_e = self.ent_embeddings(neg_t)
        neg_rseq_e = self.get_rseq(neg_r, neg_tem)
        pos_h_e = self.dropout(pos_h_e)
        pos_t_e = self.dropout(pos_t_e)
        pos_rseq_e = self.dropout(pos_rseq_e)
        neg_h_e = self.dropout(neg_h_e)
        neg_t_e = self.dropout(neg_t_e)
        neg_rseq_e = self.dropout(neg_rseq_e)
        if self.L1_flag:
            pos = torch.sum(torch.abs(pos_h_e + pos_rseq_e - pos_t_e), 1)
            neg = torch.sum(torch.abs(neg_h_e + neg_rseq_e - neg_t_e), 1)
        else:
            pos = torch.sum((pos_h_e + pos_rseq_e - pos_t_e) ** 2, 1)
            neg = torch.sum((neg_h_e + neg_rseq_e - neg_t_e) ** 2, 1)
        return pos, neg

    def get_rseq(self, r, tem):
        """Build time-aware relation embeddings.

        r:   (batch,) relation ids.
        tem: (batch, tem_len) temporal token ids.
        Feeds the sequence [relation token; temporal tokens] through the LSTM.
        NOTE(review): assumes hidden_tem[0, :, :] selects the (batch, dim)
        summary produced by LSTMModel -- confirm against LSTMLinear.
        """
        r_e = self.rel_embeddings(r)
        r_e = r_e.unsqueeze(0).transpose(0, 1)  # (batch, 1, dim)
        bs = tem.shape[0]  # batch size
        tem_len = tem.shape[1]
        tem = tem.contiguous()
        tem = tem.view(bs * tem_len)
        token_e = self.tem_embeddings(tem)
        token_e = token_e.view(bs, tem_len, self.embedding_size)
        seq_e = torch.cat((r_e, token_e), 1)  # (batch, 1 + tem_len, dim)
        hidden_tem = self.lstm(seq_e)
        hidden_tem = hidden_tem[0, :, :]
        rseq_e = hidden_tem
        return rseq_e
| 8,996 | 36.962025 | 87 | py |
RE-Net | RE-Net-master/baselines/eval_lib.py | import torch
import torch.autograd as autograd
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
else:
longTensor = torch.LongTensor
floatTensor = torch.FloatTensor
def isHit10(triple, tree, cal_embedding, tripleDict, isTail):
    """Check whether the ground-truth entity of *triple* ranks in the top 10.

    Queries the KD-tree of entity embeddings in widening batches of 15
    nearest neighbours, counting only candidates that are not known-true
    triples (filtered setting). Returns True as soon as the ground truth
    appears before the 10th "wrong" candidate, False otherwise.
    NOTE(review): k grows by 15 until a decision is reached; if most
    neighbours are filtered out, the query size grows unboundedly --
    presumably fine in practice, but worth confirming.
    """
    # If isTail == True, evaluate the prediction of tail entity
    if isTail == True:
        k = 0
        wrongCount = 0
        while wrongCount < 10:
            k += 15
            tail_dist, tail_ind = tree.query(cal_embedding, k=k)
            # Only inspect the newly returned slice of neighbours.
            for elem in tail_ind[0][k - 15: k]:
                if triple.t == elem:
                    return True
                elif (triple.h, elem, triple.r) in tripleDict:
                    continue
                else:
                    wrongCount += 1
                    if wrongCount > 9:
                        return False
    # If isTail == False, evaluate the prediction of head entity
    else:
        k = 0
        wrongCount = 0
        while wrongCount < 10:
            k += 15
            head_dist, head_ind = tree.query(cal_embedding, k=k)
            for elem in head_ind[0][k - 15: k]:
                if triple.h == elem:
                    return True
                elif (elem, triple.t, triple.r) in tripleDict:
                    continue
                else:
                    wrongCount += 1
                    if wrongCount > 9:
                        return False
def pairwise_L1_distances(A, B):
    """All-pairs Manhattan distances: result[i, j] = ||A[i] - B[j]||_1."""
    diff = A.unsqueeze(1) - B.unsqueeze(0)  # (n, m, d) via broadcasting
    return diff.abs().sum(dim=2)
def pairwise_L2_distances(A, B):
    """All-pairs *squared* Euclidean distances via ||a||^2 + ||b||^2 - 2 a.b."""
    sq_a = torch.sum(A ** 2, dim=1).unsqueeze(1)  # (n, 1)
    sq_b = torch.sum(B ** 2, dim=1).unsqueeze(0)  # (1, m)
    cross = torch.mm(A, torch.transpose(B, 0, 1))
    return sq_a + sq_b - 2 * cross
| 1,836 | 28.15873 | 71 | py |
RE-Net | RE-Net-master/baselines/evaluation_TADistMult.py | import os
import numpy as np
import time
import datetime
import random
import multiprocessing
import math
import torch
import torch.autograd as autograd
from sklearn.metrics.pairwise import pairwise_distances, cosine_similarity, linear_kernel
from data import *
from eval_lib import *
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
else:
longTensor = torch.LongTensor
floatTensor = torch.FloatTensor
# Find the rank of ground truth tail in the distance array,
# If (head, num, rel) in tripleDict,
# skip without counting.
def argwhereTail(head, tail, rel, array, tripleDict):
    """Filtered rank of the true tail entity within the sorted candidate *array*.

    Counts candidates ahead of *tail*, skipping (head, candidate, rel)
    combinations that are known-true triples in *tripleDict*.
    """
    rank = 0
    for candidate in array:
        if candidate == tail:
            break
        if (head, candidate, rel) not in tripleDict:
            rank += 1
    return rank
# Find the rank of ground truth head in the distance array,
# If (head, num, rel) in tripleDict,
# skip without counting.
def argwhereHead(head, tail, rel, array, tripleDict):
    """Filtered rank of the true head entity within the sorted candidate *array*.

    Counts candidates ahead of *head*, skipping (candidate, tail, rel)
    combinations that are known-true triples in *tripleDict*.
    """
    rank = 0
    for candidate in array:
        if candidate == head:
            break
        if (candidate, tail, rel) not in tripleDict:
            rank += 1
    return rank
def evaluation_helper(testList, tripleDict, model, ent_embeddings, L1_flag, filter, head=0):
    """Rank every test quadruple against all entities and accumulate metrics.

    Uses the DistMult trilinear form: candidates are scored by a linear
    kernel between (h * rseq) resp. (t * rseq) and all entity embeddings.
    Returns (hit@1 count, hit@3 count, hit@10 count, summed rank, summed
    reciprocal rank, number of ranked queries) over both tail- and
    head-replacement. Ranks are 0-based.
    """
    # embeddings are numpy like
    headList, tailList, relList, timeList = getFourElements(testList)
    h_e = ent_embeddings[headList]
    t_e = ent_embeddings[tailList]
    test_r_batch = autograd.Variable(longTensor(relList))
    test_time_batch = autograd.Variable(longTensor(timeList))
    # Time-aware relation embeddings, moved to CPU numpy for sklearn.
    rseq_e = model.get_rseq(test_r_batch, test_time_batch).data.cpu().numpy()
    c_t_e = h_e * rseq_e
    c_h_e = t_e * rseq_e
    dist = linear_kernel(c_t_e, ent_embeddings)
    # Higher kernel value = better candidate, hence descending sort.
    rankArrayTail = np.argsort(-dist, axis=1)
    if filter == False:
        # NOTE(review): int(np.argwhere(...)) assumes a single match per row.
        rankListTail = [int(np.argwhere(elem[1]==elem[0])) for elem in zip(tailList, rankArrayTail)]
    else:
        rankListTail = [argwhereTail(elem[0], elem[1], elem[2], elem[3], tripleDict)
                        for elem in zip(headList, tailList, relList, rankArrayTail)]
    isHit1ListTail = [x for x in rankListTail if x < 1]
    isHit3ListTail = [x for x in rankListTail if x < 3]
    isHit10ListTail = [x for x in rankListTail if x < 10]
    dist = linear_kernel(c_h_e, ent_embeddings)
    rankArrayHead = np.argsort(-dist, axis=1)
    if filter == False:
        rankListHead = [int(np.argwhere(elem[1]==elem[0])) for elem in zip(headList, rankArrayHead)]
    else:
        rankListHead = [argwhereHead(elem[0], elem[1], elem[2], elem[3], tripleDict)
                        for elem in zip(headList, tailList, relList, rankArrayHead)]
    # Reciprocal ranks (ranks are 0-based, hence x + 1).
    re_rankListHead = [1.0/(x+1) for x in rankListHead]
    re_rankListTail = [1.0/(x+1) for x in rankListTail]
    isHit1ListHead = [x for x in rankListHead if x < 1]
    isHit3ListHead = [x for x in rankListHead if x < 3]
    isHit10ListHead = [x for x in rankListHead if x < 10]
    totalRank = sum(rankListTail) + sum(rankListHead)
    totalReRank = sum(re_rankListHead) + sum(re_rankListTail)
    hit1Count = len(isHit1ListTail) + len(isHit1ListHead)
    hit3Count = len(isHit3ListTail) + len(isHit3ListHead)
    hit10Count = len(isHit10ListTail) + len(isHit10ListHead)
    tripleCount = len(rankListTail) + len(rankListHead)
    return hit1Count, hit3Count, hit10Count, totalRank, totalReRank, tripleCount
def process_data(testList, tripleDict, model, ent_embeddings, L1_flag, filter, L, head):
    """Run evaluation_helper on *testList* and append its 6-tuple of stats to *L*."""
    stats = evaluation_helper(testList, tripleDict, model, ent_embeddings, L1_flag, filter, head)
    L.append(stats)
def evaluation(testList, tripleDict, model, ent_embeddings, L1_flag, filter, k=0, head=0):
    """Evaluate link prediction on *testList*; print and return the metrics.

    k > 0 subsamples the test list (with replacement when k exceeds its
    length). head == 1 or 2 means only one side was ranked per quadruple;
    otherwise both head and tail replacement contribute.
    Returns (hit1, hit3, hit10, meanrank, meanrerank).
    """
    if k > len(testList):
        testList = random.choices(testList, k=k)
    elif k > 0:
        testList = random.sample(testList, k=k)
    collected = []
    process_data(testList, tripleDict, model, ent_embeddings, L1_flag, filter, collected, head)
    resultList = list(collected)
    denom = len(testList) if head in (1, 2) else 2 * len(testList)
    hit1, hit3, hit10, meanrank, meanrerank = (
        sum(r[i] for r in resultList) / denom for i in range(5))
    print('Meanrank: %.6f' % meanrank)
    print('Meanrerank: %.6f' % meanrerank)
    print('Hit@1: %.6f' % hit1)
    print('Hit@3: %.6f' % hit3)
    print('Hit@10: %.6f' % hit10)
    return hit1, hit3, hit10, meanrank, meanrerank
def evaluation_batch(testList, tripleDict, model, ent_embeddings, L1_flag, filter, k=0, head=0):
    """Like evaluation(), but returns raw sums plus the number of ranked queries.

    Returns (hit1, hit3, hit10, rank_sum, reciprocal_rank_sum, n) where n is
    len(testList) for one-sided evaluation (head == 1 or 2), else twice that.
    """
    if k > len(testList):
        testList = random.choices(testList, k=k)
    elif k > 0:
        testList = random.sample(testList, k=k)
    collected = []
    process_data(testList, tripleDict, model, ent_embeddings, L1_flag, filter, collected, head)
    resultList = list(collected)
    hit1, hit3, hit10, meanrank, meanrerank = (
        sum(r[i] for r in resultList) for i in range(5))
    n = len(testList) if head in (1, 2) else 2 * len(testList)
    return hit1, hit3, hit10, meanrank, meanrerank, n
| 6,125 | 34.005714 | 161 | py |
RE-Net | RE-Net-master/baselines/evaluation_TATransE.py | import numpy as np
import torch
import torch.autograd as autograd
from sklearn.metrics.pairwise import pairwise_distances, cosine_similarity
from data import *
from eval_lib import *
USE_CUDA = torch.cuda.is_available()
if USE_CUDA:
longTensor = torch.cuda.LongTensor
floatTensor = torch.cuda.FloatTensor
else:
longTensor = torch.LongTensor
floatTensor = torch.FloatTensor
# Find the rank of ground truth tail in the distance array,
# If (head, num, rel) in tripleDict,
# skip without counting.
def argwhereTail(head, tail, rel, array, tripleDict):
    """Filtered rank of the true tail entity within the sorted candidate *array*.

    Counts candidates ahead of *tail*, skipping (head, candidate, rel)
    combinations that are known-true triples in *tripleDict*.
    """
    rank = 0
    for candidate in array:
        if candidate == tail:
            break
        if (head, candidate, rel) not in tripleDict:
            rank += 1
    return rank
# Find the rank of ground truth head in the distance array,
# If (head, num, rel) in tripleDict,
# skip without counting.
def argwhereHead(head, tail, rel, array, tripleDict):
    """Filtered rank of the true head entity within the sorted candidate *array*.

    Counts candidates ahead of *head*, skipping (candidate, tail, rel)
    combinations that are known-true triples in *tripleDict*.
    """
    rank = 0
    for candidate in array:
        if candidate == head:
            break
        if (candidate, tail, rel) not in tripleDict:
            rank += 1
    return rank
def evaluation_helper(testList, tripleDict, model, ent_embeddings, L1_flag, filter, head=0):
    """Rank every test quadruple against all entities and accumulate metrics.

    TransE-style scoring: the predicted tail is h + rseq and the predicted
    head is t - rseq; candidates are ranked by L1 (manhattan) or L2
    (euclidean) distance per L1_flag. Returns (hit@1 count, hit@3 count,
    hit@10 count, summed rank, summed reciprocal rank, number of ranked
    queries) over both tail- and head-replacement. Ranks are 0-based.
    """
    # embeddings are numpy-like arrays
    headList, tailList, relList, timeList = getFourElements(testList)
    h_e = ent_embeddings[headList]
    t_e = ent_embeddings[tailList]
    test_r_batch = autograd.Variable(longTensor(relList))
    test_time_batch = autograd.Variable(longTensor(timeList))
    # Time-aware relation embeddings, moved to CPU numpy for sklearn.
    rseq_e = model.get_rseq(test_r_batch, test_time_batch).data.cpu().numpy()
    c_t_e = h_e + rseq_e
    c_h_e = t_e - rseq_e
    if L1_flag == True:
        dist = pairwise_distances(c_t_e, ent_embeddings, metric='manhattan')
    else:
        dist = pairwise_distances(c_t_e, ent_embeddings, metric='euclidean')
    # Smaller distance = better candidate, hence ascending sort.
    rankArrayTail = np.argsort(dist, axis=1)
    if filter == False:
        # NOTE(review): int(np.argwhere(...)) assumes a single match per row.
        rankListTail = [int(np.argwhere(elem[1]==elem[0])) for elem in zip(tailList, rankArrayTail)]
    else:
        rankListTail = [argwhereTail(elem[0], elem[1], elem[2], elem[3], tripleDict)
                        for elem in zip(headList, tailList, relList, rankArrayTail)]
    isHit1ListTail = [x for x in rankListTail if x < 1]
    isHit3ListTail = [x for x in rankListTail if x < 3]
    isHit10ListTail = [x for x in rankListTail if x < 10]
    if L1_flag == True:
        dist = pairwise_distances(c_h_e, ent_embeddings, metric='manhattan')
    else:
        dist = pairwise_distances(c_h_e, ent_embeddings, metric='euclidean')
    rankArrayHead = np.argsort(dist, axis=1)
    if filter == False:
        rankListHead = [int(np.argwhere(elem[1]==elem[0])) for elem in zip(headList, rankArrayHead)]
    else:
        rankListHead = [argwhereHead(elem[0], elem[1], elem[2], elem[3], tripleDict)
                        for elem in zip(headList, tailList, relList, rankArrayHead)]
    # Reciprocal ranks (ranks are 0-based, hence x + 1).
    re_rankListHead = [1.0/(x+1) for x in rankListHead]
    re_rankListTail = [1.0/(x+1) for x in rankListTail]
    isHit1ListHead = [x for x in rankListHead if x < 1]
    isHit3ListHead = [x for x in rankListHead if x < 3]
    isHit10ListHead = [x for x in rankListHead if x < 10]
    totalRank = sum(rankListTail) + sum(rankListHead)
    totalReRank = sum(re_rankListHead) + sum(re_rankListTail)
    hit1Count = len(isHit1ListTail) + len(isHit1ListHead)
    hit3Count = len(isHit3ListTail) + len(isHit3ListHead)
    hit10Count = len(isHit10ListTail) + len(isHit10ListHead)
    tripleCount = len(rankListTail) + len(rankListHead)
    return hit1Count, hit3Count, hit10Count, totalRank, totalReRank, tripleCount
def process_data(testList, tripleDict, model, ent_embeddings, L1_flag, filter, L, head):
    """Evaluate one chunk of test triples and append the aggregate counters to L.

    The appended tuple is (hit@1 count, hit@3 count, hit@10 count, total rank,
    total reciprocal rank, number of rankings), exactly as produced by
    evaluation_helper.  L acts as an output accumulator so this function can be
    used as a worker target.
    """
    counters = evaluation_helper(
        testList, tripleDict, model, ent_embeddings, L1_flag, filter, head)
    L.append(tuple(counters))
def evaluation(testList, tripleDict, model, ent_embeddings, L1_flag, filter, k=0, head=0):
    """Rank every test triple and print/return averaged metrics.

    Returns (hit@1, hit@3, hit@10, mean rank, mean reciprocal rank).
    The embeddings are numpy arrays.  ``head`` selects which side(s) were
    ranked: 1 or 2 means a single side per triple, anything else means both
    head and tail were ranked (hence the divisor of 2 * len(testList)).
    """
    # Optional subsampling of the evaluation set: sampling with replacement
    # when k exceeds the set size, without replacement for 0 < k <= size.
    if k > len(testList):
        testList = random.choices(testList, k=k)
    elif k > 0:
        testList = random.sample(testList, k=k)

    chunks = []
    process_data(testList, tripleDict, model, ent_embeddings, L1_flag, filter, chunks, head)

    # Element-wise totals over the (single) result tuple, then normalise.
    totals = [sum(values) for values in zip(*chunks)]
    denom = len(testList) if head == 1 or head == 2 else 2 * len(testList)
    hit1, hit3, hit10, meanrank, meanrerank = (t / denom for t in totals[:5])

    print('Meanrank: %.6f' % meanrank)
    print('Meanrerank: %.6f' % meanrerank)
    print('Hit@1: %.6f' % hit1)
    print('Hit@3: %.6f' % hit3)
    print('Hit@10: %.6f' % hit10)
    return hit1, hit3, hit10, meanrank, meanrerank
def evaluation_batch(testList, tripleDict, model, ent_embeddings, L1_flag, filter, k=0, head=0):
    """Like evaluation(), but return raw summed counters instead of averages.

    Returns (hit@1 sum, hit@3 sum, hit@10 sum, rank sum, reciprocal-rank sum,
    divisor), where the divisor is the number of rankings the caller should
    average over: one per triple when head is 1 or 2, two otherwise.
    """
    # Optional subsampling, mirroring evaluation(): with replacement when k
    # is larger than the set, without replacement otherwise.
    if k > len(testList):
        testList = random.choices(testList, k=k)
    elif k > 0:
        testList = random.sample(testList, k=k)

    chunks = []
    process_data(testList, tripleDict, model, ent_embeddings, L1_flag, filter, chunks, head)
    hit1, hit3, hit10, meanrank, meanrerank, _ = [sum(values) for values in zip(*chunks)]

    if head == 1 or head == 2:
        return hit1, hit3, hit10, meanrank, meanrerank, len(testList)
    return hit1, hit3, hit10, meanrank, meanrerank, 2 * len(testList)
| 6,301 | 35.218391 | 161 | py |
RE-Net | RE-Net-master/baselines/TTransE.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2017-12-24 01:45:03
# @Author : jimmy (jimmywangheng@qq.com)
# @Link : http://sdcs.sysu.edu.cn
# @Version : $Id$
import os
import torch
torch.multiprocessing.set_start_method("spawn")
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import time
import datetime
import random
from utils import *
from data import *
from evaluation_TTransE import *
import loss as loss
import model as model
# Select GPU tensor constructors when CUDA is available so the rest of the
# script can build tensors on the right device without repeating the check.
USE_CUDA = torch.cuda.is_available()

if USE_CUDA:
    longTensor = torch.cuda.LongTensor
    floatTensor = torch.cuda.FloatTensor
else:
    longTensor = torch.LongTensor
    floatTensor = torch.FloatTensor
"""
The meaning of parameters:
self.dataset: Which dataset is used to train the model? Such as 'FB15k', 'WN18', etc.
self.learning_rate: Initial learning rate (lr) of the model.
self.early_stopping_round: How many times will lr decrease? If set to 0, it remains constant.
self.L1_flag: If set to True, use L1 distance as dissimilarity; else, use L2.
self.embedding_size: The embedding size of entities and relations.
self.num_batches: How many batches to train in one epoch?
self.train_times: The maximum number of epochs for training.
self.margin: The margin set for MarginLoss.
self.filter: Whether to check a generated negative sample is false negative.
self.momentum: The momentum of the optimizer.
self.optimizer: Which optimizer to use? Such as SGD, Adam, etc.
self.loss_function: Which loss function to use? Typically, we use margin loss.
self.entity_total: The number of different entities.
self.relation_total: The number of different relations.
self.batch_size: How many instances is contained in one batch?
"""
class Config(object):
    """Hyper-parameter container for TTransE training.

    The values assigned here are only defaults; the ``__main__`` block below
    overwrites every field from the parsed command-line arguments before the
    model is constructed.  See the module docstring for the meaning of each
    field.
    """

    def __init__(self):
        self.dropout = 0
        self.dataset = None  # dataset name, filled in from the CLI
        self.learning_rate = 0.001
        self.early_stopping_round = 0  # lr decays allowed before stopping; 0 = constant lr
        self.L1_flag = True  # True: L1 dissimilarity, False: L2
        self.embedding_size = 100
        # self.num_batches = 100
        self.train_times = 1000  # maximum number of training epochs
        self.margin = 1.0  # margin of the ranking loss
        self.filter = True  # filter false negatives when corrupting triples
        self.momentum = 0.9
        self.optimizer = optim.Adam
        self.loss_function = loss.marginLoss
        self.loss_type = 0
        self.entity_total = 0  # number of entities (read from stat.txt)
        self.relation_total = 0  # number of relations (read from stat.txt)
        self.batch_size = 0
        self.tem_total = 0  # number of distinct time tokens
if __name__ == "__main__":
    import argparse
    argparser = argparse.ArgumentParser()

    """
    The meaning of some parameters:
    seed: Fix the random seed. Except for 0, which means no setting of random seed.
    port: The port number used by hyperboard,
    which is a demo showing training curves in real time.
    You can refer to https://github.com/WarBean/hyperboard to know more.
    num_processes: Number of processes used to evaluate the result.
    """

    argparser.add_argument('-dr', '--dropout', type=float, default=0)
    argparser.add_argument('-d', '--dataset', type=str)
    argparser.add_argument('-l', '--learning_rate', type=float, default=0.001)
    argparser.add_argument('-es', '--early_stopping_round', type=int, default=10)
    argparser.add_argument('-L', '--L1_flag', type=int, default=1)
    argparser.add_argument('-em', '--embedding_size', type=int, default=100)
    # argparser.add_argument('-nb', '--num_batches', type=int, default=100)
    argparser.add_argument('-bs', '--batch_size', type=int, default=512)
    argparser.add_argument('-n', '--train_times', type=int, default=1000)
    argparser.add_argument('-m', '--margin', type=float, default=1.0)
    argparser.add_argument('-f', '--filter', type=int, default=1)
    argparser.add_argument('-mo', '--momentum', type=float, default=0.9)
    argparser.add_argument('-s', '--seed', type=int, default=0)
    argparser.add_argument('-op', '--optimizer', type=int, default=1)
    argparser.add_argument('-lo', '--loss_type', type=int, default=0)
    argparser.add_argument('-p', '--port', type=int, default=5000)
    argparser.add_argument('-np', '--num_processes', type=int, default=4)
    argparser.add_argument('-test', '--test', type=int, default=0)

    args = argparser.parse_args()

    # seed == 0 means "do not seed" by convention (see docstring above).
    if args.seed != 0:
        torch.manual_seed(args.seed)

    # Load the train/valid splits plus a combined dict over all splits,
    # used for filtered negative sampling and filtered evaluation.
    trainTotal, trainList, trainDict = load_quadruples_TTransE('./data/' + args.dataset + '_TTransE', 'train2id.txt')
    validTotal, validList, validDict = load_quadruples_TTransE('./data/' + args.dataset + '_TTransE', 'valid2id.txt')
    quadrupleTotal, quadrupleList, quadrupleDict = load_quadruples_TTransE('./data/' + args.dataset + '_TTransE', 'train2id.txt', 'valid2id.txt', 'test2id.txt')

    # Copy every CLI argument onto the Config instance.
    config = Config()
    config.dropout = args.dropout
    config.dataset = args.dataset
    config.learning_rate = args.learning_rate
    config.early_stopping_round = args.early_stopping_round

    if args.L1_flag == 1:
        config.L1_flag = True
    else:
        config.L1_flag = False

    if args.dataset == "GDELT":
        config.tem_total = 46
    else:
        config.tem_total = 32

    config.embedding_size = args.embedding_size
    # config.num_batches = args.num_batches
    config.train_times = args.train_times
    config.margin = args.margin

    if args.filter == 1:
        config.filter = True
    else:
        config.filter = False

    config.momentum = args.momentum

    if args.optimizer == 0:
        config.optimizer = optim.SGD
    elif args.optimizer == 1:
        config.optimizer = optim.Adam
    elif args.optimizer == 2:
        config.optimizer = optim.RMSprop

    if args.loss_type == 0:
        config.loss_function = loss.marginLoss

    # stat.txt also overrides tem_total set above.
    config.entity_total, config.relation_total, config.tem_total = get_total_number('./data/' + args.dataset + '_TTransE', 'stat.txt')
    # config.batch_size = trainTotal // config.num_batches
    config.batch_size = args.batch_size

    loss_function = config.loss_function()

    # Checkpoint name encodes every hyper-parameter so distinct runs never clash.
    filename = '_'.join(
        ['dropout', str(args.dropout),
         'l', str(args.learning_rate),
         'es', str(args.early_stopping_round),
         'L', str(args.L1_flag),
         'em', str(args.embedding_size),
         # 'nb', str(args.num_batches),
         # 'n', str(args.train_times),
         'bs', str(args.batch_size),
         'm', str(args.margin),
         'f', str(args.filter),
         'mo', str(args.momentum),
         's', str(args.seed),
         'op', str(args.optimizer),
         'lo', str(args.loss_type),]) + '_TTransE.ckpt'

    os.makedirs('./model/' + args.dataset, exist_ok=True)
    path_name = os.path.join('./model/' + args.dataset, filename)
    # Resume from an existing checkpoint if one matches these hyper-parameters.
    # NOTE(review): this rebinds the name `model`, shadowing the imported
    # `model` module — the module is unusable from here on.
    if os.path.exists(path_name):
        model = torch.load(path_name)
    else:
        model = model.TTransEModel(config)

    if USE_CUDA:
        model.cuda()
        loss_function.cuda()

    optimizer = config.optimizer(model.parameters(), lr=config.learning_rate)
    margin = autograd.Variable(floatTensor([config.margin]))

    start_time = time.time()
    # Training phase (skipped entirely when --test is nonzero).
    if args.test == 0:
        # trainBatchList = getBatchList(trainList, config.num_batches)
        trainBatchList = getBatchList(trainList, config.batch_size)
        for epoch in range(config.train_times):
            model.train()
            total_loss = floatTensor([0.0])
            random.shuffle(trainBatchList)
            for batchList in trainBatchList:
                # Corrupt each positive quadruple into a negative one, either
                # checking against the full quadruple dict (filtered) or not (raw).
                if config.filter == True:
                    pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch, neg_r_batch, neg_time_batch = getBatch_filter_all(batchList,
                        config.entity_total, quadrupleDict)
                else:
                    pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch, neg_r_batch, neg_time_batch = getBatch_raw_all(batchList,
                        config.entity_total)

                pos_h_batch = autograd.Variable(longTensor(pos_h_batch))
                pos_t_batch = autograd.Variable(longTensor(pos_t_batch))
                pos_r_batch = autograd.Variable(longTensor(pos_r_batch))
                pos_time_batch = autograd.Variable(longTensor(pos_time_batch))
                neg_h_batch = autograd.Variable(longTensor(neg_h_batch))
                neg_t_batch = autograd.Variable(longTensor(neg_t_batch))
                neg_r_batch = autograd.Variable(longTensor(neg_r_batch))
                neg_time_batch = autograd.Variable(longTensor(neg_time_batch))

                model.zero_grad()
                pos, neg = model(pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch, neg_r_batch, neg_time_batch)

                if args.loss_type == 0:
                    losses = loss_function(pos, neg, margin)
                else:
                    losses = loss_function(pos, neg)
                # Add norm regularisation over every embedding touched this batch.
                ent_embeddings = model.ent_embeddings(torch.cat([pos_h_batch, pos_t_batch, neg_h_batch, neg_t_batch]))
                rel_embeddings = model.rel_embeddings(torch.cat([pos_r_batch, neg_r_batch]))
                tem_embeddings = model.tem_embeddings(torch.cat([pos_time_batch, neg_time_batch]))
                losses = losses + loss.normLoss(ent_embeddings) + loss.normLoss(rel_embeddings) + loss.normLoss(tem_embeddings)

                losses.backward()
                optimizer.step()
                total_loss += losses.data

            if epoch % 5 == 0:
                now_time = time.time()
                print(now_time - start_time)
                print("Train total loss: %d %f" % (epoch, total_loss[0]))

            # Every 5 epochs, report the loss on one random validation batch.
            if epoch % 5 == 0:
                if config.filter == True:
                    pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch, neg_r_batch, neg_time_batch = getBatch_filter_random(
                        validList,
                        config.batch_size, config.entity_total, quadrupleDict)
                else:
                    pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch, neg_r_batch, neg_time_batch = getBatch_raw_random(
                        validList,
                        config.batch_size, config.entity_total)
                pos_h_batch = autograd.Variable(longTensor(pos_h_batch))
                pos_t_batch = autograd.Variable(longTensor(pos_t_batch))
                pos_r_batch = autograd.Variable(longTensor(pos_r_batch))
                pos_time_batch = autograd.Variable(longTensor(pos_time_batch))
                neg_h_batch = autograd.Variable(longTensor(neg_h_batch))
                neg_t_batch = autograd.Variable(longTensor(neg_t_batch))
                neg_r_batch = autograd.Variable(longTensor(neg_r_batch))
                neg_time_batch = autograd.Variable(longTensor(neg_time_batch))

                pos, neg = model(pos_h_batch, pos_t_batch, pos_r_batch, pos_time_batch, neg_h_batch, neg_t_batch,
                                 neg_r_batch, neg_time_batch)

                if args.loss_type == 0:
                    losses = loss_function(pos, neg, margin)
                else:
                    losses = loss_function(pos, neg)
                ent_embeddings = model.ent_embeddings(torch.cat([pos_h_batch, pos_t_batch, neg_h_batch, neg_t_batch]))
                rel_embeddings = model.rel_embeddings(torch.cat([pos_r_batch, neg_r_batch]))
                tem_embeddings = model.tem_embeddings(torch.cat([pos_time_batch, neg_time_batch]))
                losses = losses + loss.normLoss(ent_embeddings) + loss.normLoss(rel_embeddings) + loss.normLoss(tem_embeddings)
                print("Valid batch loss: %d %f" % (epoch, losses.item()))

            # Early stopping: evaluate ranks on the validation set every 5
            # epochs, halve the lr when mean rank stalls 5 evaluations in a
            # row, and stop after early_stopping_round halvings.
            if config.early_stopping_round > 0:
                if epoch % 5 == 0:
                    ent_embeddings = model.ent_embeddings.weight.data.cpu().numpy()
                    rel_embeddings = model.rel_embeddings.weight.data.cpu().numpy()
                    tem_embeddings = model.tem_embeddings.weight.data.cpu().numpy()
                    L1_flag = model.L1_flag
                    filter = model.filter
                    batchNum = 2 * len(validList)
                    hit1ValidSum, hit3ValidSum, hit10ValidSum, meanrankValidSum, meanrerankValidSum, _ = evaluation_batch(
                        validList, quadrupleDict, model, ent_embeddings, rel_embeddings, tem_embeddings, L1_flag, filter, head=0)
                    hit1Valid = hit1ValidSum / batchNum
                    hit3Valid = hit3ValidSum / batchNum
                    hit10Valid = hit10ValidSum / batchNum
                    meanrankValid = meanrankValidSum / batchNum
                    meanrerankValid = meanrerankValidSum / batchNum

                    now_meanrank = meanrankValid
                    if epoch == 0:
                        best_meanrank = now_meanrank
                        best_epoch = 0
                        meanrank_not_decrease_time = 0
                        lr_decrease_time = 0
                    else:
                        if now_meanrank < best_meanrank:
                            meanrank_not_decrease_time = 0
                            best_meanrank = now_meanrank
                            torch.save(model, os.path.join('./model/' + args.dataset, filename))
                        else:
                            meanrank_not_decrease_time += 1
                            # If the result hasn't improved for consecutive 5 evaluations, decrease learning rate
                            if meanrank_not_decrease_time == 5:
                                lr_decrease_time += 1
                                if lr_decrease_time == config.early_stopping_round:
                                    break
                                else:
                                    optimizer.param_groups[0]['lr'] *= 0.5
                                    meanrank_not_decrease_time = 0

            # Periodic checkpoint regardless of validation results.
            if (epoch + 1) % 10 == 0 or epoch == 0:
                torch.save(model, os.path.join('./model/' + args.dataset, filename))
    # Final evaluation on the test split; runs after training (or immediately
    # when --test is nonzero and a checkpoint was loaded).
    model.eval()
    testTotal, testList, testDict = load_quadruples_TTransE('./data/' + args.dataset + '_TTransE', 'test2id.txt')
    # testBatchList = getBatchList(testList, config.num_batches)
    testBatchList = getBatchList(testList, config.batch_size)
    ent_embeddings = model.ent_embeddings.weight.data.cpu().numpy()
    rel_embeddings = model.rel_embeddings.weight.data.cpu().numpy()
    tem_embeddings = model.tem_embeddings.weight.data.cpu().numpy()
    L1_flag = model.L1_flag
    filter = model.filter

    # hit1Test, hit3Test, hit10Test, meanrankTest, meanrerankTest= evaluation(testList, quadrupleDict, model, ent_embeddings, rel_embeddings, tem_embeddings, L1_flag, filter, head=0)
    # Accumulate raw counters batch by batch, then normalise by the total
    # number of rankings (head + tail ranking per triple => 2 * len).
    hit1TestSum = 0
    hit3TestSum = 0
    hit10TestSum = 0
    meanrankTestSum = 0
    meanrerankTestSum = 0
    batchNum = 2*len(testList)
    for batchList in testBatchList:
        hit1TestSubSum, hit3TestSubSum, hit10TestSubSum, meanrankTestSubSum, meanrerankTestSubSum, batchSubNum = evaluation_batch(batchList, quadrupleDict, model, ent_embeddings, rel_embeddings, tem_embeddings, L1_flag, filter, head=0)
        hit1TestSum += hit1TestSubSum
        hit3TestSum += hit3TestSubSum
        hit10TestSum += hit10TestSubSum
        meanrankTestSum += meanrankTestSubSum
        meanrerankTestSum += meanrerankTestSubSum
        # batchNum += batchSubNum
    hit1Test = hit1TestSum / batchNum
    hit3Test = hit3TestSum / batchNum
    hit10Test = hit10TestSum / batchNum
    meanrankTest = meanrankTestSum / batchNum
    meanrerankTest = meanrerankTestSum / batchNum

    writeList = [filename,
                 'testSet', '%.6f' % hit1Test, '%.6f' % hit3Test, '%.6f' % hit10Test, '%.6f' % meanrankTest, '%.6f' % meanrerankTest]

    # Write the result into file
    os.makedirs('./result/', exist_ok=True)
    with open(os.path.join('./result/', args.dataset + '.txt'), 'a') as fw:
        fw.write('\t'.join(writeList) + '\n')
| 15,907 | 44.451429 | 235 | py |
RE-Net | RE-Net-master/baselines/LSTMLinear.py | import math
import torch as th
import torch
from torch import nn
import numpy as np
class LSTMModel(nn.Module):
    """Run an LSTMLinear recurrence over a batch-first sequence and return the
    final hidden state (the h component of the (h, c) pair)."""

    def __init__(self, in_dim, n_layer):
        super(LSTMModel, self).__init__()
        self.n_layer = n_layer
        # Hidden size is tied to the input size.
        self.hidden_dim = in_dim
        self.lstm = LSTMLinear(in_dim, self.hidden_dim)

    def forward(self, x):
        _, final_state = self.lstm(x)
        return final_state[0]
class LSTMCell(nn.Module):
    """Single-step LSTM variant with a *linear* candidate path.

    Unlike a standard LSTM cell, the candidate value and the output path skip
    the tanh squashing: c' = f*c + i*g with g left linear, and h' = o*c'.
    Input x is (batch, input_size); hidden is a pair of (1, batch, hidden_size)
    tensors (or None, which initialises both to zeros).
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(LSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        # Input-to-hidden and hidden-to-hidden projections for all 4 gates.
        self.i2h = nn.Linear(input_size, 4 * hidden_size, bias=bias)
        self.h2h = nn.Linear(hidden_size, 4 * hidden_size, bias=bias)
        # NOTE(review): linear_acti is created (and consumes RNG draws in
        # reset_parameters) but is never used in forward; kept for checkpoint
        # and seeded-initialisation compatibility.
        self.linear_acti = nn.Linear(hidden_size, hidden_size)
        self.reset_parameters()

    def reset_parameters(self):
        """Uniform init in [-1/sqrt(H), 1/sqrt(H)] for every parameter."""
        bound = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-bound, bound)

    def forward(self, x, hidden):
        x = x.contiguous()
        if hidden is None:
            hidden = self._init_hidden(x, self.hidden_size)
        # Collapse the leading layer dimension: (1, B, H) -> (B, H).
        h_prev, c_prev = (state.view(state.size(1), -1) for state in hidden)

        preact = self.i2h(x) + self.h2h(h_prev)
        hs = self.hidden_size
        # First three chunks are the input/forget/output gates (sigmoid);
        # the last chunk is the candidate, deliberately not squashed.
        i_gate = preact[:, :hs].sigmoid()
        f_gate = preact[:, hs:2 * hs].sigmoid()
        o_gate = preact[:, 2 * hs:3 * hs].sigmoid()
        candidate = preact[:, 3 * hs:]

        c_next = c_prev * f_gate + i_gate * candidate
        h_next = o_gate * c_next

        # Restore the (1, B, H) layout expected by callers.
        return (h_next.view(1, h_next.size(0), -1),
                c_next.view(1, c_next.size(0), -1))

    @staticmethod
    def _init_hidden(input_, hidden_size):
        zeros = th.zeros_like(input_.view(1, input_.size(0), -1))
        return zeros, zeros.clone()
class LSTMLinear(nn.Module):
    """Explicitly unrolled single-layer recurrence over LSTMCell.

    Mimics the nn.LSTM return convention: forward returns the stacked
    per-step hidden states plus the final (h, c) pair.  batch_first is
    always True.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(LSTMLinear, self).__init__()
        self.lstm_cell = LSTMCell(input_size, hidden_size, bias)
        self.batch_first = True

    def forward(self, input_, hidden=None):
        if self.batch_first:
            # Work in (seq, batch, feat) internally.
            input_ = input_.transpose(0, 1)

        per_step = []
        for step in range(input_.size(0)):
            hidden = self.lstm_cell(input_[step], hidden)
            per_step.append(hidden[0] if isinstance(hidden, tuple) else hidden)

        stacked = torch.stack(per_step, dim=0)
        if self.batch_first:
            stacked = stacked.transpose(0, 1)
        return stacked, hidden
| 2,897 | 28.571429 | 81 | py |
RE-Net | RE-Net-master/data/YAGO/get_history_graph.py | import numpy as np
import os
from collections import defaultdict
import pickle
import dgl
import torch
def load_quadruples(inPath, fileName, fileName2=None):
    """Load (head, rel, tail, time) quadruples from one or two files.

    Each line of a file is whitespace-separated ``head rel tail time``
    (integer ids).  The parse loop for the optional second file was a
    duplicate of the first; it is factored into one nested helper.

    Args:
        inPath: directory containing the files.
        fileName: mandatory data file.
        fileName2: optional second data file whose rows are appended.

    Returns:
        (quadruples, times): an (N, 4) numpy array of [head, rel, tail, time]
        rows in file order, and a 1-D numpy array of the sorted unique
        timestamps seen across all loaded files.
    """
    quadrupleList = []
    times = set()

    def _read(name):
        # Shared parser: on-disk column order is head, rel, tail, time.
        with open(os.path.join(inPath, name), 'r') as fr:
            for line in fr:
                head, rel, tail, time = (int(tok) for tok in line.split()[:4])
                quadrupleList.append([head, rel, tail, time])
                times.add(time)

    _read(fileName)
    if fileName2 is not None:
        _read(fileName2)
    return np.asarray(quadrupleList), np.asarray(sorted(times))
def get_total_number(inPath, fileName):
    """Return the first two integers of the first line of a stat file.

    For this repo's stat.txt layout that is (entity count, relation count).
    Returns None implicitly when the file is empty, matching the original.
    """
    with open(os.path.join(inPath, fileName), 'r') as fr:
        for line in fr:
            fields = line.split()
            # Only the first line matters; return immediately.
            return int(fields[0]), int(fields[1])
# NOTE(review): duplicate definition — this shadows the near-identical
# load_quadruples defined just above (the only difference is np.array vs
# np.asarray on the return); only this second copy is ever used.
def load_quadruples(inPath, fileName, fileName2=None):
    """Load (head, rel, tail, time) quadruples from one or two files.

    Returns an (N, 4) numpy array of rows in file order plus the sorted
    unique timestamps as a numpy array.
    """
    with open(os.path.join(inPath, fileName), 'r') as fr:
        quadrupleList = []
        times = set()
        for line in fr:
            line_split = line.split()
            # On-disk column order is head, rel, tail, time.
            head = int(line_split[0])
            tail = int(line_split[2])
            rel = int(line_split[1])
            time = int(line_split[3])
            quadrupleList.append([head, rel, tail, time])
            times.add(time)
        # times = list(times)
        # times.sort()
    if fileName2 is not None:
        with open(os.path.join(inPath, fileName2), 'r') as fr:
            for line in fr:
                line_split = line.split()
                head = int(line_split[0])
                tail = int(line_split[2])
                rel = int(line_split[1])
                time = int(line_split[3])
                quadrupleList.append([head, rel, tail, time])
                times.add(time)
    times = list(times)
    times.sort()
    return np.array(quadrupleList), np.asarray(times)
def get_data_with_t(data, tim):
    """Return the (head, rel, tail) triples of rows whose timestamp == tim.

    data is an (N, 4) array whose column 3 is the timestamp; the timestamp
    column is dropped from the returned copy.
    """
    selected = data[data[:, 3] == tim].copy()
    return np.delete(selected, 3, 1)
def comp_deg_norm(g):
    """Per-node inverse in-degree normalisation for a graph g.

    Zero-degree nodes are clamped to degree 1 so the division is safe
    (their norm comes out as 1.0).
    """
    deg = g.in_degrees(range(g.number_of_nodes())).float()
    # Clamp isolated nodes to avoid division by zero.
    deg[torch.nonzero(deg == 0).view(-1)] = 1
    return 1.0 / deg
def get_big_graph(data, num_rels):
    """Build a bidirectional DGLGraph from (head, rel, tail) triples.

    Every triple is added in both directions; the reverse direction carries
    the relation id shifted by num_rels.  Node data stores the original
    entity id ('id') and the inverse in-degree norm ('norm'); edge data
    stores subject-side ('type_s') and object-side ('type_o') relation ids.
    g.ids maps original entity id -> local node index.
    """
    src, rel, dst = data.transpose()
    # Compact the entity ids used in this slice to 0..len(uniq_v)-1.
    uniq_v, edges = np.unique((src, dst), return_inverse=True)
    remapped_src, remapped_dst = np.reshape(edges, (2, -1))

    g = dgl.DGLGraph()
    g.add_nodes(len(uniq_v))

    all_src = np.concatenate((remapped_src, remapped_dst))
    all_dst = np.concatenate((remapped_dst, remapped_src))
    rel_o = np.concatenate((rel + num_rels, rel))
    rel_s = np.concatenate((rel, rel + num_rels))
    g.add_edges(all_src, all_dst)

    norm = comp_deg_norm(g)
    g.ndata.update({'id': torch.from_numpy(uniq_v).long().view(-1, 1),
                    'norm': norm.view(-1, 1)})
    g.edata['type_s'] = torch.LongTensor(rel_s)
    g.edata['type_o'] = torch.LongTensor(rel_o)
    g.ids = {entity: index for index, entity in enumerate(uniq_v)}
    return g
# ---- Script: build per-timestamp graphs and per-entity interaction
# histories for the YAGO split, then pickle them to disk. ----
graph_dict_train = {}
train_data, train_times = load_quadruples('', 'train.txt')
test_data, test_times = load_quadruples('', 'test.txt')
dev_data, dev_times = load_quadruples('', 'valid.txt')
# total_data, _ = load_quadruples('', 'train.txt', 'test.txt')
history_len = 10  # at most 10 past timestamps are kept per entity

num_e, num_r = get_total_number('', 'stat.txt')
# Rolling per-entity histories: s_* indexed by subject entity, o_* by object.
# *_t holds the timestamp each history snapshot was taken at.
s_his = [[] for _ in range(num_e)]
o_his = [[] for _ in range(num_e)]
s_his_t = [[] for _ in range(num_e)]
o_his_t = [[] for _ in range(num_e)]
# Per-training-example snapshots of those histories (filled in the loop below).
s_history_data = [[] for _ in range(len(train_data))]
o_history_data = [[] for _ in range(len(train_data))]
s_history_data_t = [[] for _ in range(len(train_data))]
o_history_data_t = [[] for _ in range(len(train_data))]
e = []
r = []
latest_t = 0
# Events of the timestamp currently being accumulated, flushed into the
# history lists when the timestamp advances.
s_his_cache = [[] for _ in range(num_e)]
o_his_cache = [[] for _ in range(num_e)]
s_his_cache_t = [None for _ in range(num_e)]
o_his_cache_t = [None for _ in range(num_e)]
# One DGL graph per training timestamp.
for tim in train_times:
    print(str(tim)+'\t'+str(max(train_times)))
    data = get_data_with_t(train_data, tim)
    graph_dict_train[tim] = get_big_graph(data, num_r)

# Walk the training quadruples (assumed time-ordered); whenever the
# timestamp advances, flush every entity's cached events into its rolling
# history, bounded to history_len snapshots.
for i, train in enumerate(train_data):
    if i % 10000 == 0:
        print("train", i, len(train_data))
    # if i == 10000:
    #     break
    t = train[3]
    if latest_t != t:
        for ee in range(num_e):
            if len(s_his_cache[ee]) != 0:
                if len(s_his[ee]) >= history_len:
                    s_his[ee].pop(0)
                    s_his_t[ee].pop(0)
                s_his[ee].append(s_his_cache[ee].copy())
                s_his_t[ee].append(s_his_cache_t[ee])
                s_his_cache[ee] = []
                s_his_cache_t[ee] = None
            if len(o_his_cache[ee]) != 0:
                if len(o_his[ee]) >= history_len:
                    o_his[ee].pop(0)
                    o_his_t[ee].pop(0)
                o_his[ee].append(o_his_cache[ee].copy())
                o_his_t[ee].append(o_his_cache_t[ee])
                o_his_cache[ee] = []
                o_his_cache_t[ee] = None
        latest_t = t
    s = train[0]
    r = train[1]
    o = train[2]
    # print(s_his[r][s])
    # Snapshot the histories *before* adding the current event, so each
    # example only sees strictly earlier information.
    s_history_data[i] = s_his[s].copy()
    o_history_data[i] = o_his[o].copy()
    s_history_data_t[i] = s_his_t[s].copy()
    o_history_data_t[i] = o_his_t[o].copy()
    # print(o_history_data_g[i])
    if len(s_his_cache[s]) == 0:
        s_his_cache[s] = np.array([[r, o]])
    else:
        s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
    s_his_cache_t[s] = t
    if len(o_his_cache[o]) == 0:
        o_his_cache[o] = np.array([[r, s]])
    else:
        o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
    o_his_cache_t[o] = t
    # print(s_history_data[i], s_history_data_g[i])

# with open('ttt.txt', 'wb') as fp:
#     pickle.dump(s_history_data_g, fp)
# print("save")
with open('train_graphs.txt', 'wb') as fp:
    pickle.dump(graph_dict_train, fp)
with open('train_history_sub.txt', 'wb') as fp:
    pickle.dump([s_history_data, s_history_data_t], fp)
with open('train_history_ob.txt', 'wb') as fp:
    pickle.dump([o_history_data, o_history_data_t], fp)
# print(s_history_data[0])
# Same procedure for the validation split, continuing from the rolling
# state left by the training loop.
s_history_data_dev = [[] for _ in range(len(dev_data))]
o_history_data_dev = [[] for _ in range(len(dev_data))]
s_history_data_dev_t = [[] for _ in range(len(dev_data))]
o_history_data_dev_t = [[] for _ in range(len(dev_data))]

for i, dev in enumerate(dev_data):
    if i % 10000 == 0:
        print("valid", i, len(dev_data))
    t = dev[3]
    if latest_t != t:
        for ee in range(num_e):
            if len(s_his_cache[ee]) != 0:
                if len(s_his[ee]) >= history_len:
                    s_his[ee].pop(0)
                    s_his_t[ee].pop(0)
                s_his_t[ee].append(s_his_cache_t[ee])
                s_his[ee].append(s_his_cache[ee].copy())
                s_his_cache[ee] = []
                s_his_cache_t[ee] = None
            if len(o_his_cache[ee]) != 0:
                if len(o_his[ee]) >= history_len:
                    o_his[ee].pop(0)
                    o_his_t[ee].pop(0)
                o_his_t[ee].append(o_his_cache_t[ee])
                o_his[ee].append(o_his_cache[ee].copy())
                o_his_cache[ee] = []
                o_his_cache_t[ee] = None
        latest_t = t
    s = dev[0]
    r = dev[1]
    o = dev[2]
    # Snapshot before adding the current event.
    s_history_data_dev[i] = s_his[s].copy()
    o_history_data_dev[i] = o_his[o].copy()
    s_history_data_dev_t[i] = s_his_t[s].copy()
    o_history_data_dev_t[i] = o_his_t[o].copy()
    if len(s_his_cache[s]) == 0:
        s_his_cache[s] = np.array([[r, o]])
    else:
        s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
    s_his_cache_t[s] = t
    if len(o_his_cache[o]) == 0:
        o_his_cache[o] = np.array([[r, s]])
    else:
        o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
    o_his_cache_t[o] = t
    # print(o_his_cache[o])

with open('dev_history_sub.txt', 'wb') as fp:
    pickle.dump([s_history_data_dev, s_history_data_dev_t], fp)
with open('dev_history_ob.txt', 'wb') as fp:
    pickle.dump([o_history_data_dev, o_history_data_dev_t], fp)
# Same procedure for the test split, continuing from the state left by the
# train and validation loops.
s_history_data_test = [[] for _ in range(len(test_data))]
o_history_data_test = [[] for _ in range(len(test_data))]
s_history_data_test_t = [[] for _ in range(len(test_data))]
o_history_data_test_t = [[] for _ in range(len(test_data))]

for i, test in enumerate(test_data):
    if i % 10000 == 0:
        print("test", i, len(test_data))
    t = test[3]
    if latest_t != t:
        for ee in range(num_e):
            if len(s_his_cache[ee]) != 0:
                if len(s_his[ee]) >= history_len:
                    s_his[ee].pop(0)
                    s_his_t[ee].pop(0)
                s_his_t[ee].append(s_his_cache_t[ee])
                s_his[ee].append(s_his_cache[ee].copy())
                s_his_cache[ee] = []
                s_his_cache_t[ee] = None
            if len(o_his_cache[ee]) != 0:
                if len(o_his[ee]) >= history_len:
                    o_his[ee].pop(0)
                    o_his_t[ee].pop(0)
                o_his_t[ee].append(o_his_cache_t[ee])
                o_his[ee].append(o_his_cache[ee].copy())
                o_his_cache[ee] = []
                o_his_cache_t[ee] = None
        latest_t = t
    s = test[0]
    r = test[1]
    o = test[2]
    # Snapshot before adding the current event.
    s_history_data_test[i] = s_his[s].copy()
    o_history_data_test[i] = o_his[o].copy()
    s_history_data_test_t[i] = s_his_t[s].copy()
    o_history_data_test_t[i] = o_his_t[o].copy()
    if len(s_his_cache[s]) == 0:
        s_his_cache[s] = np.array([[r, o]])
    else:
        s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
    s_his_cache_t[s] = t
    if len(o_his_cache[o]) == 0:
        o_his_cache[o] = np.array([[r, s]])
    else:
        o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
    o_his_cache_t[o] = t
    # print(o_his_cache[o])

with open('test_history_sub.txt', 'wb') as fp:
    pickle.dump([s_history_data_test, s_history_data_test_t], fp)
with open('test_history_ob.txt', 'wb') as fp:
    pickle.dump([o_history_data_test, o_history_data_test_t], fp)
# print(train)
| 10,633 | 32.23125 | 97 | py |
RE-Net | RE-Net-master/data/ICEWS14/get_history_graph.py | import numpy as np
import os
from collections import defaultdict
import pickle
import dgl
import torch
def load_quadruples(inPath, fileName, fileName2=None):
    """Load (head, rel, tail, time) quadruples from one or two files.

    Each line is whitespace-separated ``head rel tail time`` (integer ids).
    Returns an (N, 4) numpy array of rows in file order plus the sorted
    unique timestamps as a numpy array.
    """
    with open(os.path.join(inPath, fileName), 'r') as fr:
        quadrupleList = []
        times = set()
        for line in fr:
            line_split = line.split()
            # On-disk column order is head, rel, tail, time.
            head = int(line_split[0])
            tail = int(line_split[2])
            rel = int(line_split[1])
            time = int(line_split[3])
            quadrupleList.append([head, rel, tail, time])
            times.add(time)
        # times = list(times)
        # times.sort()
    if fileName2 is not None:
        with open(os.path.join(inPath, fileName2), 'r') as fr:
            for line in fr:
                line_split = line.split()
                head = int(line_split[0])
                tail = int(line_split[2])
                rel = int(line_split[1])
                time = int(line_split[3])
                quadrupleList.append([head, rel, tail, time])
                times.add(time)
    times = list(times)
    times.sort()
    return np.asarray(quadrupleList), np.asarray(times)
def get_total_number(inPath, fileName):
    """Return the first two integers of the first line of a stat file
    (entity count, relation count)."""
    with open(os.path.join(inPath, fileName), 'r') as fr:
        for line in fr:
            line_split = line.split()
            # Only the first line matters; return immediately.
            return int(line_split[0]), int(line_split[1])
# NOTE(review): duplicate definition — this shadows the near-identical
# load_quadruples above (only np.array vs np.asarray on the return differs);
# only this second copy is ever used.
def load_quadruples(inPath, fileName, fileName2=None):
    """Load (head, rel, tail, time) quadruples from one or two files.

    Returns an (N, 4) numpy array of rows in file order plus the sorted
    unique timestamps as a numpy array.
    """
    with open(os.path.join(inPath, fileName), 'r') as fr:
        quadrupleList = []
        times = set()
        for line in fr:
            line_split = line.split()
            head = int(line_split[0])
            tail = int(line_split[2])
            rel = int(line_split[1])
            time = int(line_split[3])
            quadrupleList.append([head, rel, tail, time])
            times.add(time)
        # times = list(times)
        # times.sort()
    if fileName2 is not None:
        with open(os.path.join(inPath, fileName2), 'r') as fr:
            for line in fr:
                line_split = line.split()
                head = int(line_split[0])
                tail = int(line_split[2])
                rel = int(line_split[1])
                time = int(line_split[3])
                quadrupleList.append([head, rel, tail, time])
                times.add(time)
    times = list(times)
    times.sort()
    return np.array(quadrupleList), np.asarray(times)
def get_data_with_t(data, tim):
    """Return (head, rel, tail) triples of the rows whose timestamp == tim,
    with the timestamp column removed."""
    x = data[np.where(data[:,3] == tim)].copy()
    x = np.delete(x, 3, 1)  # drops 3rd column
    return x
def comp_deg_norm(g):
    """Per-node inverse in-degree normalisation; zero-degree nodes are
    clamped to degree 1 so their norm is 1.0 instead of dividing by zero."""
    in_deg = g.in_degrees(range(g.number_of_nodes())).float()
    in_deg[torch.nonzero(in_deg == 0).view(-1)] = 1
    norm = 1.0 / in_deg
    return norm
def get_big_graph(data, num_rels):
    """Build a DGL graph for one snapshot of (src, rel, dst) triples.

    Entities present in *data* are relabelled to compact node ids
    0..V-1.  Every triple is inserted in both directions; the reverse
    direction reuses the relation id shifted by *num_rels*.  Node data
    carries the original entity ids ('id') and the in-degree norm
    ('norm'); edge data carries subject-side ('type_s') and object-side
    ('type_o') relation types.  g.ids maps original entity id -> compact
    node index.
    """
    src, rel, dst = data.transpose()
    # Compact the entity ids: uniq_v keeps the originals, edges the remapped ids.
    uniq_v, edges = np.unique((src, dst), return_inverse=True)
    src, dst = np.reshape(edges, (2, -1))
    g = dgl.DGLGraph()
    g.add_nodes(len(uniq_v))
    # Insert each edge in both directions with shifted reverse relation ids.
    src, dst = np.concatenate((src, dst)), np.concatenate((dst, src))
    rel_o = np.concatenate((rel + num_rels, rel))
    rel_s = np.concatenate((rel, rel + num_rels))
    g.add_edges(src, dst)
    norm = comp_deg_norm(g)
    g.ndata.update({'id': torch.from_numpy(uniq_v).long().view(-1, 1),
                    'norm': norm.view(-1, 1)})
    g.edata['type_s'] = torch.LongTensor(rel_s)
    g.edata['type_o'] = torch.LongTensor(rel_o)
    g.ids = {entity_id: index for index, entity_id in enumerate(uniq_v)}
    return g
# ---- Build per-timestamp training graphs and per-fact interaction histories ----
graph_dict_train = {}
train_data, train_times = load_quadruples('', 'train.txt')
test_data, test_times = load_quadruples('', 'test.txt')
# dev_data, dev_times = load_quadruples('', 'valid.txt')
# total_data, _ = load_quadruples('', 'train.txt', 'test.txt')
# Keep at most the last `history_len` timestamp snapshots per entity.
history_len = 10
num_e, num_r = get_total_number('', 'stat.txt')
# Rolling history per entity: s_* when the entity appears as subject,
# o_* as object.  *_his[e] holds up to history_len arrays of
# [rel, other_entity] rows; *_his_t[e] the matching timestamps.
s_his = [[] for _ in range(num_e)]
o_his = [[] for _ in range(num_e)]
s_his_t = [[] for _ in range(num_e)]
o_his_t = [[] for _ in range(num_e)]
# Per-training-fact snapshot of the history as it was before that fact.
s_history_data = [[] for _ in range(len(train_data))]
o_history_data = [[] for _ in range(len(train_data))]
s_history_data_t = [[] for _ in range(len(train_data))]
o_history_data_t = [[] for _ in range(len(train_data))]
e = []
r = []
latest_t = 0
# Caches accumulate the facts of the current (not yet flushed) timestamp.
s_his_cache = [[] for _ in range(num_e)]
o_his_cache = [[] for _ in range(num_e)]
s_his_cache_t = [None for _ in range(num_e)]
o_his_cache_t = [None for _ in range(num_e)]
# One DGL snapshot graph per training timestamp.
for tim in train_times:
    print(str(tim)+'\t'+str(max(train_times)))
    data = get_data_with_t(train_data, tim)
    graph_dict_train[tim] = get_big_graph(data, num_r)
# Walk the training facts in time order; whenever the timestamp changes,
# flush each entity's per-timestamp cache into its rolling history.
# NOTE(review): assumes train_data is sorted by the time column — verify
# against the on-disk file ordering.
for i, train in enumerate(train_data):
    if i % 10000 == 0:
        print("train", i, len(train_data))
    # if i == 10000:
    #     break
    t = train[3]
    if latest_t != t:
        # Timestamp boundary: move cached facts into the bounded history.
        for ee in range(num_e):
            if len(s_his_cache[ee]) != 0:
                if len(s_his[ee]) >= history_len:
                    s_his[ee].pop(0)
                    s_his_t[ee].pop(0)
                s_his[ee].append(s_his_cache[ee].copy())
                s_his_t[ee].append(s_his_cache_t[ee])
                s_his_cache[ee] = []
                s_his_cache_t[ee] = None
            if len(o_his_cache[ee]) != 0:
                if len(o_his[ee]) >= history_len:
                    o_his[ee].pop(0)
                    o_his_t[ee].pop(0)
                o_his[ee].append(o_his_cache[ee].copy())
                o_his_t[ee].append(o_his_cache_t[ee])
                o_his_cache[ee] = []
                o_his_cache_t[ee] = None
        latest_t = t
    s = train[0]
    r = train[1]
    o = train[2]
    # print(s_his[r][s])
    # Record the history state *before* this fact is added to the cache.
    s_history_data[i] = s_his[s].copy()
    o_history_data[i] = o_his[o].copy()
    s_history_data_t[i] = s_his_t[s].copy()
    o_history_data_t[i] = o_his_t[o].copy()
    # print(o_history_data_g[i])
    if len(s_his_cache[s]) == 0:
        s_his_cache[s] = np.array([[r, o]])
    else:
        s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
    s_his_cache_t[s] = t
    if len(o_his_cache[o]) == 0:
        o_his_cache[o] = np.array([[r, s]])
    else:
        o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
    o_his_cache_t[o] = t
    # print(s_history_data[i], s_history_data_g[i])
    # with open('ttt.txt', 'wb') as fp:
    #     pickle.dump(s_history_data_g, fp)
    # print("save")
# Persist the per-timestamp graphs and the per-fact histories.
with open('train_graphs.txt', 'wb') as fp:
    pickle.dump(graph_dict_train, fp)
with open('train_history_sub.txt', 'wb') as fp:
    pickle.dump([s_history_data, s_history_data_t], fp)
with open('train_history_ob.txt', 'wb') as fp:
    pickle.dump([o_history_data, o_history_data_t], fp)
# print(s_history_data[0])
# s_history_data_dev = [[] for _ in range(len(dev_data))]
# o_history_data_dev = [[] for _ in range(len(dev_data))]
# s_history_data_dev_t = [[] for _ in range(len(dev_data))]
# o_history_data_dev_t = [[] for _ in range(len(dev_data))]
#
# for i, dev in enumerate(dev_data):
# if i % 10000 == 0:
# print("valid", i, len(dev_data))
# t = dev[3]
# if latest_t != t:
# for ee in range(num_e):
# if len(s_his_cache[ee]) != 0:
# if len(s_his[ee]) >= history_len:
# s_his[ee].pop(0)
# s_his_t[ee].pop(0)
# s_his_t[ee].append(s_his_cache_t[ee])
# s_his[ee].append(s_his_cache[ee].copy())
# s_his_cache[ee] = []
# s_his_cache_t[ee] = None
# if len(o_his_cache[ee]) != 0:
# if len(o_his[ee]) >= history_len:
# o_his[ee].pop(0)
# o_his_t[ee].pop(0)
#
# o_his_t[ee].append(o_his_cache_t[ee])
# o_his[ee].append(o_his_cache[ee].copy())
#
# o_his_cache[ee] = []
# o_his_cache_t[ee] = None
# latest_t = t
# s = dev[0]
# r = dev[1]
# o = dev[2]
# s_history_data_dev[i] = s_his[s].copy()
# o_history_data_dev[i] = o_his[o].copy()
# s_history_data_dev_t[i] = s_his_t[s].copy()
# o_history_data_dev_t[i] = o_his_t[o].copy()
# if len(s_his_cache[s]) == 0:
# s_his_cache[s] = np.array([[r, o]])
# else:
# s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
# s_his_cache_t[s] = t
#
# if len(o_his_cache[o]) == 0:
# o_his_cache[o] = np.array([[r, s]])
# else:
# o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
# o_his_cache_t[o] = t
#
# # print(o_his_cache[o])
# with open('dev_history_sub.txt', 'wb') as fp:
# pickle.dump([s_history_data_dev, s_history_data_dev_t], fp)
# with open('dev_history_ob.txt', 'wb') as fp:
# pickle.dump([o_history_data_dev, o_history_data_dev_t], fp)
# ---- Same cache/flush pass over the test facts ----
# NOTE(review): the validation pass is commented out above, so the test
# histories here continue directly from the training-set state.
s_history_data_test = [[] for _ in range(len(test_data))]
o_history_data_test = [[] for _ in range(len(test_data))]
s_history_data_test_t = [[] for _ in range(len(test_data))]
o_history_data_test_t = [[] for _ in range(len(test_data))]
for i, test in enumerate(test_data):
    if i % 10000 == 0:
        print("test", i, len(test_data))
    t = test[3]
    if latest_t != t:
        # Timestamp boundary: flush per-entity caches into the histories.
        for ee in range(num_e):
            if len(s_his_cache[ee]) != 0:
                if len(s_his[ee]) >= history_len:
                    s_his[ee].pop(0)
                    s_his_t[ee].pop(0)
                s_his_t[ee].append(s_his_cache_t[ee])
                s_his[ee].append(s_his_cache[ee].copy())
                s_his_cache[ee] = []
                s_his_cache_t[ee] = None
            if len(o_his_cache[ee]) != 0:
                if len(o_his[ee]) >= history_len:
                    o_his[ee].pop(0)
                    o_his_t[ee].pop(0)
                o_his_t[ee].append(o_his_cache_t[ee])
                o_his[ee].append(o_his_cache[ee].copy())
                o_his_cache[ee] = []
                o_his_cache_t[ee] = None
        latest_t = t
    s = test[0]
    r = test[1]
    o = test[2]
    # Snapshot the history state before this fact enters the cache.
    s_history_data_test[i] = s_his[s].copy()
    o_history_data_test[i] = o_his[o].copy()
    s_history_data_test_t[i] = s_his_t[s].copy()
    o_history_data_test_t[i] = o_his_t[o].copy()
    if len(s_his_cache[s]) == 0:
        s_his_cache[s] = np.array([[r, o]])
    else:
        s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
    s_his_cache_t[s] = t
    if len(o_his_cache[o]) == 0:
        o_his_cache[o] = np.array([[r, s]])
    else:
        o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
    o_his_cache_t[o] = t
    # print(o_his_cache[o])
with open('test_history_sub.txt', 'wb') as fp:
    pickle.dump([s_history_data_test, s_history_data_test_t], fp)
with open('test_history_ob.txt', 'wb') as fp:
    pickle.dump([o_history_data_test, o_history_data_test_t], fp)
# print(train)
| 10,737 | 32.661442 | 97 | py |
RE-Net | RE-Net-master/data/ICEWS18/get_history_graph.py | import numpy as np
import os
from collections import defaultdict
import pickle
import dgl
import torch
def load_quadruples(inPath, fileName, fileName2=None):
with open(os.path.join(inPath, fileName), 'r') as fr:
quadrupleList = []
times = set()
for line in fr:
line_split = line.split()
head = int(line_split[0])
tail = int(line_split[2])
rel = int(line_split[1])
time = int(line_split[3])
quadrupleList.append([head, rel, tail, time])
times.add(time)
# times = list(times)
# times.sort()
if fileName2 is not None:
with open(os.path.join(inPath, fileName2), 'r') as fr:
for line in fr:
line_split = line.split()
head = int(line_split[0])
tail = int(line_split[2])
rel = int(line_split[1])
time = int(line_split[3])
quadrupleList.append([head, rel, tail, time])
times.add(time)
times = list(times)
times.sort()
return np.asarray(quadrupleList), np.asarray(times)
def get_total_number(inPath, fileName):
with open(os.path.join(inPath, fileName), 'r') as fr:
for line in fr:
line_split = line.split()
return int(line_split[0]), int(line_split[1])
def load_quadruples(inPath, fileName, fileName2=None):
    """Read (head, rel, tail, time) quadruples from one or two TSV files.

    Args:
        inPath: directory containing the files ('' for the CWD).
        fileName: first data file to read.
        fileName2: optional second data file appended to the same list.

    Returns:
        (quads, times): an (N, 4) int array of quadruples and a sorted
        1-D array of the unique timestamps seen.

    NOTE(review): this is a verbatim duplicate of the function defined
    earlier in the file (which it shadows); the copies should be merged.
    Return unified to np.asarray for consistency with the earlier copy
    (identical result for a fresh list input).
    """
    quadrupleList = []
    times = set()
    # Read one or both files with the same parsing loop.
    names = [fileName] if fileName2 is None else [fileName, fileName2]
    for name in names:
        with open(os.path.join(inPath, name), 'r') as fr:
            for line in fr:
                parts = line.split()
                quadrupleList.append([int(parts[0]), int(parts[1]),
                                      int(parts[2]), int(parts[3])])
                times.add(int(parts[3]))
    return np.asarray(quadrupleList), np.asarray(sorted(times))
def get_data_with_t(data, tim):
    """Return the (head, rel, tail) triples whose timestamp equals *tim*.

    Args:
        data: (N, 4) integer array of [head, rel, tail, time] rows.
        tim: timestamp value to select.

    Returns:
        (M, 3) copy of the matching rows with the time column removed.
    """
    x = data[np.where(data[:,3] == tim)].copy()
    x = np.delete(x, 3, 1)  # drop column index 3 (the time column), keeping [head, rel, tail]
    return x
def comp_deg_norm(g):
in_deg = g.in_degrees(range(g.number_of_nodes())).float()
in_deg[torch.nonzero(in_deg == 0).view(-1)] = 1
norm = 1.0 / in_deg
return norm
def get_big_graph(data, num_rels):
src, rel, dst = data.transpose()
uniq_v, edges = np.unique((src, dst), return_inverse=True)
src, dst = np.reshape(edges, (2, -1))
g = dgl.DGLGraph()
g.add_nodes(len(uniq_v))
src, dst = np.concatenate((src, dst)), np.concatenate((dst, src))
rel_o = np.concatenate((rel + num_rels, rel))
rel_s = np.concatenate((rel, rel + num_rels))
g.add_edges(src, dst)
norm = comp_deg_norm(g)
g.ndata.update({'id': torch.from_numpy(uniq_v).long().view(-1, 1), 'norm': norm.view(-1, 1)})
g.edata['type_s'] = torch.LongTensor(rel_s)
g.edata['type_o'] = torch.LongTensor(rel_o)
g.ids = {}
idx = 0
for id in uniq_v:
g.ids[id] = idx
idx += 1
return g
graph_dict_train = {}
train_data, train_times = load_quadruples('', 'train.txt')
test_data, test_times = load_quadruples('', 'test.txt')
dev_data, dev_times = load_quadruples('', 'valid.txt')
# total_data, _ = load_quadruples('', 'train.txt', 'test.txt')
history_len = 10
num_e, num_r = get_total_number('', 'stat.txt')
s_his = [[] for _ in range(num_e)]
o_his = [[] for _ in range(num_e)]
s_his_t = [[] for _ in range(num_e)]
o_his_t = [[] for _ in range(num_e)]
s_history_data = [[] for _ in range(len(train_data))]
o_history_data = [[] for _ in range(len(train_data))]
s_history_data_t = [[] for _ in range(len(train_data))]
o_history_data_t = [[] for _ in range(len(train_data))]
e = []
r = []
latest_t = 0
s_his_cache = [[] for _ in range(num_e)]
o_his_cache = [[] for _ in range(num_e)]
s_his_cache_t = [None for _ in range(num_e)]
o_his_cache_t = [None for _ in range(num_e)]
for tim in train_times:
print(str(tim)+'\t'+str(max(train_times)))
data = get_data_with_t(train_data, tim)
graph_dict_train[tim] = get_big_graph(data, num_r)
for i, train in enumerate(train_data):
if i % 10000 == 0:
print("train", i, len(train_data))
# if i == 10000:
# break
t = train[3]
if latest_t != t:
for ee in range(num_e):
if len(s_his_cache[ee]) != 0:
if len(s_his[ee]) >= history_len:
s_his[ee].pop(0)
s_his_t[ee].pop(0)
s_his[ee].append(s_his_cache[ee].copy())
s_his_t[ee].append(s_his_cache_t[ee])
s_his_cache[ee] = []
s_his_cache_t[ee] = None
if len(o_his_cache[ee]) != 0:
if len(o_his[ee]) >= history_len:
o_his[ee].pop(0)
o_his_t[ee].pop(0)
o_his[ee].append(o_his_cache[ee].copy())
o_his_t[ee].append(o_his_cache_t[ee])
o_his_cache[ee] = []
o_his_cache_t[ee] = None
latest_t = t
s = train[0]
r = train[1]
o = train[2]
# print(s_his[r][s])
s_history_data[i] = s_his[s].copy()
o_history_data[i] = o_his[o].copy()
s_history_data_t[i] = s_his_t[s].copy()
o_history_data_t[i] = o_his_t[o].copy()
# print(o_history_data_g[i])
if len(s_his_cache[s]) == 0:
s_his_cache[s] = np.array([[r, o]])
else:
s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
s_his_cache_t[s] = t
if len(o_his_cache[o]) == 0:
o_his_cache[o] = np.array([[r, s]])
else:
o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
o_his_cache_t[o] = t
# print(s_history_data[i], s_history_data_g[i])
# with open('ttt.txt', 'wb') as fp:
# pickle.dump(s_history_data_g, fp)
# print("save")
with open('train_graphs.txt', 'wb') as fp:
pickle.dump(graph_dict_train, fp)
with open('train_history_sub.txt', 'wb') as fp:
pickle.dump([s_history_data, s_history_data_t], fp)
with open('train_history_ob.txt', 'wb') as fp:
pickle.dump([o_history_data, o_history_data_t], fp)
# print(s_history_data[0])
s_history_data_dev = [[] for _ in range(len(dev_data))]
o_history_data_dev = [[] for _ in range(len(dev_data))]
s_history_data_dev_t = [[] for _ in range(len(dev_data))]
o_history_data_dev_t = [[] for _ in range(len(dev_data))]
for i, dev in enumerate(dev_data):
if i % 10000 == 0:
print("valid", i, len(dev_data))
t = dev[3]
if latest_t != t:
for ee in range(num_e):
if len(s_his_cache[ee]) != 0:
if len(s_his[ee]) >= history_len:
s_his[ee].pop(0)
s_his_t[ee].pop(0)
s_his_t[ee].append(s_his_cache_t[ee])
s_his[ee].append(s_his_cache[ee].copy())
s_his_cache[ee] = []
s_his_cache_t[ee] = None
if len(o_his_cache[ee]) != 0:
if len(o_his[ee]) >= history_len:
o_his[ee].pop(0)
o_his_t[ee].pop(0)
o_his_t[ee].append(o_his_cache_t[ee])
o_his[ee].append(o_his_cache[ee].copy())
o_his_cache[ee] = []
o_his_cache_t[ee] = None
latest_t = t
s = dev[0]
r = dev[1]
o = dev[2]
s_history_data_dev[i] = s_his[s].copy()
o_history_data_dev[i] = o_his[o].copy()
s_history_data_dev_t[i] = s_his_t[s].copy()
o_history_data_dev_t[i] = o_his_t[o].copy()
if len(s_his_cache[s]) == 0:
s_his_cache[s] = np.array([[r, o]])
else:
s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
s_his_cache_t[s] = t
if len(o_his_cache[o]) == 0:
o_his_cache[o] = np.array([[r, s]])
else:
o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
o_his_cache_t[o] = t
# print(o_his_cache[o])
with open('dev_history_sub.txt', 'wb') as fp:
pickle.dump([s_history_data_dev, s_history_data_dev_t], fp)
with open('dev_history_ob.txt', 'wb') as fp:
pickle.dump([o_history_data_dev, o_history_data_dev_t], fp)
s_history_data_test = [[] for _ in range(len(test_data))]
o_history_data_test = [[] for _ in range(len(test_data))]
s_history_data_test_t = [[] for _ in range(len(test_data))]
o_history_data_test_t = [[] for _ in range(len(test_data))]
for i, test in enumerate(test_data):
if i % 10000 == 0:
print("test", i, len(test_data))
t = test[3]
if latest_t != t:
for ee in range(num_e):
if len(s_his_cache[ee]) != 0:
if len(s_his[ee]) >= history_len:
s_his[ee].pop(0)
s_his_t[ee].pop(0)
s_his_t[ee].append(s_his_cache_t[ee])
s_his[ee].append(s_his_cache[ee].copy())
s_his_cache[ee] = []
s_his_cache_t[ee] = None
if len(o_his_cache[ee]) != 0:
if len(o_his[ee]) >= history_len:
o_his[ee].pop(0)
o_his_t[ee].pop(0)
o_his_t[ee].append(o_his_cache_t[ee])
o_his[ee].append(o_his_cache[ee].copy())
o_his_cache[ee] = []
o_his_cache_t[ee] = None
latest_t = t
s = test[0]
r = test[1]
o = test[2]
s_history_data_test[i] = s_his[s].copy()
o_history_data_test[i] = o_his[o].copy()
s_history_data_test_t[i] = s_his_t[s].copy()
o_history_data_test_t[i] = o_his_t[o].copy()
if len(s_his_cache[s]) == 0:
s_his_cache[s] = np.array([[r, o]])
else:
s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
s_his_cache_t[s] = t
if len(o_his_cache[o]) == 0:
o_his_cache[o] = np.array([[r, s]])
else:
o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
o_his_cache_t[o] = t
# print(o_his_cache[o])
with open('test_history_sub.txt', 'wb') as fp:
pickle.dump([s_history_data_test, s_history_data_test_t], fp)
with open('test_history_ob.txt', 'wb') as fp:
pickle.dump([o_history_data_test, o_history_data_test_t], fp)
# print(train)
| 10,633 | 32.23125 | 97 | py |
RE-Net | RE-Net-master/data/WIKI/get_history_graph.py | import numpy as np
import os
from collections import defaultdict
import pickle
import dgl
import torch
def load_quadruples(inPath, fileName, fileName2=None):
with open(os.path.join(inPath, fileName), 'r') as fr:
quadrupleList = []
times = set()
for line in fr:
line_split = line.split()
head = int(line_split[0])
tail = int(line_split[2])
rel = int(line_split[1])
time = int(line_split[3])
quadrupleList.append([head, rel, tail, time])
times.add(time)
# times = list(times)
# times.sort()
if fileName2 is not None:
with open(os.path.join(inPath, fileName2), 'r') as fr:
for line in fr:
line_split = line.split()
head = int(line_split[0])
tail = int(line_split[2])
rel = int(line_split[1])
time = int(line_split[3])
quadrupleList.append([head, rel, tail, time])
times.add(time)
times = list(times)
times.sort()
return np.asarray(quadrupleList), np.asarray(times)
def get_total_number(inPath, fileName):
with open(os.path.join(inPath, fileName), 'r') as fr:
for line in fr:
line_split = line.split()
return int(line_split[0]), int(line_split[1])
def load_quadruples(inPath, fileName, fileName2=None):
    """Read (head, rel, tail, time) quadruples from one or two TSV files.

    Args:
        inPath: directory containing the files ('' for the CWD).
        fileName: first data file to read.
        fileName2: optional second data file appended to the same list.

    Returns:
        (quads, times): an (N, 4) int array of quadruples and a sorted
        1-D array of the unique timestamps seen.

    NOTE(review): this is a verbatim duplicate of the function defined
    earlier in the file (which it shadows); the copies should be merged.
    Return unified to np.asarray for consistency with the earlier copy
    (identical result for a fresh list input).
    """
    quadrupleList = []
    times = set()
    # Read one or both files with the same parsing loop.
    names = [fileName] if fileName2 is None else [fileName, fileName2]
    for name in names:
        with open(os.path.join(inPath, name), 'r') as fr:
            for line in fr:
                parts = line.split()
                quadrupleList.append([int(parts[0]), int(parts[1]),
                                      int(parts[2]), int(parts[3])])
                times.add(int(parts[3]))
    return np.asarray(quadrupleList), np.asarray(sorted(times))
def get_data_with_t(data, tim):
    """Return the (head, rel, tail) triples whose timestamp equals *tim*.

    Args:
        data: (N, 4) integer array of [head, rel, tail, time] rows.
        tim: timestamp value to select.

    Returns:
        (M, 3) copy of the matching rows with the time column removed.
    """
    x = data[np.where(data[:,3] == tim)].copy()
    x = np.delete(x, 3, 1)  # drop column index 3 (the time column), keeping [head, rel, tail]
    return x
def comp_deg_norm(g):
in_deg = g.in_degrees(range(g.number_of_nodes())).float()
in_deg[torch.nonzero(in_deg == 0).view(-1)] = 1
norm = 1.0 / in_deg
return norm
def get_big_graph(data, num_rels):
src, rel, dst = data.transpose()
uniq_v, edges = np.unique((src, dst), return_inverse=True)
src, dst = np.reshape(edges, (2, -1))
g = dgl.DGLGraph()
g.add_nodes(len(uniq_v))
src, dst = np.concatenate((src, dst)), np.concatenate((dst, src))
rel_o = np.concatenate((rel + num_rels, rel))
rel_s = np.concatenate((rel, rel + num_rels))
g.add_edges(src, dst)
norm = comp_deg_norm(g)
g.ndata.update({'id': torch.from_numpy(uniq_v).long().view(-1, 1), 'norm': norm.view(-1, 1)})
g.edata['type_s'] = torch.LongTensor(rel_s)
g.edata['type_o'] = torch.LongTensor(rel_o)
g.ids = {}
idx = 0
for id in uniq_v:
g.ids[id] = idx
idx += 1
return g
graph_dict_train = {}
train_data, train_times = load_quadruples('', 'train.txt')
test_data, test_times = load_quadruples('', 'test.txt')
dev_data, dev_times = load_quadruples('', 'valid.txt')
# total_data, _ = load_quadruples('', 'train.txt', 'test.txt')
history_len = 10
num_e, num_r = get_total_number('', 'stat.txt')
s_his = [[] for _ in range(num_e)]
o_his = [[] for _ in range(num_e)]
s_his_t = [[] for _ in range(num_e)]
o_his_t = [[] for _ in range(num_e)]
s_history_data = [[] for _ in range(len(train_data))]
o_history_data = [[] for _ in range(len(train_data))]
s_history_data_t = [[] for _ in range(len(train_data))]
o_history_data_t = [[] for _ in range(len(train_data))]
e = []
r = []
latest_t = 0
s_his_cache = [[] for _ in range(num_e)]
o_his_cache = [[] for _ in range(num_e)]
s_his_cache_t = [None for _ in range(num_e)]
o_his_cache_t = [None for _ in range(num_e)]
for tim in train_times:
print(str(tim)+'\t'+str(max(train_times)))
data = get_data_with_t(train_data, tim)
graph_dict_train[tim] = get_big_graph(data, num_r)
for i, train in enumerate(train_data):
if i % 10000 == 0:
print("train", i, len(train_data))
# if i == 10000:
# break
t = train[3]
if latest_t != t:
for ee in range(num_e):
if len(s_his_cache[ee]) != 0:
if len(s_his[ee]) >= history_len:
s_his[ee].pop(0)
s_his_t[ee].pop(0)
s_his[ee].append(s_his_cache[ee].copy())
s_his_t[ee].append(s_his_cache_t[ee])
s_his_cache[ee] = []
s_his_cache_t[ee] = None
if len(o_his_cache[ee]) != 0:
if len(o_his[ee]) >= history_len:
o_his[ee].pop(0)
o_his_t[ee].pop(0)
o_his[ee].append(o_his_cache[ee].copy())
o_his_t[ee].append(o_his_cache_t[ee])
o_his_cache[ee] = []
o_his_cache_t[ee] = None
latest_t = t
s = train[0]
r = train[1]
o = train[2]
# print(s_his[r][s])
s_history_data[i] = s_his[s].copy()
o_history_data[i] = o_his[o].copy()
s_history_data_t[i] = s_his_t[s].copy()
o_history_data_t[i] = o_his_t[o].copy()
# print(o_history_data_g[i])
if len(s_his_cache[s]) == 0:
s_his_cache[s] = np.array([[r, o]])
else:
s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
s_his_cache_t[s] = t
if len(o_his_cache[o]) == 0:
o_his_cache[o] = np.array([[r, s]])
else:
o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
o_his_cache_t[o] = t
# print(s_history_data[i], s_history_data_g[i])
# with open('ttt.txt', 'wb') as fp:
# pickle.dump(s_history_data_g, fp)
# print("save")
with open('train_graphs.txt', 'wb') as fp:
pickle.dump(graph_dict_train, fp)
with open('train_history_sub.txt', 'wb') as fp:
pickle.dump([s_history_data, s_history_data_t], fp)
with open('train_history_ob.txt', 'wb') as fp:
pickle.dump([o_history_data, o_history_data_t], fp)
# print(s_history_data[0])
s_history_data_dev = [[] for _ in range(len(dev_data))]
o_history_data_dev = [[] for _ in range(len(dev_data))]
s_history_data_dev_t = [[] for _ in range(len(dev_data))]
o_history_data_dev_t = [[] for _ in range(len(dev_data))]
for i, dev in enumerate(dev_data):
if i % 10000 == 0:
print("valid", i, len(dev_data))
t = dev[3]
if latest_t != t:
for ee in range(num_e):
if len(s_his_cache[ee]) != 0:
if len(s_his[ee]) >= history_len:
s_his[ee].pop(0)
s_his_t[ee].pop(0)
s_his_t[ee].append(s_his_cache_t[ee])
s_his[ee].append(s_his_cache[ee].copy())
s_his_cache[ee] = []
s_his_cache_t[ee] = None
if len(o_his_cache[ee]) != 0:
if len(o_his[ee]) >= history_len:
o_his[ee].pop(0)
o_his_t[ee].pop(0)
o_his_t[ee].append(o_his_cache_t[ee])
o_his[ee].append(o_his_cache[ee].copy())
o_his_cache[ee] = []
o_his_cache_t[ee] = None
latest_t = t
s = dev[0]
r = dev[1]
o = dev[2]
s_history_data_dev[i] = s_his[s].copy()
o_history_data_dev[i] = o_his[o].copy()
s_history_data_dev_t[i] = s_his_t[s].copy()
o_history_data_dev_t[i] = o_his_t[o].copy()
if len(s_his_cache[s]) == 0:
s_his_cache[s] = np.array([[r, o]])
else:
s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
s_his_cache_t[s] = t
if len(o_his_cache[o]) == 0:
o_his_cache[o] = np.array([[r, s]])
else:
o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
o_his_cache_t[o] = t
# print(o_his_cache[o])
with open('dev_history_sub.txt', 'wb') as fp:
pickle.dump([s_history_data_dev, s_history_data_dev_t], fp)
with open('dev_history_ob.txt', 'wb') as fp:
pickle.dump([o_history_data_dev, o_history_data_dev_t], fp)
s_history_data_test = [[] for _ in range(len(test_data))]
o_history_data_test = [[] for _ in range(len(test_data))]
s_history_data_test_t = [[] for _ in range(len(test_data))]
o_history_data_test_t = [[] for _ in range(len(test_data))]
for i, test in enumerate(test_data):
if i % 10000 == 0:
print("test", i, len(test_data))
t = test[3]
if latest_t != t:
for ee in range(num_e):
if len(s_his_cache[ee]) != 0:
if len(s_his[ee]) >= history_len:
s_his[ee].pop(0)
s_his_t[ee].pop(0)
s_his_t[ee].append(s_his_cache_t[ee])
s_his[ee].append(s_his_cache[ee].copy())
s_his_cache[ee] = []
s_his_cache_t[ee] = None
if len(o_his_cache[ee]) != 0:
if len(o_his[ee]) >= history_len:
o_his[ee].pop(0)
o_his_t[ee].pop(0)
o_his_t[ee].append(o_his_cache_t[ee])
o_his[ee].append(o_his_cache[ee].copy())
o_his_cache[ee] = []
o_his_cache_t[ee] = None
latest_t = t
s = test[0]
r = test[1]
o = test[2]
s_history_data_test[i] = s_his[s].copy()
o_history_data_test[i] = o_his[o].copy()
s_history_data_test_t[i] = s_his_t[s].copy()
o_history_data_test_t[i] = o_his_t[o].copy()
if len(s_his_cache[s]) == 0:
s_his_cache[s] = np.array([[r, o]])
else:
s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
s_his_cache_t[s] = t
if len(o_his_cache[o]) == 0:
o_his_cache[o] = np.array([[r, s]])
else:
o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
o_his_cache_t[o] = t
# print(o_his_cache[o])
with open('test_history_sub.txt', 'wb') as fp:
pickle.dump([s_history_data_test, s_history_data_test_t], fp)
with open('test_history_ob.txt', 'wb') as fp:
pickle.dump([o_history_data_test, o_history_data_test_t], fp)
# print(train)
| 10,633 | 32.23125 | 97 | py |
RE-Net | RE-Net-master/data/GDELT/get_history_graph.py | import numpy as np
import os
from collections import defaultdict
import pickle
import dgl
import torch
def load_quadruples(inPath, fileName, fileName2=None):
with open(os.path.join(inPath, fileName), 'r') as fr:
quadrupleList = []
times = set()
for line in fr:
line_split = line.split()
head = int(line_split[0])
tail = int(line_split[2])
rel = int(line_split[1])
time = int(line_split[3])
quadrupleList.append([head, rel, tail, time])
times.add(time)
# times = list(times)
# times.sort()
if fileName2 is not None:
with open(os.path.join(inPath, fileName2), 'r') as fr:
for line in fr:
line_split = line.split()
head = int(line_split[0])
tail = int(line_split[2])
rel = int(line_split[1])
time = int(line_split[3])
quadrupleList.append([head, rel, tail, time])
times.add(time)
times = list(times)
times.sort()
return np.asarray(quadrupleList), np.asarray(times)
def get_total_number(inPath, fileName):
with open(os.path.join(inPath, fileName), 'r') as fr:
for line in fr:
line_split = line.split()
return int(line_split[0]), int(line_split[1])
def load_quadruples(inPath, fileName, fileName2=None):
    """Read (head, rel, tail, time) quadruples from one or two TSV files.

    Args:
        inPath: directory containing the files ('' for the CWD).
        fileName: first data file to read.
        fileName2: optional second data file appended to the same list.

    Returns:
        (quads, times): an (N, 4) int array of quadruples and a sorted
        1-D array of the unique timestamps seen.

    NOTE(review): this is a verbatim duplicate of the function defined
    earlier in the file (which it shadows); the copies should be merged.
    Return unified to np.asarray for consistency with the earlier copy
    (identical result for a fresh list input).
    """
    quadrupleList = []
    times = set()
    # Read one or both files with the same parsing loop.
    names = [fileName] if fileName2 is None else [fileName, fileName2]
    for name in names:
        with open(os.path.join(inPath, name), 'r') as fr:
            for line in fr:
                parts = line.split()
                quadrupleList.append([int(parts[0]), int(parts[1]),
                                      int(parts[2]), int(parts[3])])
                times.add(int(parts[3]))
    return np.asarray(quadrupleList), np.asarray(sorted(times))
def get_data_with_t(data, tim):
    """Return the (head, rel, tail) triples whose timestamp equals *tim*.

    Args:
        data: (N, 4) integer array of [head, rel, tail, time] rows.
        tim: timestamp value to select.

    Returns:
        (M, 3) copy of the matching rows with the time column removed.
    """
    x = data[np.where(data[:,3] == tim)].copy()
    x = np.delete(x, 3, 1)  # drop column index 3 (the time column), keeping [head, rel, tail]
    return x
def comp_deg_norm(g):
in_deg = g.in_degrees(range(g.number_of_nodes())).float()
in_deg[torch.nonzero(in_deg == 0).view(-1)] = 1
norm = 1.0 / in_deg
return norm
def get_big_graph(data, num_rels):
src, rel, dst = data.transpose()
uniq_v, edges = np.unique((src, dst), return_inverse=True)
src, dst = np.reshape(edges, (2, -1))
g = dgl.DGLGraph()
g.add_nodes(len(uniq_v))
src, dst = np.concatenate((src, dst)), np.concatenate((dst, src))
rel_o = np.concatenate((rel + num_rels, rel))
rel_s = np.concatenate((rel, rel + num_rels))
g.add_edges(src, dst)
norm = comp_deg_norm(g)
g.ndata.update({'id': torch.from_numpy(uniq_v).long().view(-1, 1), 'norm': norm.view(-1, 1)})
g.edata['type_s'] = torch.LongTensor(rel_s)
g.edata['type_o'] = torch.LongTensor(rel_o)
g.ids = {}
idx = 0
for id in uniq_v:
g.ids[id] = idx
idx += 1
return g
graph_dict_train = {}
train_data, train_times = load_quadruples('', 'train.txt')
test_data, test_times = load_quadruples('', 'test.txt')
dev_data, dev_times = load_quadruples('', 'valid.txt')
# total_data, _ = load_quadruples('', 'train.txt', 'test.txt')
history_len = 10
num_e, num_r = get_total_number('', 'stat.txt')
s_his = [[] for _ in range(num_e)]
o_his = [[] for _ in range(num_e)]
s_his_t = [[] for _ in range(num_e)]
o_his_t = [[] for _ in range(num_e)]
s_history_data = [[] for _ in range(len(train_data))]
o_history_data = [[] for _ in range(len(train_data))]
s_history_data_t = [[] for _ in range(len(train_data))]
o_history_data_t = [[] for _ in range(len(train_data))]
e = []
r = []
latest_t = 0
s_his_cache = [[] for _ in range(num_e)]
o_his_cache = [[] for _ in range(num_e)]
s_his_cache_t = [None for _ in range(num_e)]
o_his_cache_t = [None for _ in range(num_e)]
for tim in train_times:
print(str(tim)+'\t'+str(max(train_times)))
data = get_data_with_t(train_data, tim)
graph_dict_train[tim] = get_big_graph(data, num_r)
for i, train in enumerate(train_data):
if i % 10000 == 0:
print("train", i, len(train_data))
# if i == 10000:
# break
t = train[3]
if latest_t != t:
for ee in range(num_e):
if len(s_his_cache[ee]) != 0:
if len(s_his[ee]) >= history_len:
s_his[ee].pop(0)
s_his_t[ee].pop(0)
s_his[ee].append(s_his_cache[ee].copy())
s_his_t[ee].append(s_his_cache_t[ee])
s_his_cache[ee] = []
s_his_cache_t[ee] = None
if len(o_his_cache[ee]) != 0:
if len(o_his[ee]) >= history_len:
o_his[ee].pop(0)
o_his_t[ee].pop(0)
o_his[ee].append(o_his_cache[ee].copy())
o_his_t[ee].append(o_his_cache_t[ee])
o_his_cache[ee] = []
o_his_cache_t[ee] = None
latest_t = t
s = train[0]
r = train[1]
o = train[2]
# print(s_his[r][s])
s_history_data[i] = s_his[s].copy()
o_history_data[i] = o_his[o].copy()
s_history_data_t[i] = s_his_t[s].copy()
o_history_data_t[i] = o_his_t[o].copy()
# print(o_history_data_g[i])
if len(s_his_cache[s]) == 0:
s_his_cache[s] = np.array([[r, o]])
else:
s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
s_his_cache_t[s] = t
if len(o_his_cache[o]) == 0:
o_his_cache[o] = np.array([[r, s]])
else:
o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
o_his_cache_t[o] = t
# print(s_history_data[i], s_history_data_g[i])
# with open('ttt.txt', 'wb') as fp:
# pickle.dump(s_history_data_g, fp)
# print("save")
with open('train_graphs.txt', 'wb') as fp:
pickle.dump(graph_dict_train, fp)
with open('train_history_sub.txt', 'wb') as fp:
pickle.dump([s_history_data, s_history_data_t], fp)
with open('train_history_ob.txt', 'wb') as fp:
pickle.dump([o_history_data, o_history_data_t], fp)
# print(s_history_data[0])
s_history_data_dev = [[] for _ in range(len(dev_data))]
o_history_data_dev = [[] for _ in range(len(dev_data))]
s_history_data_dev_t = [[] for _ in range(len(dev_data))]
o_history_data_dev_t = [[] for _ in range(len(dev_data))]
for i, dev in enumerate(dev_data):
if i % 10000 == 0:
print("valid", i, len(dev_data))
t = dev[3]
if latest_t != t:
for ee in range(num_e):
if len(s_his_cache[ee]) != 0:
if len(s_his[ee]) >= history_len:
s_his[ee].pop(0)
s_his_t[ee].pop(0)
s_his_t[ee].append(s_his_cache_t[ee])
s_his[ee].append(s_his_cache[ee].copy())
s_his_cache[ee] = []
s_his_cache_t[ee] = None
if len(o_his_cache[ee]) != 0:
if len(o_his[ee]) >= history_len:
o_his[ee].pop(0)
o_his_t[ee].pop(0)
o_his_t[ee].append(o_his_cache_t[ee])
o_his[ee].append(o_his_cache[ee].copy())
o_his_cache[ee] = []
o_his_cache_t[ee] = None
latest_t = t
s = dev[0]
r = dev[1]
o = dev[2]
s_history_data_dev[i] = s_his[s].copy()
o_history_data_dev[i] = o_his[o].copy()
s_history_data_dev_t[i] = s_his_t[s].copy()
o_history_data_dev_t[i] = o_his_t[o].copy()
if len(s_his_cache[s]) == 0:
s_his_cache[s] = np.array([[r, o]])
else:
s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
s_his_cache_t[s] = t
if len(o_his_cache[o]) == 0:
o_his_cache[o] = np.array([[r, s]])
else:
o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
o_his_cache_t[o] = t
# print(o_his_cache[o])
with open('dev_history_sub.txt', 'wb') as fp:
pickle.dump([s_history_data_dev, s_history_data_dev_t], fp)
with open('dev_history_ob.txt', 'wb') as fp:
pickle.dump([o_history_data_dev, o_history_data_dev_t], fp)
s_history_data_test = [[] for _ in range(len(test_data))]
o_history_data_test = [[] for _ in range(len(test_data))]
s_history_data_test_t = [[] for _ in range(len(test_data))]
o_history_data_test_t = [[] for _ in range(len(test_data))]
for i, test in enumerate(test_data):
if i % 10000 == 0:
print("test", i, len(test_data))
t = test[3]
if latest_t != t:
for ee in range(num_e):
if len(s_his_cache[ee]) != 0:
if len(s_his[ee]) >= history_len:
s_his[ee].pop(0)
s_his_t[ee].pop(0)
s_his_t[ee].append(s_his_cache_t[ee])
s_his[ee].append(s_his_cache[ee].copy())
s_his_cache[ee] = []
s_his_cache_t[ee] = None
if len(o_his_cache[ee]) != 0:
if len(o_his[ee]) >= history_len:
o_his[ee].pop(0)
o_his_t[ee].pop(0)
o_his_t[ee].append(o_his_cache_t[ee])
o_his[ee].append(o_his_cache[ee].copy())
o_his_cache[ee] = []
o_his_cache_t[ee] = None
latest_t = t
s = test[0]
r = test[1]
o = test[2]
s_history_data_test[i] = s_his[s].copy()
o_history_data_test[i] = o_his[o].copy()
s_history_data_test_t[i] = s_his_t[s].copy()
o_history_data_test_t[i] = o_his_t[o].copy()
if len(s_his_cache[s]) == 0:
s_his_cache[s] = np.array([[r, o]])
else:
s_his_cache[s] = np.concatenate((s_his_cache[s], [[r, o]]), axis=0)
s_his_cache_t[s] = t
if len(o_his_cache[o]) == 0:
o_his_cache[o] = np.array([[r, s]])
else:
o_his_cache[o] = np.concatenate((o_his_cache[o], [[r, s]]), axis=0)
o_his_cache_t[o] = t
# print(o_his_cache[o])
with open('test_history_sub.txt', 'wb') as fp:
pickle.dump([s_history_data_test, s_history_data_test_t], fp)
with open('test_history_ob.txt', 'wb') as fp:
pickle.dump([o_history_data_test, o_history_data_test_t], fp)
# print(train)
| 10,633 | 32.23125 | 97 | py |
OPT | OPT-main/src/main.py | import numpy as np
import os
import collections
from os.path import dirname, abspath
from copy import deepcopy
from sacred import Experiment, SETTINGS
from sacred.observers import FileStorageObserver
from sacred.utils import apply_backspaces_and_linefeeds
import sys
import torch as th
from utils.logging import get_logger
import yaml
from run import run
SETTINGS['CAPTURE_MODE'] = "fd" # set to "no" if you want to see stdout/stderr in console
logger = get_logger()
ex = Experiment("pymarl")
ex.logger = logger
ex.captured_out_filter = apply_backspaces_and_linefeeds
results_path = os.path.join(dirname(dirname(abspath(__file__))), "results")
@ex.main
def my_main(_run, _config, _log):
    """Sacred entry point: seed all RNG sources from the config, then launch the run."""
    # Operate on a copy so the sacred-provided config dict is not mutated.
    config = config_copy(_config)
    seed = config["seed"]
    np.random.seed(seed)
    th.manual_seed(seed)
    config['env_args']['seed'] = seed
    # Hand control to the training framework.
    run(_run, config, _log)
def _get_config(params, arg_name, subfolder):
config_name = None
for _i, _v in enumerate(params):
if _v.split("=")[0] == arg_name:
config_name = _v.split("=")[1]
del params[_i]
break
if config_name is not None:
with open(os.path.join(os.path.dirname(__file__), "config", subfolder, "{}.yaml".format(config_name)), "r") as f:
try:
config_dict = yaml.load(f)
except yaml.YAMLError as exc:
assert False, "{}.yaml error: {}".format(config_name, exc)
return config_dict
def recursive_dict_update(d, u):
    """Recursively merge mapping ``u`` into dict ``d`` (in place) and return ``d``.

    Nested mappings are merged key-by-key; non-mapping values in ``u``
    overwrite the corresponding entries in ``d``.
    """
    # collections.Mapping was removed in Python 3.10; collections.abc.Mapping
    # is the correct home for the ABC.
    from collections.abc import Mapping
    for k, v in u.items():
        if isinstance(v, Mapping):
            d[k] = recursive_dict_update(d.get(k, {}), v)
        else:
            d[k] = v
    return d
def config_copy(config):
    """Return a deep copy of ``config``, recursing through dicts and lists."""
    if isinstance(config, dict):
        copied = {}
        for key, value in config.items():
            copied[key] = config_copy(value)
        return copied
    if isinstance(config, list):
        return [config_copy(item) for item in config]
    # Leaf value: fall back to a generic deep copy.
    return deepcopy(config)
if __name__ == '__main__':
    params = deepcopy(sys.argv)
    # Get the defaults from default.yaml
    with open(os.path.join(os.path.dirname(__file__), "config", "default.yaml"), "r") as f:
        try:
            # safe_load avoids arbitrary object construction and the
            # missing-Loader TypeError raised by yaml.load on PyYAML >= 6.
            config_dict = yaml.safe_load(f)
        except yaml.YAMLError as exc:
            assert False, "default.yaml error: {}".format(exc)
    # Load algorithm and env base configs, then overlay them on the defaults
    # (algorithm config wins over env config, which wins over default.yaml).
    env_config = _get_config(params, "--env-config", "envs")
    alg_config = _get_config(params, "--config", "algs")
    config_dict = recursive_dict_update(config_dict, env_config)
    config_dict = recursive_dict_update(config_dict, alg_config)
    # Fill in optional flags that older configs may not define.
    if 'use_token' not in config_dict:
        config_dict["use_token"] = False
    if 'entity_scheme' in config_dict["env_args"]:
        config_dict["entity_scheme"] = config_dict["env_args"]["entity_scheme"]
    else:
        config_dict["entity_scheme"] = False
    # now add all the config to sacred
    ex.add_config(config_dict)
    # Save to disk by default for sacred
    logger.info("Saving to FileStorageObserver in results/sacred.")
    file_obs_path = os.path.join(results_path, "sacred")
    ex.observers.append(FileStorageObserver.create(file_obs_path))
    ex.run_commandline(params)
| 3,318 | 29.731481 | 121 | py |
OPT | OPT-main/src/run.py | import datetime
import os
import pprint
import time
import threading
import torch as th
from types import SimpleNamespace as SN
from utils.logging import Logger
from utils.timehelper import time_left, time_str
from os.path import dirname, abspath
from learners import REGISTRY as le_REGISTRY
from runners import REGISTRY as r_REGISTRY
from controllers import REGISTRY as mac_REGISTRY
from components.episode_buffer import ReplayBuffer
from components.transforms import OneHot
def run(_run, _config, _log):
    """Top-level run: sanitize the config, set up logging, train, then force-exit.

    Args:
        _run: sacred Run object, forwarded to the Logger for metric recording.
        _config: experiment configuration dict (already seeded by the caller).
        _log: standard logger used for console output.
    """
    # check args sanity
    _config = args_sanity_check(_config, _log)
    # Expose config entries as attributes for convenient dotted access.
    args = SN(**_config)
    args.device = "cuda" if args.use_cuda else "cpu"
    # setup loggers
    logger = Logger(_log)
    _log.info("Experiment Parameters:")
    experiment_params = pprint.pformat(_config,
                                       indent=4,
                                       width=1)
    _log.info("\n\n" + experiment_params + "\n")
    # configure tensorboard logger; the timestamped token makes each run's
    # log directory unique
    unique_token = "{}__{}".format(args.name, datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S"))
    args.unique_token = unique_token
    if args.use_tensorboard:
        tb_logs_direc = os.path.join(dirname(dirname(abspath(__file__))), "results", "tb_logs")
        tb_exp_direc = os.path.join(tb_logs_direc, "{}").format(unique_token)
        logger.setup_tb(tb_exp_direc)
    # sacred is on by default
    logger.setup_sacred(_run)
    # Run and train
    run_sequential(args=args, logger=logger)
    # Clean up after finishing
    print("Exiting Main")
    print("Stopping all threads")
    # Give any lingering non-daemon threads a chance to finish.
    for t in threading.enumerate():
        if t.name != "MainThread":
            print("Thread {} is alive! Is daemon: {}".format(t.name, t.daemon))
            t.join(timeout=1)
            print("Thread joined")
    print("Exiting script")
    # os._exit bypasses atexit handlers and stuck threads, making sure the
    # framework really exits
    os._exit(os.EX_OK)
def evaluate_sequential(args, runner):
    """Roll out ``args.test_nepisode`` evaluation episodes, optionally save a replay, then shut the environment down."""
    episode_count = args.test_nepisode
    for _episode in range(episode_count):
        runner.run(test_mode=True)
    if args.save_replay:
        runner.save_replay()
    runner.close_env()
def run_sequential(args, logger):
    """Main training loop: build runner/buffer/mac/learner, optionally restore a
    checkpoint, then alternate environment rollouts with learner updates until
    ``args.t_max`` environment steps have elapsed.
    """
    # Init runner so we can get env info
    runner = r_REGISTRY[args.runner](args=args, logger=logger)
    # Set up schemes and groups here
    env_info = runner.get_env_info()
    args.episode_limit = env_info["episode_limit"]
    args.n_agents = env_info["n_agents"]
    args.n_actions = env_info["n_actions"]
    args.n_fixed_actions = env_info["n_fixed_actions"]
    args.n_mutual_actions = env_info["n_mutual_actions"]
    if args.use_token:
        # Token-based observation/state representation dimensions.
        args.n_tokens = env_info["n_tokens"]
        args.obs_token_dim = env_info["obs_token_shape"]
        args.obs_mask_bit = env_info["obs_mask_bit"]
        args.state_token_dim = env_info["state_token_shape"]
        args.state_mask_bit = env_info["state_mask_bit"]
    if not args.entity_scheme:
        # Flat scheme: fixed-size global state plus per-agent observations.
        args.state_shape = env_info["state_shape"]
        scheme = {
            "state": {"vshape": env_info["state_shape"]},
            "obs": {"vshape": env_info["obs_shape"], "group": "agents"},
            "actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
            "avail_actions": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.int},
            "reward": {"vshape": (1,)},
            "terminated": {"vshape": (1,), "dtype": th.uint8},
        }
        groups = {
            "agents": args.n_agents
        }
    else:
        # Entity scheme: per-entity features plus observability/activity masks.
        args.n_entities = env_info["n_entities"]
        args.entity_shape = env_info["entity_shape"]
        scheme = {
            "entities": {"vshape": env_info["entity_shape"], "group": "entities"},
            "obs_mask": {"vshape": env_info["n_entities"], "group": "entities", "dtype": th.bool},
            "entity_mask": {"vshape": env_info["n_entities"], "dtype": th.bool},
            "actions": {"vshape": (1,), "group": "agents", "dtype": th.long},
            "avail_actions": {"vshape": (env_info["n_actions"],), "group": "agents", "dtype": th.int},
            "reward": {"vshape": (1,)},
            "terminated": {"vshape": (1,), "dtype": th.uint8},
        }
        groups = {
            "agents": args.n_agents,
            "entities": args.n_entities
        }
    # One-hot encode stored actions on insertion.
    preprocess = {
        "actions": ("actions_onehot", [OneHot(out_dim=args.n_actions)])
    }
    # +1 timestep so the buffer can hold the terminal transition.
    buffer = ReplayBuffer(scheme, groups, args.buffer_size, env_info["episode_limit"] + 1,
                          preprocess=preprocess,
                          device="cpu" if args.buffer_cpu_only else args.device)
    # Setup multiagent controller here
    mac = mac_REGISTRY[args.mac](buffer.scheme, groups, args)
    # Give runner the scheme
    runner.setup(scheme=scheme, groups=groups, preprocess=preprocess, mac=mac)
    # Learner
    learner = le_REGISTRY[args.learner](mac, buffer.scheme, logger, args)
    if args.use_cuda:
        learner.cuda()
    if args.checkpoint_path != "":
        # Restore model weights from a previous run.
        timesteps = []
        timestep_to_load = 0
        if not os.path.isdir(args.checkpoint_path):
            logger.console_logger.info("Checkpoint directiory {} doesn't exist".format(args.checkpoint_path))
            return
        # Go through all files in args.checkpoint_path
        for name in os.listdir(args.checkpoint_path):
            full_name = os.path.join(args.checkpoint_path, name)
            # Check if they are dirs the names of which are numbers
            if os.path.isdir(full_name) and name.isdigit():
                timesteps.append(int(name))
        if args.load_step == 0:
            # choose the max timestep
            timestep_to_load = max(timesteps)
        else:
            # choose the timestep closest to load_step
            timestep_to_load = min(timesteps, key=lambda x: abs(x - args.load_step))
        model_path = os.path.join(args.checkpoint_path, str(timestep_to_load))
        logger.console_logger.info("Loading model from {}".format(model_path))
        learner.load_models(model_path)
        runner.t_env = timestep_to_load
        if args.evaluate or args.save_replay:
            # Evaluation-only mode: run test episodes and stop.
            evaluate_sequential(args, runner)
            return
    # start training
    episode = 0
    # Force an immediate first test pass by backdating the last-test marker.
    last_test_T = -args.test_interval - 1
    last_log_T = 0
    model_save_time = 0
    start_time = time.time()
    last_time = start_time
    logger.console_logger.info("Beginning training for {} timesteps".format(args.t_max))
    while runner.t_env <= args.t_max:
        # Run for a whole episode at a time
        with th.no_grad():
            episode_batch = runner.run(test_mode=False)
            buffer.insert_episode_batch(episode_batch)
        if buffer.can_sample(args.batch_size):
            episode_sample = buffer.sample(args.batch_size)
            # Truncate batch to only filled timesteps
            max_ep_t = episode_sample.max_t_filled()
            episode_sample = episode_sample[:, :max_ep_t]
            if episode_sample.device != args.device:
                episode_sample.to(args.device)
            learner.train(episode_sample, runner.t_env, episode)
        # Execute test runs once in a while
        n_test_runs = max(1, args.test_nepisode // runner.batch_size)
        if (runner.t_env - last_test_T) / args.test_interval >= 1.0:
            logger.console_logger.info("t_env: {} / {}".format(runner.t_env, args.t_max))
            logger.console_logger.info("Estimated time left: {}. Time passed: {}".format(
                time_left(last_time, last_test_T, runner.t_env, args.t_max), time_str(time.time() - start_time)))
            last_time = time.time()
            last_test_T = runner.t_env
            for _ in range(n_test_runs):
                runner.run(test_mode=True)
        if args.save_model and (runner.t_env - model_save_time >= args.save_model_interval or model_save_time == 0):
            model_save_time = runner.t_env
            save_path = os.path.join(args.local_results_path, "models", args.unique_token, str(runner.t_env))
            #"results/models/{}".format(unique_token)
            os.makedirs(save_path, exist_ok=True)
            logger.console_logger.info("Saving models to {}".format(save_path))
            # learner should handle saving/loading -- delegate actor save/load to mac,
            # use appropriate filenames to do critics, optimizer states
            learner.save_models(save_path)
        episode += args.batch_size_run
        if (runner.t_env - last_log_T) >= args.log_interval:
            logger.log_stat("episode", episode, runner.t_env)
            logger.print_recent_stats()
            last_log_T = runner.t_env
    runner.close_env()
    logger.console_logger.info("Finished Training")
def args_sanity_check(config, _log):
    """Validate and patch the experiment config in place, returning it.

    Disables CUDA when it was requested but is unavailable, and rounds
    ``test_nepisode`` to a whole multiple of ``batch_size_run`` (at least one
    full batch).
    """
    # set CUDA flags
    # config["use_cuda"] = True # Use cuda whenever possible!
    if config["use_cuda"] and not th.cuda.is_available():
        config["use_cuda"] = False
        _log.warning("CUDA flag use_cuda was switched OFF automatically because no CUDA devices are available!")
    batch = config["batch_size_run"]
    if config["test_nepisode"] < batch:
        config["test_nepisode"] = batch
    else:
        config["test_nepisode"] = (config["test_nepisode"] // batch) * batch
    return config
| 9,273 | 34.945736 | 116 | py |
OPT | OPT-main/src/modules/mixers/token_opt_qmix.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.layers import OPTTransformer
class TokenOPTQMixer(nn.Module):
    """QMIX-style monotonic mixing network over tokenized global states.

    Embeds each state token, runs the tokens through an OPT transformer, and
    derives the weights/biases of a two-layer monotonic mixer from the
    transformer outputs of the agent tokens (assumed to be the first
    ``n_agents`` tokens -- TODO confirm token ordering against the env).
    """
    def __init__(self, args):
        super(TokenOPTQMixer, self).__init__()
        self.args = args
        # Cached by forward() so get_disentangle_loss() can reshape the
        # per-head losses back to (batch, seq, tokens).
        self.state_shape = None
        self.token_embedding = nn.Linear(args.state_token_dim, args.mix_emb_dim)
        self.transformer = OPTTransformer(args.mix_n_blocks, args.mix_emb_dim, args.mix_n_heads, args.mix_emb_dim * 4)
        if self.args.scale_q:
            # Optional learned softmax re-weighting of the input Q-values.
            self.hyper_w_0 = nn.Linear(args.mix_emb_dim, 1)
            self.softmax = nn.Softmax(dim=-1)
        # Hypernetworks producing the mixer's layer weights and biases.
        self.hyper_w_1 = nn.Linear(args.mix_emb_dim, args.mix_emb_dim)
        self.hyper_b_1 = nn.Linear(args.mix_emb_dim, args.mix_emb_dim)
        self.hyper_w_2 = nn.Linear(args.mix_emb_dim, args.mix_emb_dim)
        self.hyper_b_2 = nn.Sequential(nn.Linear(args.mix_emb_dim, args.mix_emb_dim),
                                       nn.ReLU(),
                                       nn.Linear(args.mix_emb_dim, 1))
    def forward(self, agent_qs, states):
        """Mix per-agent Q-values into a joint value.

        Args:
            agent_qs: per-agent Q-values, reshapeable to (batch*seq, 1, n_agents).
            states: (batch, seq, n_tokens, state_token_dim) tokenized state.

        Returns:
            (batch, seq, 1) tensor of joint Q-values.
        """
        self.state_shape = states.size()
        b, s, t, e = states.size()
        # Fold batch and time together for the transformer.
        states = states.reshape(b * s, t, e)
        agent_qs = agent_qs.view(b * s, 1, self.args.n_agents)
        x = F.relu(self.token_embedding(states))
        # Keep only the agent tokens' outputs for the hypernetworks.
        x = self.transformer.forward(x)[:, :self.args.n_agents]
        if self.args.scale_q:
            w_0 = self.softmax(self.hyper_w_0(x).view(b * s, 1, self.args.n_agents))
            agent_qs = torch.mul(w_0, agent_qs)
        # abs() keeps mixing weights non-negative (monotonicity, as in QMIX).
        w_1 = torch.abs(self.hyper_w_1(x))
        b_1 = self.hyper_b_1(x).mean(1, True)
        h = F.elu(torch.bmm(agent_qs, w_1) + b_1)
        w_2 = torch.abs(self.hyper_w_2(x)).mean(1, True)
        b_2 = self.hyper_b_2(x).mean(1, True)
        q_tot = torch.bmm(h, w_2.transpose(1, 2)) + b_2
        q_tot = q_tot.view(b, s, 1)
        return q_tot
    def get_disentangle_loss(self):
        """Sum each transformer block's disentanglement loss and average over
        tokens; returns a (batch, seq) tensor. Requires a prior forward()."""
        b, s, t, e = self.state_shape
        loss = 0
        for block in self.transformer.transformer_blocks:
            loss += block.attn.cal_disentangle_loss()
        loss = torch.mean(loss.reshape(b, s, t), dim=2)
        return loss
| 2,185 | 33.15625 | 118 | py |
OPT | OPT-main/src/modules/mixers/entity_opt_qmix.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.layers import OPTTransformer
class EntityOPTQMixer(nn.Module):
    """QMIX-style monotonic mixer over entity-based states.

    Like TokenOPTQMixer but consumes per-entity features plus an entity mask
    (True appears to mark inactive entities, given the masked_fill(..., 0)
    usage below -- confirm against the env's mask convention).
    """
    def __init__(self, args):
        super(EntityOPTQMixer, self).__init__()
        self.args = args
        input_shape = args.entity_shape
        if self.args.entity_last_action:
            # Entity features are extended with the last one-hot action.
            input_shape += args.n_actions
        # Cached by forward() for get_disentangle_loss().
        self.entity_shape = None
        self.entity_embedding = nn.Linear(input_shape, args.mix_emb_dim)
        self.transformer = OPTTransformer(args.mix_n_blocks, args.mix_emb_dim, args.mix_n_heads, args.mix_emb_dim * 4)
        if self.args.scale_q:
            # Optional learned softmax re-weighting of the input Q-values.
            self.hyper_w_0 = nn.Linear(args.mix_emb_dim, 1)
            self.softmax = nn.Softmax(dim=-1)
        # Hypernetworks producing the mixer's layer weights and biases.
        self.hyper_w_1 = nn.Linear(args.mix_emb_dim, args.mix_emb_dim)
        self.hyper_b_1 = nn.Linear(args.mix_emb_dim, args.mix_emb_dim)
        self.hyper_w_2 = nn.Linear(args.mix_emb_dim, args.mix_emb_dim)
        self.hyper_b_2 = nn.Sequential(nn.Linear(args.mix_emb_dim, args.mix_emb_dim),
                                       nn.ReLU(),
                                       nn.Linear(args.mix_emb_dim, 1))
    def forward(self, agent_qs, states):
        """Mix per-agent Q-values into (batch, seq, 1) joint Q-values.

        Args:
            agent_qs: per-agent Q-values, reshapeable to (batch*seq, 1, n_agents).
            states: (entities, entity_mask) tuple; entities is
                (batch, seq, n_entities, feat), entity_mask is (batch, seq, n_entities).
        """
        entities, entity_mask = states
        self.entity_shape = entities.shape
        b, s, t, e = entities.shape
        # Fold batch and time together for the transformer.
        entities = entities.reshape(b * s, t, e)
        entity_mask = entity_mask.reshape(b * s, t)
        # The first n_agents entities correspond to the agents themselves.
        agent_mask = entity_mask[:, :self.args.n_agents]
        agent_qs = agent_qs.view(b * s, 1, self.args.n_agents)
        x = F.relu(self.entity_embedding(entities))
        # Broadcast the 1-D entity mask into a full (t, t) attention mask.
        x = self.transformer(x, entity_mask.repeat(1, t).reshape(b * s, t, t))[:, :self.args.n_agents]
        # Zero out features of masked agents.
        x = x.masked_fill(agent_mask.unsqueeze(2), 0)
        if self.args.scale_q:
            # -inf before softmax gives masked agents zero weight.
            w_0 = self.hyper_w_0(x).masked_fill(agent_mask.unsqueeze(2), float('-inf'))
            w_0 = self.softmax(w_0.view(-1, 1, self.args.n_agents))
            agent_qs = torch.mul(w_0, agent_qs)
        # abs() keeps mixing weights non-negative (monotonicity, as in QMIX).
        w_1 = torch.abs(self.hyper_w_1(x))
        b_1 = self.hyper_b_1(x).masked_fill(agent_mask.unsqueeze(2), 0).mean(1, True)
        h = F.elu(torch.bmm(agent_qs, w_1) + b_1)
        w_2 = torch.abs(self.hyper_w_2(x)).masked_fill(agent_mask.unsqueeze(2), 0).mean(1, True)
        b_2 = self.hyper_b_2(x).masked_fill(agent_mask.unsqueeze(2), 0).mean(1, True)
        q_tot = torch.bmm(h, w_2.transpose(1, 2)) + b_2
        q_tot = q_tot.view(b, s, 1)
        return q_tot
    def get_disentangle_loss(self):
        """Sum each transformer block's disentanglement loss and average over
        entities; returns a (batch, seq) tensor. Requires a prior forward()."""
        b, s, t, e = self.entity_shape
        loss = 0
        for block in self.transformer.transformer_blocks:
            loss += block.attn.cal_disentangle_loss()
        loss = torch.mean(loss.reshape(b, s, t), dim=2)
        return loss
| 2,747 | 36.643836 | 118 | py |
OPT | OPT-main/src/modules/agents/entity_opt_agent.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.layers import EntityOPTTransformer
class EntityOPTAgent(nn.Module):
    """Recurrent agent network over entity-based observations.

    Per timestep: embed entities, attend with an OPT transformer conditioned
    on the previous GRU hidden state, project, step the GRU, and map hidden
    states to per-agent Q-values.
    """
    def __init__(self, input_shape, args):
        super(EntityOPTAgent, self).__init__()
        self.args = args
        # Cached by forward() for the auxiliary-loss reshape logic below.
        self.entity_shape = None
        self.entity_embedding = nn.Linear(input_shape, args.emb_dim)
        self.transformer = EntityOPTTransformer(args.n_blocks, args.emb_dim, args.n_heads, args.emb_dim * 4, args.rnn_hidden_dim)
        self.fc1 = nn.Linear(args.emb_dim, args.rnn_hidden_dim)
        self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
        self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_actions)
    def init_hidden(self):
        # Zero hidden state on the same device/dtype as the model weights.
        return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
    def forward(self, inputs, hidden_state):
        """Compute Q-values for every timestep in the batch.

        Args:
            inputs: (entities, obs_mask, entity_mask) with shapes
                (b, s, t, e), (b, s, t, t) and (b, s, t) respectively.
            hidden_state: previous GRU state, reshapeable to
                (b * n_agents, rnn_hidden_dim).

        Returns:
            q: (b, s, n_agents, n_actions); zeroed for masked agents.
            h: (b, s, n_agents, rnn_hidden_dim) stacked hidden states.
        """
        entities, obs_mask, entity_mask = inputs
        self.entity_shape = entities.shape
        b, s, t, e = entities.shape
        entities = entities.reshape(b * s, t, e)
        obs_mask = obs_mask.reshape(b * s, t, t)
        entity_mask = entity_mask.reshape(b * s, t)
        # The first n_agents entities correspond to the agents themselves.
        agent_mask = entity_mask[:, :self.args.n_agents]
        x = F.relu(self.entity_embedding(entities))
        x = x.reshape(b, s, t, -1)
        obs_mask = obs_mask.reshape(b, s, t, t)
        agent_mask = agent_mask.reshape(b, s, self.args.n_agents)
        h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
        h = []
        # Unroll the GRU over the sequence dimension.
        for i in range(s):
            out = self.transformer(x[:, i], h_in.reshape(b, self.args.n_agents, self.args.rnn_hidden_dim), obs_mask[:, i])[:, :self.args.n_agents]
            # Zero out features of masked (inactive) agents.
            out = out.masked_fill(agent_mask[:, i].unsqueeze(2), 0)
            out = F.relu(self.fc1(out))
            h_in = self.rnn(out.reshape(-1, self.args.rnn_hidden_dim), h_in)
            h.append(h_in.reshape(b, self.args.n_agents, self.args.rnn_hidden_dim))
        h = torch.stack(h, dim=1)
        q = self.fc2(h)
        q = q.reshape(b, s, self.args.n_agents, -1)
        q = q.masked_fill(agent_mask.reshape(b, s, self.args.n_agents, 1), 0)
        return q, h
    def get_disentangle_loss(self):
        """Aggregate per-block disentanglement losses; returns (batch, seq).
        Requires a prior forward() (uses the cached entity shape)."""
        b, s, t, e = self.entity_shape
        loss = 0
        for block in self.transformer.transformer_blocks:
            loss += block.attn.cal_disentangle_loss()
        # Losses were accumulated per sequence step; permute back to batch-first.
        loss = torch.mean(loss.reshape(s, b, t).permute(1, 0, 2), dim=2)
        return loss
    def get_cmi_loss(self):
        """Aggregate per-block CMI entropy and KL losses; returns two
        (batch, seq) tensors. Requires forward() with pattern mode enabled."""
        b, s, t, e = self.entity_shape
        entropy_loss = 0
        kl_loss = 0
        for block in self.transformer.transformer_blocks:
            loss1, loss2 = block.attn.cal_cmi_loss()
            entropy_loss += loss1
            kl_loss += loss2
        entropy_loss = torch.mean(entropy_loss.reshape(s, b, -1).permute(1, 0, 2), dim=2)
        kl_loss = torch.mean(kl_loss.reshape(s, b, -1).permute(1, 0, 2), dim=2)
        return entropy_loss, kl_loss
    def set_pattern(self, use_pattern):
        # Toggle recording of attention patterns in every attention layer.
        for block in self.transformer.transformer_blocks:
            block.attn.set_pattern(use_pattern=use_pattern)
| 3,074 | 37.4375 | 146 | py |
OPT | OPT-main/src/modules/agents/token_opt_agent.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.layers import TokenOPTTransformer
class TokenOPTAgent(nn.Module):
    """Recurrent agent network over tokenized observations.

    Embeds tokens, attends with an OPT transformer conditioned on the GRU
    hidden state, steps the GRU per token, then reads Q-values from the
    hidden states: token 0 for the agent's own (fixed) actions and the next
    n_mutual_actions tokens for mutual actions.
    """
    def __init__(self, input_shape, args):
        super(TokenOPTAgent, self).__init__()
        self.args = args
        # Cached by forward() for the auxiliary-loss reshape logic below.
        self.x_shape = None
        self.token_embedding = nn.Linear(input_shape, args.emb_dim)
        self.transformer = TokenOPTTransformer(args.n_blocks, args.emb_dim, args.n_heads, args.emb_dim * 4, args.rnn_hidden_dim)
        self.fc1 = nn.Linear(args.emb_dim, args.rnn_hidden_dim)
        self.rnn = nn.GRUCell(args.rnn_hidden_dim, args.rnn_hidden_dim)
        self.fc2 = nn.Linear(args.rnn_hidden_dim, args.n_fixed_actions)
    def init_hidden(self):
        # Zero hidden state on the same device/dtype as the model weights.
        return self.fc1.weight.new(1, self.args.rnn_hidden_dim).zero_()
    def forward(self, inputs, hidden_state):
        """Compute Q-values for one step.

        Args:
            inputs: (x, mask) with x of shape (b, t, e) token features.
            hidden_state: previous GRU state, reshapeable to (b * t, rnn_hidden_dim).

        Returns:
            q: (b, n_fixed_actions + n_mutual_actions) Q-values.
            h: (b, t, rnn_hidden_dim) updated hidden states.
        """
        x, mask = inputs
        self.x_shape = x.size()
        b, t, e = x.size()
        x = F.relu(self.token_embedding(x))
        x = self.transformer.forward(x, hidden_state.reshape(-1, t, self.args.rnn_hidden_dim), mask)
        x = F.relu(self.fc1(x)).reshape(-1, self.args.rnn_hidden_dim)
        h_in = hidden_state.reshape(-1, self.args.rnn_hidden_dim)
        # One GRU step per token (tokens share the cell, flattened into batch).
        h = self.rnn(x, h_in).reshape(b, t, self.args.rnn_hidden_dim)
        # Token 0 yields the agent's own fixed-action Q-values.
        q_self_actions = self.fc2(h[:, 0, :])
        q = q_self_actions
        # Tokens 1..n_mutual_actions yield mutual-action Q-values (mean over
        # the fc2 output dimension).
        q_mutual_actions = self.fc2(h[:, 1:self.args.n_mutual_actions + 1, :]).mean(2)
        q = torch.cat((q, q_mutual_actions), 1)
        return q, h
    def get_disentangle_loss(self):
        """Aggregate per-block disentanglement losses into a batch-first
        tensor. Requires a prior forward() with pattern mode enabled."""
        b, t, e = self.x_shape
        loss = 0
        for block in self.transformer.transformer_blocks:
            loss += block.attn.cal_disentangle_loss()
        loss = torch.mean(loss.reshape(-1, b, t).permute(1, 0, 2), dim=2)
        return loss
    def get_cmi_loss(self):
        """Aggregate per-block CMI entropy and KL losses (batch-first).
        Requires forward() with pattern mode enabled."""
        b, t, e = self.x_shape
        entropy_loss = 0
        kl_loss = 0
        for block in self.transformer.transformer_blocks:
            loss1, loss2 = block.attn.cal_cmi_loss()
            entropy_loss += loss1
            kl_loss += loss2
        entropy_loss = entropy_loss.reshape(-1, b).permute(1, 0)
        kl_loss = kl_loss.reshape(-1, b).permute(1, 0)
        return entropy_loss, kl_loss
    def set_pattern(self, use_pattern):
        # Toggle recording of attention patterns in every attention layer.
        for block in self.transformer.transformer_blocks:
            block.attn.set_pattern(use_pattern=use_pattern)
| 2,410 | 32.957746 | 128 | py |
OPT | OPT-main/src/modules/layers/entity_opt_attention.py | import torch
import torch.nn as nn
from entmax import sparsemax
import torch.nn.functional as F
from torch.distributions import kl_divergence
from torch.distributions import Categorical
class ScaledDotProductEntityOPTAttention(nn.Module):
    """Scaled dot-product attention normalized with sparsemax instead of softmax."""
    def __init__(self, temperature, dropout_attn=0.0):
        super().__init__()
        self.temperature = temperature
        self.dropout_attn = nn.Dropout(dropout_attn)
    def forward(self, q, k, v, mask=None):
        """q, k, v: (batch, len, dim); mask entries that are truthy are blocked."""
        scores = torch.bmm(q / self.temperature, k.transpose(1, 2))
        if mask is not None:
            blocked = mask.bool()
            scores = scores.masked_fill(blocked, float('-inf'))
            # sparsemax cannot handle a row that is entirely -inf, so zero
            # out rows whose every key position is masked.
            dead_rows = blocked.all(dim=2).unsqueeze(2).repeat(1, 1, mask.size()[2])
            scores = scores.masked_fill(dead_rows, 0)
        weights = sparsemax(scores, dim=2)
        if mask is not None:
            # Force fully-masked rows back to zero attention weight.
            weights = weights.masked_fill(dead_rows, 0)
        weights = self.dropout_attn(weights)
        return torch.bmm(weights, v), weights
class MultiHeadEntityOPTAttention(nn.Module):
    """Multi-head sparsemax attention over entities with learned head selection.

    Agent entities (the first ``h.size(1)`` positions) combine head outputs
    via a learned softmax over heads; the remaining entities average their
    heads. Optionally records head outputs and selection distributions for
    the disentanglement and CMI auxiliary losses.
    """
    def __init__(self, emb_dim, n_heads, rnn_hidden_dim, dropout_attn=0.0, dropout_attn_out=0.0):
        super().__init__()
        self.emb_dim = emb_dim
        self.n_heads = n_heads
        # Head-selection distribution from the (aggregated) input features.
        self.fc_select = nn.Linear(emb_dim, n_heads)
        # Head-selection distribution conditioned on the RNN hidden state.
        self.fc_latent = nn.Linear(rnn_hidden_dim + emb_dim, n_heads)
        self.w_k = nn.Linear(emb_dim, n_heads * emb_dim, bias=False)
        self.w_q = nn.Linear(emb_dim, n_heads * emb_dim, bias=False)
        self.w_v = nn.Linear(emb_dim, n_heads * emb_dim, bias=False)
        self.attention = ScaledDotProductEntityOPTAttention(temperature=emb_dim ** 0.5, dropout_attn=dropout_attn)
        self.dropout_attn_out = nn.Dropout(dropout_attn_out)
        # Buffers filled while use_pattern is True; consumed by cal_*_loss().
        self.disentangle_x = []
        # NOTE(review): disentangle_classifier / disentangle_loss appear unused
        # by the methods below; 'reduce' is also deprecated in torch
        # (reduction='none' is the modern spelling).
        self.disentangle_classifier = nn.Linear(emb_dim, n_heads)
        self.disentangle_loss = nn.CrossEntropyLoss(reduce=False)
        self.cmi_attn_select = []
        self.cmi_attn_latent = []
        self.use_pattern = False
    def set_pattern(self, use_pattern):
        # Clear recorded buffers and toggle pattern recording.
        self.disentangle_x = []
        self.cmi_attn_select = []
        self.cmi_attn_latent = []
        self.use_pattern = use_pattern
    def forward(self, x, h, mask=None):
        """x: (b, t, e) entity features; h: (b, n_agents, rnn_hidden_dim);
        mask: optional (b, t, t) attention mask (truthy = blocked)."""
        b, t, e = x.size()
        _, n_agents, _ = h.size()
        n_heads = self.n_heads
        k = self.w_k(x).view(b, t, n_heads, e)
        q = self.w_q(x).view(b, t, n_heads, e)
        v = self.w_v(x).view(b, t, n_heads, e)
        # Fold heads into the batch dimension for batched attention.
        k = k.permute(2, 0, 1, 3).contiguous().view(n_heads * b, t, e)
        q = q.permute(2, 0, 1, 3).contiguous().view(n_heads * b, t, e)
        v = v.permute(2, 0, 1, 3).contiguous().view(n_heads * b, t, e)
        if mask is not None:
            mask = mask.repeat(n_heads, 1, 1)
        out, _ = self.attention(q, k, v, mask=mask)
        out = out.view(n_heads, b, t, e)
        out = out.permute(1, 2, 0, 3).contiguous()
        if self.use_pattern:
            self.disentangle_x.append(out.view(b * t, n_heads, e))
        # Split head outputs into agent entities and the rest.
        out_agent = out.view(b, t, n_heads, e)[:, :n_agents]
        out_other = out.view(b, t, n_heads, e)[:, n_agents:]
        if mask is not None:
            # Aggregate each agent's visible entities' features before head selection.
            x = torch.bmm((~mask[:b]).float()[:, :n_agents], x)
        attn_select = self.fc_select(x)
        attn_select = F.softmax(attn_select, dim=2)
        if self.use_pattern:
            self.cmi_attn_select.append(attn_select.view(b * n_agents, n_heads))
            # Hidden state is detached: the CMI loss should not backprop into the RNN.
            attn_latent = self.fc_latent(torch.cat([h.detach(), x], dim=2))
            attn_latent = F.softmax(attn_latent, dim=2)
            self.cmi_attn_latent.append(attn_latent.view(b * n_agents, n_heads))
        # Agents: weighted combination over heads; others: plain head mean.
        attn_select = attn_select.view(b, n_agents, 1, n_heads)
        out_agent = torch.matmul(attn_select, out_agent)
        out_agent = out_agent.squeeze(2)
        out_other = torch.mean(out_other, dim=2)
        out = torch.cat([out_agent, out_other], dim=1)
        out = self.dropout_attn_out(out)
        return out
    def cal_disentangle_loss(self):
        """InfoNCE-style contrastive loss pushing head outputs apart.
        Requires recorded patterns (use_pattern=True during forward)."""
        x = torch.cat(self.disentangle_x, dim=0)
        dist = torch.bmm(x, x.permute(0, 2, 1))
        # Max-subtraction for numerical stability of the exponentials.
        positive_res = torch.exp(torch.diagonal(dist, dim1=-2, dim2=-1) - torch.max(dist, dim=2)[0])
        negative_res = torch.sum(torch.exp(dist - torch.max(dist, dim=2)[0].unsqueeze(2)), dim=2)
        loss = torch.mean(-torch.log(positive_res / negative_res), dim=1)
        return loss
    def cal_cmi_loss(self):
        """Entropy of the head-selection distribution and its KL divergence
        from the hidden-state-conditioned distribution."""
        attn_select = torch.cat(self.cmi_attn_select, dim=0)
        attn_latent = torch.cat(self.cmi_attn_latent, dim=0)
        distribution_select = Categorical(probs=attn_select)
        distribution_latent = Categorical(probs=attn_latent)
        entropy_loss = distribution_select.entropy()
        kl_loss = kl_divergence(distribution_select, distribution_latent)
        return entropy_loss, kl_loss
class PositionWiseFeedForward(nn.Module):
    """Two-layer position-wise MLP (Linear -> ReLU -> Linear) with output dropout."""
    def __init__(self, emb_dim, ff_emb_dim, dropout_ff=0.0):
        super().__init__()
        # Kept as nn.Sequential under the name `fc` for state-dict compatibility.
        self.fc = nn.Sequential(
            nn.Linear(emb_dim, ff_emb_dim),
            nn.ReLU(),
            nn.Linear(ff_emb_dim, emb_dim),
        )
        self.dropout = nn.Dropout(dropout_ff)
    def forward(self, x):
        # Applied independently at every position of x.
        return self.dropout(self.fc(x))
class EntityOPTTransformerBlock(nn.Module):
    """Transformer block: entity OPT attention then FFN, each applied as
    x = LayerNorm(x + sublayer(x))."""
    def __init__(self, emb_dim, n_heads, ff_emb_dim, rnn_hidden_dim, dropout_attn=0.0, dropout_attn_out=0.0, dropout_ff=0.0):
        super().__init__()
        self.attn = MultiHeadEntityOPTAttention(emb_dim, n_heads, rnn_hidden_dim, dropout_attn, dropout_attn_out)
        self.ff = PositionWiseFeedForward(emb_dim, ff_emb_dim, dropout_ff)
        self.norm1 = nn.LayerNorm(emb_dim)
        self.norm2 = nn.LayerNorm(emb_dim)
    def forward(self, inputs):
        # Packed (x, h, mask) tuple so the block can live inside nn.Sequential.
        x, h, mask = inputs
        x = self.norm1(x + self.attn(x, h, mask))
        x = self.norm2(x + self.ff(x))
        return x, h, mask
class EntityOPTTransformer(nn.Module):
    """Stack of EntityOPTTransformerBlock layers followed by a final linear projection."""
    def __init__(self, n_blocks, emb_dim, n_heads, ff_emb_dim, rnn_hidden_dim,
                 dropout_attn=0.0, dropout_attn_out=0.0, dropout_ff=0.0):
        super().__init__()
        blocks = [
            EntityOPTTransformerBlock(emb_dim, n_heads, ff_emb_dim, rnn_hidden_dim,
                                      dropout_attn, dropout_attn_out, dropout_ff)
            for _ in range(n_blocks)
        ]
        self.transformer_blocks = nn.Sequential(*blocks)
        self.fc = nn.Linear(emb_dim, emb_dim)
    def forward(self, x, h, mask=None):
        # Blocks pass (x, h, mask) through; only the features are projected.
        out, _, _ = self.transformer_blocks((x, h, mask))
        return self.fc(out)
| 6,584 | 32.426396 | 127 | py |
OPT | OPT-main/src/modules/layers/token_opt_attention.py | import torch
import torch.nn as nn
from entmax import sparsemax
import torch.nn.functional as F
from torch.distributions import kl_divergence
from torch.distributions import Categorical
class ScaledDotProductTokenOPTAttention(nn.Module):
    """Batched scaled dot-product attention normalized with sparsemax."""
    def __init__(self, temperature, dropout_attn=0.0):
        super().__init__()
        self.temperature = temperature
        self.dropout_attn = nn.Dropout(dropout_attn)
    def forward(self, q, k, v, mask=None):
        """q, k, v: (batch, len, dim); mask entries that are truthy are blocked."""
        logits = torch.bmm(q / self.temperature, k.transpose(1, 2))
        if mask is not None:
            blocked = mask.bool()
            logits = logits.masked_fill(blocked, float('-inf'))
            # sparsemax cannot handle all -inf input, so neutralize rows
            # whose every key position is masked.
            dead_rows = blocked.all(dim=2).unsqueeze(2).repeat(1, 1, mask.size()[2])
            logits = logits.masked_fill(dead_rows, 0)
        attn = sparsemax(logits, dim=2)
        if mask is not None:
            # If an agent is inactive and all entities were masked, force its
            # attention weights back to zero.
            attn = attn.masked_fill(dead_rows, 0)
        attn = self.dropout_attn(attn)
        return torch.bmm(attn, v), attn
class MultiHeadTokenOPTAttention(nn.Module):
    """Multi-head sparsemax attention over tokens with learned head selection.

    A single softmax over heads (computed from the token-mean of the input)
    combines the per-head outputs for every token. Optionally records head
    outputs and selection distributions for the disentanglement and CMI
    auxiliary losses.
    """
    def __init__(self, emb_dim, n_heads, rnn_hidden_dim, dropout_attn=0.0, dropout_attn_out=0.0):
        super().__init__()
        self.emb_dim = emb_dim
        self.n_heads = n_heads
        # Head-selection distribution from the aggregated input features.
        self.fc_select = nn.Linear(emb_dim, n_heads)
        # Head-selection distribution conditioned on the RNN hidden state.
        self.fc_latent = nn.Linear(rnn_hidden_dim + emb_dim, n_heads)
        self.w_k = nn.Linear(emb_dim, n_heads * emb_dim, bias=False)
        self.w_q = nn.Linear(emb_dim, n_heads * emb_dim, bias=False)
        self.w_v = nn.Linear(emb_dim, n_heads * emb_dim, bias=False)
        self.attention = ScaledDotProductTokenOPTAttention(temperature=emb_dim ** 0.5, dropout_attn=dropout_attn)
        self.dropout_attn_out = nn.Dropout(dropout_attn_out)
        # Buffers filled while use_pattern is True; consumed by cal_*_loss().
        self.disentangle_x = []
        # NOTE(review): disentangle_classifier / disentangle_loss appear unused
        # by the methods below; 'reduce' is also deprecated in torch
        # (reduction='none' is the modern spelling).
        self.disentangle_classifier = nn.Linear(emb_dim, n_heads)
        self.disentangle_loss = nn.CrossEntropyLoss(reduce=False)
        self.cmi_attn_select = []
        self.cmi_attn_latent = []
        self.use_pattern = False
    def set_pattern(self, use_pattern):
        # Clear recorded buffers and toggle pattern recording.
        self.disentangle_x = []
        self.cmi_attn_select = []
        self.cmi_attn_latent = []
        self.use_pattern = use_pattern
    def forward(self, x, h, mask=None):
        """x: (b, t, e) token features; h: hidden states reshapeable to (b, t, -1);
        mask: optional (b, t, t) attention mask (truthy = blocked)."""
        b, t, e = x.size()
        n_heads = self.n_heads
        k = self.w_k(x).view(b, t, n_heads, e)
        q = self.w_q(x).view(b, t, n_heads, e)
        v = self.w_v(x).view(b, t, n_heads, e)
        # Fold heads into the batch dimension for batched attention.
        k = k.permute(2, 0, 1, 3).contiguous().view(n_heads * b, t, e)
        q = q.permute(2, 0, 1, 3).contiguous().view(n_heads * b, t, e)
        v = v.permute(2, 0, 1, 3).contiguous().view(n_heads * b, t, e)
        if mask is not None:
            mask = mask.repeat(n_heads, 1, 1)
        out, _ = self.attention(q, k, v, mask=mask)
        out = out.view(n_heads, b, t, e)
        out = out.permute(1, 2, 0, 3).contiguous()
        if self.use_pattern:
            self.disentangle_x.append(out.view(b * t, n_heads, e))
        out = out.view(b, t, n_heads, e)
        # One head-selection distribution per batch element (token-mean input).
        attn_select = self.fc_select(torch.mean(x, dim=1))
        attn_select = F.softmax(attn_select, dim=1)
        if self.use_pattern:
            self.cmi_attn_select.append(attn_select.view(b, n_heads))
            # Hidden state is detached: the CMI loss should not backprop into the RNN.
            attn_latent = self.fc_latent(torch.mean(torch.cat([h.detach(), x], dim=2), dim=1))
            attn_latent = F.softmax(attn_latent, dim=1)
            self.cmi_attn_latent.append(attn_latent.view(b, n_heads))
        # Weighted combination of head outputs for every token.
        attn_select = attn_select.view(b, 1, 1, n_heads)
        out = torch.matmul(attn_select, out)
        out = out.squeeze(2)
        out = self.dropout_attn_out(out)
        return out
    def cal_disentangle_loss(self):
        """InfoNCE-style contrastive loss pushing head outputs apart.
        Requires recorded patterns (use_pattern=True during forward)."""
        x = torch.cat(self.disentangle_x, dim=0)
        dist = torch.bmm(x, x.permute(0, 2, 1))
        # Max-subtraction for numerical stability of the exponentials.
        positive_res = torch.exp(torch.diagonal(dist, dim1=-2, dim2=-1) - torch.max(dist, dim=2)[0])
        negative_res = torch.sum(torch.exp(dist - torch.max(dist, dim=2)[0].unsqueeze(2)), dim=2)
        loss = torch.mean(-torch.log(positive_res / negative_res), dim=1)
        return loss
    def cal_cmi_loss(self):
        """Entropy of the head-selection distribution and its KL divergence
        from the hidden-state-conditioned distribution."""
        attn_select = torch.cat(self.cmi_attn_select, dim=0)
        attn_latent = torch.cat(self.cmi_attn_latent, dim=0)
        distribution_select = Categorical(probs=attn_select)
        distribution_latent = Categorical(probs=attn_latent)
        entropy_loss = distribution_select.entropy()
        kl_loss = kl_divergence(distribution_select, distribution_latent)
        return entropy_loss, kl_loss
class PositionWiseFeedForward(nn.Module):
    """Position-wise feed-forward sublayer: Linear -> ReLU -> Linear, then dropout."""
    def __init__(self, emb_dim, ff_emb_dim, dropout_ff=0.0):
        super().__init__()
        # Kept as nn.Sequential under the name `fc` for state-dict compatibility.
        self.fc = nn.Sequential(
            nn.Linear(emb_dim, ff_emb_dim),
            nn.ReLU(),
            nn.Linear(ff_emb_dim, emb_dim),
        )
        self.dropout = nn.Dropout(dropout_ff)
    def forward(self, x):
        # Same transformation at every position of x.
        return self.dropout(self.fc(x))
class TokenOPTTransformerBlock(nn.Module):
    """Transformer block: token OPT attention then FFN, each applied as
    x = LayerNorm(x + sublayer(x))."""
    def __init__(self, emb_dim, n_heads, ff_emb_dim, rnn_hidden_dim, dropout_attn=0.0, dropout_attn_out=0.0, dropout_ff=0.0):
        super().__init__()
        self.attn = MultiHeadTokenOPTAttention(emb_dim, n_heads, rnn_hidden_dim, dropout_attn, dropout_attn_out)
        self.ff = PositionWiseFeedForward(emb_dim, ff_emb_dim, dropout_ff)
        self.norm1 = nn.LayerNorm(emb_dim)
        self.norm2 = nn.LayerNorm(emb_dim)
    def forward(self, inputs):
        # Packed (x, h, mask) tuple so the block can live inside nn.Sequential.
        x, h, mask = inputs
        x = self.norm1(x + self.attn(x, h, mask))
        x = self.norm2(x + self.ff(x))
        return x, h, mask
class TokenOPTTransformer(nn.Module):
    """Stack of TokenOPTTransformerBlock layers followed by a final linear projection."""
    def __init__(self, n_blocks, emb_dim, n_heads, ff_emb_dim, rnn_hidden_dim,
                 dropout_attn=0.0, dropout_attn_out=0.0, dropout_ff=0.0):
        super().__init__()
        blocks = [
            TokenOPTTransformerBlock(emb_dim, n_heads, ff_emb_dim, rnn_hidden_dim,
                                     dropout_attn, dropout_attn_out, dropout_ff)
            for _ in range(n_blocks)
        ]
        self.transformer_blocks = nn.Sequential(*blocks)
        self.fc = nn.Linear(emb_dim, emb_dim)
    def forward(self, x, h, mask=None):
        # Blocks pass (x, h, mask) through; only the features are projected.
        out, _, _ = self.transformer_blocks((x, h, mask))
        return self.fc(out)
| 6,357 | 32.287958 | 126 | py |
OPT | OPT-main/src/modules/layers/opt_attention.py | import torch
import torch.nn as nn
from entmax import sparsemax
import torch.nn.functional as F
class ScaledDotProductOPTAttention(nn.Module):
    """Scaled dot-product attention using sparsemax as the normalizer."""
    def __init__(self, temperature, dropout_attn=0.0):
        super().__init__()
        self.temperature = temperature
        self.dropout_attn = nn.Dropout(dropout_attn)
    def forward(self, q, k, v, mask=None):
        """q, k, v: (batch, len, dim); mask entries that are truthy are blocked."""
        scores = torch.bmm(q / self.temperature, k.transpose(1, 2))
        if mask is not None:
            blocked = mask.bool()
            scores = scores.masked_fill(blocked, float('-inf'))
            # sparsemax cannot handle a row that is entirely -inf; neutralize
            # rows whose every key position is masked.
            dead_rows = blocked.all(dim=2).unsqueeze(2).repeat(1, 1, mask.size()[2])
            scores = scores.masked_fill(dead_rows, 0)
        weights = sparsemax(scores, dim=2)
        if mask is not None:
            # Force fully-masked rows back to zero attention weight.
            weights = weights.masked_fill(dead_rows, 0)
        weights = self.dropout_attn(weights)
        return torch.bmm(weights, v), weights
class MultiHeadOPTAttention(nn.Module):
    """Multi-head sparsemax attention with learned softmax head selection.

    Simpler variant without hidden-state conditioning: head outputs are
    combined by one selection distribution per batch element, and the last
    forward pass's head outputs are cached for the disentanglement loss.
    """
    def __init__(self, emb_dim, n_heads, dropout_attn=0.0, dropout_attn_out=0.0):
        super().__init__()
        self.emb_dim = emb_dim
        self.n_heads = n_heads
        # Head-selection distribution from the aggregated input features.
        self.fc_select = nn.Linear(emb_dim, n_heads)
        self.w_k = nn.Linear(emb_dim, n_heads * emb_dim, bias=False)
        self.w_q = nn.Linear(emb_dim, n_heads * emb_dim, bias=False)
        self.w_v = nn.Linear(emb_dim, n_heads * emb_dim, bias=False)
        self.attention = ScaledDotProductOPTAttention(temperature=emb_dim ** 0.5, dropout_attn=dropout_attn)
        self.dropout_attn_out = nn.Dropout(dropout_attn_out)
        # Cached per-head outputs of the most recent forward pass.
        self.disentangle_x = None
        # NOTE(review): disentangle_classifier / disentangle_loss appear unused
        # by the methods below; 'reduce' is also deprecated in torch
        # (reduction='none' is the modern spelling).
        self.disentangle_classifier = nn.Linear(emb_dim, n_heads)
        self.disentangle_loss = nn.CrossEntropyLoss(reduce=False)
    def forward(self, x, mask=None):
        """x: (b, t, e) features; mask: optional (b, t, t) attention mask
        (truthy = blocked). Returns (b, t, e)."""
        b, t, e = x.size()
        n_heads = self.n_heads
        k = self.w_k(x).view(b, t, n_heads, e)
        q = self.w_q(x).view(b, t, n_heads, e)
        v = self.w_v(x).view(b, t, n_heads, e)
        # Fold heads into the batch dimension for batched attention.
        k = k.permute(2, 0, 1, 3).contiguous().view(n_heads * b, t, e)
        q = q.permute(2, 0, 1, 3).contiguous().view(n_heads * b, t, e)
        v = v.permute(2, 0, 1, 3).contiguous().view(n_heads * b, t, e)
        if mask is not None:
            mask = mask.repeat(n_heads, 1, 1)
        out, _ = self.attention(q, k, v, mask=mask)
        out = out.view(n_heads, b, t, e)
        out = out.permute(1, 2, 0, 3).contiguous()
        # Cache head outputs for cal_disentangle_loss().
        self.disentangle_x = out.view(b * t, n_heads, e)
        out = out.view(b, t, n_heads, e)
        # One head-selection distribution per batch element (token-mean input).
        attn_select = self.fc_select(torch.mean(x, dim=1))
        attn_select = F.softmax(attn_select, dim=1)
        attn_select = attn_select.view(b, 1, 1, n_heads)
        out = torch.matmul(attn_select, out)
        out = out.squeeze(2)
        out = self.dropout_attn_out(out)
        return out
    def cal_disentangle_loss(self):
        """InfoNCE-style contrastive loss pushing the cached head outputs
        apart. Requires a prior forward() call."""
        x = self.disentangle_x
        dist = torch.bmm(x, x.permute(0, 2, 1))
        # Max-subtraction for numerical stability of the exponentials.
        positive_res = torch.exp(torch.diagonal(dist, dim1=-2, dim2=-1) - torch.max(dist, dim=2)[0])
        negative_res = torch.sum(torch.exp(dist - torch.max(dist, dim=2)[0].unsqueeze(2)), dim=2)
        loss = torch.mean(-torch.log(positive_res / negative_res), dim=1)
        return loss
class PositionWiseFeedForward(nn.Module):
    """Two-layer position-wise MLP (emb_dim -> ff_emb_dim -> emb_dim) with ReLU and dropout."""

    def __init__(self, emb_dim, ff_emb_dim, dropout_ff=0.0):
        super().__init__()
        self.fc = nn.Sequential(
            nn.Linear(emb_dim, ff_emb_dim),
            nn.ReLU(),
            nn.Linear(ff_emb_dim, emb_dim),
        )
        self.dropout = nn.Dropout(dropout_ff)

    def forward(self, x):
        """Apply the MLP independently at every position; input shape is preserved."""
        return self.dropout(self.fc(x))
class OPTTransformerBlock(nn.Module):
    """One transformer layer: multi-head OPT attention + position-wise feed-forward,
    each wrapped in a residual connection followed by LayerNorm (post-norm)."""

    def __init__(self, emb_dim, n_heads, ff_emb_dim, dropout_attn=0.0, dropout_attn_out=0.0, dropout_ff=0.0):
        super().__init__()
        self.attn = MultiHeadOPTAttention(emb_dim, n_heads, dropout_attn, dropout_attn_out)
        self.ff = PositionWiseFeedForward(emb_dim, ff_emb_dim, dropout_ff)
        self.norm1 = nn.LayerNorm(emb_dim)
        self.norm2 = nn.LayerNorm(emb_dim)

    def forward(self, inputs):
        """``inputs`` is an (x, mask) pair so blocks chain inside nn.Sequential."""
        x, mask = inputs
        x = self.norm1(self.attn(x, mask) + x)
        x = self.norm2(self.ff(x) + x)
        return x, mask
class OPTTransformer(nn.Module):
    """Stack of ``n_blocks`` OPTTransformerBlocks followed by a linear projection."""

    def __init__(self, n_blocks, emb_dim, n_heads, ff_emb_dim,
                 dropout_attn=0.0, dropout_attn_out=0.0, dropout_ff=0.0):
        super().__init__()
        layers = [
            OPTTransformerBlock(emb_dim, n_heads, ff_emb_dim,
                                dropout_attn, dropout_attn_out, dropout_ff)
            for _ in range(n_blocks)
        ]
        self.transformer_blocks = nn.Sequential(*layers)
        self.fc = nn.Linear(emb_dim, emb_dim)

    def forward(self, x, mask=None):
        """Run (x, mask) through all blocks and project the result."""
        out, _ = self.transformer_blocks((x, mask))
        return self.fc(out)
| 4,856 | 29.54717 | 109 | py |
OPT | OPT-main/src/components/episode_buffer.py | import torch as th
import numpy as np
from types import SimpleNamespace as SN
class EpisodeBatch:
    """Tensor-backed storage for a batch of episodes.

    Data lives in two dicts of tensors:
      * ``transition_data``: per-timestep fields, shape
        (batch_size, max_seq_length, *field_shape);
      * ``episode_data``: per-episode constants, shape (batch_size, *field_shape).

    ``scheme`` maps field name -> {"vshape", "dtype", "group", "episode_const"};
    ``groups`` maps a group name (e.g. "agents") to its member count — a grouped
    field gets an extra leading dim of that size.  ``preprocess`` maps a source
    key to (new_key, [Transform, ...]); the transform chain runs on every
    ``update`` of the source key and its output is stored under ``new_key``.
    """
    def __init__(self,
                 scheme,
                 groups,
                 batch_size,
                 max_seq_length,
                 data=None,
                 preprocess=None,
                 device="cpu"):
        self.scheme = scheme.copy()
        self.groups = groups
        self.batch_size = batch_size
        self.max_seq_length = max_seq_length
        self.preprocess = {} if preprocess is None else preprocess
        self.device = device
        if data is not None:
            # Wrap existing tensors without re-allocating (used by __getitem__ slicing).
            self.data = data
        else:
            self.data = SN()
            self.data.transition_data = {}
            self.data.episode_data = {}
            self._setup_data(self.scheme, self.groups, batch_size, max_seq_length, self.preprocess)
    def _setup_data(self, scheme, groups, batch_size, max_seq_length, preprocess):
        # Register the output field of each preprocess chain in the scheme,
        # then allocate a zeroed tensor for every scheme field.
        if preprocess is not None:
            for k in preprocess:
                assert k in scheme
                new_k = preprocess[k][0]
                transforms = preprocess[k][1]
                vshape = self.scheme[k]["vshape"]
                dtype = self.scheme[k]["dtype"]
                for transform in transforms:
                    vshape, dtype = transform.infer_output_info(vshape, dtype)
                self.scheme[new_k] = {
                    "vshape": vshape,
                    "dtype": dtype
                }
                if "group" in self.scheme[k]:
                    self.scheme[new_k]["group"] = self.scheme[k]["group"]
                if "episode_const" in self.scheme[k]:
                    self.scheme[new_k]["episode_const"] = self.scheme[k]["episode_const"]
        assert "filled" not in scheme, '"filled" is a reserved key for masking.'
        scheme.update({
            "filled": {"vshape": (1,), "dtype": th.long},
        })
        for field_key, field_info in scheme.items():
            assert "vshape" in field_info, "Scheme must define vshape for {}".format(field_key)
            vshape = field_info["vshape"]
            episode_const = field_info.get("episode_const", False)
            group = field_info.get("group", None)
            dtype = field_info.get("dtype", th.float32)
            if isinstance(vshape, int):
                vshape = (vshape,)
            if group:
                assert group in groups, "Group {} must have its number of members defined in _groups_".format(group)
                shape = (groups[group], *vshape)
            else:
                shape = vshape
            if episode_const:
                self.data.episode_data[field_key] = th.zeros((batch_size, *shape), dtype=dtype, device=self.device)
            else:
                self.data.transition_data[field_key] = th.zeros((batch_size, max_seq_length, *shape), dtype=dtype, device=self.device)
    def extend(self, scheme, groups=None):
        # Add new fields to an already-constructed batch.
        self._setup_data(scheme, self.groups if groups is None else groups, self.batch_size, self.max_seq_length)
    def to(self, device):
        # Move all stored tensors to `device` and remember it for future writes.
        for k, v in self.data.transition_data.items():
            self.data.transition_data[k] = v.to(device)
        for k, v in self.data.episode_data.items():
            self.data.episode_data[k] = v.to(device)
        self.device = device
    def update(self, data, bs=slice(None), ts=slice(None), mark_filled=True):
        # Write `data` (field -> values) into the (bs, ts) slice.  The first
        # transition field written also sets the "filled" mask when
        # mark_filled is True; preprocess chains run on each updated key.
        slices = self._parse_slices((bs, ts))
        for k, v in data.items():
            if k in self.data.transition_data:
                target = self.data.transition_data
                if mark_filled:
                    target["filled"][slices] = 1
                    mark_filled = False
                _slices = slices
            elif k in self.data.episode_data:
                target = self.data.episode_data
                # Episode-constant fields ignore the time slice.
                _slices = slices[0]
            else:
                raise KeyError("{} not found in transition or episode data".format(k))
            dtype = self.scheme[k].get("dtype", th.float32)
            v = th.tensor(v, dtype=dtype, device=self.device)
            self._check_safe_view(v, target[k][_slices])
            target[k][_slices] = v.view_as(target[k][_slices])
            if k in self.preprocess:
                new_k = self.preprocess[k][0]
                v = target[k][_slices]
                for transform in self.preprocess[k][1]:
                    v = transform.transform(v)
                target[new_k][_slices] = v.view_as(target[new_k][_slices])
    def _check_safe_view(self, v, dest):
        # Verify v can be view()-ed as dest: walking shapes right-to-left,
        # any size mismatch is only tolerated against a size-1 dim of dest.
        idx = len(v.shape) - 1
        for s in dest.shape[::-1]:
            if v.shape[idx] != s:
                if s != 1:
                    raise ValueError("Unsafe reshape of {} to {}".format(v.shape, dest.shape))
            else:
                idx -= 1
    def __getitem__(self, item):
        # Three indexing modes:
        #   batch["key"]              -> raw tensor for that field
        #   batch[("k1", "k2")]       -> new EpisodeBatch restricted to those fields
        #   batch[bs] / batch[bs, ts] -> new EpisodeBatch viewing that slice
        if isinstance(item, str):
            if item in self.data.episode_data:
                return self.data.episode_data[item]
            elif item in self.data.transition_data:
                return self.data.transition_data[item]
            else:
                raise ValueError
        elif isinstance(item, tuple) and all([isinstance(it, str) for it in item]):
            new_data = self._new_data_sn()
            for key in item:
                if key in self.data.transition_data:
                    new_data.transition_data[key] = self.data.transition_data[key]
                elif key in self.data.episode_data:
                    new_data.episode_data[key] = self.data.episode_data[key]
                else:
                    raise KeyError("Unrecognised key {}".format(key))
            # Update the scheme to only have the requested keys
            new_scheme = {key: self.scheme[key] for key in item}
            new_groups = {self.scheme[key]["group"]: self.groups[self.scheme[key]["group"]]
                          for key in item if "group" in self.scheme[key]}
            ret = EpisodeBatch(new_scheme, new_groups, self.batch_size, self.max_seq_length, data=new_data, device=self.device)
            return ret
        else:
            # Slice the underlying tensors; episode data only uses the batch index.
            item = self._parse_slices(item)
            new_data = self._new_data_sn()
            for k, v in self.data.transition_data.items():
                new_data.transition_data[k] = v[item]
            for k, v in self.data.episode_data.items():
                new_data.episode_data[k] = v[item[0]]
            ret_bs = self._get_num_items(item[0], self.batch_size)
            ret_max_t = self._get_num_items(item[1], self.max_seq_length)
            ret = EpisodeBatch(self.scheme, self.groups, ret_bs, ret_max_t, data=new_data, device=self.device)
            return ret
    def _get_num_items(self, indexing_item, max_size):
        # Number of elements selected by a list/array or slice index.
        if isinstance(indexing_item, list) or isinstance(indexing_item, np.ndarray):
            return len(indexing_item)
        elif isinstance(indexing_item, slice):
            _range = indexing_item.indices(max_size)
            return 1 + (_range[1] - _range[0] - 1)//_range[2]
    def _new_data_sn(self):
        # Fresh, empty data namespace.
        new_data = SN()
        new_data.transition_data = {}
        new_data.episode_data = {}
        return new_data
    def _parse_slices(self, items):
        # Normalise an index into a [batch_index, time_index] pair; bare ints
        # become one-element slices so tensor dims are preserved.
        parsed = []
        # Only batch slice given, add full time slice
        if (isinstance(items, slice) # slice a:b
            or isinstance(items, int) # int i
            or (isinstance(items, (list, np.ndarray, th.LongTensor, th.cuda.LongTensor))) # [a,b,c]
            ):
            items = (items, slice(None))
        # Need the time indexing to be contiguous
        if isinstance(items[1], list):
            raise IndexError("Indexing across Time must be contiguous")
        for item in items:
            #TODO: stronger checks to ensure only supported options get through
            if isinstance(item, int):
                # Convert single indices to slices
                parsed.append(slice(item, item+1))
            else:
                # Leave slices and lists as is
                parsed.append(item)
        return parsed
    def max_t_filled(self):
        # Length of the longest filled (non-padding) episode in the batch.
        return th.sum(self.data.transition_data["filled"], 1).max(0)[0]
    def __repr__(self):
        return "EpisodeBatch. Batch Size:{} Max_seq_len:{} Keys:{} Groups:{}".format(self.batch_size,
                                                                                     self.max_seq_length,
                                                                                     self.scheme.keys(),
                                                                                     self.groups.keys())
class ReplayBuffer(EpisodeBatch):
    """FIFO episode replay buffer backed by an EpisodeBatch of size ``buffer_size``."""

    def __init__(self, scheme, groups, buffer_size, max_seq_length, preprocess=None, device="cpu"):
        super(ReplayBuffer, self).__init__(scheme, groups, buffer_size, max_seq_length,
                                           preprocess=preprocess, device=device)
        self.buffer_size = buffer_size  # same as self.batch_size but more explicit
        self.buffer_index = 0           # next write position
        self.episodes_in_buffer = 0     # number of valid episodes stored so far

    def insert_episode_batch(self, ep_batch):
        """Write ``ep_batch`` into the buffer, wrapping around and overwriting the oldest data."""
        n_new = ep_batch.batch_size
        if self.buffer_index + n_new <= self.buffer_size:
            dest = slice(self.buffer_index, self.buffer_index + n_new)
            self.update(ep_batch.data.transition_data,
                        dest,
                        slice(0, ep_batch.max_seq_length),
                        mark_filled=False)
            self.update(ep_batch.data.episode_data, dest)
            self.buffer_index = self.buffer_index + n_new
            self.episodes_in_buffer = max(self.episodes_in_buffer, self.buffer_index)
            self.buffer_index = self.buffer_index % self.buffer_size
            assert self.buffer_index < self.buffer_size
        else:
            # Split at the wrap point and insert each half recursively.
            room = self.buffer_size - self.buffer_index
            self.insert_episode_batch(ep_batch[0:room, :])
            self.insert_episode_batch(ep_batch[room:, :])

    def can_sample(self, batch_size):
        """True once at least ``batch_size`` episodes have been stored."""
        return self.episodes_in_buffer >= batch_size

    def sample(self, batch_size):
        """Return ``batch_size`` episodes sampled uniformly without replacement."""
        assert self.can_sample(batch_size)
        if self.episodes_in_buffer == batch_size:
            return self[:batch_size]
        # Uniform sampling only atm
        ep_ids = np.random.choice(self.episodes_in_buffer, batch_size, replace=False)
        return self[ep_ids]

    def __repr__(self):
        return "ReplayBuffer. {}/{} episodes. Keys:{} Groups:{}".format(self.episodes_in_buffer,
                                                                        self.buffer_size,
                                                                        self.scheme.keys(),
                                                                        self.groups.keys())
| 10,894 | 42.75502 | 134 | py |
OPT | OPT-main/src/components/action_selectors.py | import torch as th
from torch.distributions import Categorical
from .epsilon_schedules import DecayThenFlatSchedule
REGISTRY = {}
class MultinomialActionSelector():
    """Samples actions from the (availability-masked) policy distribution;
    optionally greedy at test time (``args.test_greedy``, default True)."""

    def __init__(self, args):
        self.args = args
        self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
                                              decay="linear")
        self.epsilon = self.schedule.eval(0)
        self.test_greedy = getattr(args, "test_greedy", True)

    def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
        """Pick one action per agent from ``agent_inputs`` (policy probabilities),
        zeroing the probability of unavailable actions first."""
        masked_policies = agent_inputs.clone()
        masked_policies[avail_actions == 0.0] = 0.0
        # Epsilon is tracked here only for logging; sampling itself is multinomial.
        self.epsilon = self.schedule.eval(t_env)
        if test_mode and self.test_greedy:
            return masked_policies.max(dim=2)[1]
        return Categorical(masked_policies).sample().long()

REGISTRY["multinomial"] = MultinomialActionSelector
class EpsilonGreedyActionSelector():
    """Epsilon-greedy selection over Q-values with a linearly annealed epsilon."""

    def __init__(self, args):
        self.args = args
        self.schedule = DecayThenFlatSchedule(args.epsilon_start, args.epsilon_finish, args.epsilon_anneal_time,
                                              decay="linear")
        self.epsilon = self.schedule.eval(0)

    def select_action(self, agent_inputs, avail_actions, t_env, test_mode=False):
        # Assuming agent_inputs is a batch of Q-Values for each agent bav
        self.epsilon = self.schedule.eval(t_env)
        if test_mode:
            # Greedy action selection only
            self.epsilon = 0.0
        # mask actions that are excluded from selection
        masked_q_values = agent_inputs.clone()
        masked_q_values[avail_actions == 0.0] = -float("inf")  # should never be selected!
        # Per-agent coin flip: explore with probability epsilon.
        explore = (th.rand_like(agent_inputs[:, :, 0]) < self.epsilon).long()
        random_actions = Categorical(avail_actions.float()).sample().long()
        greedy_actions = masked_q_values.max(dim=2)[1]
        return explore * random_actions + (1 - explore) * greedy_actions

REGISTRY["epsilon_greedy"] = EpsilonGreedyActionSelector
| 2,225 | 32.727273 | 112 | py |
OPT | OPT-main/src/components/transforms.py | import torch as th
class Transform:
    """Interface for EpisodeBatch preprocessors."""

    def transform(self, tensor):
        raise NotImplementedError

    def infer_output_info(self, vshape_in, dtype_in):
        raise NotImplementedError


class OneHot(Transform):
    """Expands an integer index tensor's last dim into a float one-hot vector."""

    def __init__(self, out_dim):
        self.out_dim = out_dim

    def transform(self, tensor):
        encoded = tensor.new(*tensor.shape[:-1], self.out_dim).zero_()
        encoded.scatter_(-1, tensor.long(), 1)
        return encoded.float()

    def infer_output_info(self, vshape_in, dtype_in):
        return (self.out_dim,), th.float32
OPT | OPT-main/src/runners/parallel_runner.py | from envs import REGISTRY as env_REGISTRY
from functools import partial
from components.episode_buffer import EpisodeBatch
from multiprocessing import Pipe, Process
import numpy as np
import torch as th
# Based (very) heavily on SubprocVecEnv from OpenAI Baselines
# https://github.com/openai/baselines/blob/master/baselines/common/vec_env/subproc_vec_env.py
class ParallelRunner:
    """Episode runner that steps ``batch_size_run`` environments in parallel,
    each owned by an ``env_worker`` subprocess connected through a Pipe."""
    def __init__(self, args, logger):
        self.args = args
        self.logger = logger
        self.batch_size = self.args.batch_size_run
        # Make subprocesses for the envs
        self.parent_conns, self.worker_conns = zip(*[Pipe() for _ in range(self.batch_size)])
        env_fn = env_REGISTRY[self.args.env]
        self.ps = [Process(target=env_worker, args=(worker_conn, CloudpickleWrapper(partial(env_fn, **self.args.env_args))))
                   for worker_conn in self.worker_conns]
        for p in self.ps:
            p.daemon = True
            p.start()
        # All envs share a config, so the first worker's env_info is used for all.
        self.parent_conns[0].send(("get_env_info", None))
        self.env_info = self.parent_conns[0].recv()
        self.episode_limit = self.env_info["episode_limit"]
        self.t = 0      # timestep inside the current episode batch
        self.t_env = 0  # total env steps collected in train mode
        self.train_returns = []
        self.test_returns = []
        self.train_stats = {}
        self.test_stats = {}
        self.log_train_stats_t = -100000
    def setup(self, scheme, groups, preprocess, mac):
        # Bind the batch factory and the multi-agent controller.
        self.new_batch = partial(EpisodeBatch, scheme, groups, self.batch_size, self.episode_limit + 1,
                                 preprocess=preprocess, device=self.args.device)
        self.mac = mac
        self.scheme = scheme
        self.groups = groups
        self.preprocess = preprocess
    def get_env_info(self):
        return self.env_info
    def save_replay(self):
        # Not supported by the parallel runner.
        pass
    def close_env(self):
        # Ask every worker to close its env and shut down.
        for parent_conn in self.parent_conns:
            parent_conn.send(("close", None))
    def reset(self):
        """Start a fresh EpisodeBatch and reset every env, storing the t=0 observations."""
        self.batch = self.new_batch()
        # Reset the envs
        for parent_conn in self.parent_conns:
            parent_conn.send(("reset", None))
        pre_transition_data = {
            "state": [],
            "avail_actions": [],
            "obs": []
        }
        # Get the obs, state and avail_actions back
        for parent_conn in self.parent_conns:
            data = parent_conn.recv()
            pre_transition_data["state"].append(data["state"])
            pre_transition_data["avail_actions"].append(data["avail_actions"])
            pre_transition_data["obs"].append(data["obs"])
        self.batch.update(pre_transition_data, ts=0)
        self.t = 0
        self.env_steps_this_run = 0
    def run(self, test_mode=False):
        """Roll out one episode in every env and return the filled EpisodeBatch."""
        self.reset()
        all_terminated = False
        episode_returns = [0 for _ in range(self.batch_size)]
        episode_lengths = [0 for _ in range(self.batch_size)]
        self.mac.init_hidden(batch_size=self.batch_size)
        terminated = [False for _ in range(self.batch_size)]
        envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
        final_env_infos = []  # may store extra stats like battle won. this is filled in ORDER OF TERMINATION
        while True:
            # Pass the entire batch of experiences up till now to the agents
            # Receive the actions for each agent at this timestep in a batch for each un-terminated env
            actions = self.mac.select_actions(self.batch, t_ep=self.t, t_env=self.t_env, bs=envs_not_terminated, test_mode=test_mode)
            cpu_actions = actions.to("cpu").numpy()
            # Update the actions taken
            actions_chosen = {
                "actions": actions.unsqueeze(1)
            }
            self.batch.update(actions_chosen, bs=envs_not_terminated, ts=self.t, mark_filled=False)
            # Send actions to each env
            action_idx = 0
            for idx, parent_conn in enumerate(self.parent_conns):
                if idx in envs_not_terminated: # We produced actions for this env
                    if not terminated[idx]: # Only send the actions to the env if it hasn't terminated
                        parent_conn.send(("step", cpu_actions[action_idx]))
                    action_idx += 1 # actions is not a list over every env
            # Update envs_not_terminated
            envs_not_terminated = [b_idx for b_idx, termed in enumerate(terminated) if not termed]
            all_terminated = all(terminated)
            if all_terminated:
                break
            # Post step data we will insert for the current timestep
            post_transition_data = {
                "reward": [],
                "terminated": []
            }
            # Data for the next step we will insert in order to select an action
            pre_transition_data = {
                "state": [],
                "avail_actions": [],
                "obs": []
            }
            # Receive data back for each unterminated env
            for idx, parent_conn in enumerate(self.parent_conns):
                if not terminated[idx]:
                    data = parent_conn.recv()
                    # Remaining data for this current timestep
                    post_transition_data["reward"].append((data["reward"],))
                    episode_returns[idx] += data["reward"]
                    episode_lengths[idx] += 1
                    if not test_mode:
                        self.env_steps_this_run += 1
                    # Episodes cut off by the episode limit are not marked terminated
                    # in the batch, so bootstrapping still happens for them.
                    env_terminated = False
                    if data["terminated"]:
                        final_env_infos.append(data["info"])
                    if data["terminated"] and not data["info"].get("episode_limit", False):
                        env_terminated = True
                    terminated[idx] = data["terminated"]
                    post_transition_data["terminated"].append((env_terminated,))
                    # Data for the next timestep needed to select an action
                    pre_transition_data["state"].append(data["state"])
                    pre_transition_data["avail_actions"].append(data["avail_actions"])
                    pre_transition_data["obs"].append(data["obs"])
            # Add post_transiton data into the batch
            self.batch.update(post_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=False)
            # Move onto the next timestep
            self.t += 1
            # Add the pre-transition data
            self.batch.update(pre_transition_data, bs=envs_not_terminated, ts=self.t, mark_filled=True)
        if not test_mode:
            self.t_env += self.env_steps_this_run
        # Get stats back for each env
        for parent_conn in self.parent_conns:
            parent_conn.send(("get_stats",None))
        env_stats = []
        for parent_conn in self.parent_conns:
            env_stat = parent_conn.recv()
            env_stats.append(env_stat)
        # Accumulate per-episode stats/returns into the running train/test dicts.
        cur_stats = self.test_stats if test_mode else self.train_stats
        cur_returns = self.test_returns if test_mode else self.train_returns
        log_prefix = "test_" if test_mode else ""
        infos = [cur_stats] + final_env_infos
        cur_stats.update({k: sum(d.get(k, 0) for d in infos) for k in set.union(*[set(d) for d in infos])})
        cur_stats["n_episodes"] = self.batch_size + cur_stats.get("n_episodes", 0)
        cur_stats["ep_length"] = sum(episode_lengths) + cur_stats.get("ep_length", 0)
        cur_returns.extend(episode_returns)
        n_test_runs = max(1, self.args.test_nepisode // self.batch_size) * self.batch_size
        if test_mode and (len(self.test_returns) == n_test_runs):
            self._log(cur_returns, cur_stats, log_prefix)
        elif self.t_env - self.log_train_stats_t >= self.args.runner_log_interval:
            self._log(cur_returns, cur_stats, log_prefix)
            if hasattr(self.mac.action_selector, "epsilon"):
                self.logger.log_stat("epsilon", self.mac.action_selector.epsilon, self.t_env)
            self.log_train_stats_t = self.t_env
        return self.batch
    def _log(self, returns, stats, prefix):
        # Log mean/std of returns and per-episode means of stats, then clear both.
        self.logger.log_stat(prefix + "return_mean", np.mean(returns), self.t_env)
        self.logger.log_stat(prefix + "return_std", np.std(returns), self.t_env)
        returns.clear()
        for k, v in stats.items():
            if k != "n_episodes":
                self.logger.log_stat(prefix + k + "_mean" , v/stats["n_episodes"], self.t_env)
        stats.clear()
def env_worker(remote, env_fn):
    """Subprocess loop: builds one environment from ``env_fn`` (a
    CloudpickleWrapper around a factory) and serves commands sent over
    ``remote`` until a "close" command arrives."""
    env = env_fn.x()
    while True:
        cmd, payload = remote.recv()
        if cmd == "step":
            # Advance the env, then reply with the reward/termination for this
            # step plus the state/avail_actions/obs needed for the next one.
            reward, terminated, env_info = env.step(payload)
            remote.send({
                "state": env.get_state(),
                "avail_actions": env.get_avail_actions(),
                "obs": env.get_obs(),
                "reward": reward,
                "terminated": terminated,
                "info": env_info
            })
        elif cmd == "reset":
            env.reset()
            remote.send({
                "state": env.get_state(),
                "avail_actions": env.get_avail_actions(),
                "obs": env.get_obs()
            })
        elif cmd == "close":
            env.close()
            remote.close()
            break
        elif cmd == "get_env_info":
            remote.send(env.get_env_info())
        elif cmd == "get_stats":
            remote.send(env.get_stats())
        else:
            raise NotImplementedError
class CloudpickleWrapper():
    """
    Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
    """
    def __init__(self, x):
        # x is typically a zero-argument callable (e.g. a partial that builds an env).
        self.x = x
    def __getstate__(self):
        import cloudpickle
        # cloudpickle can serialise lambdas/closures/partials that plain pickle cannot.
        return cloudpickle.dumps(self.x)
    def __setstate__(self, ob):
        import pickle
        # cloudpickle emits an ordinary pickle stream, so plain pickle can load it.
        self.x = pickle.loads(ob)
| 10,322 | 37.518657 | 133 | py |
OPT | OPT-main/src/controllers/basic_controller.py | from modules.agents import REGISTRY as agent_REGISTRY
from components.action_selectors import REGISTRY as action_REGISTRY
import torch as th
# This multi-agent controller shares parameters between agents
class BasicMAC:
    """Basic multi-agent controller: one shared agent network for all agents,
    with the agent dimension folded into the batch dimension."""
    def __init__(self, scheme, groups, args):
        self.n_agents = args.n_agents
        self.args = args
        input_shape = self._get_input_shape(scheme)
        self._build_agents(input_shape)
        self.agent_output_type = args.agent_output_type
        self.action_selector = action_REGISTRY[args.action_selector](args)
        self.hidden_states = None
    def select_actions(self, ep_batch, t_ep, t_env, bs=slice(None), test_mode=False):
        """Forward the agents at t_ep and delegate action choice to the selector."""
        # Only select actions for the selected batch elements in bs
        avail_actions = ep_batch["avail_actions"][:, t_ep]
        agent_outputs = self.forward(ep_batch, t_ep, test_mode=test_mode)
        chosen_actions = self.action_selector.select_action(agent_outputs[bs], avail_actions[bs], t_env, test_mode=test_mode)
        return chosen_actions
    def forward(self, ep_batch, t, test_mode=False):
        """Run the shared agent for every agent at timestep t.

        Returns a (batch, n_agents, n_outputs) tensor: Q-values as-is, or
        softmaxed probabilities (with an epsilon floor during training) when
        the agent emits policy logits.  Also advances self.hidden_states.
        """
        agent_inputs = self._build_inputs(ep_batch, t)
        avail_actions = ep_batch["avail_actions"][:, t]
        agent_outs, self.hidden_states = self.agent(agent_inputs, self.hidden_states)
        # Softmax the agent outputs if they're policy logits
        if self.agent_output_type == "pi_logits":
            if getattr(self.args, "mask_before_softmax", True):
                # Make the logits for unavailable actions very negative to minimise their effect on the softmax
                reshaped_avail_actions = avail_actions.reshape(ep_batch.batch_size * self.n_agents, -1)
                agent_outs[reshaped_avail_actions == 0] = -1e10
            agent_outs = th.nn.functional.softmax(agent_outs, dim=-1)
            if not test_mode:
                # Epsilon floor
                epsilon_action_num = agent_outs.size(-1)
                if getattr(self.args, "mask_before_softmax", True):
                    # With probability epsilon, we will pick an available action uniformly
                    epsilon_action_num = reshaped_avail_actions.sum(dim=1, keepdim=True).float()
                agent_outs = ((1 - self.action_selector.epsilon) * agent_outs
                               + th.ones_like(agent_outs) * self.action_selector.epsilon/epsilon_action_num)
                if getattr(self.args, "mask_before_softmax", True):
                    # Zero out the unavailable actions
                    agent_outs[reshaped_avail_actions == 0] = 0.0
        return agent_outs.view(ep_batch.batch_size, self.n_agents, -1)
    def init_hidden(self, batch_size):
        # Broadcast the agent's initial hidden state over batch and agents.
        self.hidden_states = self.agent.init_hidden().unsqueeze(0).expand(batch_size, self.n_agents, -1)  # bav
    def parameters(self):
        return self.agent.parameters()
    def load_state(self, other_mac):
        # Copy the agent weights from another MAC (used for target networks).
        self.agent.load_state_dict(other_mac.agent.state_dict())
    def cuda(self):
        self.agent.cuda()
    def cpu(self):
        self.agent.cpu()
    def eval(self):
        self.agent.eval()
    def train(self):
        self.agent.train()
    def save_models(self, path):
        th.save(self.agent.state_dict(), "{}/agent.th".format(path))
    def load_models(self, path):
        self.agent.load_state_dict(th.load("{}/agent.th".format(path), map_location=lambda storage, loc: storage))
    def _build_agents(self, input_shape):
        self.agent = agent_REGISTRY[self.args.agent](input_shape, self.args)
    def _build_inputs(self, batch, t):
        """Concatenate obs (+ optional last actions and agent-id one-hot) into
        a flat (batch * n_agents, input_shape) tensor for timestep t."""
        # Assumes homogenous agents with flat observations.
        # Other MACs might want to e.g. delegate building inputs to each agent
        bs = batch.batch_size
        inputs = []
        inputs.append(batch["obs"][:, t])  # b1av
        if self.args.obs_last_action:
            if t == 0:
                # No previous action at the first timestep.
                inputs.append(th.zeros_like(batch["actions_onehot"][:, t]))
            else:
                inputs.append(batch["actions_onehot"][:, t-1])
        if self.args.obs_agent_id:
            # One-hot agent id so the shared network can distinguish agents.
            inputs.append(th.eye(self.n_agents, device=batch.device).unsqueeze(0).expand(bs, -1, -1))
        inputs = th.cat([x.reshape(bs*self.n_agents, -1) for x in inputs], dim=1)
        return inputs
    def _get_input_shape(self, scheme):
        # Mirror _build_inputs: obs (+ last action one-hot) (+ agent id one-hot).
        input_shape = scheme["obs"]["vshape"]
        if self.args.obs_last_action:
            input_shape += scheme["actions_onehot"]["vshape"][0]
        if self.args.obs_agent_id:
            input_shape += self.n_agents
        return input_shape
| 4,552 | 40.018018 | 125 | py |
OPT | OPT-main/src/controllers/entity_controller.py | from .basic_controller import BasicMAC
import torch as th
# This multi-agent controller shares parameters between agents
# takes entities + observation masks as input
class EntityMAC(BasicMAC):
    """MAC whose agents take (entities, obs_mask, entity_mask) inputs.

    ``forward`` accepts either a single timestep ``t`` (returns per-step
    outputs with the time dim squeezed) or ``t=None`` to run over the whole
    episode in one pass.
    """
    def __init__(self, scheme, groups, args):
        super(EntityMAC, self).__init__(scheme, groups, args)
    def forward(self, ep_batch, t, test_mode=False):
        if t is None:
            # Whole-episode forward pass.
            t = slice(0, ep_batch["entities"].shape[1])
            single_step = False
        else:
            t = slice(t, t + 1)
            single_step = True
        agent_inputs = self._build_inputs(ep_batch, t)
        agent_outs, self.hidden_states = self.agent(agent_inputs, self.hidden_states)
        if self.agent_output_type == "pi_logits":
            assert False, "unsupported agent_output_type"
        if single_step:
            # Drop the singleton time dimension.
            return agent_outs.squeeze(1)
        return agent_outs
    def _build_inputs(self, batch, t):
        """Return (entities, obs_mask, entity_mask) for the time slice ``t``;
        optionally appends each agent entity's one-hot previous action."""
        # Assumes homogenous agents with entity + observation mask inputs.
        bs = batch.batch_size
        entities = []
        entities.append(batch["entities"][:, t]) # bs, ts, n_entities, vshape
        if self.args.entity_last_action:
            # Previous-step actions, shifted one step forward in time; only the
            # first n_agents entity slots get them, the rest (and t=0) stay zero.
            last_action = th.zeros((bs, t.stop - t.start, self.args.n_entities, self.args.n_actions),
                                   device=batch.device, dtype=batch["entities"].dtype)
            if t.start == 0:
                last_action[:, 1:, :self.args.n_agents] = batch["actions_onehot"][:, slice(0, t.stop - 1)]
            else:
                last_action[:, :, :self.args.n_agents] = batch["actions_onehot"][:, slice(t.start - 1, t.stop - 1)]
            entities.append(last_action)
        entities = th.cat(entities, dim=3)
        return entities, batch["obs_mask"][:, t], batch["entity_mask"][:, t]
    def _get_input_shape(self, scheme):
        # Per-entity feature size (+ action one-hot when entity_last_action).
        input_shape = scheme["entities"]["vshape"]
        if self.args.entity_last_action:
            input_shape += scheme["actions_onehot"]["vshape"][0]
        return input_shape
| 2,021 | 38.647059 | 115 | py |
OPT | OPT-main/src/controllers/token_controller.py | from .basic_controller import BasicMAC
import torch as th
# This multi-agent controller shares parameters between agents
class TokenMAC(BasicMAC):
    """Multi-agent controller whose agents consume per-token observations
    (the flat obs reshaped to n_tokens x obs_token_dim)."""

    def __init__(self, scheme, groups, args):
        super(TokenMAC, self).__init__(scheme, groups, args)

    def forward(self, ep_batch, t, test_mode=False):
        """Compute per-agent outputs at timestep t; advances the hidden state."""
        token_inputs = self._build_inputs(ep_batch, t)
        agent_outs, self.hidden_states = self.agent(token_inputs, self.hidden_states)
        if self.agent_output_type == "pi_logits":
            assert False, "unsupported agent_output_type"
        return agent_outs.view(ep_batch.batch_size, self.n_agents, -1)

    def init_hidden(self, batch_size):
        """Broadcast the agent's initial hidden state; its rank depends on the agent type."""
        h0 = self.agent.init_hidden().unsqueeze(0)
        if self.args.agent in ['token_dyan']:
            self.hidden_states = h0.expand(batch_size, self.n_agents, -1)
        elif self.args.agent in ['token_updet']:
            self.hidden_states = h0.expand(batch_size, self.n_agents, 1, -1)
        else:
            self.hidden_states = h0.expand(batch_size, self.n_agents, self.args.n_tokens, -1)

    def _build_inputs(self, batch, t):
        # currently we only support battles with marines (e.g. 3m 8m 5m_vs_6m)
        # you can implement your own with any other agent type.
        raw_obs = batch["obs"][:, t]
        tokens = raw_obs.reshape(-1, self.args.n_tokens, self.args.obs_token_dim)
        tokens = th.cat([tokens], dim=1)
        # No attention mask is used for token inputs.
        return tokens, None

    def _get_input_shape(self, scheme):
        return self.args.obs_token_dim
| 1,697 | 39.428571 | 128 | py |
OPT | OPT-main/src/utils/rl_utils.py | import torch as th
def build_td_lambda_targets(rewards, terminated, mask, target_qs, n_agents, gamma, td_lambda):
    """Compute TD(lambda) targets by backwards recursion of the forward view.

    Shapes: ``target_qs`` is B x T x A; ``rewards``, ``terminated`` and
    ``mask`` are (at least) B x (T-1) x 1.  Returns the lambda-returns for
    t = 0 .. T-2, i.e. a B x (T-1) x A tensor.  ``n_agents`` is unused but
    kept for call-site compatibility.
    """
    lambda_ret = target_qs.new_zeros(*target_qs.shape)
    # Bootstrap the final step from the target Q unless the episode terminated.
    lambda_ret[:, -1] = target_qs[:, -1] * (1 - th.sum(terminated, dim=1))
    last = lambda_ret.shape[1] - 2
    for step in range(last, -1, -1):
        bootstrap = (1 - td_lambda) * gamma * target_qs[:, step + 1] * (1 - terminated[:, step])
        lambda_ret[:, step] = (td_lambda * gamma * lambda_ret[:, step + 1]
                               + mask[:, step] * (rewards[:, step] + bootstrap))
    # Lambda-return from t=0 to t=T-2, i.e. B x (T-1) x A.
    return lambda_ret[:, 0:-1]
| 774 | 47.4375 | 110 | py |
OPT | OPT-main/src/learners/entity_opt_q_learner.py | import copy
from components.episode_buffer import EpisodeBatch
from modules.mixers.entity_opt_qmix import EntityOPTQMixer
import torch as th
from torch.optim import RMSprop, Adam
class QLearner:
    """Deep Q-learner for the entity-wise OPT architecture.

    Trains the behaviour MAC jointly with an EntityOPTQMixer, keeps frozen
    deep-copied target networks for both, and augments the standard
    (double) Q-learning TD loss with the OPT disentangle / CMI regularisers.
    """
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger
        # Agent and mixer parameters are optimised by a single optimiser.
        self.params = list(mac.parameters())
        self.last_target_update_episode = 0
        self.mixer = None
        if args.mixer is not None:
            if args.mixer == "entity_opt_qmix":
                self.mixer = EntityOPTQMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.params += list(self.mixer.parameters())
            self.target_mixer = copy.deepcopy(self.mixer)
        if self.args.optimizer == 'adam':
            self.optimiser = Adam(params=self.params, lr=args.lr)
        else:
            self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)
        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)
        # Initialised so the very first train() call triggers logging.
        self.log_stats_t = -self.args.learner_log_interval - 1
    def _build_inputs(self, batch):
        """Assemble per-timestep entity features for the mixer.

        Concatenates raw entity features with (optionally) each agent's
        one-hot previous action, paired with the entity mask. Returns
        (inputs, target_inputs) covering timesteps t and t+1 respectively.
        """
        entities = []
        bs, ts, ne, ed = batch["entities"].shape
        entities.append(batch["entities"])
        if self.args.entity_last_action:
            # Only the first n_agents entities are controllable and get a
            # last-action slot; t=0 keeps all-zero last actions.
            last_actions = th.zeros(bs, ts, ne, self.args.n_actions,
                                    device=batch.device, dtype=batch["entities"].dtype)
            last_actions[:, 1:, :self.args.n_agents] = batch["actions_onehot"][:, :-1]
            entities.append(last_actions)
        entities = th.cat(entities, dim=3)
        inputs = (entities[:, :-1], batch["entity_mask"][:, :-1])
        target_inputs = (entities[:, 1:], batch["entity_mask"][:, 1:])
        return inputs, target_inputs
    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        """Run one optimisation step on a batch of episodes.

        Also refreshes the target networks every `target_update_interval`
        episodes and emits training stats every `learner_log_interval` steps.
        """
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1]) # mask the last_data after terminated
        avail_actions = batch["avail_actions"]
        # enable things like dropout on mac and mixer, but not target_mac and target_mixer
        self.mac.train()
        self.mixer.train()
        self.target_mac.eval()
        self.target_mixer.eval()
        self.mac.agent.set_pattern(use_pattern=True)
        # Calculate estimated Q-Values
        self.mac.init_hidden(batch.batch_size)
        mac_out = self.mac.forward(batch, t=None)
        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3)  # Remove the last dim
        # Calculate the Q-Values necessary for the target
        with th.no_grad():
            self.target_mac.init_hidden(batch.batch_size)
            target_mac_out = self.target_mac.forward(batch, t=None)
            # We don't need the first timesteps Q-Value estimate for calculating targets
            target_mac_out = target_mac_out[:, 1:]
            # Mask out unavailable actions
            target_mac_out[avail_actions[:, 1:] == 0] = -9999999
            # Max over target Q-Values
            if self.args.double_q:
                # Get actions that maximise live Q (for double q-learning)
                mac_out_detach = mac_out.clone().detach()
                mac_out_detach[avail_actions == 0] = -9999999
                cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
                target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
            else:
                target_max_qvals = target_mac_out.max(dim=3)[0]
        # Mix
        if self.mixer is not None:
            inputs, target_inputs = self._build_inputs(batch)
            chosen_action_qvals = self.mixer(chosen_action_qvals, inputs)
            target_max_qvals = self.target_mixer(target_max_qvals, target_inputs)
        # Calculate 1-step Q-Learning targets
        targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals
        # Td-error
        td_error = (chosen_action_qvals - targets.detach())
        mask = mask.expand_as(td_error)
        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask
        # Normal L2 loss, take mean over actual data
        td_loss = (masked_td_error ** 2).sum() / mask.sum()
        loss = td_loss
        # OPT auxiliary losses: masked means over valid (non-padded) steps.
        mixer_disentangle_loss = self.mixer.get_disentangle_loss()
        mixer_disentangle_loss = (mixer_disentangle_loss * mask.squeeze(2)).sum() / mask.sum()
        loss = loss + self.args.mixer_disentangle_alpha * mixer_disentangle_loss
        mac_disentangle_loss = self.mac.agent.get_disentangle_loss()
        mac_disentangle_loss = (mac_disentangle_loss[:, :-1] * mask.squeeze(2)).sum() / mask.sum()
        loss = loss + self.args.mac_disentangle_alpha * mac_disentangle_loss
        mac_cmi_entropy_loss, mac_cmi_kl_loss = self.mac.agent.get_cmi_loss()
        mac_cmi_entropy_loss = (mac_cmi_entropy_loss[:, :-1] * mask.squeeze(2)).sum() / mask.sum()
        mac_cmi_kl_loss = (mac_cmi_kl_loss[:, :-1] * mask.squeeze(2)).sum() / mask.sum()
        loss = loss + self.args.mac_cmi_entropy_alpha * mac_cmi_entropy_loss
        loss = loss + self.args.mac_cmi_kl_alpha * mac_cmi_kl_loss
        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
        self.optimiser.step()
        self.mac.agent.set_pattern(use_pattern=False)
        if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num
        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("train/loss", loss.item(), t_env)
            self.logger.log_stat("train/td_loss", td_loss.item(), t_env)
            self.logger.log_stat("train/grad_norm", grad_norm.item(), t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat("train/td_error_abs", (masked_td_error.abs().sum().item()/mask_elems), t_env)
            self.logger.log_stat("train/q_taken_mean", (chosen_action_qvals * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("train/target_mean", (targets * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env
    def _update_targets(self):
        """Hard-copy behaviour network weights into the target networks."""
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        self.logger.console_logger.info("Updated target network")
    def cuda(self):
        """Move all networks (behaviour and target) to GPU."""
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()
    def save_models(self, path):
        """Persist MAC, mixer, and optimiser state under `path`."""
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
    def load_models(self, path):
        """Restore MAC, mixer, and optimiser state from `path`."""
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
            self.target_mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
        self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
| 8,046 | 43.214286 | 138 | py |
OPT | OPT-main/src/learners/token_opt_q_learner.py | import copy
from components.episode_buffer import EpisodeBatch
from modules.mixers.token_opt_qmix import TokenOPTQMixer
import torch as th
from torch.optim import RMSprop, Adam
class QLearner:
    """Deep Q-learner for the token-wise OPT architecture.

    Unlike the entity learner, the MAC is queried one timestep at a time and
    the mixer consumes the flat state reshaped into (n_tokens,
    state_token_dim) tokens. Adds OPT disentangle / CMI regularisers to the
    (double) Q-learning TD loss.
    """
    def __init__(self, mac, scheme, logger, args):
        self.args = args
        self.mac = mac
        self.logger = logger
        # Agent and mixer parameters are optimised by a single optimiser.
        self.params = list(mac.parameters())
        self.last_target_update_episode = 0
        self.mixer = None
        if args.mixer is not None:
            if args.mixer == "token_opt_qmix":
                self.mixer = TokenOPTQMixer(args)
            else:
                raise ValueError("Mixer {} not recognised.".format(args.mixer))
            self.params += list(self.mixer.parameters())
            self.target_mixer = copy.deepcopy(self.mixer)
        if self.args.optimizer == 'adam':
            self.optimiser = Adam(params=self.params, lr=args.lr)
        else:
            self.optimiser = RMSprop(params=self.params, lr=args.lr, alpha=args.optim_alpha, eps=args.optim_eps)
        # a little wasteful to deepcopy (e.g. duplicates action selector), but should work for any MAC
        self.target_mac = copy.deepcopy(mac)
        # Initialised so the very first train() call triggers logging.
        self.log_stats_t = -self.args.learner_log_interval - 1
    def _build_inputs(self, batch):
        """Reshape the global state into token form for the mixer.

        Returns (inputs, target_inputs) of shape
        [bs, T-1, n_tokens, state_token_dim] for timesteps t and t+1.
        """
        inputs = batch["state"][:, :-1].reshape(batch.batch_size, -1, self.args.n_tokens, self.args.state_token_dim)
        target_inputs = batch["state"][:, 1:].reshape(batch.batch_size, -1, self.args.n_tokens, self.args.state_token_dim)
        return inputs, target_inputs
    def train(self, batch: EpisodeBatch, t_env: int, episode_num: int):
        """Run one optimisation step on a batch of episodes.

        Also refreshes the target networks every `target_update_interval`
        episodes and emits training stats every `learner_log_interval` steps.
        """
        # Get the relevant quantities
        rewards = batch["reward"][:, :-1]
        actions = batch["actions"][:, :-1]
        terminated = batch["terminated"][:, :-1].float()
        mask = batch["filled"][:, :-1].float()
        mask[:, 1:] = mask[:, 1:] * (1 - terminated[:, :-1]) # mask the last_data after terminated
        avail_actions = batch["avail_actions"]
        self.mac.agent.set_pattern(use_pattern=True)
        # Calculate estimated Q-Values (one forward per timestep, RNN-style)
        mac_out = []
        self.mac.init_hidden(batch.batch_size)
        for t in range(batch.max_seq_length):
            agent_outs = self.mac.forward(batch, t=t)
            mac_out.append(agent_outs)
        mac_out = th.stack(mac_out, dim=1)  # Concat over time
        # Pick the Q-Values for the actions taken by each agent
        chosen_action_qvals = th.gather(mac_out[:, :-1], dim=3, index=actions).squeeze(3)  # Remove the last dim
        # Calculate the Q-Values necessary for the target
        with th.no_grad():
            target_mac_out = []
            self.target_mac.init_hidden(batch.batch_size)
            for t in range(batch.max_seq_length):
                target_agent_outs = self.target_mac.forward(batch, t=t)
                target_mac_out.append(target_agent_outs)
            # We don't need the first timesteps Q-Value estimate for calculating targets
            target_mac_out = th.stack(target_mac_out[1:], dim=1)  # Concat across time
            # Mask out unavailable actions
            target_mac_out[avail_actions[:, 1:] == 0] = -9999999
            # Max over target Q-Values
            if self.args.double_q:
                # Get actions that maximise live Q (for double q-learning)
                mac_out_detach = mac_out.clone().detach()
                mac_out_detach[avail_actions == 0] = -9999999
                cur_max_actions = mac_out_detach[:, 1:].max(dim=3, keepdim=True)[1]
                target_max_qvals = th.gather(target_mac_out, 3, cur_max_actions).squeeze(3)
            else:
                target_max_qvals = target_mac_out.max(dim=3)[0]
        # Mix
        if self.mixer is not None:
            inputs, target_inputs = self._build_inputs(batch)
            chosen_action_qvals = self.mixer(chosen_action_qvals, inputs)
            target_max_qvals = self.target_mixer(target_max_qvals, target_inputs)
        # Calculate 1-step Q-Learning targets
        targets = rewards + self.args.gamma * (1 - terminated) * target_max_qvals
        # Td-error
        td_error = (chosen_action_qvals - targets.detach())
        mask = mask.expand_as(td_error)
        # 0-out the targets that came from padded data
        masked_td_error = td_error * mask
        # Normal L2 loss, take mean over actual data
        td_loss = (masked_td_error ** 2).sum() / mask.sum()
        loss = td_loss
        # OPT auxiliary losses: the agent-side losses come back flattened per
        # agent, so they are reshaped and averaged over agents first, then
        # mean-reduced over valid (non-padded) timesteps.
        mixer_disentangle_loss = self.mixer.get_disentangle_loss()
        mixer_disentangle_loss = (mixer_disentangle_loss * mask.squeeze(2)).sum() / mask.sum()
        loss = loss + self.args.mixer_disentangle_alpha * mixer_disentangle_loss
        mac_disentangle_loss = self.mac.agent.get_disentangle_loss()
        mac_disentangle_loss = th.mean(mac_disentangle_loss.reshape(batch.batch_size, self.args.n_agents, batch.max_seq_length), dim=1)
        mac_disentangle_loss = (mac_disentangle_loss[:, :-1] * mask.squeeze(2)).sum() / mask.sum()
        loss = loss + self.args.mac_disentangle_alpha * mac_disentangle_loss
        mac_cmi_entropy_loss, mac_cmi_kl_loss = self.mac.agent.get_cmi_loss()
        mac_cmi_entropy_loss = th.mean(mac_cmi_entropy_loss.reshape(batch.batch_size, self.args.n_agents, batch.max_seq_length), dim=1)
        mac_cmi_kl_loss = th.mean(mac_cmi_kl_loss.reshape(batch.batch_size, self.args.n_agents, batch.max_seq_length), dim=1)
        mac_cmi_entropy_loss = (mac_cmi_entropy_loss[:, :-1] * mask.squeeze(2)).sum() / mask.sum()
        mac_cmi_kl_loss = (mac_cmi_kl_loss[:, :-1] * mask.squeeze(2)).sum() / mask.sum()
        loss = loss + self.args.mac_cmi_entropy_alpha * mac_cmi_entropy_loss
        loss = loss + self.args.mac_cmi_kl_alpha * mac_cmi_kl_loss
        # Optimise
        self.optimiser.zero_grad()
        loss.backward()
        grad_norm = th.nn.utils.clip_grad_norm_(self.params, self.args.grad_norm_clip)
        self.optimiser.step()
        if (episode_num - self.last_target_update_episode) / self.args.target_update_interval >= 1.0:
            self._update_targets()
            self.last_target_update_episode = episode_num
        self.mac.agent.set_pattern(use_pattern=False)
        if t_env - self.log_stats_t >= self.args.learner_log_interval:
            self.logger.log_stat("train/loss", loss.item(), t_env)
            self.logger.log_stat("train/td_loss", td_loss.item(), t_env)
            self.logger.log_stat("train/grad_norm", grad_norm.item(), t_env)
            mask_elems = mask.sum().item()
            self.logger.log_stat("train/td_error_abs", (masked_td_error.abs().sum().item()/mask_elems), t_env)
            self.logger.log_stat("train/q_taken_mean", (chosen_action_qvals * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
            self.logger.log_stat("train/target_mean", (targets * mask).sum().item()/(mask_elems * self.args.n_agents), t_env)
            self.log_stats_t = t_env
    def _update_targets(self):
        """Hard-copy behaviour network weights into the target networks."""
        self.target_mac.load_state(self.mac)
        if self.mixer is not None:
            self.target_mixer.load_state_dict(self.mixer.state_dict())
        self.logger.console_logger.info("Updated target network")
    def cuda(self):
        """Move all networks (behaviour and target) to GPU."""
        self.mac.cuda()
        self.target_mac.cuda()
        if self.mixer is not None:
            self.mixer.cuda()
            self.target_mixer.cuda()
    def save_models(self, path):
        """Persist MAC, mixer, and optimiser state under `path`."""
        self.mac.save_models(path)
        if self.mixer is not None:
            th.save(self.mixer.state_dict(), "{}/mixer.th".format(path))
        th.save(self.optimiser.state_dict(), "{}/opt.th".format(path))
    def load_models(self, path):
        """Restore MAC, mixer, and optimiser state from `path`."""
        self.mac.load_models(path)
        # Not quite right but I don't want to save target networks
        self.target_mac.load_models(path)
        if self.mixer is not None:
            self.mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
            self.target_mixer.load_state_dict(th.load("{}/mixer.th".format(path), map_location=lambda storage, loc: storage))
        self.optimiser.load_state_dict(th.load("{}/opt.th".format(path), map_location=lambda storage, loc: storage))
| 8,202 | 45.607955 | 138 | py |
MST | MST-main/real/train_code/utils.py | import numpy as np
import scipy.io as sio
import os
import glob
import re
import torch
import torch.nn as nn
import math
import random
def _as_floats(im1, im2):
float_type = np.result_type(im1.dtype, im2.dtype, np.float32)
im1 = np.asarray(im1, dtype=float_type)
im2 = np.asarray(im2, dtype=float_type)
return im1, im2
def compare_mse(im1, im2):
    """Mean squared error between two images, computed in float64."""
    im1, im2 = _as_floats(im1, im2)
    diff = im1 - im2
    return np.mean(diff * diff, dtype=np.float64)
def compare_psnr(im_true, im_test, data_range=None):
    """Peak signal-to-noise ratio between `im_true` and `im_test` in dB.

    Args:
        im_true: reference image (array-like with a dtype).
        im_test: distorted image.
        data_range: peak-to-peak value of the data. When None it is inferred
            from the observed range of `im_true` (previously a None
            data_range crashed with a TypeError on `None ** 2`).
    """
    im_true, im_test = _as_floats(im_true, im_test)
    if data_range is None:
        # Fall back to the reference image's dynamic range.
        data_range = float(np.max(im_true) - np.min(im_true))
    err = compare_mse(im_true, im_test)
    return 10 * np.log10((data_range ** 2) / err)
def psnr(img1, img2):
    """PSNR (dB) for images in [0, 255]; returns 100 for identical inputs."""
    mse = np.mean((img1 / 255. - img2 / 255.) ** 2)
    if mse < 1.0e-10:
        # Essentially identical images: avoid log of ~infinity.
        return 100
    return 20 * math.log10(1 / math.sqrt(mse))
def PSNR_GPU(im_true, im_fake):
    """PSNR (dB) between two [0, 1]-scaled tensors, evaluated at 8-bit scale.

    Fixes over the previous version:
    * no longer mutates the caller's tensors (`im_true *= 255` scaled the
      inputs in place);
    * drops the deprecated ``nn.MSELoss(reduce=False)`` in favour of a plain
      mean of squared differences;
    * uses ``torch.log10`` instead of mixing numpy ops onto tensors.

    Returns a scalar tensor.
    """
    data_range = 255
    esp = 1e-12
    # Work on scaled copies so the caller's tensors are untouched.
    Itrue = (im_true * 255).round()
    Ifake = (im_fake * 255).round()
    err = torch.mean((Itrue - Ifake) ** 2)
    psnr = 10. * torch.log10((data_range ** 2) / (err + esp))
    return psnr
def PSNR_Nssr(im_true, im_fake):
    """PSNR (dB) assuming a data range of 1 (images scaled to [0, 1])."""
    err = ((im_true - im_fake) ** 2).mean()
    return 10. * np.log10(1 / err)
def dataparallel(model, ngpus, gpu0=0):
    """Move `model` onto GPU(s) gpu0..gpu0+ngpus-1, wrapping in DataParallel
    when more than one GPU is requested. CPU-only mode is not supported."""
    assert ngpus != 0, "only support gpu mode"
    assert torch.cuda.device_count() >= gpu0 + ngpus
    if ngpus > 1:
        if isinstance(model, torch.nn.DataParallel):
            # Already wrapped: just make sure it lives on the GPU.
            model = model.cuda()
        else:
            devices = list(range(gpu0, gpu0 + ngpus))
            model = torch.nn.DataParallel(model, devices).cuda()
    elif ngpus == 1:
        model = model.cuda()
    return model
def findLastCheckpoint(save_dir):
    """Return the highest epoch among `model_*.pth` files in save_dir (0 if none)."""
    epochs = []
    for ckpt in glob.glob(os.path.join(save_dir, 'model_*.pth')):
        found = re.findall(".*model_(.*).pth.*", ckpt)
        epochs.append(int(found[0]))
    return max(epochs) if epochs else 0
# load HSIs
def prepare_data(path, file_num):
    """Load `file_num` 512x512x28 HSI cubes named scene%02d.mat under `path`.

    Each cube is read from the 'data_slice' key, scaled from 16-bit to
    [0, 1], clamped, and stacked along the last axis.
    Returns an array of shape (512, 512, 28, file_num).
    """
    HR_HSI = np.zeros((((512,512,28,file_num))))
    for idx in range(file_num):
        # read HrHSI
        path1 = os.path.join(path) + 'scene%02d.mat' % (idx+1)
        # path1 = os.path.join(path) + HR_code + '.mat'
        data = sio.loadmat(path1)
        # 65535 = uint16 max; normalise to [0, 1] then clamp outliers.
        HR_HSI[:,:,:,idx] = data['data_slice'] / 65535.0
    HR_HSI[HR_HSI < 0.] = 0.
    HR_HSI[HR_HSI > 1.] = 1.
    return HR_HSI
def loadpath(pathlistfile):
    """Read a newline-separated list of paths and return it shuffled in place.

    Uses a context manager so the file handle is closed even if reading
    raises (the previous open/close pair leaked on error).
    """
    with open(pathlistfile) as fp:
        pathlist = fp.read().splitlines()
    random.shuffle(pathlist)
    return pathlist
def time2file_name(time):
    """Convert 'YYYY-MM-DD HH:MM:SS[.ffffff]' into 'YYYY_MM_DD_HH_MM_SS'."""
    fields = (time[0:4], time[5:7], time[8:10],
              time[11:13], time[14:16], time[17:19])
    return '_'.join(fields)
# def prepare_data_cave(path, file_list, file_num):
# HR_HSI = np.zeros((((512,512,28,file_num))))
# for idx in range(file_num):
# #### read HrHSI
# HR_code = file_list[idx]
# path1 = os.path.join(path) + HR_code + '.mat'
# data = sio.loadmat(path1)
# HR_HSI[:,:,:,idx] = data['data_slice'] / 65535.0
# HR_HSI[HR_HSI < 0] = 0
# HR_HSI[HR_HSI > 1] = 1
# return HR_HSI
#
# def prepare_data_KASIT(path, file_list, file_num):
# HR_HSI = np.zeros((((2704,3376,28,file_num))))
# for idx in range(file_num):
# #### read HrHSI
# HR_code = file_list[idx]
# path1 = os.path.join(path) + HR_code + '.mat'
# data = sio.loadmat(path1)
# HR_HSI[:,:,:,idx] = data['HSI']
# HR_HSI[HR_HSI < 0] = 0
# HR_HSI[HR_HSI > 1] = 1
# return HR_HSI
def prepare_data_cave(path, file_num):
    """Load the first `file_num` CAVE cubes found in `path`.

    Files are taken in os.listdir order; each 'data_slice' entry is scaled
    from 16-bit to [0, 1] and clamped.
    Returns an array of shape (512, 512, 28, file_num).
    """
    HR_HSI = np.zeros((((512,512,28,file_num))))
    file_list = os.listdir(path)
    # for idx in range(1):
    for idx in range(file_num):
        print(f'loading CAVE {idx}')
        #### read HrHSI
        HR_code = file_list[idx]
        path1 = os.path.join(path) + HR_code
        data = sio.loadmat(path1)
        # 65535 = uint16 max; normalise to [0, 1].
        HR_HSI[:,:,:,idx] = data['data_slice'] / 65535.0
        HR_HSI[HR_HSI < 0] = 0
        HR_HSI[HR_HSI > 1] = 1
    return HR_HSI
def prepare_data_KAIST(path, file_num):
    """Load the first `file_num` KAIST cubes found in `path`.

    Files are taken in os.listdir order; the 'HSI' entry is assumed to be
    already in [0, 1] (only clamped here, not rescaled).
    Returns an array of shape (2704, 3376, 28, file_num).
    """
    HR_HSI = np.zeros((((2704,3376,28,file_num))))
    file_list = os.listdir(path)
    # for idx in range(1):
    for idx in range(file_num):
        print(f'loading KAIST {idx}')
        #### read HrHSI
        HR_code = file_list[idx]
        path1 = os.path.join(path) + HR_code
        data = sio.loadmat(path1)
        HR_HSI[:,:,:,idx] = data['HSI']
        HR_HSI[HR_HSI < 0] = 0
        HR_HSI[HR_HSI > 1] = 1
    return HR_HSI
def init_mask(mask, Phi, Phi_s, mask_type):
    """Select the measurement-mask representation a model expects.

    Args:
        mask: raw 2-D coded aperture mask.
        Phi: shifted sensing matrix.
        Phi_s: sum of Phi**2 over the spectral axis (normaliser).
        mask_type: one of 'Phi', 'Phi_PhiPhiT', 'Mask', or None.

    Raises:
        ValueError: for an unrecognised `mask_type` (previously this fell
            through and crashed with UnboundLocalError).
    """
    if mask_type == 'Phi':
        return Phi
    if mask_type == 'Phi_PhiPhiT':
        return (Phi, Phi_s)
    if mask_type == 'Mask':
        return mask
    if mask_type is None:
        return None
    raise ValueError(f"Unknown mask_type: {mask_type!r}")
MST | MST-main/real/train_code/dataset.py | import torch.utils.data as tud
import random
import torch
import numpy as np
import scipy.io as sio
class dataset(tud.Dataset):
    """Random-crop CASSI training dataset over CAVE / KAIST hyperspectral cubes.

    Each training sample simulates a real SD-CASSI acquisition: a random
    `size` x `size` crop with random flips/rotations, element-wise coded-mask
    modulation, a 2-pixel-per-band spectral shift, summation into a single
    2-D measurement, and shot-noise corruption.
    """
    def __init__(self, opt, CAVE, KAIST):
        super(dataset, self).__init__()
        self.isTrain = opt.isTrain
        self.size = opt.size          # spatial crop size (pixels)
        # self.path = opt.data_path
        if self.isTrain == True:
            self.num = opt.trainset_num
        else:
            self.num = opt.testset_num
        self.CAVE = CAVE              # (512, 512, 28, N) cubes
        self.KAIST = KAIST            # (2704, 3376, 28, N) cubes
        ## load mask
        data = sio.loadmat(opt.mask_path)
        self.mask = data['mask']
        # Replicate the single-channel coded mask over the 28 spectral bands.
        self.mask_3d = np.tile(self.mask[:, :, np.newaxis], (1, 1, 28))
    def __getitem__(self, index):
        # Pick a source cube: in training, a random scene from a randomly
        # chosen dataset (CAVE or KAIST); `index` itself is ignored.
        if self.isTrain == True:
            # index1 = 0
            index1 = random.randint(0, 29)
            d = random.randint(0, 1)
            if d == 0:
                hsi = self.CAVE[:,:,:,index1]
            else:
                hsi = self.KAIST[:, :, :, index1]
        else:
            index1 = index
            # NOTE(review): self.HSI is never assigned in this class, so this
            # test-mode branch would raise AttributeError — confirm intended
            # test data source.
            hsi = self.HSI[:, :, :, index1]
        shape = np.shape(hsi)
        # Random spatial crop of the label cube.
        px = random.randint(0, shape[0] - self.size)
        py = random.randint(0, shape[1] - self.size)
        label = hsi[px:px + self.size:1, py:py + self.size:1, :]
        # while np.max(label)==0:
        #     px = random.randint(0, shape[0] - self.size)
        #     py = random.randint(0, shape[1] - self.size)
        #     label = hsi[px:px + self.size:1, py:py + self.size:1, :]
        # print(np.min(), np.max())
        # Independent random crop of the (660 x 660) coded mask.
        pxm = random.randint(0, 660 - self.size)
        pym = random.randint(0, 660 - self.size)
        mask_3d = self.mask_3d[pxm:pxm + self.size:1, pym:pym + self.size:1, :]
        # Build the dispersed (shifted) mask: band t is rolled 2*t pixels.
        mask_3d_shift = np.zeros((self.size, self.size + (28 - 1) * 2, 28))
        mask_3d_shift[:, 0:self.size, :] = mask_3d
        for t in range(28):
            mask_3d_shift[:, :, t] = np.roll(mask_3d_shift[:, :, t], 2 * t, axis=1)
        # Phi_s = sum(Phi^2) over bands; zeros replaced by 1 to avoid /0.
        mask_3d_shift_s = np.sum(mask_3d_shift ** 2, axis=2, keepdims=False)
        mask_3d_shift_s[mask_3d_shift_s == 0] = 1
        if self.isTrain == True:
            rotTimes = random.randint(0, 3)
            vFlip = random.randint(0, 1)
            hFlip = random.randint(0, 1)
            # Random rotation
            for j in range(rotTimes):
                label = np.rot90(label)
            # Random vertical Flip
            for j in range(vFlip):
                label = label[:, ::-1, :].copy()
            # Random horizontal Flip
            for j in range(hFlip):
                label = label[::-1, :, :].copy()
        # Simulate the measurement: modulate, shift, and sum over bands.
        temp = mask_3d * label
        temp_shift = np.zeros((self.size, self.size + (28 - 1) * 2, 28))
        temp_shift[:, 0:self.size, :] = temp
        for t in range(28):
            temp_shift[:, :, t] = np.roll(temp_shift[:, :, t], 2 * t, axis=1)
        meas = np.sum(temp_shift, axis=2)
        input = meas / 28 * 2 * 1.2
        # Shot-noise model: presumably QE = quantum efficiency and
        # bit = full-well/quantisation level of the real camera — TODO confirm.
        QE, bit = 0.4, 2048
        input = np.random.binomial((input * bit / QE).astype(int), QE)
        input = np.float32(input) / np.float32(bit)
        # Convert to channels-first torch tensors.
        label = torch.FloatTensor(label.copy()).permute(2,0,1)
        input = torch.FloatTensor(input.copy())
        mask_3d_shift = torch.FloatTensor(mask_3d_shift.copy()).permute(2,0,1)
        mask_3d_shift_s = torch.FloatTensor(mask_3d_shift_s.copy())
        return input, label, mask_3d, mask_3d_shift, mask_3d_shift_s
    def __len__(self):
        # Virtual epoch length; samples are drawn randomly regardless of index.
        return self.num
| 3,450 | 34.57732 | 83 | py |
MST | MST-main/real/train_code/train.py | from architecture import *
from utils import *
from dataset import dataset
import torch.utils.data as tud
import torch
import torch.nn.functional as F
import time
import datetime
from torch.autograd import Variable
import os
from option import opt
# Pin GPU visibility before any CUDA context is created.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_id
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
if not torch.cuda.is_available():
    raise Exception('NO GPU!')
# load training data
CAVE = prepare_data_cave(opt.data_path_CAVE, 30)
KAIST = prepare_data_KAIST(opt.data_path_KAIST, 30)
# saving path: timestamped sub-directory so runs never overwrite each other
date_time = str(datetime.datetime.now())
date_time = time2file_name(date_time)
opt.outf = os.path.join(opt.outf, date_time)
if not os.path.exists(opt.outf):
    os.makedirs(opt.outf)
# model
if opt.method == 'hdnet':
    # BUGFIX: model_generator returns a (model, FDL_loss) pair for hdnet;
    # calling .cuda() on that tuple raised AttributeError. Unpack first,
    # then move the model to the GPU.
    model, FDL_loss = model_generator(opt.method, opt.pretrained_model_path)
    model = model.cuda()
else:
    model = model_generator(opt.method, opt.pretrained_model_path).cuda()
# optimizing
optimizer = torch.optim.Adam(model.parameters(), lr=opt.learning_rate, betas=(0.9, 0.999))
if opt.scheduler == 'MultiStepLR':
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.milestones, gamma=opt.gamma)
elif opt.scheduler == 'CosineAnnealingLR':
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, opt.max_epoch, eta_min=1e-6)
criterion = nn.L1Loss()
if __name__ == "__main__":
    print("Random Seed: ", opt.seed)
    torch.manual_seed(opt.seed)
    torch.cuda.manual_seed(opt.seed)
    ## pipline of training
    for epoch in range(1, opt.max_epoch):
        model.train()
        # A fresh dataset object each epoch re-randomises crops and noise.
        Dataset = dataset(opt, CAVE, KAIST)
        loader_train = tud.DataLoader(Dataset, num_workers=8, batch_size=opt.batch_size, shuffle=True)
        # NOTE(review): passing an explicit epoch to scheduler.step() is
        # deprecated in recent torch; kept to preserve the exact LR schedule.
        scheduler.step(epoch)
        epoch_loss = 0
        start_time = time.time()
        for i, (input, label, Mask, Phi, Phi_s) in enumerate(loader_train):
            input, label, Phi, Phi_s = Variable(input), Variable(label), Variable(Phi), Variable(Phi_s)
            input, label, Phi, Phi_s = input.cuda(), label.cuda(), Phi.cuda(), Phi_s.cuda()
            input_mask = init_mask(Mask, Phi, Phi_s, opt.input_mask)
            if opt.method in ['cst_s', 'cst_m', 'cst_l']:
                # CST also predicts a sparsity map, supervised by the
                # per-pixel reconstruction error of the detached output.
                out, diff_pred = model(input, input_mask)
                loss = criterion(out, label)
                diff_gt = torch.mean(torch.abs(out.detach() - label), dim=1, keepdim=True)  # [b,1,h,w]
                loss_sparsity = F.mse_loss(diff_gt, diff_pred)
                loss = loss + 2 * loss_sparsity
            else:
                out = model(input, input_mask)
                loss = criterion(out, label)
                if opt.method == 'hdnet':
                    fdl_loss = FDL_loss(out, label)
                    loss = loss + 0.7 * fdl_loss
            epoch_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if i % (1000) == 0:
                print('%4d %4d / %4d loss = %.10f time = %s' % (
                    epoch + 1, i, len(Dataset) // opt.batch_size, epoch_loss / ((i + 1) * opt.batch_size),
                    datetime.datetime.now()))
        elapsed_time = time.time() - start_time
        print('epcoh = %4d , loss = %.10f , time = %4.2f s' % (epoch + 1, epoch_loss / len(Dataset), elapsed_time))
        torch.save(model, os.path.join(opt.outf, 'model_%03d.pth' % (epoch + 1)))
| 3,493 | 35.395833 | 115 | py |
MST | MST-main/real/train_code/architecture/MST_Plus_Plus.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    """In-place truncated-normal init; thin public wrapper with defaults
    matching nn.init.trunc_normal_ (mean 0, std 1, bounds [-2, 2])."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """In-place variance-scaling init (as in JAX/TF initializers).

    The target variance is scale / fan, where fan is chosen by `mode`
    ('fan_in', 'fan_out', or 'fan_avg'), and samples are drawn from the
    chosen `distribution`.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    variance = scale / denom
    if distribution == "truncated_normal":
        # 0.8796... is the std of the standard normal truncated to (-2, 2);
        # dividing compensates so the result has the requested variance.
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        limit = math.sqrt(3 * variance)
        tensor.uniform_(-limit, limit)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """LeCun-normal init: truncated normal with variance 1/fan_in, in place."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
class PreNorm(nn.Module):
    """Pre-normalisation wrapper: LayerNorm over the last dim, then `fn`."""
    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)
    def forward(self, x, *args, **kwargs):
        # Normalise first, then delegate with any extra arguments untouched.
        return self.fn(self.norm(x), *args, **kwargs)
class GELU(nn.Module):
    """Stateless GELU activation as a module (wraps F.gelu)."""
    def forward(self, x):
        return F.gelu(x)
def conv(in_channels, out_channels, kernel_size, bias=False, padding = 1, stride = 1):
    """'Same'-padded Conv2d factory.

    NOTE(review): the `padding` argument is ignored — padding is always
    kernel_size // 2. Honoring it would change behavior for callers that
    pass it explicitly; confirm before fixing.
    """
    return nn.Conv2d(
        in_channels, out_channels, kernel_size,
        padding=(kernel_size//2), bias=bias, stride=stride)
def shift_back(inputs,step=2):          # input [bs,28,256,310]  output [bs, 28, 256, 256]
    """Undo the per-band dispersion shift and crop to a square output.

    Band i is shifted left by int(step*i) pixels; `step` is scaled down for
    spatially down-sampled inputs (rows < 256).
    NOTE(review): mutates `inputs` in place before returning a view of it.
    """
    [bs, nC, row, col] = inputs.shape
    down_sample = 256//row
    step = float(step)/float(down_sample*down_sample)
    out_col = row
    for i in range(nC):
        inputs[:,i,:,:out_col] = \
            inputs[:,i,:,int(step*i):int(step*i)+out_col]
    return inputs[:, :, :, :out_col]
class MS_MSA(nn.Module):
    """Multi-head Spectral-wise self-attention (MST).

    Q/K/V are transposed so attention is computed across the channel
    (spectral) dimension — the attention matrix is dim_head x dim_head per
    head instead of (hw x hw). A depth-wise-conv positional branch computed
    from V is added to the attention output.

    Change vs. previous revision: removed the dead no-op statement `v = v`.
    """
    def __init__(
            self,
            dim,
            dim_head,
            heads,
    ):
        super().__init__()
        self.num_heads = heads
        self.dim_head = dim_head
        self.to_q = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_k = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_v = nn.Linear(dim, dim_head * heads, bias=False)
        # Learnable per-head temperature applied to the attention logits.
        self.rescale = nn.Parameter(torch.ones(heads, 1, 1))
        self.proj = nn.Linear(dim_head * heads, dim, bias=True)
        # Depth-wise conv positional embedding computed from V.
        self.pos_emb = nn.Sequential(
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
            GELU(),
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
        )
        self.dim = dim
    def forward(self, x_in):
        """
        x_in: [b,h,w,c]
        return out: [b,h,w,c]
        """
        b, h, w, c = x_in.shape
        x = x_in.reshape(b, h * w, c)
        q_inp = self.to_q(x)
        k_inp = self.to_k(x)
        v_inp = self.to_v(x)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.num_heads),
                      (q_inp, k_inp, v_inp))
        # q: b,heads,hw,c -> transpose so attention runs over the channel axis
        q = q.transpose(-2, -1)
        k = k.transpose(-2, -1)
        v = v.transpose(-2, -1)
        q = F.normalize(q, dim=-1, p=2)
        k = F.normalize(k, dim=-1, p=2)
        attn = (k @ q.transpose(-2, -1))  # A = K^T*Q
        attn = attn * self.rescale
        attn = attn.softmax(dim=-1)
        x = attn @ v  # b,heads,d,hw
        x = x.permute(0, 3, 1, 2)  # Transpose back to b,hw,heads,d
        x = x.reshape(b, h * w, self.num_heads * self.dim_head)
        out_c = self.proj(x).view(b, h, w, c)
        out_p = self.pos_emb(v_inp.reshape(b, h, w, c).permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
        out = out_c + out_p
        return out
class FeedForward(nn.Module):
    """Inverted-bottleneck FFN: 1x1 expand -> depth-wise 3x3 -> 1x1 project."""
    def __init__(self, dim, mult=4):
        super().__init__()
        hidden = dim * mult
        layers = [
            nn.Conv2d(dim, hidden, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(hidden, hidden, 3, 1, 1, bias=False, groups=hidden),
            GELU(),
            nn.Conv2d(hidden, dim, 1, 1, bias=False),
        ]
        self.net = nn.Sequential(*layers)
    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        # Conv layers expect channels-first; permute in and out.
        y = self.net(x.permute(0, 3, 1, 2))
        return y.permute(0, 2, 3, 1)
class MSAB(nn.Module):
    """Stack of `num_blocks` residual (MS_MSA -> PreNorm(FeedForward)) pairs."""
    def __init__(
            self,
            dim,
            dim_head,
            heads,
            num_blocks,
    ):
        super().__init__()
        self.blocks = nn.ModuleList([])
        for _ in range(num_blocks):
            pair = nn.ModuleList([
                MS_MSA(dim=dim, dim_head=dim_head, heads=heads),
                PreNorm(dim, FeedForward(dim=dim)),
            ])
            self.blocks.append(pair)
    def forward(self, x):
        """
        x: [b,c,h,w]
        return out: [b,c,h,w]
        """
        # Attention/FFN operate channels-last internally.
        feat = x.permute(0, 2, 3, 1)
        for attn, mlp in self.blocks:
            feat = attn(feat) + feat
            feat = mlp(feat) + feat
        return feat.permute(0, 3, 1, 2)
class MST(nn.Module):
    """Single-stage Mask-guided Spectral-wise Transformer: a U-shaped
    encoder/bottleneck/decoder built from MSAB blocks, with a residual
    connection from input to output."""
    def __init__(self, in_dim=28, out_dim=28, dim=28, stage=2, num_blocks=[2,4,4]):
        super(MST, self).__init__()
        self.dim = dim
        self.stage = stage
        # Input projection
        self.embedding = nn.Conv2d(in_dim, self.dim, 3, 1, 1, bias=False)
        # Encoder: each level = MSAB + strided-conv 2x downsample; channel
        # width doubles per level while dim_head stays fixed at `dim`.
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for i in range(stage):
            self.encoder_layers.append(nn.ModuleList([
                MSAB(
                    dim=dim_stage, num_blocks=num_blocks[i], dim_head=dim, heads=dim_stage // dim),
                nn.Conv2d(dim_stage, dim_stage * 2, 4, 2, 1, bias=False),
            ]))
            dim_stage *= 2
        # Bottleneck
        self.bottleneck = MSAB(
            dim=dim_stage, dim_head=dim, heads=dim_stage // dim, num_blocks=num_blocks[-1])
        # Decoder: transposed-conv 2x upsample, 1x1 fusion of the skip
        # connection, then MSAB; channel width halves per level.
        self.decoder_layers = nn.ModuleList([])
        for i in range(stage):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_stage, dim_stage // 2, 1, 1, bias=False),
                MSAB(
                    dim=dim_stage // 2, num_blocks=num_blocks[stage - 1 - i], dim_head=dim,
                    heads=(dim_stage // 2) // dim),
            ]))
            dim_stage //= 2
        # Output projection
        self.mapping = nn.Conv2d(self.dim, out_dim, 3, 1, 1, bias=False)
        #### activation function
        # NOTE(review): self.lrelu is constructed but never used in forward().
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.apply(self._init_weights)
    def _init_weights(self, m):
        # Truncated-normal init for Linear layers; unit affine for LayerNorm.
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
        if isinstance(m, nn.Linear) and m.bias is not None:
            nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def forward(self, x):
        """
        x: [b,c,h,w]
        return out:[b,c,h,w]
        """
        # Embedding
        fea = self.embedding(x)
        # Encoder
        fea_encoder = []
        # NOTE(review): the loop variable MSAB shadows the MSAB class name
        # inside this method (harmless here, but easy to trip over).
        for (MSAB, FeaDownSample) in self.encoder_layers:
            fea = MSAB(fea)
            fea_encoder.append(fea)
            fea = FeaDownSample(fea)
        # Bottleneck
        fea = self.bottleneck(fea)
        # Decoder
        for i, (FeaUpSample, Fution, LeWinBlcok) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            fea = Fution(torch.cat([fea, fea_encoder[self.stage-1-i]], dim=1))
            fea = LeWinBlcok(fea)
        # Mapping
        out = self.mapping(fea) + x
        return out
class MST_Plus_Plus(nn.Module):
    """MST++: cascade of `stage` lightweight single-stage MST U-nets.

    The raw 2-D measurement is first shift-unfolded into a 28-band cube
    (`initial_x`), then refined by the cascade with a global residual.
    """
    def __init__(self, in_channels=28, out_channels=28, n_feat=28, stage=3):
        super(MST_Plus_Plus, self).__init__()
        self.stage = stage
        self.conv_in = nn.Conv2d(in_channels, n_feat, kernel_size=3, padding=(3 - 1) // 2, bias=False)
        modules_body = [MST(dim=n_feat, stage=2, num_blocks=[1, 1, 1]) for _ in range(stage)]
        self.fution = nn.Conv2d(28, 28, 1, padding=0, bias=True)
        self.body = nn.Sequential(*modules_body)
        self.conv_out = nn.Conv2d(n_feat, out_channels, kernel_size=3, padding=(3 - 1) // 2, bias=False)
    def initial_x(self, y, input_mask=None):
        """
        Shift-and-stack the 2-D measurement back into a 28-band cube.
        :param y: [b,256,310] compressed measurement
        :return: x: [b,28,256,256]
        NOTE: `input_mask` is accepted for interface parity but unused.
        """
        nC, step = 28, 2
        bs, row, col = y.shape
        # BUGFIX: allocate on the measurement's device instead of hard-coding
        # .cuda(), so the model also runs on CPU inputs.
        x = torch.zeros(bs, nC, row, row, device=y.device).float()
        for i in range(nC):
            # Band i reads a row-wide window shifted by step*i columns.
            x[:, i, :, :] = y[:, :, step * i:step * i + col - (nC - 1) * step]
        x = self.fution(x)
        return x
    def forward(self, y):
        """
        y: compressed measurement [b,256,310]
        return out: reconstructed cube [b,28,256,256]
        """
        x = self.initial_x(y)
        b, c, h_inp, w_inp = x.shape
        # Reflect-pad spatial dims to multiples of 8 for the U-net strides.
        hb, wb = 8, 8
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        x = self.conv_in(x)
        h = self.body(x)
        h = self.conv_out(h)
        h += x
        return h[:, :, :h_inp, :w_inp]
| 10,188 | 30.544892 | 116 | py |
MST | MST-main/real/train_code/architecture/DGSMP.py | import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
import torch.nn.functional as F
class Resblock(nn.Module):
    """Two stacked residual conv-ReLU-conv branches at constant width HBW."""
    def __init__(self, HBW):
        super(Resblock, self).__init__()
        def branch():
            # conv -> ReLU -> conv, all 3x3 'same', width preserved.
            return nn.Sequential(nn.Conv2d(HBW, HBW, kernel_size=3, stride=1, padding=1),
                                 nn.ReLU(),
                                 nn.Conv2d(HBW, HBW, kernel_size=3, stride=1, padding=1))
        self.block1 = branch()
        self.block2 = branch()
    def forward(self, x):
        out = x + self.block1(x)
        out = out + self.block2(out)
        return out
class Encoding(nn.Module):
    """Five-stage conv encoder: channels 32->32->64->64->128->128, with 2x
    average pooling before stages 2-5; returns all five feature maps."""
    def __init__(self):
        super(Encoding, self).__init__()
        def stage(cin, cout):
            # Two 3x3 'same' convs, each followed by ReLU.
            return nn.Sequential(nn.Conv2d(in_channels=cin, out_channels=cout, kernel_size=3, stride=1, padding=1),
                                 nn.ReLU(),
                                 nn.Conv2d(in_channels=cout, out_channels=cout, kernel_size=3, stride=1, padding=1),
                                 nn.ReLU())
        self.E1 = stage(32, 32)
        self.E2 = stage(32, 64)
        self.E3 = stage(64, 64)
        self.E4 = stage(64, 128)
        self.E5 = stage(128, 128)
    def forward(self, x):
        ## encoding blocks
        feats = [self.E1(x)]
        for enc in (self.E2, self.E3, self.E4, self.E5):
            feats.append(enc(F.avg_pool2d(feats[-1], kernel_size=2, stride=2)))
        return tuple(feats)
class Decoding(nn.Module):
    """U-Net style decoder of DGSMP.

    From the five encoder scales it predicts:
      * ``w``            -- per-pixel regularization weights, [b, Ch, H, W]
      * ``f1, f2, f3``   -- three separable filter banks (width / height / spectral
                            taps) consumed by HSI_CS.Filtering_1/2/3.
    """
    # NOTE(review): mutable default list is shared across calls; harmless here
    # because it is only read, never mutated.
    def __init__(self, Ch=28, kernel_size=[7,7,7]):
        super(Decoding, self).__init__()
        self.upMode = 'bilinear'
        self.Ch = Ch
        # One filter bank per dimension: Ch bands x tap-count channels each.
        out_channel1 = Ch * kernel_size[0]
        out_channel2 = Ch * kernel_size[1]
        out_channel3 = Ch * kernel_size[2]
        # Decoder stages: each fuses a skip connection with the upsampled coarser map.
        self.D1 = nn.Sequential(nn.Conv2d(in_channels=128+128, out_channels=128, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.D2 = nn.Sequential(nn.Conv2d(in_channels=128+64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.D3 = nn.Sequential(nn.Conv2d(in_channels=64+64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        self.D4 = nn.Sequential(nn.Conv2d(in_channels=64+32, out_channels=32, kernel_size=3, stride=1, padding=1),
                                nn.ReLU(),
                                nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
                                nn.ReLU()
                                )
        # Head producing the regularization weights w.
        self.w_generator = nn.Sequential(nn.Conv2d(in_channels=32, out_channels=32, kernel_size=3, stride=1, padding=1),
                                         nn.ReLU(),
                                         nn.Conv2d(in_channels=32, out_channels=self.Ch, kernel_size=3, stride=1, padding=1),
                                         nn.ReLU(),
                                         nn.Conv2d(in_channels=self.Ch, out_channels=self.Ch, kernel_size=1, stride=1, padding=0)
                                         )
        # Heads producing the three filter banks; each reads [E1, up(D3)] (64+32 channels).
        self.filter_g_1 = nn.Sequential(nn.Conv2d(64 + 32, out_channel1, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel1, out_channel1, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel1, out_channel1, 1, 1, 0)
                                        )
        self.filter_g_2 = nn.Sequential(nn.Conv2d(64 + 32, out_channel2, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel2, out_channel2, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel2, out_channel2, 1, 1, 0)
                                        )
        self.filter_g_3 = nn.Sequential(nn.Conv2d(64 + 32, out_channel3, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel3, out_channel3, kernel_size=3, stride=1, padding=1),
                                        nn.ReLU(),
                                        nn.Conv2d(out_channel3, out_channel3, 1, 1, 0)
                                        )
    def forward(self, E1, E2, E3, E4, E5):
        """Decode (E1..E5, finest to coarsest) into (w, f1, f2, f3)."""
        ## decoding blocks
        D1 = self.D1(torch.cat([E4, F.interpolate(E5, scale_factor=2, mode=self.upMode)], dim=1))
        D2 = self.D2(torch.cat([E3, F.interpolate(D1, scale_factor=2, mode=self.upMode)], dim=1))
        D3 = self.D3(torch.cat([E2, F.interpolate(D2, scale_factor=2, mode=self.upMode)], dim=1))
        D4 = self.D4(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        ## estimating the regularization parameters w
        w = self.w_generator(D4)
        ## generate 3D filters
        # All three filter heads share the same [E1, up(D3)] input at full resolution.
        f1 = self.filter_g_1(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        f2 = self.filter_g_2(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        f3 = self.filter_g_3(torch.cat([E1, F.interpolate(D3, scale_factor=2, mode=self.upMode)], dim=1))
        return w, f1, f2, f3
class HSI_CS(nn.Module):
    """DGSMP hyperspectral reconstruction network.

    Unrolls ``stages`` gradient-descent iterations. Each iteration computes the
    data-fidelity residual A^T(Ax - y) with *learned* A / A^T operators, a
    prior residual w * (x - U) where U is a local mean obtained by three learned
    separable filterings, and updates x with a learned step size delta_i.
    Dense connections (Den_con*) fuse features from all previous iterations.
    """
    def __init__(self, Ch, stages):
        super(HSI_CS, self).__init__()
        self.Ch = Ch
        self.s = stages
        self.filter_size = [7,7,7]  ## 3D filter size
        ## The modules for learning the measurement matrix A and A^T
        self.AT = nn.Sequential(nn.Conv2d(Ch, 64, kernel_size=3, stride=1, padding=1), nn.LeakyReLU(),
                                Resblock(64), Resblock(64),
                                nn.Conv2d(64, Ch, kernel_size=3, stride=1, padding=1), nn.LeakyReLU())
        self.A  = nn.Sequential(nn.Conv2d(Ch, 64, kernel_size=3, stride=1, padding=1), nn.LeakyReLU(),
                                Resblock(64), Resblock(64),
                                nn.Conv2d(64, Ch, kernel_size=3, stride=1, padding=1), nn.LeakyReLU())
        ## Encoding blocks
        self.Encoding = Encoding()
        ## Decoding blocks
        self.Decoding = Decoding(Ch=self.Ch, kernel_size=self.filter_size)
        ## Dense connection
        self.conv  = nn.Conv2d(Ch, 32, kernel_size=3, stride=1, padding=1)
        self.Den_con1 = nn.Conv2d(32    , 32, kernel_size=1, stride=1, padding=0)
        self.Den_con2 = nn.Conv2d(32 * 2, 32, kernel_size=1, stride=1, padding=0)
        self.Den_con3 = nn.Conv2d(32 * 3, 32, kernel_size=1, stride=1, padding=0)
        self.Den_con4 = nn.Conv2d(32 * 4, 32, kernel_size=1, stride=1, padding=0)
        # self.Den_con5 = nn.Conv2d(32 * 5, 32, kernel_size=1, stride=1, padding=0)
        # self.Den_con6 = nn.Conv2d(32 * 6, 32, kernel_size=1, stride=1, padding=0)
        # Learned per-iteration step sizes, initialized near 0.1 below.
        self.delta_0 = Parameter(torch.ones(1), requires_grad=True)
        self.delta_1 = Parameter(torch.ones(1), requires_grad=True)
        self.delta_2 = Parameter(torch.ones(1), requires_grad=True)
        self.delta_3 = Parameter(torch.ones(1), requires_grad=True)
        # self.delta_4 = Parameter(torch.ones(1), requires_grad=True)
        # self.delta_5 = Parameter(torch.ones(1), requires_grad=True)
        self._initialize_weights()
        torch.nn.init.normal_(self.delta_0, mean=0.1, std=0.01)
        torch.nn.init.normal_(self.delta_1, mean=0.1, std=0.01)
        torch.nn.init.normal_(self.delta_2, mean=0.1, std=0.01)
        torch.nn.init.normal_(self.delta_3, mean=0.1, std=0.01)
        # torch.nn.init.normal_(self.delta_4, mean=0.1, std=0.01)
        # torch.nn.init.normal_(self.delta_5, mean=0.1, std=0.01)
    def _initialize_weights(self):
        """Xavier-normal weights and zero biases for every conv/linear layer."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.xavier_normal_(m.weight.data)
                nn.init.constant_(m.bias.data, 0.0)
            elif isinstance(m, nn.Linear):
                nn.init.xavier_normal_(m.weight.data)
                nn.init.constant_(m.bias.data, 0.0)
    def Filtering_1(self, cube, core):
        """Filter ``cube`` along the width axis with the per-pixel taps in ``core``."""
        batch_size, bandwidth, height, width = cube.size()
        cube_pad = F.pad(cube, [self.filter_size[0] // 2, self.filter_size[0] // 2, 0, 0], mode='replicate')
        img_stack = []
        for i in range(self.filter_size[0]):
            img_stack.append(cube_pad[:, :, :, i:i + width])
        img_stack = torch.stack(img_stack, dim=1)
        # NOTE(review): mul_ mutates `core` in place; appears safe because each
        # filter stack is consumed exactly once per call -- confirm for autograd.
        out = torch.sum(core.mul_(img_stack), dim=1, keepdim=False)
        return out
    def Filtering_2(self, cube, core):
        """Filter ``cube`` along the height axis with the per-pixel taps in ``core``."""
        batch_size, bandwidth, height, width = cube.size()
        cube_pad = F.pad(cube, [0, 0, self.filter_size[1] // 2, self.filter_size[1] // 2], mode='replicate')
        img_stack = []
        for i in range(self.filter_size[1]):
            img_stack.append(cube_pad[:, :, i:i + height, :])
        img_stack = torch.stack(img_stack, dim=1)
        out = torch.sum(core.mul_(img_stack), dim=1, keepdim=False)
        return out
    def Filtering_3(self, cube, core):
        """Filter ``cube`` along the spectral (band) axis with the taps in ``core``."""
        batch_size, bandwidth, height, width = cube.size()
        # Zero-pad the band dimension (pad via a temporary 6-D view).
        cube_pad = F.pad(cube.unsqueeze(0).unsqueeze(0), pad=(0, 0, 0, 0, self.filter_size[2] // 2, self.filter_size[2] // 2)).squeeze(0).squeeze(0)
        img_stack = []
        for i in range(self.filter_size[2]):
            img_stack.append(cube_pad[:, i:i + bandwidth, :, :])
        img_stack = torch.stack(img_stack, dim=1)
        out = torch.sum(core.mul_(img_stack), dim=1, keepdim=False)
        return out
    def recon(self, res1, res2, Xt, i):
        """Gradient step: Xt - 2*delta_i*(data residual + prior residual)."""
        # NOTE(review): stages > 4 would leave `delta` undefined here because the
        # i==4/5 branches are commented out.
        if i == 0 :
            delta = self.delta_0
        elif i == 1:
            delta = self.delta_1
        elif i == 2:
            delta = self.delta_2
        elif i == 3:
            delta = self.delta_3
        # elif i == 4:
        #     delta = self.delta_4
        # elif i == 5:
        #     delta = self.delta_5
        Xt = Xt - 2 * delta * (res1 + res2)
        return Xt
    def y2x(self, y):
        ## Spilt operator
        # Undo the dispersion shift: band t reads columns [2t, 2t + H) of y.
        # Requires CUDA; the output cube is square with side equal to y's height.
        sz = y.size()
        if len(sz) == 3:
            y = y.unsqueeze(1)
            bs = sz[0]
        sz = y.size()
        x = torch.zeros([bs, 28, sz[2], sz[2]]).cuda()
        for t in range(28):
            temp = y[:, :, :, 0 + 2 * t : sz[2] + 2 * t]
            x[:, t, :, :] = temp.squeeze(1)
        return x
    def x2y(self, x):
        ## Shift and Sum operator
        # Forward dispersion model: band t is shifted by 2t columns, then summed.
        # Requires CUDA; output width is H + 2*27.
        sz = x.size()
        if len(sz) == 3:
            x = x.unsqueeze(0).unsqueeze(0)
            bs = 1
        else:
            bs = sz[0]
        sz = x.size()
        y = torch.zeros([bs, sz[2], sz[2]+2*27]).cuda()
        for t in range(28):
            y[:, :, 0 + 2 * t : sz[2] + 2 * t] = x[:, t, :, :] + y[:, :, 0 + 2 * t : sz[2] + 2 * t]
        return y
    def forward(self, y, input_mask=None):
        """Reconstruct the HSI cube Xt from the 2-D measurement y (input_mask unused)."""
        ## The measurements y is split into a 3D data cube of size H × W × L to initialize x.
        y = y / 28 * 2  # energy normalization before the split
        Xt = self.y2x(y)
        feature_list = []
        for i in range(0, self.s):
            AXt = self.x2y(self.A(Xt))  # y = Ax
            Res1 = self.AT(self.y2x(AXt - y))  # A^T * (Ax − y)
            fea = self.conv(Xt)
            # Dense fusion of the features from all iterations so far.
            if i == 0:
                feature_list.append(fea)
                fufea = self.Den_con1(fea)
            elif i == 1:
                feature_list.append(fea)
                fufea = self.Den_con2(torch.cat(feature_list, 1))
            elif i == 2:
                feature_list.append(fea)
                fufea = self.Den_con3(torch.cat(feature_list, 1))
            elif i == 3:
                feature_list.append(fea)
                fufea = self.Den_con4(torch.cat(feature_list, 1))
            # elif i == 4:
            #     feature_list.append(fea)
            #     fufea = self.Den_con5(torch.cat(feature_list, 1))
            # elif i == 5:
            #     feature_list.append(fea)
            #     fufea = self.Den_con6(torch.cat(feature_list, 1))
            E1, E2, E3, E4, E5 = self.Encoding(fufea)
            W, f1, f2, f3 = self.Decoding(E1, E2, E3, E4, E5)
            # Normalize each filter bank over its tap dimension.
            batch_size, p, height, width = f1.size()
            f1 = F.normalize(f1.view(batch_size, self.filter_size[0], self.Ch, height, width),dim=1)
            batch_size, p, height, width = f2.size()
            f2 = F.normalize(f2.view(batch_size, self.filter_size[1], self.Ch, height, width),dim=1)
            batch_size, p, height, width = f3.size()
            f3 = F.normalize(f3.view(batch_size, self.filter_size[2], self.Ch, height, width),dim=1)
            ## Estimating the local means U
            u1 = self.Filtering_1(Xt, f1)
            u2 = self.Filtering_2(u1, f2)
            U  = self.Filtering_3(u2, f3)
            ## w * (x − u)
            Res2 = (Xt - U).mul(W)
            ## Reconstructing HSIs
            Xt = self.recon(Res1, Res2, Xt, i)
        return Xt
| 15,283 | 46.318885 | 148 | py |
MST | MST-main/real/train_code/architecture/DAUHST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch import einsum
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    """Truncated-normal init on [a, b] (in place); thin public wrapper, returns the tensor."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
class PreNorm(nn.Module):
    """Apply LayerNorm over the last ``dim`` features, then the wrapped module ``fn``."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        return self.fn(self.norm(x), *args, **kwargs)
class GELU(nn.Module):
    """Module wrapper around the functional GELU activation."""

    def forward(self, x):
        out = F.gelu(x)
        return out
class HS_MSA(nn.Module):
    """Half-Shuffle Multi-head Self-Attention.

    With ``only_local_branch`` the attention is window-local only.  Otherwise the
    heads are split in half: one half attends within windows (local branch), the
    other attends across window positions (non-local branch), each with its own
    learned position embedding.
    """
    def __init__(
            self,
            dim,
            window_size=(8, 8),
            dim_head=28,
            heads=8,
            only_local_branch=False
    ):
        super().__init__()
        self.dim = dim
        self.heads = heads
        self.scale = dim_head ** -0.5
        self.window_size = window_size
        self.only_local_branch = only_local_branch
        # position embedding
        if only_local_branch:
            seq_l = window_size[0] * window_size[1]
            self.pos_emb = nn.Parameter(torch.Tensor(1, heads, seq_l, seq_l))
            trunc_normal_(self.pos_emb)
        else:
            seq_l1 = window_size[0] * window_size[1]
            self.pos_emb1 = nn.Parameter(torch.Tensor(1, 1, heads//2, seq_l1, seq_l1))
            # NOTE(review): assumes a 256x320 feature map for the non-local
            # position-embedding size -- confirm for other input resolutions.
            h,w = 256//self.heads,320//self.heads
            seq_l2 = h*w//seq_l1
            self.pos_emb2 = nn.Parameter(torch.Tensor(1, 1, heads//2, seq_l2, seq_l2))
            trunc_normal_(self.pos_emb1)
            trunc_normal_(self.pos_emb2)
        inner_dim = dim_head * heads
        self.to_q = nn.Linear(dim, inner_dim, bias=False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias=False)
        self.to_out = nn.Linear(inner_dim, dim)
    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        b, h, w, c = x.shape
        w_size = self.window_size
        assert h % w_size[0] == 0 and w % w_size[1] == 0, 'fmap dimensions must be divisible by the window size'
        if self.only_local_branch:
            # Partition into windows; attend within each window independently.
            x_inp = rearrange(x, 'b (h b0) (w b1) c -> (b h w) (b0 b1) c', b0=w_size[0], b1=w_size[1])
            q = self.to_q(x_inp)
            k, v = self.to_kv(x_inp).chunk(2, dim=-1)
            q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.heads), (q, k, v))
            q *= self.scale
            sim = einsum('b h i d, b h j d -> b h i j', q, k)
            sim = sim + self.pos_emb
            attn = sim.softmax(dim=-1)
            out = einsum('b h i j, b h j d -> b h i d', attn, v)
            out = rearrange(out, 'b h n d -> b n (h d)')
            out = self.to_out(out)
            out = rearrange(out, '(b h w) (b0 b1) c -> b (h b0) (w b1) c', h=h // w_size[0], w=w // w_size[1],
                            b0=w_size[0])
        else:
            q = self.to_q(x)
            k, v = self.to_kv(x).chunk(2, dim=-1)
            # Split channels: first half -> local branch, second half -> non-local.
            q1, q2 = q[:,:,:,:c//2], q[:,:,:,c//2:]
            k1, k2 = k[:,:,:,:c//2], k[:,:,:,c//2:]
            v1, v2 = v[:,:,:,:c//2], v[:,:,:,c//2:]
            # local branch
            q1, k1, v1 = map(lambda t: rearrange(t, 'b (h b0) (w b1) c -> b (h w) (b0 b1) c',
                                              b0=w_size[0], b1=w_size[1]), (q1, k1, v1))
            q1, k1, v1 = map(lambda t: rearrange(t, 'b n mm (h d) -> b n h mm d', h=self.heads//2), (q1, k1, v1))
            q1 *= self.scale
            sim1 = einsum('b n h i d, b n h j d -> b n h i j', q1, k1)
            sim1 = sim1 + self.pos_emb1
            attn1 = sim1.softmax(dim=-1)
            out1 = einsum('b n h i j, b n h j d -> b n h i d', attn1, v1)
            out1 = rearrange(out1, 'b n h mm d -> b n mm (h d)')
            # non-local branch
            # Same window split, then swap window-index and within-window axes so
            # attention runs across windows at a fixed window position.
            q2, k2, v2 = map(lambda t: rearrange(t, 'b (h b0) (w b1) c -> b (h w) (b0 b1) c',
                                              b0=w_size[0], b1=w_size[1]), (q2, k2, v2))
            q2, k2, v2 = map(lambda t: t.permute(0, 2, 1, 3), (q2.clone(), k2.clone(), v2.clone()))
            q2, k2, v2 = map(lambda t: rearrange(t, 'b n mm (h d) -> b n h mm d', h=self.heads//2), (q2, k2, v2))
            q2 *= self.scale
            sim2 = einsum('b n h i d, b n h j d -> b n h i j', q2, k2)
            sim2 = sim2 + self.pos_emb2
            attn2 = sim2.softmax(dim=-1)
            out2 = einsum('b n h i j, b n h j d -> b n h i d', attn2, v2)
            out2 = rearrange(out2, 'b n h mm d -> b n mm (h d)')
            out2 = out2.permute(0, 2, 1, 3)
            out = torch.cat([out1,out2],dim=-1).contiguous()
            out = self.to_out(out)
            out = rearrange(out, 'b (h w) (b0 b1) c -> b (h b0) (w b1) c', h=h // w_size[0], w=w // w_size[1],
                            b0=w_size[0])
        return out
class HSAB(nn.Module):
    """Stack of ``num_blocks`` (attention, feed-forward) pairs, each pre-normed
    and residual; single-head stacks use the local-only attention branch."""

    def __init__(
            self,
            dim,
            window_size=(8, 8),
            dim_head=64,
            heads=8,
            num_blocks=2,
    ):
        super().__init__()
        self.blocks = nn.ModuleList([])
        for _ in range(num_blocks):
            pair = nn.ModuleList([
                PreNorm(dim, HS_MSA(dim=dim, window_size=window_size, dim_head=dim_head,
                                    heads=heads, only_local_branch=(heads == 1))),
                PreNorm(dim, FeedForward(dim=dim)),
            ])
            self.blocks.append(pair)

    def forward(self, x):
        """x: [b,c,h,w] -> out: [b,c,h,w]; the blocks operate channels-last."""
        h = x.permute(0, 2, 3, 1)
        for attn, ff in self.blocks:
            h = h + attn(h)
            h = h + ff(h)
        return h.permute(0, 3, 1, 2)
class FeedForward(nn.Module):
    """Inverted-bottleneck conv FFN: 1x1 expand -> depthwise 3x3 -> 1x1 project.

    Input and output are channels-last [b,h,w,c]; the convs run channels-first.
    """

    def __init__(self, dim, mult=4):
        super().__init__()
        hidden = dim * mult
        self.net = nn.Sequential(
            nn.Conv2d(dim, hidden, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(hidden, hidden, 3, 1, 1, bias=False, groups=hidden),
            GELU(),
            nn.Conv2d(hidden, dim, 1, 1, bias=False),
        )

    def forward(self, x):
        """x: [b,h,w,c] -> out: [b,h,w,c]."""
        return self.net(x.permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
class HST(nn.Module):
    """Half-Shuffle Transformer denoiser: a U-shaped encoder/bottleneck/decoder
    of HSAB blocks with a residual connection from the (first 28 channels of
    the) input to the output."""
    def __init__(self, in_dim=28, out_dim=28, dim=28, num_blocks=[1,1,1]):
        super(HST, self).__init__()
        self.dim = dim
        self.scales = len(num_blocks)
        # Input projection
        self.embedding = nn.Conv2d(in_dim, self.dim, 3, 1, 1, bias=False)
        # Encoder
        self.encoder_layers = nn.ModuleList([])
        dim_scale = dim
        for i in range(self.scales-1):
            self.encoder_layers.append(nn.ModuleList([
                HSAB(dim=dim_scale, num_blocks=num_blocks[i], dim_head=dim, heads=dim_scale // dim),
                nn.Conv2d(dim_scale, dim_scale * 2, 4, 2, 1, bias=False),
            ]))
            dim_scale *= 2
        # Bottleneck
        self.bottleneck = HSAB(dim=dim_scale, dim_head=dim, heads=dim_scale // dim, num_blocks=num_blocks[-1])
        # Decoder
        self.decoder_layers = nn.ModuleList([])
        for i in range(self.scales-1):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_scale, dim_scale // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_scale, dim_scale // 2, 1, 1, bias=False),
                HSAB(dim=dim_scale // 2, num_blocks=num_blocks[self.scales - 2 - i], dim_head=dim,
                     heads=(dim_scale // 2) // dim),
            ]))
            dim_scale //= 2
        # Output projection
        self.mapping = nn.Conv2d(self.dim, out_dim, 3, 1, 1, bias=False)
        #### activation function
        self.apply(self._init_weights)
    def _init_weights(self, m):
        """Truncated-normal linear weights; unit-weight/zero-bias LayerNorm."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)
    def forward(self, x):
        """
        x: [b,c,h,w]
        return out:[b,c,h,w]
        """
        b, c, h_inp, w_inp = x.shape
        # Reflect-pad so both sides are multiples of 16 (crop back at the end).
        hb, wb = 16, 16
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        # Embedding
        fea = self.embedding(x)
        # Keep only the 28 HSI bands for the residual (DAUHST appends a beta map
        # as an extra input channel when in_dim=29).
        x = x[:,:28,:,:]
        # Encoder
        fea_encoder = []
        # NOTE: the loop variable shadows the HSAB class name locally.
        for (HSAB, FeaDownSample) in self.encoder_layers:
            fea = HSAB(fea)
            fea_encoder.append(fea)
            fea = FeaDownSample(fea)
        # Bottleneck
        fea = self.bottleneck(fea)
        # Decoder
        for i, (FeaUpSample, Fution, HSAB) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            fea = Fution(torch.cat([fea, fea_encoder[self.scales-2-i]], dim=1))
            fea = HSAB(fea)
        # Mapping
        out = self.mapping(fea) + x
        return out[:, :, :h_inp, :w_inp]
def A(x, Phi):
    """Forward CASSI operator: elementwise mask by Phi, then sum over the
    spectral dimension (dim 1).  x, Phi: [b,nC,h,w] -> y: [b,h,w]."""
    return (x * Phi).sum(dim=1)
def At(y, Phi):
    """Adjoint CASSI operator: broadcast y across Phi's spectral dimension and
    mask.  y: [b,h,w], Phi: [b,nC,h,w] -> x: [b,nC,h,w]."""
    expanded = torch.unsqueeze(y, 1).repeat(1, Phi.shape[1], 1, 1)
    return expanded * Phi
def shift_3d(inputs, step=2):
    """Disperse: roll band i of ``inputs`` [bs,nC,row,col] by step*i columns.

    Mutates ``inputs`` in place and returns it.
    """
    nC = inputs.shape[1]
    for band in range(nC):
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=step * band, dims=2)
    return inputs
def shift_back_3d(inputs, step=2):
    """Undo the dispersion: roll band i of ``inputs`` [bs,nC,row,col] back by
    step*i columns.  Mutates ``inputs`` in place and returns it."""
    nC = inputs.shape[1]
    for band in range(nC):
        inputs[:, band, :, :] = torch.roll(inputs[:, band, :, :], shifts=-step * band, dims=2)
    return inputs
class HyPaNet(nn.Module):
    """Hyper-parameter estimator: maps the fused input to strictly-positive
    per-iteration scalars, returned as two halves (alpha-like, beta-like)."""

    def __init__(self, in_nc=29, out_nc=8, channel=64):
        super(HyPaNet, self).__init__()
        self.fution = nn.Conv2d(in_nc, channel, 1, 1, 0, bias=True)
        self.down_sample = nn.Conv2d(channel, channel, 3, 2, 1, bias=True)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.mlp = nn.Sequential(
            nn.Conv2d(channel, channel, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel, channel, 1, padding=0, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(channel, out_nc, 1, padding=0, bias=True),
            nn.Softplus())
        self.relu = nn.ReLU(inplace=True)
        self.out_nc = out_nc

    def forward(self, x):
        """x: [b,in_nc,h,w] -> (params[:, :out_nc//2], params[:, out_nc//2:]), each [b,out_nc//2,1,1]."""
        fused = self.relu(self.fution(x))
        pooled = self.avg_pool(self.down_sample(fused))
        # Softplus keeps the outputs positive; epsilon guards later divisions.
        params = self.mlp(pooled) + 1e-6
        half = self.out_nc // 2
        return params[:, :half, :, :], params[:, half:, :, :]
class DAUHST(nn.Module):
    """Deep Aunfolding Half-Shuffle Transformer: ``num_iterations`` unrolled
    half-quadratic-splitting steps, each a closed-form data update followed by
    an HST denoiser conditioned on the estimated noise level beta."""
    def __init__(self, num_iterations=1):
        super(DAUHST, self).__init__()
        self.para_estimator = HyPaNet(in_nc=28, out_nc=num_iterations*2)
        self.fution = nn.Conv2d(56, 28, 1, padding=0, bias=True)
        self.num_iterations = num_iterations
        self.denoisers = nn.ModuleList([])
        for _ in range(num_iterations):
            self.denoisers.append(
                HST(in_dim=29, out_dim=28, dim=28, num_blocks=[1,1,1]),
            )
    def initial(self, y, Phi):
        """
        :param y: [b,256,310]
        :param Phi: [b,28,256,310]
        :return: temp: [b,28,256,310]; alpha: [b, num_iterations]; beta: [b, num_iterations]
        """
        nC, step = 28, 2
        y = y / nC * 2  # energy normalization
        bs,row,col = y.shape
        # Spread y into 28 dispersed bands (requires CUDA).
        y_shift = torch.zeros(bs, nC, row, col).cuda().float()
        for i in range(nC):
            y_shift[:, i, :, step * i:step * i + col - (nC - 1) * step] = y[:, :, step * i:step * i + col - (nC - 1) * step]
        z = self.fution(torch.cat([y_shift, Phi], dim=1))
        alpha, beta = self.para_estimator(self.fution(torch.cat([y_shift, Phi], dim=1)))
        return z, alpha, beta
    def forward(self, y, input_mask=None):
        """
        :param y: [b,256,310]
        :param Phi: [b,28,256,310]
        :param Phi_PhiT: [b,256,310]
        :return: z_crop: [b,28,256,256]
        """
        Phi, Phi_s = input_mask
        z, alphas, betas = self.initial(y, Phi)
        for i in range(self.num_iterations):
            alpha, beta = alphas[:,i,:,:], betas[:,i:i+1,:,:]
            # Closed-form data-fidelity update (Sherman-Morrison style division).
            Phi_z = A(z, Phi)
            x = z + At(torch.div(y-Phi_z,alpha+Phi_s), Phi)
            x = shift_back_3d(x)
            # Condition the denoiser on beta via an extra (29th) input channel.
            beta_repeat = beta.repeat(1,1,x.shape[2], x.shape[3])
            z = self.denoisers[i](torch.cat([x, beta_repeat],dim=1))
            if i<self.num_iterations-1:
                z = shift_3d(z)
        # Crop the dispersed width back to the 256-wide scene.
        return z[:, :, :, 0:256]
| 13,343 | 35.26087 | 133 | py |
MST | MST-main/real/train_code/architecture/CST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
from collections import defaultdict, Counter
import numpy as np
from tqdm import tqdm
import random
def uniform(a, b, shape, device='cuda'):
    """Sample a tensor of ``shape`` uniformly from [a, b) on ``device``."""
    width = b - a
    return width * torch.rand(shape, device=device) + a
class AsymmetricTransform:
    """Interface for asymmetric LSH transforms; subclasses provide the query
    transform Q and the key transform K."""
    def Q(self, *args, **kwargs):
        raise NotImplementedError('Query transform not implemented')
    def K(self, *args, **kwargs):
        raise NotImplementedError('Key transform not implemented')
class LSH:
    """Base class for locality-sensitive hashing schemes."""

    def __call__(self, *args, **kwargs):
        raise NotImplementedError('LSH scheme not implemented')

    def compute_hash_agreement(self, q_hash, k_hash):
        """Count positions where every hash coordinate of q and k matches (last dim)."""
        all_match = (q_hash == k_hash).min(dim=-1)[0]
        return all_match.sum(dim=-1)
class XBOXPLUS(AsymmetricTransform):
    """XBOX+ style augmentation: append an extension coordinate so every row
    reaches the shared maximum norm MX (plus a zero slot), preparing vectors
    for the LSH projection."""
    def set_norms(self, x):
        # Per-row L2 norms and their max over the token dimension; cached for X().
        self.x_norms = x.norm(p=2, dim=-1, keepdim=True)
        self.MX = torch.amax(self.x_norms, dim=-2, keepdim=True)
    def X(self, x):
        device = x.device
        # sqrt(MX^2 - |x|^2) equalizes every row's norm to MX after concatenation.
        ext = torch.sqrt((self.MX**2).to(device) - (self.x_norms**2).to(device))
        # NOTE(review): Tensor.repeat called with a tuple plus an extra int is
        # unusual -- confirm this runs on the targeted torch version.
        zero = torch.tensor(0.0, device=x.device).repeat(x.shape[:-1], 1).unsqueeze(-1)
        return torch.cat((x, ext, zero), -1)
def lsh_clustering(x, n_rounds, r=1):
    """Hash ``x`` ([..., t, dim]) with a fresh SALSH and return, per round, the
    token indices sorted by hash value: [n_rounds, ..., t]."""
    salsh = SALSH(n_rounds=n_rounds, dim=x.shape[-1], r=r, device=x.device)
    x_hashed = salsh(x).reshape((n_rounds,) + x.shape[:-1])
    return x_hashed.argsort(dim=-1)
class SALSH(LSH):
    """Shift-and-rescale LSH: Gaussian random projection, uniform shift in
    [0, r), and rescaling by the bucket width r."""

    def __init__(self, n_rounds, dim, r, device='cuda'):
        super(SALSH, self).__init__()
        # One projection direction per round, plus a per-round random offset.
        self.alpha = torch.normal(0, 1, (dim, n_rounds), device=device)
        self.beta = uniform(0, r, shape=(1, n_rounds), device=device)
        self.dim = dim
        self.r = r

    def __call__(self, vecs):
        """vecs: [b, t, dim] -> hash values: [n_rounds, b, t]."""
        shifted = vecs @ self.alpha + self.beta
        rescaled = shifted / self.r
        return rescaled.permute(2, 0, 1)
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
    """Fill ``tensor`` in place from N(mean, std) truncated to [a, b] via the
    inverse-CDF (erfinv) method; runs without grad and returns ``tensor``."""
    def norm_cdf(x):
        # Standard-normal CDF.
        return (1. + math.erf(x / math.sqrt(2.))) / 2.
    if (mean < a - 2 * std) or (mean > b + 2 * std):
        warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
                      "The distribution of values may be incorrect.",
                      stacklevel=2)
    with torch.no_grad():
        # CDF bounds of the truncation interval in standard-normal space.
        l = norm_cdf((a - mean) / std)
        u = norm_cdf((b - mean) / std)
        # Uniform sample over the matching erf range, then invert and rescale.
        tensor.uniform_(2 * l - 1, 2 * u - 1)
        tensor.erfinv_()
        tensor.mul_(std * math.sqrt(2.))
        tensor.add_(mean)
        tensor.clamp_(min=a, max=b)
        return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    """Truncated-normal init on [a, b] (in place); thin public wrapper, returns the tensor."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Initialize ``tensor`` in place with variance-scaling (He/Glorot-family) init.

    Args:
        tensor: tensor to fill (modified in place).
        scale: multiplicative factor on the variance.
        mode: 'fan_in', 'fan_out' or 'fan_avg' -- which fan count normalizes the variance.
        distribution: 'truncated_normal', 'normal' or 'uniform'.

    Raises:
        ValueError: if ``mode`` or ``distribution`` is not one of the values above.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        # An unknown mode previously crashed later with NameError; fail fast
        # with the same style of error as the distribution branch.
        raise ValueError(f"invalid mode {mode}")
    variance = scale / denom
    if distribution == "truncated_normal":
        # Constant rescales std because truncation to [-2, 2] shrinks the sample std.
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """LeCun normal init: truncated normal with variance 1/fan_in (in place)."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
class PreNorm(nn.Module):
    """Apply LayerNorm over the last ``dim`` features, then the wrapped module ``fn``."""

    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        return self.fn(self.norm(x), *args, **kwargs)
class GELU(nn.Module):
    """Module wrapper around the functional GELU activation."""

    def forward(self, x):
        out = F.gelu(x)
        return out
def batch_scatter(output, src, dim, index):
    """
    Batched scatter: write the rows of ``src`` into ``output`` at ``index``.

    :param output: [b,n,c]
    :param src: [b,k,c]
    :param dim: int (scatter dimension of the flattened [(b c), n] view; callers pass 1)
    :param index: [b,k]
    :return: output: [b,n,c]
    """
    b,k,c = src.shape
    # Broadcast the per-row index across the channel dimension.
    index = index[:, :, None].expand(-1, -1, c)
    # Fold channels into the batch so a single 2-D scatter_ covers all of them.
    output, src, index = map(lambda t: rearrange(t, 'b k c -> (b c) k'), (output, src, index))
    output.scatter_(dim,index,src)
    output = rearrange(output, '(b c) k -> b k c', b=b)
    return output
def batch_gather(x, index, dim):
    """
    Batched gather: select the rows of ``x`` addressed by ``index``.

    :param x: [b,n,c]
    :param index: [b,n//2]
    :param dim: int (gather dimension of the flattened [(b c), n] view; callers pass 1)
    :return: output: [b,n//2,c]
    """
    b,n,c = x.shape
    # Broadcast the per-row index across the channel dimension.
    index = index[:,:,None].expand(-1,-1,c)
    # Fold channels into the batch so a single 2-D gather covers all of them.
    x, index = map(lambda t: rearrange(t, 'b n c -> (b c) n'), (x, index))
    output = torch.gather(x,dim,index)
    output = rearrange(output, '(b c) n -> b n c', b=b)
    return output
class FeedForward(nn.Module):
    """Inverted-bottleneck conv FFN (1x1 expand -> depthwise 3x3 -> 1x1 project);
    channels-last input and output."""
    def __init__(self, dim, mult=4):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv2d(dim, dim * mult, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(dim * mult, dim * mult, 3, 1, 1, bias=False, groups=dim * mult),
            GELU(),
            nn.Conv2d(dim * mult, dim, 1, 1, bias=False),
        )
    def forward(self, x):
        """
        x: [b,h,w,c]
        return out: [b,h,w,c]
        """
        # Convs run channels-first; permute in and out.
        out = self.net(x.permute(0, 3, 1, 2))
        return out.permute(0, 2, 3, 1)
class SAH_MSA(nn.Module):
    """Sparsity-Aware Hashing Multi-head Self-Attention.

    Tokens are ordered by multi-round LSH (XBOX+ transform, then SALSH), full
    attention is computed only inside fixed-size buckets of that ordering, and
    the per-round results are merged with logsumexp-normalized weights.
    """
    def __init__(self, heads=4, n_rounds=2, channels=64, patch_size=144,
                 r=1):
        super(SAH_MSA, self).__init__()
        self.heads = heads
        self.n_rounds = n_rounds
        inner_dim = channels*3
        self.to_q = nn.Linear(channels, inner_dim, bias=False)
        self.to_k = nn.Linear(channels, inner_dim, bias=False)
        self.to_v = nn.Linear(channels, inner_dim, bias=False)
        self.to_out = nn.Linear(inner_dim, channels, bias=False)
        self.xbox_plus = XBOXPLUS()
        self.clustering_params = {
            'r': r,
            'n_rounds': self.n_rounds
        }
        # Bucket sizes (tokens attended together) for queries and keys.
        self.q_attn_size = patch_size[0] * patch_size[1]
        self.k_attn_size = patch_size[0] * patch_size[1]
    def forward(self, input):
        """
        :param input: [b,n,c]
        :return: output: [b,n,c]
        """
        B, N, C_inp = input.shape
        query = self.to_q(input)
        key = self.to_k(input)
        value = self.to_v(input)
        # Hash the raw (pre-projection) features, split per head.
        input_hash = input.view(B, N, self.heads, C_inp//self.heads)
        x_hash = rearrange(input_hash, 'b t h e -> (b h) t e')
        bs, x_seqlen, dim = x_hash.shape
        with torch.no_grad():
            self.xbox_plus.set_norms(x_hash)
            Xs = self.xbox_plus.X(x_hash)
            x_positions = lsh_clustering(Xs, **self.clustering_params)
            x_positions = x_positions.reshape(self.n_rounds, bs, -1)
        del Xs
        C = query.shape[-1]
        query = query.view(B, N, self.heads, C // self.heads)
        key = key.view(B, N, self.heads, C // self.heads)
        value = value.view(B, N, self.heads, C // self.heads)
        query = rearrange(query, 'b t h e -> (b h) t e')  # [bs, q_seqlen,c]
        key = rearrange(key, 'b t h e -> (b h) t e')
        value = rearrange(value, 'b s h d -> (b h) s d')
        bs, q_seqlen, dim = query.shape
        bs, k_seqlen, dim = key.shape
        v_dim = value.shape[-1]
        x_rev_positions = torch.argsort(x_positions, dim=-1)
        # Gather q/k/v in hash order and chop into attention buckets.
        x_offset = torch.arange(bs, device=query.device).unsqueeze(-1) * x_seqlen
        x_flat = (x_positions + x_offset).reshape(-1)
        s_queries = query.reshape(-1, dim).index_select(0, x_flat).reshape(-1, self.q_attn_size, dim)
        s_keys = key.reshape(-1, dim).index_select(0, x_flat).reshape(-1, self.k_attn_size, dim)
        s_values = value.reshape(-1, v_dim).index_select(0, x_flat).reshape(-1, self.k_attn_size, v_dim)
        inner = s_queries @ s_keys.transpose(2, 1)
        norm_factor = 1
        inner = inner / norm_factor
        # free memory
        del x_positions
        # softmax denominator
        dots_logsumexp = torch.logsumexp(inner, dim=-1, keepdim=True)
        # softmax
        dots = torch.exp(inner - dots_logsumexp)
        # dropout
        # n_rounds outs
        bo = (dots @ s_values).reshape(self.n_rounds, bs, q_seqlen, -1)
        # undo sort
        x_offset = torch.arange(bs * self.n_rounds, device=query.device).unsqueeze(-1) * x_seqlen
        x_rev_flat = (x_rev_positions.reshape(-1, x_seqlen) + x_offset).reshape(-1)
        o = bo.reshape(-1, v_dim).index_select(0, x_rev_flat).reshape(self.n_rounds, bs, q_seqlen, -1)
        slogits = dots_logsumexp.reshape(self.n_rounds, bs, -1)
        logits = torch.gather(slogits, 2, x_rev_positions)
        # free memory
        del x_rev_positions
        # weighted sum multi-round attention
        probs = torch.exp(logits - torch.logsumexp(logits, dim=0, keepdim=True))
        out = torch.sum(o * probs.unsqueeze(-1), dim=0)
        out = rearrange(out, '(b h) t d -> b t h d', h=self.heads)
        out = out.reshape(B, N, -1)
        out = self.to_out(out)
        return out
class SAHAB(nn.Module):
    """Sparsity-Aware Hashing Attention Block: optional cyclic shift, large-patch
    split, (optionally mask-selected) SAH-MSA, and a feed-forward layer."""
    def __init__(
            self,
            dim,
            patch_size=(16, 16),
            heads=8,
            shift_size=0,
            sparse=False
    ):
        super().__init__()
        self.blocks = nn.ModuleList([])
        self.attn = PreNorm(dim, SAH_MSA(heads=heads, n_rounds=2, r=1, channels=dim, patch_size=patch_size))
        self.ffn = PreNorm(dim, FeedForward(dim=dim))
        self.shift_size = shift_size
        self.patch_size = patch_size
        self.sparse = sparse
    def forward(self, x, mask=None):
        """
        x: [b,h,w,c]
        mask: [b,h,w]
        return out: [b,h,w,c]
        """
        # NOTE(review): despite the None default, a real mask is required below
        # (roll/rearrange/mean would fail on None).
        b,h,w,c = x.shape
        if self.shift_size > 0:
            x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
            mask = torch.roll(mask, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
        w_size = self.patch_size
        # Split into large patches
        x = rearrange(x, 'b (nh hh) (nw ww) c-> b (nh nw) (hh ww c)', hh=w_size[0] * 2, ww=w_size[1] * 2)
        mask = rearrange(mask, 'b (nh hh) (nw ww) -> b (nh nw) (hh ww)', hh=w_size[0] * 2, ww=w_size[1] * 2)
        N = x.shape[1]
        mask = torch.mean(mask,dim=2,keepdim=False)  # [b,nh*nw]
        if self.sparse:
            # Attend only the half of the patches with the highest mean mask score.
            mask_select = mask.topk(mask.shape[1] // 2, dim=1)[1]  # [b,nh*nw//2]
            x_select = batch_gather(x, mask_select, 1)  # [b,nh*nw//2,hh*ww*c]
            x_select = x_select.reshape(b*N//2,-1,c)
            x_select = self.attn(x_select)+x_select
            x_select = x_select.view(b,N//2,-1)
            x = batch_scatter(x.clone(), x_select, 1, mask_select)
        else:
            x = x.view(b*N,-1,c)
            x = self.attn(x) + x
            x = x.view(b, N, -1)
        x = rearrange(x, 'b (nh nw) (hh ww c) -> b (nh hh) (nw ww) c', nh=h//(w_size[0] * 2), hh=w_size[0] * 2, ww=w_size[1] * 2)
        if self.shift_size > 0:
            # Undo the cyclic shift on the features (the mask is not reused).
            x = torch.roll(x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        x = self.ffn(x) + x
        return x
class SAHABs(nn.Module):
    """Stack of SAHAB blocks; every second block uses a cyclic shift of
    patch_size[0] (Swin-style alternation)."""
    def __init__(
            self,
            dim,
            patch_size=(8, 8),
            heads=8,
            num_blocks=2,
            sparse=False
    ):
        super().__init__()
        blocks = []
        for _ in range(num_blocks):
            blocks.append(
                SAHAB(heads=heads, dim=dim, patch_size=patch_size,sparse=sparse,
                      shift_size=0 if (_ % 2 == 0) else patch_size[0]))
        self.blocks = nn.Sequential(*blocks)
    def forward(self, x, mask=None):
        """
        x: [b,c,h,w]
        mask: [b,1,h,w]
        return x: [b,c,h,w]
        """
        # Blocks operate channels-last with a 2-D mask.
        x = x.permute(0, 2, 3, 1)
        mask = mask.squeeze(1)
        for block in self.blocks:
            x = block(x, mask)
        x = x.permute(0, 3, 1, 2)
        return x
class ASPPConv(nn.Sequential):
    """Dilated 3x3 conv branch of ASPP (padding = dilation keeps spatial size),
    followed by ReLU."""

    def __init__(self, in_channels, out_channels, dilation):
        conv = nn.Conv2d(in_channels, out_channels, 3, padding=dilation,
                         dilation=dilation, bias=False)
        super(ASPPConv, self).__init__(conv, nn.ReLU())
class ASPPPooling(nn.Sequential):
    """Global-average-pool branch of ASPP: pool to 1x1, 1x1 conv, ReLU, then
    bilinearly upsample back to the input resolution."""

    def __init__(self, in_channels, out_channels):
        super(ASPPPooling, self).__init__(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.ReLU())

    def forward(self, x):
        spatial = x.shape[-2:]
        pooled = x
        for stage in self:
            pooled = stage(pooled)
        # Restore the original spatial resolution.
        return F.interpolate(pooled, size=spatial, mode='bilinear', align_corners=False)
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling: parallel dilated-conv branches (one per
    atrous rate) plus a global-pooling branch, concatenated and projected."""
    def __init__(self, in_channels, atrous_rates, out_channels):
        super(ASPP, self).__init__()
        modules = []
        rates = tuple(atrous_rates)
        for rate in rates:
            modules.append(ASPPConv(in_channels, out_channels, rate))
        modules.append(ASPPPooling(in_channels, out_channels))
        self.convs = nn.ModuleList(modules)
        # 1x1 projection of the concatenated branches, with dropout.
        self.project = nn.Sequential(
            nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False),
            nn.ReLU(),
            nn.Dropout(0.5))
    def forward(self, x):
        """x: [b,in_channels,h,w] -> [b,out_channels,h,w]."""
        res = []
        for conv in self.convs:
            res.append(conv(x))
        res = torch.cat(res, dim=1)
        return self.project(res)
class Sparsity_Estimator(nn.Module):
    """Lightweight 2-scale conv U-Net with an ASPP bottleneck.

    Produces a ``dim``-channel feature map; with ``sparse=True`` it additionally
    predicts a 1-channel error map used by the sparse attention blocks.
    """
    def __init__(self, dim=28, expand=2, sparse=False):
        super(Sparsity_Estimator, self).__init__()
        self.dim = dim
        self.stage = 2
        self.sparse = sparse
        # Input projection
        self.in_proj = nn.Conv2d(28, dim, 1, 1, 0, bias=False)
        # Encoder
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for i in range(2):
            # 1x1 expand -> stride-2 depthwise 3x3 -> 1x1 (channels double per stage).
            self.encoder_layers.append(nn.ModuleList([
                nn.Conv2d(dim_stage, dim_stage * expand, 1, 1, 0, bias=False),
                nn.Conv2d(dim_stage * expand, dim_stage * expand, 3, 2, 1, bias=False, groups=dim_stage * expand),
                nn.Conv2d(dim_stage * expand, dim_stage*expand, 1, 1, 0, bias=False),
            ]))
            dim_stage *= 2
        # Bottleneck
        self.bottleneck = ASPP(dim_stage, [3,6], dim_stage)
        # Decoder:
        self.decoder_layers = nn.ModuleList([])
        for i in range(2):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_stage // 2, dim_stage, 1, 1, 0, bias=False),
                nn.Conv2d(dim_stage, dim_stage, 3, 1, 1, bias=False, groups=dim_stage),
                nn.Conv2d(dim_stage, dim_stage // 2, 1, 1, 0, bias=False),
            ]))
            dim_stage //= 2
        # Output projection
        if sparse:
            # Extra channel carries the predicted error map.
            self.out_conv2 = nn.Conv2d(self.dim, self.dim+1, 3, 1, 1, bias=False)
        else:
            self.out_conv2 = nn.Conv2d(self.dim, self.dim, 3, 1, 1, bias=False)
        #### activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
    def forward(self, x):
        """
        x: [b,c,h,w]
        return out:[b,c,h,w]  (plus a [b,1,h,w] error map when sparse=True)
        """
        # Input projection
        fea = self.lrelu(self.in_proj(x))
        # Encoder
        fea_encoder = []  # [c 2c 4c 8c]
        for (Conv1, Conv2, Conv3) in self.encoder_layers:
            fea_encoder.append(fea)
            fea = Conv3(self.lrelu(Conv2(self.lrelu(Conv1(fea)))))
        # Bottleneck
        fea = self.bottleneck(fea)+fea
        # Decoder
        for i, (FeaUpSample, Conv1, Conv2, Conv3) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            fea = Conv3(self.lrelu(Conv2(self.lrelu(Conv1(fea)))))
            fea = fea + fea_encoder[self.stage-1-i]
        # Output projection
        out = self.out_conv2(fea)
        if self.sparse:
            error_map = out[:,-1:,:,:]
            return out[:,:-1], error_map
        return out
class CST(nn.Module):
    """Coarse-to-fine Sparse Transformer for CASSI spectral reconstruction.

    Pipeline: un-shift the 2-D measurement into a coarse 28-band cube
    (``initial_x``), run a sparsity estimator that optionally predicts a
    sparsity/error map, then a U-shaped encoder/bottleneck/decoder of SAHAB
    transformer blocks that consume that map as guidance, finishing with a
    residual 3x3 output projection.
    """
    def __init__(self, dim=28, stage=2, num_blocks=[2, 2, 2], sparse=False):
        """
        Args:
            dim: base feature width (equals the number of spectral bands).
            stage: number of encoder/decoder levels.
            num_blocks: SAHAB block counts per level; the special value
                [2, 4, 6] selects a deeper two-stage sparsity estimator.
            sparse: if True, the estimator predicts the guidance mask and
                forward() additionally returns it.
        """
        super(CST, self).__init__()
        self.dim = dim
        self.stage = stage
        self.sparse = sparse
        # Fuses the physical mask with the shifted measurement (1x1 conv).
        self.fution = nn.Conv2d(28, 28, 1, 1, 0, bias=False)
        # Sparsity Estimator
        if num_blocks == [2, 4, 6]:
            self.fe = nn.Sequential(Sparsity_Estimator(dim=28, expand=2, sparse=False),
                                    Sparsity_Estimator(dim=28, expand=2, sparse=sparse))
        else:
            self.fe = Sparsity_Estimator(dim=28, expand=2, sparse=sparse)
        # Encoder: SAHAB blocks + strided-conv feature downsample; the mask
        # is downsampled in lockstep with average pooling.
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for i in range(stage):
            self.encoder_layers.append(nn.ModuleList([
                SAHABs(dim=dim_stage, num_blocks=num_blocks[i], heads=dim_stage // dim, sparse=sparse),
                nn.Conv2d(dim_stage, dim_stage * 2, 4, 2, 1, bias=False),
                nn.AvgPool2d(kernel_size=2, stride=2),
            ]))
            dim_stage *= 2
        # Bottleneck
        self.bottleneck = SAHABs(
            dim=dim_stage, heads=dim_stage // dim, num_blocks=num_blocks[-1], sparse=sparse)
        # Decoder: transpose-conv upsample + SAHAB blocks; encoder features
        # are added back and the matching-resolution masks are reused.
        self.decoder_layers = nn.ModuleList([])
        for i in range(stage):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                SAHABs(dim=dim_stage // 2, num_blocks=num_blocks[stage - 1 - i],
                       heads=(dim_stage // 2) // dim, sparse=sparse),
            ]))
            dim_stage //= 2
        # Output projection
        self.out_proj = nn.Conv2d(self.dim, dim, 3, 1, 1, bias=False)
        #### activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
        self.apply(self._init_weights)

    def _init_weights(self, m):
        """Truncated-normal init for Linear weights; unit/zero LayerNorm init."""
        if isinstance(m, nn.Linear):
            trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def initial_x(self, y):
        """Un-shift the 2-D measurement into a coarse spectral cube.

        :param y: [b,256,310] shifted measurement
        :return: x: [b,28,256,256] fused coarse cube
        """
        nC, step = 28, 2
        bs, row, col = y.shape
        # NOTE: hard-coded .cuda() — this model assumes GPU execution.
        x = torch.zeros(bs, nC, row, row).cuda().float()
        for i in range(nC):
            # Band i reads a window offset by step*i columns.
            x[:, i, :, :] = y[:, :, step * i:step * i + col - (nC - 1) * step]
        x = self.fution(x)
        return x

    def forward(self, x, input_mask=None):
        """
        x: [b,h,w] shifted measurement
        input_mask: unused; accepted so all models share one call signature.
        return out: [b,c,h,w] (plus the guidance mask when sparse=True)
        """
        x = self.initial_x(x)
        # Feature extraction / sparsity estimation
        if self.sparse:
            fea, mask = self.fe(x)
        else:
            fea = self.fe(x)
            # Bug fix: `b`, `h`, `w` were previously undefined here, raising
            # NameError whenever sparse=False. Size the placeholder mask from
            # the extracted features and keep it on the same device.
            b, _, h, w = fea.shape
            mask = torch.randn((b, 1, h, w), device=fea.device)
        # Encoder
        fea_encoder = []
        masks = []
        for (Blcok, FeaDownSample, MaskDownSample) in self.encoder_layers:
            fea = Blcok(fea, mask)
            masks.append(mask)
            fea_encoder.append(fea)
            fea = FeaDownSample(fea)
            mask = MaskDownSample(mask)
        # Bottleneck
        fea = self.bottleneck(fea, mask)
        # Decoder
        for i, (FeaUpSample, Blcok) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            fea = fea + fea_encoder[self.stage - 1 - i]
            mask = masks[self.stage - 1 - i]
            fea = Blcok(fea, mask)
        # Output projection, residual to the coarse cube.
        out = self.out_proj(fea) + x
        if self.sparse:
            return out, mask
        return out
| 20,061 | 32.381032 | 129 | py |
MST | MST-main/real/train_code/architecture/MST.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
import math
import warnings
from torch.nn.init import _calculate_fan_in_and_fan_out
def _no_grad_trunc_normal_(tensor, mean, std, a, b):
def norm_cdf(x):
return (1. + math.erf(x / math.sqrt(2.))) / 2.
if (mean < a - 2 * std) or (mean > b + 2 * std):
warnings.warn("mean is more than 2 std from [a, b] in nn.init.trunc_normal_. "
"The distribution of values may be incorrect.",
stacklevel=2)
with torch.no_grad():
l = norm_cdf((a - mean) / std)
u = norm_cdf((b - mean) / std)
tensor.uniform_(2 * l - 1, 2 * u - 1)
tensor.erfinv_()
tensor.mul_(std * math.sqrt(2.))
tensor.add_(mean)
tensor.clamp_(min=a, max=b)
return tensor
def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):
    # type: (Tensor, float, float, float, float) -> Tensor
    """Fill *tensor* in place with values from N(mean, std) truncated to
    [a, b]; returns the same tensor. Thin public wrapper around
    ``_no_grad_trunc_normal_``."""
    return _no_grad_trunc_normal_(tensor, mean, std, a, b)
def variance_scaling_(tensor, scale=1.0, mode='fan_in', distribution='normal'):
    """Initialize *tensor* in place with variance scaling (He/Glorot family).

    The target variance is ``scale / denom`` where ``denom`` is chosen by
    *mode*: the tensor's fan-in, fan-out, or their average.

    Args:
        tensor: tensor to fill (modified in place).
        scale: positive scale applied to the variance.
        mode: one of 'fan_in', 'fan_out', 'fan_avg'.
        distribution: 'truncated_normal', 'normal', or 'uniform'.

    Raises:
        ValueError: if *mode* or *distribution* is not recognized.
    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    if mode == 'fan_in':
        denom = fan_in
    elif mode == 'fan_out':
        denom = fan_out
    elif mode == 'fan_avg':
        denom = (fan_in + fan_out) / 2
    else:
        # Bug fix: an unknown mode previously fell through and raised a
        # confusing NameError on `denom`; fail fast with a clear message.
        raise ValueError(f"invalid mode {mode}")
    variance = scale / denom
    if distribution == "truncated_normal":
        # 0.8796... is the std of N(0,1) truncated to [-2, 2]; dividing by it
        # compensates for the variance lost to truncation.
        trunc_normal_(tensor, std=math.sqrt(variance) / .87962566103423978)
    elif distribution == "normal":
        tensor.normal_(std=math.sqrt(variance))
    elif distribution == "uniform":
        bound = math.sqrt(3 * variance)
        tensor.uniform_(-bound, bound)
    else:
        raise ValueError(f"invalid distribution {distribution}")
def lecun_normal_(tensor):
    """LeCun-normal init: truncated normal with variance 1/fan_in (in place)."""
    variance_scaling_(tensor, mode='fan_in', distribution='truncated_normal')
class PreNorm(nn.Module):
    """Pre-normalization wrapper: LayerNorm the input, then run the wrapped
    module, forwarding any extra positional/keyword arguments."""
    def __init__(self, dim, fn):
        super().__init__()
        self.fn = fn
        self.norm = nn.LayerNorm(dim)

    def forward(self, x, *args, **kwargs):
        # Normalize over the last dimension before delegating.
        return self.fn(self.norm(x), *args, **kwargs)
class GELU(nn.Module):
    """Parameter-free module wrapper so GELU can live inside nn.Sequential."""
    def forward(self, x):
        # Same computation as F.gelu, routed via the nn.functional alias.
        return nn.functional.gelu(x)
def conv(in_channels, out_channels, kernel_size, bias=False, padding=1, stride=1):
    """Build a 2-D convolution with 'same'-style padding for odd kernels.

    NOTE: the *padding* argument is accepted only for signature
    compatibility — the effective padding is always kernel_size // 2,
    exactly as in the original implementation.
    """
    half_kernel = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size,
                     stride=stride, padding=half_kernel, bias=bias)
def shift_back(inputs, step=2):
    """Undo the per-band dispersive shift in place and crop to square frames.

    input [bs,28,256,310] -> output [bs,28,256,256]. Band i's columns are
    read starting at int(step*i) (step is rescaled by the square of the
    256//row downsampling ratio). NOTE: *inputs* is mutated in place and a
    view of it is returned.
    """
    bs, n_bands, rows, cols = inputs.shape
    ratio = 256 // rows
    eff_step = float(step) / float(ratio * ratio)
    width = rows
    for band in range(n_bands):
        start = int(eff_step * band)
        inputs[:, band, :, :width] = inputs[:, band, :, start:start + width]
    return inputs[:, :, :, :width]
class MS_MSA(nn.Module):
    """Spectral-wise Multi-head Self-Attention (MST).

    Attention is computed along the channel (spectral) axis rather than the
    spatial one: after projecting to q/k/v, the token and feature axes are
    transposed so the attention map is (dim_head x dim_head) per head. A
    depthwise-conv branch over v provides the positional embedding.
    """
    def __init__(
            self,
            dim,
            dim_head=64,
            heads=8,
    ):
        super().__init__()
        self.num_heads = heads
        self.dim_head = dim_head
        self.to_q = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_k = nn.Linear(dim, dim_head * heads, bias=False)
        self.to_v = nn.Linear(dim, dim_head * heads, bias=False)
        # Learnable per-head temperature applied to the attention logits.
        self.rescale = nn.Parameter(torch.ones(heads, 1, 1))
        self.proj = nn.Linear(dim_head * heads, dim, bias=True)
        # Positional embedding: two depthwise 3x3 convs with GELU in between.
        # NOTE: the reshape of v_inp back to [b,h,w,c] below requires
        # dim == dim_head * heads.
        self.pos_emb = nn.Sequential(
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
            GELU(),
            nn.Conv2d(dim, dim, 3, 1, 1, bias=False, groups=dim),
        )
        self.dim = dim
    def forward(self, x_in):
        """
        x_in: [b,h,w,c]  (channels-last)
        return out: [b,h,w,c]
        """
        b, h, w, c = x_in.shape
        # Flatten the spatial grid into a token axis.
        x = x_in.reshape(b,h*w,c)
        q_inp = self.to_q(x)
        k_inp = self.to_k(x)
        v_inp = self.to_v(x)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=self.num_heads),
                                (q_inp, k_inp, v_inp))
        # q: b,heads,hw,d -> transpose so attention runs over the channel axis
        q = q.transpose(-2, -1)
        k = k.transpose(-2, -1)
        v = v.transpose(-2, -1)
        # L2-normalize so the logits are cosine similarities.
        q = F.normalize(q, dim=-1, p=2)
        k = F.normalize(k, dim=-1, p=2)
        attn = (k @ q.transpose(-2, -1)) # A = K^T*Q, shape [b,heads,d,d]
        attn = attn * self.rescale
        attn = attn.softmax(dim=-1)
        x = attn @ v # b,heads,d,hw
        x = x.permute(0, 3, 1, 2) # back to [b,hw,heads,d]
        x = x.reshape(b, h * w, self.num_heads * self.dim_head)
        out_c = self.proj(x).view(b, h, w, c)
        # Positional branch operates channels-first, then returns channels-last.
        out_p = self.pos_emb(v_inp.reshape(b,h,w,c).permute(0, 3, 1, 2)).permute(0, 2, 3, 1)
        out = out_c + out_p
        return out
class FeedForward(nn.Module):
    """Convolutional feed-forward block used between attention layers.

    Inverted bottleneck: 1x1 expand (dim -> dim*mult), depthwise 3x3, then
    1x1 contract back to dim, with GELU after each of the first two convs.
    Operates on channels-last tensors.
    """
    def __init__(self, dim, mult=4):
        super().__init__()
        hidden = dim * mult
        self.net = nn.Sequential(
            nn.Conv2d(dim, hidden, 1, 1, bias=False),
            GELU(),
            nn.Conv2d(hidden, hidden, 3, 1, 1, bias=False, groups=hidden),
            GELU(),
            nn.Conv2d(hidden, dim, 1, 1, bias=False),
        )

    def forward(self, x):
        """x: [b,h,w,c] -> [b,h,w,c]"""
        # Convs expect channels-first; convert in and out.
        y = self.net(x.permute(0, 3, 1, 2))
        return y.permute(0, 2, 3, 1)
class MSAB(nn.Module):
    """Multi-head Spectral Attention Block: a stack of ``num_blocks`` pairs of
    (MS_MSA attention, pre-normed FeedForward), each applied residually."""
    def __init__(
            self,
            dim,
            dim_head=64,
            heads=8,
            num_blocks=2,
    ):
        super().__init__()
        self.blocks = nn.ModuleList([])
        for _ in range(num_blocks):
            pair = nn.ModuleList([
                MS_MSA(dim=dim, dim_head=dim_head, heads=heads),
                PreNorm(dim, FeedForward(dim=dim)),
            ])
            self.blocks.append(pair)

    def forward(self, x):
        """
        x: [b,c,h,w] -> [b,c,h,w]; internally processed channels-last.
        """
        feat = x.permute(0, 2, 3, 1)
        for attn, ff in self.blocks:
            feat = attn(feat) + feat
            feat = ff(feat) + feat
        return feat.permute(0, 3, 1, 2)
class MST(nn.Module):
    """Mask-guided Spectral-wise Transformer for CASSI reconstruction.

    U-shaped architecture of MSAB blocks: un-shift the measurement into a
    28-band cube, embed, encode with strided-conv downsamples, pass a
    bottleneck, decode with transpose-conv upsamples plus concatenated skip
    connections, and project back with a residual connection.
    """
    def __init__(self, dim=28, stage=3, num_blocks=[2,2,2]):
        """
        Args:
            dim: base feature width (number of spectral bands).
            stage: number of encoder/decoder levels.
            num_blocks: MSAB counts per encoder level (reversed for decoder);
                the last entry sizes the bottleneck.
        """
        super(MST, self).__init__()
        self.dim = dim
        self.stage = stage
        # Input projection
        self.embedding = nn.Conv2d(28, self.dim, 3, 1, 1, bias=False)
        # Fuses the un-shifted measurement cube (see initial_x).
        self.fution = nn.Conv2d(28, 28, 3, 1, 1, bias=False)
        # Encoder
        self.encoder_layers = nn.ModuleList([])
        dim_stage = dim
        for i in range(stage):
            self.encoder_layers.append(nn.ModuleList([
                MSAB(
                    dim=dim_stage, num_blocks=num_blocks[i], dim_head=dim, heads=dim_stage // dim),
                nn.Conv2d(dim_stage, dim_stage * 2, 4, 2, 1, bias=False),
            ]))
            dim_stage *= 2
        # Bottleneck
        self.bottleneck = MSAB(
            dim=dim_stage, dim_head=dim, heads=dim_stage // dim, num_blocks=num_blocks[-1])
        # Decoder: upsample, 1x1 fusion of the concatenated skip, MSAB.
        self.decoder_layers = nn.ModuleList([])
        for i in range(stage):
            self.decoder_layers.append(nn.ModuleList([
                nn.ConvTranspose2d(dim_stage, dim_stage // 2, stride=2, kernel_size=2, padding=0, output_padding=0),
                nn.Conv2d(dim_stage, dim_stage // 2, 1, 1, bias=False),
                MSAB(
                    dim=dim_stage // 2, num_blocks=num_blocks[stage - 1 - i], dim_head=dim,
                    heads=(dim_stage // 2) // dim),
            ]))
            dim_stage //= 2
        # Output projection
        self.mapping = nn.Conv2d(self.dim, 28, 3, 1, 1, bias=False)
        #### activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)
    def initial_x(self, y):
        """Un-shift the 2-D measurement into a coarse 28-band cube.

        :param y: [b,256,310] shifted measurement
        :return: x: [b,28,256,256] fused coarse cube
        NOTE: hard-coded .cuda() — this model assumes GPU execution.
        """
        nC, step = 28, 2
        bs, row, col = y.shape
        x = torch.zeros(bs, nC, row, row).cuda().float()
        for i in range(nC):
            # Band i reads a window offset by step*i columns.
            x[:, i, :, :] = y[:, :, step * i:step * i + col - (nC - 1) * step]
        x = self.fution(x)
        return x
    def forward(self, x, input_mask=None):
        """
        x: [b,h,w] shifted measurement (input_mask is unused; kept so all
        models share one call signature)
        return out:[b,c,h,w]
        """
        x = self.initial_x(x)
        # Embedding
        fea = self.lrelu(self.embedding(x))
        # Encoder
        fea_encoder = []
        # NOTE(review): the loop variable `MSAB` shadows the MSAB class name
        # inside this method; harmless here, but worth renaming eventually.
        for (MSAB, FeaDownSample) in self.encoder_layers:
            fea = MSAB(fea)
            fea_encoder.append(fea)
            fea = FeaDownSample(fea)
        # Bottleneck
        fea = self.bottleneck(fea)
        # Decoder
        for i, (FeaUpSample, Fution, LeWinBlcok) in enumerate(self.decoder_layers):
            fea = FeaUpSample(fea)
            # Concatenate the matching encoder skip, then fuse with a 1x1 conv.
            fea = Fution(torch.cat([fea, fea_encoder[self.stage-1-i]], dim=1))
            fea = LeWinBlcok(fea)
        # Mapping (residual to the coarse cube)
        out = self.mapping(fea) + x
        return out
| 8,814 | 28.881356 | 116 | py |
MST | MST-main/real/train_code/architecture/BIRNAT.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class self_attention(nn.Module):
    """Non-local self-attention block (SAGAN style).

    Queries/keys are projected to ch//8 channels, values keep ch channels.
    The attended output is re-projected and blended into the input via a
    learnable scalar ``gamma1`` initialized at 0 (so the block starts as an
    identity).
    """
    def __init__(self, ch):
        super(self_attention, self).__init__()
        self.conv1 = nn.Conv2d(ch, ch // 8, 1)   # query projection
        self.conv2 = nn.Conv2d(ch, ch // 8, 1)   # key projection
        self.conv3 = nn.Conv2d(ch, ch, 1)        # value projection
        self.conv4 = nn.Conv2d(ch, ch, 1)        # output projection
        self.gamma1 = torch.nn.Parameter(torch.Tensor([0]))
        self.ch = ch
    def forward(self, x):
        """x: [bs, ch, h, w] -> [bs, ch, h, w]"""
        batch_size = x.shape[0]
        f = self.conv1(x)
        g = self.conv2(x)
        h = self.conv3(x)
        ht = h.reshape([batch_size, self.ch, -1])
        ft = f.reshape([batch_size, self.ch // 8, -1])
        # n: [bs, HW, HW] similarity between every pair of spatial positions.
        n = torch.matmul(ft.permute([0, 2, 1]), g.reshape([batch_size, self.ch // 8, -1]))
        # NOTE(review): softmax over dim=1 normalizes across the *first*
        # spatial axis (rather than the more common dim=-1); preserved as-is.
        beta = F.softmax(n, dim=1)
        o = torch.matmul(ht, beta)
        o = o.reshape(x.shape) # [bs, C, h, w]
        o = self.conv4(o)
        # Residual blend; gamma1 starts at 0 so training begins from identity.
        x = self.gamma1 * o + x
        return x
class res_part(nn.Module):
    """Three identical residual conv branches applied sequentially:
    x <- branch(x) + x, three times.

    Each branch is 3x3 -> LeakyReLU -> 1x1 -> LeakyReLU -> 3x3. NOTE: the
    final 3x3 expects in_ch input channels while the 1x1 outputs out_ch, so
    the block assumes in_ch == out_ch (how it is used in this file).
    """
    def __init__(self, in_ch, out_ch):
        super(res_part, self).__init__()

        def _branch():
            # Same layer sequence (and Sequential indices) as the original.
            return nn.Sequential(
                nn.Conv2d(in_ch, in_ch, 3, padding=1),
                nn.LeakyReLU(inplace=True),
                nn.Conv2d(in_ch, out_ch, 1),
                nn.LeakyReLU(inplace=True),
                nn.Conv2d(in_ch, in_ch, 3, padding=1),
            )

        self.conv1 = _branch()
        self.conv2 = _branch()
        self.conv3 = _branch()

    def forward(self, x):
        # Apply each residual branch in turn.
        for branch in (self.conv1, self.conv2, self.conv3):
            x = branch(x) + x
        return x
class down_feature(nn.Module):
    """Feature-extraction head: widens from in_ch to out_ch channels through
    a fixed 5x5 / 1x1 / 3x3 conv stack with LeakyReLU activations. Despite
    the name, all convolutions are stride 1 (no spatial downsampling)."""
    def __init__(self, in_ch, out_ch):
        super(down_feature, self).__init__()
        layers = [
            nn.Conv2d(in_ch, 20, 5, stride=1, padding=2),
            nn.Conv2d(20, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.Conv2d(20, 40, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(40, out_ch, 3, stride=1, padding=1),
        ]
        # Same layer order and Sequential indices as the original.
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class up_feature(nn.Module):
    """Feature-decoding head: narrows from in_ch to out_ch channels through
    a fixed 3x3 / 1x1 conv stack with LeakyReLU activations. Despite the
    name, all convolutions are stride 1 (no spatial upsampling)."""
    def __init__(self, in_ch, out_ch):
        super(up_feature, self).__init__()
        layers = [
            nn.Conv2d(in_ch, 40, 3, stride=1, padding=1),
            nn.Conv2d(40, 30, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(30, 20, 3, stride=1, padding=1),
            nn.Conv2d(20, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, padding=1),
            nn.Conv2d(20, out_ch, 1),
        ]
        # Same layer order and Sequential indices as the original.
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.conv(x)
class cnn1(nn.Module):
    # Input: the normalized measurement concatenated with the mask-expanded
    # measurement (PhiTy). Encoder downsamples once, three residual blocks
    # refine, decoder upsamples back and regresses a single band.
    def __init__(self, B):
        """B: number of spectral bands (the input has B+1 channels)."""
        super(cnn1, self).__init__()
        self.conv1 = nn.Conv2d(B + 1, 32, kernel_size=5, stride=1, padding=2)
        self.relu1 = nn.LeakyReLU(inplace=True)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1)
        self.relu2 = nn.LeakyReLU(inplace=True)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=1, stride=1)
        self.relu3 = nn.LeakyReLU(inplace=True)
        # Single stride-2 downsample; mirrored by conv5's transpose conv.
        self.conv4 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)
        self.relu4 = nn.LeakyReLU(inplace=True)
        self.conv5 = nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, output_padding=1)
        self.relu5 = nn.LeakyReLU(inplace=True)
        self.conv51 = nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1)
        self.relu51 = nn.LeakyReLU(inplace=True)
        self.conv52 = nn.Conv2d(32, 16, kernel_size=1, stride=1)
        self.relu52 = nn.LeakyReLU(inplace=True)
        self.conv6 = nn.Conv2d(16, 1, kernel_size=3, stride=1, padding=1)
        self.res_part1 = res_part(128, 128)
        self.res_part2 = res_part(128, 128)
        self.res_part3 = res_part(128, 128)
        self.conv7 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.relu7 = nn.LeakyReLU(inplace=True)
        self.conv8 = nn.Conv2d(128, 128, kernel_size=1, stride=1)
        self.conv9 = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1)
        self.relu9 = nn.LeakyReLU(inplace=True)
        self.conv10 = nn.Conv2d(128, 128, kernel_size=1, stride=1)
        # NOTE(review): att1 is constructed but not used in forward (its call
        # is commented out below), so it adds parameters without effect.
        self.att1 = self_attention(128)
    def forward(self, meas=None, nor_meas=None, PhiTy=None):
        """Predict the first spectral band.

        Args:
            meas: raw measurement (unused here; kept for a uniform API).
            nor_meas: [bs, H, W] mask-normalized measurement.
            PhiTy: [bs, B, H, W] mask-expanded measurement.
        Returns:
            [bs, 1, H, W] estimate of band 0.
        """
        data = torch.cat([torch.unsqueeze(nor_meas, dim=1), PhiTy], dim=1)
        out = self.conv1(data)
        out = self.relu1(out)
        out = self.conv2(out)
        out = self.relu2(out)
        out = self.conv3(out)
        out = self.relu3(out)
        out = self.conv4(out)
        out = self.relu4(out)
        # Residual refinement at the downsampled resolution.
        out = self.res_part1(out)
        out = self.conv7(out)
        out = self.relu7(out)
        out = self.conv8(out)
        out = self.res_part2(out)
        out = self.conv9(out)
        out = self.relu9(out)
        out = self.conv10(out)
        out = self.res_part3(out)
        # out = self.att1(out)
        out = self.conv5(out)
        out = self.relu5(out)
        out = self.conv51(out)
        out = self.relu51(out)
        out = self.conv52(out)
        out = self.relu52(out)
        out = self.conv6(out)
        return out
class forward_rnn(nn.Module):
    """Forward recurrence of BIRNAT: predicts bands 1..cs_rate-1 one at a
    time, conditioning each step on the hidden state, the normalized
    measurement, a data-consistency residual, and the previous band.
    Requires CUDA (allocates intermediate tensors with .cuda()).
    """
    def __init__(self):
        super(forward_rnn, self).__init__()
        self.extract_feature1 = down_feature(1, 20)
        self.up_feature1 = up_feature(60, 1)
        # Encodes the normalized measurement (computed once per forward).
        self.conv_x1 = nn.Sequential(
            nn.Conv2d(1, 16, 5, stride=1, padding=2),
            nn.Conv2d(16, 32, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(32, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        # Encodes the per-step data-consistency residual.
        self.conv_x2 = nn.Sequential(
            nn.Conv2d(1, 10, 5, stride=1, padding=2),
            nn.Conv2d(10, 10, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(10, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        # Maps the fused 60-channel state to the next 20-channel hidden state.
        self.h_h = nn.Sequential(
            nn.Conv2d(60, 30, 3, padding=1),
            nn.Conv2d(30, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, padding=1),
        )
        self.res_part1 = res_part(60, 60)
        self.res_part2 = res_part(60, 60)
    def forward(self, xt1, meas=None, nor_meas=None, PhiTy=None, mask3d_batch=None, h=None, cs_rate=28):
        """
        Args:
            xt1: [bs, 1, H, W] first-band estimate from cnn1.
            meas: [bs, H, W] raw measurement.
            nor_meas: [bs, H, W] mask-normalized measurement.
            PhiTy: unused here; kept for a uniform call signature.
            mask3d_batch: [bs, cs_rate, H, W] shifted sensing mask.
            h: [bs, 20, H, W] initial hidden state.
            cs_rate: number of spectral bands.
        Returns:
            (out, ht): [bs, cs_rate, H, W] band estimates and final hidden state.
        """
        ht = h
        xt = xt1
        step = 2  # NOTE(review): unused local, kept as-is.
        [bs, nC, row, col] = xt1.shape
        out = xt1
        # Measurement features are step-invariant, so compute them once.
        x11 = self.conv_x1(torch.unsqueeze(nor_meas, 1))
        for i in range(cs_rate - 1):
            # d1: measurement contribution of the bands already predicted;
            # d2: approximate contribution of the not-yet-predicted bands,
            # using the normalized measurement as a stand-in.
            d1 = torch.zeros(bs, row, col).cuda()
            d2 = torch.zeros(bs, row, col).cuda()
            for ii in range(i + 1):
                d1 = d1 + torch.mul(mask3d_batch[:, ii, :, :], out[:, ii, :, :])
            for ii in range(i + 2, cs_rate):
                d2 = d2 + torch.mul(mask3d_batch[:, ii, :, :], torch.squeeze(nor_meas))
            # Data-consistency residual for band i+1.
            x12 = self.conv_x2(torch.unsqueeze(meas - d1 - d2, 1))
            x2 = self.extract_feature1(xt)
            # Fuse hidden state + measurement + residual + previous band (60ch).
            h = torch.cat([ht, x11, x12, x2], dim=1)
            h = self.res_part1(h)
            h = self.res_part2(h)
            ht = self.h_h(h)
            xt = self.up_feature1(h)
            out = torch.cat([out, xt], dim=1)
        return out, ht
class backrnn(nn.Module):
    """Backward recurrence of BIRNAT: starting from the forward pass's last
    band, re-predicts bands cs_rate-2 .. 0 in reverse order, refining the
    forward estimates. Requires CUDA (allocates intermediates with .cuda()).
    """
    def __init__(self):
        super(backrnn, self).__init__()
        self.extract_feature1 = down_feature(1, 20)
        self.up_feature1 = up_feature(60, 1)
        # Encodes the normalized measurement (computed once per forward).
        self.conv_x1 = nn.Sequential(
            nn.Conv2d(1, 16, 5, stride=1, padding=2),
            nn.Conv2d(16, 32, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(32, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        # Encodes the per-step data-consistency residual.
        self.conv_x2 = nn.Sequential(
            nn.Conv2d(1, 10, 5, stride=1, padding=2),
            nn.Conv2d(10, 10, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(10, 40, 3, stride=2, padding=1),
            nn.Conv2d(40, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, stride=1, padding=1),
            nn.LeakyReLU(inplace=True),
            nn.ConvTranspose2d(20, 10, kernel_size=3, stride=2, padding=1, output_padding=1),
        )
        # Maps the fused 60-channel state to the next 20-channel hidden state.
        self.h_h = nn.Sequential(
            nn.Conv2d(60, 30, 3, padding=1),
            nn.Conv2d(30, 20, 1),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(20, 20, 3, padding=1),
        )
        self.res_part1 = res_part(60, 60)
        self.res_part2 = res_part(60, 60)
    def forward(self, xt8, meas=None, nor_meas=None, PhiTy=None, mask3d_batch=None, h=None, cs_rate=28):
        """
        Args:
            xt8: [bs, cs_rate, H, W] forward-pass band estimates.
            meas: [bs, H, W] raw measurement.
            nor_meas: [bs, H, W] mask-normalized measurement.
            PhiTy: unused here; kept for a uniform call signature.
            mask3d_batch: [bs, cs_rate, H, W] shifted sensing mask.
            h: [bs, 20, H, W] hidden state handed over from forward_rnn.
            cs_rate: number of spectral bands.
        Returns:
            [bs, cs_rate, H, W] refined band estimates.
        """
        ht = h
        step = 2  # NOTE(review): unused local, kept as-is.
        [bs, nC, row, col] = xt8.shape
        # Seed with the last band of the forward pass and fill backwards.
        xt = torch.unsqueeze(xt8[:, cs_rate - 1, :, :], 1)
        out = torch.zeros(bs, cs_rate, row, col).cuda()
        out[:, cs_rate - 1, :, :] = xt[:, 0, :, :]
        x11 = self.conv_x1(torch.unsqueeze(nor_meas, 1))
        for i in range(cs_rate - 1):
            # d1: contribution of the bands already refined (high indices);
            # d2: contribution of the remaining bands, taken from the
            # forward-pass estimates. clone() avoids in-place autograd issues.
            d1 = torch.zeros(bs, row, col).cuda()
            d2 = torch.zeros(bs, row, col).cuda()
            for ii in range(i + 1):
                d1 = d1 + torch.mul(mask3d_batch[:, cs_rate - 1 - ii, :, :], out[:, cs_rate - 1 - ii, :, :].clone())
            for ii in range(i + 2, cs_rate):
                d2 = d2 + torch.mul(mask3d_batch[:, cs_rate - 1 - ii, :, :], xt8[:, cs_rate - 1 - ii, :, :].clone())
            # Data-consistency residual for the band being refined.
            x12 = self.conv_x2(torch.unsqueeze(meas - d1 - d2, 1))
            x2 = self.extract_feature1(xt)
            h = torch.cat([ht, x11, x12, x2], dim=1)
            h = self.res_part1(h)
            h = self.res_part2(h)
            ht = self.h_h(h)
            xt = self.up_feature1(h)
            out[:, cs_rate - 2 - i, :, :] = xt[:, 0, :, :]
        return out
def shift_gt_back(inputs, step=2):
    """Crop the dispersive shift out of a cube: input [bs, nC, row, col]
    (e.g. [bs, 28, 256, 310]) -> output [bs, nC, row, col - (nC-1)*step].

    Inverse of ``shift``: channel i of the output is channel i of the input
    with its leading step*i columns dropped, cropped to the common width.

    Args:
        inputs: [bs, nC, row, col] tensor.
        step: per-channel column shift (default 2).

    Returns:
        Float tensor on the same device as *inputs*.
    """
    bs, nC, row, col = inputs.shape
    out_col = col - (nC - 1) * step
    # Allocate on the input's device instead of hard-coding .cuda(), matching
    # the device handling already used by ``shift``; CUDA callers see
    # identical behavior, and CPU inputs now work too.
    output = torch.zeros(bs, nC, row, out_col, device=inputs.device).float()
    for i in range(nC):
        output[:, i, :, :] = inputs[:, i, :, step * i:step * i + out_col]
    return output
def shift(inputs, step=2):
    """Apply the dispersive shift: channel i of [bs, nC, row, col] is placed
    step*i columns to the right in a zero-padded output of width
    col + (nC - 1) * step. Returns a new float tensor; CUDA inputs produce
    CUDA outputs, everything else lands on CPU (as in the original)."""
    bs, nC, row, col = inputs.shape
    padded_col = col + (nC - 1) * step
    target = inputs.device if inputs.is_cuda else torch.device("cpu")
    output = torch.zeros(bs, nC, row, padded_col, dtype=torch.float32, device=target)
    for i in range(nC):
        offset = step * i
        output[:, i, :, offset:offset + col] = inputs[:, i, :, :]
    return output
class BIRNAT(nn.Module):
    """Bidirectional RNN with attention (BIRNAT) for CASSI reconstruction.

    A first-frame CNN predicts band 0 from the normalized measurement, a
    forward RNN autoregressively predicts the remaining bands, and a
    backward RNN refines them in reverse order. NOTE: submodules are moved
    to CUDA in __init__, so this model requires a GPU.
    """
    def __init__(self):
        super(BIRNAT, self).__init__()
        self.cs_rate = 28  # number of spectral bands
        self.first_frame_net = cnn1(self.cs_rate).cuda()
        self.rnn1 = forward_rnn().cuda()
        self.rnn2 = backrnn().cuda()

    def gen_meas_torch(self, meas, shift_mask):
        """Normalize the measurement by the mask's spectral sum and expand it
        through the shifted mask.

        Args:
            meas: [bs, H, W'] measurement (W' = shifted width).
            shift_mask: [bs, 28, H, W'] shifted sensing mask.

        Returns:
            (nor_meas, PhiTy): [bs, H, W'] normalized measurement and its
            [bs, 28, H, W'] mask-weighted spectral expansion.
        """
        batch_size, H = meas.shape[0:2]
        mask_s = torch.sum(shift_mask, 1)
        nor_meas = torch.div(meas, mask_s)
        temp = torch.mul(torch.unsqueeze(nor_meas, dim=1).expand([batch_size, 28, H, shift_mask.shape[3]]), shift_mask)
        return nor_meas, temp

    def forward(self, meas, shift_mask=None):
        """Reconstruct the [bs, 28, H, W] cube from a shifted measurement."""
        # Bug fix: test the optional argument with `is None` instead of
        # `== None` — comparing a tensor with `==` is elementwise semantics
        # and only works here because PyTorch special-cases None.
        if shift_mask is None:
            shift_mask = torch.zeros(1, 28, 256, 310).cuda()
        H, W = meas.shape[-2:]
        nor_meas, PhiTy = self.gen_meas_torch(meas, shift_mask)
        # Initial 20-channel hidden state for the forward recurrence.
        h0 = torch.zeros(meas.shape[0], 20, H, W).cuda()
        xt1 = self.first_frame_net(meas, nor_meas, PhiTy)
        model_out1, h1 = self.rnn1(xt1, meas, nor_meas, PhiTy, shift_mask, h0, self.cs_rate)
        model_out2 = self.rnn2(model_out1, meas, nor_meas, PhiTy, shift_mask, h1, self.cs_rate)
        # Crop the dispersive shift to get the square output cube.
        model_out2 = shift_gt_back(model_out2)
        return model_out2
| 13,326 | 35.412568 | 119 | py |
MST | MST-main/real/train_code/architecture/GAP_Net.py | import torch.nn.functional as F
import torch
import torch.nn as nn
def A(x, Phi):
    """Forward CASSI operator: mask the cube element-wise, then integrate
    over the spectral axis. [bs, nC, H, W] -> [bs, H, W]."""
    return (x * Phi).sum(1)
def At(y, Phi):
    """Adjoint of A: replicate the measurement along the spectral axis and
    re-apply the mask. [bs, H, W] -> [bs, nC, H, W]."""
    expanded = y.unsqueeze(1).repeat(1, Phi.shape[1], 1, 1)
    return expanded * Phi
def shift_3d(inputs, step=2):
    """In place: roll channel i right by step*i columns (with wrap-around).
    Returns the mutated *inputs* tensor."""
    n_bands = inputs.shape[1]
    for band in range(n_bands):
        inputs[:, band] = torch.roll(inputs[:, band], shifts=step * band, dims=2)
    return inputs
def shift_back_3d(inputs, step=2):
    """In place: roll channel i left by step*i columns (with wrap-around);
    inverse of shift_3d. Returns the mutated *inputs* tensor."""
    n_bands = inputs.shape[1]
    for band in range(n_bands):
        inputs[:, band] = torch.roll(inputs[:, band], shifts=-step * band, dims=2)
    return inputs
class double_conv(nn.Module):
    """Two 3x3 same-padding convolutions, each followed by in-place ReLU."""
    def __init__(self, in_channels, out_channels):
        super(double_conv, self).__init__()
        width = out_channels
        self.d_conv = nn.Sequential(
            nn.Conv2d(in_channels, width, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(width, width, 3, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.d_conv(x)
class Unet(nn.Module):
    """Small 3-level U-Net denoiser with a global residual connection.

    The input is reflect-padded to a multiple of 8 so the two max-pool /
    transpose-conv stages round-trip cleanly; the output is cropped back to
    the original size. The final Tanh bounds the residual before it is
    added to the input.
    """
    def __init__(self, in_ch, out_ch):
        super(Unet, self).__init__()
        self.dconv_down1 = double_conv(in_ch, 32)
        self.dconv_down2 = double_conv(32, 64)
        self.dconv_down3 = double_conv(64, 128)
        self.maxpool = nn.MaxPool2d(2)
        self.upsample2 = nn.Sequential(
            nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2),
            # nn.Conv2d(64, 64, (1,2), padding=(0,1)),
            nn.ReLU(inplace=True)
        )
        self.upsample1 = nn.Sequential(
            nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        # Decoder convs take the concatenated skip + upsampled features.
        self.dconv_up2 = double_conv(64 + 64, 64)
        self.dconv_up1 = double_conv(32 + 32, 32)
        self.conv_last = nn.Conv2d(32, out_ch, 1)
        self.afn_last = nn.Tanh()
    def forward(self, x):
        """x: [b, in_ch, h, w] -> [b, out_ch, h, w] (requires in_ch == out_ch
        for the residual addition)."""
        b, c, h_inp, w_inp = x.shape
        # Pad so both spatial dims are multiples of 8 (two pooling levels).
        hb, wb = 8, 8
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        inputs = x
        conv1 = self.dconv_down1(x)
        x = self.maxpool(conv1)
        conv2 = self.dconv_down2(x)
        x = self.maxpool(conv2)
        conv3 = self.dconv_down3(x)
        x = self.upsample2(conv3)
        x = torch.cat([x, conv2], dim=1)
        x = self.dconv_up2(x)
        x = self.upsample1(x)
        x = torch.cat([x, conv1], dim=1)
        x = self.dconv_up1(x)
        x = self.conv_last(x)
        x = self.afn_last(x)
        # Global residual, then crop away the padding.
        out = x + inputs
        return out[:, :, :h_inp, :w_inp]
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2

    Two 3x3 same-padding convolutions, each followed by BatchNorm and
    in-place ReLU. *mid_channels* sets the intermediate width and defaults
    to *out_channels* when falsy.
    """
    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        mid = mid_channels if mid_channels else out_channels
        self.double_conv = nn.Sequential(
            nn.Conv2d(in_channels, mid, kernel_size=3, padding=1),
            nn.BatchNorm2d(mid),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.double_conv(x)
class GAP_net(nn.Module):
    """GAP-Net: nine-stage unrolled Generalized Alternating Projection solver.

    Each stage applies a GAP data-fidelity projection, un-shifts the cube,
    denoises it with a small U-Net, and re-shifts it. The output is cropped
    to the square 256-column region.
    """
    def __init__(self):
        super(GAP_net, self).__init__()
        # Nine independent per-stage denoisers. Kept as distinct numbered
        # attributes so existing checkpoints still load unchanged.
        self.unet1 = Unet(28, 28)
        self.unet2 = Unet(28, 28)
        self.unet3 = Unet(28, 28)
        self.unet4 = Unet(28, 28)
        self.unet5 = Unet(28, 28)
        self.unet6 = Unet(28, 28)
        self.unet7 = Unet(28, 28)
        self.unet8 = Unet(28, 28)
        self.unet9 = Unet(28, 28)

    def forward(self, y, input_mask=None):
        """Reconstruct [b, 28, 256, 256] from a shifted measurement.

        Args:
            y: [b, 256, 310] measurement.
            input_mask: (Phi, Phi_s) — the sensing mask and its spectral
                sum. When None, random CUDA placeholders are used (only
                useful for shape debugging).
        """
        # Idiom fix: `is None` instead of comparing a value with `== None`.
        if input_mask is None:
            Phi = torch.rand((1, 28, 256, 310)).cuda()
            Phi_s = torch.rand((1, 256, 310)).cuda()
        else:
            Phi, Phi_s = input_mask
        x = At(y, Phi)  # v0 = H^T y
        # The original body spelled this stage out nine times verbatim (and
        # filled an x_list that was never used); one loop is equivalent.
        for denoiser in (self.unet1, self.unet2, self.unet3,
                         self.unet4, self.unet5, self.unet6,
                         self.unet7, self.unet8, self.unet9):
            yb = A(x, Phi)
            # GAP euclidean-projection (data fidelity) step.
            x = x + At(torch.div(y - yb, Phi_s), Phi)
            x = shift_back_3d(x)
            x = denoiser(x)
            x = shift_3d(x)
        return x[:, :, :, 0:256]
MST | MST-main/real/train_code/architecture/Lambda_Net.py | import torch.nn as nn
import torch
import torch.nn.functional as F
from einops import rearrange
from torch import einsum
class LambdaNetAttention(nn.Module):
    """Spatial self-attention block used inside Lambda_Net.

    Queries/keys are projected to dim//8 features, values keep dim features;
    attention is computed over all spatial positions. The attended output is
    blended into the input via a learnable scalar *gamma* (initialized at 1).
    """
    def __init__(
            self,
            dim,
    ):
        super().__init__()
        self.dim = dim
        self.to_q = nn.Linear(dim, dim//8, bias=False)
        self.to_k = nn.Linear(dim, dim//8, bias=False)
        self.to_v = nn.Linear(dim, dim, bias=False)
        # 1/sqrt(d_k) scaling for the dot-product logits.
        self.rescale = (dim//8)**-0.5
        self.gamma = nn.Parameter(torch.ones(1))
    def forward(self, x):
        """
        x: [b,c,h,w]
        return out: [b,c,h,w]
        """
        # Work channels-last so the Linear projections apply per position.
        x = x.permute(0,2,3,1)
        b, h, w, c = x.shape
        # Reshape to (B,N,C), where N = h*w is the sequence length.
        x_inp = rearrange(x, 'b h w c -> b (h w) c')
        # produce query, key and value
        q = self.to_q(x_inp)
        k = self.to_k(x_inp)
        v = self.to_v(x_inp)
        # attention: [b, N, N] scaled dot-product logits.
        sim = einsum('b i d, b j d -> b i j', q, k)*self.rescale
        attn = sim.softmax(dim=-1)
        # aggregate
        out = einsum('b i j, b j d -> b i d', attn, v)
        # merge tokens back to the spatial feature map
        out = rearrange(out, 'b (h w) c -> b h w c', h=h, w=w)
        # Learnable residual blend.
        out = self.gamma*out + x
        return out.permute(0,3,1,2)
class triple_conv(nn.Module):
    """Three 3x3 same-padding convolutions with ReLU after the first two
    (the last conv is left linear)."""
    def __init__(self, in_channels, out_channels):
        super(triple_conv, self).__init__()
        width = out_channels
        self.t_conv = nn.Sequential(
            nn.Conv2d(in_channels, width, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(width, width, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(width, width, 3, padding=1),
        )

    def forward(self, x):
        return self.t_conv(x)
class double_conv(nn.Module):
    """Two 3x3 same-padding convolutions with a ReLU between them (the
    second conv is left linear)."""
    def __init__(self, in_channels, out_channels):
        super(double_conv, self).__init__()
        width = out_channels
        self.d_conv = nn.Sequential(
            nn.Conv2d(in_channels, width, 3, padding=1),
            nn.ReLU(),
            nn.Conv2d(width, width, 3, padding=1),
        )

    def forward(self, x):
        return self.d_conv(x)
def shift_back_3d(inputs, step=2):
    """In place: roll channel i left by step*i columns (with wrap-around).
    Returns the mutated *inputs* tensor."""
    n_bands = inputs.shape[1]
    for band in range(n_bands):
        inputs[:, band] = torch.roll(inputs[:, band], shifts=-step * band, dims=2)
    return inputs
class Lambda_Net(nn.Module):
    """Lambda-Net: a deep 5-level U-Net with one self-attention block at the
    third decoder level, plus a two-step dense refinement head. Takes the
    shifted 2-D measurement (and the sensing mask) and outputs the
    un-shifted 28-band cube.
    """
    def __init__(self, out_ch=28):
        super(Lambda_Net, self).__init__()
        # Fuses measurement (1ch) with the 28-channel mask.
        self.conv_in = nn.Conv2d(1+28, 28, 3, padding=1)
        # encoder
        self.conv_down1 = triple_conv(28, 32)
        self.conv_down2 = triple_conv(32, 64)
        self.conv_down3 = triple_conv(64, 128)
        self.conv_down4 = triple_conv(128, 256)
        self.conv_down5 = double_conv(256, 512)
        self.conv_down6 = double_conv(512, 1024)
        self.maxpool = nn.MaxPool2d(2)
        # decoder
        self.upsample5 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2)
        self.upsample4 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
        self.upsample3 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
        self.upsample2 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
        self.upsample1 = nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2)
        # Decoder convs take the concatenated skip + upsampled features.
        self.conv_up1 = triple_conv(32+32, 32)
        self.conv_up2 = triple_conv(64+64, 64)
        self.conv_up3 = triple_conv(128+128, 128)
        self.conv_up4 = triple_conv(256+256, 256)
        self.conv_up5 = double_conv(512+512, 512)
        # attention
        self.attention = LambdaNetAttention(dim=128)
        # Dense refinement head: each step concatenates its input with the
        # previous step's output before re-projecting.
        self.conv_last1 = nn.Conv2d(32, 6, 3,1,1)
        self.conv_last2 = nn.Conv2d(38, 32, 3,1,1)
        self.conv_last3 = nn.Conv2d(32, 12, 3,1,1)
        self.conv_last4 = nn.Conv2d(44, 32, 3,1,1)
        self.conv_last5 = nn.Conv2d(32, out_ch, 1)
        self.act = nn.ReLU()
    def forward(self, x, input_mask=None):
        """x: [b,256,310] shifted measurement; input_mask: [b,28,256,310]
        sensing mask (zeros are substituted on CUDA when omitted).
        Returns [b,out_ch,256,256]."""
        # NOTE(review): `== None` works here only because PyTorch
        # special-cases tensor-vs-None; `is None` would be the usual idiom.
        if input_mask == None:
            input_mask = torch.zeros((1,28,256,310)).cuda()
        # Fixed input scaling — presumably normalizes by the 28-band overlap
        # (x/28*2); TODO confirm against the training pipeline.
        x = x/28*2
        x = self.conv_in(torch.cat([x.unsqueeze(1), input_mask], dim=1))
        b, c, h_inp, w_inp = x.shape
        # Pad to a multiple of 32 (five pooling levels), reflect mode.
        hb, wb = 32, 32
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        res0 = x  # global residual (28 channels)
        conv1 = self.conv_down1(x)
        x = self.maxpool(conv1)
        conv2 = self.conv_down2(x)
        x = self.maxpool(conv2)
        conv3 = self.conv_down3(x)
        x = self.maxpool(conv3)
        conv4 = self.conv_down4(x)
        x = self.maxpool(conv4)
        conv5 = self.conv_down5(x)
        x = self.maxpool(conv5)
        conv6 = self.conv_down6(x)
        x = self.upsample5(conv6)
        x = torch.cat([x, conv5], dim=1)
        x = self.conv_up5(x)
        x = self.upsample4(x)
        x = torch.cat([x, conv4], dim=1)
        x = self.conv_up4(x)
        x = self.upsample3(x)
        x = torch.cat([x, conv3], dim=1)
        x = self.conv_up3(x)
        # Self-attention at the 128-channel decoder level.
        x = self.attention(x)
        x = self.upsample2(x)
        x = torch.cat([x, conv2], dim=1)
        x = self.conv_up2(x)
        x = self.upsample1(x)
        x = torch.cat([x, conv1], dim=1)
        x = self.conv_up1(x)
        # Dense refinement head.
        res1 = x
        out1 = self.act(self.conv_last1(x))
        x = self.conv_last2(torch.cat([res1,out1],dim=1))
        res2 = x
        out2 = self.act(self.conv_last3(x))
        out3 = self.conv_last4(torch.cat([res2, out2], dim=1))
        out = self.conv_last5(out3)+res0
        out = out[:, :, :h_inp, :w_inp]
        # Un-shift the spectral bands and crop to the square region.
        return shift_back_3d(out)[:, :, :, :256]
| 5,680 | 30.38674 | 95 | py |
MST | MST-main/real/train_code/architecture/ADMM_Net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
def A(x, Phi):
    """Forward CASSI operator: mask the cube element-wise, then integrate
    over the spectral axis. [bs, nC, H, W] -> [bs, H, W]."""
    return (x * Phi).sum(1)
def At(y, Phi):
    """Adjoint of A: replicate the measurement along the spectral axis and
    re-apply the mask. [bs, H, W] -> [bs, nC, H, W]."""
    expanded = y.unsqueeze(1).repeat(1, Phi.shape[1], 1, 1)
    return expanded * Phi
class double_conv(nn.Module):
    """Two 3x3 same-padding convolutions, each followed by in-place ReLU."""
    def __init__(self, in_channels, out_channels):
        super(double_conv, self).__init__()
        width = out_channels
        self.d_conv = nn.Sequential(
            nn.Conv2d(in_channels, width, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(width, width, 3, padding=1),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.d_conv(x)
class Unet(nn.Module):
    """Small 3-level U-Net denoiser with a global residual connection
    (ADMM-Net stage denoiser).

    The input is reflect-padded to a multiple of 8 so the two max-pool /
    transpose-conv stages round-trip cleanly; the output is cropped back to
    the original size. The final Tanh bounds the residual before it is
    added to the input.
    """
    def __init__(self, in_ch, out_ch):
        super(Unet, self).__init__()
        self.dconv_down1 = double_conv(in_ch, 32)
        self.dconv_down2 = double_conv(32, 64)
        self.dconv_down3 = double_conv(64, 128)
        self.maxpool = nn.MaxPool2d(2)
        self.upsample2 = nn.Sequential(
            nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        self.upsample1 = nn.Sequential(
            nn.ConvTranspose2d(64, 32, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        # Decoder convs take the concatenated skip + upsampled features.
        self.dconv_up2 = double_conv(64 + 64, 64)
        self.dconv_up1 = double_conv(32 + 32, 32)
        self.conv_last = nn.Conv2d(32, out_ch, 1)
        self.afn_last = nn.Tanh()
    def forward(self, x):
        """x: [b, in_ch, h, w] -> [b, out_ch, h, w] (requires in_ch == out_ch
        for the residual addition)."""
        b, c, h_inp, w_inp = x.shape
        # Pad so both spatial dims are multiples of 8 (two pooling levels).
        hb, wb = 8, 8
        pad_h = (hb - h_inp % hb) % hb
        pad_w = (wb - w_inp % wb) % wb
        x = F.pad(x, [0, pad_w, 0, pad_h], mode='reflect')
        inputs = x
        conv1 = self.dconv_down1(x)
        x = self.maxpool(conv1)
        conv2 = self.dconv_down2(x)
        x = self.maxpool(conv2)
        conv3 = self.dconv_down3(x)
        x = self.upsample2(conv3)
        x = torch.cat([x, conv2], dim=1)
        x = self.dconv_up2(x)
        x = self.upsample1(x)
        x = torch.cat([x, conv1], dim=1)
        x = self.dconv_up1(x)
        x = self.conv_last(x)
        x = self.afn_last(x)
        # Global residual, then crop away the padding.
        out = x + inputs
        return out[:, :, :h_inp, :w_inp]
def shift_3d(inputs,step=2):
    """Apply the dispersion shift: roll band i by step*i along the last axis.

    Note: mutates `inputs` in place and also returns it (callers rely on both).
    """
    nC = inputs.shape[1]
    for band in range(nC):
        inputs[:, band] = torch.roll(inputs[:, band], shifts=step * band, dims=2)
    return inputs
def shift_back_3d(inputs,step=2):
    """Undo the dispersion shift: roll band i by -step*i along the last axis.

    Note: mutates `inputs` in place and also returns it (callers rely on both).
    """
    nC = inputs.shape[1]
    for band in range(nC):
        inputs[:, band] = torch.roll(inputs[:, band], shifts=-step * band, dims=2)
    return inputs
class ADMM_net(nn.Module):
    """Deep-unfolded ADMM for CASSI reconstruction with 9 U-Net prior stages.

    The nine stages in the original were copy-pasted verbatim; they are folded
    into a single loop here. Parameter attribute names (unet1..unet9,
    gamma1..gamma9) are preserved so existing checkpoints still load.
    """

    N_STAGES = 9

    def __init__(self):
        super(ADMM_net, self).__init__()
        # Register per-stage denoisers and penalty parameters under their
        # historical attribute names to keep state_dict keys stable.
        for i in range(1, self.N_STAGES + 1):
            setattr(self, 'unet%d' % i, Unet(28, 28))
            setattr(self, 'gamma%d' % i, torch.nn.Parameter(torch.Tensor([0])))

    def forward(self, y, input_mask=None):
        """Reconstruct a 28-band cube from the coded measurement.

        Args:
            y: (B, H, W) coded measurement.
            input_mask: optional (Phi, Phi_s) tensors; random CUDA masks are
                generated when omitted (original fallback behaviour).

        Returns:
            (B, 28, H, 256) reconstruction (shifted-back and cropped).
        """
        if input_mask is None:  # bug fix: was `input_mask == None`
            Phi = torch.rand((1, 28, 256, 310)).cuda()
            Phi_s = torch.rand((1, 256, 310)).cuda()
        else:
            Phi, Phi_s = input_mask
        theta = At(y, Phi)
        b = torch.zeros_like(Phi)
        for i in range(1, self.N_STAGES + 1):
            denoiser = getattr(self, 'unet%d' % i)
            gamma = getattr(self, 'gamma%d' % i)
            # x-update: closed-form Euclidean projection onto the data term.
            yb = A(theta + b, Phi)
            x = theta + b + At(torch.div(y - yb, Phi_s + gamma), Phi)
            # theta-update: denoising prior applied in the shifted-back domain.
            x1 = shift_back_3d(x - b)
            theta = shift_3d(denoiser(x1))
            # Dual (multiplier) update.
            b = b - (x - theta)
        return theta[:, :, :, 0:256]
| 6,191 | 29.653465 | 81 | py |
MST | MST-main/real/train_code/architecture/TSA_Net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
_NORM_BONE = False
def conv_block(in_planes, out_planes, the_kernel=3, the_stride=1, the_padding=1, flag_norm=False, flag_norm_act=True):
    """Conv2d + ReLU; when flag_norm is set, BatchNorm goes before the ReLU if
    flag_norm_act is True, after it otherwise."""
    layers = [nn.Conv2d(in_planes, out_planes, kernel_size=the_kernel, stride=the_stride, padding=the_padding)]
    if flag_norm:
        norm = nn.BatchNorm2d(out_planes)
        act = nn.ReLU(inplace=True)
        layers += [norm, act] if flag_norm_act else [act, norm]
    else:
        layers.append(nn.ReLU(inplace=True))
    return nn.Sequential(*layers)
def conv1x1_block(in_planes, out_planes, flag_norm=False):
    """1x1 Conv2d without bias; wrapped with BatchNorm when flag_norm is set."""
    conv = nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=1, padding=0, bias=False)
    if flag_norm:
        return nn.Sequential(conv, nn.BatchNorm2d(out_planes))
    return conv
def fully_block(in_dim, out_dim, flag_norm=False, flag_norm_act=True):
    """Linear + ReLU, optionally with a norm layer.

    NOTE(review): the norm layer is nn.BatchNorm2d, which expects 4-D input and
    would fail after nn.Linear; it is never exercised in this file because all
    callers pass _NORM_FC = False — confirm before enabling flag_norm.
    """
    fc = nn.Linear(in_dim, out_dim)
    act = nn.ReLU(inplace=True)
    if not flag_norm:
        return nn.Sequential(fc, act)
    norm = nn.BatchNorm2d(out_dim)
    return nn.Sequential(fc, norm, act) if flag_norm_act else nn.Sequential(fc, act, norm)
class Res2Net(nn.Module):
    """Res2Net-style multi-scale block: 1x1-expand to uPlane*scale channels,
    process `scale` channel groups hierarchically with 3x3 convs, then
    1x1-project back to inChannel. (No residual add inside this block.)"""
    def __init__(self, inChannel, uPlane, scale=4):
        super(Res2Net, self).__init__()
        self.uPlane = uPlane
        self.scale = scale
        self.conv_init = nn.Conv2d(inChannel, uPlane * scale, kernel_size=1, bias=False)
        self.bn_init = nn.BatchNorm2d(uPlane * scale)
        convs = []
        bns = []
        # One 3x3 conv/BN per channel group except the last group,
        # which is passed through unprocessed (see forward).
        for i in range(self.scale - 1):
            convs.append(nn.Conv2d(self.uPlane, self.uPlane, kernel_size=3, stride=1, padding=1, bias=False))
            bns.append(nn.BatchNorm2d(self.uPlane))
        self.convs = nn.ModuleList(convs)
        self.bns = nn.ModuleList(bns)
        self.conv_end = nn.Conv2d(uPlane * scale, inChannel, kernel_size=1, bias=False)
        self.bn_end = nn.BatchNorm2d(inChannel)
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        out = self.conv_init(x)
        out = self.bn_init(out)
        out = self.relu(out)
        # Split the expanded features into `scale` groups of uPlane channels.
        spx = torch.split(out, self.uPlane, 1)
        for i in range(self.scale - 1):
            # Each group receives the previous group's processed features
            # before its own conv (hierarchical multi-scale flow);
            # `out` accumulates the processed groups by concatenation.
            if i == 0:
                sp = spx[i]
            else:
                sp = sp + spx[i]
            sp = self.convs[i](sp)
            sp = self.relu(self.bns[i](sp))
            if i == 0:
                out = sp
            else:
                out = torch.cat((out, sp), 1)
        # Append the final, unprocessed group before the fusion conv.
        out = torch.cat((out, spx[self.scale - 1]), 1)
        out = self.conv_end(out)
        out = self.bn_end(out)
        return out
_NORM_ATTN = True
_NORM_FC = False
class TSA_Transform(nn.Module):
    """ Spectral-Spatial Self-Attention """
    # NOTE(review): `mode` uses a mutable default list; it is only read here,
    # so this is safe, but a tuple default would be cleaner.
    def __init__(self, uSpace, inChannel, outChannel, nHead, uAttn, mode=[0, 1], flag_mask=False, gamma_learn=False):
        super(TSA_Transform, self).__init__()
        ''' ------------------------------------------
        uSpace:
            uHeight: the [-2] dim of the 3D tensor
            uWidth: the [-1] dim of the 3D tensor
        inChannel:
            the number of Channel of the input tensor
        outChannel:
            the number of Channel of the output tensor
        nHead:
            the number of Head of the input tensor
        uAttn:
            uSpatial: the dim of the spatial features
            uSpectral: the dim of the spectral features
        mask:
            The Spectral Smoothness Mask
        {mode} and {gamma_learn} is just for variable selection
        ------------------------------------------ '''
        self.nHead = nHead
        self.uAttn = uAttn
        self.outChannel = outChannel
        # Frozen scalar temperatures for the scaled dot-product attentions.
        self.uSpatial = nn.Parameter(torch.tensor(float(uAttn[0])), requires_grad=False)
        self.uSpectral = nn.Parameter(torch.tensor(float(uAttn[1])), requires_grad=False)
        # Optional spectral-smoothness prior added to the lambda attention map.
        self.mask = nn.Parameter(Spectral_Mask(outChannel), requires_grad=False) if flag_mask else None
        self.attn_scale = nn.Parameter(torch.tensor(1.1), requires_grad=False) if flag_mask else None
        self.gamma = nn.Parameter(torch.tensor(1.0), requires_grad=gamma_learn)
        if sum(mode) > 0:
            # Strided-conv pyramid that downsamples the value features by 2**sum(mode),
            # mirrored by a transpose-conv upsampler after attention.
            down_sample = []
            scale = 1
            cur_channel = outChannel
            for i in range(sum(mode)):
                scale *= 2
                down_sample.append(conv_block(cur_channel, 2 * cur_channel, 3, 2, 1, _NORM_ATTN))
                cur_channel = 2 * cur_channel
            self.cur_channel = cur_channel
            self.down_sample = nn.Sequential(*down_sample)
            self.up_sample = nn.ConvTranspose2d(outChannel * scale, outChannel, scale, scale)
        else:
            self.down_sample = None
            self.up_sample = None
        # Flattened spatial size after Feature_Spectral's two stride-2, kernel-5 convs.
        spec_dim = int(uSpace[0] / 4 - 3) * int(uSpace[1] / 4 - 3)
        self.preproc = conv1x1_block(inChannel, outChannel, _NORM_ATTN)
        self.query_x = Feature_Spatial(outChannel, nHead, int(uSpace[1] / 4), uAttn[0], mode)
        self.query_y = Feature_Spatial(outChannel, nHead, int(uSpace[0] / 4), uAttn[0], mode)
        self.query_lambda = Feature_Spectral(outChannel, nHead, spec_dim, uAttn[1])
        self.key_x = Feature_Spatial(outChannel, nHead, int(uSpace[1] / 4), uAttn[0], mode)
        self.key_y = Feature_Spatial(outChannel, nHead, int(uSpace[0] / 4), uAttn[0], mode)
        self.key_lambda = Feature_Spectral(outChannel, nHead, spec_dim, uAttn[1])
        self.value = conv1x1_block(outChannel, nHead * outChannel, _NORM_ATTN)
        self.aggregation = nn.Linear(nHead * outChannel, outChannel)
    def forward(self, image):
        # Queries/keys along the X axis, Y axis, and the spectral (lambda) axis.
        feat = self.preproc(image)
        feat_qx = self.query_x(feat, 'X')
        feat_qy = self.query_y(feat, 'Y')
        feat_qlambda = self.query_lambda(feat)
        feat_kx = self.key_x(feat, 'X')
        feat_ky = self.key_y(feat, 'Y')
        feat_klambda = self.key_lambda(feat)
        feat_value = self.value(feat)
        # Fold the head dimension into the batch dimension for batched matmuls.
        feat_qx = torch.cat(torch.split(feat_qx, 1, dim=1)).squeeze(dim=1)
        feat_qy = torch.cat(torch.split(feat_qy, 1, dim=1)).squeeze(dim=1)
        feat_kx = torch.cat(torch.split(feat_kx, 1, dim=1)).squeeze(dim=1)
        feat_ky = torch.cat(torch.split(feat_ky, 1, dim=1)).squeeze(dim=1)
        feat_qlambda = torch.cat(torch.split(feat_qlambda, self.uAttn[1], dim=-1))
        feat_klambda = torch.cat(torch.split(feat_klambda, self.uAttn[1], dim=-1))
        feat_value = torch.cat(torch.split(feat_value, self.outChannel, dim=1))
        # Scaled dot-product attention maps per axis.
        energy_x = torch.bmm(feat_qx, feat_kx.permute(0, 2, 1)) / torch.sqrt(self.uSpatial)
        energy_y = torch.bmm(feat_qy, feat_ky.permute(0, 2, 1)) / torch.sqrt(self.uSpatial)
        energy_lambda = torch.bmm(feat_qlambda, feat_klambda.permute(0, 2, 1)) / torch.sqrt(self.uSpectral)
        attn_x = F.softmax(energy_x, dim=-1)
        attn_y = F.softmax(energy_y, dim=-1)
        attn_lambda = F.softmax(energy_lambda, dim=-1)
        if self.mask is not None:
            # Blend in the spectral-smoothness prior, renormalised by attn_scale.
            attn_lambda = (attn_lambda + self.mask) / torch.sqrt(self.attn_scale)
        pro_feat = feat_value if self.down_sample is None else self.down_sample(feat_value)
        batchhead, dim_c, dim_x, dim_y = pro_feat.size()
        attn_x_repeat = attn_x.unsqueeze(dim=1).repeat(1, dim_c, 1, 1).view(-1, dim_x, dim_x)
        attn_y_repeat = attn_y.unsqueeze(dim=1).repeat(1, dim_c, 1, 1).view(-1, dim_y, dim_y)
        # Apply Y-axis attention, then X-axis attention, to every channel slice.
        pro_feat = pro_feat.view(-1, dim_x, dim_y)
        pro_feat = torch.bmm(pro_feat, attn_y_repeat.permute(0, 2, 1))
        pro_feat = torch.bmm(pro_feat.permute(0, 2, 1), attn_x_repeat.permute(0, 2, 1)).permute(0, 2, 1)
        pro_feat = pro_feat.view(batchhead, dim_c, dim_x, dim_y)
        if self.up_sample is not None:
            pro_feat = self.up_sample(pro_feat)
            _, _, dim_x, dim_y = pro_feat.size()
        # Apply spectral attention across channels, then merge the heads.
        pro_feat = pro_feat.contiguous().view(batchhead, self.outChannel, -1).permute(0, 2, 1)
        pro_feat = torch.bmm(pro_feat, attn_lambda.permute(0, 2, 1)).permute(0, 2, 1)
        pro_feat = pro_feat.view(batchhead, self.outChannel, dim_x, dim_y)
        pro_feat = torch.cat(torch.split(pro_feat, int(batchhead / self.nHead), dim=0), dim=1).permute(0, 2, 3, 1)
        pro_feat = self.aggregation(pro_feat).permute(0, 3, 1, 2)
        # Gated residual connection.
        out = self.gamma * pro_feat + feat
        return out, (attn_x, attn_y, attn_lambda)
class Feature_Spatial(nn.Module):
    """ Spatial Feature Generation Component """

    # Per-mode (kernel, stride, padding) choices for the two conv layers.
    _KERNEL = [(1, 5), (3, 5)]
    _STRIDE = [(1, 2), (2, 2)]
    _PADDING = [(0, 2), (1, 2)]

    def __init__(self, inChannel, nHead, shiftDim, outDim, mode):
        super(Feature_Spatial, self).__init__()
        m0, m1 = mode
        self.conv1 = conv_block(inChannel, nHead, self._KERNEL[m0], self._STRIDE[m0], self._PADDING[m0], _NORM_ATTN)
        self.conv2 = conv_block(nHead, nHead, self._KERNEL[m1], self._STRIDE[m1], self._PADDING[m1], _NORM_ATTN)
        self.fully = fully_block(shiftDim, outDim, _NORM_FC)

    def forward(self, image, direction):
        # Transpose H/W so the same convs can scan along either spatial axis.
        if direction == 'Y':
            image = image.permute(0, 1, 3, 2)
        feat = self.conv2(self.conv1(image))
        return self.fully(feat)
class Feature_Spectral(nn.Module):
    """ Spectral Feature Generation Component """

    def __init__(self, inChannel, nHead, viewDim, outDim):
        super(Feature_Spectral, self).__init__()
        self.inChannel = inChannel
        self.conv1 = conv_block(inChannel, inChannel, 5, 2, 0, _NORM_ATTN)
        self.conv2 = conv_block(inChannel, inChannel, 5, 2, 0, _NORM_ATTN)
        self.fully = fully_block(viewDim, int(nHead * outDim), _NORM_FC)

    def forward(self, image):
        batch = image.size(0)
        feat = self.conv2(self.conv1(image))
        # Flatten the spatial grid so each band becomes one feature vector.
        feat = feat.view(batch, self.inChannel, -1)
        return self.fully(feat)
def Spectral_Mask(dim_lambda):
    """Raised-cosine band-distance prior: weight decays with spectral distance,
    peaking (=1) on the diagonal, so attention does not simply copy a band to itself."""
    profile = (np.cos(np.linspace(-1, 1, num=2 * dim_lambda - 1) * np.pi) + 1.0) / 2.0
    rows = [profile[dim_lambda - 1 - i:2 * dim_lambda - 1 - i] for i in range(dim_lambda)]
    att = np.stack(rows, axis=0)
    return torch.from_numpy(att.astype(np.float32)).unsqueeze(0)
class TSA_Net(nn.Module):
    """U-shaped encoder/decoder with TSA (spectral-spatial self-attention)
    transforms inserted on the three finest decoder levels; Sigmoid output."""

    def __init__(self, in_ch=28, out_ch=28):
        super(TSA_Net, self).__init__()
        self.tconv_down1 = Encoder_Triblock(in_ch, 64, False)
        self.tconv_down2 = Encoder_Triblock(64, 128, False)
        self.tconv_down3 = Encoder_Triblock(128, 256)
        self.tconv_down4 = Encoder_Triblock(256, 512)
        self.bottom1 = conv_block(512, 1024)
        self.bottom2 = conv_block(1024, 1024)
        self.tconv_up4 = Decoder_Triblock(1024, 512)
        self.tconv_up3 = Decoder_Triblock(512, 256)
        self.transform3 = TSA_Transform((64, 64), 256, 256, 8, (64, 80), [0, 0])
        self.tconv_up2 = Decoder_Triblock(256, 128)
        self.transform2 = TSA_Transform((128, 128), 128, 128, 8, (64, 40), [1, 0])
        self.tconv_up1 = Decoder_Triblock(128, 64)
        self.transform1 = TSA_Transform((256, 256), 64, 28, 8, (48, 30), [1, 1], True)
        self.conv_last = nn.Conv2d(out_ch, out_ch, 1)
        self.afn_last = nn.Sigmoid()

    def forward(self, x, input_mask=None):
        down1, skip1 = self.tconv_down1(x)
        down2, skip2 = self.tconv_down2(down1)
        down3, skip3 = self.tconv_down3(down2)
        down4, skip4 = self.tconv_down4(down3)
        feat = self.bottom2(self.bottom1(down4))
        feat = self.tconv_up4(feat, skip4)
        feat = self.tconv_up3(feat, skip3)
        feat, _ = self.transform3(feat)      # attention maps discarded
        feat = self.tconv_up2(feat, skip2)
        feat, _ = self.transform2(feat)
        feat = self.tconv_up1(feat, skip1)
        feat, _ = self.transform1(feat)
        return self.afn_last(self.conv_last(feat))
class Encoder_Triblock(nn.Module):
    """Encoder stage: conv -> (Res2Net | conv) -> optional max pool.
    Returns (pooled output, pre-pool features) so the latter can feed a skip link."""

    def __init__(self, inChannel, outChannel, flag_res=True, nKernal=3, nPool=2, flag_Pool=True):
        super(Encoder_Triblock, self).__init__()
        self.layer1 = conv_block(inChannel, outChannel, nKernal, flag_norm=_NORM_BONE)
        self.layer2 = (Res2Net(outChannel, int(outChannel / 4)) if flag_res
                       else conv_block(outChannel, outChannel, nKernal, flag_norm=_NORM_BONE))
        self.pool = nn.MaxPool2d(nPool) if flag_Pool else None

    def forward(self, x):
        feat = self.layer2(self.layer1(x))
        pooled = feat if self.pool is None else self.pool(feat)
        return pooled, feat
class Decoder_Triblock(nn.Module):
    """Decoder stage: transpose-conv upsample, pad-and-concat with the encoder
    skip tensor, then (Res2Net | conv) followed by a fusing conv."""

    def __init__(self, inChannel, outChannel, flag_res=True, nKernal=3, nPool=2, flag_Pool=True):
        super(Decoder_Triblock, self).__init__()
        self.layer1 = nn.Sequential(
            nn.ConvTranspose2d(inChannel, outChannel, kernel_size=2, stride=2),
            nn.ReLU(inplace=True)
        )
        self.layer2 = (Res2Net(int(outChannel * 2), int(outChannel / 2)) if flag_res
                       else conv_block(outChannel * 2, outChannel * 2, nKernal, flag_norm=_NORM_BONE))
        self.layer3 = conv_block(outChannel * 2, outChannel, nKernal, flag_norm=_NORM_BONE)

    def forward(self, feat_dec, feat_enc):
        feat_dec = self.layer1(feat_dec)
        # Pad the upsampled tensor when its spatial size lags the skip tensor.
        diffY = feat_enc.size()[2] - feat_dec.size()[2]
        diffX = feat_enc.size()[3] - feat_dec.size()[3]
        if diffY != 0 or diffX != 0:
            print('Padding for size mismatch ( Enc:', feat_enc.size(), 'Dec:', feat_dec.size(), ')')
            feat_dec = F.pad(feat_dec, [diffX // 2, diffX - diffX // 2, diffY // 2, diffY - diffY // 2])
        merged = torch.cat([feat_dec, feat_enc], dim=1)
        return self.layer3(self.layer2(merged))
MST | MST-main/real/train_code/architecture/__init__.py | import torch
from .MST import MST
from .GAP_Net import GAP_net
from .ADMM_Net import ADMM_net
from .TSA_Net import TSA_Net
from .HDNet import HDNet, FDL
from .DGSMP import HSI_CS
from .BIRNAT import BIRNAT
from .MST_Plus_Plus import MST_Plus_Plus
from .Lambda_Net import Lambda_Net
from .CST import CST
from .DAUHST import DAUHST
def model_generator(method, pretrained_model_path=None):
    """Instantiate a CASSI reconstruction network by name (on CUDA) and
    optionally load a checkpoint.

    Args:
        method: architecture key ('mst_s', 'gap_net', ..., or 'dauhst_Nstg').
        pretrained_model_path: optional .pth checkpoint; 'module.' prefixes
            left by nn.DataParallel are stripped before loading (strict=True).

    Returns:
        The model, or the tuple (model, fdl_loss) when method == 'hdnet'.

    Raises:
        ValueError: for an unknown `method`. (The original only printed a
        message and then crashed with NameError on the undefined `model`.)
    """
    if method == 'mst_s':
        model = MST(dim=28, stage=2, num_blocks=[2, 2, 2]).cuda()
    elif method == 'mst_m':
        model = MST(dim=28, stage=2, num_blocks=[2, 4, 4]).cuda()
    elif method == 'mst_l':
        model = MST(dim=28, stage=2, num_blocks=[4, 7, 5]).cuda()
    elif method == 'gap_net':
        model = GAP_net().cuda()
    elif method == 'admm_net':
        model = ADMM_net().cuda()
    elif method == 'tsa_net':
        model = TSA_Net().cuda()
    elif method == 'hdnet':
        # HDNet is paired with its frequency-domain loss module.
        model = HDNet().cuda()
        fdl_loss = FDL(loss_weight=0.7,
                       alpha=2.0,
                       patch_factor=4,
                       ave_spectrum=True,
                       log_matrix=True,
                       batch_matrix=True,
                       ).cuda()
    elif method == 'dgsmp':
        model = HSI_CS(Ch=28, stages=4).cuda()
    elif method == 'birnat':
        model = BIRNAT().cuda()
    elif method == 'mst_plus_plus':
        model = MST_Plus_Plus(in_channels=28, out_channels=28, n_feat=28, stage=3).cuda()
    elif method == 'lambda_net':
        model = Lambda_Net(out_ch=28).cuda()
    elif method == 'cst_s':
        model = CST(num_blocks=[1, 1, 2], sparse=True).cuda()
    elif method == 'cst_m':
        model = CST(num_blocks=[2, 2, 2], sparse=True).cuda()
    elif method == 'cst_l':
        model = CST(num_blocks=[2, 4, 6], sparse=True).cuda()
    elif method == 'cst_l_plus':
        model = CST(num_blocks=[2, 4, 6], sparse=False).cuda()
    elif 'dauhst' in method:
        # e.g. 'dauhst_9stg' -> 9 unfolding iterations.
        num_iterations = int(method.split('_')[1][0])
        model = DAUHST(num_iterations=num_iterations).cuda()
    else:
        raise ValueError(f'Method {method} is not defined !!!!')
    if pretrained_model_path is not None:
        print(f'load model from {pretrained_model_path}')
        checkpoint = torch.load(pretrained_model_path)
        model.load_state_dict({k.replace('module.', ''): v for k, v in checkpoint.items()},
                              strict=True)
    if method == 'hdnet':
        return model, fdl_loss
    return model
MST | MST-main/real/train_code/architecture/HDNet.py | import torch
import torch.nn as nn
import math
def default_conv(in_channels, out_channels, kernel_size, bias=True):
    """'Same'-padded Conv2d for odd kernel sizes (padding = kernel_size // 2)."""
    padding = kernel_size // 2
    return nn.Conv2d(in_channels, out_channels, kernel_size, padding=padding, bias=bias)
class MeanShift(nn.Conv2d):
    """A frozen 1x1 conv that subtracts (sign=-1) or restores (sign=+1) the
    per-channel RGB mean, dividing by the per-channel std."""

    def __init__(
        self, rgb_range,
        rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1):
        super(MeanShift, self).__init__(3, 3, kernel_size=1)
        std = torch.Tensor(rgb_std)
        self.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1)
        self.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std
        # Fixed transform — never trained.
        for param in self.parameters():
            param.requires_grad = False
class BasicBlock(nn.Sequential):
    """conv -> [BatchNorm] -> [activation], packaged as an nn.Sequential.
    (`stride` is accepted for API compatibility but unused, as in the original.)"""

    def __init__(
        self, conv, in_channels, out_channels, kernel_size, stride=1, bias=False,
        bn=True, act=nn.ReLU(True)):
        layers = [conv(in_channels, out_channels, kernel_size, bias=bias)]
        if bn:
            layers.append(nn.BatchNorm2d(out_channels))
        if act is not None:
            layers.append(act)
        super(BasicBlock, self).__init__(*layers)
class ResBlock(nn.Module):
    """Residual block: x + res_scale * body(x), where body is
    conv -> [BN] -> act -> conv -> [BN]."""

    def __init__(
        self, conv, n_feats, kernel_size,
        bias=True, bn=False, act=nn.ReLU(True), res_scale=1):
        super(ResBlock, self).__init__()
        layers = []
        for idx in range(2):
            layers.append(conv(n_feats, n_feats, kernel_size, bias=bias))
            if bn:
                layers.append(nn.BatchNorm2d(n_feats))
            if idx == 0:
                layers.append(act)
        self.body = nn.Sequential(*layers)
        # Scalar multiplier on the residual branch before the skip-add.
        self.res_scale = res_scale

    def forward(self, x):
        return x + self.body(x).mul(self.res_scale)
class Upsampler(nn.Sequential):
    """PixelShuffle upsampler supporting scales 2^n (n x2-stages) or 3 (one x3 stage);
    any other scale raises NotImplementedError."""

    def __init__(self, conv, scale, n_feats, bn=False, act=False, bias=True):
        if (scale & (scale - 1)) == 0:   # power of two
            stages, factor = int(math.log(scale, 2)), 2
        elif scale == 3:
            stages, factor = 1, 3
        else:
            raise NotImplementedError
        layers = []
        for _ in range(stages):
            layers.append(conv(n_feats, factor * factor * n_feats, 3, bias))
            layers.append(nn.PixelShuffle(factor))
            if bn:
                layers.append(nn.BatchNorm2d(n_feats))
            if act == 'relu':
                layers.append(nn.ReLU(True))
            elif act == 'prelu':
                layers.append(nn.PReLU(n_feats))
        super(Upsampler, self).__init__(*layers)
_NORM_BONE = False
def constant_init(module, val, bias=0):
    """Fill module.weight with `val` and module.bias with `bias`, when present."""
    weight = getattr(module, 'weight', None)
    if weight is not None:
        nn.init.constant_(weight, val)
    bias_param = getattr(module, 'bias', None)
    if bias_param is not None:
        nn.init.constant_(bias_param, bias)
def kaiming_init(module,
                 a=0,
                 mode='fan_out',
                 nonlinearity='relu',
                 bias=0,
                 distribution='normal'):
    """Kaiming-initialize module.weight (uniform or normal flavour) and
    constant-fill module.bias when it exists."""
    assert distribution in ['uniform', 'normal']
    init_fn = (nn.init.kaiming_uniform_ if distribution == 'uniform'
               else nn.init.kaiming_normal_)
    init_fn(module.weight, a=a, mode=mode, nonlinearity=nonlinearity)
    if getattr(module, 'bias', None) is not None:
        nn.init.constant_(module.bias, bias)
# depthwise-separable convolution (DSC)
class DSC(nn.Module):
def __init__(self, nin: int) -> None:
super(DSC, self).__init__()
self.conv_dws = nn.Conv2d(
nin, nin, kernel_size=1, stride=1, padding=0, groups=nin
)
self.bn_dws = nn.BatchNorm2d(nin, momentum=0.9)
self.relu_dws = nn.ReLU(inplace=False)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.conv_point = nn.Conv2d(
nin, 1, kernel_size=1, stride=1, padding=0, groups=1
)
self.bn_point = nn.BatchNorm2d(1, momentum=0.9)
self.relu_point = nn.ReLU(inplace=False)
self.softmax = nn.Softmax(dim=2)
def forward(self, x: torch.Tensor) -> torch.Tensor:
out = self.conv_dws(x)
out = self.bn_dws(out)
out = self.relu_dws(out)
out = self.maxpool(out)
out = self.conv_point(out)
out = self.bn_point(out)
out = self.relu_point(out)
m, n, p, q = out.shape
out = self.softmax(out.view(m, n, -1))
out = out.view(m, n, p, q)
out = out.expand(x.shape[0], x.shape[1], x.shape[2], x.shape[3])
out = torch.mul(out, x)
out = out + x
return out
# Efficient Feature Fusion(EFF)
class EFF(nn.Module):
    """Efficient Feature Fusion: split channels into `num_splits` equal groups,
    run an independent DSC on each, and concatenate the results back."""

    def __init__(self, nin: int, nout: int, num_splits: int) -> None:
        super(EFF, self).__init__()
        assert nin % num_splits == 0
        self.nin = nin
        self.nout = nout
        self.num_splits = num_splits
        width = nin // num_splits
        self.subspaces = nn.ModuleList(DSC(width) for _ in range(num_splits))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        chunks = torch.chunk(x, self.num_splits, dim=1)
        fused = [branch(chunk) for branch, chunk in zip(self.subspaces, chunks)]
        return torch.cat(fused, dim=1)
# spatial-spectral domain attention learning(SDL)
class SDL_attention(nn.Module):
    """Spatial-spectral domain attention: combines a channel-gating branch
    (spatial_attention) and a position-gating branch (spectral_attention),
    summing the two gated outputs."""
    def __init__(self, inplanes, planes, kernel_size=1, stride=1):
        super(SDL_attention, self).__init__()
        self.inplanes = inplanes
        self.inter_planes = planes // 2
        self.planes = planes
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = (kernel_size-1)//2
        # Right-hand path: spatial context -> per-channel sigmoid gate.
        self.conv_q_right = nn.Conv2d(self.inplanes, 1, kernel_size=1, stride=stride, padding=0, bias=False)
        self.conv_v_right = nn.Conv2d(self.inplanes, self.inter_planes, kernel_size=1, stride=stride, padding=0, bias=False)
        self.conv_up = nn.Conv2d(self.inter_planes, self.planes, kernel_size=1, stride=1, padding=0, bias=False)
        self.softmax_right = nn.Softmax(dim=2)
        self.sigmoid = nn.Sigmoid()
        # Left-hand path: channel context -> per-position sigmoid gate.
        self.conv_q_left = nn.Conv2d(self.inplanes, self.inter_planes, kernel_size=1, stride=stride, padding=0, bias=False)   #g
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv_v_left = nn.Conv2d(self.inplanes, self.inter_planes, kernel_size=1, stride=stride, padding=0, bias=False)   #theta
        self.softmax_left = nn.Softmax(dim=2)
        self.reset_parameters()
    def reset_parameters(self):
        # fan_in Kaiming init for all four projection convs.
        kaiming_init(self.conv_q_right, mode='fan_in')
        kaiming_init(self.conv_v_right, mode='fan_in')
        kaiming_init(self.conv_q_left, mode='fan_in')
        kaiming_init(self.conv_v_left, mode='fan_in')
        self.conv_q_right.inited = True
        self.conv_v_right.inited = True
        self.conv_q_left.inited = True
        self.conv_v_left.inited = True
    # HR spatial attention
    def spatial_attention(self, x):
        # Value projection flattened over positions: (B, C', H*W).
        input_x = self.conv_v_right(x)
        batch, channel, height, width = input_x.size()
        input_x = input_x.view(batch, channel, height * width)
        # Softmax over positions gives a spatial context distribution.
        context_mask = self.conv_q_right(x)
        context_mask = context_mask.view(batch, 1, height * width)
        context_mask = self.softmax_right(context_mask)
        # Aggregate values under the mask, expand back to `planes` channels,
        # and gate the input per channel.
        context = torch.matmul(input_x, context_mask.transpose(1,2))
        context = context.unsqueeze(-1)
        context = self.conv_up(context)
        mask_ch = self.sigmoid(context)
        out = x * mask_ch
        return out
    # HR spectral attention
    def spectral_attention(self, x):
        g_x = self.conv_q_left(x)
        batch, channel, height, width = g_x.size()
        # Global average over space yields one weight per reduced channel.
        avg_x = self.avg_pool(g_x)
        batch, channel, avg_x_h, avg_x_w = avg_x.size()
        avg_x = avg_x.view(batch, channel, avg_x_h * avg_x_w).permute(0, 2, 1)
        theta_x = self.conv_v_left(x).view(batch, self.inter_planes, height * width)
        # Channel-weighted combination over positions -> per-position gate.
        context = torch.matmul(avg_x, theta_x)
        context = self.softmax_left(context)
        context = context.view(batch, 1, height, width)
        mask_sp = self.sigmoid(context)
        out = x * mask_sp
        return out
    def forward(self, x):
        context_spectral = self.spectral_attention(x)
        context_spatial = self.spatial_attention(x)
        out = context_spatial + context_spectral
        return out
class HDNet(nn.Module):
    """EDSR-style backbone: head conv, a residual body of 16 + 15 ResBlocks with
    SDL and EFF attention modules inserted between them, and a tail conv."""

    def __init__(self, in_ch=28, out_ch=28, conv=default_conv):
        super(HDNet, self).__init__()
        n_resblocks = 16
        n_feats = 64
        kernel_size = 3
        act = nn.ReLU(True)

        self.head = nn.Sequential(conv(in_ch, n_feats, kernel_size))

        body = [ResBlock(conv, n_feats, kernel_size, act=act, res_scale=1)
                for _ in range(n_resblocks)]
        body.append(SDL_attention(inplanes=n_feats, planes=n_feats))
        body.append(EFF(nin=n_feats, nout=n_feats, num_splits=4))
        # Second residual stretch: n_resblocks - 1 blocks.
        body.extend(ResBlock(conv, n_feats, kernel_size, act=act, res_scale=1)
                    for _ in range(1, n_resblocks))
        body.append(conv(n_feats, n_feats, kernel_size))
        self.body = nn.Sequential(*body)

        self.tail = nn.Sequential(conv(n_feats, out_ch, kernel_size))

    def forward(self, x, input_mask=None):
        feat = self.head(x)
        # Long skip over the whole body.
        feat = self.body(feat) + feat
        return self.tail(feat)
# frequency domain learning(FDL)
class FDL(nn.Module):
    """Focal Frequency-Domain Loss: per-frequency squared errors weighted by a
    (focal) spectrum-difference matrix.

    Bug fix: `torch.rfft` was removed in PyTorch 1.8, so the original crashed
    on modern builds. The spectrum is now computed with
    `torch.fft.fft2(..., norm='ortho')` + `torch.view_as_real`, which matches
    `torch.rfft(y, 2, onesided=False, normalized=True)` exactly; a fallback
    keeps legacy PyTorch working.
    """

    def __init__(self, loss_weight=1.0, alpha=1.0, patch_factor=1, ave_spectrum=False, log_matrix=False, batch_matrix=False):
        super(FDL, self).__init__()
        self.loss_weight = loss_weight    # global scale on the returned loss
        self.alpha = alpha                # focal exponent for the weight matrix
        self.patch_factor = patch_factor  # split each image into pf x pf patches
        self.ave_spectrum = ave_spectrum  # average spectra over the batch first
        self.log_matrix = log_matrix      # log-compress the weight matrix
        self.batch_matrix = batch_matrix  # normalise weights by the global max

    @staticmethod
    def _fft2(y):
        """Two-sided orthonormal 2-D FFT as a (..., 2) real tensor."""
        fft_mod = getattr(torch, 'fft', None)
        if fft_mod is not None and not callable(fft_mod) and hasattr(fft_mod, 'fft2'):
            # PyTorch >= 1.8: torch.fft is a module.
            return torch.view_as_real(fft_mod.fft2(y, norm='ortho'))
        # Legacy PyTorch: torch.rfft still exists (torch.fft was a function).
        return torch.rfft(y, 2, onesided=False, normalized=True)

    def tensor2freq(self, x):
        """Split x into patch_factor^2 patches and return their spectra."""
        patch_factor = self.patch_factor
        _, _, h, w = x.shape
        assert h % patch_factor == 0 and w % patch_factor == 0, (
            'Patch factor should be divisible by image height and width')
        patch_list = []
        patch_h = h // patch_factor
        patch_w = w // patch_factor
        for i in range(patch_factor):
            for j in range(patch_factor):
                patch_list.append(x[:, :, i * patch_h:(i + 1) * patch_h, j * patch_w:(j + 1) * patch_w])
        y = torch.stack(patch_list, 1)
        return self._fft2(y)

    def loss_formulation(self, recon_freq, real_freq, matrix=None):
        """Weighted mean squared distance between two (..., 2) spectra."""
        if matrix is not None:
            weight_matrix = matrix.detach()
        else:
            # Focal weight: magnitude of the spectrum difference, alpha-powered,
            # optionally log-compressed, then normalised into [0, 1].
            matrix_tmp = (recon_freq - real_freq) ** 2
            matrix_tmp = torch.sqrt(matrix_tmp[..., 0] + matrix_tmp[..., 1]) ** self.alpha
            if self.log_matrix:
                matrix_tmp = torch.log(matrix_tmp + 1.0)
            if self.batch_matrix:
                matrix_tmp = matrix_tmp / matrix_tmp.max()
            else:
                matrix_tmp = matrix_tmp / matrix_tmp.max(-1).values.max(-1).values[:, :, :, None, None]
            # 0/0 divisions (identical spectra) become NaN -> zero weight.
            matrix_tmp[torch.isnan(matrix_tmp)] = 0.0
            matrix_tmp = torch.clamp(matrix_tmp, min=0.0, max=1.0)
            weight_matrix = matrix_tmp.clone().detach()
        assert weight_matrix.min().item() >= 0 and weight_matrix.max().item() <= 1, (
            'The values of spectrum weight matrix should be in the range [0, 1], '
            'but got Min: %.10f Max: %.10f' % (weight_matrix.min().item(), weight_matrix.max().item()))
        tmp = (recon_freq - real_freq) ** 2
        freq_distance = tmp[..., 0] + tmp[..., 1]
        loss = weight_matrix * freq_distance
        return torch.mean(loss)

    def forward(self, pred, target, matrix=None, **kwargs):
        """Compute the loss; `matrix` optionally supplies a precomputed weight."""
        pred_freq = self.tensor2freq(pred)
        target_freq = self.tensor2freq(target)
        if self.ave_spectrum:
            pred_freq = torch.mean(pred_freq, 0, keepdim=True)
            target_freq = torch.mean(target_freq, 0, keepdim=True)
        return self.loss_formulation(pred_freq, target_freq, matrix) * self.loss_weight
| 12,665 | 33.048387 | 132 | py |
MST | MST-main/real/test_code/test.py | import torch
import os
import argparse
from utils import dataparallel
import scipy.io as sio
import numpy as np
from torch.autograd import Variable
# Pin GPU ordering/visibility before any CUDA initialisation happens.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# Command-line configuration for the real-data evaluation script.
parser = argparse.ArgumentParser(description="PyTorch HSIFUSION")
parser.add_argument('--data_path', default='./Data/Testing_data/', type=str,help='path of data')
parser.add_argument('--mask_path', default='./Data/mask.mat', type=str,help='path of mask')
parser.add_argument("--size", default=660, type=int, help='the size of trainset image')
parser.add_argument("--trainset_num", default=2000, type=int, help='total number of trainset')
parser.add_argument("--testset_num", default=5, type=int, help='total number of testset')
parser.add_argument("--seed", default=1, type=int, help='Random_seed')
parser.add_argument("--batch_size", default=1, type=int, help='batch_size')
# NOTE(review): type=bool is an argparse pitfall — any non-empty string
# (including "False") parses as True; an explicit str2bool is safer.
parser.add_argument("--isTrain", default=False, type=bool, help='train or test')
parser.add_argument("--pretrained_model_path", default=None, type=str)
opt = parser.parse_args()
print(opt)
def prepare_data(path, file_num):
    """Load `file_num` real measurements ('sceneN.mat', key 'meas_real') into a
    (660, 714, file_num) array, clipped to [0, 1]."""
    HR_HSI = np.zeros((660, 714, file_num))
    for idx in range(file_num):
        mat_file = os.path.join(path) + 'scene' + str(idx + 1) + '.mat'
        HR_HSI[:, :, idx] = sio.loadmat(mat_file)['meas_real']
    np.clip(HR_HSI, 0.0, 1.0, out=HR_HSI)
    return HR_HSI
def load_mask(path,size=660):
    """Build the dispersion-shifted 3-D mask Phi and its band-wise squared sum Phi_s.

    Loads the 2-D mask from `path` (key 'mask'), tiles it over 28 bands, rolls
    band t by 2*t pixels along the width, and replaces zeros in Phi_s with 1 so
    later divisions are safe.

    Returns:
        (1, 28, size, size + 54) Phi and (1, size, size + 54) Phi_s tensors.
    """
    mask = sio.loadmat(path)['mask']
    mask_3d = np.tile(mask[:, :, np.newaxis], (1, 1, 28))
    shifted = np.zeros((size, size + (28 - 1) * 2, 28))
    shifted[:, 0:size, :] = mask_3d
    for band in range(28):
        shifted[:, :, band] = np.roll(shifted[:, :, band], 2 * band, axis=1)
    shifted_s = np.sum(shifted ** 2, axis=2, keepdims=False)
    shifted_s[shifted_s == 0] = 1
    Phi = torch.FloatTensor(shifted.copy()).permute(2, 0, 1).unsqueeze(0)
    Phi_s = torch.FloatTensor(shifted_s.copy()).unsqueeze(0)
    return Phi, Phi_s
# ---- Evaluation: reconstruct the five real scenes and save .mat results ----
HR_HSI = prepare_data(opt.data_path, 5)
mask_3d_shift, mask_3d_shift_s = load_mask('./Data/mask.mat')
# NOTE(review): this hard-coded checkpoint path silently overrides
# opt.pretrained_model_path parsed above — confirm which is intended.
pretrained_model_path = "/data/lj/exp/hsi/nips2022/dgsmp_real_exp/exp8/hdnet_p384_b1_cosine/2022_05_13_23_05_15/model_150.pth"
model = torch.load(pretrained_model_path)
model = model.eval()
model = dataparallel(model, 1)
psnr_total = 0
k = 0
for j in range(5):
    with torch.no_grad():
        meas = HR_HSI[:,:,j]
        # Scale each measurement so its peak value is 0.8.
        meas = meas / meas.max() * 0.8
        meas = torch.FloatTensor(meas)
        # meas = torch.FloatTensor(meas).unsqueeze(2).permute(2, 0, 1)
        input = meas.unsqueeze(0)
        input = Variable(input)
        input = input.cuda()
        mask_3d_shift = mask_3d_shift.cuda()
        mask_3d_shift_s = mask_3d_shift_s.cuda()
        # NOTE(review): the architectures in this repo take (x, input_mask);
        # passing the two mask tensors as separate positional args only works
        # if the checkpointed model's forward accepts three args — confirm.
        out = model(input, mask_3d_shift, mask_3d_shift_s)
        result = out
        result = result.clamp(min=0., max=1.)
        k = k + 1
        # NOTE(review): `save_path` is never defined in this script, so the
        # first iteration raises NameError here — define it before the loop.
        if not os.path.exists(save_path):  # Create the model directory if it doesn't exist
            os.makedirs(save_path)
        res = result.cpu().permute(2,3,1,0).squeeze(3).numpy()
        save_file = save_path + f'{j}.mat'
        sio.savemat(save_file, {'res':res})
| 3,319 | 40.5 | 126 | py |
MST | MST-main/real/test_code/utils.py | import numpy as np
import scipy.io as sio
import os
import glob
import re
import torch
import torch.nn as nn
import math
import random
def _as_floats(im1, im2):
float_type = np.result_type(im1.dtype, im2.dtype, np.float32)
im1 = np.asarray(im1, dtype=float_type)
im2 = np.asarray(im2, dtype=float_type)
return im1, im2
def compare_mse(im1, im2):
im1, im2 = _as_floats(im1, im2)
return np.mean(np.square(im1 - im2), dtype=np.float64)
def compare_psnr(im_true, im_test, data_range=None):
im_true, im_test = _as_floats(im_true, im_test)
err = compare_mse(im_true, im_test)
return 10 * np.log10((data_range ** 2) / err)
def psnr(img1, img2):
    """PSNR between two uint8-range images (normalised by 255 internally);
    returns 100 for (near-)identical inputs."""
    mse = np.mean((img1 / 255. - img2 / 255.) ** 2)
    if mse < 1.0e-10:
        return 100
    # Peak value is 1 after the /255 normalisation.
    return 20 * math.log10(1 / math.sqrt(mse))
def PSNR_GPU(im_true, im_fake):
    """PSNR in dB between two [0, 1] tensors of shape (C, H, W), after
    quantising both to the 0..255 range.

    NOTE(review): `im_true *= 255` / `im_fake *= 255` scale the CALLER's
    tensors in place — confirm callers do not reuse them afterwards.
    NOTE(review): nn.MSELoss(reduce=False) is deprecated; modern PyTorch
    spells this reduction='none'.
    """
    im_true *= 255
    im_fake *= 255
    im_true = im_true.round()
    im_fake = im_fake.round()
    data_range = 255
    esp = 1e-12  # guards against division by zero for identical images
    C = im_true.size()[0]
    H = im_true.size()[1]
    W = im_true.size()[2]
    Itrue = im_true.clone()
    Ifake = im_fake.clone()
    mse = nn.MSELoss(reduce=False)
    err = mse(Itrue, Ifake).sum() / (C*H*W)
    psnr = 10. * np.log((data_range**2)/(err.data + esp)) / np.log(10.)
    return psnr
def PSNR_Nssr(im_true, im_fake):
    """PSNR (dB) for images on a [0, 1] scale."""
    diff = im_true - im_fake
    mean_sq = (diff ** 2).mean()
    return 10. * np.log10(1 / mean_sq)
def dataparallel(model, ngpus, gpu0=0):
    """Place *model* on GPUs ``gpu0 .. gpu0+ngpus-1``.

    Wraps the model in ``torch.nn.DataParallel`` when more than one GPU
    is requested. CPU-only execution is not supported (``ngpus == 0``
    triggers an assertion).
    """
    if ngpus == 0:
        assert False, "only support gpu mode"
    device_ids = list(range(gpu0, gpu0 + ngpus))
    # Fail fast if the machine does not expose enough devices.
    assert torch.cuda.device_count() >= gpu0 + ngpus
    if ngpus > 1:
        if isinstance(model, torch.nn.DataParallel):
            model = model.cuda()
        else:
            model = torch.nn.DataParallel(model, device_ids).cuda()
    elif ngpus == 1:
        model = model.cuda()
    return model
def findLastCheckpoint(save_dir):
    """Return the highest epoch among ``model_<epoch>.pth`` files in *save_dir*.

    Returns 0 when the directory holds no matching checkpoint.
    """
    checkpoints = glob.glob(os.path.join(save_dir, 'model_*.pth'))
    epochs = [int(re.findall(".*model_(.*).pth.*", name)[0])
              for name in checkpoints]
    return max(epochs) if epochs else 0
# load HSIs
def prepare_data(path, file_num, height=512, width=512, bands=28):
    """Load ``scene%02d.mat`` hyperspectral cubes into one array.

    Fixes the original path construction ``os.path.join(path) + name``,
    which silently produced a broken path unless *path* ended with a
    separator. The cube dimensions are now parameters (defaults keep the
    original 512x512x28 behaviour).

    Args:
        path: directory containing ``scene01.mat`` ... ``sceneNN.mat``,
            each with a ``data_slice`` array scaled to [0, 65535].
        file_num: number of scenes to load.
        height, width, bands: cube dimensions per scene.

    Returns:
        float64 array of shape (height, width, bands, file_num), clipped
        to [0, 1].
    """
    HR_HSI = np.zeros((height, width, bands, file_num))
    for idx in range(file_num):
        mat_path = os.path.join(path, 'scene%02d.mat' % (idx + 1))
        data = sio.loadmat(mat_path)
        HR_HSI[:, :, :, idx] = data['data_slice'] / 65535.0
    # Equivalent to the original pair of boolean-mask assignments.
    np.clip(HR_HSI, 0., 1., out=HR_HSI)
    return HR_HSI
def loadpath(pathlistfile):
    """Read a newline-separated list of paths and return it shuffled."""
    with open(pathlistfile) as fp:
        pathlist = fp.read().splitlines()
    random.shuffle(pathlist)
    return pathlist
def time2file_name(time):
    """Turn a ``YYYY-MM-DD hh:mm:ss`` timestamp into ``YYYY_MM_DD_hh_mm_ss``."""
    fields = (time[0:4],    # year
              time[5:7],    # month
              time[8:10],   # day
              time[11:13],  # hour
              time[14:16],  # minute
              time[17:19])  # second
    return '_'.join(fields)
# def prepare_data_cave(path, file_list, file_num):
# HR_HSI = np.zeros((((512,512,28,file_num))))
# for idx in range(file_num):
# #### read HrHSI
# HR_code = file_list[idx]
# path1 = os.path.join(path) + HR_code + '.mat'
# data = sio.loadmat(path1)
# HR_HSI[:,:,:,idx] = data['data_slice'] / 65535.0
# HR_HSI[HR_HSI < 0] = 0
# HR_HSI[HR_HSI > 1] = 1
# return HR_HSI
#
# def prepare_data_KAIST(path, file_list, file_num):
# HR_HSI = np.zeros((((2704,3376,28,file_num))))
# for idx in range(file_num):
# #### read HrHSI
# HR_code = file_list[idx]
# path1 = os.path.join(path) + HR_code + '.mat'
# data = sio.loadmat(path1)
# HR_HSI[:,:,:,idx] = data['HSI']
# HR_HSI[HR_HSI < 0] = 0
# HR_HSI[HR_HSI > 1] = 1
# return HR_HSI
def prepare_data_cave(path, file_num, height=512, width=512, bands=28):
    """Load the first *file_num* CAVE ``.mat`` files found in *path*.

    Fixes the original ``os.path.join(path) + name`` concatenation,
    which broke unless *path* ended with a separator. Cube dimensions
    are parameterised (defaults preserve the original 512x512x28).

    Note: files are taken in ``os.listdir`` order, which is arbitrary —
    as in the original code.

    Args:
        path: directory of CAVE ``.mat`` files with a ``data_slice``
            array scaled to [0, 65535].
        file_num: number of files to load.
        height, width, bands: cube dimensions per scene.

    Returns:
        float64 array of shape (height, width, bands, file_num),
        clipped to [0, 1].
    """
    HR_HSI = np.zeros((height, width, bands, file_num))
    file_list = os.listdir(path)
    for idx in range(file_num):
        print(f'loading CAVE {idx}')
        mat_path = os.path.join(path, file_list[idx])
        data = sio.loadmat(mat_path)
        HR_HSI[:, :, :, idx] = data['data_slice'] / 65535.0
    np.clip(HR_HSI, 0, 1, out=HR_HSI)
    return HR_HSI
def prepare_data_KAIST(path, file_num, height=2704, width=3376, bands=28):
    """Load the first *file_num* KAIST ``.mat`` files found in *path*.

    Fixes the original ``os.path.join(path) + name`` concatenation,
    which broke unless *path* ended with a separator. Cube dimensions
    are parameterised (defaults preserve the original 2704x3376x28).

    Note: files are taken in ``os.listdir`` order, which is arbitrary —
    as in the original code. Unlike the CAVE loader, the ``HSI`` arrays
    are assumed to already be on a [0, 1] scale (no 65535 division).

    Args:
        path: directory of KAIST ``.mat`` files with an ``HSI`` array.
        file_num: number of files to load.
        height, width, bands: cube dimensions per scene.

    Returns:
        float64 array of shape (height, width, bands, file_num),
        clipped to [0, 1].
    """
    HR_HSI = np.zeros((height, width, bands, file_num))
    file_list = os.listdir(path)
    for idx in range(file_num):
        print(f'loading KAIST {idx}')
        mat_path = os.path.join(path, file_list[idx])
        data = sio.loadmat(mat_path)
        HR_HSI[:, :, :, idx] = data['HSI']
    np.clip(HR_HSI, 0, 1, out=HR_HSI)
    return HR_HSI
def init_mask(mask, Phi, Phi_s, mask_type):
    """Select the mask representation to feed the model.

    Args:
        mask: raw mask.
        Phi: shifted sensing matrix.
        Phi_s: normalisation term derived from Phi.
        mask_type: one of 'Phi', 'Phi_PhiPhiT', 'Mask', or None.

    Returns:
        Phi, ``(Phi, Phi_s)``, mask, or None according to *mask_type*.

    Raises:
        ValueError: for an unrecognised *mask_type* (the original fell
            through and crashed with UnboundLocalError).
    """
    if mask_type == 'Phi':
        return Phi
    if mask_type == 'Phi_PhiPhiT':
        return (Phi, Phi_s)
    if mask_type == 'Mask':
        return mask
    if mask_type is None:  # original compared with '== None'
        return None
    raise ValueError(f"unknown mask_type: {mask_type!r}")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.