code stringlengths 17 6.64M |
|---|
def MLPTrain(X_MLP_transductive_train, Y_MLP_transductive_train, num_epochs):
    """Restore the transductive MLP's saved weights, then fit on the given split.

    NOTE(review): `MLP_transductive_model`, `dataset_name` and `batch_size`
    are read from module scope — confirm they are defined before calling.
    """
    checkpoint = 'models/' + dataset_name + '/MLP_transductive_model.h5'
    # Reload the saved initial weights so every run starts from the same state.
    MLP_transductive_model.load_weights(checkpoint)
    history = MLP_transductive_model.fit(
        X_MLP_transductive_train,
        Y_MLP_transductive_train,
        epochs=num_epochs,
        batch_size=batch_size,
        shuffle=True,
        validation_split=0,
        verbose=0,
    )
|
def DeepSetsTrain(X_deepset_transductive_train, Y_deepset_transductive_train, num_epochs):
    """Restore the Deep Sets model's saved weights, then fit on the given split.

    NOTE(review): `deepsets_transductive_model`, `dataset_name` and
    `batch_size` come from module scope — confirm they are defined.
    """
    weights_file = 'models/' + dataset_name + '/deepsets_transductive_model.h5'
    deepsets_transductive_model.load_weights(weights_file)
    history = deepsets_transductive_model.fit(
        X_deepset_transductive_train,
        Y_deepset_transductive_train,
        epochs=num_epochs,
        batch_size=batch_size,
        shuffle=True,
        validation_split=0,
        verbose=0,
    )
|
def testModel(model, X_tst, Y_tst):
    """Evaluate ``model`` on one-hot labelled test data and print a report.

    The report is also appended to the module-level ``reports`` list.
    """
    from sklearn.metrics import classification_report, accuracy_score
    target_names = ['Neural Networks', 'Case Based', 'Reinforcement Learning', 'Probabilistic Methods', 'Genetic Algorithms', 'Rule Learning', 'Theory']
    y_pred = model.predict(X_tst, batch_size=16, verbose=0)
    # Reduce each predicted probability row to the index of its first strict
    # maximum (comparison starts at 0, mirroring the original behaviour).
    finals_pred = []
    for row in y_pred:
        best_score = 0
        best_index = 0
        for position, score in enumerate(row):
            if score > best_score:
                best_score = score
                best_index = position
        finals_pred.append(best_index)
    # Decode one-hot ground-truth rows back to class indices.
    finals_test = []
    for row in Y_tst:
        for position, value in enumerate(row):
            if value == 1:
                finals_test.append(position)
    report = classification_report(finals_test, finals_pred, target_names=target_names, digits=4)
    reports.append(report)
    print(report)
|
def RunAllTests(percentTraining, num_times, num_epochs):
    """Run repeated train/evaluate cycles for the deep-hyperedges model.

    Parameters
    ----------
    percentTraining: float in (0, 1)
        Fraction of the data used for training.
    num_times: int
        Number of independent runs.
    num_epochs: int
        Kept for interface compatibility; training epochs are actually read
        from the module-level ``num_epochs`` inside ``hyperedgesTrain`` —
        TODO confirm the module-level value matches.
    """
    for i in range(num_times):
        print('percent: ', percentTraining, ', iteration: ', (i + 1), ', model: deep hyperedges')
        (X, Y) = getTrainingData()
        (X_train, X_test, Y_train, Y_test) = train_test_split(X, Y, train_size=percentTraining, test_size=(1 - percentTraining))
        # BUG FIX: hyperedgesTrain accepts only (X, Y); the original passed
        # num_epochs as a third positional argument, raising a TypeError.
        hyperedgesTrain(X_train, Y_train)
        testModel(deephyperedges_transductive_model, X_test, Y_test)
|
def getFeaturesTrainingData():
    """Build a shuffled (features, labels) dataset from per-vertex features.

    Reads the module-level graph ``G``, ``vertex_features`` and
    ``vertex_labels``; returns a pair of aligned numpy arrays.
    """
    # Dead locals `i` and `vertex_embedding_list` from the original removed.
    lists = []
    labels = []
    for vertex in G.nodes:
        lists.append({'f': vertex_features[vertex].tolist()})
        labels.append(vertex_labels[vertex])
    X_unshuffled = []
    for hlist in lists:
        x = np.zeros((feature_dimension,))
        x[:feature_dimension] = hlist['f']
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    # shuffle() keeps features and labels aligned while permuting rows.
    (X_arr, Y_arr) = shuffle(X_unshuffled, labels)
    X_Features = np.asarray(X_arr)
    Y_Features = np.asarray(Y_arr)
    return (X_Features, Y_Features)
|
def getTrainingData():
    """Assemble the deep-hyperedges dataset: one feature row per hyperedge.

    Each row concatenates [hyperedge embedding | member-vertex embeddings
    padded to max_groupsize | hyperedge features]; labels are one-hot
    category vectors. Returns a shuffled (X, Y) pair of numpy arrays.
    """
    i = 0
    lists = []
    labels = []
    for h in hyperedges:
        vertex_embedding_list = []
        hyperedge = hyperedges[h]
        for vertex in hyperedge['members']:
            i += 1
            if (i % 100000) == 0:
                print(i)  # progress heartbeat for large hypergraphs
            # BUG FIX: was a bare `except:` that masked every error; only
            # swallow the lookup failures (vertex absent from vertex_ids).
            try:
                vertex_embedding_list.append(vertex_embeddings[vertex_ids.index(vertex)].tolist())
            except (ValueError, IndexError):
                print('Missed one: ', vertex)
        lists.append({'v': vertex_embedding_list, 'h': hyperedge_embeddings[hyperedge_ids.index(h)].tolist(), 'f': vertex_features[h].tolist()})
        # One-hot label; categories appear to be 1-based, hence the -1.
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for hlist in lists:
        np_vertex_embeddings = np.asarray(hlist['v'])
        # Row layout: [hyperedge emb | vertex embs (padded) | features]
        x = np.zeros((hyperedge_embedding_dimension + (vertex_embedding_dimension * max_groupsize) + feature_dimension,))
        x[:hyperedge_embedding_dimension] = hlist['h']
        x[hyperedge_embedding_dimension + (vertex_embedding_dimension * max_groupsize):] = hlist['f']
        for slot, embedding in enumerate(np_vertex_embeddings):
            start = hyperedge_embedding_dimension + (slot * embedding.shape[0])
            x[start:start + embedding.shape[0]] = embedding
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    (X_arr, Y_arr) = shuffle(X_unshuffled, labels)
    X = np.asarray(X_arr)
    Y = np.asarray(Y_arr)
    return (X, Y)
|
def getMLPTrainingData():
    """Build the MLP dataset: [hyperedge embedding | features] per hyperedge.

    Labels are one-hot category vectors; returns a shuffled (X, Y) pair.
    """
    # Dead locals `i`, `maxi` and `vertex_embedding_list` removed.
    lists = []
    labels = []
    for h in hyperedges:
        hyperedge = hyperedges[h]
        lists.append({'h': hyperedge_embeddings[hyperedge_ids.index(h)].tolist(), 'f': vertex_features[h].tolist()})
        # One-hot label; categories appear to be 1-based, hence the -1.
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for hlist in lists:
        x = np.zeros((hyperedge_embedding_dimension + feature_dimension,))
        x[:hyperedge_embedding_dimension] = hlist['h']
        x[hyperedge_embedding_dimension:] = hlist['f']
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    (X_arr, Y_arr) = shuffle(X_unshuffled, labels)
    X_MLP = np.asarray(X_arr)
    Y_MLP = np.asarray(Y_arr)
    return (X_MLP, Y_MLP)
|
def getDSTrainingData():
    """Build the Deep Sets dataset: [padded vertex embeddings | features].

    Labels are one-hot category vectors; returns a shuffled (X, Y) pair.
    """
    i = 0
    lists = []
    labels = []
    # Dead local `maxi` removed.
    for h in hyperedges:
        vertex_embedding_list = []
        hyperedge = hyperedges[h]
        for vertex in hyperedge['members']:
            i += 1
            if (i % 100000) == 0:
                print(i)  # progress heartbeat for large hypergraphs
            # BUG FIX: bare `except:` narrowed to the lookup failures only.
            try:
                vertex_embedding_list.append(vertex_embeddings[vertex_ids.index(vertex)].tolist())
            except (ValueError, IndexError):
                print('Missed one: ', vertex)
        lists.append({'v': vertex_embedding_list, 'f': vertex_features[h].tolist()})
        # BUG FIX: removed stray no-op statement `lists.append` (a bare
        # attribute access that did nothing).
        label = np.zeros((num_categories,))
        label[int(hyperedge['category']) - 1] = 1
        labels.append(label)
    X_unshuffled = []
    for hlist in lists:
        np_vertex_embeddings = np.asarray(hlist['v'])
        # Row layout: [vertex embs (padded to max_groupsize) | features]
        x = np.zeros(((vertex_embedding_dimension * max_groupsize) + feature_dimension,))
        x[(vertex_embedding_dimension * max_groupsize):] = hlist['f']
        for slot, embedding in enumerate(np_vertex_embeddings):
            x[slot * embedding.shape[0]:(slot + 1) * embedding.shape[0]] = embedding
        X_unshuffled.append(x)
    labels = np.asarray(labels)
    (X_arr, Y_arr) = shuffle(X_unshuffled, labels)
    X = np.asarray(X_arr)
    Y = np.asarray(Y_arr)
    return (X, Y)
|
def hyperedgesTrain(X_train, Y_train):
    """Reset the deep-hyperedges model to its saved weights and fit.

    NOTE(review): `num_epochs`, `batch_size` and `dataset_name` are read
    from module scope — confirm they are defined before calling.
    """
    path = 'models/' + dataset_name + '/deephyperedges_transductive_model.h5'
    deephyperedges_transductive_model.load_weights(path)
    history = deephyperedges_transductive_model.fit(
        X_train, Y_train,
        epochs=num_epochs, batch_size=batch_size,
        shuffle=True, validation_split=0, verbose=0,
    )
|
def MLPTrain(X_MLP_transductive_train, Y_MLP_transductive_train):
    """Reset the transductive MLP to its saved weights and fit.

    NOTE(review): epochs come from the module-level `num_epochs` here
    (unlike the earlier 3-argument variant) — confirm it is defined.
    """
    saved = 'models/' + dataset_name + '/MLP_transductive_model.h5'
    MLP_transductive_model.load_weights(saved)
    history = MLP_transductive_model.fit(
        X_MLP_transductive_train, Y_MLP_transductive_train,
        epochs=num_epochs, batch_size=batch_size,
        shuffle=True, validation_split=0, verbose=0,
    )
|
def DeepSetsTrain(X_deepset_transductive_train, Y_deepset_transductive_train):
    """Reset the Deep Sets model to its saved weights and fit.

    NOTE(review): epochs come from the module-level `num_epochs` here
    (unlike the earlier 3-argument variant) — confirm it is defined.
    """
    saved = 'models/' + dataset_name + '/deepsets_transductive_model.h5'
    deepsets_transductive_model.load_weights(saved)
    history = deepsets_transductive_model.fit(
        X_deepset_transductive_train, Y_deepset_transductive_train,
        epochs=num_epochs, batch_size=batch_size,
        shuffle=True, validation_split=0, verbose=0,
    )
|
def testModel(model, X_tst, Y_tst):
    """Evaluate ``model`` on one-hot test labels; print report and accuracy.

    The classification report is appended to the module-level ``reports``.
    """
    from sklearn.metrics import classification_report, accuracy_score
    # BUG FIX: the original read `target_names = target_names = [...]`,
    # a duplicated-assignment typo; one assignment suffices.
    target_names = ['Type-1 Diabetes', 'Type-2 Diabetes', 'Type-3 Diabetes']
    y_pred = model.predict(X_tst, batch_size=16, verbose=0)
    finals_pred = []
    finals_test = []
    # Argmax of each predicted probability row (first strict maximum above 0).
    for p in y_pred:
        m = 0
        final = 0
        for ind, score in enumerate(p):
            if score > m:
                m = score
                final = ind
        finals_pred.append(final)
    # Decode the one-hot ground truth back to class indices.
    for row in Y_tst:
        for ind, value in enumerate(row):
            if value == 1:
                finals_test.append(ind)
    c = classification_report(finals_test, finals_pred, target_names=target_names, digits=4)
    print(c)
    reports.append(c)
    print(accuracy_score(finals_test, finals_pred))
|
def RunAllTests(percentTraining, num_times=10):
    """Train and evaluate all three models ``num_times`` times at one split.

    For each run: deep hyperedges, then MLP, then Deep Sets — each gets a
    fresh train/test split, a weight reset + fit, and a printed report.
    """
    test_fraction = 1 - percentTraining
    for run in range(num_times):
        iteration = run + 1
        # --- Deep Hyperedges ---
        print('percent: ', percentTraining, ', iteration: ', iteration, ', model: deep hyperedges')
        (X, Y) = getTrainingData()
        (X_train, X_test, Y_train, Y_test) = train_test_split(X, Y, train_size=percentTraining, test_size=test_fraction)
        hyperedgesTrain(X_train, Y_train)
        testModel(deephyperedges_transductive_model, X_test, Y_test)
        # --- MLP baseline ---
        print('percent: ', percentTraining, ', iteration: ', iteration, ', model: MLP')
        (X_MLP, Y_MLP) = getMLPTrainingData()
        (X_MLP_transductive_train, X_MLP_transductive_test, Y_MLP_transductive_train, Y_MLP_transductive_test) = train_test_split(X_MLP, Y_MLP, train_size=percentTraining, test_size=test_fraction)
        MLPTrain(X_MLP_transductive_train, Y_MLP_transductive_train)
        testModel(MLP_transductive_model, X_MLP_transductive_test, Y_MLP_transductive_test)
        # --- Deep Sets baseline ---
        print('percent: ', percentTraining, ', iteration: ', iteration, ', model: deep sets')
        (X_deepset, Y_deepset) = getDSTrainingData()
        (X_deepset_transductive_train, X_deepset_transductive_test, Y_deepset_transductive_train, Y_deepset_transductive_test) = train_test_split(X_deepset, Y_deepset, train_size=percentTraining, test_size=test_fraction)
        DeepSetsTrain(X_deepset_transductive_train, Y_deepset_transductive_train)
        testModel(deepsets_transductive_model, X_deepset_transductive_test, Y_deepset_transductive_test)
|
def smooth(scalars, weight):
    """Exponentially smooth a series (TensorBoard-style running average).

    Parameters
    ----------
    scalars: sequence of numbers to smooth.
    weight: float in [0, 1]; higher means smoother (more weight on history).

    Returns
    -------
    list of smoothed values, same length as ``scalars``.
    """
    # BUG FIX: the original indexed scalars[0] unconditionally and raised
    # IndexError on an empty series; return an empty list instead.
    if not scalars:
        return []
    last = scalars[0]
    smoothed = []
    for point in scalars:
        smoothed_val = (last * weight) + ((1 - weight) * point)
        smoothed.append(smoothed_val)
        last = smoothed_val
    return smoothed
|
def _read_metric_csv(path):
    """Read a TensorBoard-exported CSV and return (steps, values) float lists."""
    metrics = pd.read_csv(path)
    steps = []
    values = []
    for (index, row) in metrics.iterrows():
        steps.append(float(row['Step']))
        values.append(float(row['Value']))
    return (steps, values)

def plot(deephyperedges_directory, MLP_directory, deepsets_directory, metric, dataset):
    """Plot raw and smoothed training curves for the three models.

    Saves the figure to images/paper/<dataset>/<metric>.png and shows it.
    """
    (x, y) = _read_metric_csv(deephyperedges_directory)
    (x_mlp, y_mlp) = _read_metric_csv(MLP_directory)
    (x_ds, y_ds) = _read_metric_csv(deepsets_directory)
    sns.set()
    # Colors as real RGBA tuples (the originals were string representations
    # of tuples, which only some matplotlib code paths can parse).
    ds_normal = (0.0, 0.0, 0.7, 0.2)
    ds_smoothed = (0.0, 0.0, 0.7, 1)
    dh_normal = (0.0, 0.7, 0.0, 0.2)
    dh_smoothed = (0.0, 0.7, 0.0, 1)
    mlp_normal = (0.7, 0.2, 0.1, 0.2)
    mlp_smoothed = (0.7, 0.2, 0.1, 1)
    plt.gca().set_prop_cycle(color=[mlp_normal, ds_normal, dh_normal, mlp_smoothed, ds_smoothed, dh_smoothed])
    # Raw curves first (faint), then the smoothed curves on top.
    plt.plot(x_mlp, y_mlp)
    plt.plot(x_ds, y_ds)
    plt.plot(x, y)
    plt.plot(x_mlp, smooth(y_mlp, 0.8))
    plt.plot(x_ds, smooth(y_ds, 0.8))
    plt.plot(x, smooth(y, 0.8))
    # BUG FIX: 'bottom right' is not a valid matplotlib legend location;
    # the valid spelling is 'lower right'.
    plt.legend(['_nolegend_', '_nolegend_', '_nolegend_', 'MLP + TAS Walks', 'Deep Sets + SAT Walks', 'Deep Hyperedges'], loc='lower right')
    plt.savefig('images/paper/' + dataset + '/' + metric + '.png', dpi=300)
    plt.show()
|
def plotAll(dataset):
    """Generate all four metric plots (train/val accuracy and loss) for a dataset."""
    # (exported CSV name, output plot name) pairs, in the original order.
    metric_files = [
        ('run-.-tag-categorical_accuracy.csv', 'train_accuracy'),
        ('run-.-tag-loss.csv', 'train_loss'),
        ('run-.-tag-val_categorical_accuracy.csv', 'validation_accuracy'),
        ('run-.-tag-val_loss.csv', 'validation_loss'),
    ]
    base = 'images/paper/' + dataset + '/'
    for csv_name, plot_name in metric_files:
        plot(
            base + 'deephyperedges/' + csv_name,
            base + 'MLP/' + csv_name,
            base + 'deepsets/' + csv_name,
            plot_name,
            dataset,
        )
|
def skip_submodules(app, what, name, obj, skip, options):
    """AutoAPI filter: skip package __init__ members and console/argdoc modules."""
    if name.endswith('__init__'):
        return True
    return name.startswith(('diart.console', 'diart.argdoc'))
|
def setup(sphinx):
    """Sphinx extension entry point: register the AutoAPI member filter."""
    sphinx.connect('autoapi-skip-member', skip_submodules)
|
class AudioLoader():
    """Load audio files as torch tensors, resampled to a fixed sample rate."""

    def __init__(self, sample_rate: int, mono: bool=True):
        self.sample_rate = sample_rate  # target rate for every loaded file
        self.mono = mono  # collapse multi-channel audio to one channel if True

    def load(self, filepath: FilePath) -> torch.Tensor:
        """Load an audio file into a torch.Tensor.

        Parameters
        ----------
        filepath : FilePath
            Path to an audio file

        Returns
        -------
        waveform : torch.Tensor, shape (channels, samples)
        """
        (waveform, sample_rate) = torchaudio.load(filepath)
        needs_downmix = self.mono and waveform.shape[0] > 1
        if needs_downmix:
            # Average the channels to obtain a single mono track.
            waveform = waveform.mean(dim=0, keepdim=True)
        if sample_rate != self.sample_rate:
            waveform = resample(waveform, sample_rate, self.sample_rate)
        return waveform

    @staticmethod
    def get_duration(filepath: FilePath) -> float:
        """Get audio file duration in seconds.

        Parameters
        ----------
        filepath : FilePath
            Path to an audio file.

        Returns
        -------
        duration : float
            Duration in seconds.
        """
        info = torchaudio.info(filepath)
        return info.num_frames / info.sample_rate
|
class AggregationStrategy(ABC):
    """Abstract strategy for aggregating overlapping prediction buffers.

    Parameters
    ----------
    cropping_mode: ("strict", "loose", "center"), optional
        Mode used to crop buffer chunks, as in pyannote.core.
        See https://pyannote.github.io/pyannote-core/reference.html#pyannote.core.SlidingWindowFeature.crop
        Defaults to "loose".
    """

    def __init__(self, cropping_mode: Literal[('strict', 'loose', 'center')]='loose'):
        assert cropping_mode in ['strict', 'loose', 'center'], f'Invalid cropping mode `{cropping_mode}`'
        self.cropping_mode = cropping_mode

    @staticmethod
    def build(name: Literal[('mean', 'hamming', 'first')], cropping_mode: Literal[('strict', 'loose', 'center')]='loose') -> 'AggregationStrategy':
        """Build an AggregationStrategy instance based on its name"""
        assert (name in ('mean', 'hamming', 'first'))
        if name == 'hamming':
            return HammingWeightedAverageStrategy(cropping_mode)
        if name == 'mean':
            return AverageStrategy(cropping_mode)
        return FirstOnlyStrategy(cropping_mode)

    def __call__(self, buffers: List[SlidingWindowFeature], focus: Segment) -> SlidingWindowFeature:
        """Aggregate chunks over a specific region.

        Parameters
        ----------
        buffers: list of SlidingWindowFeature, shapes (frames, speakers)
            Buffers to aggregate
        focus: Segment
            Region to aggregate that is shared among the buffers

        Returns
        -------
        aggregation: SlidingWindowFeature, shape (cropped_frames, speakers)
            Aggregated values over the focus region
        """
        data = self.aggregate(buffers, focus)
        # One frame per output row, spread evenly across the focus region.
        frame_duration = focus.duration / data.shape[0]
        frames = SlidingWindow(start=focus.start, duration=frame_duration, step=frame_duration)
        return SlidingWindowFeature(data, frames)

    @abstractmethod
    def aggregate(self, buffers: List[SlidingWindowFeature], focus: Segment) -> np.ndarray:
        pass
|
class HammingWeightedAverageStrategy(AggregationStrategy):
    """Average the buffers, weighting each frame by a Hamming window aligned to its buffer."""

    def aggregate(self, buffers: List[SlidingWindowFeature], focus: Segment) -> np.ndarray:
        num_frames = buffers[0].data.shape[0]
        # Hamming window as a column vector: one weight per buffer frame.
        window = np.expand_dims(np.hamming(num_frames), axis=-1)
        weights = []
        chunks = []
        for buf in buffers:
            cropped_chunk = buf.crop(focus, mode=self.cropping_mode, fixed=focus.duration)
            # Align the window to this buffer's time axis, then crop it the
            # same way as the data so weights and values stay in register.
            aligned_window = SlidingWindowFeature(window, buf.sliding_window)
            cropped_window = aligned_window.crop(focus, mode=self.cropping_mode, fixed=focus.duration)
            weights.append(cropped_window.data)
            chunks.append(cropped_chunk.data)
        weights = np.stack(weights)
        chunks = np.stack(chunks)
        # Weighted mean across buffers, frame by frame.
        return np.sum(weights * chunks, axis=0) / np.sum(weights, axis=0)
|
class AverageStrategy(AggregationStrategy):
    """Compute a simple average over the focus region"""

    def aggregate(self, buffers: List[SlidingWindowFeature], focus: Segment) -> np.ndarray:
        # Crop every buffer to the same fixed-duration focus, then average.
        cropped = []
        for buf in buffers:
            cropped.append(buf.crop(focus, mode=self.cropping_mode, fixed=focus.duration))
        return np.mean(np.stack(cropped), axis=0)
|
class FirstOnlyStrategy(AggregationStrategy):
    """Instead of aggregating, keep the first focus region in the buffer list"""

    def aggregate(self, buffers: List[SlidingWindowFeature], focus: Segment) -> np.ndarray:
        first_buffer = buffers[0]
        return first_buffer.crop(focus, mode=self.cropping_mode, fixed=focus.duration)
|
class DelayedAggregation():
    'Aggregate aligned overlapping windows of the same duration\n across sliding buffers with a specific step and latency.\n\n Parameters\n ----------\n step: float\n Shift between two consecutive buffers, in seconds.\n latency: float, optional\n Desired latency, in seconds. Defaults to step.\n The higher the latency, the more overlapping windows to aggregate.\n strategy: ("mean", "hamming", "first"), optional\n Specifies how to aggregate overlapping windows. Defaults to "hamming".\n "mean": simple average\n "hamming": average weighted by the Hamming window values (aligned to the buffer)\n "first": no aggregation, pick the first overlapping window\n cropping_mode: ("strict", "loose", "center"), optional\n Defines the mode to crop buffer chunks as in pyannote.core.\n See https://pyannote.github.io/pyannote-core/reference.html#pyannote.core.SlidingWindowFeature.crop\n Defaults to "loose".\n\n Example\n --------\n >>> duration = 5\n >>> frames = 500\n >>> step = 0.5\n >>> speakers = 2\n >>> start_time = 10\n >>> resolution = duration / frames\n >>> dagg = DelayedAggregation(step=step, latency=2, strategy="mean")\n >>> buffers = [\n >>> SlidingWindowFeature(\n >>> np.random.rand(frames, speakers),\n >>> SlidingWindow(start=(i + start_time) * step, duration=resolution, step=resolution)\n >>> )\n >>> for i in range(dagg.num_overlapping_windows)\n >>> ]\n >>> dagg.num_overlapping_windows\n ... 4\n >>> dagg(buffers).data.shape\n ... (51, 2) # Rounding errors are possible when cropping the buffers\n '
    def __init__(self, step: float, latency: Optional[float]=None, strategy: Literal[('mean', 'hamming', 'first')]='hamming', cropping_mode: Literal[('strict', 'loose', 'center')]='loose'):
        self.step = step
        self.latency = latency
        self.strategy = strategy
        assert (cropping_mode in ['strict', 'loose', 'center']), f'Invalid cropping mode `{cropping_mode}`'
        self.cropping_mode = cropping_mode
        # Default latency equals the step, i.e. no extra buffering.
        if (self.latency is None):
            self.latency = self.step
        assert (self.step <= self.latency), 'Invalid latency requested'
        # Number of overlapping buffers required to cover the requested latency.
        self.num_overlapping_windows = int(round((self.latency / self.step)))
        # Concrete aggregation callable ("mean"/"hamming"/"first").
        self.aggregate = AggregationStrategy.build(self.strategy, self.cropping_mode)
    def _prepend(self, output_window: SlidingWindowFeature, output_region: Segment, buffers: List[SlidingWindowFeature]):
        # Special case for the very first buffer of a stream (single buffer
        # starting at t=0): prepend everything from t=0 up to the aggregated
        # region so no audio at the beginning of the stream is lost.
        last_buffer = buffers[(- 1)].extent
        if ((len(buffers) == 1) and (last_buffer.start == 0)):
            num_frames = output_window.data.shape[0]
            first_region = Segment(0, output_region.end)
            first_output = buffers[0].crop(first_region, mode=self.cropping_mode, fixed=first_region.duration)
            # Overwrite the tail with the aggregated output; the head keeps
            # the raw first-buffer values.
            first_output[(- num_frames):] = output_window.data
            resolution = (output_region.end / first_output.shape[0])
            output_window = SlidingWindowFeature(first_output, SlidingWindow(start=0, duration=resolution, step=resolution))
        return output_window
    def __call__(self, buffers: List[SlidingWindowFeature]) -> SlidingWindowFeature:
        # Aggregate the step-sized region that ends `latency` seconds before
        # the end of the newest buffer, then handle the stream-start case.
        start = (buffers[(- 1)].extent.end - self.latency)
        region = Segment(start, (start + self.step))
        return self._prepend(self.aggregate(buffers, region), region, buffers)
|
@dataclass
class HyperParameter():
    'Represents a pipeline hyper-parameter that can be tuned by diart'
    name: Text
    'Name of the hyper-parameter (e.g. tau_active)'
    low: float
    'Lowest value that this parameter can take'
    high: float
    'Highest value that this parameter can take'

    @staticmethod
    def from_name(name: Text) -> 'HyperParameter':
        """Create a HyperParameter object given its name.

        Parameters
        ----------
        name: str
            Name of the hyper-parameter

        Returns
        -------
        HyperParameter
        """
        # Map each known name to its module-level HyperParameter constant.
        known = {'tau_active': TauActive, 'rho_update': RhoUpdate, 'delta_new': DeltaNew}
        if name in known:
            return known[name]
        raise ValueError(f"Hyper-parameter '{name}' not recognized")
|
class PipelineConfig(ABC):
    """Configuration containing the required parameters to build and run a pipeline"""

    @property
    @abstractmethod
    def duration(self) -> float:
        'The duration of an input audio chunk (in seconds)'
        pass

    @property
    @abstractmethod
    def step(self) -> float:
        'The step between two consecutive input audio chunks (in seconds)'
        pass

    @property
    @abstractmethod
    def latency(self) -> float:
        """The algorithmic latency of the pipeline (in seconds).
        At time `t` of the audio stream, the pipeline will
        output predictions for time `t - latency`.
        """
        pass

    @property
    @abstractmethod
    def sample_rate(self) -> int:
        'The sample rate of the input audio stream'
        pass

    def get_file_padding(self, filepath: FilePath) -> Tuple[(float, float)]:
        """Return (left, right) padding in seconds needed to stream a file."""
        loader = AudioLoader(self.sample_rate, mono=True)
        file_duration = loader.get_duration(filepath)
        right = utils.get_padding_right(self.latency, self.step)
        left = utils.get_padding_left(file_duration + right, self.duration)
        return (left, right)
|
class Pipeline(ABC):
    'Represents a streaming audio pipeline'
    @staticmethod
    @abstractmethod
    def get_config_class() -> type:
        'Return the PipelineConfig subclass that configures this pipeline'
        pass
    @staticmethod
    @abstractmethod
    def suggest_metric() -> BaseMetric:
        'Return the default evaluation metric for this pipeline'
        pass
    @staticmethod
    @abstractmethod
    def hyper_parameters() -> Sequence[HyperParameter]:
        'Return the tunable hyper-parameters of this pipeline'
        pass
    @property
    @abstractmethod
    def config(self) -> PipelineConfig:
        'The configuration this pipeline was built with'
        pass
    @abstractmethod
    def reset(self):
        'Reset internal streaming state so a new stream can be processed'
        pass
    @abstractmethod
    def set_timestamp_shift(self, shift: float):
        'Shift all output timestamps by `shift` seconds'
        pass
    @abstractmethod
    def __call__(self, waveforms: Sequence[SlidingWindowFeature]) -> Sequence[Tuple[(Any, SlidingWindowFeature)]]:
        'Runs the next steps of the pipeline\n given a list of consecutive audio chunks.\n\n Parameters\n ----------\n waveforms: Sequence[SlidingWindowFeature]\n Consecutive chunk waveforms for the pipeline to ingest\n\n Returns\n -------\n Sequence[Tuple[Any, SlidingWindowFeature]]\n For each input waveform, a tuple containing\n the pipeline output and its respective audio\n '
        pass
|
class SpeakerDiarizationConfig(base.PipelineConfig):
    """Configuration for the online speaker diarization pipeline."""

    def __init__(self, segmentation: (m.SegmentationModel | None)=None, embedding: (m.EmbeddingModel | None)=None, duration: float=5, step: float=0.5, latency: ((float | Literal[('max', 'min')]) | None)=None, tau_active: float=0.6, rho_update: float=0.3, delta_new: float=1, gamma: float=3, beta: float=10, max_speakers: int=20, normalize_embedding_weights: bool=False, device: (torch.device | None)=None, sample_rate: int=16000, **kwargs):
        # Fall back to the default pyannote checkpoints when none are given.
        self.segmentation = segmentation or m.SegmentationModel.from_pyannote('pyannote/segmentation')
        self.embedding = embedding or m.EmbeddingModel.from_pyannote('pyannote/embedding')
        self._duration = duration
        self._sample_rate = sample_rate
        self._step = step
        # Resolve symbolic latency: None/'min' -> step, 'max' -> duration.
        if latency in (None, 'min'):
            self._latency = self._step
        elif latency == 'max':
            self._latency = self._duration
        else:
            self._latency = latency
        self.tau_active = tau_active
        self.rho_update = rho_update
        self.delta_new = delta_new
        self.gamma = gamma
        self.beta = beta
        self.max_speakers = max_speakers
        self.normalize_embedding_weights = normalize_embedding_weights
        # Prefer CUDA when available unless a device was explicitly given.
        if device is None:
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.device = device

    @property
    def duration(self) -> float:
        return self._duration

    @property
    def step(self) -> float:
        return self._step

    @property
    def latency(self) -> float:
        return self._latency

    @property
    def sample_rate(self) -> int:
        return self._sample_rate
|
class SpeakerDiarization(base.Pipeline):
    'Online speaker diarization pipeline: local segmentation + overlap-aware embeddings + incremental clustering'
    def __init__(self, config: (SpeakerDiarizationConfig | None)=None):
        self._config = (SpeakerDiarizationConfig() if (config is None) else config)
        msg = f'Latency should be in the range [{self._config.step}, {self._config.duration}]'
        assert (self._config.step <= self._config.latency <= self._config.duration), msg
        self.segmentation = SpeakerSegmentation(self._config.segmentation, self._config.device)
        self.embedding = OverlapAwareSpeakerEmbedding(self._config.embedding, self._config.gamma, self._config.beta, norm=1, normalize_weights=self._config.normalize_embedding_weights, device=self._config.device)
        # Aggregators reconcile overlapping chunk outputs at the requested latency:
        # Hamming-weighted mean for predictions, first-chunk for raw audio.
        self.pred_aggregation = DelayedAggregation(self._config.step, self._config.latency, strategy='hamming', cropping_mode='loose')
        self.audio_aggregation = DelayedAggregation(self._config.step, self._config.latency, strategy='first', cropping_mode='center')
        self.binarize = Binarize(self._config.tau_active)
        self.timestamp_shift = 0
        self.clustering = None
        (self.chunk_buffer, self.pred_buffer) = ([], [])
        # reset() initializes the clustering state and clears the buffers.
        self.reset()
    @staticmethod
    def get_config_class() -> type:
        return SpeakerDiarizationConfig
    @staticmethod
    def suggest_metric() -> BaseMetric:
        # Standard DER without collar or overlap exclusion.
        return DiarizationErrorRate(collar=0, skip_overlap=False)
    @staticmethod
    def hyper_parameters() -> Sequence[base.HyperParameter]:
        return [base.TauActive, base.RhoUpdate, base.DeltaNew]
    @property
    def config(self) -> SpeakerDiarizationConfig:
        return self._config
    def set_timestamp_shift(self, shift: float):
        self.timestamp_shift = shift
    def reset(self):
        # Fresh stream: zero shift, new clustering state, empty buffers.
        self.set_timestamp_shift(0)
        self.clustering = OnlineSpeakerClustering(self.config.tau_active, self.config.rho_update, self.config.delta_new, 'cosine', self.config.max_speakers)
        (self.chunk_buffer, self.pred_buffer) = ([], [])
    def __call__(self, waveforms: Sequence[SlidingWindowFeature]) -> Sequence[tuple[(Annotation, SlidingWindowFeature)]]:
        """Diarize the next audio chunks of an audio stream.

        Parameters
        ----------
        waveforms: Sequence[SlidingWindowFeature]
            A sequence of consecutive audio chunks from an audio stream.

        Returns
        -------
        Sequence[tuple[Annotation, SlidingWindowFeature]]
            Speaker diarization of each chunk alongside their corresponding audio.
        """
        batch_size = len(waveforms)
        msg = 'Pipeline expected at least 1 input'
        assert (batch_size >= 1), msg
        batch = torch.stack([torch.from_numpy(w.data) for w in waveforms])
        expected_num_samples = int(np.rint((self.config.duration * self.config.sample_rate)))
        msg = f'Expected {expected_num_samples} samples per chunk, but got {batch.shape[1]}'
        assert (batch.shape[1] == expected_num_samples), msg
        # Per-chunk local segmentation and overlap-aware speaker embeddings.
        segmentations = self.segmentation(batch)
        embeddings = self.embedding(batch, segmentations)
        seg_resolution = (waveforms[0].extent.duration / segmentations.shape[1])
        outputs = []
        for (wav, seg, emb) in zip(waveforms, segmentations, embeddings):
            sw = SlidingWindow(start=wav.extent.start, duration=seg_resolution, step=seg_resolution)
            seg = SlidingWindowFeature(seg.cpu().numpy(), sw)
            # Incremental clustering maps local speakers to global identities.
            permuted_seg = self.clustering(seg, emb)
            self.chunk_buffer.append(wav)
            self.pred_buffer.append(permuted_seg)
            # Aggregate the overlapping buffers into the output for t - latency.
            agg_waveform = self.audio_aggregation(self.chunk_buffer)
            agg_prediction = self.pred_aggregation(self.pred_buffer)
            agg_prediction = self.binarize(agg_prediction)
            # Shift output timestamps when the stream did not start at t=0.
            if (self.timestamp_shift != 0):
                shifted_agg_prediction = Annotation(agg_prediction.uri)
                for (segment, track, speaker) in agg_prediction.itertracks(yield_label=True):
                    new_segment = Segment((segment.start + self.timestamp_shift), (segment.end + self.timestamp_shift))
                    shifted_agg_prediction[(new_segment, track)] = speaker
                agg_prediction = shifted_agg_prediction
            outputs.append((agg_prediction, agg_waveform))
            # Drop the oldest entries once the aggregation window is full.
            if (len(self.chunk_buffer) == self.pred_aggregation.num_overlapping_windows):
                self.chunk_buffer = self.chunk_buffer[1:]
                self.pred_buffer = self.pred_buffer[1:]
        return outputs
|
class SpeakerEmbedding():
    'Wraps an EmbeddingModel to compute (optionally frame-weighted) speaker embeddings'
    def __init__(self, model: EmbeddingModel, device: Optional[torch.device]=None):
        self.model = model
        self.model.eval()
        self.device = device
        if (self.device is None):
            self.device = torch.device('cpu')
        self.model.to(self.device)
        # Formatters normalize inputs into batched tensors and remember the
        # original type so it can be restored later.
        self.waveform_formatter = TemporalFeatureFormatter()
        self.weights_formatter = TemporalFeatureFormatter()
    @staticmethod
    def from_pretrained(model, use_hf_token: Union[(Text, bool, None)]=True, device: Optional[torch.device]=None) -> 'SpeakerEmbedding':
        # Alternate constructor: load the embedding model from a checkpoint name.
        emb_model = EmbeddingModel.from_pretrained(model, use_hf_token)
        return SpeakerEmbedding(emb_model, device)
    def __call__(self, waveform: TemporalFeatures, weights: Optional[TemporalFeatures]=None) -> torch.Tensor:
        """
        Calculate speaker embeddings of input audio.
        If weights are given, calculate many speaker embeddings from the same waveform.

        Parameters
        ----------
        waveform: TemporalFeatures, shape (samples, channels) or (batch, samples, channels)
        weights: Optional[TemporalFeatures], shape (frames, speakers) or (batch, frames, speakers)
            Per-speaker and per-frame weights. Defaults to no weights.

        Returns
        -------
        embeddings: torch.Tensor
            If weights are provided, the shape is (batch, speakers, embedding_dim),
            otherwise the shape is (batch, embedding_dim).
            If batch size == 1, the batch dimension is omitted.
        """
        with torch.no_grad():
            inputs = self.waveform_formatter.cast(waveform).to(self.device)
            inputs = rearrange(inputs, 'batch sample channel -> batch channel sample')
            if (weights is not None):
                weights = self.weights_formatter.cast(weights).to(self.device)
                (batch_size, _, num_speakers) = weights.shape
                # Duplicate the waveform once per speaker so every speaker
                # gets its own weighted embedding pass, then flatten the
                # (batch, speaker) axes for a single model call.
                inputs = inputs.repeat(1, num_speakers, 1)
                weights = rearrange(weights, 'batch frame spk -> (batch spk) frame')
                inputs = rearrange(inputs, 'batch spk sample -> (batch spk) 1 sample')
                output = rearrange(self.model(inputs, weights), '(batch spk) feat -> batch spk feat', batch=batch_size, spk=num_speakers)
            else:
                output = self.model(inputs)
            # squeeze() drops the batch dimension when batch size is 1.
            return output.squeeze().cpu()
|
class OverlappedSpeechPenalty():
    """Penalize overlapping-speech and low-confidence regions in segmentation scores.

    .. note::
        For more information, see `"Overlap-Aware Low-Latency Online Speaker Diarization
        based on End-to-End Local Segmentation" <https://github.com/juanmc2005/diart/blob/main/paper.pdf>`_
        (Section 2.2.1 Segmentation-driven speaker embedding). This block implements Equation 2.

    Parameters
    ----------
    gamma: float, optional
        Exponent to lower low-confidence predictions. Defaults to 3.
    beta: float, optional
        Temperature parameter (actually 1/beta) to lower joint speaker activations.
        Defaults to 10.
    normalize: bool, optional
        Whether to min-max normalize weights to be in the range [0, 1].
        Defaults to False.
    """

    def __init__(self, gamma: float=3, beta: float=10, normalize: bool=False):
        self.gamma = gamma
        self.beta = beta
        self.formatter = TemporalFeatureFormatter()
        self.normalize = normalize

    def __call__(self, segmentation: TemporalFeatures) -> TemporalFeatures:
        scores = self.formatter.cast(segmentation)
        with torch.inference_mode():
            scores = F.overlapped_speech_penalty(scores, self.gamma, self.beta)
            if self.normalize:
                # Min-max normalize along the frame axis, then replace the
                # NaNs produced by constant (max == min) columns.
                low = scores.min(dim=1, keepdim=True).values
                high = scores.max(dim=1, keepdim=True).values
                scores = (scores - low) / (high - low)
                scores.nan_to_num_(1e-08)
        return self.formatter.restore_type(scores)
|
class EmbeddingNormalization():
    """Rescale speaker embeddings to a target norm (scalar or per-speaker tensor)."""

    def __init__(self, norm: Union[(float, torch.Tensor)]=1):
        self.norm = norm
        # A 2D tensor of per-speaker norms gets a leading batch dimension.
        if isinstance(self.norm, torch.Tensor) and self.norm.ndim == 2:
            self.norm = self.norm.unsqueeze(0)

    def __call__(self, embeddings: torch.Tensor) -> torch.Tensor:
        with torch.inference_mode():
            normalized = F.normalize_embeddings(embeddings, self.norm)
            return normalized
|
class OverlapAwareSpeakerEmbedding():
    """
    Extract overlap-aware speaker embeddings given an audio chunk and its segmentation.

    Parameters
    ----------
    model: EmbeddingModel
        A pre-trained embedding model.
    gamma: float, optional
        Exponent to lower low-confidence predictions. Defaults to 3.
    beta: float, optional
        Softmax's temperature parameter (actually 1/beta) to lower joint speaker activations.
        Defaults to 10.
    norm: float or torch.Tensor of shape (batch, speakers, 1) where batch is optional
        The target norm for the embeddings. It can be different for each speaker.
        Defaults to 1.
    normalize_weights: bool, optional
        Whether to min-max normalize embedding weights to be in the range [0, 1].
    device: Optional[torch.device]
        The device on which to run the embedding model.
        Defaults to GPU if available or CPU if not.
    """

    def __init__(self, model: EmbeddingModel, gamma: float=3, beta: float=10, norm: Union[(float, torch.Tensor)]=1, normalize_weights: bool=False, device: Optional[torch.device]=None):
        # Pipeline: segmentation -> frame weights -> embeddings -> normalization.
        self.embedding = SpeakerEmbedding(model, device)
        self.osp = OverlappedSpeechPenalty(gamma, beta, normalize_weights)
        self.normalize = EmbeddingNormalization(norm)

    @staticmethod
    def from_pretrained(model, gamma: float=3, beta: float=10, norm: Union[(float, torch.Tensor)]=1, use_hf_token: Union[(Text, bool, None)]=True, normalize_weights: bool=False, device: Optional[torch.device]=None):
        """Alternate constructor: load the embedding model from a checkpoint name."""
        model = EmbeddingModel.from_pretrained(model, use_hf_token)
        return OverlapAwareSpeakerEmbedding(model, gamma, beta, norm, normalize_weights, device)

    def __call__(self, waveform: TemporalFeatures, segmentation: TemporalFeatures) -> torch.Tensor:
        weights = self.osp(segmentation)
        embeddings = self.embedding(waveform, weights)
        return self.normalize(embeddings)
|
class SpeakerSegmentation:
    """Run a pre-trained segmentation model on arbitrary temporal feature containers."""

    def __init__(self, model: SegmentationModel, device: Optional[torch.device] = None):
        if device is None:
            device = torch.device('cpu')
        self.device = device
        self.model = model
        self.model.eval()
        self.model.to(self.device)
        self.formatter = TemporalFeatureFormatter()

    @staticmethod
    def from_pretrained(model, use_hf_token: Union[Text, bool, None] = True, device: Optional[torch.device] = None) -> 'SpeakerSegmentation':
        """Build an instance from a pre-trained model name or checkpoint."""
        pretrained = SegmentationModel.from_pretrained(model, use_hf_token)
        return SpeakerSegmentation(pretrained, device)

    def __call__(self, waveform: TemporalFeatures) -> TemporalFeatures:
        """
        Calculate the speaker segmentation of input audio.

        Parameters
        ----------
        waveform: TemporalFeatures, shape (samples, channels) or (batch, samples, channels)

        Returns
        -------
        speaker_segmentation: TemporalFeatures, shape (batch, frames, speakers)
            The batch dimension is omitted if waveform is a `SlidingWindowFeature`.
        """
        with torch.no_grad():
            # The model expects (batch, channel, sample)
            batch = rearrange(self.formatter.cast(waveform), 'batch sample channel -> batch channel sample')
            output = self.model(batch.to(self.device)).cpu()
        return self.formatter.restore_type(output)
|
class Binarize():
    """
    Transform a speaker segmentation from the discrete-time domain
    into a continuous-time speaker segmentation.

    Parameters
    ----------
    threshold: float
        Probability threshold to determine if a speaker is active at a given frame.
    uri: Optional[Text]
        Uri of the audio stream. Defaults to no uri.
    """
    def __init__(self, threshold: float, uri: Optional[Text]=None):
        self.uri = uri
        self.threshold = threshold
    def __call__(self, segmentation: SlidingWindowFeature) -> Annotation:
        """
        Return the continuous-time segmentation
        corresponding to the discrete-time input segmentation.

        Parameters
        ----------
        segmentation: SlidingWindowFeature
            Discrete-time speaker segmentation.

        Returns
        -------
        annotation: Annotation
            Continuous-time speaker segmentation.
        """
        (num_frames, num_speakers) = segmentation.data.shape
        timestamps = segmentation.sliding_window
        is_active = (segmentation.data > self.threshold)
        # Append an all-inactive sentinel frame so regions active at the
        # last frame are always closed by the loop below
        is_active = np.append(is_active, [([False] * num_speakers)], axis=0)
        # Speakers active from the start begin at the first frame's midpoint
        start_times = (np.zeros(num_speakers) + timestamps[0].middle)
        annotation = Annotation(uri=self.uri, modality='speech')
        for t in range(num_frames):
            # Onset: inactive at t but active at t+1 -> remember the region start
            onsets = np.logical_and(np.logical_not(is_active[t]), is_active[(t + 1)])
            start_times[onsets] = timestamps[(t + 1)].middle
            # Offset: active at t but inactive at t+1 -> emit the finished region
            offsets = np.logical_and(is_active[t], np.logical_not(is_active[(t + 1)]))
            for spk in np.where(offsets)[0]:
                region = Segment(start_times[spk], timestamps[(t + 1)].middle)
                annotation[(region, spk)] = f'speaker{spk}'
        return annotation
|
class Resample:
    """Dynamically resample audio chunks.

    Parameters
    ----------
    sample_rate: int
        Original sample rate of the input audio
    resample_rate: int
        Sample rate of the output
    """

    def __init__(self, sample_rate: int, resample_rate: int, device: Optional[torch.device] = None):
        self.device = torch.device('cpu') if device is None else device
        self.resample = T.Resample(sample_rate, resample_rate).to(self.device)
        self.formatter = TemporalFeatureFormatter()

    def __call__(self, waveform: TemporalFeatures) -> TemporalFeatures:
        """Return `waveform` resampled to the target rate, keeping its container type."""
        audio = self.formatter.cast(waveform).to(self.device)
        with torch.no_grad():
            # The resampler works on (..., channel, sample); our features
            # are (..., sample, channel), so transpose around the call
            resampled = self.resample(audio.transpose(-1, -2)).transpose(-1, -2)
        return self.formatter.restore_type(resampled)
|
class AdjustVolume:
    """Change the volume of an audio chunk.

    Notice that the output volume might be different to avoid saturation.

    Parameters
    ----------
    volume_in_db: float
        Target volume in dB.
    """

    def __init__(self, volume_in_db: float):
        self.target_db = volume_in_db
        self.formatter = TemporalFeatureFormatter()

    @staticmethod
    def get_volumes(waveforms: torch.Tensor) -> torch.Tensor:
        """Compute the volumes of a set of audio chunks.

        Parameters
        ----------
        waveforms: torch.Tensor
            Audio chunks. Shape (batch, samples, channels).

        Returns
        -------
        volumes: torch.Tensor
            Audio chunk volumes per channel. Shape (batch, 1, channels)
        """
        mean_power = torch.mean(torch.abs(waveforms) ** 2, dim=1, keepdim=True)
        return 10 * torch.log10(mean_power)

    def __call__(self, waveform: TemporalFeatures) -> TemporalFeatures:
        """Return `waveform` scaled towards the target volume, renormalized to avoid clipping."""
        audio = self.formatter.cast(waveform)
        with torch.no_grad():
            # Gain (in linear scale) needed to reach the target dB level
            gain_db = self.target_db - self.get_volumes(audio)
            audio = (10 ** (gain_db / 20)) * audio
            # Renormalize only when the gain pushed samples beyond [-1, 1]
            peaks = torch.clamp(torch.amax(torch.abs(audio), dim=1, keepdim=True), 1)
            audio = audio / peaks
        return self.formatter.restore_type(audio)
|
class VoiceActivityDetectionConfig(base.PipelineConfig):
    """Configuration of the streaming voice activity detection pipeline."""

    def __init__(
        self,
        segmentation: (m.SegmentationModel | None) = None,
        duration: float = 5,
        step: float = 0.5,
        latency: ((float | Literal[('max', 'min')]) | None) = None,
        tau_active: float = 0.6,
        device: (torch.device | None) = None,
        sample_rate: int = 16000,
        **kwargs,
    ):
        # Default to the pyannote segmentation model
        if segmentation is None:
            segmentation = m.SegmentationModel.from_pyannote('pyannote/segmentation')
        self.segmentation = segmentation
        self._duration = duration
        self._step = step
        self._sample_rate = sample_rate
        # Resolve symbolic latencies: None/'min' maps to the step, 'max' to the duration
        if latency is None or latency == 'min':
            self._latency = self._step
        elif latency == 'max':
            self._latency = self._duration
        else:
            self._latency = latency
        self.tau_active = tau_active
        if device is None:
            device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.device = device

    @property
    def duration(self) -> float:
        return self._duration

    @property
    def step(self) -> float:
        return self._step

    @property
    def latency(self) -> float:
        return self._latency

    @property
    def sample_rate(self) -> int:
        return self._sample_rate
|
class VoiceActivityDetection(base.Pipeline):
    """Streaming voice activity detection pipeline.

    Buffers overlapping segmentation predictions, aggregates them under the
    configured latency, and emits binarized speech regions with their audio.
    """
    def __init__(self, config: (VoiceActivityDetectionConfig | None)=None):
        self._config = (VoiceActivityDetectionConfig() if (config is None) else config)
        msg = f'Latency should be in the range [{self._config.step}, {self._config.duration}]'
        assert (self._config.step <= self._config.latency <= self._config.duration), msg
        self.segmentation = SpeakerSegmentation(self._config.segmentation, self._config.device)
        # Overlapping predictions are blended with a hamming window; for the
        # audio itself, the first chunk covering a region wins
        self.pred_aggregation = DelayedAggregation(self._config.step, self._config.latency, strategy='hamming', cropping_mode='loose')
        self.audio_aggregation = DelayedAggregation(self._config.step, self._config.latency, strategy='first', cropping_mode='center')
        self.binarize = Binarize(self._config.tau_active)
        # Constant added to output timestamps (e.g. to compensate input padding)
        self.timestamp_shift = 0
        (self.chunk_buffer, self.pred_buffer) = ([], [])
    @staticmethod
    def get_config_class() -> type:
        """Return the configuration class used by this pipeline."""
        return VoiceActivityDetectionConfig
    @staticmethod
    def suggest_metric() -> BaseMetric:
        """Return the default evaluation metric for this pipeline."""
        return DetectionErrorRate(collar=0, skip_overlap=False)
    @staticmethod
    def hyper_parameters() -> Sequence[base.HyperParameter]:
        """Return the hyper-parameters that can be tuned for this pipeline."""
        return [base.TauActive]
    @property
    def config(self) -> base.PipelineConfig:
        return self._config
    def reset(self):
        """Clear internal buffers and the timestamp shift to start a new stream."""
        self.set_timestamp_shift(0)
        (self.chunk_buffer, self.pred_buffer) = ([], [])
    def set_timestamp_shift(self, shift: float):
        """Set the constant shift applied to output timestamps."""
        self.timestamp_shift = shift
    def __call__(self, waveforms: Sequence[SlidingWindowFeature]) -> Sequence[tuple[(Annotation, SlidingWindowFeature)]]:
        """Detect voice activity on a batch of consecutive audio chunks.

        Parameters
        ----------
        waveforms: Sequence[SlidingWindowFeature]
            Consecutive audio chunks of the configured duration.

        Returns
        -------
        Sequence[tuple[Annotation, SlidingWindowFeature]]
            One (speech regions, aggregated audio) pair per input chunk.
        """
        batch_size = len(waveforms)
        msg = 'Pipeline expected at least 1 input'
        assert (batch_size >= 1), msg
        batch = torch.stack([torch.from_numpy(w.data) for w in waveforms])
        expected_num_samples = int(np.rint((self.config.duration * self.config.sample_rate)))
        msg = f'Expected {expected_num_samples} samples per chunk, but got {batch.shape[1]}'
        assert (batch.shape[1] == expected_num_samples), msg
        segmentations = self.segmentation(batch)
        # A frame contains speech if any speaker is active in it
        voice_detection = torch.max(segmentations, dim=(- 1), keepdim=True)[0]
        seg_resolution = (waveforms[0].extent.duration / segmentations.shape[1])
        outputs = []
        for (wav, vad) in zip(waveforms, voice_detection):
            sw = SlidingWindow(start=wav.extent.start, duration=seg_resolution, step=seg_resolution)
            vad = SlidingWindowFeature(vad.cpu().numpy(), sw)
            self.chunk_buffer.append(wav)
            self.pred_buffer.append(vad)
            agg_waveform = self.audio_aggregation(self.chunk_buffer)
            agg_prediction = self.pred_aggregation(self.pred_buffer)
            agg_prediction = self.binarize(agg_prediction).get_timeline(copy=False)
            # Shift output timestamps if requested (e.g. to undo file padding)
            if (self.timestamp_shift != 0):
                shifted_agg_prediction = Timeline(uri=agg_prediction.uri)
                for segment in agg_prediction:
                    new_segment = Segment((segment.start + self.timestamp_shift), (segment.end + self.timestamp_shift))
                    shifted_agg_prediction.add(new_segment)
                agg_prediction = shifted_agg_prediction
            agg_prediction = agg_prediction.to_annotation(utils.repeat_label('speech'))
            outputs.append((agg_prediction, agg_waveform))
            # Drop the oldest chunk once the buffer spans all overlapping windows
            if (len(self.chunk_buffer) == self.pred_aggregation.num_overlapping_windows):
                self.chunk_buffer = self.chunk_buffer[1:]
                self.pred_buffer = self.pred_buffer[1:]
        return outputs
|
def run():
    """CLI entry point: benchmark a streaming pipeline over a directory of audio files.

    Loads the segmentation and embedding models, builds the pipeline configuration
    from the command-line arguments, and writes a performance report (optionally
    to CSV) when a reference is provided.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('root', type=Path, help='Directory with audio files CONVERSATION.(wav|flac|m4a|...)')
    parser.add_argument('--pipeline', default='SpeakerDiarization', type=str, help="Class of the pipeline to optimize. Defaults to 'SpeakerDiarization'")
    parser.add_argument('--segmentation', default='pyannote/segmentation', type=str, help=f'{argdoc.SEGMENTATION}. Defaults to pyannote/segmentation')
    parser.add_argument('--embedding', default='pyannote/embedding', type=str, help=f'{argdoc.EMBEDDING}. Defaults to pyannote/embedding')
    parser.add_argument('--reference', type=Path, help='Optional. Directory with RTTM files CONVERSATION.rttm. Names must match audio files')
    parser.add_argument('--duration', type=float, default=5, help=f'{argdoc.DURATION}. Defaults to training segmentation duration')
    parser.add_argument('--step', default=0.5, type=float, help=f'{argdoc.STEP}. Defaults to 0.5')
    parser.add_argument('--latency', default=0.5, type=float, help=f'{argdoc.LATENCY}. Defaults to 0.5')
    parser.add_argument('--tau-active', default=0.5, type=float, help=f'{argdoc.TAU}. Defaults to 0.5')
    parser.add_argument('--rho-update', default=0.3, type=float, help=f'{argdoc.RHO}. Defaults to 0.3')
    parser.add_argument('--delta-new', default=1, type=float, help=f'{argdoc.DELTA}. Defaults to 1')
    parser.add_argument('--gamma', default=3, type=float, help=f'{argdoc.GAMMA}. Defaults to 3')
    parser.add_argument('--beta', default=10, type=float, help=f'{argdoc.BETA}. Defaults to 10')
    parser.add_argument('--max-speakers', default=20, type=int, help=f'{argdoc.MAX_SPEAKERS}. Defaults to 20')
    parser.add_argument('--batch-size', default=32, type=int, help=f'{argdoc.BATCH_SIZE}. Defaults to 32')
    parser.add_argument('--num-workers', default=0, type=int, help=f'{argdoc.NUM_WORKERS}. Defaults to 0 (no parallelism)')
    parser.add_argument('--cpu', dest='cpu', action='store_true', help=f'{argdoc.CPU}. Defaults to GPU if available, CPU otherwise')
    parser.add_argument('--output', type=Path, help=f'{argdoc.OUTPUT}. Defaults to no writing')
    parser.add_argument('--hf-token', default='true', type=str, help=f"{argdoc.HF_TOKEN}. Defaults to 'true' (required by pyannote)")
    parser.add_argument('--normalize-embedding-weights', action='store_true', help=f'{argdoc.NORMALIZE_EMBEDDING_WEIGHTS}. Defaults to False')
    args = parser.parse_args()
    # Force CPU if requested; None lets the pipeline pick GPU when available
    args.device = (torch.device('cpu') if args.cpu else None)
    hf_token = utils.parse_hf_token_arg(args.hf_token)
    args.segmentation = m.SegmentationModel.from_pretrained(args.segmentation, hf_token)
    args.embedding = m.EmbeddingModel.from_pretrained(args.embedding, hf_token)
    pipeline_class = utils.get_pipeline_class(args.pipeline)
    benchmark = Benchmark(args.root, args.reference, args.output, show_progress=True, show_report=True, batch_size=args.batch_size)
    config = pipeline_class.get_config_class()(**vars(args))
    # Optionally spread the benchmark across several worker processes
    if (args.num_workers > 0):
        benchmark = Parallelize(benchmark, args.num_workers)
    report = benchmark(pipeline_class, config)
    if ((args.output is not None) and isinstance(report, pd.DataFrame)):
        report.to_csv((args.output / 'benchmark_report.csv'))
|
def send_audio(ws: WebSocket, source: Text, step: float, sample_rate: int):
    """Stream encoded audio chunks from `source` to a websocket server.

    `source` is either an audio file path or 'microphone[:<DEVICE_ID>]'.
    Blocks until the audio source is exhausted.
    """
    parts = source.split(':')
    if parts[0] == 'microphone':
        # Optional device id after the colon
        mic_device = int(parts[1]) if len(parts) > 1 else None
        audio_source = src.MicrophoneAudioSource(step, mic_device)
    else:
        audio_source = src.FileAudioSource(source, sample_rate, block_duration=step)
    # Encode each chunk and forward it through the websocket
    encoded_stream = audio_source.stream.pipe(ops.map(utils.encode_audio))
    encoded_stream.subscribe_(ws.send)
    audio_source.read()
|
def receive_audio(ws: WebSocket, output: Optional[Path]):
    """Print prediction messages received from the server, optionally appending
    them to a file.

    Parameters
    ----------
    ws: WebSocket
        Connected websocket client to read messages from.
    output: Optional[Path]
        File to append each received message to. If None, messages are only printed.
    """
    while True:
        message = ws.recv()
        # An empty message signals that the server closed the connection;
        # without this check the loop would spin forever after disconnection
        if not message:
            break
        print(f'Received: {message}', end='')
        if (output is not None):
            with open(output, 'a') as file:
                file.write(message)
|
def run():
    """CLI entry point: stream a local audio source to a remote diarization server
    over websockets and print/write back the predictions it returns.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('source', type=str, help="Path to an audio file | 'microphone' | 'microphone:<DEVICE_ID>'")
    parser.add_argument('--host', required=True, type=str, help='Server host')
    parser.add_argument('--port', required=True, type=int, help='Server port')
    parser.add_argument('--step', default=0.5, type=float, help=f'{argdoc.STEP}. Defaults to 0.5')
    parser.add_argument('-sr', '--sample-rate', default=16000, type=int, help=f'{argdoc.SAMPLE_RATE}. Defaults to 16000')
    parser.add_argument('-o', '--output-file', type=Path, help='Output RTTM file. Defaults to no writing')
    args = parser.parse_args()
    ws = WebSocket()
    ws.connect(f'ws://{args.host}:{args.port}')
    # Upload and download run concurrently on the same websocket
    sender = Thread(target=send_audio, args=[ws, args.source, args.step, args.sample_rate])
    receiver = Thread(target=receive_audio, args=[ws, args.output_file])
    sender.start()
    receiver.start()
    # NOTE(review): threads are not joined here; being non-daemon threads they
    # keep the process alive until the stream ends
|
def run():
    """CLI entry point: serve a streaming pipeline over a websocket, sending the
    predictions back to the client in RTTM format.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', default='0.0.0.0', type=str, help='Server host')
    parser.add_argument('--port', default=7007, type=int, help='Server port')
    parser.add_argument('--pipeline', default='SpeakerDiarization', type=str, help="Class of the pipeline to optimize. Defaults to 'SpeakerDiarization'")
    parser.add_argument('--segmentation', default='pyannote/segmentation', type=str, help=f'{argdoc.SEGMENTATION}. Defaults to pyannote/segmentation')
    parser.add_argument('--embedding', default='pyannote/embedding', type=str, help=f'{argdoc.EMBEDDING}. Defaults to pyannote/embedding')
    parser.add_argument('--duration', type=float, default=5, help=f'{argdoc.DURATION}. Defaults to training segmentation duration')
    parser.add_argument('--step', default=0.5, type=float, help=f'{argdoc.STEP}. Defaults to 0.5')
    parser.add_argument('--latency', default=0.5, type=float, help=f'{argdoc.LATENCY}. Defaults to 0.5')
    parser.add_argument('--tau-active', default=0.5, type=float, help=f'{argdoc.TAU}. Defaults to 0.5')
    parser.add_argument('--rho-update', default=0.3, type=float, help=f'{argdoc.RHO}. Defaults to 0.3')
    parser.add_argument('--delta-new', default=1, type=float, help=f'{argdoc.DELTA}. Defaults to 1')
    parser.add_argument('--gamma', default=3, type=float, help=f'{argdoc.GAMMA}. Defaults to 3')
    parser.add_argument('--beta', default=10, type=float, help=f'{argdoc.BETA}. Defaults to 10')
    parser.add_argument('--max-speakers', default=20, type=int, help=f'{argdoc.MAX_SPEAKERS}. Defaults to 20')
    parser.add_argument('--cpu', dest='cpu', action='store_true', help=f'{argdoc.CPU}. Defaults to GPU if available, CPU otherwise')
    parser.add_argument('--output', type=Path, help=f'{argdoc.OUTPUT}. Defaults to no writing')
    parser.add_argument('--hf-token', default='true', type=str, help=f"{argdoc.HF_TOKEN}. Defaults to 'true' (required by pyannote)")
    parser.add_argument('--normalize-embedding-weights', action='store_true', help=f'{argdoc.NORMALIZE_EMBEDDING_WEIGHTS}. Defaults to False')
    args = parser.parse_args()
    # Force CPU if requested; None lets the pipeline pick GPU when available
    args.device = (torch.device('cpu') if args.cpu else None)
    hf_token = utils.parse_hf_token_arg(args.hf_token)
    args.segmentation = m.SegmentationModel.from_pretrained(args.segmentation, hf_token)
    args.embedding = m.EmbeddingModel.from_pretrained(args.embedding, hf_token)
    pipeline_class = utils.get_pipeline_class(args.pipeline)
    config = pipeline_class.get_config_class()(**vars(args))
    pipeline = pipeline_class(config)
    # Audio arrives from the websocket client instead of a local source
    audio_source = src.WebSocketAudioSource(config.sample_rate, args.host, args.port)
    inference = StreamingInference(pipeline, audio_source, batch_size=1, do_profile=False, do_plot=False, show_progress=True)
    if (args.output is not None):
        inference.attach_observers(RTTMWriter(audio_source.uri, (args.output / f'{audio_source.uri}.rttm')))
    # Send the current prediction back to the client in RTTM format
    inference.attach_hooks((lambda ann_wav: audio_source.send(ann_wav[0].to_rttm())))
    inference()
|
def run():
    """CLI entry point: run a streaming pipeline on an audio file or microphone
    and write the predictions to an RTTM file (optionally plotting live).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('source', type=str, help="Path to an audio file | 'microphone' | 'microphone:<DEVICE_ID>'")
    parser.add_argument('--pipeline', default='SpeakerDiarization', type=str, help="Class of the pipeline to optimize. Defaults to 'SpeakerDiarization'")
    parser.add_argument('--segmentation', default='pyannote/segmentation', type=str, help=f'{argdoc.SEGMENTATION}. Defaults to pyannote/segmentation')
    parser.add_argument('--embedding', default='pyannote/embedding', type=str, help=f'{argdoc.EMBEDDING}. Defaults to pyannote/embedding')
    parser.add_argument('--duration', type=float, default=5, help=f'{argdoc.DURATION}. Defaults to training segmentation duration')
    parser.add_argument('--step', default=0.5, type=float, help=f'{argdoc.STEP}. Defaults to 0.5')
    parser.add_argument('--latency', default=0.5, type=float, help=f'{argdoc.LATENCY}. Defaults to 0.5')
    parser.add_argument('--tau-active', default=0.5, type=float, help=f'{argdoc.TAU}. Defaults to 0.5')
    parser.add_argument('--rho-update', default=0.3, type=float, help=f'{argdoc.RHO}. Defaults to 0.3')
    parser.add_argument('--delta-new', default=1, type=float, help=f'{argdoc.DELTA}. Defaults to 1')
    parser.add_argument('--gamma', default=3, type=float, help=f'{argdoc.GAMMA}. Defaults to 3')
    parser.add_argument('--beta', default=10, type=float, help=f'{argdoc.BETA}. Defaults to 10')
    parser.add_argument('--max-speakers', default=20, type=int, help=f'{argdoc.MAX_SPEAKERS}. Defaults to 20')
    parser.add_argument('--no-plot', dest='no_plot', action='store_true', help='Skip plotting for faster inference')
    parser.add_argument('--cpu', dest='cpu', action='store_true', help=f'{argdoc.CPU}. Defaults to GPU if available, CPU otherwise')
    parser.add_argument('--output', type=str, help=f"{argdoc.OUTPUT}. Defaults to home directory if SOURCE == 'microphone' or parent directory if SOURCE is a file")
    parser.add_argument('--hf-token', default='true', type=str, help=f"{argdoc.HF_TOKEN}. Defaults to 'true' (required by pyannote)")
    parser.add_argument('--normalize-embedding-weights', action='store_true', help=f'{argdoc.NORMALIZE_EMBEDDING_WEIGHTS}. Defaults to False')
    args = parser.parse_args()
    # Force CPU if requested; None lets the pipeline pick GPU when available
    args.device = (torch.device('cpu') if args.cpu else None)
    hf_token = utils.parse_hf_token_arg(args.hf_token)
    args.segmentation = m.SegmentationModel.from_pretrained(args.segmentation, hf_token)
    args.embedding = m.EmbeddingModel.from_pretrained(args.embedding, hf_token)
    pipeline_class = utils.get_pipeline_class(args.pipeline)
    config = pipeline_class.get_config_class()(**vars(args))
    pipeline = pipeline_class(config)
    source_components = args.source.split(':')
    if (source_components[0] != 'microphone'):
        args.source = Path(args.source).expanduser()
        args.output = (args.source.parent if (args.output is None) else Path(args.output))
        # Files are padded so the stream can start/end cleanly; compensate the
        # output timestamps for the leading padding
        padding = config.get_file_padding(args.source)
        audio_source = src.FileAudioSource(args.source, config.sample_rate, padding, config.step)
        pipeline.set_timestamp_shift((- padding[0]))
    else:
        args.output = (Path('~/').expanduser() if (args.output is None) else Path(args.output))
        device = (int(source_components[1]) if (len(source_components) > 1) else None)
        audio_source = src.MicrophoneAudioSource(config.step, device)
    inference = StreamingInference(pipeline, audio_source, batch_size=1, do_profile=True, do_plot=(not args.no_plot), show_progress=True)
    inference.attach_observers(RTTMWriter(audio_source.uri, (args.output / f'{audio_source.uri}.rttm')))
    try:
        inference()
    except KeyboardInterrupt:
        # Ctrl+C stops streaming gracefully; partial output was already written
        pass
|
def run():
    """CLI entry point: optimize pipeline hyper-parameters with Optuna against a
    directory of reference RTTM annotations.

    Either creates a new study in `--output` or resumes one from `--storage`
    (the two options are mutually exclusive).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('root', type=str, help='Directory with audio files CONVERSATION.(wav|flac|m4a|...)')
    parser.add_argument('--reference', required=True, type=str, help='Directory with RTTM files CONVERSATION.rttm. Names must match audio files')
    parser.add_argument('--pipeline', default='SpeakerDiarization', type=str, help="Class of the pipeline to optimize. Defaults to 'SpeakerDiarization'")
    parser.add_argument('--segmentation', default='pyannote/segmentation', type=str, help=f'{argdoc.SEGMENTATION}. Defaults to pyannote/segmentation')
    parser.add_argument('--embedding', default='pyannote/embedding', type=str, help=f'{argdoc.EMBEDDING}. Defaults to pyannote/embedding')
    parser.add_argument('--duration', type=float, default=5, help=f'{argdoc.DURATION}. Defaults to training segmentation duration')
    parser.add_argument('--step', default=0.5, type=float, help=f'{argdoc.STEP}. Defaults to 0.5')
    parser.add_argument('--latency', default=0.5, type=float, help=f'{argdoc.LATENCY}. Defaults to 0.5')
    parser.add_argument('--tau-active', default=0.5, type=float, help=f'{argdoc.TAU}. Defaults to 0.5')
    parser.add_argument('--rho-update', default=0.3, type=float, help=f'{argdoc.RHO}. Defaults to 0.3')
    parser.add_argument('--delta-new', default=1, type=float, help=f'{argdoc.DELTA}. Defaults to 1')
    parser.add_argument('--gamma', default=3, type=float, help=f'{argdoc.GAMMA}. Defaults to 3')
    parser.add_argument('--beta', default=10, type=float, help=f'{argdoc.BETA}. Defaults to 10')
    parser.add_argument('--max-speakers', default=20, type=int, help=f'{argdoc.MAX_SPEAKERS}. Defaults to 20')
    parser.add_argument('--batch-size', default=32, type=int, help=f'{argdoc.BATCH_SIZE}. Defaults to 32')
    parser.add_argument('--cpu', dest='cpu', action='store_true', help=f'{argdoc.CPU}. Defaults to GPU if available, CPU otherwise')
    parser.add_argument('--hparams', nargs='+', default=('tau_active', 'rho_update', 'delta_new'), help='Hyper-parameters to optimize. Must match names in `PipelineConfig`. Defaults to tau_active, rho_update and delta_new')
    parser.add_argument('--num-iter', default=100, type=int, help='Number of optimization trials')
    parser.add_argument('--storage', type=str, help='Optuna storage string. If provided, continue a previous study instead of creating one. The database name must match the study name')
    parser.add_argument('--output', type=str, help='Working directory')
    parser.add_argument('--hf-token', default='true', type=str, help=f"{argdoc.HF_TOKEN}. Defaults to 'true' (required by pyannote)")
    parser.add_argument('--normalize-embedding-weights', action='store_true', help=f'{argdoc.NORMALIZE_EMBEDDING_WEIGHTS}. Defaults to False')
    args = parser.parse_args()
    # Force CPU if requested; None lets the pipeline pick GPU when available
    args.device = (torch.device('cpu') if args.cpu else None)
    hf_token = utils.parse_hf_token_arg(args.hf_token)
    args.segmentation = m.SegmentationModel.from_pretrained(args.segmentation, hf_token)
    args.embedding = m.EmbeddingModel.from_pretrained(args.embedding, hf_token)
    pipeline_class = utils.get_pipeline_class(args.pipeline)
    base_config = pipeline_class.get_config_class()(**vars(args))
    # Keep only requested hyper-parameters that this pipeline actually supports
    possible_hparams = pipeline_class.hyper_parameters()
    hparams = [HyperParameter.from_name(name) for name in args.hparams]
    hparams = [hp for hp in hparams if (hp in possible_hparams)]
    if (not hparams):
        print(f"No hyper-parameters to optimize. Make sure to select one of: {', '.join([hp.name for hp in possible_hparams])}")
        exit(1)
    if (args.output is not None):
        msg = 'Both `output` and `storage` were set, but only one was expected'
        assert (args.storage is None), msg
        args.output = Path(args.output).expanduser()
        args.output.mkdir(parents=True, exist_ok=True)
        study_or_path = args.output
    elif (args.storage is not None):
        # Resume a previous study; the database name must match the study name
        db_name = Path(args.storage).stem
        study_or_path = optuna.load_study(db_name, args.storage, TPESampler())
    else:
        msg = 'Please provide either `output` or `storage`'
        raise ValueError(msg)
    Optimizer(pipeline_class=pipeline_class, speech_path=args.root, reference_path=args.reference, study_or_path=study_or_path, batch_size=args.batch_size, hparams=hparams, base_config=base_config)(num_iter=args.num_iter, show_progress=True)
|
class TemporalFeatureFormatterState(ABC):
    """
    Represents the recorded type of a temporal feature formatter.
    Its job is to transform temporal features into tensors and
    recover the original format on other features.
    """
    @abstractmethod
    def to_tensor(self, features: TemporalFeatures) -> torch.Tensor:
        """Convert `features` to a torch.Tensor (the caller adds the batch dimension)."""
        pass
    @abstractmethod
    def to_internal_type(self, features: torch.Tensor) -> TemporalFeatures:
        """
        Cast `features` to the representing type and remove batch dimension if required.

        Parameters
        ----------
        features: torch.Tensor, shape (batch, frames, dim)
            Batched temporal features.
        Returns
        -------
        new_features: SlidingWindowFeature or numpy.ndarray or torch.Tensor, shape (batch, frames, dim)
        """
        pass
|
class SlidingWindowFeatureFormatterState(TemporalFeatureFormatterState):
    """Formatter state for `SlidingWindowFeature` inputs.

    Remembers the chunk duration and start time so the sliding window
    resolution can be rebuilt when restoring the original type.
    """

    def __init__(self, duration: float):
        self.duration = duration
        self._cur_start_time = 0

    def to_tensor(self, features: SlidingWindowFeature) -> torch.Tensor:
        msg = 'Features sliding window duration and step must be equal'
        assert features.sliding_window.duration == features.sliding_window.step, msg
        # Remember where this chunk starts to rebuild the window later
        self._cur_start_time = features.sliding_window.start
        return torch.from_numpy(features.data)

    def to_internal_type(self, features: torch.Tensor) -> TemporalFeatures:
        batch_size, num_frames, _ = features.shape
        assert batch_size == 1, 'Batched SlidingWindowFeature objects are not supported'
        frame_duration = self.duration / num_frames
        window = SlidingWindow(start=self._cur_start_time, duration=frame_duration, step=frame_duration)
        return SlidingWindowFeature(features.squeeze(dim=0).cpu().numpy(), window)
|
class NumpyArrayFormatterState(TemporalFeatureFormatterState):
    """Formatter state for plain `numpy.ndarray` features."""

    def to_tensor(self, features: np.ndarray) -> torch.Tensor:
        # Zero-copy view over the underlying numpy buffer
        return torch.from_numpy(features)

    def to_internal_type(self, features: torch.Tensor) -> TemporalFeatures:
        return features.cpu().numpy()
|
class PytorchTensorFormatterState(TemporalFeatureFormatterState):
    """Formatter state for `torch.Tensor` features (identity — no conversion needed)."""

    def to_tensor(self, features: torch.Tensor) -> torch.Tensor:
        # Already a tensor
        return features

    def to_internal_type(self, features: torch.Tensor) -> TemporalFeatures:
        return features
|
class TemporalFeatureFormatter:
    """
    Manages the typing and format of temporal features.
    When casting temporal features as torch.Tensor, it remembers its
    type and format so it can lately restore it on other temporal features.
    """

    def __init__(self):
        self.state: Optional[TemporalFeatureFormatterState] = None

    def set_state(self, features: TemporalFeatures):
        """Record the formatter state matching the type of `features`."""
        if isinstance(features, SlidingWindowFeature):
            msg = 'Features sliding window duration and step must be equal'
            assert features.sliding_window.duration == features.sliding_window.step, msg
            chunk_duration = features.data.shape[0] * features.sliding_window.duration
            self.state = SlidingWindowFeatureFormatterState(chunk_duration)
        elif isinstance(features, np.ndarray):
            self.state = NumpyArrayFormatterState()
        elif isinstance(features, torch.Tensor):
            self.state = PytorchTensorFormatterState()
        else:
            msg = 'Unknown format. Provide one of SlidingWindowFeature, numpy.ndarray, torch.Tensor'
            raise ValueError(msg)

    def cast(self, features: TemporalFeatures) -> torch.Tensor:
        """
        Transform features into a `torch.Tensor` and add batch dimension if missing.

        Parameters
        ----------
        features: SlidingWindowFeature or numpy.ndarray or torch.Tensor
            Shape (frames, dim) or (batch, frames, dim)

        Returns
        -------
        features: torch.Tensor, shape (batch, frames, dim)
        """
        self.set_state(features)
        data = self.state.to_tensor(features)
        msg = 'Temporal features must be 2D or 3D'
        assert data.ndim in (2, 3), msg
        if data.ndim == 2:
            # Add the missing batch dimension
            data = data.unsqueeze(0)
        return data.float()

    def restore_type(self, features: torch.Tensor) -> TemporalFeatures:
        """
        Cast `features` back to the recorded type, removing the batch dimension
        if the original input did not have one.

        Parameters
        ----------
        features: torch.Tensor, shape (batch, frames, dim)
            Batched temporal features.
        Returns
        -------
        new_features: SlidingWindowFeature or numpy.ndarray or torch.Tensor
        """
        return self.state.to_internal_type(features)
|
def overlapped_speech_penalty(segmentation: torch.Tensor, gamma: float = 3, beta: float = 10):
    """Compute per-speaker frame weights that penalize overlapped speech.

    Parameters
    ----------
    segmentation: torch.Tensor, shape (batch, frames, speakers)
        Speaker activation scores.
    gamma: float
        Exponent to lower low-confidence predictions. Defaults to 3.
    beta: float
        Inverse softmax temperature to lower joint speaker activations. Defaults to 10.

    Returns
    -------
    weights: torch.Tensor, same shape as `segmentation`
        Weights floored at 1e-8.
    """
    # Sharp softmax across speakers: simultaneous speakers share probability mass
    probs = torch.softmax(beta * segmentation, dim=-1)
    weights = segmentation.pow(gamma) * probs.pow(gamma)
    # Floor tiny (or negative) weights to keep downstream divisions stable
    weights[weights < 1e-08] = 1e-08
    return weights
|
def normalize_embeddings(embeddings: torch.Tensor, norm: (float | torch.Tensor)=1) -> torch.Tensor:
if (embeddings.ndim == 2):
embeddings = embeddings.unsqueeze(0)
if isinstance(norm, torch.Tensor):
(batch_size1, num_speakers1, _) = norm.shape
(batch_size2, num_speakers2, _) = embeddings.shape
assert ((batch_size1 == batch_size2) and (num_speakers1 == num_speakers2))
emb_norm = torch.norm(embeddings, p=2, dim=(- 1), keepdim=True)
return ((norm * embeddings) / emb_norm)
|
class StreamingInference():
"Performs inference in real time given a pipeline and an audio source.\n Streams an audio source to an online speaker diarization pipeline.\n It allows users to attach a chain of operations in the form of hooks.\n\n Parameters\n ----------\n pipeline: StreamingPipeline\n Configured speaker diarization pipeline.\n source: AudioSource\n Audio source to be read and streamed.\n batch_size: int\n Number of inputs to send to the pipeline at once.\n Defaults to 1.\n do_profile: bool\n If True, compute and report the processing time of the pipeline.\n Defaults to True.\n do_plot: bool\n If True, draw predictions in a moving plot.\n Defaults to False.\n show_progress: bool\n If True, show a progress bar.\n Defaults to True.\n progress_bar: Optional[diart.progress.ProgressBar]\n Progress bar.\n If description is not provided, set to 'Streaming <source uri>'.\n Defaults to RichProgressBar().\n "
def __init__(self, pipeline: blocks.Pipeline, source: src.AudioSource, batch_size: int=1, do_profile: bool=True, do_plot: bool=False, show_progress: bool=True, progress_bar: Optional[ProgressBar]=None):
self.pipeline = pipeline
self.source = source
self.batch_size = batch_size
self.do_profile = do_profile
self.do_plot = do_plot
self.show_progress = show_progress
self.accumulator = PredictionAccumulator(self.source.uri)
self.unit = ('chunk' if (self.batch_size == 1) else 'batch')
self._observers = []
chunk_duration = self.pipeline.config.duration
step_duration = self.pipeline.config.step
sample_rate = self.pipeline.config.sample_rate
self.num_chunks = None
if (self.source.duration is not None):
numerator = ((self.source.duration - chunk_duration) + step_duration)
self.num_chunks = int(np.ceil((numerator / step_duration)))
self._pbar = progress_bar
if self.show_progress:
if (self._pbar is None):
self._pbar = RichProgressBar()
self._pbar.create(total=self.num_chunks, description=f'Streaming {self.source.uri}', unit=self.unit)
self._chrono = utils.Chronometer(self.unit, self._pbar)
self.stream = self.source.stream
self.stream = self.stream.pipe(dops.rearrange_audio_stream(chunk_duration, step_duration, source.sample_rate))
if (sample_rate != self.source.sample_rate):
msg = f"Audio source has sample rate {self.source.sample_rate}, but pipeline's is {sample_rate}. Will resample."
logging.warning(msg)
self.stream = self.stream.pipe(ops.map(blocks.Resample(self.source.sample_rate, sample_rate, self.pipeline.config.device)))
self.stream = self.stream.pipe(ops.buffer_with_count(count=self.batch_size))
if self.do_profile:
self.stream = self.stream.pipe(ops.do_action(on_next=(lambda _: self._chrono.start())), ops.map(self.pipeline), ops.do_action(on_next=(lambda _: self._chrono.stop())))
else:
self.stream = self.stream.pipe(ops.map(self.pipeline))
self.stream = self.stream.pipe(ops.flat_map((lambda results: rx.from_iterable(results))), ops.do(self.accumulator))
if show_progress:
self.stream = self.stream.pipe(ops.do_action(on_next=(lambda _: self._pbar.update())))
def _close_pbar(self):
if (self._pbar is not None):
self._pbar.close()
def _close_chronometer(self):
if self.do_profile:
if self._chrono.is_running:
self._chrono.stop(do_count=False)
self._chrono.report()
def attach_hooks(self, *hooks: Callable[([Tuple[(Annotation, SlidingWindowFeature)]], None)]):
    """Attach hooks to the pipeline.

    Parameters
    ----------
    *hooks: (Tuple[Annotation, SlidingWindowFeature]) -> None
        Hook functions to consume emitted annotations and audio.
    """
    actions = [ops.do_action(hook) for hook in hooks]
    self.stream = self.stream.pipe(*actions)
def attach_observers(self, *observers: Observer):
    """Attach rx observers to the pipeline.

    Parameters
    ----------
    *observers: Observer
        Observers to consume emitted annotations and audio.
    """
    self.stream = self.stream.pipe(*(ops.do(observer) for observer in observers))
    # Keep track of them so errors can be forwarded manually later
    self._observers.extend(observers)
def _handle_error(self, error: BaseException):
    """Forward `error` to observers, release resources, and report it."""
    # Rx does not always propagate on_error to observers, so do it manually
    for observer in self._observers:
        observer.on_error(error)
    # Always release the audio source, even on bad termination
    self.source.close()
    # A closed plot window or Ctrl+C are expected ways to stop: no traceback
    expected = isinstance(error, (WindowClosedException, KeyboardInterrupt))
    if not expected:
        print_exc()
    self._close_pbar()
    self._close_chronometer()
def _handle_completion(self):
    # Stream finished normally: release the progress bar and print timings
    self._close_pbar()
    self._close_chronometer()
def __call__(self) -> Annotation:
    """Stream audio chunks from `source` to `pipeline`.

    Returns
    -------
    predictions: Annotation
        Speaker diarization pipeline predictions accumulated over the stream.
    """
    if self.show_progress:
        self._pbar.start()
    config = self.pipeline.config
    observable = self.stream
    if self.do_plot:
        # Buffer the output so the live plot stays aligned with real time
        observable = self.stream.pipe(dops.buffer_output(duration=config.duration, step=config.step, latency=config.latency, sample_rate=config.sample_rate), ops.do(StreamingPlot(config.duration, config.latency)))
    observable.subscribe(on_error=self._handle_error, on_completed=self._handle_completion)
    # NOTE(review): assumes read() blocks until the stream ends — confirm;
    # otherwise the returned prediction could be incomplete.
    self.source.read()
    return self.accumulator.get_prediction()
|
class Benchmark():
    """Run an online speaker diarization pipeline on a set of audio files in
    batches, writing predictions to an output directory and, when a reference
    is given, computing the average diarization error rate.

    Parameters
    ----------
    speech_path: Text | Path
        Directory with audio files.
    reference_path: Text | Path | None
        Directory with reference RTTM files (same names as audio files).
        If None, performance will not be calculated. Defaults to None.
    output_path: Text | Path | None
        Output directory to store predictions in RTTM format.
        If None, predictions will not be written to disk. Defaults to None.
    show_progress: bool
        Whether to show progress bars. Defaults to True.
    show_report: bool
        Whether to print a performance report to stdout. Defaults to True.
    batch_size: int
        Inference batch size. If < 2 run in real time; if >= 2 pre-calculate
        segmentation and embeddings, running the rest in real time.
        Defaults to 32.
    """
    def __init__(self, speech_path: Union[(Text, Path)], reference_path: Optional[Union[(Text, Path)]]=None, output_path: Optional[Union[(Text, Path)]]=None, show_progress: bool=True, show_report: bool=True, batch_size: int=32):
        self.speech_path = Path(speech_path).expanduser()
        assert self.speech_path.is_dir(), 'Speech path must be a directory'
        # Require at least one observable result (metrics and/or RTTM output)
        msg = 'Benchmark expected reference path, output path or both'
        assert ((reference_path is not None) or (output_path is not None)), msg
        self.reference_path = reference_path
        if (reference_path is not None):
            self.reference_path = Path(self.reference_path).expanduser()
            assert self.reference_path.is_dir(), 'Reference path must be a directory'
        self.output_path = output_path
        if (self.output_path is not None):
            self.output_path = Path(output_path).expanduser()
            self.output_path.mkdir(parents=True, exist_ok=True)
        self.show_progress = show_progress
        self.show_report = show_report
        self.batch_size = batch_size

    def get_file_paths(self) -> List[Path]:
        """Return the path for each file in the benchmark.

        Returns
        -------
        paths: List[Path]
            List of audio file paths.
        """
        # NOTE(review): returns every directory entry — assumes speech_path
        # contains only audio files; verify against callers.
        return list(self.speech_path.iterdir())

    def run_single(self, pipeline: blocks.Pipeline, filepath: Path, progress_bar: ProgressBar) -> Annotation:
        """Run a given pipeline on a given file.
        Note that this method does NOT reset the state of the pipeline
        before execution.

        Parameters
        ----------
        pipeline: StreamingPipeline
            Speaker diarization pipeline to run.
        filepath: Path
            Path to the target file.
        progress_bar: diart.progress.ProgressBar
            An object to manage the progress of this run.

        Returns
        -------
        prediction: Annotation
            Pipeline prediction for the given file.
        """
        # Pad the file to align the sliding window, then shift output
        # timestamps back by the left padding amount
        padding = pipeline.config.get_file_padding(filepath)
        source = src.FileAudioSource(filepath, pipeline.config.sample_rate, padding, pipeline.config.step)
        pipeline.set_timestamp_shift((- padding[0]))
        inference = StreamingInference(pipeline, source, self.batch_size, do_profile=False, do_plot=False, show_progress=self.show_progress, progress_bar=progress_bar)
        pred = inference()
        pred.uri = source.uri
        if (self.output_path is not None):
            with open((self.output_path / f'{source.uri}.rttm'), 'w') as out_file:
                pred.write_rttm(out_file)
        return pred

    def evaluate(self, predictions: List[Annotation], metric: BaseMetric) -> Union[(pd.DataFrame, List[Annotation])]:
        """If a reference path was provided, compute `metric` over
        `predictions`; otherwise return the predictions unchanged.

        Parameters
        ----------
        predictions: List[Annotation]
            Predictions to evaluate.
        metric: BaseMetric
            Evaluation metric from pyannote.metrics.

        Returns
        -------
        report_or_predictions: Union[pd.DataFrame, List[Annotation]]
            A performance report as a pandas `DataFrame` if a reference
            path was given. Otherwise return the same predictions.
        """
        if (self.reference_path is not None):
            progress_bar = TQDMProgressBar(f'Computing {metric.name}', leave=False)
            progress_bar.create(total=len(predictions), unit='file')
            progress_bar.start()
            for hyp in predictions:
                # The reference RTTM shares its file name (uri) with the prediction
                ref = load_rttm((self.reference_path / f'{hyp.uri}.rttm')).popitem()[1]
                metric(ref, hyp)
                progress_bar.update()
            progress_bar.close()
            return metric.report(display=self.show_report)
        return predictions

    def __call__(self, pipeline_class: type, config: blocks.PipelineConfig, metric: Optional[BaseMetric]=None) -> Union[(pd.DataFrame, List[Annotation])]:
        """Run a given pipeline on a set of audio files.
        The internal state of the pipeline is reset before each file.

        Parameters
        ----------
        pipeline_class: class
            Class from the StreamingPipeline hierarchy; a single instance
            is created and reused across files.
        config: StreamingConfig
            Streaming pipeline configuration.
        metric: Optional[BaseMetric]
            Evaluation metric from pyannote.metrics.
            Defaults to the pipeline's suggested metric.

        Returns
        -------
        performance: pandas.DataFrame or List[Annotation]
            If reference annotations are given, a DataFrame with detailed
            performance on each file as well as average performance.
            If no reference annotations, a list of predictions.
        """
        audio_file_paths = self.get_file_paths()
        num_audio_files = len(audio_file_paths)
        # One pipeline instance is reused; reset() clears state per file
        pipeline = pipeline_class(config)
        predictions = []
        for (i, filepath) in enumerate(audio_file_paths):
            pipeline.reset()
            desc = f'Streaming {filepath.stem} ({(i + 1)}/{num_audio_files})'
            progress = TQDMProgressBar(desc, leave=False, do_close=True)
            predictions.append(self.run_single(pipeline, filepath, progress))
        # Fall back to the pipeline's suggested metric when none is given
        metric = (pipeline.suggest_metric() if (metric is None) else metric)
        return self.evaluate(predictions, metric)
|
class Parallelize():
    """Wrapper to parallelize the execution of a `Benchmark` instance.
    Note that models will be copied in each worker instead of being reused.

    Parameters
    ----------
    benchmark: Benchmark
        Benchmark instance to execute in parallel.
    num_workers: int
        Number of parallel workers. Defaults to 4.
    """
    def __init__(self, benchmark: Benchmark, num_workers: int=4):
        self.benchmark = benchmark
        self.num_workers = num_workers

    def run_single_job(self, pipeline_class: type, config: blocks.PipelineConfig, filepath: Path, description: Text) -> Annotation:
        """Build and run a pipeline on a single file inside a worker process.
        Configure execution to show progress alongside parallel runs.

        Parameters
        ----------
        pipeline_class: class
            Class from the StreamingPipeline hierarchy to instantiate.
        config: StreamingConfig
            Streaming pipeline configuration.
        filepath: Path
            Path to the target file.
        description: Text
            Description to show in the parallel progress bar.

        Returns
        -------
        prediction: Annotation
            Pipeline prediction for the given file.
        """
        # The worker's index in the pool (parsed from its "Process-N" name)
        # selects which progress-bar row it draws into
        idx_process = (int(current_process().name.split('-')[1]) - 1)
        # Models are instantiated per worker, not shared across processes
        pipeline = pipeline_class(config)
        progress = TQDMProgressBar(description, leave=False, position=idx_process, do_close=True)
        return self.benchmark.run_single(pipeline, filepath, progress)

    def __call__(self, pipeline_class: type, config: blocks.PipelineConfig, metric: Optional[BaseMetric]=None) -> Union[(pd.DataFrame, List[Annotation])]:
        """Run a given pipeline on a set of audio files in parallel.
        Each worker builds and runs the pipeline on a different file.

        Parameters
        ----------
        pipeline_class: class
            Class from the StreamingPipeline hierarchy.
        config: StreamingConfig
            Streaming pipeline configuration.
        metric: Optional[BaseMetric]
            Evaluation metric from pyannote.metrics.
            Defaults to the pipeline's suggested metric.

        Returns
        -------
        performance: pandas.DataFrame or List[Annotation]
            A detailed performance DataFrame when references are given,
            otherwise a list of predictions.
        """
        audio_file_paths = self.benchmark.get_file_paths()
        num_audio_files = len(audio_file_paths)
        # 'spawn' avoids forking model/CUDA state; ignore if already set
        try:
            torch.multiprocessing.set_start_method('spawn')
        except RuntimeError:
            pass
        freeze_support()
        # Share tqdm's lock so concurrent progress bars don't garble output
        pool = Pool(processes=self.num_workers, initargs=(RLock(),), initializer=tqdm.set_lock)
        arg_list = [(pipeline_class, config, filepath, f'Streaming {filepath.stem} ({(i + 1)}/{num_audio_files})') for (i, filepath) in enumerate(audio_file_paths)]
        jobs = [pool.apply_async(self.run_single_job, args=args) for args in arg_list]
        pool.close()
        # Wait for every worker; results come back in submission order
        predictions = [job.get() for job in jobs]
        metric = (pipeline_class.suggest_metric() if (metric is None) else metric)
        return self.benchmark.evaluate(predictions, metric)
|
class PowersetAdapter(nn.Module):
    """Wrap a powerset segmentation model so it emits multilabel outputs."""
    def __init__(self, segmentation_model: nn.Module):
        super().__init__()
        self.model = segmentation_model
        specs = self.model.specifications
        # Powerset(max speakers per chunk, max speakers per frame)
        self.powerset = Powerset(len(specs.classes), specs.powerset_max_classes)

    def forward(self, waveform: torch.Tensor) -> torch.Tensor:
        """Run the wrapped model and convert its powerset output to multilabel."""
        powerset_output = self.model(waveform)
        return self.powerset.to_multilabel(powerset_output)
|
class PyannoteLoader:
    """Lazy loader for pyannote models.

    Tries to load a full pyannote `Model` first; if that fails, falls back
    to pretrained speaker-embedding weights.
    """
    def __init__(self, model_info, hf_token: Union[(Text, bool, None)]=True):
        super().__init__()
        self.model_info = model_info
        self.hf_token = hf_token

    def __call__(self) -> Callable:
        try:
            model = Model.from_pretrained(self.model_info, use_auth_token=self.hf_token)
            specs = getattr(model, 'specifications', None)
            # Powerset checkpoints are wrapped so callers see multilabel outputs
            if specs is not None and specs.powerset:
                model = PowersetAdapter(model)
            return model
        except (HTTPError, ModuleNotFoundError):
            # Not loadable as a pyannote Model: fall through to embeddings
            pass
        return PretrainedSpeakerEmbedding(self.model_info, use_auth_token=self.hf_token)
|
class ONNXLoader:
    """Factory that builds an `ONNXModel` from a file path and I/O names."""
    def __init__(self, path: (str | Path), input_names: List[str], output_name: str):
        super().__init__()
        # Normalize to a Path so downstream code can rely on Path semantics
        self.path = Path(path)
        self.input_names = input_names
        self.output_name = output_name

    def __call__(self) -> ONNXModel:
        return ONNXModel(self.path, self.input_names, self.output_name)
|
class ONNXModel():
    """Run an ONNX graph via onnxruntime with torch tensors in and out.

    The inference session is rebuilt whenever the target device type changes.
    """
    def __init__(self, path: Path, input_names: List[str], output_name: str):
        super().__init__()
        # Path to the .onnx file on disk
        self.path = path
        # Graph input names, in the order positional call arguments map to them
        self.input_names = input_names
        # Name of the single graph output to fetch
        self.output_name = output_name
        self.device = torch.device('cpu')
        self.session = None
        self.recreate_session()

    @property
    def execution_provider(self) -> str:
        """Map the current torch device onto an onnxruntime provider name."""
        device = ('CUDA' if (self.device.type == 'cuda') else 'CPU')
        return f'{device}ExecutionProvider'

    def recreate_session(self):
        """(Re)build the inference session for the current device."""
        options = ort.SessionOptions()
        options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
        self.session = ort.InferenceSession(self.path, sess_options=options, providers=[self.execution_provider])

    def to(self, device: torch.device) -> ONNXModel:
        """Torch-like device move; rebuilds the session if the device type changed."""
        if (device.type != self.device.type):
            self.device = device
            self.recreate_session()
        return self

    def __call__(self, *args) -> torch.Tensor:
        """Run the model; positional args map onto `input_names` in order."""
        # onnxruntime consumes numpy float32, so convert each torch argument
        inputs = {name: arg.cpu().numpy().astype(np.float32) for (name, arg) in zip(self.input_names, args)}
        output = self.session.run([self.output_name], inputs)[0]
        # Return the output on the same device as the first input tensor
        return torch.from_numpy(output).float().to(args[0].device)
|
class LazyModel(ABC):
    """Defer model construction until first use.

    Wraps a zero-argument loader callable; the model it returns is built
    on demand and cached for subsequent calls.
    """
    def __init__(self, loader: Callable[([], Callable)]):
        super().__init__()
        self.get_model = loader
        self.model: Optional[Callable] = None

    def is_in_memory(self) -> bool:
        """Return whether the model has been loaded into memory"""
        return self.model is not None

    def load(self):
        # Build the model exactly once; later calls are no-ops
        if self.model is None:
            self.model = self.get_model()

    def to(self, device: torch.device) -> LazyModel:
        self.load()
        self.model = self.model.to(device)
        return self

    def __call__(self, *args, **kwargs):
        self.load()
        return self.model(*args, **kwargs)

    def eval(self) -> LazyModel:
        self.load()
        # Only torch modules have an eval mode; plain callables are left as-is
        if isinstance(self.model, nn.Module):
            self.model.eval()
        return self
|
class SegmentationModel(LazyModel):
    """Minimal interface for a segmentation model."""
    @staticmethod
    def from_pyannote(model, use_hf_token: Union[(Text, bool, None)]=True) -> 'SegmentationModel':
        """Return a `SegmentationModel` wrapping a pyannote model.

        Parameters
        ----------
        model: pyannote.PipelineModel
            The pyannote.audio model to fetch.
        use_hf_token: str | bool, optional
            Huggingface access token used when downloading the model.
            If True, use the huggingface-cli login token.

        Returns
        -------
        wrapper: SegmentationModel
        """
        assert IS_PYANNOTE_AVAILABLE, 'No pyannote.audio installation found'
        return SegmentationModel(PyannoteLoader(model, use_hf_token))

    @staticmethod
    def from_onnx(model_path: Union[(str, Path)], input_name: str='waveform', output_name: str='segmentation') -> 'SegmentationModel':
        """Return a `SegmentationModel` backed by an ONNX file."""
        assert IS_ONNX_AVAILABLE, 'No ONNX installation found'
        return SegmentationModel(ONNXLoader(model_path, [input_name], output_name))

    @staticmethod
    def from_pretrained(model, use_hf_token: Union[(Text, bool, None)]=True) -> 'SegmentationModel':
        """Dispatch to ONNX loading for `.onnx` paths, pyannote otherwise."""
        is_path_like = isinstance(model, (str, Path))
        if is_path_like and Path(model).name.endswith('.onnx'):
            return SegmentationModel.from_onnx(model)
        return SegmentationModel.from_pyannote(model, use_hf_token)

    def __call__(self, waveform: torch.Tensor) -> torch.Tensor:
        """Forward pass of the segmentation model.

        Parameters
        ----------
        waveform: torch.Tensor, shape (batch, channels, samples)

        Returns
        -------
        speaker_segmentation: torch.Tensor, shape (batch, frames, speakers)
        """
        return super().__call__(waveform)
|
class EmbeddingModel(LazyModel):
    """Minimal interface for an embedding model."""
    @staticmethod
    def from_pyannote(model, use_hf_token: Union[(Text, bool, None)]=True) -> 'EmbeddingModel':
        """Return an `EmbeddingModel` wrapping a pyannote model.

        Parameters
        ----------
        model: pyannote.PipelineModel
            The pyannote.audio model to fetch.
        use_hf_token: str | bool, optional
            Huggingface access token used when downloading the model.
            If True, use the huggingface-cli login token.

        Returns
        -------
        wrapper: EmbeddingModel
        """
        assert IS_PYANNOTE_AVAILABLE, 'No pyannote.audio installation found'
        return EmbeddingModel(PyannoteLoader(model, use_hf_token))

    @staticmethod
    def from_onnx(model_path: Union[(str, Path)], input_names: (List[str] | None)=None, output_name: str='embedding') -> 'EmbeddingModel':
        """Return an `EmbeddingModel` backed by an ONNX file."""
        assert IS_ONNX_AVAILABLE, 'No ONNX installation found'
        # Default graph inputs: the waveform plus per-frame weights
        names = input_names or ['waveform', 'weights']
        return EmbeddingModel(ONNXLoader(model_path, names, output_name))

    @staticmethod
    def from_pretrained(model, use_hf_token: Union[(Text, bool, None)]=True) -> 'EmbeddingModel':
        """Dispatch to ONNX loading for `.onnx` paths, pyannote otherwise."""
        is_path_like = isinstance(model, (str, Path))
        if is_path_like and Path(model).name.endswith('.onnx'):
            return EmbeddingModel.from_onnx(model)
        return EmbeddingModel.from_pyannote(model, use_hf_token)

    def __call__(self, waveform: torch.Tensor, weights: Optional[torch.Tensor]=None) -> torch.Tensor:
        """Forward pass of the embedding model with optional weights.

        Parameters
        ----------
        waveform: torch.Tensor, shape (batch, channels, samples)
        weights: Optional[torch.Tensor], shape (batch, frames)
            Temporal weights for each sample in the batch. Defaults to no weights.

        Returns
        -------
        speaker_embeddings: torch.Tensor, shape (batch, embedding_dim)
        """
        embeddings = super().__call__(waveform, weights)
        # Some backends return numpy arrays; normalize to torch tensors
        if isinstance(embeddings, np.ndarray):
            embeddings = torch.from_numpy(embeddings)
        return embeddings
|
class Optimizer():
    """Tune pipeline hyper-parameters with Optuna against a benchmark.

    Each trial builds a configuration from suggested hyper-parameter values,
    benchmarks it on `speech_path` against `reference_path`, and reports the
    metric's TOTAL value as the objective.
    """
    def __init__(self, pipeline_class: type, speech_path: Union[(Text, Path)], reference_path: Union[(Text, Path)], study_or_path: Union[(FilePath, Study)], batch_size: int=32, hparams: Optional[Sequence[blocks.base.HyperParameter]]=None, base_config: Optional[blocks.PipelineConfig]=None, do_kickstart_hparams: bool=True, metric: Optional[BaseMetric]=None, direction: Literal[('minimize', 'maximize')]='minimize'):
        self.pipeline_class = pipeline_class
        # The report is consumed programmatically, so don't print it
        self.benchmark = Benchmark(speech_path, reference_path, show_progress=True, show_report=False, batch_size=batch_size)
        self.metric = metric
        self.direction = direction
        self.base_config = base_config
        self.do_kickstart_hparams = do_kickstart_hparams
        if (self.base_config is None):
            # No baseline given: use config defaults and skip the kickstart trial
            self.base_config = self.pipeline_class.get_config_class()()
            self.do_kickstart_hparams = False
        self.hparams = hparams
        if (self.hparams is None):
            self.hparams = self.pipeline_class.hyper_parameters()
        # Every tuned hyper-parameter must be an attribute of the config
        possible_hparams = vars(self.base_config)
        for param in self.hparams:
            msg = f'Hyper-parameter {param.name} not found in configuration {self.base_config.__class__.__name__}'
            assert (param.name in possible_hparams), msg
        self._progress: Optional[tqdm] = None
        if isinstance(study_or_path, Study):
            self.study = study_or_path
        elif (isinstance(study_or_path, str) or isinstance(study_or_path, Path)):
            # Persist the study in a sqlite DB named after the directory
            study_or_path = Path(study_or_path)
            self.study = create_study(storage=('sqlite:///' + str((study_or_path / f'{study_or_path.stem}.db'))), sampler=TPESampler(), study_name=study_or_path.stem, direction=self.direction, load_if_exists=True)
        else:
            msg = f'Expected Study object or path-like, but got {type(study_or_path).__name__}'
            raise ValueError(msg)

    @property
    def best_performance(self):
        # Best objective value observed across all trials so far
        return self.study.best_value

    @property
    def best_hparams(self):
        # Hyper-parameter values of the best trial so far
        return self.study.best_params

    def _callback(self, study: Study, trial: FrozenTrial):
        """Advance the progress bar and display the running best values."""
        if (self._progress is None):
            return
        self._progress.update(1)
        self._progress.set_description(f'Trial {(trial.number + 1)}')
        values = {'best_perf': study.best_value}
        for (name, value) in study.best_params.items():
            values[f'best_{name}'] = value
        self._progress.set_postfix(OrderedDict(values))

    def objective(self, trial: Trial) -> float:
        """Benchmark one hyper-parameter combination; return its TOTAL metric."""
        # NOTE(review): vars() returns the live __dict__ of base_config, so
        # these assignments mutate the base config between trials — consider
        # copying (dict(vars(...))) if that is not intended.
        trial_config = vars(self.base_config)
        for hparam in self.hparams:
            trial_config[hparam.name] = trial.suggest_uniform(hparam.name, hparam.low, hparam.high)
        if trial.should_prune():
            raise TrialPruned()
        config = self.base_config.__class__(**trial_config)
        metric = self.metric
        if (metric is None):
            metric = self.pipeline_class.suggest_metric()
        report = self.benchmark(self.pipeline_class, config, metric)
        # The '%' column of the TOTAL row holds the aggregate metric value
        return report.loc[('TOTAL', metric.name)]['%']

    def __call__(self, num_iter: int, show_progress: bool=True):
        """Run `num_iter` optimization trials, optionally with a progress bar."""
        self._progress = None
        if show_progress:
            self._progress = trange(num_iter)
            # Resume numbering from the last persisted trial, if any
            last_trial = (- 1)
            if self.study.trials:
                last_trial = self.study.trials[(- 1)].number
            self._progress.set_description(f'Trial {(last_trial + 1)}')
        # Seed the search with the base config's values when requested
        if self.do_kickstart_hparams:
            self.study.enqueue_trial({param.name: getattr(self.base_config, param.name) for param in self.hparams}, skip_if_exists=True)
        self.study.optimize(self.objective, num_iter, callbacks=[self._callback])
|
class ProgressBar(ABC):
    """Abstract interface for progress reporting during streaming inference."""
    @abstractmethod
    def create(self, total: int, description: Optional[Text]=None, unit: Text='it', **kwargs):
        pass

    @abstractmethod
    def start(self):
        pass

    @abstractmethod
    def update(self, n: int=1):
        pass

    @abstractmethod
    def write(self, text: Text):
        pass

    @abstractmethod
    def stop(self):
        pass

    @abstractmethod
    def close(self):
        pass

    @property
    @abstractmethod
    def default_description(self) -> Text:
        """Fallback description when none is configured or provided."""
        pass

    @property
    @abstractmethod
    def initial_description(self) -> Optional[Text]:
        """Description fixed at construction time, if any."""
        pass

    def resolve_description(self, new_description: Optional[Text]=None) -> Text:
        """Pick the effective description: the construction-time value wins,
        then the one passed here, then the default."""
        if self.initial_description is not None:
            return self.initial_description
        return self.default_description if new_description is None else new_description
|
class RichProgressBar(ProgressBar):
    """ProgressBar implementation backed by rich.progress.

    Parameters
    ----------
    description: Optional[Text]
        Fixed description; when set it overrides any description passed
        later to `create()`.
    color: Text
        Rich color tag applied to descriptions. Defaults to 'green'.
    leave: bool
        Keep the bar on screen after completion. Defaults to True.
    do_close: bool
        Whether `close()` actually stops the bar. Defaults to True.
    """
    def __init__(self, description: Optional[Text]=None, color: Text='green', leave: bool=True, do_close: bool=True):
        self.description = description
        self.color = color
        self.do_close = do_close
        # transient=True makes rich remove the bar once finished
        self.bar = Progress(transient=(not leave))
        self.bar.start()
        self.task_id: Optional[TaskID] = None

    @property
    def default_description(self) -> Text:
        return f'[{self.color}]Streaming'

    @property
    def initial_description(self) -> Optional[Text]:
        if (self.description is not None):
            return f'[{self.color}]{self.description}'
        return self.description

    def create(self, total: int, description: Optional[Text]=None, unit: Text='it', **kwargs):
        """Create the underlying rich task (first call only)."""
        if (self.task_id is None):
            # Fix: only colorize a description that was actually provided.
            # Previously a None description was formatted into the literal
            # text "[green]None", which then bypassed the fallback to
            # `default_description` inside `resolve_description`.
            colored = (None if (description is None) else f'[{self.color}]{description}')
            self.task_id = self.bar.add_task(self.resolve_description(colored), start=False, total=total, completed=0, visible=True, **kwargs)

    def start(self):
        assert (self.task_id is not None)
        self.bar.start_task(self.task_id)

    def update(self, n: int=1):
        assert (self.task_id is not None)
        self.bar.update(self.task_id, advance=n)

    def write(self, text: Text):
        # Print through rich so markup and the live bar are handled correctly
        rich.print(text)

    def stop(self):
        assert (self.task_id is not None)
        self.bar.stop_task(self.task_id)

    def close(self):
        if self.do_close:
            self.bar.stop()
|
class TQDMProgressBar(ProgressBar):
    """ProgressBar implementation backed by tqdm.

    The underlying tqdm bar is created lazily by `create()` and reused.
    """
    def __init__(self, description: Optional[Text]=None, leave: bool=True, position: Optional[int]=None, do_close: bool=True):
        self.description = description
        self.leave = leave
        self.position = position
        self.do_close = do_close
        self.pbar: Optional[tqdm] = None

    @property
    def default_description(self) -> Text:
        return 'Streaming'

    @property
    def initial_description(self) -> Optional[Text]:
        return self.description

    def create(self, total: int, description: Optional[Text]=None, unit: Optional[Text]='it', **kwargs):
        # Only the first call builds the bar; later calls are no-ops
        if self.pbar is not None:
            return
        self.pbar = tqdm(desc=self.resolve_description(description), total=total, unit=unit, leave=self.leave, position=self.position, **kwargs)

    def start(self):
        # tqdm starts ticking at creation time; nothing to do here
        pass

    def update(self, n: int=1):
        assert (self.pbar is not None)
        self.pbar.update(n)

    def write(self, text: Text):
        # Print without breaking the bar rendering
        tqdm.write(text)

    def stop(self):
        # tqdm has no pause concept: stopping closes the bar
        self.close()

    def close(self):
        if self.do_close:
            assert (self.pbar is not None)
            self.pbar.close()
|
class WindowClosedException(Exception):
    """Raised to stop streaming when the live visualization window is closed."""
    pass
|
def _extract_prediction(value: Union[(Tuple, Annotation)]) -> Annotation:
if isinstance(value, tuple):
return value[0]
if isinstance(value, Annotation):
return value
msg = f'Expected tuple or Annotation, but got {type(value)}'
raise ValueError(msg)
|
class RTTMWriter(Observer):
    """Incrementally write streamed predictions to an RTTM file.

    Turns are appended as they arrive; on completion (or error) the file is
    rewritten with close same-speaker turns stitched together.
    """
    def __init__(self, uri: Text, path: Union[(Path, Text)], patch_collar: float=0.05):
        super().__init__()
        self.uri = uri
        self.patch_collar = patch_collar
        self.path = Path(path).expanduser()
        # Start from a clean file: drop any previous output
        if self.path.exists():
            self.path.unlink()

    def patch(self):
        """Stitch same-speaker turns that are close to each other."""
        if not self.path.exists():
            return
        annotations = list(load_rttm(self.path).values())
        if not annotations:
            return
        annotation = annotations[0]
        annotation.uri = self.uri
        with open(self.path, 'w') as file:
            annotation.support(self.patch_collar).write_rttm(file)

    def on_next(self, value: Union[(Tuple, Annotation)]):
        prediction = _extract_prediction(value)
        prediction.uri = self.uri
        # Append so partial results survive an interrupted run
        with open(self.path, 'a') as file:
            prediction.write_rttm(file)

    def on_error(self, error: Exception):
        self.patch()

    def on_completed(self):
        self.patch()
|
class PredictionAccumulator(Observer):
    """Merge streamed annotations into one growing prediction.

    Parameters
    ----------
    uri: Optional[Text]
        URI assigned to every incoming prediction and the final result.
    patch_collar: float
        Collar (seconds) used to stitch close same-speaker turns.
    """
    def __init__(self, uri: Optional[Text]=None, patch_collar: float=0.05):
        super().__init__()
        self.uri = uri
        self.patch_collar = patch_collar
        self._prediction: Optional[Annotation] = None

    def patch(self):
        """Stitch same-speaker turns that are close to each other."""
        if (self._prediction is not None):
            self._prediction = self._prediction.support(self.patch_collar)

    def get_prediction(self) -> Annotation:
        # May return None when nothing was ever accumulated
        self.patch()
        return self._prediction

    def on_next(self, value: Union[(Tuple, Annotation)]):
        prediction = _extract_prediction(value)
        prediction.uri = self.uri
        if (self._prediction is None):
            self._prediction = prediction
        else:
            # Merge the new turns into the accumulated annotation in place
            self._prediction.update(prediction)

    def on_error(self, error: Exception):
        self.patch()

    def on_completed(self):
        self.patch()
|
class StreamingPlot(Observer):
    """Live matplotlib visualization of streaming diarization output.

    Parameters
    ----------
    duration: float
        Width of the plotted window in seconds.
    latency: float
        Pipeline latency; the plot's right edge is real time minus latency.
    visualization: 'slide' | 'accumulate'
        'slide' keeps a moving window; 'accumulate' grows from time 0.
    reference: Optional[Path | Text]
        Optional RTTM file with a reference annotation to plot alongside.
    """
    def __init__(self, duration: float, latency: float, visualization: Literal[('slide', 'accumulate')]='slide', reference: Optional[Union[(Path, Text)]]=None):
        super().__init__()
        assert (visualization in ['slide', 'accumulate'])
        self.visualization = visualization
        self.reference = reference
        if (self.reference is not None):
            # Load the first annotation found in the RTTM file
            self.reference = list(load_rttm(reference).values())[0]
        self.window_duration = duration
        self.latency = latency
        # The figure is created lazily on the first on_next call
        (self.figure, self.axs, self.num_axs) = (None, None, (- 1))
        self.window_closed = False

    def _on_window_closed(self, event):
        # Checked in on_next to abort the stream via WindowClosedException
        self.window_closed = True

    def _init_num_axs(self):
        if (self.num_axs == (- 1)):
            # Output + audio, plus one extra axis when a reference is plotted
            self.num_axs = 2
            if (self.reference is not None):
                self.num_axs += 1

    def _init_figure(self):
        self._init_num_axs()
        (self.figure, self.axs) = plt.subplots(self.num_axs, 1, figsize=(10, (2 * self.num_axs)))
        if (self.num_axs == 1):
            # plt.subplots returns a bare Axes for a single plot; normalize
            self.axs = [self.axs]
        self.figure.canvas.mpl_connect('close_event', self._on_window_closed)

    def _clear_axs(self):
        for i in range(self.num_axs):
            self.axs[i].clear()

    def get_plot_bounds(self, real_time: float) -> Segment:
        """Visible time range: ends at real time minus latency."""
        start_time = 0
        end_time = (real_time - self.latency)
        if (self.visualization == 'slide'):
            start_time = max(0.0, (end_time - self.window_duration))
        return Segment(start_time, end_time)

    def on_next(self, values: Tuple[(Annotation, SlidingWindowFeature, float)]):
        if self.window_closed:
            raise WindowClosedException
        (prediction, waveform, real_time) = values
        if (self.figure is None):
            self._init_figure()
        self._clear_axs()
        notebook.crop = self.get_plot_bounds(real_time)
        if (self.reference is not None):
            # Map predicted speakers onto reference labels so colors match
            metric = DiarizationErrorRate()
            mapping = metric.optimal_mapping(self.reference, prediction)
            prediction.rename_labels(mapping=mapping, copy=False)
        notebook.plot_annotation(prediction, self.axs[0])
        self.axs[0].set_title('Output')
        notebook.plot_feature(waveform, self.axs[1])
        self.axs[1].set_title('Audio')
        if (self.num_axs == 3):
            notebook.plot_annotation(self.reference, self.axs[2])
            self.axs[2].set_title('Reference')
        plt.tight_layout()
        self.figure.canvas.draw()
        self.figure.canvas.flush_events()
        # Brief pause lets the GUI event loop process the redraw
        plt.pause(0.05)
|
class Chronometer:
    """Measure wall-clock durations of repeated operations.

    Parameters
    ----------
    unit: Text
        Label for the measured unit (e.g. 'chunk' or 'batch').
    progress_bar: Optional[ProgressBar]
        When given, `report()` writes through it instead of `print`.
    """
    def __init__(self, unit: Text, progress_bar: Optional[ProgressBar]=None):
        self.unit = unit
        self.progress_bar = progress_bar
        self.current_start_time = None
        self.history = []

    @property
    def is_running(self):
        return self.current_start_time is not None

    def start(self):
        # monotonic clock: immune to system time adjustments
        self.current_start_time = time.monotonic()

    def stop(self, do_count: bool=True):
        assert self.current_start_time is not None, 'No start time available, Did you call stop() before start()?'
        elapsed = time.monotonic() - self.current_start_time
        self.current_start_time = None
        if do_count:
            self.history.append(elapsed)

    def report(self):
        """Print mean and standard deviation of all counted measurements."""
        print_fn = print if self.progress_bar is None else self.progress_bar.write
        print_fn(f'Took {np.mean(self.history).item():.3f} (+/-{np.std(self.history).item():.3f}) seconds/{self.unit} -- ran {len(self.history)} times')
|
def parse_hf_token_arg(hf_token: Union[(bool, Text)]) -> Union[(bool, Text)]:
    """Normalize a CLI token argument.

    The strings 'true'/'false' (any casing) become booleans; booleans pass
    through; any other string is treated as a literal access token.
    """
    if isinstance(hf_token, bool):
        return hf_token
    lowered = hf_token.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    return hf_token
|
def encode_audio(waveform: np.ndarray) -> Text:
    """Serialize a waveform as base64 text over its float32 byte content."""
    raw = waveform.astype(np.float32).tobytes()
    return base64.b64encode(raw).decode('utf-8')
|
def decode_audio(data: Text) -> np.ndarray:
    """Inverse of `encode_audio`: base64 text -> float32 array, shape (1, samples)."""
    raw = base64.decodebytes(data.encode('utf-8'))
    return np.frombuffer(raw, dtype=np.float32).reshape(1, -1)
|
def get_padding_left(stream_duration: float, chunk_duration: float) -> float:
    """Left padding (seconds) needed for a stream shorter than one chunk.

    Returns 0 when the stream already fills at least one chunk.
    """
    missing = chunk_duration - stream_duration
    return missing if missing > 0 else 0
|
def repeat_label(label: Text):
    """Yield `label` forever (infinite generator)."""
    while True:
        yield label
|
def get_pipeline_class(class_name: Text) -> type:
    """Resolve a pipeline class from `diart.blocks` by name.

    Raises AssertionError when no class with that name exists.
    """
    pipeline_class = getattr(blocks, class_name, None)
    assert pipeline_class is not None, f"Pipeline '{class_name}' doesn't exist"
    return pipeline_class
|
def get_padding_right(latency: float, step: float) -> float:
    """Right padding (seconds): the latency minus one step."""
    return latency - step
|
def visualize_feature(duration: Optional[float]=None):
    """Return a callable that plots a SlidingWindowFeature.

    When `duration` is given, only the trailing `duration` seconds are shown;
    otherwise the whole extent is plotted.
    """
    def apply(feature: SlidingWindowFeature):
        extent = feature.extent
        if duration is None:
            notebook.crop = extent
        else:
            notebook.crop = Segment(extent.end - duration, extent.end)
        plt.rcParams['figure.figsize'] = (8, 2)
        notebook.plot_feature(feature)
        plt.tight_layout()
        plt.show()
    return apply
|
def visualize_annotation(duration: Optional[float]=None):
    """Return a callable that plots an Annotation.

    When `duration` is given, only the trailing `duration` seconds are shown;
    otherwise the annotation's whole extent is plotted.
    """
    def apply(annotation: Annotation):
        extent = annotation.get_timeline().extent()
        if duration is None:
            notebook.crop = extent
        else:
            notebook.crop = Segment(extent.end - duration, extent.end)
        plt.rcParams['figure.figsize'] = (8, 2)
        notebook.plot_annotation(annotation)
        plt.tight_layout()
        plt.show()
    return apply
|
class Boco:
    """Base class for boundary conditions; subclasses define computeLoss."""
    def __init__(self, name):
        self.name = name

    def validate(self):
        # Subclasses must provide a computeLoss member
        assert self.computeLoss, 'You need to specify a function to compute the loss'
|
class Neumann(Boco):
    """Boundary condition that penalizes a gradient-based residual on sampled points."""
    def __init__(self, sampler, name='neumann'):
        super().__init__(name)
        self.vars = sampler.vars
        self.sampler = sampler

    def sample(self, n_samples=None):
        return self.sampler.sample(n_samples)

    def validate(self, inputs, outputs):
        super().validate()
        assert inputs == self.vars, f'Boco {self.name} with different inputs !'
        assert self.computeBocoLoss, 'You need to specify a function to compute the loss'

    def computeLoss(self, model, criterion, inputs, outputs):
        """Evaluate the model on sampled boundary points and penalize the residual."""
        sampled = self.sample()
        X = torch.stack([sampled[var] for var in inputs], axis=-1)
        # Gradients w.r.t. the inputs are needed by computeBocoLoss
        X.requires_grad_(True)
        y = model(X)
        loss = self.computeBocoLoss(X, y)
        # Drive every residual term towards zero
        return {f'{self.name}_{key}': criterion(term, torch.zeros(term.shape).to(X.device)) for (key, term) in loss.items()}

    def computeGrads(self, outputs, inputs):
        """Gradients of `outputs` w.r.t. `inputs`, keeping the graph for higher order."""
        (grads,) = torch.autograd.grad(outputs, inputs, grad_outputs=outputs.data.new(outputs.shape).fill_(1), create_graph=True, only_inputs=True)
        return grads
|
class Periodic(Boco):
    """Boundary condition tying model outputs at two matching boundaries."""
    def __init__(self, sampler, sampler1, sampler2, name='periodic'):
        super().__init__(name)
        self.sampler = sampler
        self.sampler1 = sampler1
        self.sampler2 = sampler2
        # Both boundary samplers must cover exactly the same variables
        boundary1 = tuple(self.sampler1.sample(1).keys())
        boundary2 = tuple(self.sampler2.sample(1).keys())
        vars1 = sampler.vars + boundary1
        vars2 = sampler.vars + boundary2
        assert len(vars1) == len(vars2), 'Samplers must have the same variables'
        for var in vars1:
            assert var in vars2, 'Samplers must have the same variables'
        self.vars = vars1

    def sample(self, n_samples=None):
        """Sample matching point sets on both boundaries (shared vars identical)."""
        shared = self.sampler.sample(n_samples)
        inputs = self.sampler1.sample(n_samples)
        inputs.update(shared)
        outputs = self.sampler2.sample(n_samples)
        outputs.update(shared)
        return (inputs, outputs)

    def validate(self, inputs, outputs):
        super().validate()
        assert len(inputs) == len(self.vars), f'Boco {self.name} with different inputs !'
        for var in self.vars:
            assert var in inputs, f'Boco {self.name} with different inputs !'

    def computeLoss(self, model, criterion, inputs, outputs):
        """Penalize differences between predictions on the paired boundaries."""
        (side1, side2) = self.sample()
        x1 = torch.stack([side1[var] for var in inputs], axis=-1)
        x2 = torch.stack([side2[var] for var in inputs], axis=-1)
        y1 = model(x1)
        y2 = model(x2)
        return {self.name: criterion(y1, y2)}
|
class Dataset(torch.utils.data.Dataset):
    """Cartesian-product dataset: takes an iterable of 1-D coordinate arrays,
    builds every grid combination, and serves each point as a float tensor."""

    def __init__(self, data, device='cpu'):
        # meshgrid + stack(-1) + reshape flattens the grid into (N, n_vars).
        grid = np.meshgrid(*data)
        points = np.stack(grid, -1).reshape(-1, len(data))
        self.X = torch.from_numpy(points).float().to(device)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, ix):
        return self.X[ix]
|
class Mesh():
    """Holds a Cartesian mesh built from a dict of 1-D coordinate arrays and
    builds DataLoaders over it.

    Fix: `batch_size == None` replaced with the idiomatic identity test
    `batch_size is None` (equality with None is fragile against objects that
    override `__eq__`).
    """

    def __init__(self, data, device='cpu'):
        assert isinstance(data, dict), 'you must pass a dict with your data'
        # Keep the variable names; the values feed the product-grid Dataset.
        self.vars, data = tuple(data.keys()), data.values()
        self.dataset = Dataset(data, device)
        self.device = device

    def build_dataloader(self, batch_size=None, shuffle=True):
        """Return a DataLoader; a missing batch_size means full-batch."""
        if batch_size is None:
            batch_size = len(self.dataset)
        return torch.utils.data.DataLoader(self.dataset, batch_size=batch_size, shuffle=shuffle)
|
class History():
    """Accumulates named training metrics.

    Per-step values are buffered in `current`; `step()` averages each buffer
    into `history` (a dict of lists) and clears the buffers.
    """

    def __init__(self, precision=5):
        self.history = {}
        self.current = {}
        self.precision = precision  # decimals for averaging/printing

    def add(self, d):
        # Append finalized values straight into the long-term history.
        for key, value in d.items():
            self.history.setdefault(key, []).append(value)

    def add_step(self, d):
        # Buffer raw per-step values until the next `step()` call.
        for key, value in d.items():
            self.current.setdefault(key, []).append(value)

    def average(self):
        # Rounded mean of every buffered metric (buffers left intact).
        return {key: round(np.mean(vals), self.precision) for key, vals in self.current.items()}

    def step(self):
        # Flush buffered means (unrounded) into history, then reset buffers.
        for key, vals in self.current.items():
            self.add({key: np.mean(vals)})
        self.current = {}

    def __str__(self):
        # One " | name value" segment per metric, using the latest entry.
        segments = [f' | {key} {round(vals[-1], self.precision)}'
                    for key, vals in self.history.items()]
        return ''.join(segments)
|
class Sine(torch.nn.Module):
    """Elementwise sine activation (common in SIREN-style PINN networks)."""

    def __init__(self):
        super().__init__()

    def forward(self, x):
        # Stateless: simply apply sin elementwise.
        return torch.sin(x)
|
def block(i, o):
    """Return a hidden-layer block: Sine activation followed by Linear(i, o).

    Fix: the original allocated an extra ``torch.nn.Linear(i, o)`` into an
    unused local before building the Sequential; that dead (and wasteful)
    allocation is removed. The returned module is unchanged.
    """
    return torch.nn.Sequential(Sine(), torch.nn.Linear(i, o))
|
class MLP(torch.nn.Module):
    """Fully-connected network: a Linear input layer, (layers - 1) hidden
    Sine+Linear blocks, and a final Sine+Linear output block."""

    def __init__(self, inputs, outputs, layers, neurons):
        super().__init__()
        stages = [torch.nn.Linear(inputs, neurons)]
        stages += [block(neurons, neurons) for _ in range(layers - 1)]
        stages.append(block(neurons, outputs))
        self.mlp = torch.nn.Sequential(*stages)

    def forward(self, x):
        return self.mlp(x)
|
def get_lr(optimizer):
    """Return the learning rate of the optimizer's first parameter group
    (None when the optimizer has no parameter groups)."""
    return next((group['lr'] for group in optimizer.param_groups), None)
|
class PDE():
# Base class for solving a PDE with a physics-informed neural network:
# subclass, override `computePDELoss` to return the equation residuals,
# attach a sampler and boundary conditions, `compile`, then `solve`.
def __init__(self, inputs, outputs):
# `inputs`/`outputs` name the independent/dependent variables. A bare
# string is split into per-character names (e.g. 'xt' -> ('x', 't')).
if isinstance(inputs, str):
inputs = tuple(inputs)
if isinstance(outputs, str):
outputs = tuple(outputs)
# Validate: strings only, no duplicates within or across the two groups.
checkIsListOfStr(inputs)
checkIsListOfStr(outputs)
checkUnique(inputs)
checkUnique(outputs)
checkNoRepeated(inputs, outputs)
self.inputs = inputs
self.outputs = outputs
self.mesh = None
self.bocos = []
def set_sampler(self, sampler):
# The sampler must expose exactly the PDE input variables, in order.
assert (sampler.vars == self.inputs), 'your data does not match the PDE inputs'
self.sampler = sampler
def add_boco(self, boco):
# Register a boundary condition; boco names must be unique.
assert (boco.name not in [boco.name for boco in self.bocos]), f'Boco {boco.name} already exists, use another name'
boco.validate(self.inputs, self.outputs)
self.bocos.append(boco)
def update_boco(self, boco):
# Replace an already-registered boco with the same name (silent no-op
# when no boco with that name exists).
for b in self.bocos:
if (b.name == boco.name):
self.bocos[self.bocos.index(b)] = boco
return
def compile(self, model, optimizer, scheduler=None, loss_fn=None):
# Store training components; the loss defaults to MSE (residuals are
# later compared against zeros).
self.model = model
self.optimizer = optimizer
self.loss_fn = (loss_fn if loss_fn else torch.nn.MSELoss())
self.scheduler = scheduler
def computePDELoss(self, vars, grads):
# To be overridden: must return a dict {equation_name: residual_tensor}.
print('This function need to be overloaded !!!')
def solve(self, N_STEPS=1000, log_each=100):
# Training loop: minimizes PDE residuals plus all boco losses for
# N_STEPS steps, logging averaged metrics every `log_each` steps.
# Returns the accumulated metric history (dict of lists).
history = History()
pbar = tqdm(range(1, (N_STEPS + 1)), miniters=int((N_STEPS / log_each)))
for step in pbar:
history.add({'lr': get_lr(self.optimizer)})
self.optimizer.zero_grad()
loss = 0
# Fresh collocation points each step; requires_grad_ so that
# `computePDELoss` can presumably differentiate y w.r.t. X.
X = self.sampler._sample()
X.requires_grad_(True)
y = self.model(X)
pde_losses = self.computePDELoss(X, y)
# Shape of the user return value is only checked on the first step.
if (step == 1):
assert isinstance(pde_losses, dict), 'you should return a dict with the name of the equation and the corresponding loss'
for (name, l) in pde_losses.items():
# Drive every residual toward zero with the chosen criterion.
_loss = self.loss_fn(l, torch.zeros(l.shape).to(self.sampler.device))
loss += _loss
history.add_step({name: _loss.item()})
for boco in self.bocos:
boco_losses = boco.computeLoss(self.model, self.loss_fn, self.inputs, self.outputs)
for (name, l) in boco_losses.items():
if (step == 1):
assert isinstance(boco_losses, dict), 'you should return a dict with the name of the equation and the corresponding loss'
loss += l
history.add_step({name: l.item()})
loss.backward()
self.optimizer.step()
if ((step % log_each) == 0):
pbar.set_description(str(history.average()))
history.step()
# Scheduler (if any) is stepped once per optimization step.
if self.scheduler:
self.scheduler.step()
return history.history
def computeGrads(self, outputs, inputs):
# d(outputs)/d(inputs); create_graph=True keeps the graph so the result
# can itself be differentiated (higher-order derivatives).
(grads,) = torch.autograd.grad(outputs, inputs, grad_outputs=outputs.data.new(outputs.shape).fill_(1), create_graph=True, only_inputs=True)
return grads
def eval(self, X):
# Inference helper: eval mode and no autograd graph.
self.model.eval()
with torch.no_grad():
return self.model(X)
|
class BaseSampler():
    """Abstract sampler over named variables.

    Subclasses implement `sample` (dict of per-variable 1-D tensors);
    `_sample` stacks those draws into a single (n, n_vars) tensor in
    `self.vars` order.
    """

    def __init__(self, data, n_samples=1, device='cpu'):
        assert isinstance(data, dict), 'you must pass a dict with your data'
        self.device = device
        self.data = data
        self.vars = tuple(data.keys())
        self.n_samples = n_samples

    def _sample(self, n_samples=None):
        # Falsy n_samples (None/0) falls back to the configured default.
        draws = self.sample(n_samples or self.n_samples)
        return torch.stack([draws[var] for var in self.vars], axis=-1)

    def sample(self, n_samples=None):
        raise ValueError('you must implement this method')
|
class RandomSampler(BaseSampler):
    """Uniform random sampler.

    Each variable's limits may be a two-element list ``[min, max]`` or a bare
    scalar (treated as a constant, i.e. ``[v, v]``).

    Fix: the original normalized limits by mutating the caller's dict in
    place; the dict is now copied first so the argument is left untouched.
    """

    def __init__(self, data, n_samples=1, device='cpu'):
        # Normalize into a private copy instead of mutating the caller's dict.
        data = dict(data)
        for var, lims in data.items():
            if isinstance(lims, list):
                assert len(lims) == 2, 'you must pass a list with the min and max limits'
            elif isinstance(lims, (int, float)):
                data[var] = [lims, lims]
            else:
                raise ValueError('invalid limits')
        super().__init__(data, n_samples, device)

    def sample(self, n_samples=None):
        """Return a dict of uniform draws, one 1-D tensor per variable."""
        n_samples = n_samples or self.n_samples
        return {var: (torch.rand(n_samples, device=self.device) * (lims[1] - lims[0])) + lims[0]
                for var, lims in self.data.items()}
|
def checkIsListOfStr(l):
    """Make sure that l is a list/tuple containing only strings.

    Fix: the original only inspected tuples, so a *list* with non-string
    items passed silently despite the documented contract; lists are now
    validated the same way. Non-sequence inputs are still ignored.
    """
    if isinstance(l, (list, tuple)):
        for item in l:
            if not isinstance(item, str):
                raise Exception(str(item) + ' must be a string')
|
def checkUnique(l):
    """Make sure that l does not contain repeated elements."""
    # Scan each item against the remainder of the sequence; uses `==` (via
    # `in`) so unhashable items are supported, matching the original.
    for pos, item in enumerate(l):
        if item in l[pos + 1:]:
            raise Exception('Repeated item ' + str(item))
|
def checkNoRepeated(l1, l2):
    """Make sure there are no repeated elements in both lists."""
    # Sentinel distinguishes "no overlap" from a legitimately-shared None.
    _missing = object()
    shared = next((item for item in l1 if item in l2), _missing)
    if shared is not _missing:
        raise Exception('Repeated item ' + str(shared))
|
class EnvWrapper():
# Thin wrapper meant to expose an environment's action-space size.
# NOTE(review): `self.brain` is read but never assigned anywhere in this
# class, so this __init__ raises AttributeError as written. The `task`
# parameter is also unused -- presumably a brain/environment object (the
# attribute names resemble the Unity ML-Agents API) was meant to be derived
# from `task`; confirm against the original project before relying on this.
def __init__(self, task):
self.action_space = self.brain.vector_action_space_size
|
class CountScore():
    """Rolling episode-score window: keeps the last `total_episode` rewards
    in a circular buffer and reports their mean.

    Generalized: the window size (originally hard-coded to 100 both for the
    buffer and for the wrap-around modulo) is now a constructor parameter
    with the same default, and the modulo uses `self.total_episode` so the
    two sizes can never drift apart.
    """

    def __init__(self, total_episode=100):
        self.total_episode = total_episode
        self.episode_rewards = np.zeros(self.total_episode)
        # Index of the next buffer slot to overwrite (circular).
        self.current_episode = 0

    def add_score(self, score):
        """Record one episode's score, overwriting the oldest slot."""
        self.episode_rewards[self.current_episode] = score
        self.current_episode = (self.current_episode + 1) % self.total_episode

    def mean_score(self):
        """Mean over the whole buffer.

        NOTE: unfilled slots count as 0 until `total_episode` scores have
        been recorded (matches the original behavior).
        """
        return np.mean(self.episode_rewards)
|
# NOTE: the lines below are non-code residue from a dataset-viewer web page,
# preserved as comments so the file remains valid Python.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.