code stringlengths 17 6.64M |
|---|
class SpeakerEmbedding():
    """Compute speaker embedding vectors from audio chunks.

    Wraps a pre-trained embedding model, handling device placement and
    input type/format bookkeeping via `TemporalFeatureFormatter`.
    """
    def __init__(self, model: EmbeddingModel, device: Optional[torch.device]=None):
        self.model = model
        self.model.eval()
        self.device = device
        if (self.device is None):
            self.device = torch.device('cpu')
        self.model.to(self.device)
        # Separate formatters: waveform and weights may arrive as different types
        self.waveform_formatter = TemporalFeatureFormatter()
        self.weights_formatter = TemporalFeatureFormatter()
    @staticmethod
    def from_pretrained(model, use_hf_token: Union[(Text, bool, None)]=True, device: Optional[torch.device]=None) -> 'SpeakerEmbedding':
        """Build a SpeakerEmbedding block from a pre-trained embedding model.

        Parameters
        ----------
        model
            Model identifier or checkpoint accepted by `EmbeddingModel.from_pretrained`.
        use_hf_token: Union[Text, bool, None]
            HuggingFace authentication token, or True/None per `EmbeddingModel` semantics.
        device: Optional[torch.device]
            Device to run the model on. Defaults to CPU.
        """
        emb_model = EmbeddingModel.from_pretrained(model, use_hf_token)
        return SpeakerEmbedding(emb_model, device)
    def __call__(self, waveform: TemporalFeatures, weights: Optional[TemporalFeatures]=None) -> torch.Tensor:
        """
        Calculate speaker embeddings of input audio.
        If weights are given, calculate many speaker embeddings from the same waveform.

        Parameters
        ----------
        waveform: TemporalFeatures, shape (samples, channels) or (batch, samples, channels)
        weights: Optional[TemporalFeatures], shape (frames, speakers) or (batch, frames, speakers)
            Per-speaker and per-frame weights. Defaults to no weights.

        Returns
        -------
        embeddings: torch.Tensor
            If weights are provided, the shape is (batch, speakers, embedding_dim),
            otherwise the shape is (batch, embedding_dim).
            If batch size == 1, the batch dimension is omitted.
        """
        with torch.no_grad():
            inputs = self.waveform_formatter.cast(waveform).to(self.device)
            # Model expects (batch, channel, sample)
            inputs = rearrange(inputs, 'batch sample channel -> batch channel sample')
            if (weights is not None):
                weights = self.weights_formatter.cast(weights).to(self.device)
                (batch_size, _, num_speakers) = weights.shape
                # Duplicate the waveform once per speaker so each copy can be
                # weighted with that speaker's frame weights.
                # NOTE(review): assumes mono audio (channel dim == 1) — confirm upstream
                inputs = inputs.repeat(1, num_speakers, 1)
                # Fold the speaker dimension into the batch for a single model pass
                weights = rearrange(weights, 'batch frame spk -> (batch spk) frame')
                inputs = rearrange(inputs, 'batch spk sample -> (batch spk) 1 sample')
                output = rearrange(self.model(inputs, weights), '(batch spk) feat -> batch spk feat', batch=batch_size, spk=num_speakers)
            else:
                output = self.model(inputs)
            # squeeze() drops ALL singleton dimensions, not only the batch one
            return output.squeeze().cpu()
|
class OverlappedSpeechPenalty():
    """Penalize overlapping speech and low-confidence regions in segmentation scores.

    .. note::
        See `"Overlap-Aware Low-Latency Online Speaker Diarization
        based on End-to-End Local Segmentation" <https://github.com/juanmc2005/diart/blob/main/paper.pdf>`_
        (Section 2.2.1, segmentation-driven speaker embedding). Implements Equation 2.

    Parameters
    ----------
    gamma: float, optional
        Exponent to lower low-confidence predictions.
        Defaults to 3.
    beta: float, optional
        Temperature parameter (actually 1/beta) to lower joint speaker activations.
        Defaults to 10.
    normalize: bool, optional
        Whether to min-max normalize weights to be in the range [0, 1].
        Defaults to False.
    """
    def __init__(self, gamma: float=3, beta: float=10, normalize: bool=False):
        self.gamma = gamma
        self.beta = beta
        self.normalize = normalize
        self.formatter = TemporalFeatureFormatter()

    def __call__(self, segmentation: TemporalFeatures) -> TemporalFeatures:
        scores = self.formatter.cast(segmentation)
        with torch.inference_mode():
            penalized = F.overlapped_speech_penalty(scores, self.gamma, self.beta)
            if self.normalize:
                # Min-max normalize over the frame dimension, per speaker
                low = penalized.min(dim=1, keepdim=True).values
                high = penalized.max(dim=1, keepdim=True).values
                penalized = (penalized - low) / (high - low)
                # Constant columns yield 0/0 = NaN; replace with a small epsilon
                penalized.nan_to_num_(1e-08)
        return self.formatter.restore_type(penalized)
|
class EmbeddingNormalization():
    """Scale speaker embeddings to a target norm (optionally one norm per speaker)."""
    def __init__(self, norm: Union[(float, torch.Tensor)]=1):
        self.norm = norm
        # A per-speaker norm matrix gets a batch dimension so it broadcasts
        if isinstance(self.norm, torch.Tensor) and self.norm.ndim == 2:
            self.norm = self.norm.unsqueeze(0)

    def __call__(self, embeddings: torch.Tensor) -> torch.Tensor:
        with torch.inference_mode():
            return F.normalize_embeddings(embeddings, self.norm)
|
class OverlapAwareSpeakerEmbedding():
    """
    Extract overlap-aware speaker embeddings given an audio chunk and its segmentation.

    Combines three steps: an overlapped-speech penalty on the segmentation,
    weighted speaker embedding extraction, and embedding normalization.

    Parameters
    ----------
    model: EmbeddingModel
        A pre-trained embedding model.
    gamma: float, optional
        Exponent to lower low-confidence predictions. Defaults to 3.
    beta: float, optional
        Softmax's temperature parameter (actually 1/beta) to lower joint speaker activations.
        Defaults to 10.
    norm: float or torch.Tensor of shape (batch, speakers, 1) where batch is optional
        The target norm for the embeddings. It can be different for each speaker.
        Defaults to 1.
    normalize_weights: bool, optional
        Whether to min-max normalize embedding weights to be in the range [0, 1].
    device: Optional[torch.device]
        The device on which to run the embedding model.
        Defaults to GPU if available or CPU if not.
    """
    def __init__(self, model: EmbeddingModel, gamma: float=3, beta: float=10, norm: Union[(float, torch.Tensor)]=1, normalize_weights: bool=False, device: Optional[torch.device]=None):
        self.osp = OverlappedSpeechPenalty(gamma, beta, normalize_weights)
        self.embedding = SpeakerEmbedding(model, device)
        self.normalize = EmbeddingNormalization(norm)

    @staticmethod
    def from_pretrained(model, gamma: float=3, beta: float=10, norm: Union[(float, torch.Tensor)]=1, use_hf_token: Union[(Text, bool, None)]=True, normalize_weights: bool=False, device: Optional[torch.device]=None):
        """Build the block from a pre-trained embedding model identifier."""
        emb_model = EmbeddingModel.from_pretrained(model, use_hf_token)
        return OverlapAwareSpeakerEmbedding(emb_model, gamma, beta, norm, normalize_weights, device)

    def __call__(self, waveform: TemporalFeatures, segmentation: TemporalFeatures) -> torch.Tensor:
        weights = self.osp(segmentation)
        return self.normalize(self.embedding(waveform, weights))
|
class SpeakerSegmentation():
    """Run a speaker segmentation model on audio chunks, preserving input format."""
    def __init__(self, model: SegmentationModel, device: Optional[torch.device]=None):
        self.model = model
        self.model.eval()
        self.device = torch.device('cpu') if device is None else device
        self.model.to(self.device)
        self.formatter = TemporalFeatureFormatter()

    @staticmethod
    def from_pretrained(model, use_hf_token: Union[(Text, bool, None)]=True, device: Optional[torch.device]=None) -> 'SpeakerSegmentation':
        """Build a SpeakerSegmentation block from a pre-trained segmentation model."""
        seg_model = SegmentationModel.from_pretrained(model, use_hf_token)
        return SpeakerSegmentation(seg_model, device)

    def __call__(self, waveform: TemporalFeatures) -> TemporalFeatures:
        """
        Calculate the speaker segmentation of input audio.

        Parameters
        ----------
        waveform: TemporalFeatures, shape (samples, channels) or (batch, samples, channels)

        Returns
        -------
        speaker_segmentation: TemporalFeatures, shape (batch, frames, speakers)
            The batch dimension is omitted if waveform is a `SlidingWindowFeature`.
        """
        with torch.no_grad():
            # Model expects (batch, channel, sample)
            batch = rearrange(self.formatter.cast(waveform), 'batch sample channel -> batch channel sample')
            scores = self.model(batch.to(self.device)).cpu()
        return self.formatter.restore_type(scores)
|
class Binarize():
    '''
    Transform a speaker segmentation from the discrete-time domain
    into a continuous-time speaker segmentation.

    Parameters
    ----------
    threshold: float
        Probability threshold to determine if a speaker is active at a given frame.
    uri: Optional[Text]
        Uri of the audio stream. Defaults to no uri.
    '''
    def __init__(self, threshold: float, uri: Optional[Text]=None):
        self.uri = uri
        self.threshold = threshold
    def __call__(self, segmentation: SlidingWindowFeature) -> Annotation:
        '''
        Return the continuous-time segmentation
        corresponding to the discrete-time input segmentation.

        Parameters
        ----------
        segmentation: SlidingWindowFeature
            Discrete-time speaker segmentation.

        Returns
        -------
        annotation: Annotation
            Continuous-time speaker segmentation.
        '''
        (num_frames, num_speakers) = segmentation.data.shape
        timestamps = segmentation.sliding_window
        # Boolean activity matrix: frame x speaker
        is_active = (segmentation.data > self.threshold)
        # Append an all-inactive sentinel frame so every open region is closed
        is_active = np.append(is_active, [([False] * num_speakers)], axis=0)
        # Region start time per speaker, initialized to the first frame's center
        start_times = (np.zeros(num_speakers) + timestamps[0].middle)
        annotation = Annotation(uri=self.uri, modality='speech')
        for t in range(num_frames):
            # Speakers turning on between frames t and t+1: remember region start
            onsets = np.logical_and(np.logical_not(is_active[t]), is_active[(t + 1)])
            start_times[onsets] = timestamps[(t + 1)].middle
            # Speakers turning off between frames t and t+1: close and emit region
            offsets = np.logical_and(is_active[t], np.logical_not(is_active[(t + 1)]))
            for spk in np.where(offsets)[0]:
                region = Segment(start_times[spk], timestamps[(t + 1)].middle)
                annotation[(region, spk)] = f'speaker{spk}'
        return annotation
|
class Resample():
    """Dynamically resample audio chunks.

    Parameters
    ----------
    sample_rate: int
        Original sample rate of the input audio
    resample_rate: int
        Sample rate of the output
    """
    def __init__(self, sample_rate: int, resample_rate: int, device: Optional[torch.device]=None):
        self.device = torch.device('cpu') if device is None else device
        self.resample = T.Resample(sample_rate, resample_rate).to(self.device)
        self.formatter = TemporalFeatureFormatter()

    def __call__(self, waveform: TemporalFeatures) -> TemporalFeatures:
        chunk = self.formatter.cast(waveform).to(self.device)
        with torch.no_grad():
            # Resampler works on (..., channel, sample): swap, resample, swap back
            chunk = self.resample(chunk.transpose(-1, -2)).transpose(-1, -2)
        return self.formatter.restore_type(chunk)
|
class AdjustVolume():
    """Change the volume of an audio chunk.

    Notice that the output volume might be different to avoid saturation.

    Parameters
    ----------
    volume_in_db: float
        Target volume in dB.
    """
    def __init__(self, volume_in_db: float):
        self.target_db = volume_in_db
        self.formatter = TemporalFeatureFormatter()

    @staticmethod
    def get_volumes(waveforms: torch.Tensor) -> torch.Tensor:
        """Compute per-channel volumes (dB) of a batch of audio chunks.

        Parameters
        ----------
        waveforms: torch.Tensor
            Audio chunks. Shape (batch, samples, channels).

        Returns
        -------
        volumes: torch.Tensor
            Audio chunk volumes per channel. Shape (batch, 1, channels)
        """
        mean_power = torch.mean(waveforms.abs().pow(2), dim=1, keepdim=True)
        return 10 * torch.log10(mean_power)

    def __call__(self, waveform: TemporalFeatures) -> TemporalFeatures:
        chunk = self.formatter.cast(waveform)
        with torch.no_grad():
            # Gain (linear) needed to reach the target volume
            db_gap = self.target_db - self.get_volumes(chunk)
            chunk = (10 ** (db_gap / 20)) * chunk
            # Divide by the per-channel peak (at least 1) to avoid clipping
            peaks = torch.clamp(torch.amax(chunk.abs(), dim=1, keepdim=True), 1)
            chunk = chunk / peaks
        return self.formatter.restore_type(chunk)
|
class VoiceActivityDetectionConfig(base.PipelineConfig):
    """Configuration of the streaming voice activity detection pipeline."""
    def __init__(self, segmentation: (m.SegmentationModel | None)=None, duration: float=5, step: float=0.5, latency: ((float | Literal[('max', 'min')]) | None)=None, tau_active: float=0.6, device: (torch.device | None)=None, sample_rate: int=16000, **kwargs):
        # Fall back to the default pyannote segmentation checkpoint
        self.segmentation = segmentation or m.SegmentationModel.from_pyannote('pyannote/segmentation')
        self._duration = duration
        self._step = step
        self._sample_rate = sample_rate
        # Resolve symbolic latency values into seconds
        if latency == 'max':
            self._latency = self._duration
        elif latency is None or latency == 'min':
            self._latency = self._step
        else:
            self._latency = latency
        self.tau_active = tau_active
        self.device = device or torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    @property
    def duration(self) -> float:
        # Chunk duration in seconds
        return self._duration

    @property
    def step(self) -> float:
        # Sliding step between consecutive chunks, in seconds
        return self._step

    @property
    def latency(self) -> float:
        # Output latency in seconds, within [step, duration]
        return self._latency

    @property
    def sample_rate(self) -> int:
        return self._sample_rate
|
class VoiceActivityDetection(base.Pipeline):
    """Streaming voice activity detection pipeline.

    Segments each incoming chunk, keeps the maximum speaker activation per
    frame as a voice score, aggregates overlapping chunk predictions
    according to the configured latency, and binarizes the result into
    continuous speech regions.
    """
    def __init__(self, config: (VoiceActivityDetectionConfig | None)=None):
        self._config = (VoiceActivityDetectionConfig() if (config is None) else config)
        msg = f'Latency should be in the range [{self._config.step}, {self._config.duration}]'
        assert (self._config.step <= self._config.latency <= self._config.duration), msg
        self.segmentation = SpeakerSegmentation(self._config.segmentation, self._config.device)
        # Aggregate overlapping windows: hamming-weighted for predictions,
        # first-window-wins for the audio itself
        self.pred_aggregation = DelayedAggregation(self._config.step, self._config.latency, strategy='hamming', cropping_mode='loose')
        self.audio_aggregation = DelayedAggregation(self._config.step, self._config.latency, strategy='first', cropping_mode='center')
        self.binarize = Binarize(self._config.tau_active)
        # Offset added to output timestamps (e.g. to compensate file padding)
        self.timestamp_shift = 0
        (self.chunk_buffer, self.pred_buffer) = ([], [])
    @staticmethod
    def get_config_class() -> type:
        return VoiceActivityDetectionConfig
    @staticmethod
    def suggest_metric() -> BaseMetric:
        # Standard detection error rate, counting overlapped speech
        return DetectionErrorRate(collar=0, skip_overlap=False)
    @staticmethod
    def hyper_parameters() -> Sequence[base.HyperParameter]:
        return [base.TauActive]
    @property
    def config(self) -> base.PipelineConfig:
        return self._config
    def reset(self):
        # Restore initial state (shift and buffers) before processing a new stream
        self.set_timestamp_shift(0)
        (self.chunk_buffer, self.pred_buffer) = ([], [])
    def set_timestamp_shift(self, shift: float):
        self.timestamp_shift = shift
    def __call__(self, waveforms: Sequence[SlidingWindowFeature]) -> Sequence[tuple[(Annotation, SlidingWindowFeature)]]:
        """Detect speech in a batch of consecutive audio chunks.

        Parameters
        ----------
        waveforms: Sequence[SlidingWindowFeature]
            Consecutive chunks of `config.duration` seconds each.

        Returns
        -------
        Sequence[tuple[Annotation, SlidingWindowFeature]]
            One (speech annotation, aggregated waveform) pair per input chunk.
        """
        batch_size = len(waveforms)
        msg = 'Pipeline expected at least 1 input'
        assert (batch_size >= 1), msg
        batch = torch.stack([torch.from_numpy(w.data) for w in waveforms])
        expected_num_samples = int(np.rint((self.config.duration * self.config.sample_rate)))
        msg = f'Expected {expected_num_samples} samples per chunk, but got {batch.shape[1]}'
        assert (batch.shape[1] == expected_num_samples), msg
        segmentations = self.segmentation(batch)
        # Voice score per frame = maximum activation over speakers
        voice_detection = torch.max(segmentations, dim=(- 1), keepdim=True)[0]
        seg_resolution = (waveforms[0].extent.duration / segmentations.shape[1])
        outputs = []
        for (wav, vad) in zip(waveforms, voice_detection):
            sw = SlidingWindow(start=wav.extent.start, duration=seg_resolution, step=seg_resolution)
            vad = SlidingWindowFeature(vad.cpu().numpy(), sw)
            self.chunk_buffer.append(wav)
            self.pred_buffer.append(vad)
            # Aggregate all currently-buffered overlapping windows
            agg_waveform = self.audio_aggregation(self.chunk_buffer)
            agg_prediction = self.pred_aggregation(self.pred_buffer)
            agg_prediction = self.binarize(agg_prediction).get_timeline(copy=False)
            # Apply the global timestamp offset if one was configured
            if (self.timestamp_shift != 0):
                shifted_agg_prediction = Timeline(uri=agg_prediction.uri)
                for segment in agg_prediction:
                    new_segment = Segment((segment.start + self.timestamp_shift), (segment.end + self.timestamp_shift))
                    shifted_agg_prediction.add(new_segment)
                agg_prediction = shifted_agg_prediction
            agg_prediction = agg_prediction.to_annotation(utils.repeat_label('speech'))
            outputs.append((agg_prediction, agg_waveform))
            # Drop the oldest window once the buffer spans all overlapping windows
            if (len(self.chunk_buffer) == self.pred_aggregation.num_overlapping_windows):
                self.chunk_buffer = self.chunk_buffer[1:]
                self.pred_buffer = self.pred_buffer[1:]
        return outputs
|
def run():
    """CLI entry point: benchmark a streaming pipeline over a directory of audio files."""
    parser = argparse.ArgumentParser()
    parser.add_argument('root', type=Path, help='Directory with audio files CONVERSATION.(wav|flac|m4a|...)')
    parser.add_argument('--pipeline', default='SpeakerDiarization', type=str, help="Class of the pipeline to optimize. Defaults to 'SpeakerDiarization'")
    parser.add_argument('--segmentation', default='pyannote/segmentation', type=str, help=f'{argdoc.SEGMENTATION}. Defaults to pyannote/segmentation')
    parser.add_argument('--embedding', default='pyannote/embedding', type=str, help=f'{argdoc.EMBEDDING}. Defaults to pyannote/embedding')
    parser.add_argument('--reference', type=Path, help='Optional. Directory with RTTM files CONVERSATION.rttm. Names must match audio files')
    parser.add_argument('--duration', type=float, default=5, help=f'{argdoc.DURATION}. Defaults to training segmentation duration')
    parser.add_argument('--step', default=0.5, type=float, help=f'{argdoc.STEP}. Defaults to 0.5')
    parser.add_argument('--latency', default=0.5, type=float, help=f'{argdoc.LATENCY}. Defaults to 0.5')
    parser.add_argument('--tau-active', default=0.5, type=float, help=f'{argdoc.TAU}. Defaults to 0.5')
    parser.add_argument('--rho-update', default=0.3, type=float, help=f'{argdoc.RHO}. Defaults to 0.3')
    parser.add_argument('--delta-new', default=1, type=float, help=f'{argdoc.DELTA}. Defaults to 1')
    parser.add_argument('--gamma', default=3, type=float, help=f'{argdoc.GAMMA}. Defaults to 3')
    parser.add_argument('--beta', default=10, type=float, help=f'{argdoc.BETA}. Defaults to 10')
    parser.add_argument('--max-speakers', default=20, type=int, help=f'{argdoc.MAX_SPEAKERS}. Defaults to 20')
    parser.add_argument('--batch-size', default=32, type=int, help=f'{argdoc.BATCH_SIZE}. Defaults to 32')
    parser.add_argument('--num-workers', default=0, type=int, help=f'{argdoc.NUM_WORKERS}. Defaults to 0 (no parallelism)')
    parser.add_argument('--cpu', dest='cpu', action='store_true', help=f'{argdoc.CPU}. Defaults to GPU if available, CPU otherwise')
    parser.add_argument('--output', type=Path, help=f'{argdoc.OUTPUT}. Defaults to no writing')
    parser.add_argument('--hf-token', default='true', type=str, help=f"{argdoc.HF_TOKEN}. Defaults to 'true' (required by pyannote)")
    parser.add_argument('--normalize-embedding-weights', action='store_true', help=f'{argdoc.NORMALIZE_EMBEDDING_WEIGHTS}. Defaults to False')
    args = parser.parse_args()
    # Resolve device and pre-load models before building the pipeline config
    args.device = (torch.device('cpu') if args.cpu else None)
    hf_token = utils.parse_hf_token_arg(args.hf_token)
    args.segmentation = m.SegmentationModel.from_pretrained(args.segmentation, hf_token)
    args.embedding = m.EmbeddingModel.from_pretrained(args.embedding, hf_token)
    pipeline_class = utils.get_pipeline_class(args.pipeline)
    benchmark = Benchmark(args.root, args.reference, args.output, show_progress=True, show_report=True, batch_size=args.batch_size)
    config = pipeline_class.get_config_class()(**vars(args))
    # Optionally distribute the benchmark across worker processes
    if (args.num_workers > 0):
        benchmark = Parallelize(benchmark, args.num_workers)
    report = benchmark(pipeline_class, config)
    # Persist the per-file report when an output directory was provided
    if ((args.output is not None) and isinstance(report, pd.DataFrame)):
        report.to_csv((args.output / 'benchmark_report.csv'))
|
def send_audio(ws: WebSocket, source: Text, step: float, sample_rate: int):
    """Read audio from a file or microphone and stream encoded chunks through `ws`."""
    parts = source.split(':')
    if parts[0] == 'microphone':
        # Optional device index after the colon, e.g. 'microphone:2'
        mic_device = int(parts[1]) if len(parts) > 1 else None
        audio_source = src.MicrophoneAudioSource(step, mic_device)
    else:
        audio_source = src.FileAudioSource(source, sample_rate, block_duration=step)
    # Encode every chunk and push it to the websocket as it is produced
    audio_source.stream.pipe(ops.map(utils.encode_audio)).subscribe_(ws.send)
    audio_source.read()
|
def receive_audio(ws: WebSocket, output: Optional[Path]):
    """Print every message received from `ws`, appending it to `output` if given."""
    while True:
        message = ws.recv()
        print(f'Received: {message}', end='')
        if output is None:
            continue
        with open(output, 'a') as file:
            file.write(message)
|
def run():
    """CLI entry point: stream a local audio source to a diarization server over websockets."""
    parser = argparse.ArgumentParser()
    parser.add_argument('source', type=str, help="Path to an audio file | 'microphone' | 'microphone:<DEVICE_ID>'")
    parser.add_argument('--host', required=True, type=str, help='Server host')
    parser.add_argument('--port', required=True, type=int, help='Server port')
    parser.add_argument('--step', default=0.5, type=float, help=f'{argdoc.STEP}. Defaults to 0.5')
    parser.add_argument('-sr', '--sample-rate', default=16000, type=int, help=f'{argdoc.SAMPLE_RATE}. Defaults to 16000')
    parser.add_argument('-o', '--output-file', type=Path, help='Output RTTM file. Defaults to no writing')
    args = parser.parse_args()
    ws = WebSocket()
    ws.connect(f'ws://{args.host}:{args.port}')
    # Sender streams audio to the server while the receiver prints/saves results
    sender = Thread(target=send_audio, args=[ws, args.source, args.step, args.sample_rate])
    receiver = Thread(target=receive_audio, args=[ws, args.output_file])
    sender.start()
    receiver.start()
|
def run():
    """CLI entry point: serve a streaming diarization pipeline over websockets."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', default='0.0.0.0', type=str, help='Server host')
    parser.add_argument('--port', default=7007, type=int, help='Server port')
    parser.add_argument('--pipeline', default='SpeakerDiarization', type=str, help="Class of the pipeline to optimize. Defaults to 'SpeakerDiarization'")
    parser.add_argument('--segmentation', default='pyannote/segmentation', type=str, help=f'{argdoc.SEGMENTATION}. Defaults to pyannote/segmentation')
    parser.add_argument('--embedding', default='pyannote/embedding', type=str, help=f'{argdoc.EMBEDDING}. Defaults to pyannote/embedding')
    parser.add_argument('--duration', type=float, default=5, help=f'{argdoc.DURATION}. Defaults to training segmentation duration')
    parser.add_argument('--step', default=0.5, type=float, help=f'{argdoc.STEP}. Defaults to 0.5')
    parser.add_argument('--latency', default=0.5, type=float, help=f'{argdoc.LATENCY}. Defaults to 0.5')
    parser.add_argument('--tau-active', default=0.5, type=float, help=f'{argdoc.TAU}. Defaults to 0.5')
    parser.add_argument('--rho-update', default=0.3, type=float, help=f'{argdoc.RHO}. Defaults to 0.3')
    parser.add_argument('--delta-new', default=1, type=float, help=f'{argdoc.DELTA}. Defaults to 1')
    parser.add_argument('--gamma', default=3, type=float, help=f'{argdoc.GAMMA}. Defaults to 3')
    parser.add_argument('--beta', default=10, type=float, help=f'{argdoc.BETA}. Defaults to 10')
    parser.add_argument('--max-speakers', default=20, type=int, help=f'{argdoc.MAX_SPEAKERS}. Defaults to 20')
    parser.add_argument('--cpu', dest='cpu', action='store_true', help=f'{argdoc.CPU}. Defaults to GPU if available, CPU otherwise')
    parser.add_argument('--output', type=Path, help=f'{argdoc.OUTPUT}. Defaults to no writing')
    parser.add_argument('--hf-token', default='true', type=str, help=f"{argdoc.HF_TOKEN}. Defaults to 'true' (required by pyannote)")
    parser.add_argument('--normalize-embedding-weights', action='store_true', help=f'{argdoc.NORMALIZE_EMBEDDING_WEIGHTS}. Defaults to False')
    args = parser.parse_args()
    # Resolve device and pre-load models before building the pipeline config
    args.device = (torch.device('cpu') if args.cpu else None)
    hf_token = utils.parse_hf_token_arg(args.hf_token)
    args.segmentation = m.SegmentationModel.from_pretrained(args.segmentation, hf_token)
    args.embedding = m.EmbeddingModel.from_pretrained(args.embedding, hf_token)
    pipeline_class = utils.get_pipeline_class(args.pipeline)
    config = pipeline_class.get_config_class()(**vars(args))
    pipeline = pipeline_class(config)
    # Audio arrives from clients through a websocket audio source
    audio_source = src.WebSocketAudioSource(config.sample_rate, args.host, args.port)
    inference = StreamingInference(pipeline, audio_source, batch_size=1, do_profile=False, do_plot=False, show_progress=True)
    if (args.output is not None):
        inference.attach_observers(RTTMWriter(audio_source.uri, (args.output / f'{audio_source.uri}.rttm')))
    # Send each prediction back to the client as RTTM text
    inference.attach_hooks((lambda ann_wav: audio_source.send(ann_wav[0].to_rttm())))
    inference()
|
def run():
    """CLI entry point: run a streaming pipeline on a file or microphone and write RTTM output."""
    parser = argparse.ArgumentParser()
    parser.add_argument('source', type=str, help="Path to an audio file | 'microphone' | 'microphone:<DEVICE_ID>'")
    parser.add_argument('--pipeline', default='SpeakerDiarization', type=str, help="Class of the pipeline to optimize. Defaults to 'SpeakerDiarization'")
    parser.add_argument('--segmentation', default='pyannote/segmentation', type=str, help=f'{argdoc.SEGMENTATION}. Defaults to pyannote/segmentation')
    parser.add_argument('--embedding', default='pyannote/embedding', type=str, help=f'{argdoc.EMBEDDING}. Defaults to pyannote/embedding')
    parser.add_argument('--duration', type=float, default=5, help=f'{argdoc.DURATION}. Defaults to training segmentation duration')
    parser.add_argument('--step', default=0.5, type=float, help=f'{argdoc.STEP}. Defaults to 0.5')
    parser.add_argument('--latency', default=0.5, type=float, help=f'{argdoc.LATENCY}. Defaults to 0.5')
    parser.add_argument('--tau-active', default=0.5, type=float, help=f'{argdoc.TAU}. Defaults to 0.5')
    parser.add_argument('--rho-update', default=0.3, type=float, help=f'{argdoc.RHO}. Defaults to 0.3')
    parser.add_argument('--delta-new', default=1, type=float, help=f'{argdoc.DELTA}. Defaults to 1')
    parser.add_argument('--gamma', default=3, type=float, help=f'{argdoc.GAMMA}. Defaults to 3')
    parser.add_argument('--beta', default=10, type=float, help=f'{argdoc.BETA}. Defaults to 10')
    parser.add_argument('--max-speakers', default=20, type=int, help=f'{argdoc.MAX_SPEAKERS}. Defaults to 20')
    parser.add_argument('--no-plot', dest='no_plot', action='store_true', help='Skip plotting for faster inference')
    parser.add_argument('--cpu', dest='cpu', action='store_true', help=f'{argdoc.CPU}. Defaults to GPU if available, CPU otherwise')
    parser.add_argument('--output', type=str, help=f"{argdoc.OUTPUT}. Defaults to home directory if SOURCE == 'microphone' or parent directory if SOURCE is a file")
    parser.add_argument('--hf-token', default='true', type=str, help=f"{argdoc.HF_TOKEN}. Defaults to 'true' (required by pyannote)")
    parser.add_argument('--normalize-embedding-weights', action='store_true', help=f'{argdoc.NORMALIZE_EMBEDDING_WEIGHTS}. Defaults to False')
    args = parser.parse_args()
    # Resolve device and pre-load models before building the pipeline config
    args.device = (torch.device('cpu') if args.cpu else None)
    hf_token = utils.parse_hf_token_arg(args.hf_token)
    args.segmentation = m.SegmentationModel.from_pretrained(args.segmentation, hf_token)
    args.embedding = m.EmbeddingModel.from_pretrained(args.embedding, hf_token)
    pipeline_class = utils.get_pipeline_class(args.pipeline)
    config = pipeline_class.get_config_class()(**vars(args))
    pipeline = pipeline_class(config)
    # Select the audio source: a file path or 'microphone[:<DEVICE_ID>]'
    source_components = args.source.split(':')
    if (source_components[0] != 'microphone'):
        args.source = Path(args.source).expanduser()
        args.output = (args.source.parent if (args.output is None) else Path(args.output))
        padding = config.get_file_padding(args.source)
        audio_source = src.FileAudioSource(args.source, config.sample_rate, padding, config.step)
        # Compensate the padding so output timestamps match the original file
        pipeline.set_timestamp_shift((- padding[0]))
    else:
        args.output = (Path('~/').expanduser() if (args.output is None) else Path(args.output))
        device = (int(source_components[1]) if (len(source_components) > 1) else None)
        audio_source = src.MicrophoneAudioSource(config.step, device)
    inference = StreamingInference(pipeline, audio_source, batch_size=1, do_profile=True, do_plot=(not args.no_plot), show_progress=True)
    inference.attach_observers(RTTMWriter(audio_source.uri, (args.output / f'{audio_source.uri}.rttm')))
    # Allow a clean stop with Ctrl+C
    try:
        inference()
    except KeyboardInterrupt:
        pass
|
def run():
    """CLI entry point: optimize pipeline hyper-parameters with Optuna against reference RTTMs."""
    parser = argparse.ArgumentParser()
    parser.add_argument('root', type=str, help='Directory with audio files CONVERSATION.(wav|flac|m4a|...)')
    parser.add_argument('--reference', required=True, type=str, help='Directory with RTTM files CONVERSATION.rttm. Names must match audio files')
    parser.add_argument('--pipeline', default='SpeakerDiarization', type=str, help="Class of the pipeline to optimize. Defaults to 'SpeakerDiarization'")
    parser.add_argument('--segmentation', default='pyannote/segmentation', type=str, help=f'{argdoc.SEGMENTATION}. Defaults to pyannote/segmentation')
    parser.add_argument('--embedding', default='pyannote/embedding', type=str, help=f'{argdoc.EMBEDDING}. Defaults to pyannote/embedding')
    parser.add_argument('--duration', type=float, default=5, help=f'{argdoc.DURATION}. Defaults to training segmentation duration')
    parser.add_argument('--step', default=0.5, type=float, help=f'{argdoc.STEP}. Defaults to 0.5')
    parser.add_argument('--latency', default=0.5, type=float, help=f'{argdoc.LATENCY}. Defaults to 0.5')
    parser.add_argument('--tau-active', default=0.5, type=float, help=f'{argdoc.TAU}. Defaults to 0.5')
    parser.add_argument('--rho-update', default=0.3, type=float, help=f'{argdoc.RHO}. Defaults to 0.3')
    parser.add_argument('--delta-new', default=1, type=float, help=f'{argdoc.DELTA}. Defaults to 1')
    parser.add_argument('--gamma', default=3, type=float, help=f'{argdoc.GAMMA}. Defaults to 3')
    parser.add_argument('--beta', default=10, type=float, help=f'{argdoc.BETA}. Defaults to 10')
    parser.add_argument('--max-speakers', default=20, type=int, help=f'{argdoc.MAX_SPEAKERS}. Defaults to 20')
    parser.add_argument('--batch-size', default=32, type=int, help=f'{argdoc.BATCH_SIZE}. Defaults to 32')
    parser.add_argument('--cpu', dest='cpu', action='store_true', help=f'{argdoc.CPU}. Defaults to GPU if available, CPU otherwise')
    parser.add_argument('--hparams', nargs='+', default=('tau_active', 'rho_update', 'delta_new'), help='Hyper-parameters to optimize. Must match names in `PipelineConfig`. Defaults to tau_active, rho_update and delta_new')
    parser.add_argument('--num-iter', default=100, type=int, help='Number of optimization trials')
    parser.add_argument('--storage', type=str, help='Optuna storage string. If provided, continue a previous study instead of creating one. The database name must match the study name')
    parser.add_argument('--output', type=str, help='Working directory')
    parser.add_argument('--hf-token', default='true', type=str, help=f"{argdoc.HF_TOKEN}. Defaults to 'true' (required by pyannote)")
    parser.add_argument('--normalize-embedding-weights', action='store_true', help=f'{argdoc.NORMALIZE_EMBEDDING_WEIGHTS}. Defaults to False')
    args = parser.parse_args()
    # Resolve device and pre-load models before building the base config
    args.device = (torch.device('cpu') if args.cpu else None)
    hf_token = utils.parse_hf_token_arg(args.hf_token)
    args.segmentation = m.SegmentationModel.from_pretrained(args.segmentation, hf_token)
    args.embedding = m.EmbeddingModel.from_pretrained(args.embedding, hf_token)
    pipeline_class = utils.get_pipeline_class(args.pipeline)
    base_config = pipeline_class.get_config_class()(**vars(args))
    # Keep only the requested hyper-parameters that this pipeline supports
    possible_hparams = pipeline_class.hyper_parameters()
    hparams = [HyperParameter.from_name(name) for name in args.hparams]
    hparams = [hp for hp in hparams if (hp in possible_hparams)]
    if (not hparams):
        print(f"No hyper-parameters to optimize. Make sure to select one of: {', '.join([hp.name for hp in possible_hparams])}")
        exit(1)
    # Either start a fresh study in `output` or resume one from Optuna `storage`
    if (args.output is not None):
        msg = 'Both `output` and `storage` were set, but only one was expected'
        assert (args.storage is None), msg
        args.output = Path(args.output).expanduser()
        args.output.mkdir(parents=True, exist_ok=True)
        study_or_path = args.output
    elif (args.storage is not None):
        db_name = Path(args.storage).stem
        study_or_path = optuna.load_study(db_name, args.storage, TPESampler())
    else:
        msg = 'Please provide either `output` or `storage`'
        raise ValueError(msg)
    Optimizer(pipeline_class=pipeline_class, speech_path=args.root, reference_path=args.reference, study_or_path=study_or_path, batch_size=args.batch_size, hparams=hparams, base_config=base_config)(num_iter=args.num_iter, show_progress=True)
|
class TemporalFeatureFormatterState(ABC):
    """
    Recorded type of a temporal feature formatter.

    Converts temporal features to tensors and later restores
    the original representation on other features.
    """

    @abstractmethod
    def to_tensor(self, features: TemporalFeatures) -> torch.Tensor:
        """Convert `features` into a torch.Tensor."""

    @abstractmethod
    def to_internal_type(self, features: torch.Tensor) -> TemporalFeatures:
        """
        Cast batched `features` back to the recorded type, removing the
        batch dimension if required.

        Parameters
        ----------
        features: torch.Tensor, shape (batch, frames, dim)
            Batched temporal features.

        Returns
        -------
        new_features: SlidingWindowFeature or numpy.ndarray or torch.Tensor, shape (batch, frames, dim)
        """
|
class SlidingWindowFeatureFormatterState(TemporalFeatureFormatterState):
    """Formatter state for pyannote `SlidingWindowFeature` inputs."""
    def __init__(self, duration: float):
        self.duration = duration
        self._cur_start_time = 0

    def to_tensor(self, features: SlidingWindowFeature) -> torch.Tensor:
        msg = 'Features sliding window duration and step must be equal'
        assert features.sliding_window.duration == features.sliding_window.step, msg
        # Remember where this chunk starts so the window can be rebuilt later
        self._cur_start_time = features.sliding_window.start
        return torch.from_numpy(features.data)

    def to_internal_type(self, features: torch.Tensor) -> TemporalFeatures:
        (batch_size, num_frames, _) = features.shape
        assert batch_size == 1, 'Batched SlidingWindowFeature objects are not supported'
        # Rebuild a frame-aligned sliding window over the recorded duration
        frame_duration = self.duration / num_frames
        window = SlidingWindow(start=self._cur_start_time, duration=frame_duration, step=frame_duration)
        return SlidingWindowFeature(features.squeeze(dim=0).cpu().numpy(), window)
|
class NumpyArrayFormatterState(TemporalFeatureFormatterState):
    """Formatter state for plain numpy array inputs."""
    def to_tensor(self, features: np.ndarray) -> torch.Tensor:
        return torch.from_numpy(features)
    def to_internal_type(self, features: torch.Tensor) -> TemporalFeatures:
        return features.cpu().numpy()
|
class PytorchTensorFormatterState(TemporalFeatureFormatterState):
    """Formatter state for torch.Tensor inputs (identity conversions)."""
    def to_tensor(self, features: torch.Tensor) -> torch.Tensor:
        return features
    def to_internal_type(self, features: torch.Tensor) -> TemporalFeatures:
        return features
|
class TemporalFeatureFormatter():
    """
    Tracks the type and format of temporal features so that tensors computed
    from them can later be restored to the original representation.
    """
    def __init__(self):
        # Lazily set on the first call to `cast`
        self.state: Optional[TemporalFeatureFormatterState] = None

    def set_state(self, features: TemporalFeatures):
        """Record the formatter state matching the type of `features`."""
        if isinstance(features, SlidingWindowFeature):
            msg = 'Features sliding window duration and step must be equal'
            assert features.sliding_window.duration == features.sliding_window.step, msg
            chunk_duration = features.data.shape[0] * features.sliding_window.duration
            self.state = SlidingWindowFeatureFormatterState(chunk_duration)
        elif isinstance(features, np.ndarray):
            self.state = NumpyArrayFormatterState()
        elif isinstance(features, torch.Tensor):
            self.state = PytorchTensorFormatterState()
        else:
            msg = 'Unknown format. Provide one of SlidingWindowFeature, numpy.ndarray, torch.Tensor'
            raise ValueError(msg)

    def cast(self, features: TemporalFeatures) -> torch.Tensor:
        """
        Transform `features` into a float `torch.Tensor`, adding a batch
        dimension when missing.

        Parameters
        ----------
        features: SlidingWindowFeature or numpy.ndarray or torch.Tensor
            Shape (frames, dim) or (batch, frames, dim)

        Returns
        -------
        features: torch.Tensor, shape (batch, frames, dim)
        """
        self.set_state(features)
        tensor = self.state.to_tensor(features)
        msg = 'Temporal features must be 2D or 3D'
        assert tensor.ndim in (2, 3), msg
        if tensor.ndim == 2:
            tensor = tensor.unsqueeze(0)
        return tensor.float()

    def restore_type(self, features: torch.Tensor) -> TemporalFeatures:
        """
        Cast `features` back to the type recorded by the last call to `cast`,
        removing the batch dimension if required.
        """
        return self.state.to_internal_type(features)
|
def overlapped_speech_penalty(segmentation: torch.Tensor, gamma: float=3, beta: float=10):
    """Compute per-frame, per-speaker weights that penalize overlapped speech.

    A sharpened softmax over speakers (temperature `beta`) is combined with the
    raw activations; both are raised to the power `gamma` so frames dominated
    by a single speaker get high weight while overlapped frames are suppressed.

    Parameters
    ----------
    segmentation: torch.Tensor, shape (batch, frames, speakers)
        Speaker segmentation activations.
    gamma: float
        Exponent sharpening the penalty. Defaults to 3.
    beta: float
        Softmax temperature over the speaker dimension. Defaults to 10.

    Returns
    -------
    weights: torch.Tensor, shape (batch, frames, speakers)
        Weights clamped below at 1e-08 to avoid zeros downstream.
    """
    speaker_probs = torch.softmax(beta * segmentation, dim=-1)
    weights = segmentation.pow(gamma) * speaker_probs.pow(gamma)
    # Floor the weights so later divisions/normalizations never hit zero
    return weights.clamp_min(1e-08)
|
def normalize_embeddings(embeddings: torch.Tensor, norm: (float | torch.Tensor)=1) -> torch.Tensor:
    """Rescale speaker embeddings to a target L2 norm.

    Parameters
    ----------
    embeddings: torch.Tensor, shape (speakers, dim) or (batch, speakers, dim)
        Speaker embeddings; a missing batch dimension is added.
    norm: float or torch.Tensor
        Target norm. If a tensor, it must be (batch, speakers, 1)-shaped
        and match the embeddings' batch and speaker dimensions.

    Returns
    -------
    normalized: torch.Tensor, shape (batch, speakers, dim)
    """
    if embeddings.ndim == 2:
        embeddings = embeddings.unsqueeze(0)
    if isinstance(norm, torch.Tensor):
        norm_batch, norm_spk, _ = norm.shape
        emb_batch, emb_spk, _ = embeddings.shape
        assert norm_batch == emb_batch and norm_spk == emb_spk
    magnitudes = embeddings.norm(p=2, dim=-1, keepdim=True)
    return norm * embeddings / magnitudes
|
class StreamingInference():
    "Performs inference in real time given a pipeline and an audio source.\n    Streams an audio source to an online speaker diarization pipeline.\n    It allows users to attach a chain of operations in the form of hooks.\n\n    Parameters\n    ----------\n    pipeline: StreamingPipeline\n        Configured speaker diarization pipeline.\n    source: AudioSource\n        Audio source to be read and streamed.\n    batch_size: int\n        Number of inputs to send to the pipeline at once.\n        Defaults to 1.\n    do_profile: bool\n        If True, compute and report the processing time of the pipeline.\n        Defaults to True.\n    do_plot: bool\n        If True, draw predictions in a moving plot.\n        Defaults to False.\n    show_progress: bool\n        If True, show a progress bar.\n        Defaults to True.\n    progress_bar: Optional[diart.progress.ProgressBar]\n        Progress bar.\n        If description is not provided, set to 'Streaming <source uri>'.\n        Defaults to RichProgressBar().\n    "
    def __init__(self, pipeline: blocks.Pipeline, source: src.AudioSource, batch_size: int=1, do_profile: bool=True, do_plot: bool=False, show_progress: bool=True, progress_bar: Optional[ProgressBar]=None):
        self.pipeline = pipeline
        self.source = source
        self.batch_size = batch_size
        self.do_profile = do_profile
        self.do_plot = do_plot
        self.show_progress = show_progress
        # Collects every emitted prediction into a single Annotation
        self.accumulator = PredictionAccumulator(self.source.uri)
        self.unit = ('chunk' if (self.batch_size == 1) else 'batch')
        self._observers = []
        chunk_duration = self.pipeline.config.duration
        step_duration = self.pipeline.config.step
        sample_rate = self.pipeline.config.sample_rate
        # Number of chunks is only known in advance when the source has a
        # finite duration (e.g. a file as opposed to a microphone)
        self.num_chunks = None
        if (self.source.duration is not None):
            numerator = ((self.source.duration - chunk_duration) + step_duration)
            self.num_chunks = int(np.ceil((numerator / step_duration)))
        self._pbar = progress_bar
        if self.show_progress:
            if (self._pbar is None):
                self._pbar = RichProgressBar()
            self._pbar.create(total=self.num_chunks, description=f'Streaming {self.source.uri}', unit=self.unit)
        # Measures per-chunk/batch processing time when do_profile is enabled
        self._chrono = utils.Chronometer(self.unit, self._pbar)
        self.stream = self.source.stream
        # Regularize the raw audio stream into fixed-duration sliding chunks
        self.stream = self.stream.pipe(dops.rearrange_audio_stream(chunk_duration, step_duration, source.sample_rate))
        # Insert an on-the-fly resampler if the source rate differs from the pipeline's
        if (sample_rate != self.source.sample_rate):
            msg = f"Audio source has sample rate {self.source.sample_rate}, but pipeline's is {sample_rate}. Will resample."
            logging.warning(msg)
            self.stream = self.stream.pipe(ops.map(blocks.Resample(self.source.sample_rate, sample_rate, self.pipeline.config.device)))
        # Group chunks into batches before invoking the pipeline
        self.stream = self.stream.pipe(ops.buffer_with_count(count=self.batch_size))
        if self.do_profile:
            # Wrap the pipeline call with chronometer start/stop actions
            self.stream = self.stream.pipe(ops.do_action(on_next=(lambda _: self._chrono.start())), ops.map(self.pipeline), ops.do_action(on_next=(lambda _: self._chrono.stop())))
        else:
            self.stream = self.stream.pipe(ops.map(self.pipeline))
        # Flatten batched outputs back into single predictions and accumulate them
        self.stream = self.stream.pipe(ops.flat_map((lambda results: rx.from_iterable(results))), ops.do(self.accumulator))
        if show_progress:
            self.stream = self.stream.pipe(ops.do_action(on_next=(lambda _: self._pbar.update())))
    def _close_pbar(self):
        # Close the progress bar if one was created
        if (self._pbar is not None):
            self._pbar.close()
    def _close_chronometer(self):
        # Discard any in-flight measurement, then print timing statistics
        if self.do_profile:
            if self._chrono.is_running:
                self._chrono.stop(do_count=False)
            self._chrono.report()
    def attach_hooks(self, *hooks: Callable[([Tuple[(Annotation, SlidingWindowFeature)]], None)]):
        'Attach hooks to the pipeline.\n\n        Parameters\n        ----------\n        *hooks: (Tuple[Annotation, SlidingWindowFeature]) -> None\n            Hook functions to consume emitted annotations and audio.\n        '
        self.stream = self.stream.pipe(*[ops.do_action(hook) for hook in hooks])
    def attach_observers(self, *observers: Observer):
        'Attach rx observers to the pipeline.\n\n        Parameters\n        ----------\n        *observers: Observer\n            Observers to consume emitted annotations and audio.\n        '
        self.stream = self.stream.pipe(*[ops.do(sink) for sink in observers])
        self._observers.extend(observers)
    def _handle_error(self, error: BaseException):
        # Propagate the error to observers and release the audio source
        for sink in self._observers:
            sink.on_error(error)
        self.source.close()
        # Closing the plot window or pressing Ctrl+C are expected ways to
        # stop streaming: don't print a traceback for those
        window_closed = isinstance(error, WindowClosedException)
        interrupted = isinstance(error, KeyboardInterrupt)
        if ((not window_closed) and (not interrupted)):
            print_exc()
        self._close_pbar()
        self._close_chronometer()
    def _handle_completion(self):
        # Normal end of stream: tear down UI and report timings
        self._close_pbar()
        self._close_chronometer()
    def __call__(self) -> Annotation:
        'Stream audio chunks from `source` to `pipeline`.\n\n        Returns\n        -------\n        predictions: Annotation\n            Speaker diarization pipeline predictions\n        '
        if self.show_progress:
            self._pbar.start()
        config = self.pipeline.config
        observable = self.stream
        if self.do_plot:
            # Buffer outputs so the live plot can render smoothly at the given latency
            observable = self.stream.pipe(dops.buffer_output(duration=config.duration, step=config.step, latency=config.latency, sample_rate=config.sample_rate), ops.do(StreamingPlot(config.duration, config.latency)))
        observable.subscribe(on_error=self._handle_error, on_completed=self._handle_completion)
        # Blocking call: reads the source until exhaustion or error
        self.source.read()
        return self.accumulator.get_prediction()
|
class Benchmark():
    '\n    Run an online speaker diarization pipeline on a set of audio files in batches.\n    Write predictions to a given output directory.\n\n    If the reference is given, calculate the average diarization error rate.\n\n    Parameters\n    ----------\n    speech_path: Text or Path\n        Directory with audio files.\n    reference_path: Text, Path or None\n        Directory with reference RTTM files (same names as audio files).\n        If None, performance will not be calculated.\n        Defaults to None.\n    output_path: Text, Path or None\n        Output directory to store predictions in RTTM format.\n        If None, predictions will not be written to disk.\n        Defaults to None.\n    show_progress: bool\n        Whether to show progress bars.\n        Defaults to True.\n    show_report: bool\n        Whether to print a performance report to stdout.\n        Defaults to True.\n    batch_size: int\n        Inference batch size.\n        If < 2, then it will run in real time.\n        If >= 2, then it will pre-calculate segmentation and\n        embeddings, running the rest in real time.\n        The performance between this two modes does not differ.\n        Defaults to 32.\n    '
    def __init__(self, speech_path: Union[(Text, Path)], reference_path: Optional[Union[(Text, Path)]]=None, output_path: Optional[Union[(Text, Path)]]=None, show_progress: bool=True, show_report: bool=True, batch_size: int=32):
        self.speech_path = Path(speech_path).expanduser()
        assert self.speech_path.is_dir(), 'Speech path must be a directory'
        # Without a reference or an output directory the benchmark would
        # produce no observable result
        msg = 'Benchmark expected reference path, output path or both'
        assert ((reference_path is not None) or (output_path is not None)), msg
        self.reference_path = reference_path
        if (reference_path is not None):
            self.reference_path = Path(self.reference_path).expanduser()
            assert self.reference_path.is_dir(), 'Reference path must be a directory'
        self.output_path = output_path
        if (self.output_path is not None):
            self.output_path = Path(output_path).expanduser()
            self.output_path.mkdir(parents=True, exist_ok=True)
        self.show_progress = show_progress
        self.show_report = show_report
        self.batch_size = batch_size
    def get_file_paths(self) -> List[Path]:
        'Return the path for each file in the benchmark.\n\n        Returns\n        -------\n        paths: List[Path]\n            List of audio file paths.\n        '
        return list(self.speech_path.iterdir())
    def run_single(self, pipeline: blocks.Pipeline, filepath: Path, progress_bar: ProgressBar) -> Annotation:
        'Run a given pipeline on a given file.\n        Note that this method does NOT reset the\n        state of the pipeline before execution.\n\n        Parameters\n        ----------\n        pipeline: StreamingPipeline\n            Speaker diarization pipeline to run.\n        filepath: Path\n            Path to the target file.\n        progress_bar: diart.progress.ProgressBar\n            An object to manage the progress of this run.\n\n        Returns\n        -------\n        prediction: Annotation\n            Pipeline prediction for the given file.\n        '
        # Pad the file so the pipeline sees complete windows at the edges
        padding = pipeline.config.get_file_padding(filepath)
        source = src.FileAudioSource(filepath, pipeline.config.sample_rate, padding, pipeline.config.step)
        # Shift timestamps left so predictions align with the unpadded audio
        pipeline.set_timestamp_shift((- padding[0]))
        inference = StreamingInference(pipeline, source, self.batch_size, do_profile=False, do_plot=False, show_progress=self.show_progress, progress_bar=progress_bar)
        pred = inference()
        pred.uri = source.uri
        # Persist the prediction as an RTTM file named after the audio file
        if (self.output_path is not None):
            with open((self.output_path / f'{source.uri}.rttm'), 'w') as out_file:
                pred.write_rttm(out_file)
        return pred
    def evaluate(self, predictions: List[Annotation], metric: BaseMetric) -> Union[(pd.DataFrame, List[Annotation])]:
        'If a reference path was provided,\n        compute the diarization error rate of a list of predictions.\n\n        Parameters\n        ----------\n        predictions: List[Annotation]\n            Predictions to evaluate.\n        metric: BaseMetric\n            Evaluation metric from pyannote.metrics.\n\n        Returns\n        -------\n        report_or_predictions: Union[pd.DataFrame, List[Annotation]]\n            A performance report as a pandas `DataFrame` if a\n            reference path was given. Otherwise return the same predictions.\n        '
        if (self.reference_path is not None):
            progress_bar = TQDMProgressBar(f'Computing {metric.name}', leave=False)
            progress_bar.create(total=len(predictions), unit='file')
            progress_bar.start()
            for hyp in predictions:
                # Reference RTTM file shares its name (uri) with the prediction
                ref = load_rttm((self.reference_path / f'{hyp.uri}.rttm')).popitem()[1]
                metric(ref, hyp)
                progress_bar.update()
            progress_bar.close()
            return metric.report(display=self.show_report)
        return predictions
    def __call__(self, pipeline_class: type, config: blocks.PipelineConfig, metric: Optional[BaseMetric]=None) -> Union[(pd.DataFrame, List[Annotation])]:
        "Run a given pipeline on a set of audio files.\n        The internal state of the pipeline is reset before benchmarking.\n\n        Parameters\n        ----------\n        pipeline_class: class\n            Class from the StreamingPipeline hierarchy.\n            A pipeline from this class will be instantiated by each worker.\n        config: StreamingConfig\n            Streaming pipeline configuration.\n        metric: Optional[BaseMetric]\n            Evaluation metric from pyannote.metrics.\n            Defaults to the pipeline's suggested metric (see `StreamingPipeline.suggest_metric()`)\n\n        Returns\n        -------\n        performance: pandas.DataFrame or List[Annotation]\n            If reference annotations are given, a DataFrame with detailed\n            performance on each file as well as average performance.\n\n            If no reference annotations, a list of predictions.\n        "
        audio_file_paths = self.get_file_paths()
        num_audio_files = len(audio_file_paths)
        # A single pipeline instance is reused; reset() clears state between files
        pipeline = pipeline_class(config)
        predictions = []
        for (i, filepath) in enumerate(audio_file_paths):
            pipeline.reset()
            desc = f'Streaming {filepath.stem} ({(i + 1)}/{num_audio_files})'
            progress = TQDMProgressBar(desc, leave=False, do_close=True)
            predictions.append(self.run_single(pipeline, filepath, progress))
        metric = (pipeline.suggest_metric() if (metric is None) else metric)
        return self.evaluate(predictions, metric)
|
class Parallelize():
    'Wrapper to parallelize the execution of a `Benchmark` instance.\n    Note that models will be copied in each worker instead of being reused.\n\n    Parameters\n    ----------\n    benchmark: Benchmark\n        Benchmark instance to execute in parallel.\n    num_workers: int\n        Number of parallel workers.\n        Defaults to 0 (no parallelism).\n    '
    def __init__(self, benchmark: Benchmark, num_workers: int=4):
        self.benchmark = benchmark
        self.num_workers = num_workers
    def run_single_job(self, pipeline_class: type, config: blocks.PipelineConfig, filepath: Path, description: Text) -> Annotation:
        'Build and run a pipeline on a single file.\n        Configure execution to show progress alongside parallel runs.\n\n        Parameters\n        ----------\n        pipeline_class: class\n            Class from the StreamingPipeline hierarchy.\n            A pipeline from this class will be instantiated.\n        config: StreamingConfig\n            Streaming pipeline configuration.\n        filepath: Path\n            Path to the target file.\n        description: Text\n            Description to show in the parallel progress bar.\n\n        Returns\n        -------\n        prediction: Annotation\n            Pipeline prediction for the given file.\n        '
        # Pool worker names look like "SpawnPoolWorker-N"; use N-1 as the tqdm
        # row position so each worker draws its own bar (assumes default
        # multiprocessing worker naming -- confirm if a custom Pool is used)
        idx_process = (int(current_process().name.split('-')[1]) - 1)
        # Each worker builds its own pipeline (models are not shared across processes)
        pipeline = pipeline_class(config)
        progress = TQDMProgressBar(description, leave=False, position=idx_process, do_close=True)
        return self.benchmark.run_single(pipeline, filepath, progress)
    def __call__(self, pipeline_class: type, config: blocks.PipelineConfig, metric: Optional[BaseMetric]=None) -> Union[(pd.DataFrame, List[Annotation])]:
        "Run a given pipeline on a set of audio files in parallel.\n        Each worker will build and run the pipeline on a different file.\n\n        Parameters\n        ----------\n        pipeline_class: class\n            Class from the StreamingPipeline hierarchy.\n            A pipeline from this class will be instantiated by each worker.\n        config: StreamingConfig\n            Streaming pipeline configuration.\n        metric: Optional[BaseMetric]\n            Evaluation metric from pyannote.metrics.\n            Defaults to the pipeline's suggested metric (see `StreamingPipeline.suggest_metric()`)\n\n        Returns\n        -------\n        performance: pandas.DataFrame or List[Annotation]\n            If reference annotations are given, a DataFrame with detailed\n            performance on each file as well as average performance.\n\n            If no reference annotations, a list of predictions.\n        "
        audio_file_paths = self.benchmark.get_file_paths()
        num_audio_files = len(audio_file_paths)
        # 'spawn' is required by torch/CUDA; ignore if a start method was already set
        try:
            torch.multiprocessing.set_start_method('spawn')
        except RuntimeError:
            pass
        freeze_support()
        # Share a lock so tqdm bars from different workers don't garble each other
        pool = Pool(processes=self.num_workers, initargs=(RLock(),), initializer=tqdm.set_lock)
        arg_list = [(pipeline_class, config, filepath, f'Streaming {filepath.stem} ({(i + 1)}/{num_audio_files})') for (i, filepath) in enumerate(audio_file_paths)]
        # Submit all jobs, then collect results in submission order
        jobs = [pool.apply_async(self.run_single_job, args=args) for args in arg_list]
        pool.close()
        predictions = [job.get() for job in jobs]
        metric = (pipeline_class.suggest_metric() if (metric is None) else metric)
        return self.benchmark.evaluate(predictions, metric)
|
class PowersetAdapter(nn.Module):
    """Adapter converting a powerset segmentation model's output to multilabel activations."""

    def __init__(self, segmentation_model: nn.Module):
        super().__init__()
        self.model = segmentation_model
        # Powerset dimensions come from the wrapped model's specifications
        specs = self.model.specifications
        self.powerset = Powerset(len(specs.classes), specs.powerset_max_classes)

    def forward(self, waveform: torch.Tensor) -> torch.Tensor:
        powerset_output = self.model(waveform)
        # Convert powerset class probabilities to per-speaker activations
        return self.powerset.to_multilabel(powerset_output)
|
class PyannoteLoader():
    """Callable loader for pyannote.audio models.

    Tries to load a segmentation-style `Model` first (wrapping powerset models
    in a `PowersetAdapter`); if that fails, falls back to a pretrained
    speaker embedding model.
    """

    def __init__(self, model_info, hf_token: Union[(Text, bool, None)]=True):
        super().__init__()
        self.model_info = model_info
        self.hf_token = hf_token

    def __call__(self) -> Callable:
        try:
            model = Model.from_pretrained(self.model_info, use_auth_token=self.hf_token)
            specs = getattr(model, 'specifications', None)
            if specs is not None and specs.powerset:
                # Expose multilabel activations instead of powerset classes
                model = PowersetAdapter(model)
            return model
        except (HTTPError, ModuleNotFoundError):
            # Not loadable as a pyannote Model: fall back to an embedding model
            pass
        return PretrainedSpeakerEmbedding(self.model_info, use_auth_token=self.hf_token)
|
class ONNXLoader():
    """Callable that builds an `ONNXModel` from a model file path."""

    def __init__(self, path: (str | Path), input_names: List[str], output_name: str):
        super().__init__()
        # Normalize to a Path so downstream code can rely on it
        self.path = Path(path)
        self.input_names = input_names
        self.output_name = output_name

    def __call__(self) -> ONNXModel:
        return ONNXModel(self.path, self.input_names, self.output_name)
|
class ONNXModel():
    """Runs an ONNX model through onnxruntime with a torch-like interface.

    Accepts torch tensors, feeds them to the session as float32 numpy arrays,
    and returns the output as a torch tensor on the first input's device.
    """

    def __init__(self, path: Path, input_names: List[str], output_name: str):
        super().__init__()
        self.path = path
        self.input_names = input_names
        self.output_name = output_name
        # Sessions start on CPU; to() rebuilds them for other device types
        self.device = torch.device('cpu')
        self.session = None
        self.recreate_session()

    @property
    def execution_provider(self) -> str:
        prefix = 'CUDA' if self.device.type == 'cuda' else 'CPU'
        return f'{prefix}ExecutionProvider'

    def recreate_session(self):
        options = ort.SessionOptions()
        options.graph_optimization_level = ort.GraphOptimizationLevel.ORT_ENABLE_ALL
        self.session = ort.InferenceSession(self.path, sess_options=options, providers=[self.execution_provider])

    def to(self, device: torch.device) -> ONNXModel:
        # Only rebuild the session when the device type actually changes
        if device.type != self.device.type:
            self.device = device
            self.recreate_session()
        return self

    def __call__(self, *args) -> torch.Tensor:
        feed = {
            name: tensor.cpu().numpy().astype(np.float32)
            for name, tensor in zip(self.input_names, args)
        }
        raw_output = self.session.run([self.output_name], feed)[0]
        # Return on the same device as the first input tensor
        return torch.from_numpy(raw_output).float().to(args[0].device)
|
class LazyModel(ABC):
    """Deferred-loading wrapper around a model produced by a loader callable.

    The loader is only invoked on first use (call, `to()` or `eval()`), so
    constructing a `LazyModel` is cheap and side-effect free.
    """

    def __init__(self, loader: Callable[[], Callable]):
        super().__init__()
        self.get_model = loader
        # Populated lazily by load()
        self.model: Optional[Callable] = None

    def is_in_memory(self) -> bool:
        'Return whether the model has been loaded into memory'
        return self.model is not None

    def load(self):
        # Idempotent: the loader runs at most once
        if self.model is None:
            self.model = self.get_model()

    def to(self, device: torch.device) -> LazyModel:
        self.load()
        self.model = self.model.to(device)
        return self

    def __call__(self, *args, **kwargs):
        self.load()
        return self.model(*args, **kwargs)

    def eval(self) -> LazyModel:
        self.load()
        # Only torch modules have an eval mode; other callables are left as-is
        if isinstance(self.model, nn.Module):
            self.model.eval()
        return self
|
class SegmentationModel(LazyModel):
    """Minimal interface for a lazily-loaded speaker segmentation model."""

    @staticmethod
    def from_pyannote(model, use_hf_token: Union[(Text, bool, None)]=True) -> 'SegmentationModel':
        """Wrap a pyannote.audio model as a `SegmentationModel`.

        Parameters
        ----------
        model: pyannote.PipelineModel
            The pyannote.audio model to fetch.
        use_hf_token: str | bool, optional
            HuggingFace access token used to download the model.
            If True, use the huggingface-cli login token.

        Returns
        -------
        wrapper: SegmentationModel
        """
        assert IS_PYANNOTE_AVAILABLE, 'No pyannote.audio installation found'
        return SegmentationModel(PyannoteLoader(model, use_hf_token))

    @staticmethod
    def from_onnx(model_path: Union[(str, Path)], input_name: str='waveform', output_name: str='segmentation') -> 'SegmentationModel':
        """Wrap an ONNX file as a `SegmentationModel`."""
        assert IS_ONNX_AVAILABLE, 'No ONNX installation found'
        return SegmentationModel(ONNXLoader(model_path, [input_name], output_name))

    @staticmethod
    def from_pretrained(model, use_hf_token: Union[(Text, bool, None)]=True) -> 'SegmentationModel':
        """Dispatch to the ONNX or pyannote loader based on the model argument."""
        if isinstance(model, (str, Path)) and Path(model).name.endswith('.onnx'):
            return SegmentationModel.from_onnx(model)
        return SegmentationModel.from_pyannote(model, use_hf_token)

    def __call__(self, waveform: torch.Tensor) -> torch.Tensor:
        """Run the segmentation forward pass.

        Parameters
        ----------
        waveform: torch.Tensor, shape (batch, channels, samples)

        Returns
        -------
        speaker_segmentation: torch.Tensor, shape (batch, frames, speakers)
        """
        return super().__call__(waveform)
|
class EmbeddingModel(LazyModel):
    'Minimal interface for an embedding model.'

    @staticmethod
    def from_pyannote(model, use_hf_token: Union[(Text, bool, None)]=True) -> 'EmbeddingModel':
        """Wrap a pyannote.audio model as an `EmbeddingModel`.

        Parameters
        ----------
        model: pyannote.PipelineModel
            The pyannote.audio model to fetch.
        use_hf_token: str | bool, optional
            HuggingFace access token used to download the model.
            If True, use the huggingface-cli login token.

        Returns
        -------
        wrapper: EmbeddingModel
        """
        assert IS_PYANNOTE_AVAILABLE, 'No pyannote.audio installation found'
        return EmbeddingModel(PyannoteLoader(model, use_hf_token))

    @staticmethod
    def from_onnx(model_path: Union[(str, Path)], input_names: (List[str] | None)=None, output_name: str='embedding') -> 'EmbeddingModel':
        """Wrap an ONNX file as an `EmbeddingModel`."""
        assert IS_ONNX_AVAILABLE, 'No ONNX installation found'
        names = input_names if input_names else ['waveform', 'weights']
        return EmbeddingModel(ONNXLoader(model_path, names, output_name))

    @staticmethod
    def from_pretrained(model, use_hf_token: Union[(Text, bool, None)]=True) -> 'EmbeddingModel':
        """Dispatch to the ONNX or pyannote loader based on the model argument."""
        if isinstance(model, (str, Path)) and Path(model).name.endswith('.onnx'):
            return EmbeddingModel.from_onnx(model)
        return EmbeddingModel.from_pyannote(model, use_hf_token)

    def __call__(self, waveform: torch.Tensor, weights: Optional[torch.Tensor]=None) -> torch.Tensor:
        """Run the embedding forward pass with optional per-frame weights.

        Parameters
        ----------
        waveform: torch.Tensor, shape (batch, channels, samples)
        weights: Optional[torch.Tensor], shape (batch, frames)
            Temporal weights for each sample in the batch. Defaults to no weights.

        Returns
        -------
        speaker_embeddings: torch.Tensor, shape (batch, embedding_dim)
        """
        embeddings = super().__call__(waveform, weights)
        # Some backends (e.g. PretrainedSpeakerEmbedding) return numpy arrays
        if isinstance(embeddings, np.ndarray):
            embeddings = torch.from_numpy(embeddings)
        return embeddings
|
class Optimizer():
    """Optuna-based hyper-parameter optimizer for streaming pipelines.

    Each trial samples hyper-parameter values, builds a pipeline configuration
    and runs a `Benchmark` over the given speech/reference files, using the
    resulting metric value as the objective.

    Parameters
    ----------
    pipeline_class: type
        Class from the StreamingPipeline hierarchy to optimize.
    speech_path: Text | Path
        Directory with audio files.
    reference_path: Text | Path
        Directory with reference RTTM files.
    study_or_path: FilePath | Study
        An existing optuna Study, or a directory where a sqlite-backed
        study will be created/loaded (named after the directory).
    batch_size: int
        Benchmark inference batch size. Defaults to 32.
    hparams: Optional[Sequence[HyperParameter]]
        Hyper-parameters to tune. Defaults to the pipeline's own.
    base_config: Optional[PipelineConfig]
        Configuration providing fixed (non-tuned) values. Defaults to the
        pipeline's default configuration.
    do_kickstart_hparams: bool
        Whether to enqueue the base config values as the first trial.
        Forced to False when no base_config is given.
    metric: Optional[BaseMetric]
        Metric to optimize. Defaults to the pipeline's suggested metric.
    direction: Literal['minimize', 'maximize']
        Optimization direction. Defaults to 'minimize'.
    """
    def __init__(self, pipeline_class: type, speech_path: Union[(Text, Path)], reference_path: Union[(Text, Path)], study_or_path: Union[(FilePath, Study)], batch_size: int=32, hparams: Optional[Sequence[blocks.base.HyperParameter]]=None, base_config: Optional[blocks.PipelineConfig]=None, do_kickstart_hparams: bool=True, metric: Optional[BaseMetric]=None, direction: Literal[('minimize', 'maximize')]='minimize'):
        self.pipeline_class = pipeline_class
        # Reports are consumed programmatically, so suppress printing
        self.benchmark = Benchmark(speech_path, reference_path, show_progress=True, show_report=False, batch_size=batch_size)
        self.metric = metric
        self.direction = direction
        self.base_config = base_config
        self.do_kickstart_hparams = do_kickstart_hparams
        if (self.base_config is None):
            self.base_config = self.pipeline_class.get_config_class()()
            # Kickstarting from a default config would just re-test defaults
            self.do_kickstart_hparams = False
        self.hparams = hparams
        if (self.hparams is None):
            self.hparams = self.pipeline_class.hyper_parameters()
        # Every tuned hyper-parameter must exist on the configuration object
        possible_hparams = vars(self.base_config)
        for param in self.hparams:
            msg = f'Hyper-parameter {param.name} not found in configuration {self.base_config.__class__.__name__}'
            assert (param.name in possible_hparams), msg
        self._progress: Optional[tqdm] = None
        if isinstance(study_or_path, Study):
            self.study = study_or_path
        elif (isinstance(study_or_path, str) or isinstance(study_or_path, Path)):
            # Persist the study in a sqlite DB named after the directory
            study_or_path = Path(study_or_path)
            self.study = create_study(storage=('sqlite:///' + str((study_or_path / f'{study_or_path.stem}.db'))), sampler=TPESampler(), study_name=study_or_path.stem, direction=self.direction, load_if_exists=True)
        else:
            msg = f'Expected Study object or path-like, but got {type(study_or_path).__name__}'
            raise ValueError(msg)
    @property
    def best_performance(self):
        # Best objective value seen so far
        return self.study.best_value
    @property
    def best_hparams(self):
        # Hyper-parameter values of the best trial
        return self.study.best_params
    def _callback(self, study: Study, trial: FrozenTrial):
        # Update the progress bar with the best results after each trial
        if (self._progress is None):
            return
        self._progress.update(1)
        self._progress.set_description(f'Trial {(trial.number + 1)}')
        values = {'best_perf': study.best_value}
        for (name, value) in study.best_params.items():
            values[f'best_{name}'] = value
        self._progress.set_postfix(OrderedDict(values))
    def objective(self, trial: Trial) -> float:
        """Run one benchmark with sampled hyper-parameters and return the metric value."""
        # Copy the config attributes: vars() returns the live __dict__, and
        # writing into it directly would mutate base_config across trials.
        trial_config = dict(vars(self.base_config))
        for hparam in self.hparams:
            # NOTE(review): suggest_uniform is deprecated in recent Optuna
            # versions in favor of suggest_float -- confirm the pinned version
            trial_config[hparam.name] = trial.suggest_uniform(hparam.name, hparam.low, hparam.high)
        if trial.should_prune():
            raise TrialPruned()
        config = self.base_config.__class__(**trial_config)
        metric = self.metric
        if (metric is None):
            metric = self.pipeline_class.suggest_metric()
        report = self.benchmark(self.pipeline_class, config, metric)
        # Use the aggregate (TOTAL) metric value as the objective
        return report.loc[('TOTAL', metric.name)]['%']
    def __call__(self, num_iter: int, show_progress: bool=True):
        """Run `num_iter` optimization trials.

        Parameters
        ----------
        num_iter: int
            Number of trials to run.
        show_progress: bool
            Whether to display a tqdm progress bar. Defaults to True.
        """
        self._progress = None
        if show_progress:
            self._progress = trange(num_iter)
            # Resume numbering from the last recorded trial, if any
            last_trial = (- 1)
            if self.study.trials:
                last_trial = self.study.trials[(- 1)].number
            self._progress.set_description(f'Trial {(last_trial + 1)}')
        if self.do_kickstart_hparams:
            # Seed the search with the base configuration's current values
            self.study.enqueue_trial({param.name: getattr(self.base_config, param.name) for param in self.hparams}, skip_if_exists=True)
        self.study.optimize(self.objective, num_iter, callbacks=[self._callback])
|
class ProgressBar(ABC):
    """Abstract interface for progress bars used during streaming inference."""

    @abstractmethod
    def create(self, total: int, description: Optional[Text]=None, unit: Text='it', **kwargs):
        pass

    @abstractmethod
    def start(self):
        pass

    @abstractmethod
    def update(self, n: int=1):
        pass

    @abstractmethod
    def write(self, text: Text):
        pass

    @abstractmethod
    def stop(self):
        pass

    @abstractmethod
    def close(self):
        pass

    @property
    @abstractmethod
    def default_description(self) -> Text:
        pass

    @property
    @abstractmethod
    def initial_description(self) -> Optional[Text]:
        pass

    def resolve_description(self, new_description: Optional[Text]=None) -> Text:
        # Priority: description given at construction time, then the one
        # provided here, then the implementation's default.
        if self.initial_description is not None:
            return self.initial_description
        if new_description is not None:
            return new_description
        return self.default_description
|
class RichProgressBar(ProgressBar):
    """`rich`-backed implementation of the `ProgressBar` interface.

    Parameters
    ----------
    description: Optional[Text]
        Fixed description; when given it takes precedence over per-create descriptions.
    color: Text
        Rich color tag applied to descriptions. Defaults to 'green'.
    leave: bool
        Whether the bar stays visible after completion. Defaults to True.
    do_close: bool
        Whether close() actually stops the underlying renderer. Defaults to True.
    """

    def __init__(self, description: Optional[Text]=None, color: Text='green', leave: bool=True, do_close: bool=True):
        self.description = description
        self.color = color
        self.do_close = do_close
        # transient=True makes rich remove the bar on completion
        self.bar = Progress(transient=(not leave))
        self.bar.start()
        self.task_id: Optional[TaskID] = None

    @property
    def default_description(self) -> Text:
        return f'[{self.color}]Streaming'

    @property
    def initial_description(self) -> Optional[Text]:
        if (self.description is not None):
            return f'[{self.color}]{self.description}'
        return self.description

    def create(self, total: int, description: Optional[Text]=None, unit: Text='it', **kwargs):
        # Idempotent: only the first call creates the task
        if (self.task_id is None):
            # Bug fix: formatting a None description used to produce the literal
            # string "[<color>]None", which bypassed resolve_description's
            # fallback to the default description. Keep None as None instead.
            styled = None if description is None else f'[{self.color}]{description}'
            self.task_id = self.bar.add_task(self.resolve_description(styled), start=False, total=total, completed=0, visible=True, **kwargs)

    def start(self):
        assert (self.task_id is not None)
        self.bar.start_task(self.task_id)

    def update(self, n: int=1):
        assert (self.task_id is not None)
        self.bar.update(self.task_id, advance=n)

    def write(self, text: Text):
        # Print above the live bar without breaking its rendering
        rich.print(text)

    def stop(self):
        assert (self.task_id is not None)
        self.bar.stop_task(self.task_id)

    def close(self):
        if self.do_close:
            self.bar.stop()
|
class TQDMProgressBar(ProgressBar):
    """tqdm-backed implementation of the `ProgressBar` interface."""

    def __init__(self, description: Optional[Text]=None, leave: bool=True, position: Optional[int]=None, do_close: bool=True):
        self.description = description
        self.leave = leave
        self.position = position
        self.do_close = do_close
        # The underlying bar is created lazily by create()
        self.pbar: Optional[tqdm] = None

    @property
    def default_description(self) -> Text:
        return 'Streaming'

    @property
    def initial_description(self) -> Optional[Text]:
        return self.description

    def create(self, total: int, description: Optional[Text]=None, unit: Optional[Text]='it', **kwargs):
        # Idempotent: only the first call builds the bar
        if self.pbar is not None:
            return
        self.pbar = tqdm(desc=self.resolve_description(description), total=total, unit=unit, leave=self.leave, position=self.position, **kwargs)

    def start(self):
        # tqdm starts counting as soon as it is created; nothing to do
        pass

    def update(self, n: int=1):
        assert (self.pbar is not None)
        self.pbar.update(n)

    def write(self, text: Text):
        # Print without breaking the bar's rendering
        tqdm.write(text)

    def stop(self):
        # tqdm has no pause concept; stopping is the same as closing
        self.close()

    def close(self):
        if self.do_close:
            assert (self.pbar is not None)
            self.pbar.close()
|
class WindowClosedException(Exception):
    'Raised when the visualization window is closed, to stop streaming gracefully.'
    pass
|
def _extract_prediction(value: Union[(Tuple, Annotation)]) -> Annotation:
if isinstance(value, tuple):
return value[0]
if isinstance(value, Annotation):
return value
msg = f'Expected tuple or Annotation, but got {type(value)}'
raise ValueError(msg)
|
class RTTMWriter(Observer):
    """Observer that incrementally writes streamed predictions to an RTTM file."""

    def __init__(self, uri: Text, path: Union[(Path, Text)], patch_collar: float=0.05):
        super().__init__()
        self.uri = uri
        self.patch_collar = patch_collar
        self.path = Path(path).expanduser()
        # Start from a clean file on every run
        if self.path.exists():
            self.path.unlink()

    def patch(self):
        'Stitch same-speaker turns that are close to each other'
        if not self.path.exists():
            return
        annotations = list(load_rttm(self.path).values())
        if not annotations:
            return
        merged = annotations[0]
        merged.uri = self.uri
        # Rewrite the whole file with gaps below patch_collar merged
        with open(self.path, 'w') as file:
            merged.support(self.patch_collar).write_rttm(file)

    def on_next(self, value: Union[(Tuple, Annotation)]):
        prediction = _extract_prediction(value)
        prediction.uri = self.uri
        # Append incrementally so partial results survive a crash
        with open(self.path, 'a') as file:
            prediction.write_rttm(file)

    def on_error(self, error: Exception):
        self.patch()

    def on_completed(self):
        self.patch()
|
class PredictionAccumulator(Observer):
    """Observer that merges streamed predictions into one `Annotation`."""

    def __init__(self, uri: Optional[Text]=None, patch_collar: float=0.05):
        super().__init__()
        self.uri = uri
        self.patch_collar = patch_collar
        # Running union of everything received so far
        self._prediction: Optional[Annotation] = None

    def patch(self):
        'Stitch same-speaker turns that are close to each other'
        if self._prediction is not None:
            self._prediction = self._prediction.support(self.patch_collar)

    def get_prediction(self) -> Annotation:
        # Patch before returning so the result has merged speaker turns
        self.patch()
        return self._prediction

    def on_next(self, value: Union[(Tuple, Annotation)]):
        prediction = _extract_prediction(value)
        prediction.uri = self.uri
        accumulated = self._prediction
        if accumulated is None:
            self._prediction = prediction
        else:
            accumulated.update(prediction)

    def on_error(self, error: Exception):
        self.patch()

    def on_completed(self):
        self.patch()
|
class StreamingPlot(Observer):
    """Observer that draws streamed predictions (and optionally a reference annotation)
    in a live matplotlib figure, either sliding over time or accumulating."""
    def __init__(self, duration: float, latency: float, visualization: Literal[('slide', 'accumulate')]='slide', reference: Optional[Union[(Path, Text)]]=None):
        super().__init__()
        assert (visualization in ['slide', 'accumulate'])
        self.visualization = visualization
        self.reference = reference
        if (self.reference is not None):
            # Load the first annotation found in the reference RTTM file
            self.reference = list(load_rttm(reference).values())[0]
        self.window_duration = duration
        self.latency = latency
        # Figure and axes are created lazily on the first on_next() call
        (self.figure, self.axs, self.num_axs) = (None, None, (- 1))
        self.window_closed = False
    def _on_window_closed(self, event):
        # Flag checked in on_next() to raise WindowClosedException and stop streaming
        self.window_closed = True
    def _init_num_axs(self):
        # Two axes (output + audio), plus one more when a reference is available
        if (self.num_axs == (- 1)):
            self.num_axs = 2
            if (self.reference is not None):
                self.num_axs += 1
    def _init_figure(self):
        self._init_num_axs()
        (self.figure, self.axs) = plt.subplots(self.num_axs, 1, figsize=(10, (2 * self.num_axs)))
        # plt.subplots returns a bare Axes (not a list) when there's only one
        if (self.num_axs == 1):
            self.axs = [self.axs]
        self.figure.canvas.mpl_connect('close_event', self._on_window_closed)
    def _clear_axs(self):
        for i in range(self.num_axs):
            self.axs[i].clear()
    def get_plot_bounds(self, real_time: float) -> Segment:
        # Plot up to the latency-adjusted current time; 'slide' keeps only
        # the last window, 'accumulate' starts from zero
        start_time = 0
        end_time = (real_time - self.latency)
        if (self.visualization == 'slide'):
            start_time = max(0.0, (end_time - self.window_duration))
        return Segment(start_time, end_time)
    def on_next(self, values: Tuple[(Annotation, SlidingWindowFeature, float)]):
        if self.window_closed:
            raise WindowClosedException
        (prediction, waveform, real_time) = values
        if (self.figure is None):
            self._init_figure()
        self._clear_axs()
        notebook.crop = self.get_plot_bounds(real_time)
        if (self.reference is not None):
            # Rename predicted speakers to their optimal reference counterparts
            # so colors/labels stay comparable across the two plots
            metric = DiarizationErrorRate()
            mapping = metric.optimal_mapping(self.reference, prediction)
            prediction.rename_labels(mapping=mapping, copy=False)
        notebook.plot_annotation(prediction, self.axs[0])
        self.axs[0].set_title('Output')
        notebook.plot_feature(waveform, self.axs[1])
        self.axs[1].set_title('Audio')
        if (self.num_axs == 3):
            notebook.plot_annotation(self.reference, self.axs[2])
            self.axs[2].set_title('Reference')
        plt.tight_layout()
        # Force a redraw and yield briefly to the GUI event loop
        self.figure.canvas.draw()
        self.figure.canvas.flush_events()
        plt.pause(0.05)
|
class Chronometer():
    """Measure wall-clock durations of repeated events and report statistics.

    Parameters
    ----------
    unit: Text
        Name of the measured unit (e.g. "chunk"), used in the report line.
    progress_bar: Optional[ProgressBar]
        If given, reports are written through the progress bar.
    """

    def __init__(self, unit: Text, progress_bar: Optional[ProgressBar]=None):
        self.unit = unit
        self.progress_bar = progress_bar
        self.current_start_time = None
        self.history = []

    @property
    def is_running(self):
        """Whether start() has been called without a matching stop()."""
        return self.current_start_time is not None

    def start(self):
        """Mark the beginning of a measured interval."""
        self.current_start_time = time.monotonic()

    def stop(self, do_count: bool=True):
        """Close the current interval, optionally recording its duration."""
        msg = 'No start time available, Did you call stop() before start()?'
        assert self.current_start_time is not None, msg
        elapsed = time.monotonic() - self.current_start_time
        self.current_start_time = None
        if do_count:
            self.history.append(elapsed)

    def report(self):
        """Print mean/std duration over all recorded intervals."""
        write = print if self.progress_bar is None else self.progress_bar.write
        write(f'Took {np.mean(self.history).item():.3f} (+/-{np.std(self.history).item():.3f}) seconds/{self.unit} -- ran {len(self.history)} times')
|
def parse_hf_token_arg(hf_token: Union[(bool, Text)]) -> Union[(bool, Text)]:
    """Normalize a HuggingFace token CLI argument.

    The strings "true"/"false" (any casing) map to booleans; booleans pass
    through unchanged; any other string is treated as an actual token.
    """
    if isinstance(hf_token, bool):
        return hf_token
    lowered = hf_token.lower()
    if lowered == 'true':
        return True
    if lowered == 'false':
        return False
    return hf_token
|
def encode_audio(waveform: np.ndarray) -> Text:
    """Serialize a waveform as base64-encoded raw float32 bytes."""
    raw = waveform.astype(np.float32).tobytes()
    encoded = base64.b64encode(raw)
    return encoded.decode('utf-8')
|
def decode_audio(data: Text) -> np.ndarray:
    """Inverse of encode_audio: base64 text -> float32 array of shape (1, n)."""
    raw = base64.decodebytes(data.encode('utf-8'))
    return np.frombuffer(raw, dtype=np.float32).reshape(1, -1)
|
def get_padding_left(stream_duration: float, chunk_duration: float) -> float:
    """Amount of left padding needed so the stream fills a whole chunk."""
    deficit = chunk_duration - stream_duration
    return deficit if deficit > 0 else 0
|
def repeat_label(label: Text):
    """Infinite generator that yields `label` forever."""
    while True:
        yield label
|
def get_pipeline_class(class_name: Text) -> type:
    """Resolve a pipeline class by name from the `blocks` module."""
    resolved = getattr(blocks, class_name, None)
    assert resolved is not None, f"Pipeline '{class_name}' doesn't exist"
    return resolved
|
def get_padding_right(latency: float, step: float) -> float:
    """Right padding is the latency minus one step."""
    return latency - step
|
def visualize_feature(duration: Optional[float]=None):
    """Build a callback that plots a SlidingWindowFeature.

    When `duration` is given, only the trailing `duration` seconds are
    shown; otherwise the whole extent is plotted.
    """
    def apply(feature: SlidingWindowFeature):
        extent = feature.extent
        if duration is None:
            notebook.crop = extent
        else:
            notebook.crop = Segment(extent.end - duration, extent.end)
        plt.rcParams['figure.figsize'] = (8, 2)
        notebook.plot_feature(feature)
        plt.tight_layout()
        plt.show()
    return apply
|
def visualize_annotation(duration: Optional[float]=None):
    """Build a callback that plots an Annotation.

    When `duration` is given, only the trailing `duration` seconds of the
    annotation's timeline are shown; otherwise the whole extent is plotted.
    """
    def apply(annotation: Annotation):
        extent = annotation.get_timeline().extent()
        if duration is None:
            notebook.crop = extent
        else:
            notebook.crop = Segment(extent.end - duration, extent.end)
        plt.rcParams['figure.figsize'] = (8, 2)
        notebook.plot_annotation(annotation)
        plt.tight_layout()
        plt.show()
    return apply
|
class MultiHeadAttn(nn.Module):
def __init__(self, dim_q, dim_k, dim_v, dim_out, num_heads=8):
super().__init__()
self.num_heads = num_heads
self.dim_out = dim_out
self.fc_q = nn.Linear(dim_q, dim_out, bias=False)
self.fc_k = nn.Linear(dim_k, dim_out, bias=False)
self.fc_v = nn.Linear(dim_v, dim_out, bias=False)
self.fc_out = nn.Linear(dim_out, dim_out)
self.ln1 = nn.LayerNorm(dim_out)
self.ln2 = nn.LayerNorm(dim_out)
def scatter(self, x):
return torch.cat(x.chunk(self.num_heads, (- 1)), (- 3))
def gather(self, x):
return torch.cat(x.chunk(self.num_heads, (- 3)), (- 1))
def attend(self, q, k, v, mask=None):
(q_, k_, v_) = [self.scatter(x) for x in [q, k, v]]
A_logits = ((q_ @ k_.transpose((- 2), (- 1))) / math.sqrt(self.dim_out))
if (mask is not None):
mask = mask.bool().to(q.device)
mask = torch.stack(([mask] * q.shape[(- 2)]), (- 2))
mask = torch.cat(([mask] * self.num_heads), (- 3))
A = torch.softmax(A_logits.masked_fill(mask, (- float('inf'))), (- 1))
A = A.masked_fill(torch.isnan(A), 0.0)
else:
A = torch.softmax(A_logits, (- 1))
return self.gather((A @ v_))
def forward(self, q, k, v, mask=None):
(q, k, v) = (self.fc_q(q), self.fc_k(k), self.fc_v(v))
out = self.ln1((q + self.attend(q, k, v, mask=mask)))
out = self.ln2((out + F.relu(self.fc_out(out))))
return out
|
class SelfAttn(MultiHeadAttn):
    """Self-attention: a MultiHeadAttn where q, k and v share one input."""

    def __init__(self, dim_in, dim_out, num_heads=8):
        super().__init__(dim_in, dim_in, dim_in, dim_out, num_heads)

    def forward(self, x, mask=None):
        return super().forward(x, x, x, mask=mask)
|
def build_mlp(dim_in, dim_hid, dim_out, depth):
    """Return an MLP with `depth` linear layers and ReLU in between."""
    layers = [nn.Linear(dim_in, dim_hid), nn.ReLU(True)]
    for _ in range(depth - 2):
        layers.extend([nn.Linear(dim_hid, dim_hid), nn.ReLU(True)])
    layers.append(nn.Linear(dim_hid, dim_out))
    return nn.Sequential(*layers)
|
class PoolingEncoder(nn.Module):
    """Deep-set style encoder: per-point MLP, pooling, then a post MLP.

    If `dim_lat` is given the output parameterizes a Normal latent;
    otherwise a deterministic representation is returned.
    """

    def __init__(self, dim_x=1, dim_y=1, dim_hid=128, dim_lat=None, self_attn=False, pre_depth=4, post_depth=2):
        super().__init__()
        self.use_lat = dim_lat is not None
        if self_attn:
            # Replace the last two MLP layers with a self-attention block.
            self.net_pre = nn.Sequential(
                build_mlp(dim_x + dim_y, dim_hid, dim_hid, pre_depth - 2),
                nn.ReLU(True),
                SelfAttn(dim_hid, dim_hid))
        else:
            self.net_pre = build_mlp(dim_x + dim_y, dim_hid, dim_hid, pre_depth)
        post_out = (2 * dim_lat) if self.use_lat else dim_hid
        self.net_post = build_mlp(dim_hid, dim_hid, post_out, post_depth)

    def forward(self, xc, yc, mask=None):
        hid = self.net_pre(torch.cat([xc, yc], -1))
        if mask is None:
            pooled = hid.mean(-2)
        else:
            mask = mask.to(xc.device)
            # Masked mean: zero padded points, normalize by true count.
            pooled = (hid * mask.unsqueeze(-1)).sum(-2) / (mask.sum(-1, keepdim=True).detach() + 1e-05)
        if not self.use_lat:
            return self.net_post(pooled)
        mu, sigma = self.net_post(pooled).chunk(2, -1)
        # Bound sigma to (0.1, 1.0) for numerical stability.
        sigma = 0.1 + 0.9 * torch.sigmoid(sigma)
        return Normal(mu, sigma)
|
class CrossAttnEncoder(nn.Module):
    """Encoder that attends from target inputs to context representations."""

    def __init__(self, dim_x=1, dim_y=1, dim_hid=128, dim_lat=None, self_attn=True, v_depth=4, qk_depth=2):
        super().__init__()
        self.use_lat = dim_lat is not None
        if self_attn:
            # Shorter value MLP followed by self-attention over the context.
            self.net_v = build_mlp(dim_x + dim_y, dim_hid, dim_hid, v_depth - 2)
            self.self_attn = SelfAttn(dim_hid, dim_hid)
        else:
            self.net_v = build_mlp(dim_x + dim_y, dim_hid, dim_hid, v_depth)
        self.net_qk = build_mlp(dim_x, dim_hid, dim_hid, qk_depth)
        attn_out = (2 * dim_lat) if self.use_lat else dim_hid
        self.attn = MultiHeadAttn(dim_hid, dim_hid, dim_hid, attn_out)

    def forward(self, xc, yc, xt, mask=None):
        q = self.net_qk(xt)
        k = self.net_qk(xc)
        v = self.net_v(torch.cat([xc, yc], -1))
        # The self_attn attribute only exists when self_attn=True at init.
        if hasattr(self, 'self_attn'):
            v = self.self_attn(v, mask=mask)
        out = self.attn(q, k, v, mask=mask)
        if not self.use_lat:
            return out
        mu, sigma = out.chunk(2, -1)
        sigma = 0.1 + 0.9 * torch.sigmoid(sigma)
        return Normal(mu, sigma)
|
class Decoder(nn.Module):
    """Map (encoded context, target x) to a Normal predictive distribution."""

    def __init__(self, dim_x=1, dim_y=1, dim_enc=128, dim_hid=128, depth=3):
        super().__init__()
        self.fc = nn.Linear(dim_x + dim_enc, dim_hid)
        self.dim_hid = dim_hid
        layers = [nn.ReLU(True)]
        for _ in range(depth - 2):
            layers.append(nn.Linear(dim_hid, dim_hid))
            layers.append(nn.ReLU(True))
        layers.append(nn.Linear(dim_hid, 2 * dim_y))
        self.mlp = nn.Sequential(*layers)

    def add_ctx(self, dim_ctx):
        """Attach an optional linear projection for an extra context vector."""
        self.dim_ctx = dim_ctx
        self.fc_ctx = nn.Linear(dim_ctx, self.dim_hid, bias=False)

    def forward(self, encoded, x, ctx=None):
        hid = self.fc(torch.cat([encoded, x], -1))
        if ctx is not None:
            hid = hid + self.fc_ctx(ctx)
        mu, sigma = self.mlp(hid).chunk(2, -1)
        # softplus keeps sigma positive; the offset avoids collapse to zero.
        sigma = 0.1 + 0.9 * F.softplus(sigma)
        return Normal(mu, sigma)
|
def get_logger(filename, mode='a'):
    """Return the root logger with a FileHandler writing to `filename`.

    Bug fix: the original unconditionally attached a new FileHandler on
    every call, so repeated calls (e.g. eval() once per epoch from the
    training loop) accumulated handlers and duplicated every logged line.
    An existing handler for the same file is now reused.
    """
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    logger = logging.getLogger()
    target = os.path.abspath(filename)
    for handler in logger.handlers:
        if isinstance(handler, logging.FileHandler) and handler.baseFilename == target:
            break
    else:
        logger.addHandler(logging.FileHandler(filename, mode=mode))
    return logger
|
class RunningAverage(object):
    """Track running means of named scalar metrics plus elapsed time."""

    def __init__(self, *keys):
        self.sum = OrderedDict()
        self.cnt = OrderedDict()
        self.clock = time.time()
        for key in keys:
            self.sum[key] = 0
            self.cnt[key] = 0

    def update(self, key, val):
        """Accumulate `val` under `key`; tensors are converted via .item()."""
        if isinstance(val, torch.Tensor):
            val = val.item()
        if self.sum.get(key, None) is None:
            self.sum[key] = val
            self.cnt[key] = 1
        else:
            self.sum[key] = self.sum[key] + val
            self.cnt[key] += 1

    def reset(self):
        """Zero all accumulators (keeping keys) and restart the clock."""
        for key in self.sum.keys():
            self.sum[key] = 0
            self.cnt[key] = 0
        self.clock = time.time()

    def clear(self):
        """Drop all keys and restart the clock."""
        self.sum = OrderedDict()
        self.cnt = OrderedDict()
        self.clock = time.time()

    def keys(self):
        return self.sum.keys()

    def get(self, key):
        """Return the mean of `key`; AssertionError for unknown keys."""
        assert self.sum.get(key, None) is not None
        return self.sum[key] / self.cnt[key]

    def info(self, show_et=True):
        """Format all running means (floats to 4 decimals) into one line."""
        line = ''
        for key in self.sum.keys():
            val = self.sum[key] / self.cnt[key]
            # Fixed: use isinstance instead of type() ==, and drop the
            # no-op str.format() that was chained onto an f-string result.
            if isinstance(val, float):
                line += f'{key} {val:.4f} '
            else:
                line += f'{key} {val} '
        if show_et:
            line += f'({(time.time() - self.clock):.3f} secs)'
        return line
|
def gen_load_func(parser, func):
    """Wrap `func` so it is constructed from CLI arguments.

    Returns loader(args, cmdline) that parses known options from cmdline
    with `parser`, copies them onto `args`, and returns
    (func(**parsed_options), remaining_cmdline).
    """
    def load(args, cmdline):
        sub_args, remaining = parser.parse_known_args(cmdline)
        args.__dict__.update(sub_args.__dict__)
        return func(**sub_args.__dict__), remaining
    return load
|
def load_module(filename):
    """Import a Python source file by path and return the module object.

    Uses importlib.util instead of the deprecated
    SourceFileLoader.load_module() (removed in Python 3.12). The module
    is registered in sys.modules under its basename, matching the old
    loader behavior.
    """
    import importlib.util
    import sys
    module_name = os.path.splitext(os.path.basename(filename))[0]
    spec = importlib.util.spec_from_file_location(module_name, filename)
    module = importlib.util.module_from_spec(spec)
    sys.modules[module_name] = module
    spec.loader.exec_module(module)
    return module
|
def logmeanexp(x, dim=0):
    """Numerically stable log of the mean of exp(x) along `dim`."""
    count = x.shape[dim]
    return x.logsumexp(dim) - math.log(count)
|
def stack(x, num_samples=None, dim=0):
    """Tile `x` num_samples times along a new dim; identity when None."""
    if num_samples is None:
        return x
    return torch.stack([x for _ in range(num_samples)], dim=dim)
|
def main():
    """CLI entry point for CelebA experiments: train/eval/plot/ensemble."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', choices=['train', 'eval', 'plot', 'ensemble'], default='train')
    parser.add_argument('--expid', type=str, default='trial')
    parser.add_argument('--resume', action='store_true', default=False)
    parser.add_argument('--gpu', type=str, default='0')
    parser.add_argument('--max_num_points', type=int, default=200)
    parser.add_argument('--model', type=str, default='cnp')
    # Training options.
    parser.add_argument('--train_batch_size', type=int, default=100)
    parser.add_argument('--train_num_samples', type=int, default=4)
    parser.add_argument('--lr', type=float, default=0.0005)
    parser.add_argument('--num_epochs', type=int, default=200)
    parser.add_argument('--eval_freq', type=int, default=10)
    parser.add_argument('--save_freq', type=int, default=10)
    # Evaluation options.
    parser.add_argument('--eval_seed', type=int, default=42)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--eval_num_samples', type=int, default=50)
    parser.add_argument('--eval_logfile', type=str, default=None)
    # Plotting options.
    parser.add_argument('--plot_seed', type=int, default=None)
    parser.add_argument('--plot_batch_size', type=int, default=16)
    parser.add_argument('--plot_num_samples', type=int, default=30)
    parser.add_argument('--plot_num_ctx', type=int, default=100)
    parser.add_argument('--t_noise', type=float, default=None)
    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # Model class name is the upper-cased module name (models/cnp.py -> CNP).
    model_cls = getattr(load_module(f'models/{args.model}.py'), args.model.upper())
    with open(f'configs/celeba/{args.model}.yaml', 'r') as f:
        config = yaml.safe_load(f)
    model = model_cls(**config).cuda()
    args.root = osp.join(results_path, 'celeba', args.model, args.expid)
    dispatch = {'train': train, 'eval': eval, 'plot': plot, 'ensemble': ensemble}
    dispatch[args.mode](args, model)
|
def train(args, model):
    """Train `model` on CelebA, logging metrics and checkpointing.

    Expects `args` to carry the CLI options defined in main() plus
    `args.root` (output directory). Resumes from `args.root/ckpt.tar`
    when args.resume is set; runs a final evaluation at the end.
    """
    if (not osp.isdir(args.root)):
        os.makedirs(args.root)
    with open(osp.join(args.root, 'args.yaml'), 'w') as f:
        yaml.dump(args.__dict__, f)
    train_ds = CelebA(train=True)
    # NOTE(review): eval_ds is created but never used in this function.
    eval_ds = CelebA(train=False)
    train_loader = torch.utils.data.DataLoader(train_ds, batch_size=args.train_batch_size, shuffle=True, num_workers=4)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # Cosine schedule stepped once per batch across the whole run.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=(len(train_loader) * args.num_epochs))
    if args.resume:
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        optimizer.load_state_dict(ckpt.optimizer)
        scheduler.load_state_dict(ckpt.scheduler)
        logfilename = ckpt.logfilename
        start_epoch = ckpt.epoch
    else:
        logfilename = osp.join(args.root, 'train_{}.log'.format(time.strftime('%Y%m%d-%H%M')))
        start_epoch = 1
    logger = get_logger(logfilename)
    ravg = RunningAverage()
    if (not args.resume):
        logger.info('Total number of parameters: {}\n'.format(sum((p.numel() for p in model.parameters()))))
    for epoch in range(start_epoch, (args.num_epochs + 1)):
        model.train()
        for (x, _) in tqdm(train_loader):
            # Convert an image batch into a context/target regression task.
            batch = img_to_task(x, max_num_points=args.max_num_points, device='cuda')
            optimizer.zero_grad()
            outs = model(batch, num_samples=args.train_num_samples)
            outs.loss.backward()
            optimizer.step()
            scheduler.step()
            for (key, val) in outs.items():
                ravg.update(key, val)
        line = f'{args.model}:{args.expid} epoch {epoch} '
        line += f"lr {optimizer.param_groups[0]['lr']:.3e} "
        line += ravg.info()
        logger.info(line)
        if ((epoch % args.eval_freq) == 0):
            logger.info((eval(args, model) + '\n'))
        ravg.reset()
        if (((epoch % args.save_freq) == 0) or (epoch == args.num_epochs)):
            ckpt = AttrDict()
            ckpt.model = model.state_dict()
            ckpt.optimizer = optimizer.state_dict()
            ckpt.scheduler = scheduler.state_dict()
            ckpt.logfilename = logfilename
            # Saved epoch is the NEXT epoch to run when resuming.
            ckpt.epoch = (epoch + 1)
            torch.save(ckpt, osp.join(args.root, 'ckpt.tar'))
    # Final full evaluation after training completes.
    args.mode = 'eval'
    eval(args, model)
|
def gen_evalset(args):
    """Generate and cache a fixed, seeded evaluation set for CelebA."""
    # Seed so that the cached evaluation tasks are reproducible.
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    eval_ds = CelebA(train=False)
    eval_loader = torch.utils.data.DataLoader(eval_ds, batch_size=args.eval_batch_size, shuffle=False, num_workers=4)
    batches = []
    for (x, _) in tqdm(eval_loader):
        batches.append(img_to_task(x, t_noise=args.t_noise, max_num_points=args.max_num_points))
    # Re-randomize the global RNG state after deterministic generation.
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    path = osp.join(evalsets_path, 'celeba')
    if (not osp.isdir(path)):
        os.makedirs(path)
    # Cache file is keyed by the optional target-noise level.
    filename = ('no_noise.tar' if (args.t_noise is None) else f'{args.t_noise}.tar')
    torch.save(batches, osp.join(path, filename))
|
def eval(args, model):
    """Evaluate `model` on the cached CelebA eval set; return the report line.

    In standalone 'eval' mode the checkpoint is loaded and results are
    also written to a log file; when called from train(), the current
    in-memory weights are used and nothing is logged here.
    """
    if (args.mode == 'eval'):
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        if (args.eval_logfile is None):
            eval_logfile = f'eval'
            if (args.t_noise is not None):
                eval_logfile += f'_{args.t_noise}'
            eval_logfile += '.log'
        else:
            eval_logfile = args.eval_logfile
        filename = osp.join(args.root, eval_logfile)
        logger = get_logger(filename, mode='w')
    else:
        logger = None
    path = osp.join(evalsets_path, 'celeba')
    if (not osp.isdir(path)):
        os.makedirs(path)
    filename = ('no_noise.tar' if (args.t_noise is None) else f'{args.t_noise}.tar')
    if (not osp.isfile(osp.join(path, filename))):
        print('generating evaluation sets...')
        gen_evalset(args)
    eval_batches = torch.load(osp.join(path, filename))
    # Fixed seed so sampling-based models evaluate deterministically.
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    ravg = RunningAverage()
    model.eval()
    with torch.no_grad():
        for batch in tqdm(eval_batches):
            for (key, val) in batch.items():
                batch[key] = val.cuda()
            outs = model(batch, num_samples=args.eval_num_samples)
            for (key, val) in outs.items():
                ravg.update(key, val)
    # Restore non-deterministic RNG state afterwards.
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    line = f'{args.model}:{args.expid} '
    if (args.t_noise is not None):
        line += f'tn {args.t_noise} '
    line += ravg.info()
    if (logger is not None):
        logger.info(line)
    return line
|
def ensemble(args, model):
    """Evaluate an ensemble of 5 independently trained CelebA runs.

    Per-batch log-likelihoods are combined with logmeanexp across runs,
    i.e. the ensemble predictive is the mean of the member predictives.
    Expects checkpoints under results_path/celeba/<model>/run1..run5.
    """
    num_runs = 5
    models = []
    for i in range(num_runs):
        model_ = deepcopy(model)
        ckpt = torch.load(osp.join(results_path, 'celeba', args.model, f'run{(i + 1)}', 'ckpt.tar'))
        model_.load_state_dict(ckpt['model'])
        model_.cuda()
        model_.eval()
        models.append(model_)
    path = osp.join(evalsets_path, 'celeba')
    if (not osp.isdir(path)):
        os.makedirs(path)
    filename = ('no_noise.tar' if (args.t_noise is None) else f'{args.t_noise}.tar')
    if (not osp.isfile(osp.join(path, filename))):
        print('generating evaluation sets...')
        gen_evalset(args)
    eval_batches = torch.load(osp.join(path, filename))
    ravg = RunningAverage()
    with torch.no_grad():
        for batch in tqdm(eval_batches):
            for (key, val) in batch.items():
                batch[key] = val.cuda()
            ctx_ll = []
            tar_ll = []
            for model in models:
                outs = model(batch, num_samples=args.eval_num_samples, reduce_ll=False)
                ctx_ll.append(outs.ctx_ll)
                tar_ll.append(outs.tar_ll)
            # 2-D lls presumably come from deterministic models: stack a
            # new run axis; otherwise concatenate along the sample axis.
            if (ctx_ll[0].dim() == 2):
                ctx_ll = torch.stack(ctx_ll)
                tar_ll = torch.stack(tar_ll)
            else:
                ctx_ll = torch.cat(ctx_ll)
                tar_ll = torch.cat(tar_ll)
            ctx_ll = logmeanexp(ctx_ll).mean()
            tar_ll = logmeanexp(tar_ll).mean()
            ravg.update('ctx_ll', ctx_ll)
            ravg.update('tar_ll', tar_ll)
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    filename = f'ensemble'
    if (args.t_noise is not None):
        filename += f'_{args.t_noise}'
    filename += '.log'
    logger = get_logger(osp.join(results_path, 'celeba', args.model, filename), mode='w')
    logger.info(ravg.info())
|
class CelebA(object):
    """Minimal CelebA tensor dataset loaded from preprocessed .pt files.

    Expects `datasets_path/celeba/{train,eval}.pt` containing a
    (data, targets) tuple; pixel values are rescaled to [0, 1].
    """

    def __init__(self, train=True):
        # Fix: the original had an if/else whose two branches were
        # identical no-op reassignments; the dead code is removed.
        split = 'train.pt' if train else 'eval.pt'
        (self.data, self.targets) = torch.load(osp.join(datasets_path, 'celeba', split))
        self.data = self.data.float() / 255.0

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        return (self.data[index], self.targets[index])
|
class EMNIST(tvds.EMNIST):
    """Torchvision EMNIST (balanced split) restricted to a class range.

    Images are normalized to [0, 1] and transposed to the conventional
    orientation; samples are kept grouped class-by-class for classes in
    [class_range[0], class_range[1]).
    """

    def __init__(self, train=True, class_range=[0, 47], device='cpu', download=True):
        super().__init__(datasets_path, train=train, split='balanced', download=download)
        self.data = self.data.unsqueeze(1).float().div(255).transpose(-1, -2).to(device)
        self.targets = self.targets.to(device)
        # Collect indices class by class so samples stay grouped per class.
        per_class = [torch.where(self.targets == c)[0] for c in range(class_range[0], class_range[1])]
        keep = torch.cat(per_class)
        self.data = self.data[keep]
        self.targets = self.targets[keep]

    def __getitem__(self, idx):
        return (self.data[idx], self.targets[idx])
|
def main():
    """CLI entry point for EMNIST experiments: train/eval/plot/ensemble."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', choices=['train', 'eval', 'plot', 'ensemble'], default='train')
    parser.add_argument('--expid', type=str, default='trial')
    parser.add_argument('--resume', action='store_true', default=False)
    parser.add_argument('--gpu', type=str, default='0')
    parser.add_argument('--max_num_points', type=int, default=200)
    parser.add_argument('--class_range', type=int, nargs='*', default=[0, 10])
    parser.add_argument('--model', type=str, default='cnp')
    # Training options.
    parser.add_argument('--train_batch_size', type=int, default=100)
    parser.add_argument('--train_num_samples', type=int, default=4)
    parser.add_argument('--lr', type=float, default=0.0005)
    parser.add_argument('--num_epochs', type=int, default=200)
    parser.add_argument('--eval_freq', type=int, default=10)
    parser.add_argument('--save_freq', type=int, default=10)
    # Evaluation options.
    parser.add_argument('--eval_seed', type=int, default=42)
    parser.add_argument('--eval_batch_size', type=int, default=16)
    parser.add_argument('--eval_num_samples', type=int, default=50)
    parser.add_argument('--eval_logfile', type=str, default=None)
    # Plotting options.
    parser.add_argument('--plot_seed', type=int, default=None)
    parser.add_argument('--plot_batch_size', type=int, default=16)
    parser.add_argument('--plot_num_samples', type=int, default=30)
    parser.add_argument('--plot_num_ctx', type=int, default=100)
    parser.add_argument('--t_noise', type=float, default=None)
    args = parser.parse_args()
    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
    # Model class name is the upper-cased module name (models/cnp.py -> CNP).
    model_cls = getattr(load_module(f'models/{args.model}.py'), args.model.upper())
    with open(f'configs/emnist/{args.model}.yaml', 'r') as f:
        config = yaml.safe_load(f)
    model = model_cls(**config).cuda()
    args.root = osp.join(results_path, 'emnist', args.model, args.expid)
    dispatch = {'train': train, 'eval': eval, 'plot': plot, 'ensemble': ensemble}
    dispatch[args.mode](args, model)
|
def train(args, model):
    """Train `model` on EMNIST (restricted class range), log and checkpoint.

    Expects `args` to carry the CLI options defined in main() plus
    `args.root` (output directory). Resumes from `args.root/ckpt.tar`
    when args.resume is set; runs a final evaluation at the end.
    """
    if (not osp.isdir(args.root)):
        os.makedirs(args.root)
    with open(osp.join(args.root, 'args.yaml'), 'w') as f:
        yaml.dump(args.__dict__, f)
    train_ds = EMNIST(train=True, class_range=args.class_range)
    # NOTE(review): eval_ds is created but never used in this function.
    eval_ds = EMNIST(train=False, class_range=args.class_range)
    train_loader = torch.utils.data.DataLoader(train_ds, batch_size=args.train_batch_size, shuffle=True, num_workers=4)
    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)
    # Cosine schedule stepped once per batch across the whole run.
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=(len(train_loader) * args.num_epochs))
    if args.resume:
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        optimizer.load_state_dict(ckpt.optimizer)
        scheduler.load_state_dict(ckpt.scheduler)
        logfilename = ckpt.logfilename
        start_epoch = ckpt.epoch
    else:
        logfilename = osp.join(args.root, 'train_{}.log'.format(time.strftime('%Y%m%d-%H%M')))
        start_epoch = 1
    logger = get_logger(logfilename)
    ravg = RunningAverage()
    if (not args.resume):
        logger.info('Total number of parameters: {}\n'.format(sum((p.numel() for p in model.parameters()))))
    for epoch in range(start_epoch, (args.num_epochs + 1)):
        model.train()
        for (x, _) in tqdm(train_loader):
            # Convert an image batch into a context/target regression task.
            batch = img_to_task(x, max_num_points=args.max_num_points, device='cuda')
            optimizer.zero_grad()
            outs = model(batch, num_samples=args.train_num_samples)
            outs.loss.backward()
            optimizer.step()
            scheduler.step()
            for (key, val) in outs.items():
                ravg.update(key, val)
        line = f'{args.model}:{args.expid} epoch {epoch} '
        line += f"lr {optimizer.param_groups[0]['lr']:.3e} "
        line += ravg.info()
        logger.info(line)
        if ((epoch % args.eval_freq) == 0):
            logger.info((eval(args, model) + '\n'))
        ravg.reset()
        if (((epoch % args.save_freq) == 0) or (epoch == args.num_epochs)):
            ckpt = AttrDict()
            ckpt.model = model.state_dict()
            ckpt.optimizer = optimizer.state_dict()
            ckpt.scheduler = scheduler.state_dict()
            ckpt.logfilename = logfilename
            # Saved epoch is the NEXT epoch to run when resuming.
            ckpt.epoch = (epoch + 1)
            torch.save(ckpt, osp.join(args.root, 'ckpt.tar'))
    # Final full evaluation after training completes.
    args.mode = 'eval'
    eval(args, model)
|
def gen_evalset(args):
    """Generate and cache a fixed, seeded evaluation set for EMNIST."""
    # Seed so that the cached evaluation tasks are reproducible.
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    eval_ds = EMNIST(train=False, class_range=args.class_range)
    eval_loader = torch.utils.data.DataLoader(eval_ds, batch_size=args.eval_batch_size, shuffle=False, num_workers=4)
    batches = []
    for (x, _) in tqdm(eval_loader):
        batches.append(img_to_task(x, t_noise=args.t_noise, max_num_points=args.max_num_points))
    # Re-randomize the global RNG state after deterministic generation.
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    path = osp.join(evalsets_path, 'emnist')
    if (not osp.isdir(path)):
        os.makedirs(path)
    # Cache file is keyed by class range and optional target noise.
    (c1, c2) = args.class_range
    filename = f'{c1}-{c2}'
    if (args.t_noise is not None):
        filename += f'_{args.t_noise}'
    filename += '.tar'
    torch.save(batches, osp.join(path, filename))
|
def eval(args, model):
    """Evaluate `model` on the cached EMNIST eval set; return the report line.

    In standalone 'eval' mode the checkpoint is loaded and results are
    also written to a log file; when called from train(), the current
    in-memory weights are used and nothing is logged here.
    """
    if (args.mode == 'eval'):
        ckpt = torch.load(osp.join(args.root, 'ckpt.tar'))
        model.load_state_dict(ckpt.model)
        if (args.eval_logfile is None):
            (c1, c2) = args.class_range
            eval_logfile = f'eval_{c1}-{c2}'
            if (args.t_noise is not None):
                eval_logfile += f'_{args.t_noise}'
            eval_logfile += '.log'
        else:
            eval_logfile = args.eval_logfile
        filename = osp.join(args.root, eval_logfile)
        logger = get_logger(filename, mode='w')
    else:
        logger = None
    path = osp.join(evalsets_path, 'emnist')
    # Cache file is keyed by class range and optional target noise.
    (c1, c2) = args.class_range
    filename = f'{c1}-{c2}'
    if (args.t_noise is not None):
        filename += f'_{args.t_noise}'
    filename += '.tar'
    if (not osp.isfile(osp.join(path, filename))):
        print('generating evaluation sets...')
        gen_evalset(args)
    eval_batches = torch.load(osp.join(path, filename))
    # Fixed seed so sampling-based models evaluate deterministically.
    torch.manual_seed(args.eval_seed)
    torch.cuda.manual_seed(args.eval_seed)
    ravg = RunningAverage()
    model.eval()
    with torch.no_grad():
        for batch in tqdm(eval_batches):
            for (key, val) in batch.items():
                batch[key] = val.cuda()
            outs = model(batch, num_samples=args.eval_num_samples)
            for (key, val) in outs.items():
                ravg.update(key, val)
    # Restore non-deterministic RNG state afterwards.
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    (c1, c2) = args.class_range
    line = f'{args.model}:{args.expid} {c1}-{c2} '
    if (args.t_noise is not None):
        line += f'tn {args.t_noise} '
    line += ravg.info()
    if (logger is not None):
        logger.info(line)
    return line
|
def ensemble(args, model):
    """Evaluate an ensemble of 5 independently trained EMNIST runs.

    Per-batch log-likelihoods are combined with logmeanexp across runs,
    i.e. the ensemble predictive is the mean of the member predictives.
    Expects checkpoints under results_path/emnist/<model>/run1..run5.
    """
    num_runs = 5
    models = []
    for i in range(num_runs):
        model_ = deepcopy(model)
        ckpt = torch.load(osp.join(results_path, 'emnist', args.model, f'run{(i + 1)}', 'ckpt.tar'))
        model_.load_state_dict(ckpt['model'])
        model_.cuda()
        model_.eval()
        models.append(model_)
    path = osp.join(evalsets_path, 'emnist')
    # Cache file is keyed by class range and optional target noise.
    (c1, c2) = args.class_range
    filename = f'{c1}-{c2}'
    if (args.t_noise is not None):
        filename += f'_{args.t_noise}'
    filename += '.tar'
    if (not osp.isfile(osp.join(path, filename))):
        print('generating evaluation sets...')
        gen_evalset(args)
    eval_batches = torch.load(osp.join(path, filename))
    ravg = RunningAverage()
    with torch.no_grad():
        for batch in tqdm(eval_batches):
            for (key, val) in batch.items():
                batch[key] = val.cuda()
            ctx_ll = []
            tar_ll = []
            for model in models:
                outs = model(batch, num_samples=args.eval_num_samples, reduce_ll=False)
                ctx_ll.append(outs.ctx_ll)
                tar_ll.append(outs.tar_ll)
            # 2-D lls presumably come from deterministic models: stack a
            # new run axis; otherwise concatenate along the sample axis.
            if (ctx_ll[0].dim() == 2):
                ctx_ll = torch.stack(ctx_ll)
                tar_ll = torch.stack(tar_ll)
            else:
                ctx_ll = torch.cat(ctx_ll)
                tar_ll = torch.cat(tar_ll)
            ctx_ll = logmeanexp(ctx_ll).mean()
            tar_ll = logmeanexp(tar_ll).mean()
            ravg.update('ctx_ll', ctx_ll)
            ravg.update('tar_ll', tar_ll)
    torch.manual_seed(time.time())
    torch.cuda.manual_seed(time.time())
    filename = f'ensemble_{c1}-{c2}'
    if (args.t_noise is not None):
        filename += f'_{args.t_noise}'
    filename += '.log'
    logger = get_logger(osp.join(results_path, 'emnist', args.model, filename), mode='w')
    logger.info(ravg.info())
|
class MultiHeadAttn(nn.Module):
def __init__(self, dim_q, dim_k, dim_v, dim_out, num_heads=8):
super().__init__()
self.num_heads = num_heads
self.dim_out = dim_out
self.fc_q = nn.Linear(dim_q, dim_out, bias=False)
self.fc_k = nn.Linear(dim_k, dim_out, bias=False)
self.fc_v = nn.Linear(dim_v, dim_out, bias=False)
self.fc_out = nn.Linear(dim_out, dim_out)
self.ln1 = nn.LayerNorm(dim_out)
self.ln2 = nn.LayerNorm(dim_out)
def scatter(self, x):
return torch.cat(x.chunk(self.num_heads, (- 1)), (- 3))
def gather(self, x):
return torch.cat(x.chunk(self.num_heads, (- 3)), (- 1))
def attend(self, q, k, v, mask=None):
(q_, k_, v_) = [self.scatter(x) for x in [q, k, v]]
A_logits = ((q_ @ k_.transpose((- 2), (- 1))) / math.sqrt(self.dim_out))
if (mask is not None):
mask = mask.bool().to(q.device)
mask = torch.stack(([mask] * q.shape[(- 2)]), (- 2))
mask = torch.cat(([mask] * self.num_heads), (- 3))
A = torch.softmax(A_logits.masked_fill(mask, (- float('inf'))), (- 1))
A = A.masked_fill(torch.isnan(A), 0.0)
else:
A = torch.softmax(A_logits, (- 1))
return self.gather((A @ v_))
def forward(self, q, k, v, mask=None):
(q, k, v) = (self.fc_q(q), self.fc_k(k), self.fc_v(v))
out = self.ln1((q + self.attend(q, k, v, mask=mask)))
out = self.ln2((out + F.relu(self.fc_out(out))))
return out
|
class SelfAttn(MultiHeadAttn):
    """Self-attention: a MultiHeadAttn where q, k and v share one input."""

    def __init__(self, dim_in, dim_out, num_heads=8):
        super().__init__(dim_in, dim_in, dim_in, dim_out, num_heads)

    def forward(self, x, mask=None):
        return super().forward(x, x, x, mask=mask)
|
def build_mlp(dim_in, dim_hid, dim_out, depth):
    """Return an MLP with `depth` linear layers and ReLU in between."""
    layers = [nn.Linear(dim_in, dim_hid), nn.ReLU(True)]
    for _ in range(depth - 2):
        layers.extend([nn.Linear(dim_hid, dim_hid), nn.ReLU(True)])
    layers.append(nn.Linear(dim_hid, dim_out))
    return nn.Sequential(*layers)
|
class PoolingEncoder(nn.Module):
    """Deep-set style encoder: per-point MLP, pooling, then a post MLP.

    If `dim_lat` is given the output parameterizes a Normal latent;
    otherwise a deterministic representation is returned.
    """

    def __init__(self, dim_x=1, dim_y=1, dim_hid=128, dim_lat=None, self_attn=False, pre_depth=4, post_depth=2):
        super().__init__()
        self.use_lat = dim_lat is not None
        if self_attn:
            # Replace the last two MLP layers with a self-attention block.
            self.net_pre = nn.Sequential(
                build_mlp(dim_x + dim_y, dim_hid, dim_hid, pre_depth - 2),
                nn.ReLU(True),
                SelfAttn(dim_hid, dim_hid))
        else:
            self.net_pre = build_mlp(dim_x + dim_y, dim_hid, dim_hid, pre_depth)
        post_out = (2 * dim_lat) if self.use_lat else dim_hid
        self.net_post = build_mlp(dim_hid, dim_hid, post_out, post_depth)

    def forward(self, xc, yc, mask=None):
        hid = self.net_pre(torch.cat([xc, yc], -1))
        if mask is None:
            pooled = hid.mean(-2)
        else:
            mask = mask.to(xc.device)
            # Masked mean: zero padded points, normalize by true count.
            pooled = (hid * mask.unsqueeze(-1)).sum(-2) / (mask.sum(-1, keepdim=True).detach() + 1e-05)
        if not self.use_lat:
            return self.net_post(pooled)
        mu, sigma = self.net_post(pooled).chunk(2, -1)
        # Bound sigma to (0.1, 1.0) for numerical stability.
        sigma = 0.1 + 0.9 * torch.sigmoid(sigma)
        return Normal(mu, sigma)
|
class CrossAttnEncoder(nn.Module):
    """Encoder that attends from target inputs to context representations."""

    def __init__(self, dim_x=1, dim_y=1, dim_hid=128, dim_lat=None, self_attn=True, v_depth=4, qk_depth=2):
        super().__init__()
        self.use_lat = dim_lat is not None
        if self_attn:
            # Shorter value MLP followed by self-attention over the context.
            self.net_v = build_mlp(dim_x + dim_y, dim_hid, dim_hid, v_depth - 2)
            self.self_attn = SelfAttn(dim_hid, dim_hid)
        else:
            self.net_v = build_mlp(dim_x + dim_y, dim_hid, dim_hid, v_depth)
        self.net_qk = build_mlp(dim_x, dim_hid, dim_hid, qk_depth)
        attn_out = (2 * dim_lat) if self.use_lat else dim_hid
        self.attn = MultiHeadAttn(dim_hid, dim_hid, dim_hid, attn_out)

    def forward(self, xc, yc, xt, mask=None):
        q = self.net_qk(xt)
        k = self.net_qk(xc)
        v = self.net_v(torch.cat([xc, yc], -1))
        # The self_attn attribute only exists when self_attn=True at init.
        if hasattr(self, 'self_attn'):
            v = self.self_attn(v, mask=mask)
        out = self.attn(q, k, v, mask=mask)
        if not self.use_lat:
            return out
        mu, sigma = out.chunk(2, -1)
        sigma = 0.1 + 0.9 * torch.sigmoid(sigma)
        return Normal(mu, sigma)
|
class Decoder(nn.Module):
    """Map (encoded context, target x) to a Normal predictive distribution."""

    def __init__(self, dim_x=1, dim_y=1, dim_enc=128, dim_hid=128, depth=3):
        super().__init__()
        self.fc = nn.Linear(dim_x + dim_enc, dim_hid)
        self.dim_hid = dim_hid
        layers = [nn.ReLU(True)]
        for _ in range(depth - 2):
            layers.append(nn.Linear(dim_hid, dim_hid))
            layers.append(nn.ReLU(True))
        layers.append(nn.Linear(dim_hid, 2 * dim_y))
        self.mlp = nn.Sequential(*layers)

    def add_ctx(self, dim_ctx):
        """Attach an optional linear projection for an extra context vector."""
        self.dim_ctx = dim_ctx
        self.fc_ctx = nn.Linear(dim_ctx, self.dim_hid, bias=False)

    def forward(self, encoded, x, ctx=None):
        hid = self.fc(torch.cat([encoded, x], -1))
        if ctx is not None:
            hid = hid + self.fc_ctx(ctx)
        mu, sigma = self.mlp(hid).chunk(2, -1)
        # softplus keeps sigma positive; the offset avoids collapse to zero.
        sigma = 0.1 + 0.9 * F.softplus(sigma)
        return Normal(mu, sigma)
|
def get_logger(filename, mode='a'):
    """Return the root logger with a FileHandler writing to `filename`.

    Bug fix: the original unconditionally attached a new FileHandler on
    every call, so repeated calls (e.g. eval() once per epoch from the
    training loop) accumulated handlers and duplicated every logged line.
    An existing handler for the same file is now reused.
    """
    logging.basicConfig(level=logging.INFO, format='%(message)s')
    logger = logging.getLogger()
    target = os.path.abspath(filename)
    for handler in logger.handlers:
        if isinstance(handler, logging.FileHandler) and handler.baseFilename == target:
            break
    else:
        logger.addHandler(logging.FileHandler(filename, mode=mode))
    return logger
|
class RunningAverage(object):
    """Track running means of named scalar metrics plus elapsed time."""

    def __init__(self, *keys):
        self.sum = OrderedDict()
        self.cnt = OrderedDict()
        self.clock = time.time()
        for key in keys:
            self.sum[key] = 0
            self.cnt[key] = 0

    def update(self, key, val):
        """Accumulate `val` under `key`; tensors are converted via .item()."""
        if isinstance(val, torch.Tensor):
            val = val.item()
        if self.sum.get(key, None) is None:
            self.sum[key] = val
            self.cnt[key] = 1
        else:
            self.sum[key] = self.sum[key] + val
            self.cnt[key] += 1

    def reset(self):
        """Zero all accumulators (keeping keys) and restart the clock."""
        for key in self.sum.keys():
            self.sum[key] = 0
            self.cnt[key] = 0
        self.clock = time.time()

    def clear(self):
        """Drop all keys and restart the clock."""
        self.sum = OrderedDict()
        self.cnt = OrderedDict()
        self.clock = time.time()

    def keys(self):
        return self.sum.keys()

    def get(self, key):
        """Return the mean of `key`; AssertionError for unknown keys."""
        assert self.sum.get(key, None) is not None
        return self.sum[key] / self.cnt[key]

    def info(self, show_et=True):
        """Format all running means (floats to 4 decimals) into one line."""
        line = ''
        for key in self.sum.keys():
            val = self.sum[key] / self.cnt[key]
            # Fixed: use isinstance instead of type() ==, and drop the
            # no-op str.format() that was chained onto an f-string result.
            if isinstance(val, float):
                line += f'{key} {val:.4f} '
            else:
                line += f'{key} {val} '
        if show_et:
            line += f'({(time.time() - self.clock):.3f} secs)'
        return line
|
def gen_load_func(parser, func):
    """Wrap `func` so it is constructed from CLI arguments.

    Returns loader(args, cmdline) that parses known options from cmdline
    with `parser`, copies them onto `args`, and returns
    (func(**parsed_options), remaining_cmdline).
    """
    def load(args, cmdline):
        sub_args, remaining = parser.parse_known_args(cmdline)
        args.__dict__.update(sub_args.__dict__)
        return func(**sub_args.__dict__), remaining
    return load
|
def load_module(filename):
    """Import a Python source file by path and return the module object.

    Uses importlib.util instead of the deprecated
    SourceFileLoader.load_module() (removed in Python 3.12). The module
    is registered in sys.modules under its basename, matching the old
    loader behavior.
    """
    import importlib.util
    import sys
    module_name = os.path.splitext(os.path.basename(filename))[0]
    spec = importlib.util.spec_from_file_location(module_name, filename)
    module = importlib.util.module_from_spec(spec)
    sys.modules[module_name] = module
    spec.loader.exec_module(module)
    return module
|
def logmeanexp(x, dim=0):
    """Numerically stable log of the mean of exp(x) along `dim`."""
    count = x.shape[dim]
    return x.logsumexp(dim) - math.log(count)
|
def stack(x, num_samples=None, dim=0):
    """Tile `x` num_samples times along a new dim; identity when None."""
    if num_samples is None:
        return x
    return torch.stack([x for _ in range(num_samples)], dim=dim)
|
class SetTransformer(nn.Module):
    """Set Transformer: two-ISAB encoder, dropout/PMA/linear decoder."""

    def __init__(self, dim_input=3, num_outputs=1, dim_output=40,
                 num_inds=32, dim_hidden=128, num_heads=4, ln=False):
        super(SetTransformer, self).__init__()
        encoder_layers = [
            ISAB(dim_input, dim_hidden, num_heads, num_inds, ln=ln),
            ISAB(dim_hidden, dim_hidden, num_heads, num_inds, ln=ln),
        ]
        decoder_layers = [
            nn.Dropout(),
            PMA(dim_hidden, num_heads, num_outputs, ln=ln),
            nn.Dropout(),
            nn.Linear(dim_hidden, dim_output),
        ]
        self.enc = nn.Sequential(*encoder_layers)
        self.dec = nn.Sequential(*decoder_layers)

    def forward(self, X):
        encoded = self.enc(X)
        decoded = self.dec(encoded)
        # NOTE: squeeze() drops *all* singleton dims, including a batch of 1.
        return decoded.squeeze()
|
def gen_data(batch_size, max_length=10, test=False):
    """Sample a batch of random-length integer sequences and their maxima.

    All sequences in a batch share one random length in [1, max_length].
    Returns `x` with shape (batch_size, length, 1) and `y` with shape
    (batch_size, 1) where y[i] is the max of sequence i.  `test` is accepted
    for API compatibility but unused.
    """
    length = np.random.randint(1, max_length + 1)
    x = np.random.randint(1, 100, (batch_size, length))
    y = np.max(x, axis=1)
    x = np.expand_dims(x, axis=2)
    y = np.expand_dims(y, axis=1)
    return x, y
|
class SmallDeepSet(nn.Module):
    """Small DeepSets model: per-element MLP encoder, pooling, MLP decoder."""

    def __init__(self, pool='max'):
        super().__init__()
        self.enc = nn.Sequential(
            nn.Linear(in_features=1, out_features=64),
            nn.ReLU(),
            nn.Linear(in_features=64, out_features=64),
            nn.ReLU(),
            nn.Linear(in_features=64, out_features=64),
            nn.ReLU(),
            nn.Linear(in_features=64, out_features=64),
        )
        self.dec = nn.Sequential(
            nn.Linear(in_features=64, out_features=64),
            nn.ReLU(),
            nn.Linear(in_features=64, out_features=1),
        )
        # One of 'max' / 'mean' / 'sum'; anything else skips pooling.
        self.pool = pool

    def forward(self, x):
        """Encode each set element, pool over the set dim (dim=1), decode."""
        encoded = self.enc(x)
        if self.pool == 'max':
            pooled = encoded.max(dim=1)[0]
        elif self.pool == 'mean':
            pooled = encoded.mean(dim=1)
        elif self.pool == 'sum':
            pooled = encoded.sum(dim=1)
        else:
            # Unknown pool: pass the un-pooled tensor through, as the
            # original did.
            pooled = encoded
        return self.dec(pooled)
|
class SmallSetTransformer(nn.Module):
    """Two SAB self-attention blocks, then PMA pooling and a linear head."""

    def __init__(self):
        super().__init__()
        self.enc = nn.Sequential(
            SAB(dim_in=1, dim_out=64, num_heads=4),
            SAB(dim_in=64, dim_out=64, num_heads=4),
        )
        self.dec = nn.Sequential(
            PMA(dim=64, num_heads=4, num_seeds=1),
            nn.Linear(in_features=64, out_features=1),
        )

    def forward(self, x):
        hidden = self.enc(x)
        out = self.dec(hidden)
        # Drop the trailing feature dim of size 1.
        return out.squeeze(-1)
|
def train(model):
    """Train `model` on the synthetic max-of-set task for 500 steps.

    Moves the model and L1 criterion to GPU, optimizes with Adam (lr=1e-4)
    on a fresh random batch of 1024 sequences per step, and returns the list
    of per-step training losses.  Requires CUDA.
    """
    model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    criterion = nn.L1Loss().cuda()
    losses = []
    for _ in range(500):
        x_np, y_np = gen_data(batch_size=2 ** 10, max_length=10)
        x = torch.from_numpy(x_np).float().cuda()
        y = torch.from_numpy(y_np).float().cuda()
        loss = criterion(model(x), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
    return losses
|
class MultivariateNormal(object):
    """Abstract interface for a multivariate-normal mixture component.

    Concrete subclasses implement sampling, log-density evaluation, moment
    extraction, and parsing of raw network outputs into parameters.
    """

    def __init__(self, dim):
        # Dimensionality of the event space.
        self.dim = dim

    def sample(self, B, K, labels):
        raise NotImplementedError

    def log_prob(self, X, params):
        raise NotImplementedError

    def stats(self):
        raise NotImplementedError

    def parse(self, raw):
        raise NotImplementedError
|
class MixtureOfMVNs(object):
    """Mixture of multivariate normals built on a component object `mvn`
    implementing the MultivariateNormal interface (sample/log_prob/stats/parse)."""
    def __init__(self, mvn):
        # Component distribution; all density/sampling math is delegated to it.
        self.mvn = mvn
    def sample(self, B, N, K, return_gt=False):
        """Sample B sets of N points from a K-component mixture.

        Mixture weights pi ~ Dirichlet(1) per batch; component labels are
        drawn per point; the component object generates the points."""
        device = ('cpu' if (not torch.cuda.is_available()) else torch.cuda.current_device())
        pi = Dirichlet(torch.ones(K)).sample(torch.Size([B])).to(device)
        # Categorical over pi with a leading sample dim of N gives (N, B);
        # transpose to (B, N) so labels align with batch-major points.
        labels = Categorical(probs=pi).sample(torch.Size([N])).to(device)
        labels = labels.transpose(0, 1).contiguous()
        (X, params) = self.mvn.sample(B, K, labels)
        if return_gt:
            # Ground truth: per-point labels, mixture weights, and parameters.
            return (X, labels, pi, params)
        else:
            return X
    def log_prob(self, X, pi, params, return_labels=False):
        """Mean mixture log-likelihood of X; optionally also the argmax
        component assignment per point."""
        # Per-component log density; assumed shape (..., N, K) — the
        # unsqueeze(-2) below broadcasts log-pi over the point dim. TODO confirm
        # against the concrete mvn implementation.
        ll = self.mvn.log_prob(X, params)
        # 1e-10 guards log(0) for degenerate mixture weights.
        ll = (ll + (pi + 1e-10).log().unsqueeze((- 2)))
        if return_labels:
            # Hard assignment: most likely component for each point.
            labels = ll.argmax((- 1))
            return (ll.logsumexp((- 1)).mean(), labels)
        else:
            # logsumexp over components = log mixture density.
            return ll.logsumexp((- 1)).mean()
    def plot(self, X, labels, params, axes):
        """Scatter-plot each batch element into one axis of `axes`."""
        (mu, cov) = self.mvn.stats(params)
        for (i, ax) in enumerate(axes.flatten()):
            scatter_mog(X[i].cpu().data.numpy(), labels[i].cpu().data.numpy(), mu[i].cpu().data.numpy(), cov[i].cpu().data.numpy(), ax=ax)
            ax.set_xticks([])
            ax.set_yticks([])
        plt.subplots_adjust(hspace=0.1, wspace=0.1)
    def parse(self, raw):
        # Delegate raw-network-output parsing to the component object.
        return self.mvn.parse(raw)
|
class DeepSet(nn.Module):
    """DeepSets: per-element MLP encoder, mean pooling, MLP decoder."""

    def __init__(self, dim_input, num_outputs, dim_output, dim_hidden=128):
        super(DeepSet, self).__init__()
        self.num_outputs = num_outputs
        self.dim_output = dim_output
        self.enc = nn.Sequential(
            nn.Linear(dim_input, dim_hidden),
            nn.ReLU(),
            nn.Linear(dim_hidden, dim_hidden),
            nn.ReLU(),
            nn.Linear(dim_hidden, dim_hidden),
            nn.ReLU(),
            nn.Linear(dim_hidden, dim_hidden),
        )
        self.dec = nn.Sequential(
            nn.Linear(dim_hidden, dim_hidden),
            nn.ReLU(),
            nn.Linear(dim_hidden, dim_hidden),
            nn.ReLU(),
            nn.Linear(dim_hidden, dim_hidden),
            nn.ReLU(),
            nn.Linear(dim_hidden, num_outputs * dim_output),
        )

    def forward(self, X):
        """Mean-pool encoded elements over the set dim, decode, and reshape
        to (batch, num_outputs, dim_output)."""
        pooled = self.enc(X).mean(-2)
        out = self.dec(pooled)
        return out.reshape(-1, self.num_outputs, self.dim_output)
|
class SetTransformer(nn.Module):
    """Set Transformer: two-ISAB encoder, PMA + two-SAB + linear decoder."""

    def __init__(self, dim_input, num_outputs, dim_output,
                 num_inds=32, dim_hidden=128, num_heads=4, ln=False):
        super(SetTransformer, self).__init__()
        encoder_layers = [
            ISAB(dim_input, dim_hidden, num_heads, num_inds, ln=ln),
            ISAB(dim_hidden, dim_hidden, num_heads, num_inds, ln=ln),
        ]
        decoder_layers = [
            PMA(dim_hidden, num_heads, num_outputs, ln=ln),
            SAB(dim_hidden, dim_hidden, num_heads, ln=ln),
            SAB(dim_hidden, dim_hidden, num_heads, ln=ln),
            nn.Linear(dim_hidden, dim_output),
        ]
        self.enc = nn.Sequential(*encoder_layers)
        self.dec = nn.Sequential(*decoder_layers)

    def forward(self, X):
        encoded = self.enc(X)
        return self.dec(encoded)
|
class MAB(nn.Module):
    """Multihead Attention Block (Set Transformer):
    H = Q' + Attention(Q', K', V'), output = H + ReLU(fc_o(H)),
    with optional LayerNorm after each residual (enabled via `ln`)."""
    def __init__(self, dim_Q, dim_K, dim_V, num_heads, ln=False):
        super(MAB, self).__init__()
        self.dim_V = dim_V
        self.num_heads = num_heads
        # Linear projections for queries (from Q) and keys/values (from K).
        self.fc_q = nn.Linear(dim_Q, dim_V)
        self.fc_k = nn.Linear(dim_K, dim_V)
        self.fc_v = nn.Linear(dim_K, dim_V)
        if ln:
            # Created only when ln=True; forward() probes them via getattr.
            self.ln0 = nn.LayerNorm(dim_V)
            self.ln1 = nn.LayerNorm(dim_V)
        # Position-wise feed-forward used inside the second residual.
        self.fc_o = nn.Linear(dim_V, dim_V)
    def forward(self, Q, K):
        # Q, K assumed 3-D: (batch, n_points, features) — confirm at call sites.
        Q = self.fc_q(Q)
        (K, V) = (self.fc_k(K), self.fc_v(K))
        dim_split = (self.dim_V // self.num_heads)
        # Split the feature dim into heads and stack heads along the batch
        # dim, giving (batch * num_heads, n, dim_split) for a single bmm.
        Q_ = torch.cat(Q.split(dim_split, 2), 0)
        K_ = torch.cat(K.split(dim_split, 2), 0)
        V_ = torch.cat(V.split(dim_split, 2), 0)
        # NOTE(review): scales by sqrt(dim_V), not sqrt(dim_split) as in
        # standard per-head scaled dot-product attention; this matches the
        # original Set Transformer release — confirm before "fixing".
        A = torch.softmax((Q_.bmm(K_.transpose(1, 2)) / math.sqrt(self.dim_V)), 2)
        # Residual around attention (on the *projected* query), then un-stack
        # heads back into the feature dim.
        O = torch.cat((Q_ + A.bmm(V_)).split(Q.size(0), 0), 2)
        O = (O if (getattr(self, 'ln0', None) is None) else self.ln0(O))
        # Second residual: one-layer ReLU feed-forward.
        O = (O + F.relu(self.fc_o(O)))
        O = (O if (getattr(self, 'ln1', None) is None) else self.ln1(O))
        return O
|
class SAB(nn.Module):
    """Set Attention Block: self-attention over a set, i.e. MAB(X, X)."""

    def __init__(self, dim_in, dim_out, num_heads, ln=False):
        super(SAB, self).__init__()
        # Queries and keys/values both come from the same input set.
        self.mab = MAB(dim_in, dim_in, dim_out, num_heads, ln=ln)

    def forward(self, X):
        return self.mab(X, X)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.