| code | docstring | func_name | language | repo | path | url | license |
|---|---|---|---|---|---|---|---|
def batch_to_head_dim(self, tensor: torch.Tensor) -> torch.Tensor:
r"""
Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads`
is the number of heads initialized while constructing the `Attention` class.
Args:
tensor (`torch.Tensor`): The tensor to reshape.
Returns:
`torch.Tensor`: The reshaped tensor.
"""
head_size = self.heads
batch_size, seq_len, dim = tensor.shape
tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
tensor = tensor.permute(0, 2, 1, 3).reshape(
batch_size // head_size, seq_len, dim * head_size
)
return tensor
|
Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size // heads, seq_len, dim * heads]`. `heads`
is the number of heads initialized while constructing the `Attention` class.
Args:
tensor (`torch.Tensor`): The tensor to reshape.
Returns:
`torch.Tensor`: The reshaped tensor.
|
batch_to_head_dim
|
python
|
VAST-AI-Research/TripoSR
|
tsr/models/transformer/attention.py
|
https://github.com/VAST-AI-Research/TripoSR/blob/master/tsr/models/transformer/attention.py
|
MIT
|
def head_to_batch_dim(self, tensor: torch.Tensor, out_dim: int = 3) -> torch.Tensor:
r"""
Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, seq_len, heads, dim // heads]`. `heads` is
the number of heads initialized while constructing the `Attention` class.
Args:
tensor (`torch.Tensor`): The tensor to reshape.
out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is
reshaped to `[batch_size * heads, seq_len, dim // heads]`.
Returns:
`torch.Tensor`: The reshaped tensor.
"""
head_size = self.heads
batch_size, seq_len, dim = tensor.shape
tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
tensor = tensor.permute(0, 2, 1, 3)
if out_dim == 3:
tensor = tensor.reshape(batch_size * head_size, seq_len, dim // head_size)
return tensor
|
Reshape the tensor from `[batch_size, seq_len, dim]` to `[batch_size, seq_len, heads, dim // heads]`. `heads` is
the number of heads initialized while constructing the `Attention` class.
Args:
tensor (`torch.Tensor`): The tensor to reshape.
out_dim (`int`, *optional*, defaults to `3`): The output dimension of the tensor. If `3`, the tensor is
reshaped to `[batch_size * heads, seq_len, dim // heads]`.
Returns:
`torch.Tensor`: The reshaped tensor.
|
head_to_batch_dim
|
python
|
VAST-AI-Research/TripoSR
|
tsr/models/transformer/attention.py
|
https://github.com/VAST-AI-Research/TripoSR/blob/master/tsr/models/transformer/attention.py
|
MIT
|
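A minimal round-trip sketch of the two reshape helpers above. The `attn` instance and its `heads=8` setting are illustrative assumptions, not part of the source:
import torch

# assumes a hypothetical Attention instance `attn` constructed with heads=8
batch_size, seq_len, dim = 2, 16, 64
x = torch.randn(batch_size, seq_len, dim)
multi = attn.head_to_batch_dim(x)         # [2 * 8, 16, 64 // 8] = [16, 16, 8]
restored = attn.batch_to_head_dim(multi)  # back to [2, 16, 64]
assert restored.shape == x.shape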
def get_attention_scores(
self,
query: torch.Tensor,
key: torch.Tensor,
attention_mask: torch.Tensor = None,
) -> torch.Tensor:
r"""
Compute the attention scores.
Args:
query (`torch.Tensor`): The query tensor.
key (`torch.Tensor`): The key tensor.
attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied.
Returns:
`torch.Tensor`: The attention probabilities/scores.
"""
dtype = query.dtype
if self.upcast_attention:
query = query.float()
key = key.float()
if attention_mask is None:
baddbmm_input = torch.empty(
query.shape[0],
query.shape[1],
key.shape[1],
dtype=query.dtype,
device=query.device,
)
beta = 0
else:
baddbmm_input = attention_mask
beta = 1
attention_scores = torch.baddbmm(
baddbmm_input,
query,
key.transpose(-1, -2),
beta=beta,
alpha=self.scale,
)
del baddbmm_input
if self.upcast_softmax:
attention_scores = attention_scores.float()
attention_probs = attention_scores.softmax(dim=-1)
del attention_scores
attention_probs = attention_probs.to(dtype)
return attention_probs
|
Compute the attention scores.
Args:
query (`torch.Tensor`): The query tensor.
key (`torch.Tensor`): The key tensor.
attention_mask (`torch.Tensor`, *optional*): The attention mask to use. If `None`, no mask is applied.
Returns:
`torch.Tensor`: The attention probabilities/scores.
|
get_attention_scores
|
python
|
VAST-AI-Research/TripoSR
|
tsr/models/transformer/attention.py
|
https://github.com/VAST-AI-Research/TripoSR/blob/master/tsr/models/transformer/attention.py
|
MIT
|
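The `baddbmm` call above fuses the mask bias and the scaled dot product into one kernel. A standalone sketch of the identity it relies on, with assumed shapes:
import torch

heads, q_len, k_len, head_dim = 4, 10, 12, 8
scale = head_dim ** -0.5
q = torch.randn(heads, q_len, head_dim)
k = torch.randn(heads, k_len, head_dim)
bias = torch.zeros(heads, q_len, k_len)  # an additive attention-mask bias would go here

# baddbmm computes beta * input + alpha * (batch1 @ batch2)
scores = torch.baddbmm(bias, q, k.transpose(-1, -2), beta=1, alpha=scale)
reference = bias + scale * torch.bmm(q, k.transpose(-1, -2))
assert torch.allclose(scores, reference, atol=1e-6)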
def prepare_attention_mask(
self,
attention_mask: torch.Tensor,
target_length: int,
batch_size: int,
out_dim: int = 3,
) -> torch.Tensor:
r"""
Prepare the attention mask for the attention computation.
Args:
attention_mask (`torch.Tensor`):
The attention mask to prepare.
target_length (`int`):
The target length of the attention mask. This is the length of the attention mask after padding.
batch_size (`int`):
The batch size, which is used to repeat the attention mask.
out_dim (`int`, *optional*, defaults to `3`):
The output dimension of the attention mask. Can be either `3` or `4`.
Returns:
`torch.Tensor`: The prepared attention mask.
"""
head_size = self.heads
if attention_mask is None:
return attention_mask
current_length: int = attention_mask.shape[-1]
if current_length != target_length:
if attention_mask.device.type == "mps":
# HACK: MPS: Does not support padding by greater than dimension of input tensor.
# Instead, we can manually construct the padding tensor.
padding_shape = (
attention_mask.shape[0],
attention_mask.shape[1],
target_length,
)
padding = torch.zeros(
padding_shape,
dtype=attention_mask.dtype,
device=attention_mask.device,
)
attention_mask = torch.cat([attention_mask, padding], dim=2)
else:
# TODO: for pipelines such as stable-diffusion, padding cross-attn mask:
# we want to instead pad by (0, remaining_length), where remaining_length is:
# remaining_length: int = target_length - current_length
# TODO: re-enable tests/models/test_models_unet_2d_condition.py#test_model_xattn_padding
attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)
if out_dim == 3:
if attention_mask.shape[0] < batch_size * head_size:
attention_mask = attention_mask.repeat_interleave(head_size, dim=0)
elif out_dim == 4:
attention_mask = attention_mask.unsqueeze(1)
attention_mask = attention_mask.repeat_interleave(head_size, dim=1)
return attention_mask
|
Prepare the attention mask for the attention computation.
Args:
attention_mask (`torch.Tensor`):
The attention mask to prepare.
target_length (`int`):
The target length of the attention mask. This is the length of the attention mask after padding.
batch_size (`int`):
The batch size, which is used to repeat the attention mask.
out_dim (`int`, *optional*, defaults to `3`):
The output dimension of the attention mask. Can be either `3` or `4`.
Returns:
`torch.Tensor`: The prepared attention mask.
|
prepare_attention_mask
|
python
|
VAST-AI-Research/TripoSR
|
tsr/models/transformer/attention.py
|
https://github.com/VAST-AI-Research/TripoSR/blob/master/tsr/models/transformer/attention.py
|
MIT
|
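A standalone sketch of the pad-and-repeat behaviour of `prepare_attention_mask`; the shapes and `head_size` are assumed, and the padding here uses the remaining length as the TODO above suggests:
import torch
import torch.nn.functional as F

head_size = 2
mask = torch.zeros(1, 1, 5)  # [batch, query_tokens, key_tokens]
target_length = 8
mask = F.pad(mask, (0, target_length - mask.shape[-1]), value=0.0)  # -> [1, 1, 8]
mask = mask.repeat_interleave(head_size, dim=0)                     # -> [2, 1, 8]
assert mask.shape == (2, 1, 8)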
def norm_encoder_hidden_states(
self, encoder_hidden_states: torch.Tensor
) -> torch.Tensor:
r"""
Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the
`Attention` class.
Args:
encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder.
Returns:
`torch.Tensor`: The normalized encoder hidden states.
"""
assert (
self.norm_cross is not None
), "self.norm_cross must be defined to call self.norm_encoder_hidden_states"
if isinstance(self.norm_cross, nn.LayerNorm):
encoder_hidden_states = self.norm_cross(encoder_hidden_states)
elif isinstance(self.norm_cross, nn.GroupNorm):
# Group norm norms along the channels dimension and expects
# input to be in the shape of (N, C, *). In this case, we want
# to norm along the hidden dimension, so we need to move
# (batch_size, sequence_length, hidden_size) ->
# (batch_size, hidden_size, sequence_length)
encoder_hidden_states = encoder_hidden_states.transpose(1, 2)
encoder_hidden_states = self.norm_cross(encoder_hidden_states)
encoder_hidden_states = encoder_hidden_states.transpose(1, 2)
else:
assert False
return encoder_hidden_states
|
Normalize the encoder hidden states. Requires `self.norm_cross` to be specified when constructing the
`Attention` class.
Args:
encoder_hidden_states (`torch.Tensor`): Hidden states of the encoder.
Returns:
`torch.Tensor`: The normalized encoder hidden states.
|
norm_encoder_hidden_states
|
python
|
VAST-AI-Research/TripoSR
|
tsr/models/transformer/attention.py
|
https://github.com/VAST-AI-Research/TripoSR/blob/master/tsr/models/transformer/attention.py
|
MIT
|
def forward(
self,
hidden_states: torch.Tensor,
encoder_hidden_states: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
):
"""
The [`Transformer1DModel`] forward method.
Args:
hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, sequence length)` if continuous):
Input `hidden_states`.
encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
self-attention.
attention_mask ( `torch.Tensor`, *optional*):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
encoder_attention_mask ( `torch.Tensor`, *optional*):
Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
* Mask `(batch, sequence_length)` True = keep, False = discard.
* Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
above. This bias will be added to the cross-attention scores.
Returns:
torch.FloatTensor
"""
# ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
# we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
# we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
# expects mask of shape:
# [batch, key_tokens]
# adds singleton query_tokens dimension:
# [batch, 1, key_tokens]
# this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
# [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
# [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
if attention_mask is not None and attention_mask.ndim == 2:
# assume that mask is expressed as:
# (1 = keep, 0 = discard)
# convert mask into a bias that can be added to attention scores:
# (keep = +0, discard = -10000.0)
attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
attention_mask = attention_mask.unsqueeze(1)
# convert encoder_attention_mask to a bias the same way we do for attention_mask
if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
encoder_attention_mask = (
1 - encoder_attention_mask.to(hidden_states.dtype)
) * -10000.0
encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
# 1. Input
batch, _, seq_len = hidden_states.shape
residual = hidden_states
hidden_states = self.norm(hidden_states)
inner_dim = hidden_states.shape[1]
hidden_states = hidden_states.permute(0, 2, 1).reshape(
batch, seq_len, inner_dim
)
hidden_states = self.proj_in(hidden_states)
# 2. Blocks
for block in self.transformer_blocks:
if self.training and self.gradient_checkpointing:
hidden_states = torch.utils.checkpoint.checkpoint(
block,
hidden_states,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
use_reentrant=False,
)
else:
hidden_states = block(
hidden_states,
attention_mask=attention_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
)
# 3. Output
hidden_states = self.proj_out(hidden_states)
hidden_states = (
hidden_states.reshape(batch, seq_len, inner_dim)
.permute(0, 2, 1)
.contiguous()
)
output = hidden_states + residual
return output
|
The [`Transformer1DModel`] forward method.
Args:
hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, sequence length)` if continuous):
Input `hidden_states`.
encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
self-attention.
attention_mask ( `torch.Tensor`, *optional*):
An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
negative values to the attention scores corresponding to "discard" tokens.
encoder_attention_mask ( `torch.Tensor`, *optional*):
Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
* Mask `(batch, sequence_length)` True = keep, False = discard.
* Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
above. This bias will be added to the cross-attention scores.
Returns:
torch.FloatTensor
|
forward
|
python
|
VAST-AI-Research/TripoSR
|
tsr/models/transformer/transformer_1d.py
|
https://github.com/VAST-AI-Research/TripoSR/blob/master/tsr/models/transformer/transformer_1d.py
|
MIT
|
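A sketch of the 2D-mask-to-bias conversion performed at the top of `forward`, with assumed values:
import torch

mask = torch.tensor([[1, 1, 0]])          # (batch, key_tokens); 1 = keep, 0 = discard
bias = (1 - mask.float()) * -10000.0      # 0.0 where kept, -10000.0 where discarded
bias = bias.unsqueeze(1)                  # (batch, 1, key_tokens), broadcastable over queries
assert bias.shape == (1, 1, 3)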
def log_metric(
accelerator,
metrics: Dict,
train_time: float,
step: int,
epoch: int,
learning_rate: float = None,
prefix: str = "train",
):
"""Helper function to log all training/evaluation metrics with the correct prefixes and styling."""
log_metrics = {}
for k, v in metrics.items():
log_metrics[f"{prefix}/{k}"] = v
log_metrics[f"{prefix}/time"] = train_time
log_metrics[f"{prefix}/epoch"] = epoch
if learning_rate is not None:
log_metrics[f"{prefix}/learning_rate"] = learning_rate
accelerator.log(log_metrics, step=step)
|
Helper function to log all training/evaluation metrics with the correct prefixes and styling.
|
log_metric
|
python
|
huggingface/distil-whisper
|
training/run_distillation.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/run_distillation.py
|
MIT
|
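A hypothetical call; the `accelerator` object and the metric values are assumed for illustration:
metrics = {"loss": 0.42, "wer": 12.3}
log_metric(accelerator, metrics, train_time=3600.0, step=5000, epoch=2,
           learning_rate=1e-4, prefix="train")
# logs train/loss, train/wer, train/time, train/epoch and train/learning_rate at step 5000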
def log_pred(
accelerator,
pred_str: List[str],
label_str: List[str],
norm_pred_str: List[str],
norm_label_str: List[str],
step: int,
prefix: str = "eval",
num_lines: int = 200000,
):
"""Helper function to log target/predicted transcriptions to weights and biases (wandb)."""
if accelerator.is_main_process:
wandb_tracker = accelerator.get_tracker("wandb")
# pretty name for current step: step 50000 -> step 50k
cur_step_pretty = f"{int(step // 1000)}k" if step > 1000 else step
prefix_pretty = prefix.replace("/", "-")
# convert str data to a wandb compatible format
str_data = [[label_str[i], pred_str[i], norm_label_str[i], norm_pred_str[i]] for i in range(len(pred_str))]
# log as a table with the appropriate headers
wandb_tracker.log_table(
table_name=f"predictions/{prefix_pretty}-step-{cur_step_pretty}",
columns=["Target", "Pred", "Norm Target", "Norm Pred"],
data=str_data[:num_lines],
step=step,
)
# log incorrect normalised predictions
str_data = np.asarray(str_data)
str_data_incorrect = str_data[str_data[:, -2] != str_data[:, -1]]
# log as a table with the appropriate headers
wandb_tracker.log_table(
table_name=f"incorrect_predictions/{prefix_pretty}-step-{cur_step_pretty}",
columns=["Target", "Pred", "Norm Target", "Norm Pred"],
data=str_data_incorrect[:num_lines],
step=step,
)
|
Helper function to log target/predicted transcriptions to weights and biases (wandb).
|
log_pred
|
python
|
huggingface/distil-whisper
|
training/run_distillation.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/run_distillation.py
|
MIT
|
def convert_dataset_str_to_list(
dataset_names,
dataset_config_names,
splits=None,
text_column_names=None,
dataset_samples=None,
default_split="train",
) -> List[Dict]:
"""
Given three lists of dataset names, configs and splits, this function groups the corresponding
names/configs/splits. Each dataset is assigned a unique dictionary with these metadata values, and the
function returns a list of dictionaries, one for each dataset.
"""
if isinstance(dataset_names, str):
dataset_names = dataset_names.split("+")
dataset_config_names = dataset_config_names.split("+") if dataset_config_names is not None else None
splits = splits.split("+") if splits is not None else None
text_column_names = text_column_names.split("+") if text_column_names is not None else None
dataset_samples = dataset_samples.split("+") if dataset_samples is not None else None
# basic checks to ensure we've got the right number of datasets/configs/splits/columns/probs
if dataset_config_names is not None and len(dataset_names) != len(dataset_config_names):
raise ValueError(
f"Ensure one config is passed for each dataset, got {len(dataset_names)} datasets and"
f" {len(dataset_config_names)} configs."
)
if splits is not None and len(splits) != len(dataset_names):
raise ValueError(
f"Ensure one split is passed for each dataset, got {len(dataset_names)} datasets and {len(splits)} splits."
)
if text_column_names is not None and len(text_column_names) != len(dataset_names):
raise ValueError(
f"Ensure one text column name is passed for each dataset, got {len(dataset_names)} datasets and"
f" {len(text_column_names)} text column names."
)
if dataset_samples is not None:
if len(dataset_samples) != len(dataset_names):
raise ValueError(
f"Ensure one sample is passed for each dataset, got {len(dataset_names)} datasets and "
f"{len(dataset_samples)} samples."
)
dataset_samples = [float(ds_sample) for ds_sample in dataset_samples]
else:
dataset_samples = [None] * len(dataset_names)
dataset_config_names = (
dataset_config_names if dataset_config_names is not None else ["default" for _ in range(len(dataset_names))]
)
text_column_names = (
text_column_names if text_column_names is not None else ["text" for _ in range(len(dataset_names))]
)
splits = splits if splits is not None else [default_split for _ in range(len(dataset_names))]
dataset_names_dict = []
for i, ds_name in enumerate(dataset_names):
dataset_names_dict.append(
{
"name": ds_name,
"config": dataset_config_names[i],
"split": splits[i],
"text_column_name": text_column_names[i],
"samples": dataset_samples[i],
}
)
return dataset_names_dict
|
Given three lists of dataset names, configs and splits, this function groups the corresponding
names/configs/splits. Each dataset is assigned a unique dictionary with these metadata values, and the
function returns a list of dictionaries, one for each dataset.
|
convert_dataset_str_to_list
|
python
|
huggingface/distil-whisper
|
training/run_distillation.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/run_distillation.py
|
MIT
|
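An example of the "+"-separated string form this helper parses; the dataset names and values are assumed:
datasets = convert_dataset_str_to_list(
    dataset_names="librispeech_asr+common_voice",
    dataset_config_names="clean+en",
    splits="train.360+train",
    text_column_names="text+sentence",
    dataset_samples="100+200",
)
# -> [{"name": "librispeech_asr", "config": "clean", "split": "train.360",
#      "text_column_name": "text", "samples": 100.0},
#     {"name": "common_voice", "config": "en", "split": "train",
#      "text_column_name": "sentence", "samples": 200.0}]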
def sorted_checkpoints(output_dir=None, checkpoint_prefix="checkpoint") -> List[str]:
"""Helper function to sort saved checkpoints from oldest to newest."""
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)]
glob_checkpoints = [path for path in glob_checkpoints if "val-wer" not in path] # filter out best model checkpoints
for path in glob_checkpoints:
regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path)
if regex_match is not None and regex_match.groups() is not None:
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
|
Helper function to sort saved checkpoints from oldest to newest.
|
sorted_checkpoints
|
python
|
huggingface/distil-whisper
|
training/run_distillation.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/run_distillation.py
|
MIT
|
def sorted_best_checkpoints(output_dir=None, checkpoint_prefix="checkpoint"):
"""Helper function to sort saved best checkpoints."""
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(output_dir).glob(f"{checkpoint_prefix}-*") if os.path.isdir(x)]
for path in glob_checkpoints:
regex_match = re.search(r"val-wer-([0-9]+\.[0-9]+)", path)
if regex_match is not None and regex_match.groups() is not None:
ordering_and_checkpoint_path.append((float(regex_match.groups(1)[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path, reverse=True)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
|
Helper function to sort saved best checkpoints.
|
sorted_best_checkpoints
|
python
|
huggingface/distil-whisper
|
training/run_distillation.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/run_distillation.py
|
MIT
|
def rotate_checkpoints(save_total_limit=None, output_dir=None, checkpoint_prefix="checkpoint", sorting_fn=sorted_checkpoints) -> None:
"""Helper function to delete old checkpoints."""
if save_total_limit is None or save_total_limit <= 0:
return
# Check if we should delete older checkpoint(s)
checkpoints_sorted = sorting_fn(output_dir=output_dir, checkpoint_prefix=checkpoint_prefix)
if len(checkpoints_sorted) <= save_total_limit:
return
number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - save_total_limit)
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info(f"Deleting older checkpoint [{checkpoint}].")
shutil.rmtree(checkpoint, ignore_errors=True)
|
Helper function to delete old checkpoints.
|
rotate_checkpoints
|
python
|
huggingface/distil-whisper
|
training/run_distillation.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/run_distillation.py
|
MIT
|
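A hypothetical usage sketch for the two checkpoint helpers above, with directory names assumed:
# assumed layout: output/checkpoint-500, output/checkpoint-1000, output/checkpoint-1500
print(sorted_checkpoints(output_dir="output"))
# ['output/checkpoint-500', 'output/checkpoint-1000', 'output/checkpoint-1500']
rotate_checkpoints(save_total_limit=2, output_dir="output")
# deletes output/checkpoint-500 and keeps the two most recent checkpoints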
def get_parameter_names(model, forbidden_layer_types, forbidden_module=None):
"""
Returns the names of the model parameters that are not inside a forbidden layer or forbidden module.
Can be used to get a subset of parameter names for decay masks, or to exclude parameters from an optimiser
(e.g. if the module is frozen).
"""
result = []
for name, child in model.named_children():
result += [
f"{name}.{n}"
for n in get_parameter_names(child, forbidden_layer_types, forbidden_module)
if not (
isinstance(child, tuple(forbidden_layer_types))
or (child in tuple(forbidden_module) if forbidden_module is not None else False)
)
]
# Add model specific parameters (defined with nn.Parameter) since they are not in any child.
result += list(model._parameters.keys())
return result
|
Returns the names of the model parameters that are not inside a forbidden layer or forbidden module.
Can be used to get a subset of parameter names for decay masks, or to exclude parameters from an optimiser
(e.g. if the module is frozen).
|
get_parameter_names
|
python
|
huggingface/distil-whisper
|
training/run_distillation.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/run_distillation.py
|
MIT
|
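A typical decay-mask usage, in the style of the HF training scripts; the `model` instance is assumed:
import torch.nn as nn

decay_parameters = get_parameter_names(model, forbidden_layer_types=[nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if "bias" not in name]
optimizer_grouped_parameters = [
    {"params": [p for n, p in model.named_parameters() if n in decay_parameters],
     "weight_decay": 0.01},
    {"params": [p for n, p in model.named_parameters() if n not in decay_parameters],
     "weight_decay": 0.0},
]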
def prepare_train_dataset(batch):
"""
Pre-process the raw dataset in a three stage process:
1. Convert the audio arrays to log-mel spectrogram inputs
2. Possibly filter the timestamp tokens from the token ids (depending on the timestamp probability)
3. Possibly add prompt tokens if conditioning on previous text (depending on the conditioning probability)
"""
# process audio input
audio = [sample["array"] for sample in batch["audio"]]
inputs = feature_extractor(audio, sampling_rate=sampling_rate)
batch["input_features"] = inputs.input_features
batch["input_length"] = [len(sample) for sample in audio]
# process text targets - for training these are the Whisper-generated pseudo-labels
input_str_batched = batch[train_text_column_name]
condition_on_prev_batched = batch.get("condition_on_prev", len(input_str_batched) * [None])
all_token_ids = []
all_token_ids_unprompted = []
for prev_ids, input_str in zip(condition_on_prev_batched, input_str_batched):
token_ids = tokenizer(input_str, add_special_tokens=not use_pseudo_labels).input_ids
# check whether we have timestamps in the PLs and filter if required
has_timestamps = len(set(token_ids) & set(timestamp_ids)) > 0
if has_timestamps:
# sample from a Bernoulli distribution to decide whether to train on timestamps
predict_timestamps = bool(np.random.binomial(1, timestamp_probability))
if not predict_timestamps:
# filter timestamps and insert the <|notimestamps|> task token
token_ids = [token for token in token_ids if token < timestamp_begin]
token_ids.insert(timestamp_position, timestamp_begin)
all_token_ids_unprompted.append(token_ids)
# check whether to condition on previous text - we do this with probability condition_on_prev_probability
condition_on_prev = bool(np.random.binomial(1, condition_on_prev_probability))
if not condition_on_prev:
prev_ids = None
elif "condition_on_prev" not in batch and len(all_token_ids_unprompted) > 1:
# prompt ids are the penultimate token ids in the batch
prev_ids = all_token_ids_unprompted[-2]
if prev_ids is not None:
if has_timestamps and not predict_timestamps:
# filter timestamp ids from prompt when not predicting timestamps
prev_ids = [token for token in prev_ids if token < timestamp_begin]
# check that the length of the prompt does not exceed half the max label length (224)
if len(prev_ids) > prompt_cutoff_length:
prev_ids = prev_ids[-prompt_cutoff_length + 1 :]
# and that the total length of the labels does not exceed the max label length (448)
if len(prev_ids + token_ids) + 1 > max_label_length:
trim_length = len(token_ids) - max_label_length + 1
prev_ids = prev_ids[trim_length:]
prev_ids = [decoder_prev_token_id] + prev_ids
token_ids = prev_ids + token_ids
all_token_ids.append(token_ids)
batch["labels"] = all_token_ids
return batch
|
Pre-process the raw dataset in a three stage process:
1. Convert the audio arrays to log-mel spectrogram inputs
2. Possibly filter the timestamp tokens from the token ids (depending on the timestamp probability)
3. Possibly add prompt tokens if conditioning on previous text (depending on the conditioning probability)
|
prepare_train_dataset
|
python
|
huggingface/distil-whisper
|
training/run_distillation.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/run_distillation.py
|
MIT
|
def shift_tokens_right(label_ids: np.ndarray, decoder_start_token_id: int) -> np.ndarray:
"""
Shift label ids one token to the right.
"""
shifted_label_ids = np.zeros_like(label_ids)
shifted_label_ids[:, 1:] = label_ids[:, :-1]
shifted_label_ids[:, 0] = decoder_start_token_id
return shifted_label_ids
|
Shift label ids one token to the right.
|
shift_tokens_right
|
python
|
huggingface/distil-whisper
|
training/run_pseudo_labelling.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/run_pseudo_labelling.py
|
MIT
|
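A worked example with assumed values:
import numpy as np

labels = np.array([[5, 6, 7, 8]])
print(shift_tokens_right(labels, decoder_start_token_id=1))
# [[1 5 6 7]]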
def log_metric(
accelerator,
metrics: Dict,
train_time: float,
prefix: str = "eval",
):
"""Helper function to log all evaluation metrics with the correct prefixes and styling."""
log_metrics = {}
for k, v in metrics.items():
log_metrics[f"{prefix}/{k}"] = v
log_metrics[f"{prefix}/time"] = train_time
accelerator.log(log_metrics)
|
Helper function to log all evaluation metrics with the correct prefixes and styling.
|
log_metric
|
python
|
huggingface/distil-whisper
|
training/run_pseudo_labelling.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/run_pseudo_labelling.py
|
MIT
|
def create_learning_rate_fn(
num_train_steps: int, lr_scheduler_type: str, num_warmup_steps: int, learning_rate: float
) -> Callable[[int], jnp.ndarray]:
"""Returns a linear warmup, linear_decay learning rate function."""
lr_scheduler_types = ("linear", "constant_with_warmup")
if lr_scheduler_type not in lr_scheduler_types:
raise ValueError(
f"lr_scheduler_type of type {lr_scheduler_type} not supported, choose from {lr_scheduler_types}."
)
warmup_fn = optax.linear_schedule(init_value=0.0, end_value=learning_rate, transition_steps=num_warmup_steps)
decay_fn = optax.linear_schedule(
init_value=learning_rate,
end_value=0 if lr_scheduler_type == "linear" else learning_rate,
transition_steps=num_train_steps - num_warmup_steps,
)
schedule_fn = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[num_warmup_steps])
return schedule_fn
|
Returns a linear warmup, linear_decay learning rate function.
|
create_learning_rate_fn
|
python
|
huggingface/distil-whisper
|
training/flax/convert_train_state_to_hf.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/convert_train_state_to_hf.py
|
MIT
|
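A sketch of the resulting schedule, evaluated at a few assumed step counts:
schedule_fn = create_learning_rate_fn(
    num_train_steps=1000,
    lr_scheduler_type="linear",
    num_warmup_steps=100,
    learning_rate=1e-3,
)
schedule_fn(0)     # 0.0 at the start of warmup
schedule_fn(100)   # 1e-3 at the end of warmup
schedule_fn(1000)  # 0.0 at the end of the linear decay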
def apply_gradients(self, *, grads, **kwargs):
"""Updates `step`, `params`, `opt_state` and `**kwargs` in return value, clipping the
gradients by the maximum grad norm.
Note that internally this function calls `.tx.update()` followed by a call
to `optax.apply_updates()` to update `params` and `opt_state`.
Args:
grads: Gradients that have the same pytree structure as `.params`.
**kwargs: Additional dataclass attributes that should be `.replace()`-ed.
Returns:
An updated instance of `self` with `step` incremented by one, `params`
and `opt_state` updated by applying `grads`, and additional attributes
replaced as specified by `kwargs`.
"""
# clip gradients by global l2 norm
g_norm = linear_algebra.global_norm(grads)
g_norm = jnp.maximum(self.max_grad_norm, g_norm)
grads = jax.tree_map(lambda t: (t / g_norm) * self.max_grad_norm, grads)
updates, new_opt_state = self.tx.update(grads, self.opt_state, self.params)
new_params = optax.apply_updates(self.params, updates)
return self.replace(
step=self.step + 1,
params=new_params,
opt_state=new_opt_state,
**kwargs,
)
|
Updates `step`, `params`, `opt_state` and `**kwargs` in return value, clipping the
gradients by the maximum grad norm.
Note that internally this function calls `.tx.update()` followed by a call
to `optax.apply_updates()` to update `params` and `opt_state`.
Args:
grads: Gradients that have the same pytree structure as `.params`.
**kwargs: Additional dataclass attributes that should be `.replace()`-ed.
Returns:
An updated instance of `self` with `step` incremented by one, `params`
and `opt_state` updated by applying `grads`, and additional attributes
replaced as specified by `kwargs`.
|
apply_gradients
|
python
|
huggingface/distil-whisper
|
training/flax/convert_train_state_to_hf.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/convert_train_state_to_hf.py
|
MIT
|
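The clipping arithmetic above, in isolation with assumed values:
import jax.numpy as jnp

max_grad_norm = 1.0
g = jnp.array([3.0, 4.0])                        # global norm 5.0
g_norm = jnp.maximum(max_grad_norm, jnp.linalg.norm(g))
clipped = (g / g_norm) * max_grad_norm           # [0.6, 0.8], global norm exactly 1.0
# gradients whose norm is already below max_grad_norm are left unscaled,
# since g_norm then equals max_grad_norm and the two factors cancel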
def get_data_loader(
seed: int,
dataset: IterableDataset,
batch_size: int,
data_collator: FlaxDataCollatorSpeechSeq2SeqWithPadding,
shuffle: bool = True,
drop_last: bool = True,
dataloader_num_workers: int = 0,
skip_batches: int = 0,
pin_memory: bool = True,
prefetch_size: int = 0,
) -> DataLoader:
"""
Returns batches of size `batch_size` from `dataset`. If `drop_last` is set to `False`, the final batch may be incomplete,
and range in size from 1 to `batch_size`. Shuffle batches if `shuffle` is `True`.
Args:
seed (int): Numpy seed for generating pseudo random numbers. Used if shuffling the dataset.
dataset (IterableDataset): streaming dataset from which to load the data.
batch_size (int): how many samples per batch to load.
data_collator (FlaxDataCollatorSpeechSeq2SeqWithPadding, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a map-style dataset.
shuffle (bool, optional): set to `True` to have the batches reshuffled.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: ``True``)
dataloader_num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded in the main process.
(default: ``0``)
skip_batches (int, optional): Efficiently skip the first `skip_batches` batches.
pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
into device/CUDA pinned memory before returning them. If your data elements
are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
the type must define a ``pin_memory()`` method for pinning to apply.
prefetch_size (int, optional): number of batches to prefetch; ``0`` disables prefetching.
"""
if shuffle:
dataset = dataset.shuffle(seed)
if skip_batches > 0:
dataset = dataset.skip(skip_batches * batch_size)
if prefetch_size > 0:
dataset = IterableWrapper(dataset)
dataset = dataset.prefetch(prefetch_size)
data_loader = DataLoader(
dataset,
batch_size=batch_size,
drop_last=drop_last,
pin_memory=pin_memory,
collate_fn=data_collator,
num_workers=dataloader_num_workers,
)
return data_loader
|
Returns batches of size `batch_size` from `dataset`. If `drop_last` is set to `False`, the final batch may be incomplete,
and range in size from 1 to `batch_size`. Shuffle batches if `shuffle` is `True`.
Args:
seed (int): Numpy seed for generating pseudo random numbers. Used if shuffling the dataset.
dataset (IterableDataset): streaming dataset from which to load the data.
batch_size (int): how many samples per batch to load.
data_collator (FlaxDataCollatorSpeechSeq2SeqWithPadding, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a map-style dataset.
shuffle (bool, optional): set to `True` to have the batches reshuffled.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: ``True``)
dataloader_num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded in the main process.
(default: ``0``)
skip_batches (int, optional): Efficiently skip the first `skip_batches` batches.
pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
into device/CUDA pinned memory before returning them. If your data elements
are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
the type must define a ``pin_memory()`` method for pinning to apply.
prefetch_size (int, optional): number of batches to prefetch; ``0`` disables prefetching.
|
get_data_loader
|
python
|
huggingface/distil-whisper
|
training/flax/run_distillation.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/run_distillation.py
|
MIT
|
def create(cls, *, apply_fn, params, tx, to_dtype: Callable, **kwargs):
"""Creates a new instance with `step=0` and initialized `opt_state`."""
# downcast optimizer state to bf16 if mixed-precision training
opt_state = tx.init(to_dtype(params))
return cls(
step=0,
apply_fn=apply_fn,
params=params,
tx=tx,
opt_state=opt_state,
**kwargs,
)
|
Creates a new instance with `step=0` and initialized `opt_state`.
|
create
|
python
|
huggingface/distil-whisper
|
training/flax/run_distillation.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/run_distillation.py
|
MIT
|
def get_layers_to_supervise(student_layers: int, teacher_layers: int) -> dict:
"""Helper function to map the student layer i to the teacher layer j whose output we'd like them to emulate. Used
for MSE loss terms in distillation (hidden-states and activations). Student layers are paired with teacher layers
in equal increments, e.g. for a 12-layer model distilled to a 3-layer model, student layer 0 emulates teacher layer
3 (such that it behaves like the first 4 teacher layers), student layer 1 emulates teacher layer 7, and student layer
2 emulates teacher layer 11. This mapping is summarised by the dictionary: {0: 3, 1: 7, 2: 11}, which is precisely
the output of this function for the arguments (student_layers=3, teacher_layers=12)."""
layer_intervals = np.linspace(teacher_layers // student_layers - 1, teacher_layers - 1, student_layers, dtype=int)
layer_intervals[-1] = teacher_layers - 1
layer_map = {}
for student_layer, teacher_layer in enumerate(layer_intervals):
layer_map[student_layer] = teacher_layer
return layer_map
|
Helper function to map the student layer i to the teacher layer j whose output we'd like them to emulate. Used
for MSE loss terms in distillation (hidden-states and activations). Student layers are paired with teacher layers
in equal increments, e.g. for a 12-layer model distilled to a 3-layer model, student layer 0 emulates teacher layer
3 (such that it behaves like the first 4 teacher layers), student layer 1 emulates teacher layer 7, and student layer
2 emulates teacher layer 11. This mapping is summarised by the dictionary: {0: 3, 1: 7, 2: 11}, which is precisely
the output of this function for the arguments (student_layers=3, teacher_layers=12).
|
get_layers_to_supervise
|
python
|
huggingface/distil-whisper
|
training/flax/run_distillation.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/run_distillation.py
|
MIT
|
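The docstring's example, reproduced as a call:
layer_map = get_layers_to_supervise(student_layers=3, teacher_layers=12)
# maps student layer -> teacher layer: {0: 3, 1: 7, 2: 11}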
def _np_extract_fbank_features(self, waveform: np.ndarray) -> np.ndarray:
"""
Compute the log-mel spectrogram of the provided audio using torch filters. Using the torch implementation
computes stft filter banks approx 5x faster than its numpy counterpart, which is the native implementation
in transformers, and matches to within 1e-5 abs tolerance.
"""
waveform = torch.from_numpy(waveform).type(torch.float32)
window = torch.hann_window(self.n_fft)
stft = torch.stft(waveform, self.n_fft, self.hop_length, window=window, return_complex=True)
magnitudes = stft[..., :-1].abs() ** 2
mel_filters = torch.from_numpy(self.mel_filters).type(torch.float32)
mel_spec = mel_filters.T @ magnitudes
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec.numpy()
|
Compute the log-mel spectrogram of the provided audio using torch filters. Using the torch implementation
computes stft filter banks approx 5x faster than its numpy counterpart, which is the native implementation
in transformers, and matches to within 1e-5 abs tolerance.
|
_np_extract_fbank_features
|
python
|
huggingface/distil-whisper
|
training/flax/run_distillation.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/run_distillation.py
|
MIT
|
def get_data_loader(
dataset: Dataset,
batch_size: int,
data_collator: FlaxDataCollatorSpeechSeq2SeqWithPadding,
dataloader_num_workers: int = 0,
pin_memory: bool = True,
) -> DataLoader:
"""
Returns batches of size `batch_size` from `dataset`. Since `drop_last` is fixed to `False`, the final batch may be
incomplete, ranging in size from 1 to `batch_size`.
Args:
dataset (Dataset): dataset from which to load the data.
batch_size (int): how many samples per batch to load.
data_collator (FlaxDataCollatorSpeechSeq2SeqWithPadding, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a map-style dataset.
dataloader_num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded in the main process.
(default: ``0``)
pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
into device/CUDA pinned memory before returning them. If your data elements
are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
the type must define a ``pin_memory()`` method for pinning to apply.
"""
data_loader = DataLoader(
dataset,
batch_size=batch_size,
drop_last=False,
pin_memory=pin_memory,
collate_fn=data_collator,
num_workers=dataloader_num_workers,
)
return data_loader
|
Returns batches of size `batch_size` from `dataset`. Since `drop_last` is fixed to `False`, the final batch may be
incomplete, ranging in size from 1 to `batch_size`.
Args:
dataset (Dataset): dataset from which to load the data.
batch_size (int): how many samples per batch to load.
data_collator (FlaxDataCollatorSpeechSeq2SeqWithPadding, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a map-style dataset.
dataloader_num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded in the main process.
(default: ``0``)
pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
into device/CUDA pinned memory before returning them. If your data elements
are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
the type must define a ``pin_memory()`` method for pinning to apply.
|
get_data_loader
|
python
|
huggingface/distil-whisper
|
training/flax/run_eval.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/run_eval.py
|
MIT
|
def loss_fn(logits, labels, label_smoothing_factor=0.0):
"""
The label smoothing implementation is adapted from Flax's official example:
https://github.com/google/flax/blob/87a211135c6a377c8f29048a1cac3840e38b9da4/examples/wmt/train.py#L104
"""
vocab_size = logits.shape[-1]
confidence = 1.0 - label_smoothing_factor
low_confidence = (1.0 - confidence) / (vocab_size - 1)
normalizing_constant = -(
confidence * jnp.log(confidence) + (vocab_size - 1) * low_confidence * jnp.log(low_confidence + 1e-20)
)
soft_labels = onehot(labels, vocab_size, on_value=confidence, off_value=low_confidence)
loss = optax.softmax_cross_entropy(logits, soft_labels)
loss = loss - normalizing_constant
# mask out padded tokens from the loss, i.e. positions where labels are set to -100
padding_mask = labels >= 0
loss = loss * padding_mask
loss = loss.sum()
num_labels = padding_mask.sum()
return loss, num_labels
|
The label smoothing implementation is adapted from Flax's official example:
https://github.com/google/flax/blob/87a211135c6a377c8f29048a1cac3840e38b9da4/examples/wmt/train.py#L104
|
loss_fn
|
python
|
huggingface/distil-whisper
|
training/flax/run_eval.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/run_eval.py
|
MIT
|
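A shape sketch with assumed values; it relies on the script's `onehot` helper, and padded positions (label -100) are zeroed out of the summed loss:
import jax.numpy as jnp

logits = jnp.zeros((2, 3, 5))                        # (batch, seq_len, vocab)
labels = jnp.array([[1, 2, -100], [0, -100, -100]])  # -100 marks padding
loss, num_labels = loss_fn(logits, labels, label_smoothing_factor=0.1)
# num_labels == 3: only the non-padded positions contribute to the summed loss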
def get_data_loader(
rng: jax.random.PRNGKey,
dataset: Dataset,
batch_size: int,
data_collator: FlaxDataCollatorSpeechSeq2SeqWithPadding,
shuffle: bool = True,
drop_last: bool = True,
dataloader_num_workers: int = 0,
pin_memory: bool = True,
) -> DataLoader:
"""
Returns batches of size `batch_size` from `dataset`. If `drop_last` is set to `False`, the final batch may be incomplete,
and range in size from 1 to `batch_size`. Shuffle batches if `shuffle` is `True`.
Args:
rng (jax.random.PRNGKey): JAX rng for generating pseudo random numbers. Used if shuffling the dataset.
dataset (Dataset): dataset from which to load the data.
batch_size (int): how many samples per batch to load.
data_collator (FlaxDataCollatorSpeechSeq2SeqWithPadding, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a map-style dataset.
shuffle (bool, optional): set to `True` to have the batches reshuffled.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: ``True``)
dataloader_num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded in the main process.
(default: ``0``)
pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
into device/CUDA pinned memory before returning them. If your data elements
are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
the type must define a ``pin_memory()`` method for pinning to apply.
"""
if shuffle:
batch_idx = jax.random.permutation(rng, len(dataset))
batch_idx = np.asarray(batch_idx)
dataset = dataset.select(batch_idx)
data_loader = DataLoader(
dataset,
batch_size=batch_size,
drop_last=drop_last,
pin_memory=pin_memory,
collate_fn=data_collator,
num_workers=dataloader_num_workers,
)
return data_loader
|
Returns batches of size `batch_size` from `dataset`. If `drop_last` is set to `False`, the final batch may be incomplete,
and range in size from 1 to `batch_size`. Shuffle batches if `shuffle` is `True`.
Args:
rng (jax.random.PRNGKey): JAX rng for generating pseudo random numbers. Used if shuffling the dataset.
dataset (Dataset): dataset from which to load the data.
batch_size (int): how many samples per batch to load.
data_collator (FlaxDataCollatorSpeechSeq2SeqWithPadding, optional): merges a list of samples to form a
mini-batch of Tensor(s). Used when using batched loading from a map-style dataset.
shuffle (bool, optional): set to `True` to have the batches reshuffled.
drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,
if the dataset size is not divisible by the batch size. If ``False`` and
the size of dataset is not divisible by the batch size, then the last batch
will be smaller. (default: ``True``)
dataloader_num_workers (int, optional): how many subprocesses to use for data
loading. ``0`` means that the data will be loaded in the main process.
(default: ``0``)
pin_memory (bool, optional): If ``True``, the data loader will copy Tensors
into device/CUDA pinned memory before returning them. If your data elements
are a custom type, or your :attr:`collate_fn` returns a batch that is a custom type,
the type must define a ``pin_memory()`` method for pinning to apply.
|
get_data_loader
|
python
|
huggingface/distil-whisper
|
training/flax/run_finetuning.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/run_finetuning.py
|
MIT
|
def nd_dense_init(scale, mode, distribution):
"""Initializer with in_axis, out_axis set at call time."""
def init_fn(key, shape, dtype, in_axis, out_axis):
fn = variance_scaling(scale, mode, distribution, in_axis, out_axis)
return fn(key, shape, dtype)
return init_fn
|
Initializer with in_axis, out_axis set at call time.
|
nd_dense_init
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
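A sketch of the deferred-axes pattern, with assumed shapes:
import jax
import jax.numpy as jnp

init_fn = nd_dense_init(1.0, "fan_in", "truncated_normal")
# in/out axes are supplied at call time: fan-in over axis 0, fan-out over axes (1, 2)
kernel = init_fn(jax.random.PRNGKey(0), (512, 8, 64), jnp.float32, 0, (1, 2))
assert kernel.shape == (512, 8, 64)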
def __call__(
self,
inputs_q: Array,
inputs_kv: Array,
mask: Optional[Array] = None,
bias: Optional[Array] = None,
*,
decode: bool = False,
deterministic: bool = False,
) -> Array:
"""Applies multi-head dot product attention on the input data.
Projects the inputs into multi-headed query, key, and value vectors,
applies dot-product attention and projects the results to an output vector.
There are two modes: decoding and non-decoding (e.g., training). The mode is
determined by `decode` argument. For decoding, this method is called twice,
first to initialize the cache and then for an actual decoding process. The
two calls are differentiated by the presence of 'cached_key' in the variable
dict. In the cache initialization stage, the cache variables are initialized
as zeros and will be filled in the subsequent decoding process.
In the cache initialization call, `inputs_q` has a shape [batch, length,
q_features] and `inputs_kv`: [batch, length, kv_features]. During the
incremental decoding stage, query, key and value all have the shape [batch,
1, qkv_features] corresponding to a single step.
Args:
inputs_q: input queries of shape `[batch, q_length, q_features]`.
inputs_kv: key/values of shape `[batch, kv_length, kv_features]`.
mask: attention mask of shape `[batch, num_heads, q_length, kv_length]`.
bias: attention bias of shape `[batch, num_heads, q_length, kv_length]`.
decode: Whether to prepare and use an autoregressive cache.
deterministic: Disables dropout if set to True.
Returns:
output of shape `[batch, length, q_features]`.
"""
projection = functools.partial(
DenseGeneral,
axis=-1,
features=(self.num_heads, self.head_dim),
kernel_axes=("embed", "heads", "kv"),
dtype=self.dtype,
)
# NOTE: T5 does not explicitly rescale the attention logits by
# 1/sqrt(depth_kq)! This is folded into the initializers of the
# linear transformations, which is equivalent under Adafactor.
depth_scaling = jnp.sqrt(self.head_dim).astype(self.dtype)
def query_init(*args):
return self.kernel_init(*args) / depth_scaling
# Project inputs_q to multi-headed q/k/v
# dimensions are then [batch, length, num_heads, head_dim]
query = projection(kernel_init=query_init, name="query")(inputs_q)
key = projection(kernel_init=self.kernel_init, name="key")(inputs_kv)
value = projection(kernel_init=self.kernel_init, name="value")(inputs_kv)
query = with_sharding_constraint(query, ("batch", "length", "heads", "kv"))
key = with_sharding_constraint(key, ("batch", "length", "heads", "kv"))
value = with_sharding_constraint(value, ("batch", "length", "heads", "kv"))
if decode:
# Detect if we're initializing by absence of existing cache data.
is_initialized = self.has_variable("cache", "cached_key")
# The key and value have dimension [batch, length, num_heads, head_dim],
# but we cache them as [batch, num_heads, head_dim, length] as a TPU
# fusion optimization. This also enables the "scatter via one-hot
# broadcast" trick, which means we do a one-hot broadcast instead of a
# scatter/gather operations, resulting in a 3-4x speedup in practice.
def swap_dims(x):
return x[:-3] + tuple(x[i] for i in [-2, -1, -3])
cached_key = self.variable("cache", "cached_key", jnp.zeros, swap_dims(key.shape), key.dtype)
cached_value = self.variable("cache", "cached_value", jnp.zeros, swap_dims(value.shape), value.dtype)
cache_index = self.variable("cache", "cache_index", lambda: jnp.array(0, dtype=jnp.int32))
if is_initialized:
batch, num_heads, head_dim, length = cached_key.value.shape
# During fast autoregressive decoding, we feed one position at a time,
# and cache the keys and values step by step.
# Sanity shape check of cached key against input query.
expected_shape = (batch, 1, num_heads, head_dim)
if expected_shape != query.shape:
raise ValueError(
"Autoregressive cache shape error, "
"expected query shape %s instead got %s." % (expected_shape, query.shape)
)
# Create a one-hot encoding of the current index. NOTE: the index is increased below.
cur_index = cache_index.value
one_hot_indices = jax.nn.one_hot(cur_index, length, dtype=key.dtype)
# In order to update the key, value caches with the current key and
# value, we move the length axis to the back, similar to what we did for
# the cached ones above.
# Note these are currently the key and value of a single position, since
# we feed one position at a time.
one_token_key = jnp.moveaxis(key, -3, -1)
one_token_value = jnp.moveaxis(value, -3, -1)
# Update key, value caches with our new 1d spatial slices.
# We implement an efficient scatter into the cache via one-hot
# broadcast and addition.
key = cached_key.value + one_token_key * one_hot_indices
value = cached_value.value + one_token_value * one_hot_indices
cached_key.value = key
cached_value.value = value
cache_index.value = cache_index.value + 1
# Move the keys and values back to their original shapes.
key = jnp.moveaxis(key, -1, -3)
value = jnp.moveaxis(value, -1, -3)
# Causal mask for cached decoder self-attention: our single query
# position should only attend to those key positions that have already
# been generated and cached, not the remaining zero elements.
mask = combine_masks(
mask,
jnp.broadcast_to(
jnp.arange(length) <= cur_index,
# (1, 1, length) represent (head dim, query length, key length)
# query length is 1 because during decoding we deal with one
# index.
# The same mask is applied to all batch elements and heads.
(batch, 1, 1, length),
),
)
# Grab the correct relative attention bias during decoding. This is
# only required during single step decoding.
if bias is not None:
# The bias is a full attention matrix, but during decoding we only
# have to take a slice of it.
# This is equivalent to bias[..., cur_index:cur_index+1, :].
bias = dynamic_vector_slice_in_dim(jnp.squeeze(bias, axis=0), jnp.reshape(cur_index, (-1)), 1, -2)
# Convert the boolean attention mask to an attention bias.
if mask is not None:
# attention mask in the form of attention bias
attention_bias = lax.select(
mask > 0,
jnp.full(mask.shape, 0.0).astype(self.dtype),
jnp.full(mask.shape, -1e10).astype(self.dtype),
)
else:
attention_bias = None
# Add provided bias term (e.g. relative position embedding).
if bias is not None:
attention_bias = combine_biases(attention_bias, bias)
dropout_rng = None
if not deterministic and self.dropout_rate > 0.0:
dropout_rng = self.make_rng("dropout")
# Apply attention.
x = dot_product_attention(
query,
key,
value,
bias=attention_bias,
dropout_rng=dropout_rng,
dropout_rate=self.dropout_rate,
deterministic=deterministic,
dtype=self.dtype,
float32_logits=self.float32_logits,
)
# Back to the original inputs dimensions.
out = DenseGeneral(
features=inputs_q.shape[-1], # output dim is set to the input dim.
axis=(-2, -1),
kernel_init=self.kernel_init,
kernel_axes=("heads", "kv", "embed"),
dtype=self.dtype,
name="out",
)(x)
return out
|
Applies multi-head dot product attention on the input data.
Projects the inputs into multi-headed query, key, and value vectors,
applies dot-product attention and projects the results to an output vector.
There are two modes: decoding and non-decoding (e.g., training). The mode is
determined by `decode` argument. For decoding, this method is called twice,
first to initialize the cache and then for an actual decoding process. The
two calls are differentiated by the presence of 'cached_key' in the variable
dict. In the cache initialization stage, the cache variables are initialized
as zeros and will be filled in the subsequent decoding process.
In the cache initialization call, `inputs_q` has a shape [batch, length,
q_features] and `inputs_kv`: [batch, length, kv_features]. During the
incremental decoding stage, query, key and value all have the shape [batch,
1, qkv_features] corresponding to a single step.
Args:
inputs_q: input queries of shape `[batch, q_length, q_features]`.
inputs_kv: key/values of shape `[batch, kv_length, kv_features]`.
mask: attention mask of shape `[batch, num_heads, q_length, kv_length]`.
bias: attention bias of shape `[batch, num_heads, q_length, kv_length]`.
decode: Whether to prepare and use an autoregressive cache.
deterministic: Disables dropout if set to True.
Returns:
output of shape `[batch, length, q_features]`.
|
__call__
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
def __call__(self, inputs: Array) -> Array:
"""Applies a linear transformation to the inputs along multiple dimensions.
Args:
inputs: The nd-array to be transformed.
Returns:
The transformed input.
"""
features = _canonicalize_tuple(self.features)
axis = _canonicalize_tuple(self.axis)
inputs = jnp.asarray(inputs, self.dtype)
axis = _normalize_axes(axis, inputs.ndim)
kernel_shape = tuple([inputs.shape[ax] for ax in axis]) + features
kernel_in_axis = np.arange(len(axis))
kernel_out_axis = np.arange(len(axis), len(axis) + len(features))
kernel = param_with_axes(
"kernel",
self.kernel_init,
kernel_shape,
self.params_dtype,
kernel_in_axis,
kernel_out_axis,
axes=self.kernel_axes,
)
if self.use_bias:
bias = param_with_axes(
"bias",
self.bias_init,
features,
self.params_dtype,
axes=(self.kernel_axes[-1],),
)
kernel = jnp.asarray(kernel, self.dtype)
contract_ind = tuple(range(0, len(axis)))
y = lax.dot_general(inputs, kernel, ((axis, contract_ind), ((), ())))
if self.use_bias:
bias = jnp.asarray(bias, self.dtype)
# y += jnp.reshape(bias, (1,) * (y.ndim - 1) + (-1,))
y += jnp.reshape(bias, (1,) * (y.ndim - len(features)) + bias.shape)
return y
|
Applies a linear transformation to the inputs along multiple dimensions.
Args:
inputs: The nd-array to be transformed.
Returns:
The transformed input.
|
__call__
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
def _convert_to_activation_function(fn_or_string: Union[str, Callable]) -> Callable:
"""Convert a string to an activation function."""
if fn_or_string == "linear":
return lambda x: x
elif isinstance(fn_or_string, str):
return getattr(nn, fn_or_string)
elif callable(fn_or_string):
return fn_or_string
else:
raise ValueError("don't know how to convert %s to an activation function" % (fn_or_string,))
|
Convert a string to an activation function.
|
_convert_to_activation_function
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
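Example conversions; the resolution is a plain attribute lookup on `flax.linen`, so names like "gelu" are assumed to exist there:
_convert_to_activation_function("gelu")     # -> nn.gelu
_convert_to_activation_function("linear")   # -> identity lambda
_convert_to_activation_function(jnp.tanh)   # callables are returned unchanged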
def __call__(self, inputs: Array) -> Array:
"""Embeds the inputs along the last dimension.
Args:
inputs: input data, all dimensions are considered batch dimensions.
Returns:
Output which is embedded input data. The output shape follows the input,
with an additional `features` dimension appended.
"""
if self.cast_input_dtype:
inputs = inputs.astype(self.cast_input_dtype)
if not jnp.issubdtype(inputs.dtype, jnp.integer):
raise ValueError("Input type must be an integer or unsigned integer.")
if self.one_hot:
iota = lax.iota(jnp.int32, self.num_embeddings)
one_hot = jnp.array(inputs[..., jnp.newaxis] == iota, dtype=self.dtype)
output = jnp.dot(one_hot, jnp.asarray(self.embedding, self.dtype))
else:
output = jnp.asarray(self.embedding, self.dtype)[inputs]
output = with_sharding_constraint(output, ("batch", "length", "embed"))
return output
|
Embeds the inputs along the last dimension.
Args:
inputs: input data, all dimensions are considered batch dimensions.
Returns:
Output which is embedded input data. The output shape follows the input,
with an additional `features` dimension appended.
|
__call__
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
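The `one_hot` branch above computes the same result as a plain gather, trading an indexed lookup for a matmul (often friendlier to partitioned/TPU execution). A small self-contained check with made-up sizes:
```python
import jax.numpy as jnp
from jax import lax

num_embeddings, features = 10, 4
embedding = jnp.arange(num_embeddings * features, dtype=jnp.float32)
embedding = embedding.reshape(num_embeddings, features)
inputs = jnp.array([[1, 3], [0, 9]])             # [batch, length] integer ids

gathered = embedding[inputs]                     # gather path
iota = lax.iota(jnp.int32, num_embeddings)
one_hot = (inputs[..., jnp.newaxis] == iota).astype(jnp.float32)
via_dot = jnp.dot(one_hot, embedding)            # one-hot contraction path
assert jnp.allclose(gathered, via_dot)
```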
def attend(self, query: Array) -> Array:
"""Attend over the embedding using a query array.
Args:
query: array with last dimension equal to the feature depth `features` of the
embedding.
Returns:
An array with final dim `num_embeddings` corresponding to the batched
inner-product of the array of query vectors against each embedding.
Commonly used for weight-sharing between embeddings and logit transform
in NLP models.
"""
dtype = self.attend_dtype if self.attend_dtype is not None else self.dtype
return jnp.dot(query, jnp.asarray(self.embedding, dtype).T)
|
Attend over the embedding using a query array.
Args:
query: array with last dimension equal to the feature depth `features` of the
embedding.
Returns:
An array with final dim `num_embeddings` corresponding to the batched
inner-product of the array of query vectors against each embedding.
Commonly used for weight-sharing between embeddings and logit transform
in NLP models.
|
attend
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
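The `attend` method above is just a batched inner product against the embedding table, which is how logits are produced when input and output embeddings are tied. A toy sketch (hypothetical sizes):
```python
import jax.numpy as jnp

embedding = jnp.ones((10, 4))           # [num_embeddings, features]
query = jnp.ones((2, 5, 4))             # [batch, length, features]
logits = jnp.dot(query, embedding.T)    # [batch, length, num_embeddings]
print(logits.shape)                     # (2, 5, 10)
```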
def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
"""Translate relative position to a bucket number for relative attention.
The relative position is defined as memory_position - query_position, i.e.
the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are
invalid.
We use smaller buckets for small absolute relative_position and larger
buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative
positions <=-max_distance map to the same bucket. This should allow for
more graceful generalization to longer sequences than the model has been
trained on.
Args:
relative_position: an int32 array
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32
values in the range [0, num_buckets)
"""
ret = 0
n = -relative_position
if bidirectional:
num_buckets //= 2
ret += (n < 0).astype(np.int32) * num_buckets
n = np.abs(n)
else:
n = np.maximum(n, 0)
# now n is in the range [0, inf)
max_exact = num_buckets // 2
is_small = n < max_exact
val_if_large = max_exact + (
np.log(n.astype(np.float32) / max_exact + np.finfo(np.float32).eps)
/ np.log(max_distance / max_exact)
* (num_buckets - max_exact)
).astype(np.int32)
val_if_large = np.minimum(val_if_large, num_buckets - 1)
ret += np.where(is_small, n, val_if_large)
return ret
|
Translate relative position to a bucket number for relative attention.
The relative position is defined as memory_position - query_position, i.e.
the distance in tokens from the attending position to the attended-to
position. If bidirectional=False, then positive relative positions are
invalid.
We use smaller buckets for small absolute relative_position and larger
buckets for larger absolute relative_positions. All relative
positions >=max_distance map to the same bucket. All relative
positions <=-max_distance map to the same bucket. This should allow for
more graceful generalization to longer sequences than the model has been
trained on.
Args:
relative_position: an int32 array
bidirectional: a boolean - whether the attention is bidirectional
num_buckets: an integer
max_distance: an integer
Returns:
a Tensor with the same shape as relative_position, containing int32
values in the range [0, num_buckets)
|
_relative_position_bucket
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
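Treating `_relative_position_bucket` as a standalone function, a quick probe shows the behavior described above: small distances get exact buckets, negative and positive directions land in separate halves, and large distances are log-spaced:
```python
import numpy as np

rel = np.array([-64, -3, 0, 3, 64])
buckets = _relative_position_bucket(
    rel, bidirectional=True, num_buckets=32, max_distance=128
)
print(buckets)  # e.g. [14, 3, 0, 19, 30]: exact small buckets, log-spaced large ones
```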
def __call__(self, qlen, klen, bidirectional=True):
"""Produce relative position embedding attention biases.
Args:
qlen: attention query length.
klen: attention key length.
bidirectional: whether to allow positive memory-query relative position
embeddings.
Returns:
output: `(1, num_heads, q_len, k_len)` attention bias
"""
# TODO(levskaya): should we be computing this w. numpy as a program
# constant?
context_position = np.arange(qlen, dtype=jnp.int32)[:, None]
memory_position = np.arange(klen, dtype=jnp.int32)[None, :]
relative_position = memory_position - context_position # shape (qlen, klen)
rp_bucket = self._relative_position_bucket(
relative_position,
bidirectional=bidirectional,
num_buckets=self.num_buckets,
max_distance=self.max_distance,
)
relative_attention_bias = param_with_axes(
"rel_embedding",
self.embedding_init,
(self.num_heads, self.num_buckets),
jnp.float32,
axes=("heads", "relpos_buckets"),
)
relative_attention_bias = jnp.asarray(relative_attention_bias, self.dtype)
# Instead of using a slow gather, we create a leading-dimension one-hot
# array from rp_bucket and use it to perform the gather-equivalent via a
# contraction, i.e.:
# (num_head, num_buckets) x (num_buckets one-hot, qlen, klen).
# This is equivalent to relative_attention_bias[:, rp_bucket]
bcast_iota = lax.broadcasted_iota(jnp.int32, (self.num_buckets, 1, 1), 0)
rp_bucket_one_hot = jnp.array(rp_bucket[jnp.newaxis, ...] == bcast_iota, dtype=self.dtype)
# --> shape (num_heads, qlen, klen)
values = lax.dot_general(
relative_attention_bias,
rp_bucket_one_hot,
(((1,), (0,)), ((), ())), # lhs, rhs contracting dims
) # no batched dims
# Add a singleton batch dimension.
# --> shape (1, num_heads, qlen, klen)
return values[jnp.newaxis, ...]
|
Produce relative position embedding attention biases.
Args:
qlen: attention query length.
klen: attention key length.
bidirectional: whether to allow positive memory-query relative position
embeddings.
Returns:
output: `(1, num_heads, q_len, k_len)` attention bias
|
__call__
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
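The one-hot contraction above is a gather in disguise; the comment in the code says it equals `relative_attention_bias[:, rp_bucket]`. A small sketch verifying that claim with toy sizes:
```python
import jax.numpy as jnp
from jax import lax

num_heads, num_buckets, qlen, klen = 2, 4, 3, 3
bias = jnp.arange(num_heads * num_buckets, dtype=jnp.float32)
bias = bias.reshape(num_heads, num_buckets)
rp_bucket = jnp.array([[0, 1, 2], [1, 2, 3], [2, 3, 0]])

bcast_iota = lax.broadcasted_iota(jnp.int32, (num_buckets, 1, 1), 0)
one_hot = (rp_bucket[jnp.newaxis, ...] == bcast_iota).astype(jnp.float32)
values = lax.dot_general(bias, one_hot, (((1,), (0,)), ((), ())))  # (heads, q, k)
assert jnp.allclose(values, bias[:, rp_bucket])                    # same as a gather
```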
def __call__(self, x):
"""Applies layer normalization on the input.
Args:
x: the inputs
Returns:
Normalized inputs (the same shape as inputs).
"""
x = jnp.asarray(x, jnp.float32)
features = x.shape[-1]
mean = jnp.mean(x, axis=-1, keepdims=True)
mean2 = jnp.mean(lax.square(x), axis=-1, keepdims=True)
var = mean2 - lax.square(mean)
mul = lax.rsqrt(var + self.epsilon)
if self.use_scale:
scale = param_with_axes(
"scale",
self.scale_init,
(features,),
self.params_dtype,
axes=("embed",),
)
mul = mul * jnp.asarray(scale, self.dtype)
y = (x - mean) * mul
if self.use_bias:
bias = param_with_axes("bias", self.bias_init, (features,), self.params_dtype, axes=("embed",))
y = y + jnp.asarray(bias, self.dtype)
return jnp.asarray(y, self.dtype)
|
Applies layer normalization on the input.
Args:
x: the inputs
Returns:
Normalized inputs (the same shape as inputs).
|
__call__
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
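The layer norm above computes the variance as E[x^2] - E[x]^2 rather than via a second pass over centered values. A minimal check that this matches `jnp.var`:
```python
import jax.numpy as jnp
from jax import lax

x = jnp.array([[1.0, 2.0, 3.0, 4.0]])
mean = jnp.mean(x, axis=-1, keepdims=True)
mean2 = jnp.mean(lax.square(x), axis=-1, keepdims=True)
var = mean2 - lax.square(mean)                       # E[x^2] - E[x]^2
assert jnp.allclose(var, jnp.var(x, axis=-1, keepdims=True))
y = (x - mean) * lax.rsqrt(var + 1e-6)               # normalized activations
```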
def make_attention_mask(
query_input: Array,
key_input: Array,
pairwise_fn: Callable = jnp.multiply,
extra_batch_dims: int = 0,
dtype: DType = jnp.float32,
) -> Array:
"""Mask-making helper for attention weights.
In case of 1d inputs (i.e., `[batch, len_q]`, `[batch, len_kv]`), the
attention weights will be `[batch, heads, len_q, len_kv]` and this
function will produce `[batch, 1, len_q, len_kv]`.
Args:
query_input: a batched, flat input of query_length size
key_input: a batched, flat input of key_length size
pairwise_fn: broadcasting elementwise comparison function
extra_batch_dims: number of extra batch dims to add singleton axes for, none
by default
dtype: mask return dtype
Returns:
A `[batch, 1, len_q, len_kv]` shaped mask for 1d attention.
"""
# [batch, len_q, len_kv]
mask = pairwise_fn(
# [batch, len_q] -> [batch, len_q, 1]
jnp.expand_dims(query_input, axis=-1),
# [batch, len_q] -> [batch, 1, len_kv]
jnp.expand_dims(key_input, axis=-2),
)
# [batch, 1, len_q, len_kv]. This creates the head dim.
mask = jnp.expand_dims(mask, axis=-3)
mask = jnp.expand_dims(mask, axis=tuple(range(extra_batch_dims)))
return mask.astype(dtype)
|
Mask-making helper for attention weights.
In case of 1d inputs (i.e., `[batch, len_q]`, `[batch, len_kv]`), the
attention weights will be `[batch, heads, len_q, len_kv]` and this
function will produce `[batch, 1, len_q, len_kv]`.
Args:
query_input: a batched, flat input of query_length size
key_input: a batched, flat input of key_length size
pairwise_fn: broadcasting elementwise comparison function
extra_batch_dims: number of extra batch dims to add singleton axes for, none
by default
dtype: mask return dtype
Returns:
A `[batch, 1, len_q, len_kv]` shaped mask for 1d attention.
|
make_attention_mask
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
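Reusing the helper above, a typical use is a padding mask built from token ids (0 = pad), which the default `jnp.multiply` turns into an outer product of the two validity vectors:
```python
import jax.numpy as jnp

tokens = jnp.array([[5, 7, 0]])                     # [batch, len], 0 is padding
mask = make_attention_mask(tokens > 0, tokens > 0)  # [batch, 1, len, len]
print(mask[0, 0])
# [[1. 1. 0.]
#  [1. 1. 0.]
#  [0. 0. 0.]]
```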
def make_causal_mask(x: Array, extra_batch_dims: int = 0, dtype: DType = jnp.float32) -> Array:
"""Make a causal mask for self-attention.
In case of 1d inputs (i.e., `[batch, len]`), the self-attention weights
will be `[batch, heads, len, len]` and this function will produce a
causal mask of shape `[batch, 1, len, len]`.
Note that a causal mask does not depend on the values of x; it only depends on
the shape. If x has padding elements, they will not be treated in a special
manner.
Args:
x: input array of shape `[batch, len]`
extra_batch_dims: number of batch dims to add singleton axes for, none by
default
dtype: mask return dtype
Returns:
A `[batch, 1, len, len]` shaped causal mask for 1d attention.
"""
idxs = jnp.broadcast_to(jnp.arange(x.shape[-1], dtype=jnp.int32), x.shape)
return make_attention_mask(idxs, idxs, jnp.greater_equal, extra_batch_dims=extra_batch_dims, dtype=dtype)
|
Make a causal mask for self-attention.
In case of 1d inputs (i.e., `[batch, len]`), the self-attention weights
will be `[batch, heads, len, len]` and this function will produce a
causal mask of shape `[batch, 1, len, len]`.
Note that a causal mask does not depend on the values of x; it only depends on
the shape. If x has padding elements, they will not be treated in a special
manner.
Args:
x: input array of shape `[batch, len]`
extra_batch_dims: number of batch dims to add singleton axes for, none by
default
dtype: mask return dtype
Returns:
A `[batch, 1, len, len]` shaped causal mask for 1d attention.
|
make_causal_mask
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
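Since only the shape of `x` matters, a causal mask can be probed with any placeholder array; the result is lower-triangular, so position i attends only to positions j <= i:
```python
import jax.numpy as jnp

x = jnp.zeros((1, 4))            # only the shape is used
causal = make_causal_mask(x)     # [1, 1, 4, 4]
print(causal[0, 0])
# [[1. 0. 0. 0.]
#  [1. 1. 0. 0.]
#  [1. 1. 1. 0.]
#  [1. 1. 1. 1.]]
```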
def combine_masks(*masks: Optional[Array], dtype: DType = jnp.float32):
"""Combine attention masks.
Args:
*masks: set of attention mask arguments to combine, some can be None.
dtype: final mask dtype
Returns:
Combined mask, reduced by logical and, returns None if no masks given.
"""
masks = [m for m in masks if m is not None]
if not masks:
return None
assert all(
(x.ndim == masks[0].ndim for x in masks)
), f"masks must have same rank: {tuple((x.ndim for x in masks))}"
mask, *other_masks = masks
for other_mask in other_masks:
mask = jnp.logical_and(mask, other_mask)
return mask.astype(dtype)
|
Combine attention masks.
Args:
*masks: set of attention mask arguments to combine, some can be None.
dtype: final mask dtype
Returns:
Combined mask, reduced by logical and, returns None if no masks given.
|
combine_masks
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
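A common pattern is combining the causal and padding masks from the helpers above; `combine_masks` reduces them with a logical AND:
```python
import jax.numpy as jnp

tokens = jnp.array([[5, 7, 0, 0]])                     # 0 is padding
causal = make_causal_mask(tokens)                      # [1, 1, 4, 4]
padding = make_attention_mask(tokens > 0, tokens > 0)  # [1, 1, 4, 4]
mask = combine_masks(causal, padding)                  # AND of the two
```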
def combine_biases(*masks: Optional[Array]):
"""Combine attention biases.
Args:
*masks: set of attention bias arguments to combine, some can be None.
Returns:
Combined mask, reduced by summation, returns None if no masks given.
"""
masks = [m for m in masks if m is not None]
if not masks:
return None
assert all(
(x.ndim == masks[0].ndim for x in masks)
), f"masks must have same rank: {tuple((x.ndim for x in masks))}"
mask, *other_masks = masks
for other_mask in other_masks:
mask = mask + other_mask
return mask
|
Combine attention biases.
Args:
*masks: set of attention bias arguments to combine, some can be None.
Returns:
Combined mask, reduced by summation, returns None if no masks given.
|
combine_biases
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
def make_decoder_mask(
decoder_target_tokens: Array,
dtype: DType,
decoder_causal_attention: Optional[Array] = None,
decoder_segment_ids: Optional[Array] = None,
) -> Array:
"""Compute the self-attention mask for a decoder.
Decoder mask is formed by combining a causal mask, a padding mask and an
optional packing mask. If decoder_causal_attention is passed, it makes the
masking non-causal for positions that have value of 1.
A prefix LM is applied to a dataset which has a notion of "inputs" and
"targets", e.g., a machine translation task. The inputs and targets are
concatenated to form a new target. `decoder_target_tokens` is the concatenated
decoder output tokens.
The "inputs" portion of the concatenated sequence can attend to other "inputs"
tokens even for those at a later time steps. In order to control this
behavior, `decoder_causal_attention` is necessary. This is a binary mask with
a value of 1 indicating that the position belonged to "inputs" portion of the
original dataset.
Example:
Suppose we have a dataset with two examples.
ds = [{"inputs": [6, 7], "targets": [8]},
{"inputs": [3, 4], "targets": [5]}]
After the data preprocessing with packing, the two examples are packed into
one example with the following three fields (some fields are skipped for
simplicity).
decoder_target_tokens = [[6, 7, 8, 3, 4, 5, 0]]
decoder_segment_ids = [[1, 1, 1, 2, 2, 2, 0]]
decoder_causal_attention = [[1, 1, 0, 1, 1, 0, 0]]
where each array has [batch, length] shape with batch size being 1. Then,
this function computes the following mask.
mask = [[[[1, 1, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]]]]
mask[b, 0, :, :] represents the mask for the example `b` in the batch.
Because mask is for a self-attention layer, the mask's shape is a square of
shape [query length, key length].
mask[b, 0, i, j] = 1 means that the query token at position i can attend to
the key token at position j.
Args:
decoder_target_tokens: decoder output tokens. [batch, length]
dtype: dtype of the output mask.
decoder_causal_attention: a binary mask with value 1 for positions that may
attend bidirectionally (the "inputs" portion); all other positions attend
causally, i.e. only to earlier positions in the sequence. [batch, length]
decoder_segment_ids: decoder segmentation info for packed examples. [batch,
length]
Returns:
the combined decoder mask.
"""
masks = []
# The same mask is applied to all attention heads. So the head dimension is 1,
# i.e., the mask will be broadcast along the heads dim.
# [batch, 1, length, length]
causal_mask = make_causal_mask(decoder_target_tokens, dtype=dtype)
# Positions with value 1 in `decoder_causal_attention` can attend
# bidirectionally.
if decoder_causal_attention is not None:
# [batch, 1, length, length]
inputs_mask = make_attention_mask(
decoder_causal_attention,
decoder_causal_attention,
jnp.logical_and,
dtype=dtype,
)
masks.append(jnp.logical_or(causal_mask, inputs_mask).astype(dtype))
else:
masks.append(causal_mask)
# Padding mask.
masks.append(make_attention_mask(decoder_target_tokens > 0, decoder_target_tokens > 0, dtype=dtype))
# Packing mask
if decoder_segment_ids is not None:
masks.append(make_attention_mask(decoder_segment_ids, decoder_segment_ids, jnp.equal, dtype=dtype))
return combine_masks(*masks, dtype=dtype)
|
Compute the self-attention mask for a decoder.
Decoder mask is formed by combining a causal mask, a padding mask and an
optional packing mask. If decoder_causal_attention is passed, it makes the
masking non-causal for positions that have value of 1.
A prefix LM is applied to a dataset which has a notion of "inputs" and
"targets", e.g., a machine translation task. The inputs and targets are
concatenated to form a new target. `decoder_target_tokens` is the concatenated
decoder output tokens.
The "inputs" portion of the concatenated sequence can attend to other "inputs"
tokens even for those at a later time steps. In order to control this
behavior, `decoder_causal_attention` is necessary. This is a binary mask with
a value of 1 indicating that the position belonged to "inputs" portion of the
original dataset.
Example:
Suppose we have a dataset with two examples.
ds = [{"inputs": [6, 7], "targets": [8]},
{"inputs": [3, 4], "targets": [5]}]
After the data preprocessing with packing, the two examples are packed into
one example with the following three fields (some fields are skipped for
simplicity).
decoder_target_tokens = [[6, 7, 8, 3, 4, 5, 0]]
decoder_segment_ids = [[1, 1, 1, 2, 2, 2, 0]]
decoder_causal_attention = [[1, 1, 0, 1, 1, 0, 0]]
where each array has [batch, length] shape with batch size being 1. Then,
this function computes the following mask.
mask = [[[[1, 1, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0]]]]
mask[b, 0, :, :] represents the mask for the example `b` in the batch.
Because mask is for a self-attention layer, the mask's shape is a square of
shape [query length, key length].
mask[b, 0, i, j] = 1 means that the query token at position i can attend to
the key token at position j.
Args:
decoder_target_tokens: decoder output tokens. [batch, length]
dtype: dtype of the output mask.
decoder_causal_attention: a binary mask with value 1 for positions that may
attend bidirectionally (the "inputs" portion); all other positions attend
causally, i.e. only to earlier positions in the sequence. [batch, length]
decoder_segment_ids: decoder segmentation info for packed examples. [batch,
length]
Returns:
the combined decoder mask.
|
make_decoder_mask
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
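The packed prefix-LM example in the docstring above can be reproduced directly, which is a useful sanity check when adapting the masking logic:
```python
import jax.numpy as jnp

decoder_target_tokens = jnp.array([[6, 7, 8, 3, 4, 5, 0]])
decoder_segment_ids = jnp.array([[1, 1, 1, 2, 2, 2, 0]])
decoder_causal_attention = jnp.array([[1, 1, 0, 1, 1, 0, 0]])

mask = make_decoder_mask(
    decoder_target_tokens,
    dtype=jnp.float32,
    decoder_causal_attention=decoder_causal_attention,
    decoder_segment_ids=decoder_segment_ids,
)
print(mask[0, 0].astype(jnp.int32))  # matches the 7x7 mask in the docstring
```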
def canonicalize_padding(padding: PaddingLike, rank: int) -> LaxPadding:
""" "Canonicalizes conv padding to a jax.lax supported format."""
if isinstance(padding, str):
return padding
if isinstance(padding, int):
return [(padding, padding)] * rank
if isinstance(padding, Sequence) and len(padding) == rank:
new_pad = []
for p in padding:
if isinstance(p, int):
new_pad.append((p, p))
elif isinstance(p, tuple) and len(p) == 2:
new_pad.append(p)
else:
break
if len(new_pad) == rank:
return new_pad
raise ValueError(
f"Invalid padding format: {padding}, should be str, int,"
f" or a sequence of len {rank} where each element is an"
" int or pair of ints."
)
|
"Canonicalizes conv padding to a jax.lax supported format.
|
canonicalize_padding
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
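A few inputs and their canonical forms, illustrating the three accepted formats (string, int, and per-dimension sequence):
```python
print(canonicalize_padding("SAME", 2))       # 'SAME' (passed through)
print(canonicalize_padding(1, 2))            # [(1, 1), (1, 1)]
print(canonicalize_padding([(0, 1), 2], 2))  # [(0, 1), (2, 2)]
```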
def _conv_dimension_numbers(input_shape):
"""Computes the dimension numbers based on the input shape."""
ndim = len(input_shape)
lhs_spec = (0, ndim - 1) + tuple(range(1, ndim - 1))
rhs_spec = (ndim - 1, ndim - 2) + tuple(range(0, ndim - 2))
out_spec = lhs_spec
return lax.ConvDimensionNumbers(lhs_spec, rhs_spec, out_spec)
|
Computes the dimension numbers based on the input shape.
|
_conv_dimension_numbers
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
def __call__(self, inputs: Array) -> Array:
"""Applies a (potentially unshared) convolution to the inputs.
Args:
inputs: input data with dimensions (*batch_dims, spatial_dims...,
features). This is the channels-last convention, i.e. NHWC for a 2d
convolution and NDHWC for a 3D convolution. Note: this is different from
the input convention used by `lax.conv_general_dilated`, which puts the
spatial dimensions last.
Note: If the input has more than 1 batch dimension, all batch dimensions
are flattened into a single dimension for the convolution and restored
before returning. In some cases directly vmap'ing the layer may yield
better performance than this default flattening approach. If the input
lacks a batch dimension it will be added for the convolution and removed
on return, an allowance made to enable writing single-example code.
Returns:
The convolved data.
"""
if isinstance(self.kernel_size, int):
raise TypeError(
"Expected Conv kernel_size to be a"
" tuple/list of integers (eg.: [3, 3]) but got"
f" {self.kernel_size}."
)
else:
kernel_size = tuple(self.kernel_size)
def maybe_broadcast(x: Optional[Union[int, Sequence[int]]]) -> Tuple[int, ...]:
if x is None:
# backward compatibility with using None as sentinel for
# broadcast 1
x = 1
if isinstance(x, int):
return (x,) * len(kernel_size)
return tuple(x)
# Combine all input batch dimensions into a single leading batch axis.
num_batch_dimensions = inputs.ndim - (len(kernel_size) + 1)
if num_batch_dimensions != 1:
input_batch_shape = inputs.shape[:num_batch_dimensions]
total_batch_size = int(np.prod(input_batch_shape))
flat_input_shape = (total_batch_size,) + inputs.shape[num_batch_dimensions:]
inputs = jnp.reshape(inputs, flat_input_shape)
# self.strides or (1,) * (inputs.ndim - 2)
strides = maybe_broadcast(self.strides)
input_dilation = maybe_broadcast(self.input_dilation)
kernel_dilation = maybe_broadcast(self.kernel_dilation)
padding_lax = canonicalize_padding(self.padding, len(kernel_size))
if padding_lax == "CIRCULAR":
kernel_size_dilated = [(k - 1) * d + 1 for k, d in zip(kernel_size, kernel_dilation)]
zero_pad: List[Tuple[int, int]] = [(0, 0)]
pads = zero_pad + [((k - 1) // 2, k // 2) for k in kernel_size_dilated] + [(0, 0)]
inputs = jnp.pad(inputs, pads, mode="wrap")
padding_lax = "VALID"
elif padding_lax == "CAUSAL":
if len(kernel_size) != 1:
raise ValueError("Causal padding is only implemented for 1D convolutions.")
left_pad = kernel_dilation[0] * (kernel_size[0] - 1)
pads = [(0, 0), (left_pad, 0), (0, 0)]
inputs = jnp.pad(inputs, pads)
padding_lax = "VALID"
dimension_numbers = _conv_dimension_numbers(inputs.shape)
in_features = jnp.shape(inputs)[-1]
if self.shared_weights:
# One shared convolutional kernel for all pixels in the output.
assert in_features % self.feature_group_count == 0
kernel_shape = kernel_size + (
in_features // self.feature_group_count,
self.features,
)
else:
if self.feature_group_count != 1:
raise NotImplementedError(
"`lax.conv_general_dilated_local` does not support "
f"`feature_group_count != 1`, got `{self.feature_group_count}`."
)
# Need to know the spatial output shape of a standard convolution to
# create the unshared convolution kernel.
conv_output_shape = jax.eval_shape(
lambda lhs, rhs: self.conv_general_dilated( # pylint: disable=g-long-lambda
lhs=lhs,
rhs=rhs,
window_strides=strides,
padding=padding_lax,
dimension_numbers=dimension_numbers,
lhs_dilation=input_dilation,
rhs_dilation=kernel_dilation,
),
inputs,
jax.ShapedArray(kernel_size + (in_features, self.features), inputs.dtype),
).shape
# One (unshared) convolutional kernel per each pixel in the output.
kernel_shape = conv_output_shape[1:-1] + (
np.prod(kernel_size) * in_features,
self.features,
)
if self.mask is not None and self.mask.shape != kernel_shape:
raise ValueError(
"Mask needs to have the same shape as weights. " f"Shapes are: {self.mask.shape}, {kernel_shape}"
)
kernel = param_with_axes(
"kernel",
self.kernel_init,
kernel_shape,
self.params_dtype,
axes=self.kernel_axes,
)
if self.mask is not None:
kernel *= self.mask
if self.use_bias:
if self.shared_weights:
# One bias weight per output channel, shared between pixels.
bias_shape = (self.features,)
else:
# One bias weight per output entry, unshared between pixels.
bias_shape = conv_output_shape[1:]
bias = param_with_axes(
"bias",
self.bias_init,
bias_shape,
self.params_dtype,
axes=(self.kernel_axes[-1],),
)
else:
bias = None
inputs, kernel, bias = promote_dtype(inputs, kernel, bias, dtype=self.dtype)
if self.shared_weights:
y = self.conv_general_dilated(
inputs,
kernel,
strides,
padding_lax,
lhs_dilation=input_dilation,
rhs_dilation=kernel_dilation,
dimension_numbers=dimension_numbers,
feature_group_count=self.feature_group_count,
precision=self.precision,
)
else:
y = lax.conv_general_dilated_local(
lhs=inputs,
rhs=kernel,
window_strides=strides,
padding=padding_lax,
filter_shape=kernel_size,
lhs_dilation=input_dilation,
rhs_dilation=kernel_dilation,
dimension_numbers=dimension_numbers,
precision=self.precision,
)
if self.use_bias:
bias = bias.reshape((1,) * (y.ndim - bias.ndim) + bias.shape)
y += bias
if num_batch_dimensions != 1:
output_shape = input_batch_shape + y.shape[1:]
y = jnp.reshape(y, output_shape)
return y
|
Applies a (potentially unshared) convolution to the inputs.
Args:
inputs: input data with dimensions (*batch_dims, spatial_dims...,
features). This is the channels-last convention, i.e. NHWC for a 2d
convolution and NDHWC for a 3D convolution. Note: this is different from
the input convention used by `lax.conv_general_dilated`, which puts the
spatial dimensions last.
Note: If the input has more than 1 batch dimension, all batch dimensions
are flattened into a single dimension for the convolution and restored
before returning. In some cases directly vmap'ing the layer may yield
better performance than this default flattening approach. If the input
lacks a batch dimension it will be added for the convolution and removed
on return, an allowance made to enable writing single-example code.
Returns:
The convolved data.
|
__call__
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/layers.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/layers.py
|
MIT
|
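The `CAUSAL` branch above left-pads by `kernel_dilation * (kernel_size - 1)` so each output position sees only current and past inputs. A small sketch of that arithmetic (hypothetical sizes):
```python
import jax.numpy as jnp

kernel_size, kernel_dilation = (3,), (2,)
left_pad = kernel_dilation[0] * (kernel_size[0] - 1)   # = 4
x = jnp.ones((1, 8, 2))                                # [batch, length, features]
x_padded = jnp.pad(x, [(0, 0), (left_pad, 0), (0, 0)])
print(x_padded.shape)                                  # (1, 12, 2), then 'VALID' conv
```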
def convert_unroll_to_scan(self, params: Union[Dict, FrozenDict]):
r"""
Convert a `PyTree` of unrolled model parameters to a scanned block of model parameters. This method can be used
to explicitly convert the model parameters to scanned format. This returns a new `params` tree and does not
convert the `params` in place.
To illustrate the workings of this method, take the Flax BERT model. The unrolled structure for the query
projection params is as follows:
('bert', 'encoder', 'layer', '0', 'self_attn', 'q_proj') ('bert', 'encoder', 'layer', '1', 'self_attn',
'q_proj') ... ('bert', 'encoder', 'layer', '23', 'self_attn', 'q_proj')
This method takes each of the `q_proj` matrices for layers (0, ..., 23) and stacks them into a single 'super'
matrix, giving a *single* block of weights for all 24 layers compatible with the scanned model:
('bert', 'encoder', 'layer', 'ScanLayers', 'self_attn', 'q_proj')
When enabling scan with _do_init=True (default), this method will be called automatically under the hood. With
_do_init=False, it will have to be called explicitly (see example below).
Arguments:
params (`Union[Dict, FrozenDict]`):
A `PyTree` of model parameters.
Examples:
```python
>>> from distil_whisper import FlaxWhisperForConditionalGeneration
>>> # Download model and configuration from huggingface.co
>>> model, params = FlaxWhisperModel.from_pretrained("openai/whisper-tiny.en", _do_init=False)
>>> # By default, the model params will be in unrolled format. To illustrate the use of this method,
>>> # we'll first convert to scan format and then back to unrolled
>>> model.enable_scan()
>>> params = model.convert_unroll_to_scan(params)
>>> # now convert back to unrolled
>>> model.disable_scan()
>>> params = model.convert_scan_to_unroll(params)
```"""
if isinstance(params, FrozenDict):
params = unfreeze(params)
params = flatten_dict(params, sep="/")
keys = list(params.keys())
for k in keys:
# Identify all "unrolled" layers formed as part of the FlaxBertLayerCollection
# These params contain the identifier `layer` in their key
if "layers/0" in k:
if "decoder" in k:
block_prefix = "Decoder"
num_hidden_layers = self.config.decoder_layers
else:
block_prefix = "Encoder"
num_hidden_layers = self.config.encoder_layers
# Squash the keys for the N unrolled layers into one single key:
# (layer/0, ..., layer/N) -> layer/FlaxScanLayers
scan_key = k.replace("0", f"Flax{block_prefix}ScanLayers")
stacked_params = []
# Iterate over the unrolled layers (1,...,N)
for i in range(num_hidden_layers):
# Stack the params for the N layers into one super block
# and remove the unrolled layer params on the fly
# -> no memory overhead for conversion!
unrolled_layer = params.pop(k.replace("0", str(i)))
stacked_params.append(unrolled_layer)
params[scan_key] = jnp.stack(stacked_params)
# Finally, unflatten the dict to restore the nested pytree structure
params = unflatten_dict(params, sep="/")
return params
|
Convert a `PyTree` of unrolled model parameters to a scanned block of model parameters. This method can be used
to explicitly convert the model parameters to scanned format. This returns a new `params` tree and does not
convert the `params` in place.
To illustrate the workings of this method, take the Flax BERT model. The unrolled structure for the query
projection params is as follows:
('bert', 'encoder', 'layer', '0', 'self_attn', 'q_proj') ('bert', 'encoder', 'layer', '1', 'self_attn',
'q_proj') ... ('bert', 'encoder', 'layer', '23', 'self_attn', 'q_proj')
This method takes each of the `q_proj` matrices for layers (0, ..., 23) and stacks them into a single 'super'
matrix, giving a *single* block of weights for all 24 layers compatible with the scanned model:
('bert', 'encoder', 'layer', 'ScanLayers', 'self_attn', 'q_proj')
When enabling scan with _do_init=True (default), this method will be called automatically under the hood. With
_do_init=False, it will have to be called explicitly (see example below).
Arguments:
params (`Union[Dict, FrozenDict]`):
A `PyTree` of model parameters.
Examples:
```python
>>> from distil_whisper import FlaxWhisperForConditionalGeneration
>>> # Download model and configuration from huggingface.co
>>> model, params = FlaxWhisperModel.from_pretrained("openai/whisper-tiny.en", _do_init=False)
>>> # By default, the model params will be in unrolled format. To illustrate the use of this method,
>>> # we'll first convert to scan format and then back to unrolled
>>> model.enable_scan()
>>> params = model.convert_unroll_to_scan(params)
>>> # now convert back to unrolled
>>> model.disable_scan()
>>> params = model.convert_scan_to_unroll(params)
```
|
convert_unroll_to_scan
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/modeling_flax_whisper.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/modeling_flax_whisper.py
|
MIT
|
def convert_scan_to_unroll(self, params: Union[Dict, FrozenDict]):
r"""
Convert a `PyTree` of scanned model parameters to an unrolled stack of model parameters. This method can be
used to explicitly convert the model parameters to unrolled format. This returns a new `params` tree and does
not convert the `params` in place.
To illustrate the workings of this method, take the Flax BERT model. The scanned structure for the query
projection (`q_proj`) params is a single, stacked matrix of parameters over all N layers:
('bert', 'encoder', 'layer', 'FlaxScanLayers', 'self_attn', 'q_proj')
This method slices each layer of the `q_proj` scanned matrix into single, standalone layers, and replaces the
scanned matrix of parameters on the fly:
('bert', 'encoder', 'layer', '0', 'self_attn', 'q_proj') ('bert', 'encoder', 'layer', '1', 'self_attn',
'q_proj') ... ('bert', 'encoder', 'layer', 'N', 'self_attn', 'q_proj')
When enabling scan with _do_init=True (default), this method will be called automatically under the hood. With
_do_init=False, it will have to be called explicitly (see example below).
Arguments:
params (`Union[Dict, FrozenDict]`):
A `PyTree` of model parameters.
Examples:
```python
>>> from distil_whisper import FlaxWhisperForConditionalGeneration
>>> # Download model and configuration from huggingface.co
>>> model, params = FlaxWhisperModel.from_pretrained("openai/whisper-tiny.en", _do_init=False)
>>> # By default, the model params will be in unrolled format. To illustrate the use of this method,
>>> # we'll first convert to scan format and then back to unrolled
>>> model.enable_scan()
>>> params = model.convert_unroll_to_scan(params)
>>> # now convert back to unrolled
>>> model.disable_scan()
>>> params = model.convert_scan_to_unroll(params)
```"""
if isinstance(params, FrozenDict):
params = unfreeze(params)
params = flatten_dict(params, sep="/")
keys = list(params.keys())
for k in keys:
# Identify all "scan" layers formed as part of the FlaxBertLayerCollection
# These params contain the identifier `FlaxScanLayers` in their key
if "FlaxEncoderScanLayers" in k:
# Remove the scan layer from the PyTree of params
scan_layer = params.pop(k)
# Unroll the key for the stacked scan matrix into N separate keys, indexed by layer number
# layer/FlaxScanLayers -> (layer/0, ..., layer/N)
for i in range(self.config.encoder_layers):
# Unstack the params for the i-th scan layer to unrolled
# and remove corresponding scan params on the fly
# -> no memory overhead for conversion!
unrolled_key = k.replace("FlaxEncoderScanLayers", str(i))
params[unrolled_key], scan_layer = scan_layer[0], scan_layer[1:]
elif "FlaxDecoderScanLayers" in k:
# Remove the scan layer from the PyTree of params
scan_layer = params.pop(k)
# Unroll the key for the stacked scan matrix into N separate keys, indexed by layer number
# layer/FlaxScanLayers -> (layer/0, ..., layer/N)
for i in range(self.config.decoder_layers):
# Unstack the params for the i-th scan layer to unrolled
# and remove corresponding scan params on the fly
# -> no memory overhead for conversion!
unrolled_key = k.replace("FlaxDecoderScanLayers", str(i))
params[unrolled_key], scan_layer = scan_layer[0], scan_layer[1:]
params = unflatten_dict(params, sep="/")
return params
|
Convert a `PyTree` of scanned model parameters to an unrolled stack of model parameters. This method can be
used to explicitly convert the model parameters to unrolled format. This returns a new `params` tree and does
not convert the `params` in place.
To illustrate the workings of this method, take the Flax BERT model. The scanned structure for the query
projection (`q_proj`) params is a single, stacked matrix of parameters over all N layers:
('bert', 'encoder', 'layer', 'FlaxScanLayers', 'self_attn', 'q_proj')
This method slices each layer of the `q_proj` scanned matrix into single, standalone layers, and replaces the
scanned matrix of parameters on the fly:
('bert', 'encoder', 'layer', '0', 'self_attn', 'q_proj') ('bert', 'encoder', 'layer', '1', 'self_attn',
'q_proj') ... ('bert', 'encoder', 'layer', 'N', 'self_attn', 'q_proj')
When enabling scan with _do_init=True (default), this method will be called automatically under the hood. With
_do_init=False, it will have to be called explicitly (see example below).
Arguments:
params (`Union[Dict, FrozenDict]`):
A `PyTree` of model parameters.
Examples:
```python
>>> from distil_whisper import FlaxWhisperForConditionalGeneration
>>> # Download model and configuration from huggingface.co
>>> model, params = FlaxWhisperModel.from_pretrained("openai/whisper-tiny.en", _do_init=False)
>>> # By default, the model params will be in unrolled format. To illustrate the use of this method,
>>> # we'll first convert to scan format and then back to unrolled
>>> model.enable_scan()
>>> params = model.convert_unroll_to_scan(params)
>>> # now convert back to unrolled
>>> model.disable_scan()
>>> params = model.convert_scan_to_unroll(params)
```
|
convert_scan_to_unroll
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/modeling_flax_whisper.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/modeling_flax_whisper.py
|
MIT
|
def init_cache(self, batch_size, max_length, encoder_outputs):
r"""
Args:
batch_size (`int`):
batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
max_length (`int`):
maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
cache.
encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
`encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
`attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*,
is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross-attention of the decoder.
"""
# init input variables to retrieve cache
decoder_input_ids = jnp.ones((batch_size, max_length), dtype="i4")
decoder_attention_mask = jnp.ones_like(decoder_input_ids)
decoder_position_ids = jnp.broadcast_to(
jnp.arange(jnp.atleast_2d(decoder_input_ids).shape[-1]),
decoder_input_ids.shape,
)
def _decoder_forward(
module,
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
**kwargs,
):
decoder_module = module._get_decoder_module()
return decoder_module(
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
**kwargs,
)
init_variables = self.module.init(
jax.random.PRNGKey(0),
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
decoder_position_ids=decoder_position_ids,
encoder_hidden_states=encoder_outputs[0],
init_cache=True,
method=_decoder_forward, # we only need to call the decoder to init the cache
)
return unfreeze(init_variables["cache"])
|
Args:
batch_size (`int`):
batch_size used for fast auto-regressive decoding. Defines the batch size of the initialized cache.
max_length (`int`):
maximum possible length for auto-regressive decoding. Defines the sequence length of the initialized
cache.
encoder_outputs (`Union[FlaxBaseModelOutput, tuple(tuple(jnp.ndarray))]`):
`encoder_outputs` consists of (`last_hidden_state`, *optional*: `hidden_states`, *optional*:
`attentions`). `last_hidden_state` of shape `(batch_size, sequence_length, hidden_size)`, *optional*,
is a sequence of hidden-states at the output of the last layer of the encoder. Used in the
cross-attention of the decoder.
|
init_cache
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/modeling_flax_whisper.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/modeling_flax_whisper.py
|
MIT
|
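For context, a short usage sketch in the style of the other examples in this file; it assumes the `model` and `encoder_outputs` objects from the `encode` example below:
```python
>>> # Initialize a decoding cache for autoregressive generation up to 64 tokens
>>> past_key_values = model.init_cache(batch_size=1, max_length=64, encoder_outputs=encoder_outputs)
>>> # `past_key_values` is then threaded through successive `model.decode` calls
```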
def encode(
self,
input_features: jnp.ndarray,
attention_mask: Optional[jnp.ndarray] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
**kwargs,
):
r"""
Returns:
Example:
```python
>>> from transformers import WhisperProcessor, FlaxWhisperForConditionalGeneration
>>> from datasets import load_dataset
>>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
>>> model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True)
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = processor(ds[0]["audio"]["array"], return_tensors="np")
>>> input_features = inputs.input_features
>>> encoder_outputs = model.encode(input_features=input_features)
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
def _encoder_forward(module, input_features, **kwargs):
encode_module = module._get_encoder_module()
return encode_module(input_features, **kwargs)
return self.module.apply(
{"params": params or self.params},
input_features=jnp.array(input_features, dtype="f4"),
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
method=_encoder_forward,
)
|
Returns:
Example:
```python
>>> from transformers import WhisperProcessor, FlaxWhisperForConditionalGeneration
>>> from datasets import load_dataset
>>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
>>> model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True)
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = processor(ds[0]["audio"]["array"], return_tensors="np")
>>> input_features = inputs.input_features
>>> encoder_outputs = model.encode(input_features=input_features)
```
|
encode
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/modeling_flax_whisper.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/modeling_flax_whisper.py
|
MIT
|
def decode(
self,
decoder_input_ids,
encoder_outputs,
encoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_attention_mask: Optional[jnp.ndarray] = None,
decoder_position_ids: Optional[jnp.ndarray] = None,
past_key_values: dict = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
train: bool = False,
params: dict = None,
dropout_rng: PRNGKey = None,
):
r"""
Returns:
Example:
```python
>>> from transformers import WhisperProcessor, FlaxWhisperForConditionalGeneration
>>> from datasets import load_dataset
>>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
>>> model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True)
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = processor(ds[0]["audio"]["array"], return_tensors="np")
>>> input_features = inputs.input_features
>>> encoder_outputs = model.encode(input_features=input_features)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((input_features.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> last_decoder_hidden_states = outputs.last_hidden_state
```"""
output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
output_hidden_states = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
return_dict = return_dict if return_dict is not None else self.config.return_dict
encoder_hidden_states = encoder_outputs[0]
batch_size, sequence_length = decoder_input_ids.shape
if decoder_position_ids is None:
if past_key_values is not None:
raise ValueError("Make sure to provide `decoder_position_ids` when passing `past_key_values`.")
if decoder_attention_mask is not None:
decoder_position_ids = (decoder_attention_mask.cumsum(-1) * decoder_attention_mask) - 1
else:
decoder_position_ids = jnp.broadcast_to(
jnp.arange(sequence_length)[None, :], (batch_size, sequence_length)
)
if decoder_attention_mask is None:
decoder_attention_mask = jnp.ones((batch_size, sequence_length))
# Handle any PRNG if needed
rngs = {}
if dropout_rng is not None:
rngs["dropout"] = dropout_rng
inputs = {"params": params or self.params}
# If past_key_values are passed, the cache is already initialized; a private flag init_cache has to be
# passed down to ensure the cache is used. The cache must also be marked as mutable so that
# it can be changed by the FlaxWhisperAttention module.
if past_key_values:
inputs["cache"] = past_key_values
mutable = ["cache"]
else:
mutable = False
def _decoder_forward(
module,
decoder_input_ids,
decoder_attention_mask,
decoder_position_ids,
**kwargs,
):
decoder_module = module._get_decoder_module()
return decoder_module(
input_ids=decoder_input_ids,
attention_mask=decoder_attention_mask,
position_ids=decoder_position_ids,
**kwargs,
)
outputs = self.module.apply(
inputs,
decoder_input_ids=jnp.array(decoder_input_ids, dtype="i4"),
decoder_attention_mask=jnp.array(decoder_attention_mask, dtype="i4"),
decoder_position_ids=jnp.array(decoder_position_ids, dtype="i4"),
encoder_hidden_states=encoder_hidden_states,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
deterministic=not train,
rngs=rngs,
mutable=mutable,
method=_decoder_forward,
)
# add updated cache to model output
if past_key_values is not None and return_dict:
outputs, past = outputs
outputs["past_key_values"] = unfreeze(past["cache"])
return outputs
elif past_key_values is not None and not return_dict:
outputs, past = outputs
outputs = outputs[:1] + (unfreeze(past["cache"]),) + outputs[1:]
return outputs
|
Returns:
Example:
```python
>>> from transformers import WhisperProcessor, FlaxWhisperForConditionalGeneration
>>> from datasets import load_dataset
>>> processor = WhisperProcessor.from_pretrained("openai/whisper-tiny.en")
>>> model = FlaxWhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny.en", from_pt=True)
>>> ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
>>> inputs = processor(ds[0]["audio"]["array"], return_tensors="np")
>>> input_features = inputs.input_features
>>> encoder_outputs = model.encode(input_features=input_features)
>>> decoder_start_token_id = model.config.decoder_start_token_id
>>> decoder_input_ids = jnp.ones((input_features.shape[0], 1), dtype="i4") * decoder_start_token_id
>>> outputs = model.decode(decoder_input_ids, encoder_outputs)
>>> last_decoder_hidden_states = outputs.last_hidden_state
```
|
decode
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/modeling_flax_whisper.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/modeling_flax_whisper.py
|
MIT
|
def pjit_with_cpu_fallback(
fun: Callable, # pylint: disable=g-bare-generic
in_axis_resources,
out_axis_resources,
static_argnums: Union[int, Sequence[int]] = (),
donate_argnums: Union[int, Sequence[int]] = (),
backend: Optional[str] = None,
):
"""Wrapper for pjit that calls normal jit on cpu."""
if jax.devices(backend)[0].platform == "cpu":
return jax.jit(fun, static_argnums=static_argnums, donate_argnums=donate_argnums)
else:
return jax_pjit(
fun,
in_axis_resources,
out_axis_resources,
static_argnums=static_argnums,
donate_argnums=donate_argnums,
)
|
Wrapper for pjit that calls normal jit on cpu.
|
pjit_with_cpu_fallback
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
|
def with_sharding_constraint(x, axis_resources):
"""Wrapper for pjit with_sharding_constraint, no-op on cpu or outside pjit."""
if jax.devices()[0].platform == "cpu" or not global_mesh_defined():
return x
else:
return jax.experimental.pjit.with_sharding_constraint(x, axis_resources)
|
Wrapper for pjit with_sharding_constraint, no-op on cpu or outside pjit.
|
with_sharding_constraint
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
|
def bounds_from_last_device(last_device: JaxDevice) -> HardwareMesh:
"""Get the bound from the given last device."""
# Must be passed the device at the highest-coordinate corner of the
# relevant mesh, which is a requirement we know is satisfied by the last
# device in jax.devices().
if hasattr(last_device, "coords"):
x, y, z = last_device.coords
return x + 1, y + 1, z + 1, last_device.core_on_chip + 1
else:
# On non-TPU platforms, the "mesh" is hosts x devices per host in order
# to take advantage of faster within-host interconnect.
return jax.host_count(), jax.local_device_count()
|
Get the bound from the given last device.
|
bounds_from_last_device
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
|
def get_coords(device: JaxDevice) -> HardwareMesh:
"""Returns the coordinates of the given device."""
if hasattr(device, "coords"):
return (*device.coords, device.core_on_chip)
return (device.process_index, device.id % jax.local_device_count())
|
Returns the coordinates of the given device.
|
get_coords
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
|
def global_mesh_defined():
"""Checks if global xmap/pjit mesh resource environment is defined."""
maps_env = jax.experimental.maps.thread_resources.env
return maps_env.physical_mesh.devices.shape != () # pylint: disable=g-explicit-bool-comparison
|
Checks if global xmap/pjit mesh resource environment is defined.
|
global_mesh_defined
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
|
def get_mesh(
model_parallel_submesh: HardwareMesh,
input_devices: Sequence[JaxDevice] = (),
input_local_devices: Sequence[JaxDevice] = (),
tile_by_host_if_needed: bool = True,
backend: Optional[str] = None,
) -> Mesh:
"""Construct an xmap/pjit Mesh for the given model-parallel submesh.
The resulting mesh has two resource axes: 'model', with the provided submesh
shape, and 'data', which covers the rest of the mesh.
Args:
model_parallel_submesh: a HardwareMesh spec, namely (x,y,z,core) on TPU for
a single model-parallel replica's "tile" in the physical device mesh. The
first three elements (`x`, `y`, and `z`) should be factors of the pod
slice; e.g., if you are using df_4x8, then `x` should be a factor of 4
(one of 1, 2, 4), `y` should be a factor of 8 (one of 1, 2, 4, 8), and `z`
must be 1, because TPU v3 slices are only 2D. `z` can be >1 for TPU v4
(and maybe later TPUs) that allow 3D slices. `core` is the number of cores
to use from each TPU node. As communication is usually fastest inside the
same node, if you need a tile of more than 1 core, then
you should first increase `core`: e.g., for TPU v3, (1,1,1,2) is better
than (2,1,1,1). To pick a good spec, try a few possible values until you
get high TPU utilization.
input_devices: the devices to use, will use jax.devices() if this is not
set.
input_local_devices: the local devices to use, will use jax.local_devices()
if this is not set.
tile_by_host_if_needed: JAX currently requires that the parts of any sharded
array that are located on one host's local devices form a single
contiguous slice. A best effort will be made to achieve this without
"tiling" the device assignment over hosts (which can reduce XLA collective
performance). If this flag is True, then the device assignment will be
tiled over hosts if necessary to satisfy this constraint and create a
buildable mesh; if false, mesh construction will fail instead.
backend: get devices from the pinned backend, if specified. This is
useful for explicitly specifying the devices other than relying on
jax_platform_name.
Returns:
An xmap/pjit Mesh containing the virtual device mesh with 'data' and 'model' axes.
"""
input_devices = input_devices or jax.devices(backend)
input_local_devices = input_local_devices or jax.local_devices(0, backend)
# Sort input_devices based on coords, as backends might not return devices
# in order.
last_device = sorted(input_devices, key=get_coords)[-1]
last_input_local_devices = sorted(input_local_devices, key=get_coords)[-1]
logging.info(
"last device coords : %r\nlast local device coords: %r",
get_coords(last_device),
get_coords(last_input_local_devices),
)
global_hardware_mesh = bounds_from_last_device(last_device)
mesh_ndim = len(global_hardware_mesh)
local_hardware_mesh = bounds_from_last_device(last_input_local_devices)
mesh_err = (
f"each dimension of the model parallel submesh {model_parallel_submesh} "
"must be a factor of the corresponding dimension of the global device "
f"mesh {global_hardware_mesh}"
)
assert not any(g % m for g, m in zip(global_hardware_mesh, model_parallel_submesh)), mesh_err
assert not any(g % l for g, l in zip(global_hardware_mesh, local_hardware_mesh))
devices = np.empty(global_hardware_mesh, dtype=object)
for device in input_devices:
device_coords = get_coords(device)
devices[device_coords] = device
tile_by_host = tile_by_host_if_needed
if len(global_hardware_mesh) == 4:
# enable contiguous local chunks without host tiling by making Z major
global_hardware_mesh = typing.cast(Tuple[int, int, int, int], global_hardware_mesh)
model_parallel_submesh = typing.cast(Tuple[int, int, int, int], model_parallel_submesh)
gx, gy, gz, gc = global_hardware_mesh
mx, my, mz, mc = model_parallel_submesh
if (mx == gx > 1 and my == mz == 1) or (mx == 1 and my == gy > 1 and mz == gz > 1):
logging.info("ensuring YZ plane has a Z-major device order")
# YZ should be ZY
assert mc == gc, (mc, gc)
global_hardware_mesh = gx, gz, gy, gc
model_parallel_submesh = mx, mz, my, mc
devices = devices.swapaxes(1, 2)
tile_by_host = False
if (my == gy > 1 and mx == mz == 1) or (my == 1 and mx == gx > 1 and mz == gz > 1):
logging.info("ensuring XZ plane has a Z-major device order")
# XZ should be ZX
assert mc == gc, (mc, gc)
global_hardware_mesh = gz, gy, gx, gc
model_parallel_submesh = mz, my, mx, mc
devices = devices.swapaxes(0, 2)
tile_by_host = False
if tile_by_host:
logging.warning(
"Tiling device assignment mesh by hosts, which may lead to "
"reduced XLA collective performance. To avoid this, modify "
"the model parallel submesh or run with more tasks per host."
)
tile_err = (
"to tile the mesh by hosts, each dimension of the model parallel "
"submesh must be either a factor or a multiple of the corresponding "
"dimension of the per-host submesh"
)
def dh_dd_mh_md(g: int, m: int, l: int) -> Tuple[int, int, int, int]:
"""Split a global mesh dimension into four tiling components.
Args:
g: global mesh bounds dimension size
m: model-parallel submesh bounds dimension size
l: local submesh bounds dimension size
Returns:
The resulting tuple divides the dimension into the hosts component of
the data-parallel submesh, the devices component of the data-parallel
submesh, the hosts component of the model-parallel submesh, and the
devices component of the model-parallel submesh.
"""
d = g // m
if m >= l:
assert not m % l, tile_err
return (d, 1, m // l, l)
else:
assert not l % m, tile_err
return (d // (l // m), l // m, 1, m)
# e.g. [(x_data_hosts, x_data_devs, x_model_hosts, x_model_devs), ...]
dh_dd_mh_md_tups = map(
dh_dd_mh_md,
global_hardware_mesh,
model_parallel_submesh,
local_hardware_mesh,
)
# reshape to e.g. (x_dh, x_dd, x_mh, x_md, y_dh, ...)
devices = devices.reshape(*(s for t in dh_dd_mh_md_tups for s in t)) # pylint: disable=g-complex-comprehension
# TODO(jekbradbury): reorder local subgroups for ring locality
# Transpose to [data_host], [data_device], [model_host], [model_device]
# block ordering e.g. (x_dh, y_dh, ..., x_dd, y_dd, ...)
devices = devices.transpose(
*(4 * i for i in range(mesh_ndim)),
*(4 * i + 1 for i in range(mesh_ndim)),
*(4 * i + 2 for i in range(mesh_ndim)),
*(4 * i + 3 for i in range(mesh_ndim)),
)
else:
# e.g. [(x_data, x_model), (y_data, y_model), ...]
model_data_tups = [(g // m, m) for g, m in zip(global_hardware_mesh, model_parallel_submesh)]
# reshape to e.g. (x_data, x_model, y_data, y_model...)
devices = devices.reshape(*(s for t in model_data_tups for s in t)) # pylint: disable=g-complex-comprehension
# TODO(jekbradbury): reorder small subgroups for ring locality
# transpose to e.g. (x_data, y_data, ..., x_model, ...)
devices = devices.transpose(*(2 * i for i in range(mesh_ndim)), *(2 * i + 1 for i in range(mesh_ndim)))
# reshape to (data, model)
devices = devices.reshape(-1, np.prod(model_parallel_submesh))
global_mesh = Mesh(devices, ["data", "model"])
logging.info("global_mesh axis_names: %s", global_mesh.axis_names)
logging.info("global_mesh devices: %s", global_mesh.devices)
logging.info("global_mesh devices shape: %s", global_mesh.devices.shape)
return global_mesh
|
Construct an xmap/pjit Mesh for the given model-parallel submesh.
The resulting mesh has two resource axes: 'model', with the provided submesh
shape, and 'data', which covers the rest of the mesh.
Args:
model_parallel_submesh: a HardwareMesh spec, namely (x,y,z,core) on TPU for
a single model-parallel replica's "tile" in the physical device mesh. The
first three elements (`x`, `y`, and `z`) should be factors of the pod
slice; e.g., if you are using df_4x8, then `x` should be a factor of 4
(one of 1, 2, 4), `y` should be a factor of 8 (one of 1, 2, 4, 8), and `z`
must be 1, because TPU v3 slices are only 2D. `z` can be >1 for TPU v4
(and maybe later TPUs) that allow 3D slices. `core` is the number of cores
to use from each TPU node. As communication is usually fastest inside the
same node, if you need a tile of more than 1 core, then
you should first increase `core`: e.g., for TPU v3, (1,1,1,2) is better
than (2,1,1,1). To pick a good spec, try a few possible values until you
get high TPU utilization.
input_devices: the devices to use, will use jax.devices() if this is not
set.
input_local_devices: the local devices to use, will use jax.local_devices()
if this is not set.
tile_by_host_if_needed: JAX currently requires that the parts of any sharded
array that are located on one host's local devices form a single
contiguous slice. A best effort will be made to achieve this without
"tiling" the device assignment over hosts (which can reduce XLA collective
performance). If this flag is True, then the device assignment will be
tiled over hosts if necessary to satisfy this constraint and create a
buildable mesh; if False, mesh construction will fail instead.
backend: get devices from the pinned backend, if specified. This is
useful for explicitly specifying the devices other than relying on
jax_platform_name.
Returns:
An xmap/pjit Mesh containing the virtual device mesh with 'data' and 'model' axes.
|
get_mesh
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
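A quick sanity check of the submesh arithmetic described above, as a minimal standalone sketch. The df_4x8 slice shape is hypothetical, and the final `get_mesh` call is left commented out because it needs a real multi-device JAX runtime.

import numpy as np

# Hypothetical df_4x8 TPU v3 slice: bounds (x=4, y=8, z=1, core=2) -> 64 devices.
global_hardware_mesh = (4, 8, 1, 2)
model_parallel_submesh = (2, 1, 1, 2)  # 4-way model parallelism; core grown first

num_devices = int(np.prod(global_hardware_mesh))          # 64
model_parallelism = int(np.prod(model_parallel_submesh))  # 4
assert num_devices % model_parallelism == 0  # the submesh must tile the slice
print("data:", num_devices // model_parallelism, "model:", model_parallelism)
# data: 16 model: 4

# On a real slice this spec would build the two-axis ('data', 'model') Mesh:
# mesh = get_mesh(model_parallel_submesh)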
|
def dh_dd_mh_md(g: int, m: int, l: int) -> Tuple[int, int, int, int]:
"""Split a global mesh dimension into four tiling components.
Args:
g: global mesh bounds dimension size
m: model-parallel submesh bounds dimension size
l: local submesh bounds dimension size
Returns:
The resulting tuple divides the dimension into the hosts component of
the data-parallel submesh, the devices component of the data-parallel
submesh, the hosts component of the model-parallel submesh, and the
devices component of the model-parallel submesh.
"""
d = g // m
if m >= l:
assert not m % l, tile_err
return (d, 1, m // l, l)
else:
assert not l % m, tile_err
return (d // (l // m), l // m, 1, m)
|
Split a global mesh dimension into four tiling components.
Args:
g: global mesh bounds dimension size
m: model-parallel submesh bounds dimension size
l: local submesh bounds dimension size
Returns:
The resulting tuple divides the dimension into the hosts component of
the data-parallel submesh, the devices component of the data-parallel
submesh, the hosts component of the model-parallel submesh, and the
devices component of the model-parallel submesh.
|
dh_dd_mh_md
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
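A worked example of the four-way split, assuming a global dimension of 8 devices, a 4-wide model-parallel dimension, and 2 local devices per host; the helper is restated so the snippet runs standalone.

from typing import Tuple

def dh_dd_mh_md(g: int, m: int, l: int) -> Tuple[int, int, int, int]:
    # Returns (data_hosts, data_devices, model_hosts, model_devices)
    # for a single mesh dimension.
    d = g // m
    if m >= l:
        assert m % l == 0
        return (d, 1, m // l, l)
    assert l % m == 0
    return (d // (l // m), l // m, 1, m)

print(dh_dd_mh_md(8, 4, 2))  # (2, 1, 2, 2): the model tile spans 2 hosts
print(dh_dd_mh_md(8, 1, 2))  # (4, 2, 1, 1): the model tile fits inside one host
# In both cases the four factors multiply back to g == 8.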
|
def get_gpu_mesh(num_partitions: int) -> Mesh:
"""Mesh for GPUs that preferentially places 'model' on NVLink."""
nvlink_size = jax.local_device_count()
dcn_size = jax.process_count()
nvlink_mp = min(num_partitions, nvlink_size)
nvlink_dp, extra1 = divmod(nvlink_size, nvlink_mp)
dcn_mp, extra2 = divmod(num_partitions, nvlink_mp)
assert not (
extra1 or extra2
), "number of partitions on GPU must be a factor or multiple of the number of local devices"
dcn_dp = dcn_size // dcn_mp
devices = create_hybrid_device_mesh(
mesh_shape=[nvlink_dp, nvlink_mp],
dcn_mesh_shape=[dcn_dp, dcn_mp],
process_is_granule=True,
)
global_mesh = Mesh(devices, ["data", "model"])
logging.info("global_mesh axis_names: %s", global_mesh.axis_names)
logging.info("global_mesh devices: %s", global_mesh.devices)
return global_mesh
|
Mesh for GPUs that preferentially places 'model' on NVLink.
|
get_gpu_mesh
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
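The NVLink/DCN factorization above reduces to divisibility arithmetic; a sketch with hypothetical numbers (2 processes with 8 local GPUs each, 16-way model parallelism).

num_partitions = 16
nvlink_size = 8  # stand-in for jax.local_device_count()
dcn_size = 2     # stand-in for jax.process_count()

nvlink_mp = min(num_partitions, nvlink_size)        # 8-way model parallel over NVLink
nvlink_dp, extra1 = divmod(nvlink_size, nvlink_mp)  # (1, 0)
dcn_mp, extra2 = divmod(num_partitions, nvlink_mp)  # (2, 0): the rest spills onto DCN
assert not (extra1 or extra2)
dcn_dp = dcn_size // dcn_mp                         # 1
print([nvlink_dp, nvlink_mp], [dcn_dp, dcn_mp])     # [1, 8] [1, 2]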
|
def default_mesh(
num_partitions: int,
model_parallel_submesh: Optional[HardwareMesh] = None,
backend: Optional[str] = None,
) -> Mesh:
"""Attempt to return a default mesh for simple cases.
Args:
num_partitions: number of partitions to use, will be ignored if
model_parallel_submesh is provided.
model_parallel_submesh: 4-tuple that specifies the x,y,z,c submesh to use as
the model-parallel device tile.
backend: get devices from the pinned backend, if specified. This is useful
for explicitly specifying the devices other than relying on
jax_platform_name.
Returns:
xmap/pjit 2D Mesh with 'data', 'model' mesh axes.
"""
last_device = jax.devices(backend)[-1]
platform = last_device.platform
device_kind = last_device.device_kind
bounds = bounds_from_last_device(last_device)
if model_parallel_submesh:
return get_mesh(model_parallel_submesh, backend=backend)
if platform == "cpu":
return get_cpu_mesh()
elif platform == "gpu":
return get_gpu_mesh(num_partitions)
mps = None
if device_kind in ("TPU v2", "TPU v3"):
if num_partitions == 1:
mps = (1, 1, 1, 1)
elif num_partitions == 2:
mps = (1, 1, 1, 2)
elif num_partitions == 4:
mps = (2, 1, 1, 2)
elif num_partitions == 8:
mps = (2, 2, 1, 2)
elif num_partitions == 16:
mps = (4, 2, 1, 2)
# assume the use of megacore on TPU v4
elif (device_kind == "TPU v4" or device_kind == "TPU v4 lite") and bounds[3] == 1:
if num_partitions == 1:
mps = (1, 1, 1, 1)
elif num_partitions == 2:
mps = (1, 2, 1, 1)
elif num_partitions == 4:
if bounds[0] >= 4:
mps = (4, 1, 1, 1)
else:
mps = (2, 2, 1, 1)
elif num_partitions == 8:
if bounds[2] >= 8:
mps = (1, 1, 8, 1)
else:
mps = (4, 2, 1, 1)
elif num_partitions == 16:
if bounds[2] >= 16:
mps = (1, 1, 16, 1)
elif bounds[0] >= 8:
mps = (8, 2, 1, 1)
elif bounds[0] >= 4:
mps = (4, 4, 1, 1)
else:
mps = (2, 2, 4, 1)
if mps is None:
raise ValueError(
"No default mesh for this configuration: specify " "config.model_parallel_submesh explicitly."
)
return get_mesh(mps, backend=backend)
|
Attempt to return a default mesh for simple cases.
Args:
num_partitions: number of partitions to use, will be ignored if
model_parallel_submesh is provided.
model_parallel_submesh: 4-tuple that specifies the x,y,z,c submesh to use as
the model-parallel device tile.
backend: get devices from the pinned backend, if specified. This is useful
for explicitly specifying the devices other than relying on
jax_platform_name.
Returns:
xmap/pjit 2D Mesh with 'data', 'model' mesh axes.
|
default_mesh
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
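The TPU v2/v3 branch above is effectively a lookup table; restated as a dict (values copied from the function), the invariant is easy to check in a standalone sketch.

import numpy as np

# num_partitions -> (x, y, z, core) submesh chosen for TPU v2/v3.
TPU_V3_SUBMESHES = {
    1: (1, 1, 1, 1),
    2: (1, 1, 1, 2),
    4: (2, 1, 1, 2),
    8: (2, 2, 1, 2),
    16: (4, 2, 1, 2),
}
# Each spec's product equals the requested number of model partitions.
assert all(int(np.prod(spec)) == n for n, spec in TPU_V3_SUBMESHES.items())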
|
def get_local_chunk_info(
self, global_shape: Tuple[int, ...], mesh_axes: Sequence[Optional[str]]
) -> LocalChunkInfo:
"""Get the local chunk info for a given array shape and sharded axes.
Args:
global_shape: the global, unsharded shape of the array to chunk.
mesh_axes: a sequence of names (or None) of equal rank to `global_shape`
that specifies which mesh dimensions the array is sharded along.
Returns:
LocalChunkInfo containing the logical slices of the array found on this
host's local devices, as well as the replica index for this chunk among
chunks with the same slice. The latter is used to determine which
host should write this chunk during checkpointing.
"""
local_slice = [slice(None) for dim in global_shape]
sharded_mesh_axes = set()
for i, (mesh_axis, size) in enumerate(zip(mesh_axes, global_shape)):
if not mesh_axis:
continue
sharded_mesh_axes.add(mesh_axis)
if not isinstance(mesh_axis, str):
raise NotImplementedError("TODO(jekbradbury)")
chunk_id = self.chunk_ids[mesh_axis]
chunk_size = size // self.num_chunks[mesh_axis]
local_slice[i] = slice(chunk_id * chunk_size, (chunk_id + 1) * chunk_size)
replicated_mesh_axes = [mesh_axis for mesh_axis in self.mesh_axes if mesh_axis not in sharded_mesh_axes]
replica_id = 0
for mesh_axis in replicated_mesh_axes:
chunk_id = self.chunk_ids[mesh_axis]
replica_id = replica_id * self.num_chunks[mesh_axis] + chunk_id
return LocalChunkInfo(tuple(local_slice), replica_id)
|
Get the local chunk info for a given array shape and sharded axes.
Args:
global_shape: the global, unsharded shape of the array to chunk.
mesh_axes: a sequence of names (or None) of equal rank to `global_shape`
that specifies which mesh dimensions the array is sharded along.
Returns:
LocalChunkInfo containing the logical slices of the array found on this
host's local devices, as well as the replica index for this chunk among
chunks with the same slice. The latter is used to determine which
host should write this chunk during checkpointing.
|
get_local_chunk_info
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
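The slice arithmetic in `get_local_chunk_info` can be checked by hand; a standalone sketch assuming a (32, 128) array sharded over ('data', 'model') on a host whose chunk ids are data=1 of 4 and model=0 of 2.

global_shape = (32, 128)
mesh_axes = ("data", "model")
num_chunks = {"data": 4, "model": 2}
chunk_ids = {"data": 1, "model": 0}  # this host's position along each mesh axis

local_slice = []
for axis, size in zip(mesh_axes, global_shape):
    chunk_size = size // num_chunks[axis]
    start = chunk_ids[axis] * chunk_size
    local_slice.append(slice(start, start + chunk_size))
print(tuple(local_slice))  # (slice(8, 16), slice(0, 64))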
|
def standard_logical_axis_rules(
activation_partitioning_dims: int = 1,
parameter_partitioning_dims: int = 1,
additional_rules: Optional[LogicalAxisRules] = None,
) -> LogicalAxisRules:
"""Default sharding rules for T5X model in terms of logical axis names.
Args:
activation_partitioning_dims: enables 2-D activation sharding when set to 2.
parameter_partitioning_dims: enables 2-D parameter sharding when set to 2.
additional_rules: additional rules (a sequence of tuples) that will be
appended to the standard rules.
Returns:
Sequence of logical axis rules
"""
logging.info(
"`activation_partitioning_dims` = %d, `parameter_partitioning_dims` = %d",
activation_partitioning_dims,
parameter_partitioning_dims,
)
if activation_partitioning_dims == 1 and parameter_partitioning_dims == 1:
rules = [
("batch", "data"),
("vocab", "model"),
("embed", None),
("mlp", "model"),
("heads", "model"),
("kv", None),
("joined_kv", "model"), # joined heads+kv dim in 2D attn param layouts
]
elif activation_partitioning_dims == 2 and parameter_partitioning_dims == 1:
rules = [
("batch", "data"),
("vocab", "model"),
("mlp", "model"),
("heads", "model"),
("kv", None),
("joined_kv", "model"),
("embed", "model"),
]
elif activation_partitioning_dims == 1 and parameter_partitioning_dims == 2:
rules = [
("batch", "data"),
("vocab", "model"),
("mlp", "model"),
("heads", "model"),
("kv", None),
("joined_kv", "model"),
("embed", "data"),
]
elif activation_partitioning_dims == 2 and parameter_partitioning_dims == 2:
rules = [
("batch", "data"),
("vocab", "model"),
("mlp", "model"),
("heads", "model"),
("kv", None),
("joined_kv", "model"),
("embed", "model"),
("embed", "data"),
]
else:
raise ValueError(
f"`activation_partitioning_dims` = {activation_partitioning_dims} "
f"`parameter_partitioning_dims` = {parameter_partitioning_dims} "
"is not supported."
)
# Add the common rules for the replicated logical axes names.
replicated_rules = [
("relpos_buckets", None),
("abspos_buckets", None),
("length", None),
("layers", None),
("stack", None),
("mlp_activations", None),
]
rules.extend(replicated_rules)
if additional_rules:
rules.extend(additional_rules)
return rules
|
Default sharding rules for T5X model in terms of logical axis names.
Args:
activation_partitioning_dims: enables 2-D activation sharding when set to 2.
parameter_partitioning_dims: enables 2-D parameter sharding when set to 2.
additional_rules: additional rules (a sequence of tuples) that will be
appended to the standard rules.
Returns:
Sequence of logical axis rules
|
standard_logical_axis_rules
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
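Applying the 1-D/1-D rules above to an activation with logical shape ('batch', 'length', 'embed'); a standalone sketch that uses a plain dict lookup in place of flax's `logical_to_mesh_axes`.

rules = dict([
    ("batch", "data"), ("vocab", "model"), ("embed", None), ("mlp", "model"),
    ("heads", "model"), ("kv", None), ("joined_kv", "model"), ("length", None),
])
logical_axes = ("batch", "length", "embed")
mesh_axes = tuple(rules[name] for name in logical_axes)
print(mesh_axes)  # ('data', None, None): shard batch over 'data', replicate the rest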
|
def _id_fn(x, ix):
"""Identity function for copying parameters to the devices, sharded."""
# A pure identity such as `lambda x, *: x` can get optimized away, so we
# include a random.split as a cheap function that cannot be optimized away.
y = random.split(random.PRNGKey(jnp.array(ix, dtype=jnp.uint32)))
return x, y
|
Identity function for copying parameters to the devices, sharded.
|
_id_fn
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
|
def __init__(
self,
num_partitions: Optional[int] = None,
model_parallel_submesh: Optional[HardwareMesh] = None,
params_on_devices: bool = True,
backend: Optional[str] = None,
):
"""Configures the partitioner.
Args:
num_partitions: the number of partitions to use. Ignored if
`model_parallel_submesh` is provided.
model_parallel_submesh: 4-tuple that specifies the x,y,z,c submesh to use
as the model-parallel device tile. This submesh is used for the larger
of the two parameter dimensions, and, if 2-D activation sharding is
enabled, for the model dimension of activations. The rest of the mesh is
used for data parallelism and, if 2-D parameter sharding is enabled, the
other parameter dimension.
params_on_devices: whether to keep the params on devices; if False,
params stay in host memory. Note that some partitioners might ignore
this setting, for example if they don't support storing all params in
device memory.
backend: get devices from the pinned backend, if specified. This is useful
for explicitly specifying the devices other than relying on
jax_platform_name.
"""
if not num_partitions and not model_parallel_submesh:
raise ValueError("At least one of `num_partitions` or " "`model_parallel_submesh` must be set.")
if model_parallel_submesh is not None and len(model_parallel_submesh) != 4:
logging.error(
(
"`model_parallel_submesh` must be either None or a 4-tuple. Got"
" `model_parallel_submesh`=%s. A ValueError will be raised"
" beginning March 1, 2022."
),
model_parallel_submesh,
)
if bool(num_partitions) and bool(model_parallel_submesh):
logging.error(
(
"At most one of `num_partitions` or `model_parallel_submesh` can be"
" set. Got `num_partitions=%s` and `model_parallel_submesh`=%s. A"
" ValueError will be raised beginning March 21, 2022."
),
num_partitions,
model_parallel_submesh,
)
self._num_partitions = num_partitions
self._model_parallel_submesh = model_parallel_submesh
self._params_on_devices = params_on_devices
self._data_axis = "data"
self._backend = backend
|
Configures the partitioner.
Args:
num_partitions: the number of partitions to use. Ignored if
`model_parallel_submesh` is provided.
model_parallel_submesh: 4-tuple that specifies the x,y,z,c submesh to use
as the model-parallel device tile. This submesh is used for the larger
of the two parameter dimensions, and, if 2-D activation sharding is
enabled, for the model dimension of activations. The rest of the mesh is
used for data parallelism and, if 2-D parameter sharding is enabled, the
other parameter dimension.
params_on_devices: whether to keep the params on devices; if False,
params stay in host memory. Note that some partitioners might ignore
this setting, for example if they don't support storing all params in
device memory.
backend: get devices from the pinned backend, if specified. This is useful
for explicitly specifying the devices other than relying on
jax_platform_name.
|
__init__
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
|
def get_data_layout(self, batch_size: Optional[int] = None, host_index: Optional[int] = None) -> DataLayout:
"""Returns filled `DataLayout` based on the partitioned model layout.
Args:
batch_size: if set, indicates the requested batch size. The exception will
be raised if this batch size is not compatible with the layout. If not
set, the batch size is inferred from the layout.
host_index: the host index to use for the calculations; if not set, the
JAX-provided one is used. Should be in the [0, num_hosts) interval, and the
order should match the order of the corresponding CPU devices in
`jax.devices()`.
Returns:
Filled `DataLayout` structure.
"""
if host_index is not None:
raise NotImplementedError("Explicit host_index is not yet implemented.")
if self._data_axis is None:
return DataLayout(
batch_size=batch_size,
shard_id=0,
num_shards=1,
is_first_host_in_replica_set=(jax.process_index() == 0),
)
mesh_size = self._local_chunker.global_mesh.shape[self._data_axis]
batch_size = batch_size or mesh_size
if batch_size % mesh_size:
raise ValueError(
f"Batch size ({batch_size}) must be divisible by corresponding " f"mesh size ({mesh_size})."
)
num_shards = self._local_chunker.num_chunks[self._data_axis]
if batch_size % num_shards:
raise ValueError(f"Batch size ({batch_size}) must be divisible by number of " f"replicas ({num_shards}).")
replica_id = self._local_chunker.get_local_chunk_info((batch_size,), [self._data_axis]).replica_id
return DataLayout(
batch_size=int(batch_size),
shard_id=int(self._local_chunker.chunk_ids[self._data_axis]),
num_shards=int(num_shards),
is_first_host_in_replica_set=(replica_id == 0),
)
|
Returns filled `DataLayout` based on the partitioned model layout.
Args:
batch_size: if set, indicates the requested batch size. The exception will
be raised if this batch size is not compatible with the layout. If not
set, the batch size is inferred from the layout.
host_index: the host index to use for the calculations; if not set, the
JAX-provided one is used. Should be in the [0, num_hosts) interval, and the
order should match the order of the corresponding CPU devices in
`jax.devices()`.
Returns:
Filled `DataLayout` structure.
|
get_data_layout
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
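The two divisibility constraints enforced by `get_data_layout`, with hypothetical numbers (a 'data' mesh axis of 8 devices split across 2 host replicas).

mesh_size = 8    # stand-in for global_mesh.shape['data']
num_shards = 2   # stand-in for the data-axis chunk count across hosts
batch_size = 16
assert batch_size % mesh_size == 0   # else get_data_layout raises ValueError
assert batch_size % num_shards == 0
print("each of", num_shards, "shards loads", batch_size // num_shards, "examples")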
|
def get_local_chunk_info(
self, global_shape: Tuple[int, ...], mesh_axes: Sequence[Optional[str]]
) -> LocalChunkInfo:
"""Returns the local chunk info for a given array shape and sharded axes."""
return self._local_chunker.get_local_chunk_info(global_shape, mesh_axes)
|
Returns the local chunk info for a given array shape and sharded axes.
|
get_local_chunk_info
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
|
def move_params_to_devices(self, train_state: TrainState, train_state_axes: TrainState) -> TrainState:
"""Moves the optimizer parameters to devices."""
p_id_fn = self.partition(
_id_fn,
in_axis_resources=(train_state_axes, None),
out_axis_resources=(train_state_axes, None),
donate_argnums=(0,),
)
if jax.config.jax_array and jax.process_count() > 1:
train_state = multihost_utils.host_local_array_to_global_array(train_state, self.mesh, train_state_axes)
train_state, _ = p_id_fn(train_state, jnp.ones((), dtype=jnp.uint32))
return train_state
|
Moves the optimizer parameters to devices.
|
move_params_to_devices
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
|
def get_logical_axes(self, train_state: TrainState) -> TrainState:
"""Returns a copy of TrainState with Optional[AxisNames] as leaves."""
# By default, return None for the logical axes.
return train_state.restore_state(jax.tree_map(lambda x: None, train_state.state_dict()))
|
Returns a copy of TrainState with Optional[AxisNames] as leaves.
|
get_logical_axes
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
|
def partition(
self,
fn: Callable, # pylint: disable=g-bare-generic
in_axis_resources,
out_axis_resources,
static_argnums: Union[int, Sequence[int]] = (),
donate_argnums: Union[int, Sequence[int]] = (),
) -> PartitionedCallable:
"""Partitions the computation using partitioner-specific implementation.
Args:
fn: the function to partition.
in_axis_resources: Pytree of structure matching that of arguments to `fn`,
with all actual arguments replaced by resource assignment
specifications. It is also valid to specify a pytree prefix (e.g. one
value in place of a whole subtree), in which case the leaves get
broadcast to all values in that subtree.
The valid resource assignment specifications are:
`None`: in which case the value will be replicated on all devices
`PartitionSpec`: a tuple of length at most equal to the rank of the
partitioned value. Each element can be a `None`, a mesh axis or a
tuple of mesh axes, and specifies the set of resources assigned to
partition the value's dimension matching its position in the spec.
out_axis_resources: Like `in_axis_resources`, but specifies resource
assignment for function outputs.
static_argnums: an optional int or collection of ints that specify which
positional arguments to treat as static (compile-time constant) in the
partitioned function.
donate_argnums: an optional int or collection of ints that specify which
argument buffers are "donated" to the computation. It is safe to donate
argument buffers if you no longer need them once the computation has
finished.
Returns:
A partitioned version of the input function.
"""
raise NotImplementedError
|
Partitions the computation using partitioner-specific implementation.
Args:
fn: the function to partition.
in_axis_resources: Pytree of structure matching that of arguments to `fn`,
with all actual arguments replaced by resource assignment
specifications. It is also valid to specify a pytree prefix (e.g. one
value in place of a whole subtree), in which case the leaves get
broadcast to all values in that subtree.
The valid resource assignment specifications are:
`None`: in which case the value will be replicated on all devices
`PartitionSpec`: a tuple of length at most equal to the rank of the
partitioned value. Each element can be a `None`, a mesh axis or a
tuple of mesh axes, and specifies the set of resources assigned to
partition the value's dimension matching its position in the spec.
out_axis_resources: Like `in_axis_resources`, but specifies resource
assignment for function outputs.
static_argnums: an optional int or collection of ints that specify which
positional arguments to treat as static (compile-time constant) in the
partitioned function.
donate_argnums: an optional int or collection of ints that specify which
argument buffers are "donated" to the computation. It is safe to donate
argument buffers if you no longer need them once the computation has
finished.
Returns:
A partitioned version of the input function.
|
partition
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
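The resource-assignment specs described above are ordinary `PartitionSpec`s; a sketch for a rank-2 activation, assuming a recent JAX (the import path has moved between versions).

from jax.sharding import PartitionSpec as P  # older JAX: jax.experimental.PartitionSpec

# Arg 0 is rank-2: shard dim 0 over 'data', replicate dim 1. Arg 1 is fully replicated.
in_axis_resources = (P("data", None), None)
# Output: 2-D sharded, dim 0 over 'data' and dim 1 over 'model'.
out_axis_resources = P("data", "model")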
|
def __init__(
self,
num_partitions: Optional[int] = None,
model_parallel_submesh: Optional[HardwareMesh] = None,
params_on_devices: bool = True,
backend: Optional[str] = None,
logical_axis_rules: Optional[LogicalAxisRules] = None,
use_cpu_pjit: Optional[bool] = False,
):
"""PjitPartitioner constructor.
See https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.mdx/usage/partitioning for details.
Args:
num_partitions: an integer that specifies the size of the model parallel
submesh to be automatically selected for the current topology. See
`model_parallel_submesh` for details on how this submesh is used.
Mutually exclusive with `model_parallel_submesh`.
model_parallel_submesh: is a 4-tuple that specifies the `(x, y, z, c)`
submesh model-parallel device tile, an axis of accelerator parallelism
orthogonal to data parallelism. Array axes in a model's parameters or
activations can be sharded over this submesh using axis rules (see
`logical_axis_rules`) that map them to 'model'. The effective number of
model sub-partitions is equal to `np.prod(model_parallel_submesh)` and
must evenly divide the total number of devices (i.e.,
`jax.device_count() % np.prod(model_parallel_submesh) == 0`). The rest
of the TPU mesh is the data parallel submesh, providing
`jax.device_count() // np.prod(model_parallel_submesh)` partitions. It
is used for data (batch) parallelism and to shard other array axes that
are mapped to 'data'. This argument is mutually exclusive with
`num_partitions`.
params_on_devices: whether to keep the params on devices; if False,
params stay in host memory. Note that some partitioners might ignore
this setting, for example if they don't support storing all params in
device memory.
backend: get devices from the pinned backend, if specified. This is
useful for explicitly specifying the devices other than relying on
jax_platform_name.
logical_axis_rules: a priority-ordered sequence of KV tuples that maps
logical axis names to either `None` (not sharded), 'model' (to shard
across the model-parallel submesh), or 'data' (to shard across the
data-parallel submesh).
use_cpu_pjit: enables wrapper function for pjit which just jits the
function if using CPU backend.
"""
super().__init__(
num_partitions=num_partitions,
model_parallel_submesh=model_parallel_submesh,
params_on_devices=params_on_devices,
backend=backend,
)
if logical_axis_rules is None:
logical_axis_rules = standard_logical_axis_rules()
self._logical_axis_rules = tuple(logical_axis_rules)
(self._data_axis,) = flax_partitioning.logical_to_mesh_axes(["batch"], logical_axis_rules)
self._use_cpu_pjit = use_cpu_pjit
|
PjitPartitioner constructor.
See https://github.com/google-research/text-to-text-transfer-transformer/blob/main/README.mdx/usage/partitioning for details.
Args:
num_partitions: an integer that specifies the size of the model parallel
submesh to be automatically selected for the current topology. See
`model_parallel_submesh` for details on how this submesh is used.
Mutually exclusive with `model_parallel_submesh`.
model_parallel_submesh: is a 4-tuple that specifies the `(x, y, z, c)`
submesh model-parallel device tile, an axis of accelerator parallelism
orthogonal to data parallelism. Array axes in a model's parameters or
activations can be sharded over this submesh using axis rules (see
`logical_axis_rules`) that map them to 'model'. The effective number of
model sub-partitions is equal to `np.prod(model_parallel_submesh)` and
must evenly divide the total number of devices (i.e.,
`jax.device_count() % np.prod(model_parallel_submesh) == 0`). The rest
of the TPU mesh is the data parallel submesh, providing
`jax.device_count() // np.prod(model_parallel_submesh)` partitions. It
is used for data (batch) parallelism and to shard other array axes that
are mapped to 'data'. This argument is mutually exclusive with
`num_partitions`.
params_on_devices: whether to keep the params on devices; if False,
params stay in host memory. Note that some partitioners might ignore
this setting, for example if they don't support storing all params in
device memory.
backend: get devices from the pinned backend, if specified. This is
useful for explicitly specifying the devices other than relying on
jax_platform_name.
logical_axis_rules: a priority-ordered sequence of KV tuples that maps
logical axis names to either `None` (not sharded), 'model' (to shard
across the model-parallel submesh), or 'data' (to shard across the
data-parallel submesh).
use_cpu_pjit: enables wrapper function for pjit which just jits the
function if using CPU backend.
|
__init__
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
|
def partition(
self,
fn: Callable, # pylint: disable=g-bare-generic
in_axis_resources,
out_axis_resources,
static_argnums: Union[int, Sequence[int]] = (),
donate_argnums: Union[int, Sequence[int]] = (),
) -> PjittedFnWithContext:
"""Partitions the function using jax.pjit."""
if self._use_cpu_pjit:
pjit_fn = pjit_with_cpu_fallback
else:
pjit_fn = pjit
pjitted = pjit_fn(
fn,
in_axis_resources=in_axis_resources,
out_axis_resources=out_axis_resources,
static_argnums=static_argnums,
donate_argnums=donate_argnums,
backend=self._backend,
)
return PjittedFnWithContext(pjitted, self.mesh, self._logical_axis_rules)
|
Partitions the function using jax.pjit.
|
partition
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
|
def get_mesh_axes(self, train_state: TrainState) -> TrainState:
"""Returns a copy of TrainState with Optional[PartitionSpecs] as leaves."""
logical_axes = self.get_logical_axes(train_state)
def _logical_to_mesh_axes(param_name, logical_axes):
if logical_axes is None:
return None
elif logical_axes is traverse_util.empty_node:
return traverse_util.empty_node
try:
return flax_partitioning.logical_to_mesh_axes(logical_axes, self._logical_axis_rules)
except ValueError as e:
raise ValueError(f"Failed to map logical axes for {param_name}") from e
flat_logical_axes = traverse_util.flatten_dict(logical_axes.state_dict(), keep_empty_nodes=True, sep="/")
flat_mesh_axes = {k: _logical_to_mesh_axes(k, v) for k, v in flat_logical_axes.items()}
return logical_axes.restore_state(traverse_util.unflatten_dict(flat_mesh_axes, sep="/"))
|
Returns a copy of TrainState with Optional[PartitionSpecs] as leaves.
|
get_mesh_axes
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/partitioner.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/partitioner.py
|
MIT
|
def __init__(
self,
checkpoint="openai/whisper-large-v2",
dtype=jnp.float32,
batch_size=None,
max_length=None,
**kwargs,
):
"""
Args:
checkpoint (`str`, *optional*, defaults to `"openai/whisper-large-v2"`):
The Whisper checkpoint to use with the pipeline. Must be an available checkpoint on the Hugging Face Hub
with Flax weights.
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
`jax.numpy.bfloat16` (on TPUs). This can be used to enable half-precision inference on GPUs or TPUs.
If specified all the computation will be performed with the given `dtype`. **Note that this only
specifies the dtype of the computation and does not influence the dtype of model parameters.**
batch_size (`int`, *optional*, defaults to the minimum per-device batch size, i.e. `jax.local_device_count()`):
The batch size to be used in chunking transcription. Beneficial for transcribing long audio files. Passing
a batch size in the `__init__` method will be superseded by any batch size passed to the `__call__` method.
max_length (`int`, *optional*):
The maximum number of tokens to generate. Defaults to `model.config.max_length`.
"""
self.checkpoint = checkpoint
self.dtype = dtype
self.feature_extractor = FlaxWhisperFeatureExtractor.from_pretrained(self.checkpoint)
self.tokenizer = WhisperTokenizerFast.from_pretrained(self.checkpoint)
self.model, self.params = FlaxWhisperForConditionalGeneration.from_pretrained(
self.checkpoint,
_do_init=False,
dtype=self.dtype,
**kwargs,
)
self.max_length = max_length if max_length is not None else self.model.generation_config.max_length
self.min_batch_size = jax.local_device_count()
self.batch_size = (
batch_size if batch_size is not None else self.min_batch_size
) # we need a minimum of 1 batch per-device
def generate(
params,
input_features,
forced_decoder_ids,
return_timestamps,
num_beams,
length_penalty,
do_sample,
top_k,
temperature,
):
output_ids = self.model.pipeline_generate(
input_features,
params=params,
forced_decoder_ids=forced_decoder_ids,
return_timestamps=return_timestamps,
max_length=self.max_length,
num_beams=num_beams,
length_penalty=length_penalty,
do_sample=do_sample,
top_k=top_k,
temperature=temperature,
)
return output_ids
self.params = jax_utils.replicate(self.params)
self.p_generate = jax.pmap(
generate,
"input_features",
in_axes=(0, 0, None, None, None, None, None, None, None),
static_broadcasted_argnums=(
3,
4,
5,
6,
7,
8,
),
)
|
Args:
checkpoint (`str`, *optional*, defaults to `"openai/whisper-large-v2"`):
The Whisper checkpoint to use with the pipeline. Must be an available checkpoint on the Hugging Face Hub
with Flax weights.
dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
`jax.numpy.bfloat16` (on TPUs). This can be used to enable half-precision inference on GPUs or TPUs.
If specified all the computation will be performed with the given `dtype`. **Note that this only
specifies the dtype of the computation and does not influence the dtype of model parameters.**
batch_size (`int`, *optional*, defaults to the minimum per-device batch size, i.e. `jax.local_device_count()`):
The batch size to be used in chunking transcription. Beneficial for transcribing long audio files. Passing
a batch size in the `__init__` method will be superseded by any batch size passed to the `__call__` method.
max_length (`int`, *optional*):
The maximum number of tokens to generate. Defaults to `model.config.max_length`.
|
__init__
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/pipeline.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/pipeline.py
|
MIT
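A hypothetical instantiation of the pipeline class that wraps this `__init__` (the class name and import path are assumptions for illustration; the keyword arguments match the signature above, and real use needs a JAX runtime plus the checkpoint weights).

import jax.numpy as jnp
# from distil_whisper.pipeline import FlaxWhisperPipeline  # assumed name/path

pipeline = FlaxWhisperPipeline(            # assumed class name
    checkpoint="openai/whisper-large-v2",  # any Hub checkpoint with Flax weights
    dtype=jnp.bfloat16,                    # half precision, e.g. on TPU
    batch_size=16,                         # a multiple of jax.local_device_count()
)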
|
def __call__(
self,
inputs,
chunk_length_s=30.0,
stride_length_s=None,
batch_size=None,
language=None,
task=None,
return_timestamps=None,
num_beams=1,
length_penalty=1.0,
do_sample=False,
top_k=50,
temperature=1.0,
):
"""
Transcribe an audio input sequence to a text transcription, optionally with timestamps.
Args:
inputs (`np.ndarray` or `bytes` or `str` or `dict`):
The input is either:
- `str` that is the filename of the audio file, the file will be read at the correct sampling rate
to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system.
- `bytes` is the byte content of an audio file and is interpreted by *ffmpeg* in the
same way.
- (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`)
Raw audio assumed to be at the correct sampling rate (16kHz). Note that no further sampling
rate check will be done.
- `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this
pipeline do the resampling. The dict must be in the format `{"sampling_rate": int, "array":
np.array}`. Optionally an additional argument `"stride": (left: int, right: int)` can be used to
ask the pipeline to ignore the first `left` samples and the last `right` samples in
decoding (but used at inference to provide more context to the model). In general, this additional
stride argument is not required.
chunk_length_s (`float`, *optional*, defaults to 30.0):
The input length for each chunk. If `chunk_length_s = 0` then chunking is disabled. By default, the chunk
length is set to 30.0s, equal to Whisper's context window.
stride_length_s (`float`, *optional*, defaults to `chunk_length_s / 6`):
The length of stride on the left and right of each chunk. Used only with `chunk_length_s > 0`. This enables
the model to *see* more context and infer letters better than without this context but the pipeline
discards the stride bits at the end to make the final reconstitution as perfect as possible.
<Tip>
For more information on how to effectively use `stride_length_s`, refer to the [ASR chunking
blog post](https://huggingface.co/blog/asr-chunking).
</Tip>
batch_size (`int`, *optional*, defaults to the minimum per-device batch size, i.e. `jax.local_device_count()`):
The batch size to be used in chunking transcription. Beneficial for transcribing long audio files. Passing
a batch size in the `__call__` method will supersede any batch size passed to the `__init__`.
task (`str`, *optional*):
Task to use for generation, either `"transcribe"` or `"translate"`. Defaults to `"transcribe"`.
language (`str`, *optional*):
Language token to use for generation, can be either in the form of `"<|en|>"`, `"en"` or `"english"`.
Defaults to `None`, meaning the language is automatically inferred from the audio input.
return_timestamps (*optional*, `bool`):
Whether to return timestamps in the prediction. Defaults to False. If set to true, the pipeline
will return two keys in the output dictionary: `"text"` containing the text transcription, and `"chunks"`
containing the transcription segments chunked by their utterance-level timestamps.
length_penalty (*optional*, `float`):
Exponential penalty to the length that is used with beam-based generation. It is applied as an
exponent to the sequence length, which in turn is used to divide the score of the sequence. Since
the score is the log likelihood of the sequence (i.e. negative), length_penalty > 1.0 promotes
longer sequences, while length_penalty < 1.0 encourages shorter sequences.
do_sample (*optional*, `bool`):
Whether or not to use sampling; use greedy decoding otherwise.
top_k (*optional*, `int`):
The number of the highest probability vocabulary tokens to keep for top-k-filtering.
temperature (*optional*, `float`):
The value used to modulate the next token probabilities if sampling.
Return:
`Dict`: A dictionary with the following keys:
- **text** (`str` ) -- The recognised text.
- **chunks** (*optional*, `List[Dict]`)
When using `return_timestamps`, the `chunks` will become a list containing all the various text
chunks identified by the model, *e.g.* `[{"text": "hi ", "timestamps": (0.5, 0.9)}, {"text":
"there", "timestamps": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing
`"".join(chunk["text"] for chunk in output["chunks"])`.
"""
batch_size = batch_size if batch_size is not None else self.batch_size
if batch_size % self.min_batch_size != 0:
raise ValueError(
f"Batch size must be a multiple of the number of JAX devices, but got batch size {batch_size} and num devices {self.min_batch_size}."
)
dataloader = self.preprocess_batch(
inputs, chunk_length_s=chunk_length_s, stride_length_s=stride_length_s, batch_size=batch_size
)
model_outputs = []
# iterate over our chunked audio samples
for batch in dataloader:
model_outputs.append(
self.forward(
batch,
batch_size=batch_size,
language=language,
task=task,
return_timestamps=return_timestamps,
num_beams=num_beams,
length_penalty=length_penalty,
do_sample=do_sample,
top_k=top_k,
temperature=temperature,
)
)
post_processed = self.postprocess(model_outputs, return_timestamps=return_timestamps)
return post_processed
|
Transcribe an audio input sequence to a text transcription, optionally with timestamps.
Args:
inputs (`np.ndarray` or `bytes` or `str` or `dict`):
The input is either:
- `str` that is the filename of the audio file, the file will be read at the correct sampling rate
to get the waveform using *ffmpeg*. This requires *ffmpeg* to be installed on the system.
- `bytes` is the byte content of an audio file and is interpreted by *ffmpeg* in the
same way.
- (`np.ndarray` of shape (n, ) of type `np.float32` or `np.float64`)
Raw audio assumed to be at the correct sampling rate (16kHz). Note that no further sampling
rate check will be done.
- `dict` form can be used to pass raw audio sampled at arbitrary `sampling_rate` and let this
pipeline do the resampling. The dict must be in the format `{"sampling_rate": int, "array":
np.array}`. Optionally an additional argument `"stride": (left: int, right: int)` can be used to
ask the pipeline to ignore the first `left` samples and the last `right` samples in
decoding (but used at inference to provide more context to the model). In general, this additional
stride argument is not required.
chunk_length_s (`float`, *optional*, defaults to 30.0):
The input length for each chunk. If `chunk_length_s = 0` then chunking is disabled. By default, the chunk
length is set to 30.0s, equal to Whisper's context window.
stride_length_s (`float`, *optional*, defaults to `chunk_length_s / 6`):
The length of stride on the left and right of each chunk. Used only with `chunk_length_s > 0`. This enables
the model to *see* more context and infer letters better than without this context but the pipeline
discards the stride bits at the end to make the final reconstitution as perfect as possible.
<Tip>
For more information on how to effectively use `stride_length_s`, refer to the [ASR chunking
blog post](https://huggingface.co/blog/asr-chunking).
</Tip>
batch_size (`int`, *optional*, defaults to the minimum per-device batch size, i.e. `jax.local_device_count()`):
The batch size to be used in chunking transcription. Beneficial for transcribing long audio files. Passing
a batch size in the `__call__` method will supersede any batch size passed to the `__init__`.
task (`str`, *optional*):
Task to use for generation, either `"transcribe"` or `"translate"`. Defaults to `"transcribe"`.
language (`str`, *optional*):
Language token to use for generation, can be either in the form of `"<|en|>"`, `"en"` or `"english"`.
Defaults to `None`, meaning the language is automatically inferred from the audio input.
return_timestamps (*optional*, `bool`):
Whether to return timestamps in the prediction. Defaults to False. If set to true, the pipeline
will return two keys in the output dictionary: `"text"` containing the text transcription, and `"chunks"`
containing the transcription segments chunked by their utterance-level timestamps.
length_penalty (*optional*, `float`):
Exponential penalty to the length that is used with beam-based generation. It is applied as an
exponent to the sequence length, which in turn is used to divide the score of the sequence. Since
the score is the log likelihood of the sequence (i.e. negative), length_penalty > 1.0 promotes
longer sequences, while length_penalty < 1.0 encourages shorter sequences.
do_sample (*optional*, `bool`):
Whether or not to use sampling; use greedy decoding otherwise.
top_k (*optional*, `int`):
The number of the highest probability vocabulary tokens to keep for top-k-filtering.
temperature (*optional*, `float`):
The value used to modulate the next token probabilities if sampling.
Return:
`Dict`: A dictionary with the following keys:
- **text** (`str` ) -- The recognised text.
- **chunks** (*optional*, `List[Dict]`)
When using `return_timestamps`, the `chunks` will become a list containing all the various text
chunks identified by the model, *e.g.* `[{"text": "hi ", "timestamps": (0.5, 0.9)}, {"text":
"there", "timestamps": (1.0, 1.5)}]`. The original full text can roughly be recovered by doing
`"".join(chunk["text"] for chunk in output["chunks"])`.
|
__call__
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/pipeline.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/pipeline.py
|
MIT
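A hypothetical call matching the docstring above, continuing the `pipeline` sketch from the `__init__` record ("audio.mp3" is a placeholder path; reading it requires ffmpeg).

output = pipeline(
    "audio.mp3",
    chunk_length_s=30.0,
    task="transcribe",
    return_timestamps=True,
)
print(output["text"])        # the full transcription
for chunk in output["chunks"]:
    print(chunk)             # per-segment text with utterance-level timestamps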
|
def _split_variables_and_axes(
variables_and_axes: FrozenVariableDict,
) -> Tuple[FrozenVariableDict, FrozenVariableDict]:
"""Splits `variables_and_axes` into two separate dicts with the same keys."""
# For each `key`, `key_axes` (if any) are its axes in `variables_and_axes`.
variables = {}
axes = {}
for k, v in variables_and_axes.items():
if k.endswith("_axes"):
axes[k[:-5]] = v # k without "_axes".
_validate_params_axes(v, variables_and_axes[k[:-5]]) # k without "_axes".
else:
variables[k] = v
return flax.core.freeze(variables), flax.core.freeze(axes)
|
Splits `variables_and_axes` into two separate dicts with the same keys.
|
_split_variables_and_axes
|
python
|
huggingface/distil-whisper
|
training/flax/distil_whisper/train_state.py
|
https://github.com/huggingface/distil-whisper/blob/master/training/flax/distil_whisper/train_state.py
|
MIT
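The suffix-based split in miniature; a standalone sketch using plain dicts (no flax) and skipping the axis validation.

variables_and_axes = {
    "params": {"w": [1.0, 2.0]},
    "params_axes": {"w": ("embed",)},
}
variables, axes = {}, {}
for k, v in variables_and_axes.items():
    if k.endswith("_axes"):
        axes[k[:-5]] = v  # key with the "_axes" suffix stripped
    else:
        variables[k] = v
print(variables)  # {'params': {'w': [1.0, 2.0]}}
print(axes)       # {'params': {'w': ('embed',)}}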
|
def emailUser(profile, SUBJECT="", BODY=""):
"""
Sends an email.
Arguments:
profile -- contains information related to the user (e.g., email
address)
SUBJECT -- subject line of the email
BODY -- body text of the email
"""
def generateSMSEmail(profile):
"""
Generates an email from a user's phone number based on their carrier.
"""
if profile['carrier'] is None or not profile['phone_number']:
return None
return str(profile['phone_number']) + "@" + profile['carrier']
if profile['prefers_email'] and profile['gmail_address']:
# add footer
if BODY:
BODY = profile['first_name'] + \
",<br><br>Here are your top headlines:" + BODY
BODY += "<br>Sent from your Jasper"
recipient = profile['gmail_address']
if profile['first_name'] and profile['last_name']:
recipient = profile['first_name'] + " " + \
profile['last_name'] + " <%s>" % recipient
else:
recipient = generateSMSEmail(profile)
if not recipient:
return False
try:
if 'mailgun' in profile:
user = profile['mailgun']['username']
password = profile['mailgun']['password']
server = 'smtp.mailgun.org'
else:
user = profile['gmail_address']
password = profile['gmail_password']
server = 'smtp.gmail.com'
sendEmail(SUBJECT, BODY, recipient, user,
"Jasper <jasper>", password, server)
return True
except Exception:
return False
|
Sends an email.
Arguments:
profile -- contains information related to the user (e.g., email
address)
SUBJECT -- subject line of the email
BODY -- body text of the email
|
emailUser
|
python
|
jasperproject/jasper-client
|
client/app_utils.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/app_utils.py
|
MIT
|
def generateSMSEmail(profile):
"""
Generates an email from a user's phone number based on their carrier.
"""
if profile['carrier'] is None or not profile['phone_number']:
return None
return str(profile['phone_number']) + "@" + profile['carrier']
|
Generates an email from a user's phone number based on their carrier.
|
generateSMSEmail
|
python
|
jasperproject/jasper-client
|
client/app_utils.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/app_utils.py
|
MIT
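Usage in miniature, calling the `generateSMSEmail` defined above with a made-up profile (the carrier value is a placeholder SMS gateway domain):

profile = {'phone_number': '5551234567', 'carrier': 'txt.example.com'}
print(generateSMSEmail(profile))  # 5551234567@txt.example.com
print(generateSMSEmail({'phone_number': '', 'carrier': None}))  # None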
|
def getTimezone(profile):
"""
Returns the pytz timezone for a given profile.
Arguments:
profile -- contains information related to the user (e.g., email
address)
"""
try:
return timezone(profile['timezone'])
except Exception:
return None
|
Returns the pytz timezone for a given profile.
Arguments:
profile -- contains information related to the user (e.g., email
address)
|
getTimezone
|
python
|
jasperproject/jasper-client
|
client/app_utils.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/app_utils.py
|
MIT
|
def generateTinyURL(URL):
"""
Generates a compressed URL.
Arguments:
URL -- the original URL to be compressed
"""
target = "http://tinyurl.com/api-create.php?url=" + URL
response = urllib2.urlopen(target)
return response.read()
|
Generates a compressed URL.
Arguments:
URL -- the original URL to be compressed
|
generateTinyURL
|
python
|
jasperproject/jasper-client
|
client/app_utils.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/app_utils.py
|
MIT
|
def __init__(self, mic, profile):
"""
Instantiates a new Brain object, which cross-references user
input with a list of modules. Note that the order of brain.modules
matters, as the Brain will cease execution on the first module
that accepts a given input.
Arguments:
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
"""
self.mic = mic
self.profile = profile
self.modules = self.get_modules()
self._logger = logging.getLogger(__name__)
|
Instantiates a new Brain object, which cross-references user
input with a list of modules. Note that the order of brain.modules
matters, as the Brain will cease execution on the first module
that accepts a given input.
Arguments:
mic -- used to interact with the user (for both input and output)
profile -- contains information related to the user (e.g., phone
number)
|
__init__
|
python
|
jasperproject/jasper-client
|
client/brain.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/brain.py
|
MIT
|
def get_modules(cls):
"""
Dynamically loads all the modules in the modules folder and sorts
them by the PRIORITY key. If no PRIORITY is defined for a given
module, a priority of 0 is assumed.
"""
logger = logging.getLogger(__name__)
locations = [jasperpath.PLUGIN_PATH]
logger.debug("Looking for modules in: %s",
', '.join(["'%s'" % location for location in locations]))
modules = []
for finder, name, ispkg in pkgutil.walk_packages(locations):
try:
loader = finder.find_module(name)
mod = loader.load_module(name)
except Exception:
logger.warning("Skipped module '%s' due to an error.", name,
exc_info=True)
else:
if hasattr(mod, 'WORDS'):
logger.debug("Found module '%s' with words: %r", name,
mod.WORDS)
modules.append(mod)
else:
logger.warning("Skipped module '%s' because it misses " +
"the WORDS constant.", name)
modules.sort(key=lambda mod: mod.PRIORITY if hasattr(mod, 'PRIORITY')
else 0, reverse=True)
return modules
|
Dynamically loads all the modules in the modules folder and sorts
them by the PRIORITY key. If no PRIORITY is defined for a given
module, a priority of 0 is assumed.
|
get_modules
|
python
|
jasperproject/jasper-client
|
client/brain.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/brain.py
|
MIT
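A minimal module skeleton of the kind `get_modules` discovers; hypothetical, but it follows the contract the loader checks for (a `WORDS` constant, an optional `PRIORITY`, and the `isValid`/`handle` pair that `Brain.query` invokes):

# plugins/echo.py -- hypothetical Jasper module
WORDS = ["ECHO"]
PRIORITY = 1  # optional; modules without it are sorted with priority 0

def isValid(text):
    return "echo" in text.lower()

def handle(text, mic, profile):
    mic.say("You said: %s" % text)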
|
def query(self, texts):
"""
Passes user input to the appropriate module, testing it against
each candidate module's isValid function.
Arguments:
texts -- a list of user input transcriptions, typically speech, to be parsed by a module
"""
for module in self.modules:
for text in texts:
if module.isValid(text):
self._logger.debug("'%s' is a valid phrase for module " +
"'%s'", text, module.__name__)
try:
module.handle(text, self.mic, self.profile)
except Exception:
self._logger.error('Failed to execute module',
exc_info=True)
self.mic.say("I'm sorry. I had some trouble with " +
"that operation. Please try again later.")
else:
self._logger.debug("Handling of phrase '%s' by " +
"module '%s' completed", text,
module.__name__)
finally:
return
self._logger.debug("No module was able to handle any of these " +
"phrases: %r", texts)
|
Passes user input to the appropriate module, testing it against
each candidate module's isValid function.
Arguments:
texts -- a list of user input transcriptions, typically speech, to be parsed by a module
|
query
|
python
|
jasperproject/jasper-client
|
client/brain.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/brain.py
|
MIT
|
def handleForever(self):
"""
Delegates user input to the handling function when activated.
"""
self._logger.info("Starting to handle conversation with keyword '%s'.",
self.persona)
while True:
# Print notifications until empty
notifications = self.notifier.getAllNotifications()
for notif in notifications:
self._logger.info("Received notification: '%s'", str(notif))
self._logger.debug("Started listening for keyword '%s'",
self.persona)
threshold, transcribed = self.mic.passiveListen(self.persona)
self._logger.debug("Stopped listening for keyword '%s'",
self.persona)
if not transcribed or not threshold:
self._logger.info("Nothing has been said or transcribed.")
continue
self._logger.info("Keyword '%s' has been said!", self.persona)
self._logger.debug("Started to listen actively with threshold: %r",
threshold)
input = self.mic.activeListenToAllOptions(threshold)
self._logger.debug("Stopped to listen actively with threshold: %r",
threshold)
if input:
self.brain.query(input)
else:
self.mic.say("Pardon?")
|
Delegates user input to the handling function when activated.
|
handleForever
|
python
|
jasperproject/jasper-client
|
client/conversation.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/conversation.py
|
MIT
|
def check_network_connection(server="www.google.com"):
"""
Checks if jasper can connect to a network server.
Arguments:
server -- (optional) the server to connect with (Default:
"www.google.com")
Returns:
True or False
"""
logger = logging.getLogger(__name__)
logger.debug("Checking network connection to server '%s'...", server)
try:
# see if we can resolve the host name -- tells us if there is
# a DNS listening
host = socket.gethostbyname(server)
# connect to the host -- tells us if the host is actually
# reachable
socket.create_connection((host, 80), 2)
except Exception:
logger.debug("Network connection not working")
return False
else:
logger.debug("Network connection working")
return True
|
Checks if jasper can connect to a network server.
Arguments:
server -- (optional) the server to connect with (Default:
"www.google.com")
Returns:
True or False
|
check_network_connection
|
python
|
jasperproject/jasper-client
|
client/diagnose.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/diagnose.py
|
MIT
|
def check_executable(executable):
"""
Checks if an executable exists in $PATH.
Arguments:
executable -- the name of the executable (e.g. "echo")
Returns:
True or False
"""
logger = logging.getLogger(__name__)
logger.debug("Checking executable '%s'...", executable)
executable_path = find_executable(executable)
found = executable_path is not None
if found:
logger.debug("Executable '%s' found: '%s'", executable,
executable_path)
else:
logger.debug("Executable '%s' not found", executable)
return found
|
Checks if an executable exists in $PATH.
Arguments:
executable -- the name of the executable (e.g. "echo")
Returns:
True or False
|
check_executable
|
python
|
jasperproject/jasper-client
|
client/diagnose.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/diagnose.py
|
MIT
|
def check_python_import(package_or_module):
"""
Checks if a python package or module is importable.
Arguments:
package_or_module -- the package or module name to check
Returns:
True or False
"""
logger = logging.getLogger(__name__)
logger.debug("Checking python import '%s'...", package_or_module)
loader = pkgutil.get_loader(package_or_module)
found = loader is not None
if found:
logger.debug("Python %s '%s' found: %r",
"package" if loader.is_package(package_or_module)
else "module", package_or_module, loader.get_filename())
else:
logger.debug("Python import '%s' not found", package_or_module)
return found
|
Checks if a python package or module is importable.
Arguments:
package_or_module -- the package or module name to check
Returns:
True or False
|
check_python_import
|
python
|
jasperproject/jasper-client
|
client/diagnose.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/diagnose.py
|
MIT
|
def get_pip_requirements(fname=os.path.join(jasperpath.LIB_PATH,
'requirements.txt')):
"""
Gets the PIP requirements from a text file. If the file does not exist
or is not readable, it returns None.
Arguments:
fname -- (optional) the requirement text file (Default:
"client/requirements.txt")
Returns:
A list of pip requirement objects or None
"""
logger = logging.getLogger(__name__)
if os.access(fname, os.R_OK):
reqs = list(pip.req.parse_requirements(fname))
logger.debug("Found %d PIP requirements in file '%s'", len(reqs),
fname)
return reqs
else:
logger.debug("PIP requirements file '%s' not found or not readable",
fname)
|
Gets the PIP requirements from a text file. If the file does not exist
or is not readable, it returns None.
Arguments:
fname -- (optional) the requirement text file (Default:
"client/requirements.txt")
Returns:
A list of pip requirement objects or None
|
get_pip_requirements
|
python
|
jasperproject/jasper-client
|
client/diagnose.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/diagnose.py
|
MIT
|
def get_git_revision():
"""
Gets the current git revision hash as hex string. If the git executable is
missing or git is unable to get the revision, None is returned
Returns:
A hex string or None
"""
logger = logging.getLogger(__name__)
if not check_executable('git'):
logger.warning("'git' command not found, git revision not detectable")
return None
output = subprocess.check_output(['git', 'rev-parse', 'HEAD']).strip()
if not output:
logger.warning("Couldn't detect git revision (not a git repository?)")
return None
return output
|
Gets the current git revision hash as hex string. If the git executable is
missing or git is unable to get the revision, None is returned
Returns:
A hex string or None
|
get_git_revision
|
python
|
jasperproject/jasper-client
|
client/diagnose.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/diagnose.py
|
MIT
|
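One caveat with get_git_revision above: outside a git repository, 'git rev-parse HEAD' exits non-zero and subprocess.check_output raises CalledProcessError, so the empty-output branch is rarely reached. A slightly more defensive sketch of the same idea:

import subprocess

def get_git_revision_sketch():
    # Catch the CalledProcessError raised outside a repository instead
    # of relying solely on empty output.
    try:
        output = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
    except (OSError, subprocess.CalledProcessError):
        return None
    return output.strip() or None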
def run():
"""
Performs a series of checks against the system and writes the results to
the logging system.
Returns:
The number of failed checks as integer
"""
logger = logging.getLogger(__name__)
# Set loglevel of this module to at least INFO
loglvl = logger.getEffectiveLevel()
if loglvl == logging.NOTSET or loglvl > logging.INFO:
logger.setLevel(logging.INFO)
logger.info("Starting jasper diagnostic at %s" % time.strftime("%c"))
logger.info("Git revision: %r", get_git_revision())
failed_checks = 0
if not check_network_connection():
failed_checks += 1
for executable in ['phonetisaurus-g2p', 'espeak', 'say']:
if not check_executable(executable):
logger.warning("Executable '%s' is missing in $PATH", executable)
failed_checks += 1
# get_pip_requirements() returns None if the requirements file is missing
for req in get_pip_requirements() or []:
logger.debug("Checking PIP package '%s'...", req.name)
if not req.check_if_exists():
logger.warning("PIP package '%s' is missing", req.name)
failed_checks += 1
else:
logger.debug("PIP package '%s' found", req.name)
for fname in [os.path.join(jasperpath.APP_PATH, os.pardir, "phonetisaurus",
"g014b2b.fst")]:
logger.debug("Checking file '%s'...", fname)
if not os.access(fname, os.R_OK):
logger.warning("File '%s' is missing", fname)
failed_checks += 1
else:
logger.debug("File '%s' found", fname)
if not failed_checks:
logger.info("All checks passed")
else:
logger.info("%d checks failed" % failed_checks)
return failed_checks
|
Performs a series of checks against the system and writes the results to
the logging system.
Returns:
The number of failed checks as integer
|
run
|
python
|
jasperproject/jasper-client
|
client/diagnose.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/diagnose.py
|
MIT
|
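Because run() returns the number of failed checks, it can double as a process exit status when the module is executed directly. A typical entry-point sketch, assuming it sits at the bottom of diagnose.py next to run():

import logging
import sys

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # A non-zero exit status signals that at least one check failed.
    sys.exit(run())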
def __init__(self, speaker, passive_stt_engine, active_stt_engine):
"""
Initiates the Mic instance.
Arguments:
speaker -- handles platform-independent audio output
passive_stt_engine -- performs STT while Jasper is in passive listen
mode
active_stt_engine -- performs STT while Jasper is in active listen mode
"""
self._logger = logging.getLogger(__name__)
self.speaker = speaker
self.passive_stt_engine = passive_stt_engine
self.active_stt_engine = active_stt_engine
self._logger.info("Initializing PyAudio. ALSA/Jack error messages " +
"that pop up during this process are normal and " +
"can usually be safely ignored.")
self._audio = pyaudio.PyAudio()
self._logger.info("Initialization of PyAudio completed.")
|
Initiates the Mic instance.
Arguments:
speaker -- handles platform-independent audio output
passive_stt_engine -- performs STT while Jasper is in passive listen
mode
active_stt_engine -- performs STT while Jasper is in active listen mode
|
__init__
|
python
|
jasperproject/jasper-client
|
client/mic.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/mic.py
|
MIT
|
def passiveListen(self, PERSONA):
"""
Listens for PERSONA in everyday sound. Times out after LISTEN_TIME, so
needs to be restarted.
"""
THRESHOLD_MULTIPLIER = 1.8
RATE = 16000
CHUNK = 1024
# number of seconds to allow to establish threshold
THRESHOLD_TIME = 1
# number of seconds to listen before forcing restart
LISTEN_TIME = 10
# prepare recording stream
stream = self._audio.open(format=pyaudio.paInt16,
channels=1,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
# stores the audio data
frames = []
# stores the lastN score values
lastN = [i for i in range(30)]
# calculate the long run average, and thereby the proper threshold
for i in range(0, RATE / CHUNK * THRESHOLD_TIME):
data = stream.read(CHUNK)
frames.append(data)
# save this data point as a score
lastN.pop(0)
lastN.append(self.getScore(data))
average = sum(lastN) / len(lastN)
# this will be the benchmark to cause a disturbance over!
THRESHOLD = average * THRESHOLD_MULTIPLIER
# save some memory for sound data
frames = []
# flag raised when sound disturbance detected
didDetect = False
# start passively listening for disturbance above threshold
for i in range(0, RATE / CHUNK * LISTEN_TIME):
data = stream.read(CHUNK)
frames.append(data)
score = self.getScore(data)
if score > THRESHOLD:
didDetect = True
break
# no use continuing if no flag raised
if not didDetect:
print "No disturbance detected"
stream.stop_stream()
stream.close()
return (None, None)
# cut off any recording before this disturbance was detected
frames = frames[-20:]
# otherwise, let's keep recording for a few seconds and save the file
DELAY_MULTIPLIER = 1
for i in range(0, RATE / CHUNK * DELAY_MULTIPLIER):
data = stream.read(CHUNK)
frames.append(data)
# save the audio data
stream.stop_stream()
stream.close()
with tempfile.NamedTemporaryFile(mode='w+b') as f:
wav_fp = wave.open(f, 'wb')
wav_fp.setnchannels(1)
wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
wav_fp.setframerate(RATE)
wav_fp.writeframes(''.join(frames))
wav_fp.close()
f.seek(0)
# check if PERSONA was said
transcribed = self.passive_stt_engine.transcribe(f)
if any(PERSONA in phrase for phrase in transcribed):
return (THRESHOLD, PERSONA)
return (False, transcribed)
|
Listens for PERSONA in everyday sound. Times out after LISTEN_TIME, so
needs to be restarted.
|
passiveListen
|
python
|
jasperproject/jasper-client
|
client/mic.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/mic.py
|
MIT
|
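passiveListen scores each audio chunk via self.getScore, which is not part of this record. A plausible sketch of such a loudness score, assuming an RMS measure over 16-bit samples via the standard-library audioop module (the real method may scale or smooth the value differently). Note also that the range(0, RATE / CHUNK * ...) loops above rely on Python 2 integer division; under Python 3 they would need the // operator.

import audioop

def get_score_sketch(data):
    # Root-mean-square amplitude of a chunk of 16-bit mono PCM audio;
    # louder input yields a higher score.
    return audioop.rms(data, 2)  # 2 = bytes per 16-bit sample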
def activeListen(self, THRESHOLD=None, LISTEN=True, MUSIC=False):
"""
Records until a second of silence or times out after 12 seconds
Returns the first matching string or None
"""
options = self.activeListenToAllOptions(THRESHOLD, LISTEN, MUSIC)
if options:
return options[0]
|
Records until a second of silence or times out after 12 seconds
Returns the first matching string or None
|
activeListen
|
python
|
jasperproject/jasper-client
|
client/mic.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/mic.py
|
MIT
|
def activeListenToAllOptions(self, THRESHOLD=None, LISTEN=True,
MUSIC=False):
"""
Records until a second of silence or times out after 12 seconds
Returns a list of the matching options or None
"""
RATE = 16000
CHUNK = 1024
LISTEN_TIME = 12
# check if no threshold provided
if THRESHOLD is None:
THRESHOLD = self.fetchThreshold()
self.speaker.play(jasperpath.data('audio', 'beep_hi.wav'))
# prepare recording stream
stream = self._audio.open(format=pyaudio.paInt16,
channels=1,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
frames = []
# increasing the range # results in longer pause after command
# generation
lastN = [THRESHOLD * 1.2 for i in range(30)]
for i in range(0, RATE / CHUNK * LISTEN_TIME):
data = stream.read(CHUNK)
frames.append(data)
score = self.getScore(data)
lastN.pop(0)
lastN.append(score)
average = sum(lastN) / float(len(lastN))
# TODO: 0.8 should not be a MAGIC NUMBER!
if average < THRESHOLD * 0.8:
break
self.speaker.play(jasperpath.data('audio', 'beep_lo.wav'))
# save the audio data
stream.stop_stream()
stream.close()
with tempfile.SpooledTemporaryFile(mode='w+b') as f:
wav_fp = wave.open(f, 'wb')
wav_fp.setnchannels(1)
wav_fp.setsampwidth(pyaudio.get_sample_size(pyaudio.paInt16))
wav_fp.setframerate(RATE)
wav_fp.writeframes(''.join(frames))
wav_fp.close()
f.seek(0)
return self.active_stt_engine.transcribe(f)
|
Records until a second of silence or times out after 12 seconds
Returns a list of the matching options or None
|
activeListenToAllOptions
|
python
|
jasperproject/jasper-client
|
client/mic.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/mic.py
|
MIT
|
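The silence cutoff in activeListenToAllOptions keeps a rolling window of the last 30 chunk scores and stops recording once their average drops below 0.8 * THRESHOLD. A self-contained sketch of that rolling-average logic on synthetic scores:

THRESHOLD = 100.0
lastN = [THRESHOLD * 1.2] * 30

def silence_detected(score):
    # Push one chunk score into the 30-sample rolling window and report
    # whether the average has fallen below the 0.8 * THRESHOLD cutoff.
    lastN.pop(0)
    lastN.append(score)
    return sum(lastN) / float(len(lastN)) < THRESHOLD * 0.8

# Feeding consistently quiet scores eventually trips the cutoff
# (here after 11 pushes, once enough loud samples have aged out):
print(any(silence_detected(10.0) for _ in range(60)))  # True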
def handleEmailNotifications(self, lastDate):
"""Places new Gmail notifications in the Notifier's queue."""
emails = Gmail.fetchUnreadEmails(self.profile, since=lastDate)
if emails:
lastDate = Gmail.getMostRecentDate(emails)
def styleEmail(e):
return "New email from %s." % Gmail.getSender(e)
for e in emails:
self.q.put(styleEmail(e))
return lastDate
|
Places new Gmail notifications in the Notifier's queue.
|
handleEmailNotifications
|
python
|
jasperproject/jasper-client
|
client/notifier.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/notifier.py
|
MIT
|
def getNotification(self):
"""Returns a notification. Note that this function is consuming."""
try:
notif = self.q.get(block=False)
return notif
except Queue.Empty:
return None
|
Returns a notification. Note that this function is consuming.
|
getNotification
|
python
|
jasperproject/jasper-client
|
client/notifier.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/notifier.py
|
MIT
|
def getAllNotifications(self):
"""
Return a list of notifications in chronological order.
Note that this function is consuming, so consecutive calls
will yield different results.
"""
notifs = []
notif = self.getNotification()
while notif:
notifs.append(notif)
notif = self.getNotification()
return notifs
|
Return a list of notifications in chronological order.
Note that this function is consuming, so consecutive calls
will yield different results.
|
getAllNotifications
|
python
|
jasperproject/jasper-client
|
client/notifier.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/notifier.py
|
MIT
|
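The three Notifier methods above form a simple producer/consumer pair over a queue: notification handlers put formatted strings in, and the getters drain them without blocking. A minimal sketch of the same draining idiom (Queue is the Python 2 module name used by this codebase; the message strings are illustrative):

import Queue  # named 'queue' in Python 3

q = Queue.Queue()
q.put("New email from Alice.")
q.put("New email from Bob.")

notifs = []
while True:
    try:
        notifs.append(q.get(block=False))
    except Queue.Empty:
        break
print(notifs)  # ['New email from Alice.', 'New email from Bob.']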
def __init__(self, vocabulary, hmm_dir="/usr/local/share/" +
"pocketsphinx/model/hmm/en_US/hub4wsj_sc_8k"):
"""
Initiates the pocketsphinx instance.
Arguments:
vocabulary -- a PocketsphinxVocabulary instance
hmm_dir -- the path of the Hidden Markov Model (HMM)
"""
self._logger = logging.getLogger(__name__)
# quirky bug where first import doesn't work
try:
import pocketsphinx as ps
except:
import pocketsphinx as ps
with tempfile.NamedTemporaryFile(prefix='psdecoder_',
suffix='.log', delete=False) as f:
self._logfile = f.name
self._logger.debug("Initializing PocketSphinx Decoder with hmm_dir " +
"'%s'", hmm_dir)
# Perform some checks on the hmm_dir so that we can display more
# meaningful error messages if necessary
if not os.path.exists(hmm_dir):
msg = ("hmm_dir '%s' does not exist! Please make sure that you " +
"have set the correct hmm_dir in your profile.") % hmm_dir
self._logger.error(msg)
raise RuntimeError(msg)
# Let's check if all required files are there. Refer to:
# http://cmusphinx.sourceforge.net/wiki/acousticmodelformat
# for details
missing_hmm_files = []
for fname in ('mdef', 'feat.params', 'means', 'noisedict',
'transition_matrices', 'variances'):
if not os.path.exists(os.path.join(hmm_dir, fname)):
missing_hmm_files.append(fname)
mixweights = os.path.exists(os.path.join(hmm_dir, 'mixture_weights'))
sendump = os.path.exists(os.path.join(hmm_dir, 'sendump'))
if not mixweights and not sendump:
# We only need mixture_weights OR sendump
missing_hmm_files.append('mixture_weights or sendump')
if missing_hmm_files:
self._logger.warning("hmm_dir '%s' is missing files: %s. Please " +
"make sure that you have set the correct " +
"hmm_dir in your profile.",
hmm_dir, ', '.join(missing_hmm_files))
self._decoder = ps.Decoder(hmm=hmm_dir, logfn=self._logfile,
**vocabulary.decoder_kwargs)
|
Initiates the pocketsphinx instance.
Arguments:
vocabulary -- a PocketsphinxVocabulary instance
hmm_dir -- the path of the Hidden Markov Model (HMM)
|
__init__
|
python
|
jasperproject/jasper-client
|
client/stt.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/stt.py
|
MIT
|
def transcribe(self, fp):
"""
Performs STT, transcribing an audio file and returning the result.
Arguments:
fp -- a file object containing audio data
"""
fp.seek(44)
# FIXME: Can't use the Decoder.decode_raw() here, because
# pocketsphinx segfaults with tempfile.SpooledTemporaryFile()
data = fp.read()
self._decoder.start_utt()
self._decoder.process_raw(data, False, True)
self._decoder.end_utt()
result = self._decoder.get_hyp()
with open(self._logfile, 'r+') as f:
for line in f:
self._logger.debug(line.strip())
f.truncate()
transcribed = [result[0]]
self._logger.info('Transcribed: %r', transcribed)
return transcribed
|
Performs STT, transcribing an audio file and returning the result.
Arguments:
fp -- a file object containing audio data
|
transcribe
|
python
|
jasperproject/jasper-client
|
client/stt.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/stt.py
|
MIT
|
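The fp.seek(44) in transcribe above skips the canonical 44-byte header of a minimal PCM WAV file so that only raw samples reach the decoder. That offset assumes no extra header chunks; a more robust sketch lets the wave module do the parsing:

import wave

def read_raw_audio(fp):
    # Parse the WAV header properly and hand back only the PCM frames,
    # instead of assuming a fixed 44-byte header.
    fp.seek(0)
    wav = wave.open(fp, 'rb')
    try:
        return wav.readframes(wav.getnframes())
    finally:
        wav.close()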
def __init__(self, api_key=None, language='en-us'):
# FIXME: get init args from config
"""
Arguments:
api_key -- the public API key which allows access to Google APIs
"""
self._logger = logging.getLogger(__name__)
self._request_url = None
self._language = None
self._api_key = None
self._http = requests.Session()
self.language = language
self.api_key = api_key
|
Arguments:
api_key -- the public API key which allows access to Google APIs
|
__init__
|
python
|
jasperproject/jasper-client
|
client/stt.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/stt.py
|
MIT
|
def transcribe(self, fp):
"""
Performs STT via the Google Speech API, transcribing an audio file and
returning a list of English transcriptions.
Arguments:
fp -- a file object containing audio data
"""
if not self.api_key:
self._logger.critical('API key missing, transcription request ' +
'aborted.')
return []
elif not self.language:
self._logger.critical('Language info missing, transcription ' +
'request aborted.')
return []
wav = wave.open(fp, 'rb')
frame_rate = wav.getframerate()
wav.close()
data = fp.read()
headers = {'content-type': 'audio/l16; rate=%s' % frame_rate}
r = self._http.post(self.request_url, data=data, headers=headers)
try:
r.raise_for_status()
except requests.exceptions.HTTPError:
self._logger.critical('Request failed with http status %d',
r.status_code)
if r.status_code == requests.codes['forbidden']:
self._logger.warning('Status 403 is probably caused by an ' +
'invalid Google API key.')
return []
r.encoding = 'utf-8'
try:
# We cannot simply use r.json() because Google sends invalid json
# (i.e. multiple json objects, separated by newlines. We only want
# the last one).
response = json.loads(list(r.text.strip().split('\n', 1))[-1])
if len(response['result']) == 0:
# Response result is empty
raise ValueError('Nothing has been transcribed.')
results = [alt['transcript'] for alt
in response['result'][0]['alternative']]
except ValueError as e:
self._logger.warning('Empty response: %s', e.args[0])
results = []
except (KeyError, IndexError):
self._logger.warning('Cannot parse response.', exc_info=True)
results = []
else:
# Convert all results to uppercase
results = tuple(result.upper() for result in results)
self._logger.info('Transcribed: %r', results)
return results
|
Performs STT via the Google Speech API, transcribing an audio file and
returning a list of English transcriptions.
Arguments:
fp -- a file object containing audio data
|
transcribe
|
python
|
jasperproject/jasper-client
|
client/stt.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/stt.py
|
MIT
|
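The parsing in the Google transcribe method works around the fact that this era of the Google Speech API returned several JSON objects separated by newlines, with the useful hypothesis in the last one. A self-contained sketch of that step on a representative payload (the payload text is illustrative):

import json

raw = ('{"result":[]}\n'
       '{"result":[{"alternative":[{"transcript":"hello world"}]}]}')

# Keep only the last JSON object, as the code above does.
response = json.loads(raw.strip().split('\n')[-1])
results = [alt['transcript']
           for alt in response['result'][0]['alternative']]
print(results)  # ['hello world']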
def get_engine_by_slug(slug=None):
"""
Returns:
An STT Engine implementation available on the current platform
Raises:
ValueError if no speaker implementation is supported on this platform
"""
if not slug or type(slug) is not str:
raise TypeError("Invalid slug '%s'" % slug)
selected_engines = filter(lambda engine: hasattr(engine, "SLUG") and
engine.SLUG == slug, get_engines())
if len(selected_engines) == 0:
raise ValueError("No STT engine found for slug '%s'" % slug)
else:
if len(selected_engines) > 1:
print(("WARNING: Multiple STT engines found for slug '%s'. " +
"This is most certainly a bug.") % slug)
engine = selected_engines[0]
if not engine.is_available():
raise ValueError(("STT engine '%s' is not available (due to " +
"missing dependencies, missing " +
"dependencies, etc.)") % slug)
return engine
|
Returns:
An STT Engine implementation available on the current platform
Raises:
ValueError if no speaker implementation is supported on this platform
|
get_engine_by_slug
|
python
|
jasperproject/jasper-client
|
client/stt.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/stt.py
|
MIT
|
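get_engine_by_slug filters the classes returned by a get_engines() helper (not shown in this record) on their SLUG attribute. Note that under Python 3, filter() returns an iterator, so the len(selected_engines) calls above would fail; a list comprehension, as in this illustrative sketch with stand-in engine classes, sidesteps that:

engines = [type('Sphinx', (), {'SLUG': 'sphinx'}),
           type('Google', (), {'SLUG': 'google'})]

slug = 'sphinx'
selected = [e for e in engines if getattr(e, 'SLUG', None) == slug]
print(selected[0].SLUG)  # 'sphinx'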
def get_engine_by_slug(slug=None):
"""
Returns:
A speaker implementation available on the current platform
Raises:
ValueError if no speaker implementation is supported on this platform
"""
if not slug or type(slug) is not str:
raise TypeError("Invalid slug '%s'" % slug)
selected_engines = filter(lambda engine: hasattr(engine, "SLUG") and
engine.SLUG == slug, get_engines())
if len(selected_engines) == 0:
raise ValueError("No TTS engine found for slug '%s'" % slug)
else:
if len(selected_engines) > 1:
print("WARNING: Multiple TTS engines found for slug '%s'. " +
"This is most certainly a bug." % slug)
engine = selected_engines[0]
if not engine.is_available():
raise ValueError(("TTS engine '%s' is not available (due to " +
"missing dependencies, etc.)") % slug)
return engine
|
Returns:
A speaker implementation available on the current platform
Raises:
ValueError if no speaker implementation is supported on this platform
|
get_engine_by_slug
|
python
|
jasperproject/jasper-client
|
client/tts.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/tts.py
|
MIT
|
def phrases_to_revision(cls, phrases):
"""
Calculates a revision from phrases by using the SHA1 hash function.
Arguments:
phrases -- a list of phrases
Returns:
A revision string for given phrases.
"""
sorted_phrases = sorted(phrases)
joined_phrases = '\n'.join(sorted_phrases)
sha1 = hashlib.sha1()
sha1.update(joined_phrases)
return sha1.hexdigest()
|
Calculates a revision from phrases by using the SHA1 hash function.
Arguments:
phrases -- a list of phrases
Returns:
A revision string for given phrases.
|
phrases_to_revision
|
python
|
jasperproject/jasper-client
|
client/vocabcompiler.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
|
MIT
|
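phrases_to_revision derives the revision purely from the SHA1 of the sorted, newline-joined phrase list, so the same phrase set always maps to the same revision regardless of input order. A worked example (the phrases are illustrative; under Python 3 the joined string must be encoded before hashing, which the Python 2 code above does implicitly):

import hashlib

phrases = ['MUSIC', 'TIME', 'WEATHER']
joined = '\n'.join(sorted(phrases))
print(hashlib.sha1(joined.encode('utf-8')).hexdigest())
# Reordering 'phrases' leaves the digest unchanged.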
def __init__(self, name='default', path='.'):
"""
Initializes a new Vocabulary instance.
Optional Arguments:
name -- (optional) the name of the vocabulary (Default: 'default')
path -- (optional) the path in which the vocabulary exists or will
be created (Default: '.')
"""
self.name = name
self.path = os.path.abspath(os.path.join(path, self.PATH_PREFIX, name))
self._logger = logging.getLogger(__name__)
|
Initializes a new Vocabulary instance.
Optional Arguments:
name -- (optional) the name of the vocabulary (Default: 'default')
path -- (optional) the path in which the vocabulary exists or will
be created (Default: '.')
|
__init__
|
python
|
jasperproject/jasper-client
|
client/vocabcompiler.py
|
https://github.com/jasperproject/jasper-client/blob/master/client/vocabcompiler.py
|
MIT
|
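The Vocabulary initializer derives its working directory from a class-level PATH_PREFIX plus the vocabulary name, so every named vocabulary gets its own folder under the given path. A small sketch of the resulting layout (the PATH_PREFIX value here is an illustrative stand-in; the real attribute is defined on the Vocabulary subclasses):

import os

PATH_PREFIX = 'vocabularies'  # illustrative stand-in
name, path = 'default', '.'
print(os.path.abspath(os.path.join(path, PATH_PREFIX, name)))
# e.g. /home/user/jasper/vocabularies/default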