id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
17,714 | import numpy as np
import torch
import torch.nn.functional as F
from torch.distributions import Normal
def to_one_hot(tensor, n, fill_with=1.0):
    """One-hot encode `tensor` along a new trailing axis.

    Args:
        tensor (LongTensor): index tensor of arbitrary shape.
        n (int): size of the new one-hot axis (number of classes).
        fill_with (float): value written at the hot positions (default 1.0).

    Returns:
        FloatTensor of shape ``tensor.shape + (n,)``: zeros except `fill_with`
        at the positions given by `tensor`.
    """
    # Allocating directly on the input's device replaces the old
    # FloatTensor().zero_() + explicit .cuda() dance and also works for
    # non-CUDA accelerators.
    one_hot = torch.zeros(*tensor.size(), n, dtype=torch.float, device=tensor.device)
    # Scatter along the (new) last axis.
    one_hot.scatter_(tensor.dim(), tensor.unsqueeze(-1), fill_with)
    return one_hot
The provided code snippet includes necessary dependencies for implementing the `sample_from_mix_gaussian` function. Write a Python function `def sample_from_mix_gaussian(y, log_scale_min=-7.0)` to solve the following problem:
Sample from (discretized) mixture of gaussian distributions Args: y (Tensor): B x C x T log_scale_min (float): Log scale minimum value Returns: Tensor: sample in range of [-1, 1].
Here is the function:
def sample_from_mix_gaussian(y, log_scale_min=-7.0):
    """
    Sample from (discretized) mixture of gaussian distributions

    Args:
        y (Tensor): B x C x T. For a K-component mixture C == 3*K and the
            channels hold [mixture logits, means, log scales]; C == 2 means a
            single gaussian parameterized by [mean, log scale].
        log_scale_min (float): Log scale minimum value; log scales are clamped
            to this lower bound before exponentiation for numerical stability.

    Returns:
        Tensor: B x T sample in range of [-1, 1].
    """
    C = y.size(1)
    if C == 2:
        nr_mix = 1
    else:
        assert y.size(1) % 3 == 0
        nr_mix = y.size(1) // 3

    # B x T x C
    y = y.transpose(1, 2)

    if C == 2:
        logit_probs = None
    else:
        logit_probs = y[:, :, :nr_mix]

    if nr_mix > 1:
        # Sample the mixture indicator via the Gumbel-max trick on the logits.
        temp = logit_probs.data.new(logit_probs.size()).uniform_(1e-5, 1.0 - 1e-5)
        temp = logit_probs.data - torch.log(-torch.log(temp))
        _, argmax = temp.max(dim=-1)

        # (B, T) -> (B, T, nr_mix)
        one_hot = to_one_hot(argmax, nr_mix)

        # Select means and log scales of the chosen component.
        means = torch.sum(y[:, :, nr_mix : 2 * nr_mix] * one_hot, dim=-1)
        log_scales = torch.sum(y[:, :, 2 * nr_mix : 3 * nr_mix] * one_hot, dim=-1)
    else:
        if C == 2:
            means, log_scales = y[:, :, 0], y[:, :, 1]
        elif C == 3:
            means, log_scales = y[:, :, 1], y[:, :, 2]
        else:
            assert False, "shouldn't happen"

    # Fix: log_scale_min was previously accepted but never used; clamp the
    # log scales so the Normal scale cannot underflow to zero.
    log_scales = torch.clamp(log_scales, min=log_scale_min)
    scales = torch.exp(log_scales)
    dist = Normal(loc=means, scale=scales)
    x = dist.sample()

    x = torch.clamp(x, min=-1.0, max=1.0)
    return x
17,715 | import humanfriendly
import numpy as np
import torch
def get_human_readable_count(number: int) -> str:
    """Return human_readable_count

    Originated from:
    https://github.com/PyTorchLightning/pytorch-lightning/blob/master/pytorch_lightning/core/memory.py

    Abbreviates a non-negative number with K, M, B, T for thousands, millions,
    billions and trillions, respectively. The scaled value is always rendered
    with two decimals.

    Examples (corrected to match the actual ``:.2f`` formatting):
        >>> get_human_readable_count(123)
        '123.00  '
        >>> get_human_readable_count(1234)  # (one thousand)
        '1.23 K'
        >>> get_human_readable_count(2e6)  # (two million)
        '2.00 M'
        >>> get_human_readable_count(3e9)  # (three billion)
        '3.00 B'
        >>> get_human_readable_count(4e12)  # (four trillion)
        '4.00 T'
        >>> get_human_readable_count(5e15)  # (more than trillion)
        '5000.00 T'

    Args:
        number: a non-negative number (int or float).

    Return:
        A string formatted according to the pattern described above.
    """
    assert number >= 0
    labels = [" ", "K", "M", "B", "T"]
    # Number of decimal digits; 0 counts as a single digit.
    num_digits = int(np.floor(np.log10(number)) + 1 if number > 0 else 1)
    num_groups = int(np.ceil(num_digits / 3))
    # Anything beyond trillions stays in the "T" bucket.
    num_groups = min(num_groups, len(labels))
    shift = -3 * (num_groups - 1)
    number = number * (10**shift)
    index = num_groups - 1
    return f"{number:.2f} {labels[index]}"
def to_bytes(dtype) -> int:
    """Return the size in bytes of one element of the given torch dtype.

    Fix: the previous implementation parsed the trailing two characters of
    ``str(dtype)``, which breaks for single-digit widths (torch.int8,
    torch.uint8) and for torch.bool. Asking an empty tensor for its
    element size is exact for every dtype.
    """
    return torch.tensor([], dtype=dtype).element_size()
def model_summary(model: torch.nn.Module) -> str:
    """Build a multi-line, human-readable description of `model`.

    Includes the module structure, total/trainable parameter counts, the
    trainable percentage, the memory size of trainable parameters, and the
    parameter dtype.

    Args:
        model: the module to summarize.

    Returns:
        A printable summary string.
    """
    message = "Model structure:\n"
    message += str(model)
    tot_params = sum(p.numel() for p in model.parameters())
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    # Guard: a parameterless model previously raised ZeroDivisionError here.
    if tot_params > 0:
        percent_trainable = "{:.1f}".format(num_params * 100.0 / tot_params)
    else:
        percent_trainable = "0.0"
    tot_params = get_human_readable_count(tot_params)
    num_params = get_human_readable_count(num_params)
    message += "\n\nModel summary:\n"
    message += f" Class Name: {model.__class__.__name__}\n"
    message += f" Total Number of model parameters: {tot_params}\n"
    message += (
        f" Number of trainable parameters: {num_params} ({percent_trainable}%)\n"
    )
    num_bytes = humanfriendly.format_size(
        sum(
            p.numel() * to_bytes(p.dtype) for p in model.parameters() if p.requires_grad
        )
    )
    message += f" Size: {num_bytes}\n"
    # Guard: next(iter(...)) previously raised StopIteration when the model
    # has no parameters; in that case the Type line is simply omitted.
    first_param = next(iter(model.parameters()), None)
    if first_param is not None:
        message += f" Type: {first_param.dtype}"
    return message
17,716 | import os
import librosa
import torch
import numpy as np
from fairseq import checkpoint_utils
from tqdm import tqdm
import torch
def load_hubert_model(hps):
    """Load a pretrained HuBERT checkpoint via fairseq.

    Args:
        hps: config object; only ``hps.hubert_file`` (checkpoint path) is read.

    Returns:
        The first model of the loaded ensemble, switched to eval mode and
        moved to CUDA when a GPU is available.
    """
    # Load model
    ckpt_path = hps.hubert_file
    print("Load Hubert Model...")
    models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
        [ckpt_path],
        suffix="",
    )
    # A single checkpoint was given, so only the first ensemble member is used.
    model = models[0]
    model.eval()
    if torch.cuda.is_available():
        model = model.cuda()
    return model
17,717 | import os
import librosa
import torch
import numpy as np
from fairseq import checkpoint_utils
from tqdm import tqdm
import torch
The provided code snippet includes necessary dependencies for implementing the `repeat_expand_2d` function. Write a Python function `def repeat_expand_2d(content, target_len)` to solve the following problem:
content : [hubert_dim(256), src_len] target: [hubert_dim(256), target_len]
Here is the function:
def repeat_expand_2d(content, target_len):
    """
    content : [hubert_dim(256), src_len]
    target: [hubert_dim(256), target_len]
    """
    src_len = content.shape[-1]
    expanded = torch.zeros([content.shape[0], target_len], dtype=torch.float).to(
        content.device
    )
    # Boundary of each source frame, expressed on the target time scale.
    boundaries = torch.arange(src_len + 1) * target_len / src_len
    pos = 0
    for frame in range(target_len):
        # Move to the next source frame once its boundary has been passed.
        if frame >= boundaries[pos + 1]:
            pos += 1
        expanded[:, frame] = content[:, pos]
    return expanded
17,718 | import os
import librosa
import torch
import numpy as np
from fairseq import checkpoint_utils
from tqdm import tqdm
import torch
The provided code snippet includes necessary dependencies for implementing the `get_mapped_features` function. Write a Python function `def get_mapped_features(raw_content_features, mapping_features)` to solve the following problem:
Content Vector: frameshift = 20ms, hop_size = 480 in 24k Now it's only used for mapping to bigvgan's mels (sr = 24k, hop_size = 256, frameshift ~= 10.7 ms)
Here is the function:
def get_mapped_features(raw_content_features, mapping_features):
    """Align content features (hop 480) to mel-frame rate (hop 256).

    Content Vector: frameshift = 20ms, hop_size = 480 in 24k
    Now it's only used for mapping to bigvgan's mels (sr = 24k, hop_size = 256, frameshift ~= 10.7 ms)

    Each utterance's features are repeat-upsampled then window-averaged so
    their frame rate matches the target (mel) frame rate.

    Args:
        raw_content_features: per-utterance features; item[index][0] is a
            tensor whose transpose is (source_len, feature_dim).
        mapping_features: per-utterance target features, each of shape
            (mels_frame_len, n_mels); only their lengths are used.

    Returns:
        list of numpy arrays, each of shape (target_len, feature_dim).
    """
    source_hop = 480
    target_hop = 256
    # Reduce the hop ratio by the gcd (32): 480/256 -> 15/8, i.e. 8 source
    # frames cover exactly 15 target frames (8*480 == 15*256 samples).
    factor = np.gcd(source_hop, target_hop)
    source_hop //= factor
    target_hop //= factor
    print(
        "Mapping source's {} frames => target's {} frames".format(
            target_hop, source_hop
        )
    )

    results = []
    for index, mapping_feat in enumerate(tqdm(mapping_features)):
        # mappping_feat: (mels_frame_len, n_mels)
        target_len = len(mapping_feat)

        # (source_len, 256)
        raw_feats = raw_content_features[index][0].cpu().numpy().T
        source_len, width = raw_feats.shape

        # const ~= target_len * target_hop (largest multiple of target_hop
        # that fits in the upsampled sequence)
        const = source_len * source_hop // target_hop * target_hop

        # (source_len * source_hop, dim)
        up_sampling_feats = np.repeat(raw_feats, source_hop, axis=0)
        # (const, dim) -> (const/target_hop, target_hop, dim) -> (const/target_hop, dim)
        down_sampling_feats = np.average(
            up_sampling_feats[:const].reshape(-1, target_hop, width), axis=1
        )

        err = abs(target_len - len(down_sampling_feats))
        # NOTE(review): a mismatch of more than 3 frames aborts the entire
        # process via exit(); raising an exception would be friendlier.
        if err > 3:
            print("index:", index)
            print("mels:", mapping_feat.shape)
            print("raw content vector:", raw_feats.shape)
            print("up_sampling:", up_sampling_feats.shape)
            print("down_sampling_feats:", down_sampling_feats.shape)
            exit()
        if len(down_sampling_feats) < target_len:
            # Pad by repeating the last frame: (1, dim) -> (err, dim)
            end = down_sampling_feats[-1][None, :].repeat(err, axis=0)
            down_sampling_feats = np.concatenate([down_sampling_feats, end], axis=0)

        # Trim to exactly (target_len, dim)
        feats = down_sampling_feats[:target_len]
        results.append(feats)

    return results
17,719 | import os
import librosa
import torch
import numpy as np
from fairseq import checkpoint_utils
from tqdm import tqdm
import torch
def content_vector_encoder(model, audio_path, default_sampling_rate=16000):
    """Extract content-vector features from a single audio file.

    # content vector default sr: 16000

    Args:
        model: HuBERT-style model; its parameters' device is used for inference.
        audio_path: path of the audio file to load.
        default_sampling_rate: resample rate for the model input (16 kHz).

    Returns:
        numpy array of shape (1, 256, frame_len).
    """
    wav16k, sr = librosa.load(audio_path, sr=default_sampling_rate)
    # Run on whatever device the model lives on.
    device = next(model.parameters()).device
    wav16k = torch.from_numpy(wav16k).to(device)

    # (1, 256, frame_len)
    # NOTE(review): get_hubert_content is neither defined nor imported in the
    # code shown here — confirm it is provided elsewhere in the module.
    content_feature = get_hubert_content(model, wav_16k_tensor=wav16k)
    return content_feature.cpu().detach().numpy()
def extract_hubert_features_of_dataset(datasets, model, out_dir):
    """Extract and save content-vector features for every utterance.

    Args:
        datasets: iterable of dicts with keys "Uid" and "Path".
        model: HuBERT-style model forwarded to content_vector_encoder.
        out_dir: directory where one ``<Uid>.npy`` file per utterance is written.
    """
    for utt in tqdm(datasets):
        uid = utt["Uid"]
        audio_path = utt["Path"]

        # NOTE(review): the original comment claimed (T, 256), but
        # content_vector_encoder returns (1, 256, frame_len) — confirm.
        content_vector_feature = content_vector_encoder(model, audio_path)

        save_path = os.path.join(out_dir, uid + ".npy")
        np.save(save_path, content_vector_feature)
17,720 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numbers
import re
import six
The provided code snippet includes necessary dependencies for implementing the `_cast_to_type_if_compatible` function. Write a Python function `def _cast_to_type_if_compatible(name, param_type, value)` to solve the following problem:
Cast hparam to the provided type, if compatible. Args: name: Name of the hparam to be cast. param_type: The type of the hparam. value: The value to be cast, if compatible. Returns: The result of casting `value` to `param_type`. Raises: ValueError: If the type of `value` is not compatible with param_type. * If `param_type` is a string type, but `value` is not. * If `param_type` is a boolean, but `value` is not, or vice versa. * If `param_type` is an integer type, but `value` is not. * If `param_type` is a float type, but `value` is not a numeric type.
Here is the function:
def _cast_to_type_if_compatible(name, param_type, value):
"""Cast hparam to the provided type, if compatible.
Args:
name: Name of the hparam to be cast.
param_type: The type of the hparam.
value: The value to be cast, if compatible.
Returns:
The result of casting `value` to `param_type`.
Raises:
ValueError: If the type of `value` is not compatible with param_type.
* If `param_type` is a string type, but `value` is not.
* If `param_type` is a boolean, but `value` is not, or vice versa.
* If `param_type` is an integer type, but `value` is not.
* If `param_type` is a float type, but `value` is not a numeric type.
"""
fail_msg = "Could not cast hparam '%s' of type '%s' from value %r" % (
name,
param_type,
value,
)
# Some callers use None, for which we can't do any casting/checking. :(
if issubclass(param_type, type(None)):
return value
# Avoid converting a non-string type to a string.
if issubclass(param_type, (six.string_types, six.binary_type)) and not isinstance(
value, (six.string_types, six.binary_type)
):
raise ValueError(fail_msg)
# Avoid converting a number or string type to a boolean or vice versa.
if issubclass(param_type, bool) != isinstance(value, bool):
raise ValueError(fail_msg)
# Avoid converting float to an integer (the reverse is fine).
if issubclass(param_type, numbers.Integral) and not isinstance(
value, numbers.Integral
):
raise ValueError(fail_msg)
# Avoid converting a non-numeric type to a numeric type.
if issubclass(param_type, numbers.Number) and not isinstance(value, numbers.Number):
raise ValueError(fail_msg)
return param_type(value) | Cast hparam to the provided type, if compatible. Args: name: Name of the hparam to be cast. param_type: The type of the hparam. value: The value to be cast, if compatible. Returns: The result of casting `value` to `param_type`. Raises: ValueError: If the type of `value` is not compatible with param_type. * If `param_type` is a string type, but `value` is not. * If `param_type` is a boolean, but `value` is not, or vice versa. * If `param_type` is an integer type, but `value` is not. * If `param_type` is a float type, but `value` is not a numeric type. |
17,721 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import numbers
import re
import six
PARAM_RE = re.compile(
r"""
(?P<name>[a-zA-Z][\w\.]*) # variable name: "var" or "x"
(\[\s*(?P<index>\d+)\s*\])? # (optional) index: "1" or None
\s*=\s*
((?P<val>[^,\[]*) # single value: "a" or None
|
\[(?P<vals>[^\]]*)\]) # list of values: None or "1,2,3"
($|,\s*)""",
re.VERBOSE,
)
def _parse_fail(name, var_type, value, values):
"""Helper function for raising a value error for bad assignment."""
raise ValueError(
"Could not parse hparam '%s' of type '%s' with value '%s' in %s"
% (name, var_type.__name__, value, values)
)
def _process_scalar_value(name, parse_fn, var_type, m_dict, values, results_dictionary):
"""Update results_dictionary with a scalar value.
Used to update the results_dictionary to be returned by parse_values when
encountering a clause with a scalar RHS (e.g. "s=5" or "arr[0]=5".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("s" or "arr").
parse_fn: Function for parsing the actual value.
var_type: Type of named variable.
m_dict: Dictionary constructed from regex parsing.
m_dict['val']: RHS value (scalar)
m_dict['index']: List index value (or None)
values: Full expression being parsed
results_dictionary: The dictionary being updated for return by the parsing
function.
Raises:
ValueError: If the name has already been used.
"""
try:
parsed_value = parse_fn(m_dict["val"])
except ValueError:
_parse_fail(name, var_type, m_dict["val"], values)
# If no index is provided
if not m_dict["index"]:
if name in results_dictionary:
_reuse_fail(name, values)
results_dictionary[name] = parsed_value
else:
if name in results_dictionary:
# The name has already been used as a scalar, then it
# will be in this dictionary and map to a non-dictionary.
if not isinstance(results_dictionary.get(name), dict):
_reuse_fail(name, values)
else:
results_dictionary[name] = {}
index = int(m_dict["index"])
# Make sure the index position hasn't already been assigned a value.
if index in results_dictionary[name]:
_reuse_fail("{}[{}]".format(name, index), values)
results_dictionary[name][index] = parsed_value
def _process_list_value(name, parse_fn, var_type, m_dict, values, results_dictionary):
"""Update results_dictionary from a list of values.
Used to update results_dictionary to be returned by parse_values when
encountering a clause with a list RHS (e.g. "arr=[1,2,3]".)
Mutates results_dictionary.
Args:
name: Name of variable in assignment ("arr").
parse_fn: Function for parsing individual values.
var_type: Type of named variable.
m_dict: Dictionary constructed from regex parsing.
m_dict['val']: RHS value (scalar)
values: Full expression being parsed
results_dictionary: The dictionary being updated for return by the parsing
function.
Raises:
ValueError: If the name has an index or the values cannot be parsed.
"""
if m_dict["index"] is not None:
raise ValueError("Assignment of a list to a list index.")
elements = filter(None, re.split("[ ,]", m_dict["vals"]))
# Make sure the name hasn't already been assigned a value
if name in results_dictionary:
raise _reuse_fail(name, values)
try:
results_dictionary[name] = [parse_fn(e) for e in elements]
except ValueError:
_parse_fail(name, var_type, m_dict["vals"], values)
The provided code snippet includes necessary dependencies for implementing the `parse_values` function. Write a Python function `def parse_values(values, type_map, ignore_unknown=False)` to solve the following problem:
Parses hyperparameter values from a string into a python map. `values` is a string containing comma-separated `name=value` pairs. For each pair, the value of the hyperparameter named `name` is set to `value`. If a hyperparameter name appears multiple times in `values`, a ValueError is raised (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2'). If a hyperparameter name in both an index assignment and scalar assignment, a ValueError is raised. (e.g. 'a=[1,2,3],a[0] = 1'). The hyperparameter name may contain '.' symbols, which will result in an attribute name that is only accessible through the getattr and setattr functions. (And must be first explicit added through add_hparam.) WARNING: Use of '.' in your variable names is allowed, but is not well supported and not recommended. The `value` in `name=value` must follows the syntax according to the type of the parameter: * Scalar integer: A Python-parsable integer point value. E.g.: 1, 100, -12. * Scalar float: A Python-parsable floating point value. E.g.: 1.0, -.54e89. * Boolean: Either true or false. * Scalar string: A non-empty sequence of characters, excluding comma, spaces, and square brackets. E.g.: foo, bar_1. * List: A comma separated list of scalar values of the parameter type enclosed in square brackets. E.g.: [1,2,3], [1.0,1e-12], [high,low]. When index assignment is used, the corresponding type_map key should be the list name. E.g. for "arr[1]=0" the type_map must have the key "arr" (not "arr[1]"). Args: values: String. Comma separated list of `name=value` pairs where 'value' must follow the syntax described above. type_map: A dictionary mapping hyperparameter names to types. Note every parameter name in values must be a key in type_map. The values must conform to the types indicated, where a value V is said to conform to a type T if either V has type T, or V is a list of elements of type T. Hence, for a multidimensional parameter 'x' taking float values, 'x=[0.1,0.2]' will parse successfully if type_map['x'] = float. 
ignore_unknown: Bool. Whether values that are missing a type in type_map should be ignored. If set to True, a ValueError will not be raised for unknown hyperparameter type. Returns: A python map mapping each name to either: * A scalar value. * A list of scalar values. * A dictionary mapping index numbers to scalar values. (e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}}") Raises: ValueError: If there is a problem with input. * If `values` cannot be parsed. * If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]'). * If the same rvalue is assigned two different values (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2', or 'a=1,a=[1]')
Here is the function:
def parse_values(values, type_map, ignore_unknown=False):
    """Parses hyperparameter values from a string into a python map.

    `values` is a string of comma-separated `name=value` pairs; each pair sets
    the hyperparameter `name` to `value`. Assigning the same name twice
    ('a=1,a=2', 'a[1]=1,a[1]=2'), or mixing scalar and index assignment for
    one name ('a=[1,2,3],a[0]=1'), raises a ValueError.

    Names may contain '.', producing attributes reachable only through
    getattr/setattr (and which must first be explicitly added through
    add_hparam). WARNING: this is allowed but not well supported and not
    recommended.

    The `value` syntax depends on the parameter type:
    * Scalar integer: a Python-parsable integer, e.g. 1, 100, -12.
    * Scalar float: a Python-parsable float, e.g. 1.0, -.54e89.
    * Boolean: either true or false.
    * Scalar string: non-empty characters excluding comma, spaces, and square
      brackets, e.g. foo, bar_1.
    * List: comma-separated scalars of the parameter type in square brackets,
      e.g. [1,2,3], [1.0,1e-12], [high,low].

    For index assignment ("arr[1]=0") the type_map key is the list name
    ("arr", not "arr[1]").

    Args:
        values: String. Comma-separated `name=value` pairs following the
            syntax above.
        type_map: Maps hyperparameter names to types; every parsed name must
            be a key unless ignore_unknown is True. A value V conforms to type
            T if V has type T or V is a list of elements of type T; so for a
            multidimensional float parameter 'x', 'x=[0.1,0.2]' parses when
            type_map['x'] = float.
        ignore_unknown: Bool. When True, names missing from type_map are
            skipped instead of raising ValueError.

    Returns:
        A python map from each name to a scalar, a list of scalars, or a dict
        of index -> scalar (e.g. "x=5,L=[1,2],arr[1]=3" yields
        {'x': 5, 'L': [1, 2], 'arr': {1: 3}}).

    Raises:
        ValueError: If `values` cannot be parsed, a list is assigned to a list
            index (e.g. 'a[1] = [1,2,3]'), or the same rvalue is assigned two
            different values (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2', 'a=1,a=[1]').
    """
    results_dictionary = {}
    pos = 0
    while pos < len(values):
        m = PARAM_RE.match(values, pos)
        if not m:
            raise ValueError("Malformed hyperparameter value: %s" % values[pos:])
        # Advance past this clause (the pattern consumes a trailing comma).
        pos = m.end()

        m_dict = m.groupdict()
        name = m_dict["name"]
        if name not in type_map:
            if ignore_unknown:
                continue
            raise ValueError("Unknown hyperparameter type for %s" % name)
        type_ = type_map[name]

        # Booleans need a dedicated parser; every other type is callable on
        # its string representation directly.
        if type_ == bool:

            def parse_bool(value):
                if value in ["true", "True"]:
                    return True
                elif value in ["false", "False"]:
                    return False
                else:
                    try:
                        return bool(int(value))
                    except ValueError:
                        _parse_fail(name, type_, value, values)

            parse = parse_bool
        else:
            parse = type_

        if m_dict["val"] is not None:
            # Scalar (possibly indexed) assignment.
            _process_scalar_value(
                name, parse, type_, m_dict, values, results_dictionary
            )
        elif m_dict["vals"] is not None:
            # List assignment.
            _process_list_value(name, parse, type_, m_dict, values, results_dictionary)
        else:
            # Neither a scalar nor a list was captured.
            _parse_fail(name, type_, "", values)

    return results_dictionary
Hence, for a multidimensional parameter 'x' taking float values, 'x=[0.1,0.2]' will parse successfully if type_map['x'] = float. ignore_unknown: Bool. Whether values that are missing a type in type_map should be ignored. If set to True, a ValueError will not be raised for unknown hyperparameter type. Returns: A python map mapping each name to either: * A scalar value. * A list of scalar values. * A dictionary mapping index numbers to scalar values. (e.g. "x=5,L=[1,2],arr[1]=3" results in {'x':5,'L':[1,2],'arr':{1:3}}") Raises: ValueError: If there is a problem with input. * If `values` cannot be parsed. * If a list is assigned to a list index (e.g. 'a[1] = [1,2,3]'). * If the same rvalue is assigned two different values (e.g. 'a=1,a=2', 'a[1]=1,a[1]=2', or 'a=1,a=[1]') |
17,722 | import torchaudio
import pyworld as pw
import numpy as np
import torch
import diffsptk
import os
from tqdm import tqdm
import pickle
import torchaudio
def extract_world_features(waveform, frameshift=10, fs=16000):
    """Extract WORLD vocoder features (f0, spectrogram, aperiodicity).

    Args:
        waveform: audio samples; flattened to shape (seq,).
        frameshift: frame period in milliseconds.
        fs: sampling rate in Hz. NOTE(review): `fs` was previously an
            undefined free variable (NameError at runtime); it is now a
            parameter. The 16000 default is an assumption — confirm against
            callers.

    Returns:
        Tuple (f0, sp, ap, fs).
    """
    # waveform: (1, seq) or (seq,); pyworld expects a 1-D double array.
    x = np.array(waveform, dtype=np.double).flatten()

    _f0, t = pw.dio(x, fs, frame_period=frameshift)  # raw pitch extractor
    f0 = pw.stonemask(x, _f0, t, fs)  # pitch refinement
    sp = pw.cheaptrick(x, f0, t, fs)  # extract smoothed spectrogram
    ap = pw.d4c(x, f0, t, fs)  # extract aperiodicity

    return f0, sp, ap, fs
17,723 | import torchaudio
import pyworld as pw
import numpy as np
import torch
import diffsptk
import os
from tqdm import tqdm
import pickle
import torchaudio
def get_mcep_params(fs):
    """Hyperparameters of transformation between SP and MCEP.

    Reference:
    https://github.com/CSTR-Edinburgh/merlin/blob/master/misc/scripts/vocoder/world_v2/copy_synthesis.sh

    Args:
        fs: sampling rate in Hz (supported: 16000, 44100, 48000).

    Returns:
        Tuple (fft_size, alpha).

    Raises:
        ValueError: for an unsupported sampling rate (previously this fell
            through and crashed with UnboundLocalError).
    """
    if fs in (44100, 48000):
        fft_size = 2048
        alpha = 0.77
    elif fs == 16000:
        fft_size = 1024
        alpha = 0.58
    else:
        raise ValueError(f"Unsupported sampling rate for MCEP params: {fs}")
    return fft_size, alpha
def sp2mcep(x, mcsize, fs):
    """Convert a WORLD spectral envelope to mel-cepstral coefficients (MCEP)."""
    fft_size, alpha = get_mcep_params(fs)
    spectrum = torch.as_tensor(x, dtype=torch.float)

    # Take the square root, then scale to 16-bit range before analysis.
    scaled = diffsptk.ScalarOperation("Multiplication", 32768.0)(
        diffsptk.ScalarOperation("SquareRoot")(spectrum)
    )
    analyzer = diffsptk.MelCepstralAnalysis(
        cep_order=mcsize - 1, fft_length=fft_size, alpha=alpha, n_iter=1
    )
    return analyzer(scaled).numpy()
17,724 | import torchaudio
import pyworld as pw
import numpy as np
import torch
import diffsptk
import os
from tqdm import tqdm
import pickle
import torchaudio
def get_mcep_params(fs):
def mcep2sp(x, mcsize, fs):
    """Convert mel-cepstral coefficients (MCEP) back to a WORLD spectral envelope."""
    fft_size, alpha = get_mcep_params(fs)
    mgc = torch.as_tensor(x, dtype=torch.float)

    spectrum = diffsptk.MelGeneralizedCepstrumToSpectrum(
        alpha=alpha,
        cep_order=mcsize - 1,
        fft_length=fft_size,
    )(mgc)
    # Undo the 16-bit scaling, then square back to a power spectrum.
    spectrum = diffsptk.ScalarOperation("Division", 32768.0)(spectrum)
    spectrum = diffsptk.ScalarOperation("Power", 2)(spectrum)
    return spectrum.double().numpy()
17,725 | import torchaudio
import pyworld as pw
import numpy as np
import torch
import diffsptk
import os
from tqdm import tqdm
import pickle
import torchaudio
def f0_statistics(f0_features, path):
    """Print min/max/mean of all voiced f0 values and pickle [mean, voiced_f0s] to `path`."""
    print("\nF0 statistics...")

    voiced = []
    for utt_f0 in tqdm(f0_features):
        # f0 == 0 marks unvoiced frames; keep only voiced values.
        voiced += [value for value in utt_f0 if value != 0]

    mean = sum(voiced) / len(voiced)
    print("Min = {}, Max = {}, Mean = {}".format(min(voiced), max(voiced), mean))

    with open(path, "wb") as f:
        pickle.dump([mean, voiced], f)
17,726 | import torchaudio
import pyworld as pw
import numpy as np
import torch
import diffsptk
import os
from tqdm import tqdm
import pickle
import torchaudio
def world_synthesis(f0, sp, ap, fs, frameshift):
    """Synthesize a waveform from WORLD features (f0, spectrogram, aperiodicity)."""
    # frame_period is expressed in milliseconds.
    return pw.synthesize(f0, sp, ap, fs, frame_period=frameshift)
17,727 | import librosa
import numpy as np
import torch
import parselmouth
import torchcrepe
import pyworld as pw
The provided code snippet includes necessary dependencies for implementing the `f0_to_coarse` function. Write a Python function `def f0_to_coarse(f0, pitch_bin, f0_min, f0_max)` to solve the following problem:
Convert f0 (Hz) to pitch (mel scale), and then quantize the mel-scale pitch to the range from [1, 2, 3, ..., pitch_bin-1] Reference: https://en.wikipedia.org/wiki/Mel_scale Args: f0 (array or Tensor): Hz pitch_bin (int): the vocabulary size f0_min (int): the minimum f0 (Hz) f0_max (int): the maximum f0 (Hz) Returns: quantized f0 (array or Tensor)
Here is the function:
def f0_to_coarse(f0, pitch_bin, f0_min, f0_max):
    """
    Convert f0 (Hz) to pitch (mel scale), and then quantize the mel-scale pitch to the
    range from [1, 2, 3, ..., pitch_bin-1]

    Reference: https://en.wikipedia.org/wiki/Mel_scale

    Args:
        f0 (array or Tensor): Hz
        pitch_bin (int): the vocabulary size
        f0_min (int): the minimum f0 (Hz)
        f0_max (int): the maximum f0 (Hz)

    Returns:
        quantized f0 (array or Tensor)
    """
    f0_mel_min = 1127 * np.log(1 + f0_min / 700)
    f0_mel_max = 1127 * np.log(1 + f0_max / 700)

    is_torch = isinstance(f0, torch.Tensor)
    f0_mel = 1127 * (1 + f0 / 700).log() if is_torch else 1127 * np.log(1 + f0 / 700)
    # Map [f0_mel_min, f0_mel_max] linearly onto [1, pitch_bin - 1]; unvoiced
    # frames (f0 == 0 -> f0_mel == 0) end up in bin 1 via the clamp below.
    f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * (pitch_bin - 2) / (
        f0_mel_max - f0_mel_min
    ) + 1
    f0_mel[f0_mel <= 1] = 1
    f0_mel[f0_mel > pitch_bin - 1] = pitch_bin - 1
    f0_coarse = (f0_mel + 0.5).long() if is_torch else np.rint(f0_mel).astype(np.int32)
    # Fix: the upper bound was hard-coded to 255 (only valid for
    # pitch_bin == 256); check against the actual vocabulary size.
    assert f0_coarse.max() <= pitch_bin - 1 and f0_coarse.min() >= 1, (
        f0_coarse.max(),
        f0_coarse.min(),
    )
    return f0_coarse
17,728 | import librosa
import numpy as np
import torch
import parselmouth
import torchcrepe
import pyworld as pw
def get_log_f0(f0):
    """Return log(f0) with unvoiced frames (f0 == 0) mapped to log(1) == 0.

    Fix: the caller's array is no longer mutated in place; the zero-filling
    now happens on a private copy.
    """
    f0 = np.asarray(f0, dtype=float).copy()
    f0[f0 == 0] = 1
    log_f0 = np.log(f0)
    return log_f0
17,729 | import librosa
import numpy as np
import torch
import parselmouth
import torchcrepe
import pyworld as pw
The provided code snippet includes necessary dependencies for implementing the `get_f0_features_using_harvest` function. Write a Python function `def get_f0_features_using_harvest(audio, mel_len, fs, hop_length, f0_min, f0_max)` to solve the following problem:
Using harvest to extract the f0 feature. Args: audio mel_len fs hop_length f0_min f0_max Returns: f0: numpy array of shape (frame_len,)
Here is the function:
def get_f0_features_using_harvest(audio, mel_len, fs, hop_length, f0_min, f0_max):
    """Using harvest to extract the f0 feature.

    Args:
        audio: waveform samples.
        mel_len: number of mel frames; the f0 track is truncated to this length.
        fs: sampling rate in Hz.
        hop_length: hop size in samples (determines the frame period).
        f0_min: lower f0 bound in Hz.
        f0_max: upper f0 bound in Hz.

    Returns:
        f0: numpy array of shape (frame_len,)
    """
    frame_period_ms = 1000 * hop_length / fs
    raw_f0, _ = pw.harvest(
        audio.astype("double"),
        fs,
        f0_floor=f0_min,
        f0_ceil=f0_max,
        frame_period=frame_period_ms,
    )
    # Truncate to the mel frame count.
    return raw_f0.astype("float")[:mel_len]
17,730 | import librosa
import numpy as np
import torch
import parselmouth
import torchcrepe
import pyworld as pw
The provided code snippet includes necessary dependencies for implementing the `get_f0_features_using_crepe` function. Write a Python function `def get_f0_features_using_crepe( audio, mel_len, fs, hop_length, hop_length_new, f0_min, f0_max, threshold=0.3 )` to solve the following problem:
Using torchcrepe to extract the f0 feature. Args: audio mel_len fs hop_length hop_length_new f0_min f0_max threshold(default=0.3) Returns: f0: numpy array of shape (frame_len,)
Here is the function:
def get_f0_features_using_crepe(
    audio, mel_len, fs, hop_length, hop_length_new, f0_min, f0_max, threshold=0.3
):
    """Using torchcrepe to extract the f0 feature.
    Args:
        audio: 1-D waveform at sampling rate fs
        mel_len: number of target frames; f0 is interpolated onto this grid
        fs: source sampling rate (Hz)
        hop_length: hop size (samples) of the target frame grid at rate fs
        hop_length_new: hop size (samples) used by crepe on the 16 kHz audio
        f0_min: lower f0 bound (Hz)
        f0_max: upper f0 bound (Hz)
        threshold(default=0.3): periodicity threshold below which frames are
            treated as unvoiced
    Returns:
        f0: numpy array of shape (frame_len,)
    """
    # Currently, crepe only supports 16khz audio
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    audio_16k = librosa.resample(audio, orig_sr=fs, target_sr=16000)
    audio_16k_torch = torch.FloatTensor(audio_16k).unsqueeze(0).to(device)
    # Get the raw pitch (f0) and per-frame periodicity (pd) from crepe
    f0, pd = torchcrepe.predict(
        audio_16k_torch,
        16000,
        hop_length_new,
        f0_min,
        f0_max,
        pad=True,
        model="full",
        batch_size=1024,
        device=device,
        return_periodicity=True,
    )
    # Filter, de-silence, set up threshold for unvoiced part
    pd = torchcrepe.filter.median(pd, 3)
    pd = torchcrepe.threshold.Silence(-60.0)(pd, audio_16k_torch, 16000, hop_length_new)
    f0 = torchcrepe.threshold.At(threshold)(f0, pd)
    f0 = torchcrepe.filter.mean(f0, 3)
    # Convert unvoiced part to 0hz
    f0 = torch.where(torch.isnan(f0), torch.full_like(f0, 0), f0)
    # Interpolate f0: keep only voiced frames, then resample onto the target grid
    nzindex = torch.nonzero(f0[0]).squeeze()
    f0 = torch.index_select(f0[0], dim=0, index=nzindex).cpu().numpy()
    # 0.005 s per crepe frame assumes hop_length_new == 80 at 16 kHz — TODO confirm
    time_org = 0.005 * nzindex.cpu().numpy()
    time_frame = np.arange(mel_len) * hop_length / fs
    # Linear interpolation; edges are extended with the first/last voiced value
    f0 = np.interp(time_frame, time_org, f0, left=f0[0], right=f0[-1])
    return f0
17,731 | import librosa
import numpy as np
import torch
import parselmouth
import torchcrepe
import pyworld as pw
def get_cents(f0_hz):
    """Convert voiced f0 values (Hz) to cents relative to A4 = 440 Hz.

    F_cent = 1200 * log2(F / 440). Unvoiced frames (0 Hz) are dropped,
    so the output may be shorter than the input.

    Reference:
        APSIPA'17, Perceptual Evaluation of Singing Quality
    """
    voiced = f0_hz[f0_hz != 0]
    return 1200 * np.log2(voiced / 440)
The provided code snippet includes necessary dependencies for implementing the `get_pitch_derivatives` function. Write a Python function `def get_pitch_derivatives(f0_hz)` to solve the following problem:
f0_hz: (,T)
Here is the function:
def get_pitch_derivatives(f0_hz):
    """First-order difference of the cent-scale pitch contour.

    Args:
        f0_hz: (T,) f0 values in Hz. Unvoiced (0 Hz) frames are dropped by
            get_cents before differencing, so gaps are bridged.
    """
    return np.diff(get_cents(f0_hz))
17,732 | import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `gaussian_normalize_mel_channel` function. Write a Python function `def gaussian_normalize_mel_channel(mel, mu, sigma)` to solve the following problem:
Shift to Standard Normal Distribution Args: mel: (n_mels, frame_len) mu: (n_mels,), mean value sigma: (n_mels,), sd value Return: Tensor like mel
Here is the function:
def gaussian_normalize_mel_channel(mel, mu, sigma):
    """Standardize each mel channel: z = (x - mu) / sigma.

    Shift to Standard Normal Distribution per channel.

    Args:
        mel: (n_mels, frame_len)
        mu: (n_mels,), per-channel mean
        sigma: (n_mels,), per-channel standard deviation
    Return:
        Array like mel
    """
    return (mel - mu[..., None]) / sigma[..., None]
17,733 | import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `de_gaussian_normalize_mel_channel` function. Write a Python function `def de_gaussian_normalize_mel_channel(mel, mu, sigma)` to solve the following problem:
Args: mel: (n_mels, frame_len) mu: (n_mels,), mean value sigma: (n_mels,), sd value Return: Tensor like mel
Here is the function:
def de_gaussian_normalize_mel_channel(mel, mu, sigma):
    """Invert per-channel standardization: x = sigma * z + mu.

    Args:
        mel: (n_mels, frame_len), standardized mel
        mu: (n_mels,), per-channel mean
        sigma: (n_mels,), per-channel standard deviation
    Return:
        Array like mel, mapped back to the original scale
    """
    return sigma[..., None] * mel + mu[..., None]
17,734 | import numpy as np
import torch
def decompress(audio_compressed, bits):
    """Invert mu-law companding.

    x = sign(y) / mu * ((1 + mu)^|y| - 1), with mu = 2**bits - 1.
    Maps companded values in [-1, 1] back to linear amplitudes in [-1, 1].
    """
    mu = 2**bits - 1
    magnitude = (1 + mu) ** np.abs(audio_compressed) - 1
    return np.sign(audio_compressed) / mu * magnitude
17,735 | import numpy as np
import torch
def label_to_audio(quant, bits):
    """Map integer class labels in [0, 2**bits) back to amplitudes in [-1, 1]."""
    return 2 * quant / (2**bits - 1.0) - 1.0
17,736 | import numpy as np
import torch
The provided code snippet includes necessary dependencies for implementing the `label_to_onehot` function. Write a Python function `def label_to_onehot(x, bits)` to solve the following problem:
Converts a class vector (integers) to binary class matrix. Args: x: class vector to be converted into a matrix (integers from 0 to num_classes). num_classes: total number of classes. Returns: A binary matrix representation of the input. The classes axis is placed last.
Here is the function:
def label_to_onehot(x, bits):
    """Converts a class vector (integers) to binary class matrix.

    Args:
        x: integer class tensor with values in [0, 2**bits); any shape.
        bits: bit depth; the number of classes is 2**bits.
    Returns:
        A float32 tensor of shape x.shape + (2**bits,) with a 1 at each
        label position (one-hot along the last axis).
    """
    classes = 2**bits
    # Vectorized scatter replaces the original per-row Python loop; unlike
    # the loop (which only handled 1-D input despite reshaping to
    # x.shape + (classes,)), this also works for multi-dimensional inputs.
    output = torch.zeros(x.shape + (classes,), dtype=torch.float32)
    # scatter_ requires an int64 index tensor.
    output.scatter_(-1, x.long().unsqueeze(-1), 1.0)
    return output
17,737 | import torch
import torch.nn.functional as F
import numpy as np
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from librosa.filters import mel as librosa_mel_fn
import torch
import numpy as np
import librosa.util as librosa_util
from scipy.signal import get_window
The provided code snippet includes necessary dependencies for implementing the `window_sumsquare` function. Write a Python function `def window_sumsquare( window, n_frames, hop_length, win_length, n_fft, dtype=np.float32, norm=None, )` to solve the following problem:
# from librosa 0.6 Compute the sum-square envelope of a window function at a given hop length. This is used to estimate modulation effects induced by windowing observations in short-time fourier transforms. Parameters ---------- window : string, tuple, number, callable, or list-like Window specification, as in `get_window` n_frames : int > 0 The number of analysis frames hop_length : int > 0 The number of samples to advance between frames win_length : [optional] The length of the window function. By default, this matches `n_fft`. n_fft : int > 0 The length of each analysis frame. dtype : np.dtype The data type of the output Returns ------- wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))` The sum-squared envelope of the window function
Here is the function:
def window_sumsquare(
    window,
    n_frames,
    hop_length,
    win_length,
    n_fft,
    dtype=np.float32,
    norm=None,
):
    """
    # from librosa 0.6
    Compute the sum-square envelope of a window function at a given hop length.
    This is used to estimate modulation effects induced by windowing
    observations in short-time fourier transforms.
    Parameters
    ----------
    window : string, tuple, number, callable, or list-like
        Window specification, as in `get_window`
    n_frames : int > 0
        The number of analysis frames
    hop_length : int > 0
        The number of samples to advance between frames
    win_length : [optional]
        The length of the window function.  By default, this matches `n_fft`.
    n_fft : int > 0
        The length of each analysis frame.
    dtype : np.dtype
        The data type of the output
    norm : [optional]
        Normalization to apply to the window (passed to librosa's normalize)
    Returns
    -------
    wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
        The sum-squared envelope of the window function
    """
    if win_length is None:
        win_length = n_fft

    n = n_fft + hop_length * (n_frames - 1)
    x = np.zeros(n, dtype=dtype)

    # Compute the squared window at the desired length
    win_sq = get_window(window, win_length, fftbins=True)
    win_sq = librosa_util.normalize(win_sq, norm=norm) ** 2
    # Fix: `size` is keyword-only in librosa >= 0.10; passing it positionally
    # raises TypeError. The keyword form is also accepted by older versions.
    win_sq = librosa_util.pad_center(win_sq, size=n_fft)

    # Fill the envelope by overlap-adding the squared window at each hop
    for i in range(n_frames):
        sample = i * hop_length
        x[sample : min(n, sample + n_fft)] += win_sq[: max(0, min(n_fft, n - sample))]
    return x
17,738 | import torch
import torch.nn.functional as F
import numpy as np
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from librosa.filters import mel as librosa_mel_fn
import torch
import numpy as np
import librosa.util as librosa_util
from scipy.signal import get_window
The provided code snippet includes necessary dependencies for implementing the `griffin_lim` function. Write a Python function `def griffin_lim(magnitudes, stft_fn, n_iters=30)` to solve the following problem:
PARAMS ------ magnitudes: spectrogram magnitudes stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
Here is the function:
def griffin_lim(magnitudes, stft_fn, n_iters=30):
    """
    Griffin-Lim phase reconstruction: iteratively refine random initial
    phases so the inverse STFT is consistent with the given magnitudes.

    PARAMS
    ------
    magnitudes: spectrogram magnitudes
    stft_fn: STFT class with transform (STFT) and inverse (ISTFT) methods
    n_iters: number of refinement iterations
    """
    # Start from uniformly random phases in [-pi, pi).
    angles = np.angle(np.exp(2j * np.pi * np.random.rand(*magnitudes.size())))
    # torch.autograd.Variable is deprecated (a no-op since PyTorch 0.4);
    # a plain tensor behaves identically.
    angles = torch.from_numpy(angles.astype(np.float32))
    signal = stft_fn.inverse(magnitudes, angles).squeeze(1)

    for _ in range(n_iters):
        # Re-estimate phases from the current signal, keep the target magnitudes.
        _, angles = stft_fn.transform(signal)
        signal = stft_fn.inverse(magnitudes, angles).squeeze(1)
    return signal
17,739 | import torch
import torch.nn.functional as F
import numpy as np
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from librosa.filters import mel as librosa_mel_fn
import torch
import numpy as np
import librosa.util as librosa_util
from scipy.signal import get_window
The provided code snippet includes necessary dependencies for implementing the `dynamic_range_compression` function. Write a Python function `def dynamic_range_compression(x, C=1, clip_val=1e-5)` to solve the following problem:
PARAMS ------ C: compression factor
Here is the function:
def dynamic_range_compression(x, C=1, clip_val=1e-5):
    """
    Log-compress *x*: log(clamp(x, clip_val) * C).

    PARAMS
    ------
    C: compression factor
    clip_val: lower clamp to keep log() finite
    """
    clamped = torch.clamp(x, min=clip_val)
    return torch.log(clamped * C)
17,740 | import torch
import torch.nn.functional as F
import numpy as np
from scipy.signal import get_window
from librosa.util import pad_center, tiny
from librosa.filters import mel as librosa_mel_fn
import torch
import numpy as np
import librosa.util as librosa_util
from scipy.signal import get_window
The provided code snippet includes necessary dependencies for implementing the `dynamic_range_decompression` function. Write a Python function `def dynamic_range_decompression(x, C=1)` to solve the following problem:
PARAMS ------ C: compression factor used to compress
Here is the function:
def dynamic_range_decompression(x, C=1):
    """
    Invert dynamic_range_compression: exp(x) / C.

    PARAMS
    ------
    C: compression factor used to compress
    """
    decompressed = torch.exp(x)
    return decompressed / C
17,741 | import os
import subprocess
from multiprocessing import Pool
from tqdm import tqdm
import torchaudio
from pathlib import Path
def remove_empty_dirs(path):
    """Remove all empty directories under *path* (bottom-up)."""
    if not os.path.isdir(path):
        print(f"{path} is not a directory")
        return
    # Bottom-up traversal so a directory emptied by removing its children
    # is itself empty by the time its parent's listing is processed.
    for root, subdirs, _ in os.walk(path, topdown=False):
        for name in subdirs:
            candidate = os.path.join(root, name)
            if not os.listdir(candidate):
                os.rmdir(candidate)
def process_single_wav_file(task):
    """Convert one wav file to 16 kHz / 16-bit PCM under the output directory.

    The result is saved as "<speaker>_<book>_<original filename>" inside
    <output_dir>/<speaker>/, and the source file is deleted only after a
    successful conversion.

    Args:
        task: (wav_file, output_dir) tuple.
    """
    wav_file, output_dir = task
    # Source layout is <...>/<speaker_id>/<book_name>/<filename>.
    speaker_id, book_name, filename = Path(wav_file).parts[-3:]

    output_book_dir = Path(output_dir, speaker_id)
    output_book_dir.mkdir(parents=True, exist_ok=True)
    # Fix: include the source file name in the output name; the original
    # formatted a fixed literal instead of `filename`, so every file from
    # the same book collided on a single output path.
    new_filename = f"{speaker_id}_{book_name}_{filename}"
    new_wav_file = Path(output_book_dir, new_filename)

    command = [
        "ffmpeg",
        "-nostdin",
        "-hide_banner",
        "-loglevel",
        "error",
        "-nostats",
        "-i",
        wav_file,
        "-acodec",
        "pcm_s16le",
        "-ar",
        "16000",
        new_wav_file,
    ]
    # Raises CalledProcessError on ffmpeg failure, so the source is only
    # removed after the conversion succeeded.
    subprocess.check_call(
        command
    )  # Run the command to convert the file to 16kHz and 16-bit PCM
    os.remove(wav_file)
The provided code snippet includes necessary dependencies for implementing the `process_wav_files` function. Write a Python function `def process_wav_files(wav_files, output_dir, n_process)` to solve the following problem:
process wav files in parallel
Here is the function:
def process_wav_files(wav_files, output_dir, n_process):
    """Convert wav files in parallel, then prune directories emptied by the moves."""
    jobs = [(wav_path, output_dir) for wav_path in wav_files]
    print(f"Processing {len(jobs)} files")
    with Pool(processes=n_process) as pool:
        # imap_unordered lets tqdm advance as soon as any worker finishes.
        progress = tqdm(pool.imap_unordered(process_single_wav_file, jobs), total=len(jobs))
        for _ in progress:
            pass
    print("Removing empty directories...")
    remove_empty_dirs(output_dir)
    print("Done!")
17,742 | import os
import subprocess
from multiprocessing import Pool
from tqdm import tqdm
import torchaudio
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `get_wav_files` function. Write a Python function `def get_wav_files(dataset_path)` to solve the following problem:
get all wav files in the dataset
Here is the function:
def get_wav_files(dataset_path):
    """Collect all wav paths laid out as <dataset>/<speaker>/<book>/*.wav."""
    wav_files = []
    for speaker_id in os.listdir(dataset_path):
        speaker_dir = os.path.join(dataset_path, speaker_id)
        if not os.path.isdir(speaker_dir):
            continue
        for book_name in os.listdir(speaker_dir):
            book_dir = os.path.join(speaker_dir, book_name)
            if not os.path.isdir(book_dir):
                continue
            wav_files.extend(
                os.path.join(book_dir, entry)
                for entry in os.listdir(book_dir)
                if entry.endswith(".wav")
            )
    print("Found {} wav files".format(len(wav_files)))
    return wav_files
17,743 | import os
import subprocess
from multiprocessing import Pool
from tqdm import tqdm
import torchaudio
from pathlib import Path
The provided code snippet includes necessary dependencies for implementing the `filter_wav_files_by_length` function. Write a Python function `def filter_wav_files_by_length(wav_files, max_len_sec=15)` to solve the following problem:
filter wav files by length
Here is the function:
def filter_wav_files_by_length(wav_files, max_len_sec=15):
    """Keep wav files no longer than *max_len_sec* seconds; delete the rest."""
    print("original wav files: {}".format(len(wav_files)))
    kept = []
    for wav_path in wav_files:
        info = torchaudio.info(str(wav_path))
        duration_sec = info.num_frames / info.sample_rate
        if duration_sec > max_len_sec:
            # Over-long recordings are removed from disk entirely.
            os.remove(wav_path)
        else:
            kept.append(wav_path)
    print("filtered wav files: {}".format(len(kept)))
    return kept
17,744 | import torch
def check_nan(logger, loss, y_pred, y_gt):
    """Abort the process with diagnostics when *loss* contains NaN.

    Args:
        logger: logger used for the diagnostic dump.
        loss: training loss tensor.
        y_pred: model output tensor.
        y_gt: ground-truth tensor.
    """
    if torch.any(torch.isnan(loss)):
        # Fix: the original passed extra positional args to logger.info with
        # no format placeholders, so the logging module raised a formatting
        # error and the values never reached the log. Use lazy %-formatting.
        logger.info("out has nan: %s", torch.any(torch.isnan(y_pred)))
        logger.info("y_gt has nan: %s", torch.any(torch.isnan(y_gt)))
        logger.info("out: %s", y_pred)
        logger.info("y_gt: %s", y_gt)
        logger.info("loss = {:.4f}\n".format(loss.item()))
        exit()
17,745 | import os
import json
import numpy as np
from tqdm import tqdm
import torch
import torchaudio
from utils.io import save_audio
from utils.audio import load_audio_torch
def save_audio(path, waveform, fs, add_silence=False, turn_up=False, volume_peak=0.9):
    """Save audio to path with processing  (turn up volume, add silence)
    Args:
        path (str): path to save audio
        waveform (numpy array): waveform to save
        fs (int): sampling rate
        add_silence (bool, optional): whether to add 1/20 s of silence to beginning and end. Defaults to False.
        turn_up (bool, optional): whether to rescale so the absolute peak equals volume_peak. Defaults to False.
        volume_peak (float, optional): target absolute peak when turn_up is True. Defaults to 0.9.
    """
    if turn_up:
        # continue to turn up to volume_peak
        # Peak normalization: scale so max(|waveform|) == volume_peak.
        ratio = volume_peak / max(waveform.max(), abs(waveform.min()))
        waveform = waveform * ratio
    if add_silence:
        # Pad fs // 20 samples (1/20 s) of silence on each side.
        silence_len = fs // 20
        silence = np.zeros((silence_len,), dtype=waveform.dtype)
        result = np.concatenate([silence, waveform, silence])
        waveform = result
    waveform = torch.as_tensor(waveform, dtype=torch.float32, device="cpu")
    if len(waveform.size()) == 1:
        # (T,) -> (1, T): torchaudio.save expects (channels, time).
        waveform = waveform[None, :]
    elif waveform.size(0) != 1:
        # Stereo to mono
        waveform = torch.mean(waveform, dim=0, keepdim=True)
    torchaudio.save(path, waveform, fs, encoding="PCM_S", bits_per_sample=16)
def load_audio_torch(wave_file, fs):
    """Load audio data into torch tensor
    Args:
        wave_file (str): path to wave file
        fs (int): target sample rate; the audio is resampled if needed
    Returns:
        audio (tensor): mono audio data scaled to roughly [-1, 1], or [] if
            the decoded data contains NaN/Inf
        fs (int): sample rate
    """
    audio, sample_rate = librosa.load(wave_file, sr=fs, mono=True)
    # audio: (T,)
    assert len(audio) > 2

    # Check the audio type (for soundfile loading backbone) - float, 8bit or 16bit
    if np.issubdtype(audio.dtype, np.integer):
        max_mag = -np.iinfo(audio.dtype).min
    else:
        max_mag = max(np.amax(audio), -np.amin(audio))
        # Heuristic: infer the original bit depth from the observed range so
        # integer data that was decoded as float is still scaled into [-1, 1].
        max_mag = (
            (2**31) + 1
            if max_mag > (2**15)
            else ((2**15) + 1 if max_mag > 1.01 else 1.0)
        )

    # Normalize the audio
    audio = torch.FloatTensor(audio.astype(np.float32)) / max_mag

    if (torch.isnan(audio) | torch.isinf(audio)).any():
        # Corrupted file: return an empty waveform rather than raising.
        return [], sample_rate or fs or 48000

    # Resample the audio to our target samplerate
    if fs is not None and fs != sample_rate:
        audio = torch.from_numpy(
            librosa.core.resample(audio.numpy(), orig_sr=sample_rate, target_sr=fs)
        )
        sample_rate = fs

    return audio, fs
The provided code snippet includes necessary dependencies for implementing the `merge_segments_torchaudio` function. Write a Python function `def merge_segments_torchaudio(wav_files, fs, output_path, overlap_duration=1.0)` to solve the following problem:
Merge the given wav_files (may have overlaps) into a long audio fs: The sampling rate of the wav files. output_path: The output path to save the merged audio. overlap_duration (float, optional): Each segment has "overlap duration" (second) overlap with its previous and next segment. Defaults to 1.0.
Here is the function:
def merge_segments_torchaudio(wav_files, fs, output_path, overlap_duration=1.0):
    """Merge the given wav_files (which may overlap) into one long audio.

    Adjacent segments are cross-faded: each is faded in/out over the overlap
    region and the overlapping samples are summed (overlap-add), so segment
    boundaries are seamless.

    Args:
        wav_files: ordered list of segment paths.
        fs: the sampling rate of the wav files.
        output_path: the output path to save the merged audio.
        overlap_duration (float, optional): each segment has "overlap duration"
            (second) overlap with its previous and next segment. Defaults to 1.0.
    """
    waveforms = []
    for file in wav_files:
        # (T,)
        waveform, _ = load_audio_torch(file, fs)
        waveforms.append(waveform)

    if len(waveforms) == 1:
        save_audio(output_path, waveforms[0], fs, add_silence=False, turn_up=False)
        return

    overlap_len = int(overlap_duration * fs)
    fade_out = torchaudio.transforms.Fade(fade_out_len=overlap_len)
    fade_in = torchaudio.transforms.Fade(fade_in_len=overlap_len)
    # Fix: interior segments must fade on BOTH ends. The original only set
    # fade_out_len here, leaving an abrupt onset at every interior boundary.
    fade_in_and_out = torchaudio.transforms.Fade(
        fade_in_len=overlap_len, fade_out_len=overlap_len
    )

    segments_lens = [len(wav) for wav in waveforms]
    merged_waveform_len = sum(segments_lens) - overlap_len * (len(waveforms) - 1)
    merged_waveform = torch.zeros(merged_waveform_len)

    start = 0
    for index, wav in enumerate(
        tqdm(waveforms, desc="Merge for {}".format(output_path))
    ):
        wav_len = len(wav)
        if index == 0:
            wav = fade_out(wav)
        elif index == len(waveforms) - 1:
            wav = fade_in(wav)
        else:
            wav = fade_in_and_out(wav)
        # Fix: accumulate (overlap-add) instead of assigning; plain assignment
        # erased the faded tail of the previous segment, breaking the crossfade.
        merged_waveform[start : start + wav_len] += wav
        start += wav_len - overlap_len

    save_audio(output_path, merged_waveform, fs, add_silence=False, turn_up=True)
17,746 | import pathlib
import soundfile as sf
import numpy as np
import json
import multiprocessing
import tqdm
def cut_book(task):
    """process each book in the dataset

    Args:
        task: (path_book, root_out, target_len_sec, extension) tuple.

    For every *.json metadata file in the book directory, loads the metadata,
    locates the matching .flac recording, and cuts it into segments using the
    voice-activity annotations.
    """
    path_book, root_out, target_len_sec, extension = task
    speaker = pathlib.Path(path_book.parent.name)
    for i, meta_file_path in enumerate(path_book.glob("*.json")):
        with open(meta_file_path, "r") as f:
            meta = json.loads(f.read())
        book_id = meta["book_meta"]["id"]
        vad = meta["voice_activity"]
        # The audio file shares the metadata file's stem.
        sound_file = meta_file_path.parent / (meta_file_path.stem + ".flac")
        path_out = root_out / speaker / book_id / (meta_file_path.stem)
        # NOTE(review): cut_sequence is defined elsewhere in the project;
        # presumably it writes VAD-based segments under path_out — confirm there.
        cut_sequence(sound_file, vad, path_out, target_len_sec, extension)
The provided code snippet includes necessary dependencies for implementing the `cut_segments` function. Write a Python function `def cut_segments( input_dir, output_dir, target_len_sec=30, n_process=32, out_extension=".wav" )` to solve the following problem:
Main function to cut segments from audio files
Here is the function:
def cut_segments(
    input_dir, output_dir, target_len_sec=30, n_process=32, out_extension=".wav"
):
    """Cut every book under input_dir into ~target_len_sec segments in parallel."""
    pathlib.Path(output_dir).mkdir(exist_ok=True, parents=True)

    # Book directories live two levels deep: <input_dir>/<speaker>/<book>.
    book_dirs = [p for p in pathlib.Path(input_dir).glob("*/*") if p.is_dir()]
    print(f"{len(book_dirs)} directories detected")
    print(f"Launching {n_process} processes")

    # One task per book, fanned out over a worker pool.
    tasks = [
        (book_dir, output_dir, target_len_sec, out_extension) for book_dir in book_dirs
    ]
    with multiprocessing.Pool(processes=n_process) as pool:
        for _ in tqdm.tqdm(pool.imap_unordered(cut_book, tasks), total=len(tasks)):
            pass
17,747 | import os
import numpy as np
import torch
import torchaudio
The provided code snippet includes necessary dependencies for implementing the `async_load_audio` function. Write a Python function `async def async_load_audio(path, sample_rate: int = 24000)` to solve the following problem:
r""" Args: path: The source loading path. sample_rate: The target sample rate, will automatically resample if necessary. Returns: waveform: The waveform object. Should be [1 x sequence_len].
Here is the function:
async def async_load_audio(path, sample_rate: int = 24000):
    r"""
    Load an audio file as a mono waveform, resampled to *sample_rate*.

    Args:
        path: The source loading path.
        sample_rate: The target sample rate, will automatically resample if necessary.
    Returns:
        waveform: The waveform object. Should be [1 x sequence_len].
    Raises:
        ValueError: if the decoded waveform contains NaN or Inf.
    """

    async def use_torchaudio_load(path):
        return torchaudio.load(path)

    waveform, sr = await use_torchaudio_load(path)
    # Downmix to mono while keeping the channel dimension.
    waveform = torch.mean(waveform, dim=0, keepdim=True)

    if sr != sample_rate:
        waveform = torchaudio.functional.resample(waveform, sr, sample_rate)

    # Fix: the original used Python `or` between two multi-element tensors,
    # which raises "Boolean value of Tensor ... is ambiguous" at runtime;
    # the element-wise `|` is the correct operator here.
    if torch.any(torch.isnan(waveform) | torch.isinf(waveform)):
        raise ValueError("NaN or Inf found in waveform.")
    return waveform
17,748 | import os
import numpy as np
import torch
import torchaudio
The provided code snippet includes necessary dependencies for implementing the `async_save_audio` function. Write a Python function `async def async_save_audio( path, waveform, sample_rate: int = 24000, add_silence: bool = False, volume_peak: float = 0.9, )` to solve the following problem:
r""" Args: path: The target saving path. waveform: The waveform object. Should be [n_channel x sequence_len]. sample_rate: Sample rate. add_silence: If ``true``, concat 0.05s silence to beginning and end. volume_peak: Turn up volume for larger number, vice versa.
Here is the function:
async def async_save_audio(
    path,
    waveform,
    sample_rate: int = 24000,
    add_silence: bool = False,
    volume_peak: float = 0.9,
):
    r"""
    Peak-normalize a waveform and save it as 16-bit PCM.

    Args:
        path: The target saving path.
        waveform: The waveform object. Should be [n_channel x sequence_len].
        sample_rate: Sample rate.
        add_silence: If ``true``, concat 0.05s silence to beginning and end.
        volume_peak: Turn up volume for larger number, vice versa.
    """

    async def use_torchaudio_save(path, waveform, sample_rate):
        torchaudio.save(
            path, waveform, sample_rate, encoding="PCM_S", bits_per_sample=16
        )

    waveform = torch.as_tensor(waveform, device="cpu", dtype=torch.float32)
    # Leading (channel) dims, used to shape the silence padding below.
    shape = waveform.size()[:-1]

    # Peak normalization: scale so max(|waveform|) == |volume_peak|.
    ratio = abs(volume_peak) / max(waveform.max(), abs(waveform.min()))
    waveform = waveform * ratio

    if add_silence:
        silence_len = sample_rate // 20
        # Fix: `waveform.type()` returns a string (e.g. "torch.FloatTensor"),
        # which is not a valid `dtype=` argument and raised TypeError whenever
        # add_silence was True; `waveform.dtype` is the torch.dtype object.
        silence = torch.zeros((*shape, silence_len), dtype=waveform.dtype)
        waveform = torch.concatenate((silence, waveform, silence), dim=-1)

    if waveform.dim() == 1:
        # (T,) -> (1, T) so the saver always gets (channels, time).
        waveform = waveform[None]

    await use_torchaudio_save(path, waveform, sample_rate)
17,749 | import torch
from tqdm import tqdm
import numpy as np
from transformers import Wav2Vec2FeatureExtractor
from transformers import AutoModel
import torchaudio
import torchaudio.transforms as T
from sklearn.preprocessing import StandardScaler
The provided code snippet includes necessary dependencies for implementing the `mert_encoder` function. Write a Python function `def mert_encoder(model, processor, audio_path, hps)` to solve the following problem:
# mert default sr: 24000
Here is the function:
def mert_encoder(model, processor, audio_path, hps):
    """Extract a frame-level MERT representation for one audio file.

    # mert default sr: 24000

    Args:
        model: pretrained MERT model (run under no_grad, inference only).
        processor: Wav2Vec2FeatureExtractor matching the model.
        audio_path: path of the audio file to encode.
        hps: config; hps.mert_feature_layer selects which hidden layer to keep.
    Returns:
        numpy array of shape (frame_len, feature_dim).
    """
    with torch.no_grad():
        resample_rate = processor.sampling_rate
        device = next(model.parameters()).device
        input_audio, sampling_rate = torchaudio.load(audio_path)
        input_audio = input_audio.squeeze()
        # Resample to the rate the feature extractor expects.
        if sampling_rate != resample_rate:
            resampler = T.Resample(sampling_rate, resample_rate)
            input_audio = resampler(input_audio)
        inputs = processor(
            input_audio, sampling_rate=resample_rate, return_tensors="pt"
        ).to(
            device
        )  # {input_values: tensor, attention_mask: tensor}
        outputs = model(**inputs, output_hidden_states=True)  # list: len is 25
        # [25 layer, Time steps, 1024 feature_dim]
        # all_layer_hidden_states = torch.stack(outputs.hidden_states).squeeze()
        # mert_features.append(all_layer_hidden_states)
        # Keep only the configured hidden layer.
        feature = outputs.hidden_states[
            hps.mert_feature_layer
        ].squeeze()  # [1, frame len, 1024] -> [frame len, 1024]
        return feature.cpu().detach().numpy()
17,750 | import torch
from tqdm import tqdm
import numpy as np
from transformers import Wav2Vec2FeatureExtractor
from transformers import AutoModel
import torchaudio
import torchaudio.transforms as T
from sklearn.preprocessing import StandardScaler
def mert_features_normalization(raw_mert_features):
    """Standardize MERT features with one scaler fit over all utterances.

    Args:
        raw_mert_features: list of per-utterance feature arrays.
    Returns:
        List of standardized feature arrays (same shapes as the input).
    """
    # Fit a single StandardScaler on the stacked features, then apply it
    # to every utterance.
    mert_features = np.array(raw_mert_features)
    scaler = StandardScaler().fit(mert_features)
    # Fix: the original looped `for raw_mert_feature in raw_mert_feature`,
    # iterating over an undefined name (NameError at runtime); iterate the
    # input list instead.
    return [scaler.transform(feature) for feature in raw_mert_features]
17,751 | import torch
from tqdm import tqdm
import numpy as np
from transformers import Wav2Vec2FeatureExtractor
from transformers import AutoModel
import torchaudio
import torchaudio.transforms as T
from sklearn.preprocessing import StandardScaler
def get_mapped_mert_features(raw_mert_features, mapping_features, fast_mapping=True):
    """Resample MERT features (hop 320) onto the mel frame grid (hop 256).

    Each feature sequence is nearest-neighbor upsampled by the (reduced)
    source hop, block-averaged by the (reduced) target hop, and then
    padded/truncated to exactly match the corresponding mel sequence length.

    Args:
        raw_mert_features: list of (frame_len, 1024) tensors.
        mapping_features: list of (mels_frame_len, n_mels) arrays defining
            the target lengths.
        fast_mapping: unused in this implementation — TODO confirm callers.
    Returns:
        list of numpy arrays, each (mels_frame_len, 1024).
    """
    source_hop = 320
    target_hop = 256
    # Reduce both hops by their gcd so the up/down-sampling factors stay small.
    factor = np.gcd(source_hop, target_hop)
    source_hop //= factor
    target_hop //= factor
    print(
        "Mapping source's {} frames => target's {} frames".format(
            target_hop, source_hop
        )
    )
    mert_features = []
    for index, mapping_feat in enumerate(tqdm(mapping_features)):
        # mapping_feat: (mels_frame_len, n_mels)
        target_len = mapping_feat.shape[0]
        # (frame_len, 1024)
        raw_feats = raw_mert_features[index].cpu().numpy()
        source_len, width = raw_feats.shape
        # const ~= target_len * target_hop (largest multiple of target_hop
        # that fits in the upsampled sequence)
        const = source_len * source_hop // target_hop * target_hop
        # (source_len * source_hop, dim)
        up_sampling_feats = np.repeat(raw_feats, source_hop, axis=0)
        # (const, dim) -> (const/target_hop, target_hop, dim) -> (const/target_hop, dim)
        down_sampling_feats = np.average(
            up_sampling_feats[:const].reshape(-1, target_hop, width), axis=1
        )
        err = abs(target_len - len(down_sampling_feats))
        if err > 3:
            # Lengths should agree within a few frames; a larger gap means the
            # features and mels are mismatched, so dump diagnostics and abort.
            print("index:", index)
            print("mels:", mapping_feat.shape)
            print("raw mert vector:", raw_feats.shape)
            print("up_sampling:", up_sampling_feats.shape)
            print("const:", const)
            print("down_sampling_feats:", down_sampling_feats.shape)
            exit()
        if len(down_sampling_feats) < target_len:
            # Pad by repeating the last frame `err` times.
            # (1, dim) -> (err, dim)
            end = down_sampling_feats[-1][None, :].repeat(err, axis=0)
            down_sampling_feats = np.concatenate([down_sampling_feats, end], axis=0)
        # (target_len, dim)
        feats = down_sampling_feats[:target_len]
        mert_features.append(feats)
    return mert_features
17,752 | import torch
from tqdm import tqdm
import numpy as np
from transformers import Wav2Vec2FeatureExtractor
from transformers import AutoModel
import torchaudio
import torchaudio.transforms as T
from sklearn.preprocessing import StandardScaler
def load_mert_model(hps):
    """Load the MERT model named by hps.mert_model and its feature extractor.

    Returns:
        (model, preprocessor): the model (moved to GPU when available) and
        the matching Wav2Vec2FeatureExtractor.
    """
    model_name = hps.mert_model
    print("Loading MERT Model: ", model_name)

    model = AutoModel.from_pretrained(model_name, trust_remote_code=True)
    if torch.cuda.is_available():
        model = model.cuda()

    preprocessor = Wav2Vec2FeatureExtractor.from_pretrained(
        model_name, trust_remote_code=True
    )
    return model, preprocessor
17,753 | import os
import pathlib
import string
import time
from multiprocessing import Pool, Value, Lock
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor
import torch
import whisper
def asr_wav_files(file_list, gpu_id, total_files, model_id):
    """Transcribe wav files in a list

    Runs inside one worker process pinned to gpu_id; writes a transcription
    for each audio file and reports progress every 5 processed files.

    Args:
        file_list: wav paths assigned to this worker.
        gpu_id: CUDA device index (CPU fallback when CUDA is unavailable).
        total_files: overall file count, used for the remaining-time estimate.
        model_id: whisper model identifier passed to init_whisper.
    """
    device = f"cuda:{gpu_id}" if torch.cuda.is_available() else "cpu"
    # NOTE(review): init_whisper / transcribe_audio / write_transcription and
    # the shared `lock` / `processed_files_count` are defined elsewhere in
    # this module — presumably a multiprocessing.Value + Lock; confirm there.
    whisper_model, processor = init_whisper(model_id, device)
    print(f"Processing on {device} starts")
    start_time = time.time()
    for audio_file in file_list:
        try:
            transcription = transcribe_audio(
                whisper_model, processor, audio_file, device
            )
            write_transcription(audio_file, transcription)
            # Counter is shared across workers; guard updates with the lock.
            with lock:
                processed_files_count.value += 1
                if processed_files_count.value % 5 == 0:
                    current_time = time.time()
                    # Average over all files processed so far (all workers).
                    avg_time_per_file = (current_time - start_time) / (
                        processed_files_count.value
                    )
                    remaining_files = total_files - processed_files_count.value
                    estimated_time_remaining = avg_time_per_file * remaining_files
                    remaining_time_formatted = time.strftime(
                        "%H:%M:%S", time.gmtime(estimated_time_remaining)
                    )
                    print(
                        f"Processed {processed_files_count.value}/{total_files} files, time: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())}, Estimated time remaining: {remaining_time_formatted}"
                    )
        except Exception as e:
            # Best-effort: log and continue with the remaining files.
            print(f"Error processing file {audio_file}: {e}")
The provided code snippet includes necessary dependencies for implementing the `asr_main` function. Write a Python function `def asr_main(input_dir, num_gpus, model_id)` to solve the following problem:
Transcribe wav files in a directory
Here is the function:
def asr_main(input_dir, num_gpus, model_id):
"""Transcribe wav files in a directory"""
num_processes = min(num_gpus, os.cpu_count())
print(f"Using {num_processes} GPUs for transcription")
wav_files = list(pathlib.Path(input_dir).rglob("*.wav"))
total_files = len(wav_files)
print(f"Found {total_files} wav files in {input_dir}")
files_per_process = len(wav_files) // num_processes
print(f"Processing {files_per_process} files per process")
with Pool(num_processes) as p:
p.starmap(
asr_wav_files,
[
(
wav_files[i * files_per_process : (i + 1) * files_per_process],
i % num_gpus,
total_files,
model_id,
)
for i in range(num_processes)
],
)
print("Done!") | Transcribe wav files in a directory |
17,754 | import torch
from librosa.filters import mel as librosa_mel_fn
def spectral_normalize_torch(magnitudes):
output = dynamic_range_compression_torch(magnitudes)
return output
mel_basis = {}
hann_window = {}
The provided code snippet includes necessary dependencies for implementing the `mel_spectrogram_torch` function. Write a Python function `def mel_spectrogram_torch(y, cfg, center=False)` to solve the following problem:
TODO: to merge this funtion with the extract_mel_features below
Here is the function:
def mel_spectrogram_torch(y, cfg, center=False):
"""
TODO: to merge this funtion with the extract_mel_features below
"""
if torch.min(y) < -1.0:
print("min value is ", torch.min(y))
if torch.max(y) > 1.0:
print("max value is ", torch.max(y))
global mel_basis, hann_window
if cfg.fmax not in mel_basis:
mel = librosa_mel_fn(
sr=cfg.sample_rate,
n_fft=cfg.n_fft,
n_mels=cfg.n_mel,
fmin=cfg.fmin,
fmax=cfg.fmax,
)
mel_basis[str(cfg.fmax) + "_" + str(y.device)] = (
torch.from_numpy(mel).float().to(y.device)
)
hann_window[str(y.device)] = torch.hann_window(cfg.win_size).to(y.device)
y = torch.nn.functional.pad(
y.unsqueeze(1),
(int((cfg.n_fft - cfg.hop_size) / 2), int((cfg.n_fft - cfg.hop_size) / 2)),
mode="reflect",
)
y = y.squeeze(1)
spec = torch.stft(
y,
cfg.n_fft,
hop_length=cfg.hop_size,
win_length=cfg.win_size,
window=hann_window[str(y.device)],
center=center,
pad_mode="reflect",
normalized=False,
onesided=True,
return_complex=True,
)
spec = torch.view_as_real(spec)
spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)
spec = torch.matmul(mel_basis[str(cfg.fmax) + "_" + str(y.device)], spec)
spec = spectral_normalize_torch(spec)
return spec | TODO: to merge this funtion with the extract_mel_features below |
17,755 | import time
import progressbar
from simtk import openmm, unit
from simtk.openmm import app
from openmmtools.integrators import LangevinIntegrator
output_prefix = 'output/'
with open(output_prefix + equilibrated_pdb_filename, 'w') as outfile:
app.PDBFile.writeFile(
pdb.topology,
context.getState(getPositions=True, enforcePeriodicBox=True).getPositions(),
file=outfile,
keepIds=True)
def serialize(filename, data):
with open(os.path.join(output_prefix, filename), 'w') as outfile:
xml = openmm.XmlSerializer.serialize(data)
outfile.write(xml) | null |
17,756 | import time
import progressbar
from simtk import openmm, unit
from simtk.openmm import app
from openmmtools.integrators import LangevinIntegrator
output_prefix = 'output/'
with open(output_prefix + equilibrated_pdb_filename, 'w') as outfile:
app.PDBFile.writeFile(
pdb.topology,
context.getState(getPositions=True, enforcePeriodicBox=True).getPositions(),
file=outfile,
keepIds=True)
def deserialize(filename):
with open(os.path.join(output_prefix, filename), 'r') as infile:
return openmm.XmlSerializer.deserialize(infile.read()) | null |
17,757 | import math
from typing import Optional
import mlx.core as mx
import mlx.nn as nn
from .config import UNetConfig
def upsample_nearest(x, scale: int = 2):
B, H, W, C = x.shape
x = mx.broadcast_to(x[:, :, None, :, None, :], (B, H, scale, W, scale, C))
x = x.reshape(B, H * scale, W * scale, C)
return x | null |
17,758 | import mlx.core as mx
from .config import DiffusionConfig
def _linspace(a, b, num):
x = mx.arange(0, num) / (num - 1)
return (b - a) * x + a | null |
17,759 | import mlx.core as mx
from .config import DiffusionConfig
The provided code snippet includes necessary dependencies for implementing the `_interp` function. Write a Python function `def _interp(y, x_new)` to solve the following problem:
Interpolate the function defined by (arange(0, len(y)), y) at positions x_new.
Here is the function:
def _interp(y, x_new):
"""Interpolate the function defined by (arange(0, len(y)), y) at positions x_new."""
x_low = x_new.astype(mx.int32)
x_high = mx.minimum(x_low + 1, len(y) - 1)
y_low = y[x_low]
y_high = y[x_high]
delta_x = x_new - x_low
y_new = y_low * (1 - delta_x) + delta_x * y_high
return y_new | Interpolate the function defined by (arange(0, len(y)), y) at positions x_new. |
17,760 | import json
from functools import partial
from typing import Optional
import mlx.core as mx
from huggingface_hub import hf_hub_download
from mlx.utils import tree_unflatten
from .clip import CLIPTextModel
from .config import AutoencoderConfig, CLIPTextModelConfig, DiffusionConfig, UNetConfig
from .tokenizer import Tokenizer
from .unet import UNetModel
from .vae import Autoencoder
_DEFAULT_MODEL = "stabilityai/stable-diffusion-2-1-base"
_MODELS = {
# See https://huggingface.co/stabilityai/sdxl-turbo for the model details and license
"stabilityai/sdxl-turbo": {
"unet_config": "unet/config.json",
"unet": "unet/diffusion_pytorch_model.safetensors",
"text_encoder_config": "text_encoder/config.json",
"text_encoder": "text_encoder/model.safetensors",
"text_encoder_2_config": "text_encoder_2/config.json",
"text_encoder_2": "text_encoder_2/model.safetensors",
"vae_config": "vae/config.json",
"vae": "vae/diffusion_pytorch_model.safetensors",
"diffusion_config": "scheduler/scheduler_config.json",
"tokenizer_vocab": "tokenizer/vocab.json",
"tokenizer_merges": "tokenizer/merges.txt",
"tokenizer_2_vocab": "tokenizer_2/vocab.json",
"tokenizer_2_merges": "tokenizer_2/merges.txt",
},
# See https://huggingface.co/stabilityai/stable-diffusion-2-1-base for the model details and license
"stabilityai/stable-diffusion-2-1-base": {
"unet_config": "unet/config.json",
"unet": "unet/diffusion_pytorch_model.safetensors",
"text_encoder_config": "text_encoder/config.json",
"text_encoder": "text_encoder/model.safetensors",
"vae_config": "vae/config.json",
"vae": "vae/diffusion_pytorch_model.safetensors",
"diffusion_config": "scheduler/scheduler_config.json",
"tokenizer_vocab": "tokenizer/vocab.json",
"tokenizer_merges": "tokenizer/merges.txt",
},
}
def map_unet_weights(key, value):
# Map up/downsampling
if "downsamplers" in key:
key = key.replace("downsamplers.0.conv", "downsample")
if "upsamplers" in key:
key = key.replace("upsamplers.0.conv", "upsample")
# Map the mid block
if "mid_block.resnets.0" in key:
key = key.replace("mid_block.resnets.0", "mid_blocks.0")
if "mid_block.attentions.0" in key:
key = key.replace("mid_block.attentions.0", "mid_blocks.1")
if "mid_block.resnets.1" in key:
key = key.replace("mid_block.resnets.1", "mid_blocks.2")
# Map attention layers
if "to_k" in key:
key = key.replace("to_k", "key_proj")
if "to_out.0" in key:
key = key.replace("to_out.0", "out_proj")
if "to_q" in key:
key = key.replace("to_q", "query_proj")
if "to_v" in key:
key = key.replace("to_v", "value_proj")
# Map transformer ffn
if "ff.net.2" in key:
key = key.replace("ff.net.2", "linear3")
if "ff.net.0" in key:
k1 = key.replace("ff.net.0.proj", "linear1")
k2 = key.replace("ff.net.0.proj", "linear2")
v1, v2 = mx.split(value, 2)
return [(k1, v1), (k2, v2)]
if "conv_shortcut.weight" in key:
value = value.squeeze()
# Transform the weights from 1x1 convs to linear
if len(value.shape) == 4 and ("proj_in" in key or "proj_out" in key):
value = value.squeeze()
if len(value.shape) == 4:
value = value.transpose(0, 2, 3, 1)
value = value.reshape(-1).reshape(value.shape)
return [(key, value)]
def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):
dtype = mx.float16 if float16 else mx.float32
weights = mx.load(weight_file)
weights = _flatten([mapper(k, v.astype(dtype)) for k, v in weights.items()])
model.update(tree_unflatten(weights))
def _check_key(key: str, part: str):
if key not in _MODELS:
raise ValueError(
f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}"
)
class UNetConfig:
in_channels: int = 4
out_channels: int = 4
conv_in_kernel: int = 3
conv_out_kernel: int = 3
block_out_channels: Tuple[int] = (320, 640, 1280, 1280)
layers_per_block: Tuple[int] = (2, 2, 2, 2)
mid_block_layers: int = 2
transformer_layers_per_block: Tuple[int] = (1, 1, 1, 1)
num_attention_heads: Tuple[int] = (5, 10, 20, 20)
cross_attention_dim: Tuple[int] = (1024,) * 4
norm_num_groups: int = 32
down_block_types: Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
up_block_types: Tuple[str] = (
"UpBlock2D",
"CrossAttnUpBlock2D",
"CrossAttnUpBlock2D",
"CrossAttnUpBlock2D",
)
addition_embed_type: Optional[str] = None
addition_time_embed_dim: Optional[int] = None
projection_class_embeddings_input_dim: Optional[int] = None
class UNetModel(nn.Module):
"""The conditional 2D UNet model that actually performs the denoising."""
def __init__(self, config: UNetConfig):
super().__init__()
self.conv_in = nn.Conv2d(
config.in_channels,
config.block_out_channels[0],
config.conv_in_kernel,
padding=(config.conv_in_kernel - 1) // 2,
)
self.timesteps = nn.SinusoidalPositionalEncoding(
config.block_out_channels[0],
max_freq=1,
min_freq=math.exp(
-math.log(10000) + 2 * math.log(10000) / config.block_out_channels[0]
),
scale=1.0,
cos_first=True,
full_turns=False,
)
self.time_embedding = TimestepEmbedding(
config.block_out_channels[0],
config.block_out_channels[0] * 4,
)
if config.addition_embed_type == "text_time":
self.add_time_proj = nn.SinusoidalPositionalEncoding(
config.addition_time_embed_dim,
max_freq=1,
min_freq=math.exp(
-math.log(10000)
+ 2 * math.log(10000) / config.addition_time_embed_dim
),
scale=1.0,
cos_first=True,
full_turns=False,
)
self.add_embedding = TimestepEmbedding(
config.projection_class_embeddings_input_dim,
config.block_out_channels[0] * 4,
)
# Make the downsampling blocks
block_channels = [config.block_out_channels[0]] + list(
config.block_out_channels
)
self.down_blocks = [
UNetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=config.block_out_channels[0] * 4,
num_layers=config.layers_per_block[i],
transformer_layers_per_block=config.transformer_layers_per_block[i],
num_attention_heads=config.num_attention_heads[i],
cross_attention_dim=config.cross_attention_dim[i],
resnet_groups=config.norm_num_groups,
add_downsample=(i < len(config.block_out_channels) - 1),
add_upsample=False,
add_cross_attention="CrossAttn" in config.down_block_types[i],
)
for i, (in_channels, out_channels) in enumerate(
zip(block_channels, block_channels[1:])
)
]
# Make the middle block
self.mid_blocks = [
ResnetBlock2D(
in_channels=config.block_out_channels[-1],
out_channels=config.block_out_channels[-1],
temb_channels=config.block_out_channels[0] * 4,
groups=config.norm_num_groups,
),
Transformer2D(
in_channels=config.block_out_channels[-1],
model_dims=config.block_out_channels[-1],
num_heads=config.num_attention_heads[-1],
num_layers=config.transformer_layers_per_block[-1],
encoder_dims=config.cross_attention_dim[-1],
),
ResnetBlock2D(
in_channels=config.block_out_channels[-1],
out_channels=config.block_out_channels[-1],
temb_channels=config.block_out_channels[0] * 4,
groups=config.norm_num_groups,
),
]
# Make the upsampling blocks
block_channels = (
[config.block_out_channels[0]]
+ list(config.block_out_channels)
+ [config.block_out_channels[-1]]
)
self.up_blocks = [
UNetBlock2D(
in_channels=in_channels,
out_channels=out_channels,
temb_channels=config.block_out_channels[0] * 4,
prev_out_channels=prev_out_channels,
num_layers=config.layers_per_block[i] + 1,
transformer_layers_per_block=config.transformer_layers_per_block[i],
num_attention_heads=config.num_attention_heads[i],
cross_attention_dim=config.cross_attention_dim[i],
resnet_groups=config.norm_num_groups,
add_downsample=False,
add_upsample=(i > 0),
add_cross_attention="CrossAttn" in config.up_block_types[i],
)
for i, (in_channels, out_channels, prev_out_channels) in reversed(
list(
enumerate(
zip(block_channels, block_channels[1:], block_channels[2:])
)
)
)
]
self.conv_norm_out = nn.GroupNorm(
config.norm_num_groups,
config.block_out_channels[0],
pytorch_compatible=True,
)
self.conv_out = nn.Conv2d(
config.block_out_channels[0],
config.out_channels,
config.conv_out_kernel,
padding=(config.conv_out_kernel - 1) // 2,
)
def __call__(
self,
x,
timestep,
encoder_x,
attn_mask=None,
encoder_attn_mask=None,
text_time=None,
):
# Compute the time embeddings
temb = self.timesteps(timestep).astype(x.dtype)
temb = self.time_embedding(temb)
# Add the extra text_time conditioning
if text_time is not None:
text_emb, time_ids = text_time
emb = self.add_time_proj(time_ids).flatten(1).astype(x.dtype)
emb = mx.concatenate([text_emb, emb], axis=-1)
emb = self.add_embedding(emb)
temb = temb + emb
# Preprocess the input
x = self.conv_in(x)
# Run the downsampling part of the unet
residuals = [x]
for block in self.down_blocks:
x, res = block(
x,
encoder_x=encoder_x,
temb=temb,
attn_mask=attn_mask,
encoder_attn_mask=encoder_attn_mask,
)
residuals.extend(res)
# Run the middle part of the unet
x = self.mid_blocks[0](x, temb)
x = self.mid_blocks[1](x, encoder_x, attn_mask, encoder_attn_mask)
x = self.mid_blocks[2](x, temb)
# Run the upsampling part of the unet
for block in self.up_blocks:
x, _ = block(
x,
encoder_x=encoder_x,
temb=temb,
attn_mask=attn_mask,
encoder_attn_mask=encoder_attn_mask,
residual_hidden_states=residuals,
)
# Postprocess the output
dtype = x.dtype
x = self.conv_norm_out(x.astype(mx.float32)).astype(dtype)
x = nn.silu(x)
x = self.conv_out(x)
return x
The provided code snippet includes necessary dependencies for implementing the `load_unet` function. Write a Python function `def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False)` to solve the following problem:
Load the stable diffusion UNet from Hugging Face Hub.
Here is the function:
def load_unet(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion UNet from Hugging Face Hub."""
_check_key(key, "load_unet")
# Download the config and create the model
unet_config = _MODELS[key]["unet_config"]
with open(hf_hub_download(key, unet_config)) as f:
config = json.load(f)
n_blocks = len(config["block_out_channels"])
model = UNetModel(
UNetConfig(
in_channels=config["in_channels"],
out_channels=config["out_channels"],
block_out_channels=config["block_out_channels"],
layers_per_block=[config["layers_per_block"]] * n_blocks,
transformer_layers_per_block=config.get(
"transformer_layers_per_block", (1,) * 4
),
num_attention_heads=(
[config["attention_head_dim"]] * n_blocks
if isinstance(config["attention_head_dim"], int)
else config["attention_head_dim"]
),
cross_attention_dim=[config["cross_attention_dim"]] * n_blocks,
norm_num_groups=config["norm_num_groups"],
down_block_types=config["down_block_types"],
up_block_types=config["up_block_types"][::-1],
addition_embed_type=config.get("addition_embed_type", None),
addition_time_embed_dim=config.get("addition_time_embed_dim", None),
projection_class_embeddings_input_dim=config.get(
"projection_class_embeddings_input_dim", None
),
)
)
# Download the weights and map them into the model
unet_weights = _MODELS[key]["unet"]
weight_file = hf_hub_download(key, unet_weights)
_load_safetensor_weights(map_unet_weights, model, weight_file, float16)
return model | Load the stable diffusion UNet from Hugging Face Hub. |
17,761 | import json
from functools import partial
from typing import Optional
import mlx.core as mx
from huggingface_hub import hf_hub_download
from mlx.utils import tree_unflatten
from .clip import CLIPTextModel
from .config import AutoencoderConfig, CLIPTextModelConfig, DiffusionConfig, UNetConfig
from .tokenizer import Tokenizer
from .unet import UNetModel
from .vae import Autoencoder
_DEFAULT_MODEL = "stabilityai/stable-diffusion-2-1-base"
_MODELS = {
# See https://huggingface.co/stabilityai/sdxl-turbo for the model details and license
"stabilityai/sdxl-turbo": {
"unet_config": "unet/config.json",
"unet": "unet/diffusion_pytorch_model.safetensors",
"text_encoder_config": "text_encoder/config.json",
"text_encoder": "text_encoder/model.safetensors",
"text_encoder_2_config": "text_encoder_2/config.json",
"text_encoder_2": "text_encoder_2/model.safetensors",
"vae_config": "vae/config.json",
"vae": "vae/diffusion_pytorch_model.safetensors",
"diffusion_config": "scheduler/scheduler_config.json",
"tokenizer_vocab": "tokenizer/vocab.json",
"tokenizer_merges": "tokenizer/merges.txt",
"tokenizer_2_vocab": "tokenizer_2/vocab.json",
"tokenizer_2_merges": "tokenizer_2/merges.txt",
},
# See https://huggingface.co/stabilityai/stable-diffusion-2-1-base for the model details and license
"stabilityai/stable-diffusion-2-1-base": {
"unet_config": "unet/config.json",
"unet": "unet/diffusion_pytorch_model.safetensors",
"text_encoder_config": "text_encoder/config.json",
"text_encoder": "text_encoder/model.safetensors",
"vae_config": "vae/config.json",
"vae": "vae/diffusion_pytorch_model.safetensors",
"diffusion_config": "scheduler/scheduler_config.json",
"tokenizer_vocab": "tokenizer/vocab.json",
"tokenizer_merges": "tokenizer/merges.txt",
},
}
def map_clip_text_encoder_weights(key, value):
# Remove prefixes
if key.startswith("text_model."):
key = key[11:]
if key.startswith("embeddings."):
key = key[11:]
if key.startswith("encoder."):
key = key[8:]
# Map attention layers
if "self_attn." in key:
key = key.replace("self_attn.", "attention.")
if "q_proj." in key:
key = key.replace("q_proj.", "query_proj.")
if "k_proj." in key:
key = key.replace("k_proj.", "key_proj.")
if "v_proj." in key:
key = key.replace("v_proj.", "value_proj.")
# Map ffn layers
if "mlp.fc1" in key:
key = key.replace("mlp.fc1", "linear1")
if "mlp.fc2" in key:
key = key.replace("mlp.fc2", "linear2")
return [(key, value)]
def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):
dtype = mx.float16 if float16 else mx.float32
weights = mx.load(weight_file)
weights = _flatten([mapper(k, v.astype(dtype)) for k, v in weights.items()])
model.update(tree_unflatten(weights))
def _check_key(key: str, part: str):
if key not in _MODELS:
raise ValueError(
f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}"
)
class CLIPTextModel(nn.Module):
"""Implements the text encoder transformer from CLIP."""
def __init__(self, config: CLIPTextModelConfig):
super().__init__()
self.token_embedding = nn.Embedding(config.vocab_size, config.model_dims)
self.position_embedding = nn.Embedding(config.max_length, config.model_dims)
self.layers = [
CLIPEncoderLayer(config.model_dims, config.num_heads, config.hidden_act)
for i in range(config.num_layers)
]
self.final_layer_norm = nn.LayerNorm(config.model_dims)
if config.projection_dim is not None:
self.text_projection = nn.Linear(
config.model_dims, config.projection_dim, bias=False
)
def _get_mask(self, N, dtype):
indices = mx.arange(N)
mask = indices[:, None] < indices[None]
mask = mask.astype(dtype) * (-6e4 if dtype == mx.float16 else -1e9)
return mask
def __call__(self, x):
# Extract some shapes
B, N = x.shape
eos_tokens = x.argmax(-1)
# Compute the embeddings
x = self.token_embedding(x)
x = x + self.position_embedding.weight[:N]
# Compute the features from the transformer
mask = self._get_mask(N, x.dtype)
hidden_states = []
for l in self.layers:
x = l(x, mask)
hidden_states.append(x)
# Apply the final layernorm and return
x = self.final_layer_norm(x)
last_hidden_state = x
# Select the EOS token
pooled_output = x[mx.arange(len(x)), eos_tokens]
if "text_projection" in self:
pooled_output = self.text_projection(pooled_output)
return CLIPOutput(
pooled_output=pooled_output,
last_hidden_state=last_hidden_state,
hidden_states=hidden_states,
)
class CLIPTextModelConfig:
num_layers: int = 23
model_dims: int = 1024
num_heads: int = 16
max_length: int = 77
vocab_size: int = 49408
projection_dim: Optional[int] = None
hidden_act: str = "quick_gelu"
The provided code snippet includes necessary dependencies for implementing the `load_text_encoder` function. Write a Python function `def load_text_encoder( key: str = _DEFAULT_MODEL, float16: bool = False, model_key: str = "text_encoder", config_key: Optional[str] = None, )` to solve the following problem:
Load the stable diffusion text encoder from Hugging Face Hub.
Here is the function:
def load_text_encoder(
key: str = _DEFAULT_MODEL,
float16: bool = False,
model_key: str = "text_encoder",
config_key: Optional[str] = None,
):
"""Load the stable diffusion text encoder from Hugging Face Hub."""
_check_key(key, "load_text_encoder")
config_key = config_key or (model_key + "_config")
# Download the config and create the model
text_encoder_config = _MODELS[key][config_key]
with open(hf_hub_download(key, text_encoder_config)) as f:
config = json.load(f)
with_projection = "WithProjection" in config["architectures"][0]
model = CLIPTextModel(
CLIPTextModelConfig(
num_layers=config["num_hidden_layers"],
model_dims=config["hidden_size"],
num_heads=config["num_attention_heads"],
max_length=config["max_position_embeddings"],
vocab_size=config["vocab_size"],
projection_dim=config["projection_dim"] if with_projection else None,
hidden_act=config.get("hidden_act", "quick_gelu"),
)
)
# Download the weights and map them into the model
text_encoder_weights = _MODELS[key][model_key]
weight_file = hf_hub_download(key, text_encoder_weights)
_load_safetensor_weights(map_clip_text_encoder_weights, model, weight_file, float16)
return model | Load the stable diffusion text encoder from Hugging Face Hub. |
17,762 | import json
from functools import partial
from typing import Optional
import mlx.core as mx
from huggingface_hub import hf_hub_download
from mlx.utils import tree_unflatten
from .clip import CLIPTextModel
from .config import AutoencoderConfig, CLIPTextModelConfig, DiffusionConfig, UNetConfig
from .tokenizer import Tokenizer
from .unet import UNetModel
from .vae import Autoencoder
_DEFAULT_MODEL = "stabilityai/stable-diffusion-2-1-base"
_MODELS = {
# See https://huggingface.co/stabilityai/sdxl-turbo for the model details and license
"stabilityai/sdxl-turbo": {
"unet_config": "unet/config.json",
"unet": "unet/diffusion_pytorch_model.safetensors",
"text_encoder_config": "text_encoder/config.json",
"text_encoder": "text_encoder/model.safetensors",
"text_encoder_2_config": "text_encoder_2/config.json",
"text_encoder_2": "text_encoder_2/model.safetensors",
"vae_config": "vae/config.json",
"vae": "vae/diffusion_pytorch_model.safetensors",
"diffusion_config": "scheduler/scheduler_config.json",
"tokenizer_vocab": "tokenizer/vocab.json",
"tokenizer_merges": "tokenizer/merges.txt",
"tokenizer_2_vocab": "tokenizer_2/vocab.json",
"tokenizer_2_merges": "tokenizer_2/merges.txt",
},
# See https://huggingface.co/stabilityai/stable-diffusion-2-1-base for the model details and license
"stabilityai/stable-diffusion-2-1-base": {
"unet_config": "unet/config.json",
"unet": "unet/diffusion_pytorch_model.safetensors",
"text_encoder_config": "text_encoder/config.json",
"text_encoder": "text_encoder/model.safetensors",
"vae_config": "vae/config.json",
"vae": "vae/diffusion_pytorch_model.safetensors",
"diffusion_config": "scheduler/scheduler_config.json",
"tokenizer_vocab": "tokenizer/vocab.json",
"tokenizer_merges": "tokenizer/merges.txt",
},
}
def map_vae_weights(key, value):
# Map up/downsampling
if "downsamplers" in key:
key = key.replace("downsamplers.0.conv", "downsample")
if "upsamplers" in key:
key = key.replace("upsamplers.0.conv", "upsample")
# Map attention layers
if "to_k" in key:
key = key.replace("to_k", "key_proj")
if "to_out.0" in key:
key = key.replace("to_out.0", "out_proj")
if "to_q" in key:
key = key.replace("to_q", "query_proj")
if "to_v" in key:
key = key.replace("to_v", "value_proj")
# Map the mid block
if "mid_block.resnets.0" in key:
key = key.replace("mid_block.resnets.0", "mid_blocks.0")
if "mid_block.attentions.0" in key:
key = key.replace("mid_block.attentions.0", "mid_blocks.1")
if "mid_block.resnets.1" in key:
key = key.replace("mid_block.resnets.1", "mid_blocks.2")
# Map the quant/post_quant layers
if "quant_conv" in key:
key = key.replace("quant_conv", "quant_proj")
value = value.squeeze()
# Map the conv_shortcut to linear
if "conv_shortcut.weight" in key:
value = value.squeeze()
if len(value.shape) == 4:
value = value.transpose(0, 2, 3, 1)
value = value.reshape(-1).reshape(value.shape)
return [(key, value)]
def _load_safetensor_weights(mapper, model, weight_file, float16: bool = False):
dtype = mx.float16 if float16 else mx.float32
weights = mx.load(weight_file)
weights = _flatten([mapper(k, v.astype(dtype)) for k, v in weights.items()])
model.update(tree_unflatten(weights))
def _check_key(key: str, part: str):
if key not in _MODELS:
raise ValueError(
f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}"
)
class AutoencoderConfig:
in_channels: int = 3
out_channels: int = 3
latent_channels_out: int = 8
latent_channels_in: int = 4
block_out_channels: Tuple[int] = (128, 256, 512, 512)
layers_per_block: int = 2
norm_num_groups: int = 32
scaling_factor: float = 0.18215
class Autoencoder(nn.Module):
"""The autoencoder that allows us to perform diffusion in the latent space."""
def __init__(self, config: AutoencoderConfig):
super().__init__()
self.latent_channels = config.latent_channels_in
self.scaling_factor = config.scaling_factor
self.encoder = Encoder(
config.in_channels,
config.latent_channels_out,
config.block_out_channels,
config.layers_per_block,
resnet_groups=config.norm_num_groups,
)
self.decoder = Decoder(
config.latent_channels_in,
config.out_channels,
config.block_out_channels,
config.layers_per_block + 1,
resnet_groups=config.norm_num_groups,
)
self.quant_proj = nn.Linear(
config.latent_channels_out, config.latent_channels_out
)
self.post_quant_proj = nn.Linear(
config.latent_channels_in, config.latent_channels_in
)
def decode(self, z):
z = z / self.scaling_factor
return self.decoder(self.post_quant_proj(z))
def encode(self, x):
x = self.encoder(x)
x = self.quant_proj(x)
mean, logvar = x.split(2, axis=-1)
mean = mean * self.scaling_factor
logvar = logvar + 2 * math.log(self.scaling_factor)
return mean, logvar
def __call__(self, x, key=None):
mean, logvar = self.encode(x)
z = mx.random.normal(mean.shape, key=key) * mx.exp(0.5 * logvar) + mean
x_hat = self.decode(z)
return dict(x_hat=x_hat, z=z, mean=mean, logvar=logvar)
The provided code snippet includes necessary dependencies for implementing the `load_autoencoder` function. Write a Python function `def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False)` to solve the following problem:
Load the stable diffusion autoencoder from Hugging Face Hub.
Here is the function:
def load_autoencoder(key: str = _DEFAULT_MODEL, float16: bool = False):
"""Load the stable diffusion autoencoder from Hugging Face Hub."""
_check_key(key, "load_autoencoder")
# Download the config and create the model
vae_config = _MODELS[key]["vae_config"]
with open(hf_hub_download(key, vae_config)) as f:
config = json.load(f)
model = Autoencoder(
AutoencoderConfig(
in_channels=config["in_channels"],
out_channels=config["out_channels"],
latent_channels_out=2 * config["latent_channels"],
latent_channels_in=config["latent_channels"],
block_out_channels=config["block_out_channels"],
layers_per_block=config["layers_per_block"],
norm_num_groups=config["norm_num_groups"],
scaling_factor=config.get("scaling_factor", 0.18215),
)
)
# Download the weights and map them into the model
vae_weights = _MODELS[key]["vae"]
weight_file = hf_hub_download(key, vae_weights)
_load_safetensor_weights(map_vae_weights, model, weight_file, float16)
return model | Load the stable diffusion autoencoder from Hugging Face Hub. |
17,763 | import json
from functools import partial
from typing import Optional
import mlx.core as mx
from huggingface_hub import hf_hub_download
from mlx.utils import tree_unflatten
from .clip import CLIPTextModel
from .config import AutoencoderConfig, CLIPTextModelConfig, DiffusionConfig, UNetConfig
from .tokenizer import Tokenizer
from .unet import UNetModel
from .vae import Autoencoder
_DEFAULT_MODEL = "stabilityai/stable-diffusion-2-1-base"
_MODELS = {
# See https://huggingface.co/stabilityai/sdxl-turbo for the model details and license
"stabilityai/sdxl-turbo": {
"unet_config": "unet/config.json",
"unet": "unet/diffusion_pytorch_model.safetensors",
"text_encoder_config": "text_encoder/config.json",
"text_encoder": "text_encoder/model.safetensors",
"text_encoder_2_config": "text_encoder_2/config.json",
"text_encoder_2": "text_encoder_2/model.safetensors",
"vae_config": "vae/config.json",
"vae": "vae/diffusion_pytorch_model.safetensors",
"diffusion_config": "scheduler/scheduler_config.json",
"tokenizer_vocab": "tokenizer/vocab.json",
"tokenizer_merges": "tokenizer/merges.txt",
"tokenizer_2_vocab": "tokenizer_2/vocab.json",
"tokenizer_2_merges": "tokenizer_2/merges.txt",
},
# See https://huggingface.co/stabilityai/stable-diffusion-2-1-base for the model details and license
"stabilityai/stable-diffusion-2-1-base": {
"unet_config": "unet/config.json",
"unet": "unet/diffusion_pytorch_model.safetensors",
"text_encoder_config": "text_encoder/config.json",
"text_encoder": "text_encoder/model.safetensors",
"vae_config": "vae/config.json",
"vae": "vae/diffusion_pytorch_model.safetensors",
"diffusion_config": "scheduler/scheduler_config.json",
"tokenizer_vocab": "tokenizer/vocab.json",
"tokenizer_merges": "tokenizer/merges.txt",
},
}
def _check_key(key: str, part: str):
if key not in _MODELS:
raise ValueError(
f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}"
)
class DiffusionConfig:
beta_schedule: str = "scaled_linear"
beta_start: float = 0.00085
beta_end: float = 0.012
num_train_steps: int = 1000
The provided code snippet includes necessary dependencies for implementing the `load_diffusion_config` function. Write a Python function `def load_diffusion_config(key: str = _DEFAULT_MODEL)` to solve the following problem:
Load the stable diffusion config from Hugging Face Hub.
Here is the function:
def load_diffusion_config(key: str = _DEFAULT_MODEL):
"""Load the stable diffusion config from Hugging Face Hub."""
_check_key(key, "load_diffusion_config")
diffusion_config = _MODELS[key]["diffusion_config"]
with open(hf_hub_download(key, diffusion_config)) as f:
config = json.load(f)
return DiffusionConfig(
beta_start=config["beta_start"],
beta_end=config["beta_end"],
beta_schedule=config["beta_schedule"],
num_train_steps=config["num_train_timesteps"],
) | Load the stable diffusion config from Hugging Face Hub. |
17,764 | import json
from functools import partial
from typing import Optional
import mlx.core as mx
from huggingface_hub import hf_hub_download
from mlx.utils import tree_unflatten
from .clip import CLIPTextModel
from .config import AutoencoderConfig, CLIPTextModelConfig, DiffusionConfig, UNetConfig
from .tokenizer import Tokenizer
from .unet import UNetModel
from .vae import Autoencoder
# Model repo used when the caller does not specify one.
_DEFAULT_MODEL = "stabilityai/stable-diffusion-2-1-base"

# Maps a Hugging Face repo id to the in-repo paths of every file needed to
# assemble the pipeline (UNet, text encoder(s), VAE, scheduler, tokenizer(s)).
_MODELS = {
    # See https://huggingface.co/stabilityai/sdxl-turbo for the model details and license
    "stabilityai/sdxl-turbo": {
        "unet_config": "unet/config.json",
        "unet": "unet/diffusion_pytorch_model.safetensors",
        "text_encoder_config": "text_encoder/config.json",
        "text_encoder": "text_encoder/model.safetensors",
        "text_encoder_2_config": "text_encoder_2/config.json",
        "text_encoder_2": "text_encoder_2/model.safetensors",
        "vae_config": "vae/config.json",
        "vae": "vae/diffusion_pytorch_model.safetensors",
        "diffusion_config": "scheduler/scheduler_config.json",
        "tokenizer_vocab": "tokenizer/vocab.json",
        "tokenizer_merges": "tokenizer/merges.txt",
        "tokenizer_2_vocab": "tokenizer_2/vocab.json",
        "tokenizer_2_merges": "tokenizer_2/merges.txt",
    },
    # See https://huggingface.co/stabilityai/stable-diffusion-2-1-base for the model details and license
    "stabilityai/stable-diffusion-2-1-base": {
        "unet_config": "unet/config.json",
        "unet": "unet/diffusion_pytorch_model.safetensors",
        "text_encoder_config": "text_encoder/config.json",
        "text_encoder": "text_encoder/model.safetensors",
        "vae_config": "vae/config.json",
        "vae": "vae/diffusion_pytorch_model.safetensors",
        "diffusion_config": "scheduler/scheduler_config.json",
        "tokenizer_vocab": "tokenizer/vocab.json",
        "tokenizer_merges": "tokenizer/merges.txt",
    },
}
def _check_key(key: str, part: str):
    """Raise a ValueError naming *part* if *key* is not a known model repo."""
    if key not in _MODELS:
        raise ValueError(
            f"[{part}] '{key}' model not found, choose one of {{{','.join(_MODELS.keys())}}}"
        )
class Tokenizer:
    """A simple port of CLIPTokenizer from https://github.com/huggingface/transformers/ .

    Fix: ``bos``/``bos_token``/``eos``/``eos_token`` were plain methods but
    are used as attributes (the ``_cache`` seed in ``__init__`` and the token
    list in ``tokenize``), which would cache bound methods and emit method
    objects instead of token ids — they are properties.
    """

    def __init__(self, bpe_ranks, vocab):
        # bpe_ranks: (left, right) merge pair -> merge priority (lower merges first)
        # vocab: BPE token string -> integer token id
        self.bpe_ranks = bpe_ranks
        self.vocab = vocab
        self.pat = regex.compile(
            r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
            regex.IGNORECASE,
        )
        # Seed the BPE cache so the special tokens are never split.
        self._cache = {self.bos: self.bos, self.eos: self.eos}

    @property
    def bos(self):
        """Beginning-of-sequence token string."""
        return "<|startoftext|>"

    @property
    def bos_token(self):
        """Beginning-of-sequence token id."""
        return self.vocab[self.bos]

    @property
    def eos(self):
        """End-of-sequence token string."""
        return "<|endoftext|>"

    @property
    def eos_token(self):
        """End-of-sequence token id."""
        return self.vocab[self.eos]

    def bpe(self, text):
        """Split *text* into BPE unigrams using the learned merge ranks."""
        if text in self._cache:
            return self._cache[text]

        # The last character carries the end-of-word marker.
        unigrams = list(text[:-1]) + [text[-1] + "</w>"]
        unique_bigrams = set(zip(unigrams, unigrams[1:]))

        if not unique_bigrams:
            return unigrams

        # In every iteration try to merge the two most likely bigrams. If none
        # was merged we are done.
        #
        # Ported from https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/tokenization_clip.py
        while unique_bigrams:
            bigram = min(
                unique_bigrams, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))
            )
            if bigram not in self.bpe_ranks:
                break

            new_unigrams = []
            skip = False
            for a, b in zip(unigrams, unigrams[1:]):
                if skip:
                    skip = False
                    continue

                if (a, b) == bigram:
                    new_unigrams.append(a + b)
                    skip = True
                else:
                    new_unigrams.append(a)
            if not skip:
                new_unigrams.append(b)

            unigrams = new_unigrams
            unique_bigrams = set(zip(unigrams, unigrams[1:]))

        self._cache[text] = unigrams

        return unigrams

    def tokenize(self, text, prepend_bos=True, append_eos=True):
        """Encode *text* (or a list of texts) into a list of token ids."""
        if isinstance(text, list):
            return [self.tokenize(t, prepend_bos, append_eos) for t in text]

        # Lower case cleanup and split according to self.pat. Hugging Face does
        # a much more thorough job here but this should suffice for 95% of
        # cases.
        clean_text = regex.sub(r"\s+", " ", text.lower())
        tokens = regex.findall(self.pat, clean_text)

        # Split the tokens according to the byte-pair merge file
        bpe_tokens = [ti for t in tokens for ti in self.bpe(t)]

        # Map to token ids and return
        tokens = [self.vocab[t] for t in bpe_tokens]
        if prepend_bos:
            tokens = [self.bos_token] + tokens
        if append_eos:
            tokens.append(self.eos_token)

        return tokens
def load_tokenizer(
    key: str = _DEFAULT_MODEL,
    vocab_key: str = "tokenizer_vocab",
    merges_key: str = "tokenizer_merges",
):
    """Build a CLIP ``Tokenizer`` from the vocab/merges files of model *key*."""
    _check_key(key, "load_tokenizer")

    with open(hf_hub_download(key, _MODELS[key][vocab_key]), encoding="utf-8") as f:
        vocab = json.load(f)

    with open(hf_hub_download(key, _MODELS[key][merges_key]), encoding="utf-8") as f:
        # Skip the header line and cap at the CLIP merge-table size.
        merge_lines = f.read().strip().split("\n")[1 : 49152 - 256 - 2 + 1]
    pairs = [tuple(line.split()) for line in merge_lines]
    bpe_ranks = {pair: rank for rank, pair in enumerate(pairs)}

    return Tokenizer(bpe_ranks, vocab)
17,765 | import argparse
import copy
import glob
import json
import shutil
from pathlib import Path
import mlx.core as mx
import mlx.nn as nn
import numpy as np
import torch
from mixtral import Mixtral, ModelArgs
from mlx.utils import tree_flatten, tree_map, tree_unflatten
def convert(tf, config):
    """Convert a Mixtral torch checkpoint (file or file-like *tf*) into a
    dict of float16 numpy weights.

    Non-MoE tensors keep their names.  Gate matrices are renamed from
    ``block_sparse_moe`` to ``feed_forward``.  Stacked expert weights are
    split into one tensor per expert named
    ``layers.N.feed_forward.experts.M.<w>.weight``.
    """

    def convert_single(k, v):
        arr = v.to(torch.float16).numpy()
        if "block_sparse_moe" not in k:
            return [(k, arr)]
        if "gate" in k:
            return [(k.replace("block_sparse_moe", "feed_forward"), arr)]

        # From: layers.N.block_sparse_moe.w
        # To:   layers.N.feed_forward.experts.M.w.weight
        n_experts = config["moe"]["num_experts"]
        parts = k.split(".")
        shards = np.split(arr, n_experts, axis=0)
        if parts[-1] == "w2":
            # Transpose each w2 shard.
            shards = [s.T for s in shards]

        w_name = parts.pop()
        parts[-1] = "feed_forward.experts"
        out = []
        for e, shard in enumerate(shards):
            out.append((".".join(parts + [str(e), w_name, "weight"]), shard))
        return out

    weights = {}
    for k, v in torch.load(tf).items():
        weights.update(convert_single(k, v))
    return weights
17,766 | import argparse
import copy
import glob
import json
import shutil
from pathlib import Path
import mlx.core as mx
import mlx.nn as nn
import numpy as np
import torch
from mixtral import Mixtral, ModelArgs
from mlx.utils import tree_flatten, tree_map, tree_unflatten
class ModelArgs(BaseModelArgs):
def __post_init__(self):
def quantize(weights, config, args):
    """Quantize a (possibly partial) set of Mixtral weights.

    The full model is instantiated so quantization sees correctly shaped
    modules; *weights* is overlaid on it, every Linear except the 8-row MoE
    gates is quantized, and only the quantized versions of the provided keys
    (plus their scales/biases) are returned together with an updated config.
    """
    quantized_config = copy.deepcopy(config)

    # Load the model and update with the subset of weights:
    config.pop("quantization", None)
    model = Mixtral(ModelArgs(**config))
    all_weights = dict(tree_flatten(model.parameters()))

    weights = tree_map(mx.array, weights)
    all_weights.update(weights)
    all_weights = tree_unflatten(list(all_weights.items()))
    model.update(all_weights)

    # Quantize the model:
    nn.QuantizedLinear.quantize_module(
        model,
        args.q_group_size,
        args.q_bits,
        # TODO: Quantize gate matrices when < 32 tiles supported
        linear_class_predicate=lambda m: isinstance(m, nn.Linear)
        and m.weight.shape[0] != 8,
    )

    # Extract the subset of quantized weights:
    all_weights = dict(tree_flatten(model.parameters()))
    quantized_weights = {}
    for k, v in all_weights.items():
        if k not in weights:
            continue
        quantized_weights[k] = v
        prefix = k.split(".")[:-1]
        # A quantized layer also carries companion scales/biases arrays.
        for qw in ["scales", "biases"]:
            if (k := ".".join(prefix + [qw])) in all_weights:
                quantized_weights[k] = all_weights[k]

    # Update the config:
    quantized_config["quantization"] = {
        "group_size": args.q_group_size,
        "bits": args.q_bits,
    }
    return quantized_weights, quantized_config
17,767 | import argparse
import glob
import json
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Tuple
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_map, tree_unflatten
from sentencepiece import SentencePieceProcessor
class ModelArgs:
class Mixtral(nn.Module):
def __init__(self, args: ModelArgs):
def __call__(
self,
inputs: mx.array,
cache=None,
):
class Tokenizer:
def __init__(self, model_path: str):
def eos_id(self) -> int:
def pad_id(self) -> int:
def encode(self, s: str) -> List[int]:
def decode(self, t: List[int]) -> str:
def load_model(folder: str):
    """Load a (possibly quantized) Mixtral model + tokenizer from *folder*.

    Expects ``tokenizer.model``, ``config.json`` and sharded
    ``weights.*.npz`` files inside the folder.
    """
    model_path = Path(folder)
    tokenizer = Tokenizer(str(model_path / "tokenizer.model"))
    with open(model_path / "config.json", "r") as f:
        config = json.loads(f.read())
        # Drop fields ModelArgs does not accept; remember quantization info.
        config.pop("model_type", None)
        quantization = config.pop("quantization", None)
        model_args = ModelArgs(**config)
    # Merge all weight shards into a single flat dict.
    weight_files = glob.glob(str(model_path / "weights.*.npz"))
    weights = {}
    for wf in weight_files:
        weights.update(mx.load(wf).items())
    weights = tree_unflatten(list(weights.items()))
    model = Mixtral(model_args)
    if quantization is not None:
        # TODO: Quantize gate matrices when < 32 tiles supported
        quantization["linear_class_predicate"] = (
            lambda m: isinstance(m, nn.Linear) and m.weight.shape[0] != 8
        )
        nn.QuantizedLinear.quantize_module(model, **quantization)
    model.update(weights)
    return model, tokenizer
17,768 | import argparse
import glob
import json
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Tuple
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_map, tree_unflatten
from sentencepiece import SentencePieceProcessor
class Mixtral(nn.Module):
    """Mixtral mixture-of-experts transformer language model."""

    def __init__(self, args: ModelArgs):
        super().__init__()
        self.args = args
        self.vocab_size = args.vocab_size
        self.n_layers = args.n_layers
        assert self.vocab_size > 0
        self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim)
        self.layers = [MOETransformerBlock(args=args) for _ in range(args.n_layers)]
        self.norm = RMSNorm(args.dim, eps=args.norm_eps)
        self.output = nn.Linear(args.dim, args.vocab_size, bias=False)

    def __call__(
        self,
        inputs: mx.array,
        cache=None,
    ):
        """Return (logits for the last position only, per-layer cache)."""
        h = self.tok_embeddings(inputs)

        mask = None
        T = h.shape[1]
        # A causal mask is only needed when more than one position attends.
        if T > 1:
            mask = nn.MultiHeadAttention.create_additive_causal_mask(T)
            mask = mask.astype(h.dtype)

        if cache is None:
            # One cache slot per layer.
            cache = [None] * len(self.layers)

        for e, layer in enumerate(self.layers):
            h, cache[e] = layer(h, mask, cache[e])

        # Only the last position's logits are computed (generation use).
        return self.output(self.norm(h[:, T - 1 : T, :])), cache
def generate(prompt: mx.array, model: Mixtral, temp: Optional[float] = 0.0):
    """Yield sampled tokens autoregressively, starting from *prompt*.

    A temperature of 0 picks the argmax token; otherwise logits are scaled
    by 1/temp and sampled categorically.
    """

    def sample(logits):
        return (
            mx.argmax(logits, axis=-1)
            if temp == 0
            else mx.random.categorical(logits * (1 / temp))
        )

    # Process the whole prompt once, then feed tokens back one at a time.
    logits, cache = model(prompt[None])
    y = sample(logits[:, -1, :])
    yield y
    while True:
        logits, cache = model(y[:, None], cache)
        y = sample(logits.squeeze(1))
        yield y
17,769 | import argparse
import copy
import json
import shutil
from pathlib import Path
import mlx.core as mx
import mlx.nn as nn
import numpy as np
import torch
from mistral import Mistral, ModelArgs
from mlx.utils import tree_flatten, tree_map, tree_unflatten
def quantize(weights, config, args):
    """Quantize all Mistral linear layers.

    Returns ``(quantized_weights, updated_config)`` where the config records
    the group size and bit width so loading can re-apply the quantization.
    """
    quantized_config = copy.deepcopy(config)

    # Load the model:
    config.pop("sliding_window", None)
    model = Mistral(ModelArgs(**config))
    weights = tree_map(mx.array, weights)
    model.update(tree_unflatten(list(weights.items())))

    # Quantize the model:
    nn.QuantizedLinear.quantize_module(model, args.q_group_size, args.q_bits)

    # Update the config:
    quantized_config["quantization"] = {
        "group_size": args.q_group_size,
        "bits": args.q_bits,
    }
    quantized_weights = dict(tree_flatten(model.parameters()))

    return quantized_weights, quantized_config
17,770 | import argparse
import json
import time
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Tuple
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_unflatten
from sentencepiece import SentencePieceProcessor
# Fix: restored the @dataclass decorator — the class declares only annotated
# fields but is instantiated with keyword arguments (ModelArgs(**config) in
# load_model), which requires the generated __init__.
@dataclass
class ModelArgs:
    """Mistral architecture hyper-parameters."""

    dim: int
    n_layers: int
    head_dim: int
    hidden_dim: int
    n_heads: int
    n_kv_heads: int
    norm_eps: float
    vocab_size: int
    rope_theta: float = 10000
class Mistral(nn.Module):
    """Mistral decoder-only transformer language model."""

    def __init__(self, args: ModelArgs):
        super().__init__()
        self.args = args
        self.vocab_size = args.vocab_size
        self.n_layers = args.n_layers
        assert self.vocab_size > 0
        self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim)
        self.layers = [TransformerBlock(args=args) for _ in range(args.n_layers)]
        self.norm = RMSNorm(args.dim, eps=args.norm_eps)
        self.output = nn.Linear(args.dim, args.vocab_size, bias=False)

    def __call__(
        self,
        inputs: mx.array,
        cache=None,
    ):
        """Return (logits for every position, per-layer cache)."""
        h = self.tok_embeddings(inputs)

        mask = None
        # The causal mask is only needed for multi-token (prompt) passes.
        if h.shape[1] > 1:
            mask = nn.MultiHeadAttention.create_additive_causal_mask(h.shape[1])
            mask = mask.astype(h.dtype)

        if cache is None:
            # One cache slot per layer.
            cache = [None] * len(self.layers)

        for e, layer in enumerate(self.layers):
            h, cache[e] = layer(h, mask, cache[e])

        return self.output(self.norm(h)), cache
class Tokenizer:
    """SentencePiece tokenizer wrapper.

    NOTE(review): ``eos_id``/``pad_id`` read like ``@property`` accessors in
    the upstream source but are plain methods here — confirm call sites.
    """

    def __init__(self, model_path: str):
        assert Path(model_path).exists(), model_path
        self._model = SentencePieceProcessor(model_file=model_path)
        # SentencePiece's word-boundary marker.
        self._sep = "▁"
        assert self._model.vocab_size() == self._model.get_piece_size()

    def eos_id(self) -> int:
        return self._model.eos_id()

    def pad_id(self) -> int:
        return self._model.pad_id()

    def encode(self, s: str) -> List[int]:
        # BOS is always prepended.
        return [self._model.bos_id(), *self._model.encode(s)]

    def decode(self, t: List[int]) -> str:
        out = self._model.decode(t)
        # Restore the leading space that decoding drops when the first piece
        # starts with the word-boundary marker.
        if t and self._model.id_to_piece(t[0])[0] == self._sep:
            return " " + out
        return out
def load_model(folder: str):
    """Load a Mistral model + tokenizer from *folder*.

    Expects ``tokenizer.model``, ``config.json`` and ``weights.npz`` in the
    folder; re-applies quantization if the config recorded one.
    """
    root = Path(folder)
    tokenizer = Tokenizer(str(root / "tokenizer.model"))

    with open(root / "config.json", "r") as f:
        config = json.loads(f.read())
    # Drop fields the ModelArgs constructor does not accept.
    config.pop("sliding_window", None)
    config.pop("model_type", None)
    quantization = config.pop("quantization", None)

    model = Mistral(ModelArgs(**config))
    if quantization is not None:
        nn.QuantizedLinear.quantize_module(model, **quantization)

    raw = mx.load(str(root / "weights.npz"))
    model.update(tree_unflatten(list(raw.items())))
    mx.eval(model.parameters())
    return model, tokenizer
17,771 | import argparse
import json
import time
from dataclasses import dataclass
from pathlib import Path
from typing import List, Optional, Tuple
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_unflatten
from sentencepiece import SentencePieceProcessor
class Mistral(nn.Module):
    """Mistral transformer LM: embedding, N transformer blocks, RMSNorm,
    and an untied output projection."""

    def __init__(self, args: ModelArgs):
        super().__init__()
        self.args = args
        self.vocab_size = args.vocab_size
        self.n_layers = args.n_layers
        assert self.vocab_size > 0
        self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim)
        self.layers = [TransformerBlock(args=args) for _ in range(args.n_layers)]
        self.norm = RMSNorm(args.dim, eps=args.norm_eps)
        self.output = nn.Linear(args.dim, args.vocab_size, bias=False)

    def __call__(
        self,
        inputs: mx.array,
        cache=None,
    ):
        """Forward pass; returns (per-position logits, per-layer cache)."""
        h = self.tok_embeddings(inputs)

        mask = None
        # Single-token (incremental) passes need no causal mask.
        if h.shape[1] > 1:
            mask = nn.MultiHeadAttention.create_additive_causal_mask(h.shape[1])
            mask = mask.astype(h.dtype)

        if cache is None:
            cache = [None] * len(self.layers)

        for e, layer in enumerate(self.layers):
            h, cache[e] = layer(h, mask, cache[e])

        return self.output(self.norm(h)), cache
def generate(prompt: mx.array, model: Mistral, temp: Optional[float] = 0.0):
    """Autoregressively sample tokens from *model*, seeded with *prompt*."""

    def sample(logits):
        if temp == 0:
            # Greedy decoding.
            return mx.argmax(logits, axis=-1)
        return mx.random.categorical(logits * (1 / temp))

    # First pass consumes the full prompt and primes the cache.
    logits, cache = model(prompt[None])
    y = sample(logits[:, -1, :])
    yield y

    # Subsequent passes feed back one token at a time.
    while True:
        logits, cache = model(y[:, None], cache)
        y = sample(logits.squeeze(1))
        yield y
17,772 | import argparse
import glob
import json
import time
from pathlib import Path
import mlx.core as mx
import mlx.nn as nn
from decoder import SpeculativeDecoder
from mlx.utils import tree_unflatten
from model import Model
from transformers import T5Config
class Model(nn.Module):
    """T5 encoder-decoder with an internal decoder cache.

    NOTE(review): ``truncate_cache`` calls ``tree_map``, which is not among
    this file's visible imports — confirm it is imported in the full source.
    """

    def __init__(self, config: T5Config):
        self.wte = nn.Embedding(config.vocab_size, config.d_model)
        self.encoder = TransformerEncoder(config)
        self.decoder = TransformerDecoder(config)
        self.tie_word_embeddings = config.tie_word_embeddings
        if not self.tie_word_embeddings:
            self.lm_head = OutputHead(config)
        self.model_dim = config.d_model
        self.reset_cache()

    def encode(self, inputs: mx.array):
        """Embed and encode the input token ids."""
        return self.encoder(self.wte(inputs))

    def truncate_cache(self, num_to_truncate):
        """Drop the last *num_to_truncate* positions from the decoder cache."""
        if num_to_truncate <= 0:
            return

        cache_length = self.cache[0][0].shape[2]
        if num_to_truncate < cache_length:
            self.cache = tree_map(lambda x: x[:, :, :-num_to_truncate, :], self.cache)
        else:
            # Truncating everything is just a reset.
            self.reset_cache()

    def reset_cache(self):
        # One cache slot per decoder layer.
        self.cache = [None] * len(self.decoder.layers)

    def decode(
        self,
        inputs: mx.array,
        memory: mx.array,
    ):
        """Run the decoder over *inputs* attending to encoder *memory*."""
        inputs = self.wte(inputs)
        y, self.cache = self.decoder(inputs, memory=memory, cache=self.cache)
        if not self.tie_word_embeddings:
            # Rescale before the separate output head.
            y *= self.model_dim**-0.5
            y = self.lm_head(y)
        else:
            # Tied embeddings: project through the input embedding matrix.
            y = y @ self.wte.weight.T
        return y

    def __call__(
        self,
        inputs: mx.array,
        decoder_inputs: mx.array,
    ):
        # NOTE(review): ``decode`` returns a single array; the trailing [0]
        # selects the first batch element — confirm this is intended.
        return self.decode(decoder_inputs, self.encode(inputs))[0]
def load_model(model_name: str):
    """Instantiate a T5 ``Model`` and load its converted ``.npz`` weights."""
    model = Model(T5Config.from_pretrained(model_name))
    flat = mx.load(f"{model_name}.npz")
    model.update(tree_unflatten(list(flat.items())))
    # Materialize the lazily loaded parameters.
    mx.eval(model.parameters())
    return model
17,773 | import numpy as np
from transformers import T5ForConditionalGeneration
def replace_key(key: str) -> str:
    """Map a Hugging Face T5 parameter name to its converted equivalent."""
    # Rewrites common to both stacks, then stack-specific renames.
    for pattern in SHARED_REPLACEMENT_PATTERNS:
        key = key.replace(*pattern)
    if key.startswith("encoder."):
        extra = ENCODER_REPLACEMENT_PATTERNS
    elif key.startswith("decoder."):
        extra = DECODER_REPLACEMENT_PATTERNS
    else:
        extra = []
    for old, new in extra:
        key = key.replace(old, new)
    return key
def convert(model_name, dtype):
    """Export a Hugging Face T5 checkpoint to ``<model-name>.npz``.

    Parameter names are rewritten via ``replace_key`` and tensors are cast
    to *dtype* (a numpy dtype name, e.g. "float16").
    """
    np_dtype = getattr(np, dtype)
    model = T5ForConditionalGeneration.from_pretrained(model_name, torch_dtype="auto")
    weights = {}
    for k, v in model.state_dict().items():
        weights[replace_key(k)] = v.numpy().astype(np_dtype)
    file_name = model_name.replace("/", "-")
    print(f"Saving weights to {file_name}.npz")
    np.savez(f"{file_name}.npz", **weights)
17,774 | from typing import List, Optional, Tuple
import mlx.core as mx
import mlx.nn as nn
import numpy as np
from mlx.utils import tree_map, tree_unflatten
from transformers import AutoTokenizer, T5Config
The provided code snippet includes necessary dependencies for implementing the `_relative_position_bucket` function. Write a Python function `def _relative_position_bucket( relative_position, bidirectional=True, num_buckets=32, max_distance=128 )` to solve the following problem:
Adapted from HF Tensorflow: https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
Here is the function:
def _relative_position_bucket(
    relative_position, bidirectional=True, num_buckets=32, max_distance=128
):
    """
    Adapted from HF Tensorflow:
    https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py
    Translate relative position to a bucket number for relative attention. The relative position is defined as
    memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
    position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
    small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
    positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
    This should allow for more graceful generalization to longer sequences than the model has been trained on
    Args:
        relative_position: an int32 Tensor
        bidirectional: a boolean - whether the attention is bidirectional
        num_buckets: an integer
        max_distance: an integer
    Returns:
        a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
    """
    relative_buckets = 0
    if bidirectional:
        num_buckets //= 2
        # Sign bit: positive offsets land in the upper half of the buckets.
        relative_buckets += (relative_position > 0).astype(mx.int16) * num_buckets
        relative_position = mx.abs(relative_position)
    else:
        relative_position = -mx.minimum(
            relative_position, mx.zeros_like(relative_position)
        )
    # now relative_position is in the range [0, inf)

    # half of the buckets are for exact increments in positions
    max_exact = num_buckets // 2
    is_small = relative_position < max_exact

    # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
    scale = (num_buckets - max_exact) / np.log(max_distance / max_exact)
    relative_position_if_large = max_exact + (
        mx.log(relative_position.astype(mx.float32) / max_exact) * scale
    ).astype(mx.int16)
    # Clamp overflow beyond max_distance into the last bucket.
    relative_position_if_large = mx.minimum(relative_position_if_large, num_buckets - 1)
    relative_buckets += mx.where(
        is_small, relative_position, relative_position_if_large
    )
    return relative_buckets
17,775 | from typing import List, Optional, Tuple
import mlx.core as mx
import mlx.nn as nn
import numpy as np
from mlx.utils import tree_map, tree_unflatten
from transformers import AutoTokenizer, T5Config
def create_additive_causal_mask(N: int, offset: int = 0):
    """Return an additive causal mask: -1e9 wherever a query at (offset+i)
    would attend to a later position j > offset+i, 0 elsewhere."""
    cols = mx.arange(offset + N)
    if offset:
        rows = mx.arange(offset, offset + N)
    else:
        rows = cols
    # Entry (i, j) is masked when the column index exceeds the row index.
    return (rows[:, None] < cols[None]) * -1e9
17,776 | import sentencepiece as spm
import sentencepiece.sentencepiece_model_pb2 as model
def spm_tokenizer(metadata):
    """Rebuild a SentencePiece BPE processor from GGUF tokenizer metadata.

    Constructs a ModelProto in memory from the token list (plus optional
    scores and token types) stored in *metadata* and loads it directly,
    avoiding any on-disk tokenizer model file.
    """
    tokens = metadata["tokenizer.ggml.tokens"]
    bos = metadata["tokenizer.ggml.bos_token_id"].item()
    eos = metadata["tokenizer.ggml.eos_token_id"].item()
    unk = metadata["tokenizer.ggml.unknown_token_id"].item()

    normalizer_spec = model.NormalizerSpec(
        name="identity",
        precompiled_charsmap=b"",
        add_dummy_prefix=True,
        remove_extra_whitespaces=False,
        normalization_rule_tsv=b"",
    )
    trainer_spec = model.TrainerSpec(
        model_type="BPE",
        vocab_size=len(tokens),
        input_format="text",
        split_by_unicode_script=True,
        split_by_whitespace=True,
        split_by_number=True,
        treat_whitespace_as_suffix=False,
        split_digits=True,
        allow_whitespace_only_pieces=True,
        vocabulary_output_piece_score=True,
        byte_fallback=True,
        unk_id=unk,
        bos_id=bos,
        eos_id=eos,
        pad_id=-1,
        unk_piece="<unk>",
        bos_piece="<s>",
        eos_piece="</s>",
        pad_piece="<pad>",
        pretokenization_delimiter="",
    )
    m = model.ModelProto(trainer_spec=trainer_spec, normalizer_spec=normalizer_spec)

    # Scores and token types are optional in GGUF metadata; default to 0.
    scores = metadata.get("tokenizer.ggml.scores", None)
    scores = scores.tolist() if scores is not None else None
    token_types = metadata.get("tokenizer.ggml.token_type", None)
    token_types = token_types.tolist() if token_types is not None else None

    for i, token in enumerate(tokens):
        score = scores[i] if scores else 0
        token_type = token_types[i] if token_types else 0
        m.pieces.append(
            model.ModelProto.SentencePiece(piece=token, score=score, type=token_type)
        )
    tokenizer = spm.SentencePieceProcessor(model_proto=m.SerializeToString())
    return tokenizer
17,777 | from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import mlx.core as mx
import mlx.nn as nn
import numpy as np
import utils
from huggingface_hub import snapshot_download
from mlx.utils import tree_flatten, tree_unflatten
# Fixes: restored @dataclass (the class is built via ModelArgs(**config) and
# validated in __post_init__), restored @classmethod on from_dict (it takes
# ``cls``), and imported ``inspect`` locally since this file does not import
# it at module level.
@dataclass
class ModelArgs:
    """Llama model hyper-parameters parsed from GGUF/HF config."""

    hidden_size: int
    num_hidden_layers: int
    intermediate_size: int
    num_attention_heads: int
    rms_norm_eps: float
    vocab_size: int
    # Defaults to num_attention_heads when unset (see __post_init__).
    num_key_value_heads: int = None
    rope_theta: float = 10000
    rope_traditional: bool = False
    model_type: str = None
    rope_scaling: Optional[Dict[str, Union[float, str]]] = None

    def __post_init__(self):
        if self.num_key_value_heads is None:
            self.num_key_value_heads = self.num_attention_heads

        if self.rope_scaling:
            required_keys = {"factor", "type"}
            if not all(key in self.rope_scaling for key in required_keys):
                raise ValueError(f"rope_scaling must contain keys {required_keys}")

            if self.rope_scaling["type"] != "linear":
                raise ValueError("rope_scaling 'type' currently only supports 'linear'")

    @classmethod
    def from_dict(cls, params):
        """Build a ModelArgs from *params*, silently ignoring unknown keys."""
        import inspect  # local import: not available at module level here

        return cls(
            **{
                k: v
                for k, v in params.items()
                if k in inspect.signature(cls).parameters
            }
        )
class Model(nn.Module):
    """Llama model: transformer body plus an untied LM head."""

    def __init__(self, args: ModelArgs):
        super().__init__()
        self.model = LlamaModel(args)
        self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False)

    def __call__(
        self,
        inputs: mx.array,
        cache=None,
    ):
        # Returns (logits, updated cache).
        out, cache = self.model(inputs, cache)
        return self.lm_head(out), cache
def get_config(metadata: dict):
    """Translate GGUF llama metadata into ModelArgs keyword arguments."""
    config = {
        "hidden_size": metadata["llama.embedding_length"],
        "num_hidden_layers": metadata["llama.block_count"],
        "num_attention_heads": metadata["llama.attention.head_count"],
        "intermediate_size": metadata["llama.feed_forward_length"],
        "num_key_value_heads": metadata["llama.attention.head_count_kv"],
        "rms_norm_eps": metadata["llama.attention.layer_norm_rms_epsilon"],
        "vocab_size": len(metadata["tokenizer.ggml.tokens"]),
        "rope_theta": metadata["llama.rope.freq_base"],
        "rope_traditional": True,
    }
    # Metadata scalars arrive as 0-d mx.arrays; unwrap them to Python numbers.
    for name, value in config.items():
        if isinstance(value, mx.array):
            config[name] = value.item()
    return config
class GGUFTokenizer:
    """Thin wrapper around a SentencePiece tokenizer built from GGUF metadata."""

    def __init__(self, metadata):
        self._tokenizer = utils.spm_tokenizer(metadata)

    def encode(self, s: str) -> mx.array:
        """Encode *s* as token ids, prepending BOS."""
        return mx.array([self._tokenizer.bos_id()] + self._tokenizer.encode(s))

    # Fix: restored @property — callers compare tokens against this as an
    # attribute (``token == tokenizer.eos_token_id``), which would otherwise
    # compare against a bound method and never match.
    @property
    def eos_token_id(self):
        return self._tokenizer.eos_id()

    def decode(self, toks: List[int]) -> str:
        """Decode a list of token ids back to text."""
        return self._tokenizer.decode(toks)
def translate_weight_names(name):
    """Map a GGUF tensor name to the corresponding MLX llama parameter path."""
    # Order matters: "attn_output" and "output_norm" must be rewritten
    # before the bare "output" -> "lm_head" rule.
    rules = (
        ("blk.", "model.layers."),
        ("ffn_gate", "mlp.gate_proj"),
        ("ffn_down", "mlp.down_proj"),
        ("ffn_up", "mlp.up_proj"),
        ("attn_q", "self_attn.q_proj"),
        ("attn_k", "self_attn.k_proj"),
        ("attn_v", "self_attn.v_proj"),
        ("attn_output", "self_attn.o_proj"),
        ("attn_norm", "input_layernorm"),
        ("ffn_norm", "post_attention_layernorm"),
        ("token_embd", "model.embed_tokens"),
        ("output_norm", "model.norm"),
        ("output", "lm_head"),
    )
    for old, new in rules:
        name = name.replace(old, new)
    return name
def load(gguf_file: str, repo: str = None):
    """Load a llama model + tokenizer from a GGUF file.

    If *gguf_file* does not exist locally it is fetched from the Hugging
    Face repo *repo*.  Recognized GGUF quantizations are mapped onto MLX
    quantized layers; unsupported ones fall back with a warning.
    """
    # If the gguf_file exists, try to load model from it.
    # Otherwise try to download and cache from the HF repo
    if not Path(gguf_file).exists():
        if repo is None:
            raise ValueError(
                f"Could not find file {gguf_file}, and no Hugging Face"
                " repo provided for download."
            )
        model_path = snapshot_download(
            repo_id=repo,
            allow_patterns=[gguf_file],
        )
        if not (Path(model_path) / gguf_file).exists():
            raise ValueError(f"File {gguf_file} not in repo {repo}.")
        gguf_file = str(Path(model_path) / gguf_file)

    print(f"[INFO] Loading model from {gguf_file}")
    weights, metadata = mx.load(gguf_file, return_metadata=True)
    gguf_ft = metadata["general.file_type"]
    # Map the GGUF file-type code onto MLX quantization parameters.
    if gguf_ft == 0 or gguf_ft == 1:
        # ALL_F32 or MOSTLY_F16
        quantization = None
        pass
    elif gguf_ft == 2 or gguf_ft == 3:
        # MOSTLY_Q4_0 or MOSTLY_Q4_1
        quantization = {"group_size": 32, "bits": 4}
    elif gguf_ft == 7:
        # MOSTLY_Q8_0 = 7
        quantization = {"group_size": 32, "bits": 8}
    else:
        quantization = None
        print("[WARNING] Using unsupported GGUF quantization. Casting to float16.")

    weights = {translate_weight_names(k): v for k, v in weights.items()}
    config = get_config(metadata)
    model = Model(ModelArgs(**config))
    if quantization is not None:
        # quantized the LM head?
        qm = model if "lm_head.scales" in weights else model.model
        nn.QuantizedLinear.quantize_module(
            qm,
            **quantization,
        )

        def dequantize(k):
            # Replace a quantized (weight, scales, biases) triple with its
            # dense float weight.
            weight = weights.pop(f"{k}.weight")
            scales = weights.pop(f"{k}.scales")
            biases = weights.pop(f"{k}.biases")
            weights[f"{k}.weight"] = mx.dequantize(
                weight, scales=scales, biases=biases, **quantization
            )

        # Dequantize embeddings
        dequantize("model.embed_tokens")

    tokenizer = GGUFTokenizer(metadata)
    model.load_weights(list(weights.items()))
    return model, tokenizer
17,778 | from dataclasses import dataclass
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import mlx.core as mx
import mlx.nn as nn
import numpy as np
import utils
from huggingface_hub import snapshot_download
from mlx.utils import tree_flatten, tree_unflatten
class Model(nn.Module):
    """Llama LM wrapper: transformer body followed by a separate LM head."""

    def __init__(self, args: ModelArgs):
        super().__init__()
        self.model = LlamaModel(args)
        self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False)

    def __call__(
        self,
        inputs: mx.array,
        cache=None,
    ):
        # Forward through the body, then project to vocabulary logits.
        out, cache = self.model(inputs, cache)
        return self.lm_head(out), cache
def generate(prompt: mx.array, model: Model, temp: float = 0.0):
    """Yield sampled token arrays forever, starting from *prompt*."""

    def sample(logits):
        # temp == 0 -> greedy decoding; otherwise temperature sampling.
        return (
            mx.argmax(logits, axis=-1)
            if temp == 0
            else mx.random.categorical(logits * (1 / temp))
        )

    y, cache = prompt, None
    while True:
        logits, cache = model(y[None], cache=cache)
        y = sample(logits[:, -1, :])
        yield y
17,779 | import argparse
import time
import mlx.core as mx
import models
def generate(
    model: models.Model,
    tokenizer: models.GGUFTokenizer,
    prompt: str,
    max_tokens: int,
    temp: float = 0.0,
):
    """Generate up to *max_tokens* tokens from *prompt*, streaming to stdout.

    Prints the decoded continuation incrementally, then prompt and
    generation tokens-per-second stats.
    """
    prompt = tokenizer.encode(prompt)

    tic = time.time()
    tokens = []
    skip = 0
    # Bug fix: the loop previously read the undefined globals ``args.temp``
    # and ``args.max_tokens`` instead of the ``temp``/``max_tokens`` params.
    for token, n in zip(
        models.generate(prompt, model, temp),
        range(max_tokens),
    ):
        if token == tokenizer.eos_token_id:
            break

        if n == 0:
            # First token out marks the end of prompt processing.
            prompt_time = time.time() - tic
            tic = time.time()

        tokens.append(token.item())
        s = tokenizer.decode(tokens)
        # Print only the not-yet-printed suffix of the decoded text.
        print(s[skip:], end="", flush=True)
        skip = len(s)
    print(tokenizer.decode(tokens)[skip:], flush=True)
    gen_time = time.time() - tic
    print("=" * 10)
    if len(tokens) == 0:
        print("No tokens generated for this prompt")
        return
    prompt_tps = prompt.size / prompt_time
    gen_tps = (len(tokens) - 1) / gen_time
    print(f"Prompt: {prompt_tps:.3f} tokens-per-sec")
    print(f"Generation: {gen_tps:.3f} tokens-per-sec")
17,780 | import argparse
import glob
import json
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Tuple
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_unflatten
from sentencepiece import SentencePieceProcessor
def tic():
    # Start a timing span: returns the current wall-clock time in seconds,
    # to be paired with toc().
    return time.time()
def toc(msg, start):
    """Format the seconds elapsed since *start* as '[INFO] <msg>: <t> s'."""
    elapsed = time.time() - start
    return f"[INFO] {msg}: {elapsed:.3f} s"
def generate(args):
    """Interactively generate from ``args.prompt``, streaming text to stdout.

    Relies on module-level ``model`` and ``tokenizer`` (set up elsewhere in
    the script).  Prints prompt-processing and full-generation timings.
    """
    input("Press enter to start generation")
    print("------")
    print(args.prompt)
    x = mx.array([[tokenizer.bos_id()] + tokenizer.encode(args.prompt)])
    skip = 0
    prompt_processing = None
    tokens = []
    start = tic()
    for token in model.generate(x, args.temp):
        tokens.append(token)

        if len(tokens) == 1:
            # Actually perform the computation to measure the prompt processing time
            mx.eval(token)
            prompt_processing = toc("Prompt processing", start)

        if len(tokens) >= args.max_tokens:
            break

        elif (len(tokens) % args.write_every) == 0:
            # It is perfectly ok to eval things we have already eval-ed.
            mx.eval(tokens)
            s = tokenizer.decode([t.item() for t in tokens])
            # Print only the not-yet-printed suffix.
            print(s[skip:], end="", flush=True)
            skip = len(s)

    mx.eval(tokens)
    full_gen = toc("Full generation", start)
    s = tokenizer.decode([t.item() for t in tokens])
    print(s[skip:], flush=True)
    print("------")
    print(prompt_processing)
    print(full_gen)
def few_shot_generate(args):
    """Loop forever: read a question, splice it into the few-shot template
    from ``args.few_shot``, and stream the model's answer until it starts a
    new "[Instruction]" block or hits EOS/max_tokens.

    Relies on module-level ``model`` and ``tokenizer``.
    """

    def possible_end(s):
        # 1  -> s ends with the full "[Instruction]" marker
        # 0  -> s ends with a proper prefix of it (keep buffering output)
        # -1 -> neither
        word = "[Instruction]"
        for i in range(len(word) - 1, 0, -1):
            if s[-i:] == word[:i]:
                return 0
        if s[-len(word) :] == word:
            return 1
        return -1

    def generate(question):
        x = mx.array([[tokenizer.bos_id()] + tokenizer.encode(question)])
        skip = 0
        prompt_processing = None
        tokens = []
        start = tic()
        for token in model.generate(x, args.temp):
            tokens.append(token)

            if len(tokens) == 1:
                # Actually perform the computation to measure the prompt processing time
                mx.eval(token)
                prompt_processing = toc("Prompt processing", start)

            if len(tokens) >= args.max_tokens:
                break

            mx.eval(tokens)
            token_list = [t.item() for t in tokens]
            s = tokenizer.decode(token_list)

            end = possible_end(s)
            if end == 0:
                # Might be the start of "[Instruction]"; hold off printing.
                continue
            if end == 1:
                skip = len(s)
                break

            print(s[skip:], end="", flush=True)
            skip = len(s)
            if token_list[-1] == tokenizer.eos_id():
                break

        mx.eval(tokens)
        full_gen = toc("Full generation", start)
        s = tokenizer.decode([t.item() for t in tokens])
        print(s[skip:], end="", flush=True)

    print("[INFO] Loading few-shot examples from: {}".format(args.few_shot))
    prompt = open(args.few_shot).read().strip()
    while True:
        question = input("Ask a question: ")
        generate(prompt.replace("{}", question))
        print()
17,781 | import argparse
import glob
import json
import time
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Tuple
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_unflatten
from sentencepiece import SentencePieceProcessor
class ModelArgs:
    """Hyper-parameters describing a Llama model architecture.

    NOTE(review): bare annotated fields suggest this is a ``@dataclass``
    body, but the decorator is not visible in this excerpt -- confirm.
    """

    dim: int
    n_layers: int
    head_dim: int
    hidden_dim: int
    n_heads: int
    n_kv_heads: int
    norm_eps: float
    vocab_size: int
    rope_theta: float
    rope_traditional: bool = True
class Llama(nn.Module):
    """Llama decoder-only transformer: embedding -> N blocks -> norm -> head.

    NOTE(review): ``TransformerBlock`` and ``RMSNorm`` are referenced but
    defined elsewhere in the module.
    """

    def __init__(self, args: ModelArgs):
        super().__init__()
        self.args = args
        self.vocab_size = args.vocab_size
        self.tok_embeddings = nn.Embedding(args.vocab_size, args.dim)
        self.layers = [TransformerBlock(args=args) for _ in range(args.n_layers)]
        self.norm = RMSNorm(args.dim, eps=args.norm_eps)
        self.output = nn.Linear(args.dim, args.vocab_size, bias=False)

    def __call__(self, x):
        """Return per-position logits for the token ids in ``x``."""
        mask = nn.MultiHeadAttention.create_additive_causal_mask(x.shape[1])
        mask = mask.astype(self.tok_embeddings.weight.dtype)
        x = self.tok_embeddings(x)
        for l in self.layers:
            x, _ = l(x, mask)
        x = self.norm(x)
        return self.output(x)

    def generate(self, x, temp=1.0):
        """Lazily yield sampled next-token ids, starting from prompt ``x``.

        ``temp == 0`` means greedy (argmax) decoding.
        """

        def sample(logits):
            if temp == 0:
                return mx.argmax(logits, axis=-1)
            else:
                return mx.random.categorical(logits * (1 / temp))

        cache = []
        # Make an additive causal mask. We will need that to process the prompt.
        mask = nn.MultiHeadAttention.create_additive_causal_mask(x.shape[1])
        mask = mask.astype(self.tok_embeddings.weight.dtype)
        # First we process the prompt x the same way as in __call__ but
        # save the caches in cache
        x = self.tok_embeddings(x)
        for l in self.layers:
            x, c = l(x, mask=mask)
            # We store the per layer cache in a simple python list
            cache.append(c)
        x = self.norm(x)
        # We only care about the last logits that generate the next token
        y = self.output(x[:, -1])
        y = sample(y)
        # y now has size [1]
        # Since MLX is lazily evaluated nothing is computed yet.
        # Calling y.item() would force the computation to happen at
        # this point but we can also choose not to do that and let the
        # user choose when to start the computation.
        yield y
        # Now we parsed the prompt and generated the first token we
        # need to feed it back into the model and loop to generate the
        # rest.
        while True:
            # Unsqueezing the last dimension to add a sequence length
            # dimension of 1
            x = y[:, None]
            x = self.tok_embeddings(x)
            for i in range(len(cache)):
                # We are overwriting the arrays in the cache list. When
                # the computation will happen, MLX will be discarding the
                # old cache the moment it is not needed anymore.
                x, cache[i] = self.layers[i](x, mask=None, cache=cache[i])
            x = self.norm(x)
            y = sample(self.output(x[:, -1]))
            yield y
def sanitize_config(config, weights):
    """Fill in missing config defaults and drop keys the model ignores.

    Mutates and returns ``config``. Missing ``hidden_dim`` and
    ``vocab_size`` are inferred from the corresponding weight shapes.
    """
    config.pop("model_type", None)
    heads = config["n_heads"]
    config.setdefault("n_kv_heads", heads)
    config.setdefault("head_dim", config["dim"] // heads)
    if "hidden_dim" not in config:
        config["hidden_dim"] = weights["layers.0.feed_forward.w1.weight"].shape[0]
    if config.get("vocab_size", -1) < 0:
        config["vocab_size"] = weights["output.weight"].shape[-1]
    config.setdefault("rope_theta", 10000)
    # These keys may appear in upstream params.json but are unused here.
    for key in ("multiple_of", "ffn_dim_multiplier"):
        config.pop(key, None)
    return config
def load_model(model_path):
    """Load a converted Llama model and its tokenizer from ``model_path``.

    Handles both a single ``weights.npz`` and sharded ``weights.*.npz``
    files, and re-applies quantization when the config records one.

    Returns:
        tuple: ``(Llama, SentencePieceProcessor)``.

    Raises:
        FileNotFoundError: If no weight files exist under ``model_path``.
    """
    model_path = Path(model_path)

    unsharded_weights_path = Path(model_path / "weights.npz")
    if unsharded_weights_path.is_file():
        print("[INFO] Loading model from {}.".format(unsharded_weights_path))
        weights = mx.load(str(unsharded_weights_path))
    else:
        sharded_weights_glob = str(model_path / "weights.*.npz")
        weight_files = glob.glob(sharded_weights_glob)
        print("[INFO] Loading model from {}.".format(sharded_weights_glob))
        if len(weight_files) == 0:
            raise FileNotFoundError("No weights found in {}".format(model_path))
        weights = {}
        for wf in weight_files:
            weights.update(mx.load(wf).items())
    with open(model_path / "config.json", "r") as f:
        config = sanitize_config(json.loads(f.read()), weights)
        # "quantization" is only present for quantized exports.
        quantization = config.pop("quantization", None)
    model = Llama(ModelArgs(**config))
    if quantization is not None:
        # Swap Linear layers for QuantizedLinear before loading weights.
        nn.QuantizedLinear.quantize_module(model, **quantization)
    model.update(tree_unflatten(list(weights.items())))
    tokenizer = SentencePieceProcessor(model_file=str(model_path / "tokenizer.model"))
    return model, tokenizer
17,782 | import argparse
import collections
import copy
import glob
import json
import shutil
from pathlib import Path
import mlx.core as mx
import mlx.nn as nn
import torch
from llama import Llama, ModelArgs, sanitize_config
from mlx.utils import tree_flatten, tree_map, tree_unflatten
def torch_to_mx(a: torch.Tensor, *, dtype: str) -> mx.array:
def llama(model_path, *, dtype: str):
    """Load sharded Meta-Llama torch checkpoints and unshard them.

    Per-shard tensors of the same weight are concatenated along the axis
    appropriate for that weight family.

    Returns:
        tuple: ``(weights, params)`` -- mx arrays and the parsed params.json.
    """
    SHARD_FIRST = ["wv", "wq", "wk", "w1", "w3", "output"]
    SHARD_SECOND = ["tok_embeddings", "wo", "w2"]
    SHARD_WEIGHTS = set(SHARD_FIRST + SHARD_SECOND)

    def shard_key(k):
        # e.g. "layers.0.attention.wq.weight" -> "wq"
        keys = k.split(".")
        if len(keys) < 2:
            return None
        return keys[-2]

    def unshard(k, v):
        wn = shard_key(k)
        if wn not in SHARD_WEIGHTS:
            return v
        elif wn in SHARD_FIRST:
            axis = 0
        elif wn in SHARD_SECOND:
            axis = 1
        else:
            raise ValueError("Invalid weight name")
        return mx.concatenate(v, axis=axis)

    torch_files = glob.glob(str(model_path / "consolidated.*.pth"))
    weights = collections.defaultdict(list)
    for wf in torch_files:
        state = torch.load(wf, map_location=torch.device("cpu"))
        for k, v in state.items():
            v = torch_to_mx(v, dtype=dtype)
            state[k] = None  # free memory
            if shard_key(k) in SHARD_WEIGHTS:
                weights[k].append(v)
            else:
                weights[k] = v
    # Concatenate the collected shard lists into single arrays.
    for k, v in weights.items():
        weights[k] = unshard(k, v)
    with open(model_path / "params.json", "r") as f:
        params = json.loads(f.read())
    return weights, params
17,783 | import argparse
import collections
import copy
import glob
import json
import shutil
from pathlib import Path
import mlx.core as mx
import mlx.nn as nn
import torch
from llama import Llama, ModelArgs, sanitize_config
from mlx.utils import tree_flatten, tree_map, tree_unflatten
def torch_to_mx(a: torch.Tensor, *, dtype: str) -> mx.array:
    """Convert a torch tensor to an ``mx.array`` with the requested dtype."""
    # numpy has no bfloat16, so route bfloat16 through float32 to avoid
    # precision loss before handing the buffer to mx.
    target = torch.float32 if dtype == "bfloat16" else getattr(torch, dtype)
    return mx.array(a.to(target).numpy(), getattr(mx, dtype))
def tiny_llama(model_path, *, dtype: str):
    """Load a TinyLlama HF checkpoint and remap names to this repo's scheme.

    Returns:
        tuple: ``(weights, params)`` with weights converted to mx arrays
        of ``dtype`` and params mirroring the HF config fields used here.
    """
    try:
        import transformers
    except ImportError:
        print("The transformers package must be installed for this model conversion:")
        print("pip install transformers")
        exit(1)

    state = transformers.AutoModelForCausalLM.from_pretrained(
        str(model_path)
    ).state_dict()
    config = transformers.AutoConfig.from_pretrained(model_path)

    # Rename HF parameter names to the conventions used by the Llama
    # implementation in this repo. Substitutions are applied in order.
    renames = [
        ("model.", ""),  # drop the "model." prefix
        ("mlp", "feed_forward"),
        ("down_proj", "w2"),
        ("up_proj", "w3"),
        ("gate_proj", "w1"),
        ("input_layernorm", "attention_norm"),
        ("post_attention_layernorm", "ffn_norm"),
        ("lm_head", "output"),
        ("embed_tokens", "tok_embeddings"),
        ("self_attn", "attention"),
        ("q_proj", "wq"),
        ("k_proj", "wk"),
        ("v_proj", "wv"),
        ("o_proj", "wo"),
    ]
    for old, new in renames:
        state = {k.replace(old, new): v for k, v in state.items()}

    params = {
        "dim": config.hidden_size,
        "hidden_dim": config.intermediate_size,
        "n_heads": config.num_attention_heads,
    }
    if hasattr(config, "num_key_value_heads"):
        params["n_kv_heads"] = config.num_key_value_heads
    params["n_layers"] = config.num_hidden_layers
    params["vocab_size"] = config.vocab_size
    params["norm_eps"] = config.rms_norm_eps
    params["rope_traditional"] = False

    weights = {k: torch_to_mx(v, dtype=dtype) for k, v in state.items()}
    return weights, params
17,784 | import argparse
import collections
import copy
import glob
import json
import shutil
from pathlib import Path
import mlx.core as mx
import mlx.nn as nn
import torch
from llama import Llama, ModelArgs, sanitize_config
from mlx.utils import tree_flatten, tree_map, tree_unflatten
class ModelArgs(BaseModelArgs):
    """Llama-family model configuration mirroring HF config field names.

    NOTE(review): bare annotated fields suggest a ``@dataclass``; the
    decorator is not visible in this excerpt -- confirm upstream.
    """

    model_type: str
    hidden_size: int
    num_hidden_layers: int
    intermediate_size: int
    num_attention_heads: int
    rms_norm_eps: float
    vocab_size: int
    num_key_value_heads: Optional[int] = None
    rope_theta: float = 10000
    rope_traditional: bool = False
    rope_scaling: Optional[Dict[str, Union[float, str]]] = None

    def __post_init__(self):
        """Default KV heads to attention heads and validate ``rope_scaling``."""
        if self.num_key_value_heads is None:
            self.num_key_value_heads = self.num_attention_heads

        if self.rope_scaling:
            required_keys = {"factor", "type"}
            if not all(key in self.rope_scaling for key in required_keys):
                raise ValueError(f"rope_scaling must contain keys {required_keys}")

            if self.rope_scaling["type"] != "linear":
                raise ValueError("rope_scaling 'type' currently only supports 'linear'")
def quantize(weights, config, args):
    """Quantize model weights and record the settings in the config.

    Args:
        weights (dict): Flat name -> array mapping (consumed to build the model).
        config (dict): Model config; a deep copy is returned with a
            "quantization" section added.
        args: Namespace providing ``q_group_size`` and ``q_bits``.

    Returns:
        tuple: ``(quantized_weights, quantized_config)``.
    """
    quantized_config = copy.deepcopy(config)

    # Load the model:
    config = sanitize_config(config, weights)
    model = Llama(ModelArgs(**config))
    weights = tree_map(mx.array, weights)
    model.update(tree_unflatten(list(weights.items())))

    # Quantize the model:
    nn.QuantizedLinear.quantize_module(model, args.q_group_size, args.q_bits)

    # Update the config:
    quantized_config["quantization"] = {
        "group_size": args.q_group_size,
        "bits": args.q_bits,
    }
    quantized_weights = dict(tree_flatten(model.parameters()))

    return quantized_weights, quantized_config
17,785 | import argparse
import collections
import copy
import glob
import json
import shutil
from pathlib import Path
import mlx.core as mx
import mlx.nn as nn
import torch
from llama import Llama, ModelArgs, sanitize_config
from mlx.utils import tree_flatten, tree_map, tree_unflatten
def make_shards(weights: dict, max_file_size_gibibyte: int = 15) -> list:
    """Partition ``weights`` into shards no larger than the given size.

    Args:
        weights (dict): Mapping of weight name to array (anything exposing
            an ``nbytes`` attribute).
        max_file_size_gibibyte (int): Upper bound per shard in GiB.

    Returns:
        list: Dicts, each holding a subset of ``weights`` in iteration
        order. A single weight larger than the limit gets its own shard.
    """
    max_file_size_bytes = max_file_size_gibibyte << 30
    shards = []
    shard, shard_size = {}, 0
    for k, v in weights.items():
        # Flush the current shard before it would overflow. Bug fix: skip
        # the flush when the shard is still empty (an oversized weight
        # previously caused an empty leading shard to be emitted).
        if shard and shard_size + v.nbytes > max_file_size_bytes:
            shards.append(shard)
            shard, shard_size = {}, 0
        shard[k] = v
        shard_size += v.nbytes
    shards.append(shard)
    return shards
17,786 | import argparse
import glob
import json
import shutil
from pathlib import Path
from typing import Optional
import mlx.core as mx
import mlx.nn as nn
import numpy as np
import yaml
from mlx.utils import tree_flatten, tree_map
from .utils import (
fetch_from_hub,
get_model_path,
save_config,
save_weights,
upload_to_hub,
)
The provided code snippet includes necessary dependencies for implementing the `configure_parser` function. Write a Python function `def configure_parser() -> argparse.ArgumentParser` to solve the following problem:
Configures and returns the argument parser for the script. Returns: argparse.ArgumentParser: Configured argument parser.
Here is the function:
def configure_parser() -> argparse.ArgumentParser:
    """Build the CLI argument parser for the model-merge script.

    Returns:
        argparse.ArgumentParser: Configured argument parser.
    """
    parser = argparse.ArgumentParser(description="Merge multiple models.")
    options = [
        ("--config", {"type": str, "help": "Path to the YAML config."}),
        (
            "--mlx-path",
            {
                "type": str,
                "default": "mlx_merged_model",
                "help": "Path to save the MLX model.",
            },
        ),
        (
            "--upload-repo",
            {
                "type": str,
                "default": None,
                "help": "The Hugging Face repo to upload the model to.",
            },
        ),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser
17,787 | import argparse
import glob
import json
import shutil
from pathlib import Path
from typing import Optional
import mlx.core as mx
import mlx.nn as nn
import numpy as np
import yaml
from mlx.utils import tree_flatten, tree_map
from .utils import (
fetch_from_hub,
get_model_path,
save_config,
save_weights,
upload_to_hub,
)
def merge_models(base_model: nn.Module, model: nn.Module, config: dict):
    """SLERP-merge ``model`` into ``base_model`` layer by layer, in place.

    ``config`` is a mergekit-style mapping: ``method: slerp`` plus a
    ``parameters.t`` list of per-filter interpolation schedules; the last
    entry supplies the default schedule.

    NOTE(review): ``slerp`` is referenced but not defined in this excerpt;
    presumably a module-level helper.

    Raises:
        ValueError: If the method is anything other than "slerp".
    """
    method = config.get("method", None)
    if method != "slerp":
        raise ValueError(f"Merge method {method} not supported")

    num_layers = len(model.layers)

    def unpack_values(vals):
        # A scalar applies uniformly to every layer; a list of anchor
        # values is linearly interpolated across layer bins.
        if isinstance(vals, (int, float)):
            return np.full(num_layers, vals)
        bins = len(vals) - 1
        sizes = [num_layers // bins] * bins
        # Last bin absorbs the remainder so sizes sum to num_layers.
        sizes[-1] = num_layers - sum(sizes[:-1])
        return np.concatenate(
            [np.linspace(v1, v2, s) for v1, v2, s in zip(vals[:-1], vals[1:], sizes)]
        )

    param_list = config["parameters"]["t"]
    params = {}
    filter_keys = set()
    for pl in param_list[:-1]:
        params[pl["filter"]] = unpack_values(pl["value"])
        filter_keys.add(pl["filter"])
    default = unpack_values(param_list[-1]["value"])

    for e in range(num_layers):
        bl = base_model.layers[e]
        l = model.layers[e]
        base_weights = bl.parameters()
        weights = l.parameters()
        for k, w1 in base_weights.items():
            w2 = weights[k]
            # Per-layer interpolation factor for this parameter group.
            t = params.get(k, default)[e]
            base_weights[k] = tree_map(lambda x, y: slerp(t, x, y), w1, w2)
        base_model.update(base_weights)
def get_model_path(path_or_hf_repo: str, revision: Optional[str] = None) -> Path:
    """
    Ensures the model is available locally. If the path does not exist locally,
    it is downloaded from the Hugging Face Hub.

    Args:
        path_or_hf_repo (str): The local path or Hugging Face repository ID of the model.
        revision (str, optional): A revision id which can be a branch name, a tag, or a commit hash.

    Returns:
        Path: The path to the model.
    """
    model_path = Path(path_or_hf_repo)
    if not model_path.exists():
        # NOTE(review): ``snapshot_download`` comes from huggingface_hub;
        # its import is not visible in this excerpt.
        model_path = Path(
            snapshot_download(
                repo_id=path_or_hf_repo,
                revision=revision,
                # Only fetch the files needed for inference/conversion.
                allow_patterns=[
                    "*.json",
                    "*.safetensors",
                    "*.py",
                    "tokenizer.model",
                    "*.tiktoken",
                    "*.txt",
                ],
            )
        )
    return model_path
def fetch_from_hub(
    model_path: Path, lazy: bool = False
) -> Tuple[nn.Module, dict, PreTrainedTokenizer]:
    """Load the model, its config as a plain dict, and its tokenizer from
    a local path. With ``lazy=True`` weight evaluation is deferred."""
    model = load_model(model_path, lazy)
    config = AutoConfig.from_pretrained(model_path)
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    return model, config.to_dict(), tokenizer
def upload_to_hub(path: str, upload_repo: str, hf_path: str):
    """
    Uploads the model to Hugging Face hub.

    Args:
        path (str): Local path to the model.
        upload_repo (str): Name of the HF repo to upload to.
        hf_path (str): Path to the original Hugging Face model.
    """
    import os

    from huggingface_hub import HfApi, ModelCard, logging

    # Start from the upstream model card and tag the new repo as MLX.
    card = ModelCard.load(hf_path)
    card.data.tags = ["mlx"] if card.data.tags is None else card.data.tags + ["mlx"]
    card.text = dedent(
        f"""
# {upload_repo}
This model was converted to MLX format from [`{hf_path}`]().
Refer to the [original model card](https://huggingface.co/{hf_path}) for more details on the model.
## Use with mlx
```bash
pip install mlx-lm
```
```python
from mlx_lm import load, generate
model, tokenizer = load("{upload_repo}")
response = generate(model, tokenizer, prompt="hello", verbose=True)
```
"""
    )
    card.save(os.path.join(path, "README.md"))

    logging.set_verbosity_info()
    api = HfApi()
    # Create (or reuse) the target repo, then push the whole folder.
    api.create_repo(repo_id=upload_repo, exist_ok=True)
    api.upload_folder(
        folder_path=path,
        repo_id=upload_repo,
        repo_type="model",
    )
    print(f"Upload successful, go to https://huggingface.co/{upload_repo} for details.")
def save_weights(
    save_path: Union[str, Path],
    weights: Dict[str, Any],
    *,
    donate_weights: bool = False,
) -> None:
    """Save model weights into specified directory.

    Weights are split into safetensors shards plus a
    ``model.safetensors.index.json`` mapping each weight to its shard.
    With ``donate_weights=True`` the caller's dict is emptied so each
    shard can be freed as soon as it is written.
    """
    if isinstance(save_path, str):
        save_path = Path(save_path)
    save_path.mkdir(parents=True, exist_ok=True)

    shards = make_shards(weights)
    shards_count = len(shards)
    # HF-style shard names only when there is more than one shard.
    shard_file_format = (
        "model-{:05d}-of-{:05d}.safetensors"
        if shards_count > 1
        else "model.safetensors"
    )

    total_size = sum(v.nbytes for v in weights.values())
    index_data = {"metadata": {"total_size": total_size}, "weight_map": {}}

    # Write the weights and make sure no references are kept other than the
    # necessary ones
    if donate_weights:
        weights.clear()
        del weights

    for i in range(len(shards)):
        shard = shards[i]
        shards[i] = None  # drop our reference so the shard can be freed
        shard_name = shard_file_format.format(i + 1, shards_count)
        shard_path = save_path / shard_name

        mx.save_safetensors(str(shard_path), shard, metadata={"format": "mlx"})

        for weight_name in shard.keys():
            index_data["weight_map"][weight_name] = shard_name
        del shard

    # Sort the weight map for stable, readable index files.
    index_data["weight_map"] = {
        k: index_data["weight_map"][k] for k in sorted(index_data["weight_map"])
    }

    with open(save_path / "model.safetensors.index.json", "w") as f:
        json.dump(
            index_data,
            f,
            indent=4,
        )
def save_config(
    config: dict,
    config_path: Union[str, Path],
) -> None:
    """Write the model configuration to ``config_path`` as JSON.

    ``_name_or_path`` is removed (mutating ``config``) and keys are
    written in sorted order for better readability.

    Args:
        config (dict): The model configuration.
        config_path (Union[str, Path]): Destination file path.
    """
    config.pop("_name_or_path", None)
    ordered = {key: config[key] for key in sorted(config)}
    with open(config_path, "w") as fid:
        json.dump(ordered, fid, indent=4)
def merge(
    config: str,
    mlx_path: str = "mlx_model",
    upload_repo: Optional[str] = None,
):
    """Merge the models listed in the YAML ``config`` and save the result.

    Args:
        config (str): Path to the merge YAML (``models`` list, ``method``,
            ``parameters``).
        mlx_path (str): Output directory for the merged model.
        upload_repo (Optional[str]): If given, upload the result to this
            Hugging Face repo.

    Raises:
        ValueError: If fewer than two models are listed, or the models'
            types differ.
    """
    with open(config, "r") as fid:
        merge_conf = yaml.safe_load(fid)
    print("[INFO] Loading")

    model_paths = merge_conf.get("models", [])
    if len(model_paths) < 2:
        raise ValueError(f"Expected at least 2 models, got {len(model_paths)}.")

    # Load all models
    base_hf_path = model_paths[0]
    base_path = get_model_path(base_hf_path)
    base_model, base_config, tokenizer = fetch_from_hub(base_path, lazy=True)
    models = []
    for mp in model_paths[1:]:
        model, model_config, _ = fetch_from_hub(get_model_path(mp), lazy=True)
        base_type = base_config["model_type"]
        model_type = model_config["model_type"]
        if base_type != model_type:
            raise ValueError(
                f"Can only merge models of the same type,"
                f" but got {base_type} and {model_type}."
            )
        models.append(model)

    # Merge models into base model
    for m in models:
        merge_models(base_model, m, merge_conf)

    # Save base model
    mlx_path = Path(mlx_path)
    weights = dict(tree_flatten(base_model.parameters()))
    del models, base_model
    save_weights(mlx_path, weights, donate_weights=True)
    py_files = glob.glob(str(base_path / "*.py"))
    for file in py_files:
        shutil.copy(file, mlx_path)
    tokenizer.save_pretrained(mlx_path)
    # Bug fix: previously ``config`` (the YAML *path* string) was passed
    # to save_config, which expects the model config dict; save the
    # merged model's config instead.
    save_config(base_config, config_path=mlx_path / "config.json")

    if upload_repo is not None:
        upload_to_hub(mlx_path, upload_repo, base_hf_path)
17,788 | import copy
import glob
import importlib
import json
import logging
import shutil
import time
from pathlib import Path
from textwrap import dedent
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
import mlx.core as mx
import mlx.nn as nn
from huggingface_hub import snapshot_download
from mlx.utils import tree_flatten
from transformers import AutoConfig, AutoTokenizer, PreTrainedTokenizer
from .tuner.utils import apply_lora_layers
def generate_step(
    prompt: mx.array,
    model: nn.Module,
    temp: float = 0.0,
    repetition_penalty: Optional[float] = None,
    repetition_context_size: Optional[int] = 20,
    top_p: float = 1.0,
) -> Generator[Tuple[mx.array, mx.array], None, None]:
    """
    A generator producing text based on the given prompt from the model.

    Args:
        prompt (mx.array): The input prompt.
        model (nn.Module): The model to use for generation.
        temp (float): The temperature for sampling, if 0 the argmax is used.
        repetition_penalty (float, optional): The penalty factor for repeating tokens.
        repetition_context_size (int, optional): The number of tokens to consider for repetition penalty (default 20).
        top_p (float, optional): Nucleus sampling; higher values let the model consider more of the less likely words.

    Yields:
        Generator[Tuple[mx.array, mx.array]]: A generator producing
        one token and probability per call.
    """

    def sample(logits: mx.array) -> Tuple[mx.array, float]:
        # Full-vocab probabilities, used only to report the probability of
        # the chosen token.
        softmax_logits = mx.softmax(logits)
        if temp == 0:
            token = mx.argmax(logits, axis=-1)
        else:
            if top_p > 0 and top_p < 1.0:
                if (
                    logits.dtype == mx.bfloat16
                ):  # workaround for being unable to load kernel contiguous_scan_inclusive_sum_bfloat16_bfloat16
                    logits = logits.astype(mx.float32)
                probs = mx.softmax(logits / temp, axis=-1)
                # Sort in descending probability order.
                sorted_probs = mx.sort(probs)[::-1]
                sorted_indices = mx.argsort(probs)[::-1]
                cumulative_probs = mx.cumsum(sorted_probs, axis=-1)
                # Keep only the head of the distribution whose cumulative
                # probability covers top_p; zero out the tail.
                top_probs = mx.where(
                    cumulative_probs > 1 - top_p,
                    sorted_probs,
                    mx.zeros_like(sorted_probs),
                )
                sorted_token = mx.random.categorical(mx.log(top_probs))
                token = sorted_indices.squeeze(0)[sorted_token]
            else:
                token = mx.random.categorical(logits * (1 / temp))

        prob = softmax_logits[0, token]
        return token, prob

    if repetition_penalty and (
        repetition_penalty < 0 or not isinstance(repetition_penalty, float)
    ):
        raise ValueError(
            f"repetition_penalty must be a non-negative float, got {repetition_penalty}"
        )

    y = prompt
    cache = None

    # Sliding window of recent token ids used by the repetition penalty.
    # NOTE(review): ``apply_repetition_penalty`` is defined elsewhere in
    # the module.
    repetition_context = prompt.tolist()

    if repetition_context_size:
        repetition_context = repetition_context[-repetition_context_size:]

    while True:
        logits, cache = model(y[None], cache=cache)
        # Only the last position's logits matter for the next token.
        logits = logits[:, -1, :]

        if repetition_penalty:
            logits = apply_repetition_penalty(
                logits, repetition_context, repetition_penalty
            )
            y, prob = sample(logits)
            repetition_context.append(y.item())
        else:
            y, prob = sample(logits)

        if repetition_context_size:
            if len(repetition_context) > repetition_context_size:
                repetition_context = repetition_context[-repetition_context_size:]
        yield y, prob
The provided code snippet includes necessary dependencies for implementing the `generate` function. Write a Python function `def generate( model: nn.Module, tokenizer: PreTrainedTokenizer, prompt: str, temp: float = 0.0, max_tokens: int = 100, verbose: bool = False, formatter: Optional[Callable] = None, repetition_penalty: Optional[float] = None, repetition_context_size: Optional[int] = None, top_p: float = 1.0, ) -> str` to solve the following problem:
Generate text from the model. Args: model (nn.Module): The language model. tokenizer (PreTrainedTokenizer): The tokenizer. prompt (str): The string prompt. temp (float): The temperature for sampling (default 0). max_tokens (int): The maximum number of tokens (default 100). verbose (bool): If ``True``, print tokens and timing information (default ``False``). formatter (Optional[Callable]): A function which takes a token and a probability and displays it. repetition_penalty (float, optional): The penalty factor for repeating tokens. repetition_context_size (int, optional): The number of tokens to consider for repetition penalty.
Here is the function:
def generate(
    model: nn.Module,
    tokenizer: PreTrainedTokenizer,
    prompt: str,
    temp: float = 0.0,
    max_tokens: int = 100,
    verbose: bool = False,
    formatter: Optional[Callable] = None,
    repetition_penalty: Optional[float] = None,
    repetition_context_size: Optional[int] = None,
    top_p: float = 1.0,
) -> str:
    """
    Generate text from the model.

    Args:
        model (nn.Module): The language model.
        tokenizer (PreTrainedTokenizer): The tokenizer.
        prompt (str): The string prompt.
        temp (float): The temperature for sampling (default 0).
        max_tokens (int): The maximum number of tokens (default 100).
        verbose (bool): If ``True``, print tokens and timing information
            (default ``False``).
        formatter (Optional[Callable]): A function which takes a token and a
            probability and displays it.
        repetition_penalty (float, optional): The penalty factor for repeating tokens.
        repetition_context_size (int, optional): The number of tokens to consider for repetition penalty.

    Returns:
        str: The decoded generation, with replacement characters stripped.
    """
    if verbose:
        print("=" * 10)
        print("Prompt:", prompt)

    prompt_tokens = mx.array(tokenizer.encode(prompt))

    tic = time.perf_counter()
    tokens = []
    skip = 0
    # Incomplete multi-byte sequences decode to U+FFFD; printing is held
    # back until the character completes.
    REPLACEMENT_CHAR = "\ufffd"

    for (token, prob), n in zip(
        generate_step(
            prompt_tokens,
            model,
            temp,
            repetition_penalty,
            repetition_context_size,
            top_p,
        ),
        range(max_tokens),
    ):
        if token == tokenizer.eos_token_id:
            break
        if n == 0:
            # First sampled token marks the end of prompt processing.
            prompt_time = time.perf_counter() - tic
            tic = time.perf_counter()

        tokens.append(token.item())

        if verbose:
            s = tokenizer.decode(tokens)
            if formatter:
                formatter(s[skip:], prob.item())
                skip = len(s)
            elif REPLACEMENT_CHAR not in s:
                print(s[skip:], end="", flush=True)
                skip = len(s)

    token_count = len(tokens)
    token_string = tokenizer.decode(tokens).replace(REPLACEMENT_CHAR, "")

    if verbose:
        print(token_string[skip:], flush=True)
        gen_time = time.perf_counter() - tic
        print("=" * 10)
        if token_count == 0:
            print("No tokens generated for this prompt")
            return
        prompt_tps = prompt_tokens.size / prompt_time
        gen_tps = (token_count - 1) / gen_time
        print(f"Prompt: {prompt_tps:.3f} tokens-per-sec")
        print(f"Generation: {gen_tps:.3f} tokens-per-sec")

    return token_string
17,789 | import copy
import glob
import importlib
import json
import logging
import shutil
import time
from pathlib import Path
from textwrap import dedent
from typing import Any, Callable, Dict, Generator, List, Optional, Tuple, Union
import mlx.core as mx
import mlx.nn as nn
from huggingface_hub import snapshot_download
from mlx.utils import tree_flatten
from transformers import AutoConfig, AutoTokenizer, PreTrainedTokenizer
from .tuner.utils import apply_lora_layers
def get_model_path(path_or_hf_repo: str, revision: Optional[str] = None) -> Path:
def fetch_from_hub(
model_path: Path, lazy: bool = False
) -> Tuple[nn.Module, dict, PreTrainedTokenizer]:
def upload_to_hub(path: str, upload_repo: str, hf_path: str):
def save_weights(
save_path: Union[str, Path],
weights: Dict[str, Any],
*,
donate_weights: bool = False,
) -> None:
def quantize_model(
model: nn.Module, config: dict, q_group_size: int, q_bits: int
) -> Tuple:
def save_config(
config: dict,
config_path: Union[str, Path],
) -> None:
def convert(
    hf_path: str,
    mlx_path: str = "mlx_model",
    quantize: bool = False,
    q_group_size: int = 64,
    q_bits: int = 4,
    dtype: str = "float16",
    upload_repo: str = None,
    revision: Optional[str] = None,
):
    """Convert a Hugging Face model to MLX format and save it.

    Args:
        hf_path (str): Local path or HF repo id of the source model.
        mlx_path (str): Output directory for the converted model.
        quantize (bool): If True, quantize using ``q_group_size``/``q_bits``.
        dtype (str): Parameter dtype when not quantizing.
        upload_repo (str): If given, upload the result to this HF repo.
        revision (Optional[str]): Source revision (branch, tag, or commit).
    """
    print("[INFO] Loading")
    model_path = get_model_path(hf_path, revision=revision)
    model, config, tokenizer = fetch_from_hub(model_path, lazy=True)

    weights = dict(tree_flatten(model.parameters()))
    # Quantization works on float16 inputs; otherwise honor the requested dtype.
    dtype = mx.float16 if quantize else getattr(mx, dtype)
    weights = {k: v.astype(dtype) for k, v in weights.items()}

    if quantize:
        print("[INFO] Quantizing")
        model.load_weights(list(weights.items()))
        weights, config = quantize_model(model, config, q_group_size, q_bits)

    if isinstance(mlx_path, str):
        mlx_path = Path(mlx_path)

    del model
    save_weights(mlx_path, weights, donate_weights=True)

    # Copy any custom modeling code shipped alongside the checkpoint.
    py_files = glob.glob(str(model_path / "*.py"))
    for file in py_files:
        shutil.copy(file, mlx_path)

    tokenizer.save_pretrained(mlx_path)
    save_config(config, config_path=mlx_path / "config.json")

    if upload_repo is not None:
        upload_to_hub(mlx_path, upload_repo, hf_path)
17,790 | import argparse
import glob
import json
import shutil
from pathlib import Path
from typing import Any, Dict, Union
from mlx.utils import tree_flatten, tree_unflatten
from .tuner.lora import LoRALinear
from .tuner.utils import apply_lora_layers, dequantize
from .utils import (
fetch_from_hub,
get_model_path,
save_config,
save_weights,
upload_to_hub,
)
def parse_arguments() -> argparse.Namespace:
    """Build and evaluate the CLI arguments for fusing LoRA adapters."""
    parser = argparse.ArgumentParser(description="LoRA or QLoRA finetuning.")
    options = [
        (
            "--model",
            {
                "default": "mlx_model",
                "help": "The path to the local model directory or Hugging Face repo.",
            },
        ),
        (
            "--save-path",
            {
                "default": "lora_fused_model",
                "help": "The path to save the fused model.",
            },
        ),
        (
            "--adapter-file",
            {
                "type": str,
                "default": "adapters.npz",
                "help": "Path to the trained adapter weights (npz or safetensors).",
            },
        ),
        (
            "--hf-path",
            {
                "type": str,
                "default": None,
                "help": "Path to the original Hugging Face model. Required for upload if --model is a local directory.",
            },
        ),
        (
            "--upload-repo",
            {
                "type": str,
                "default": None,
                "help": "The Hugging Face repo to upload the model to.",
            },
        ),
        (
            "--de-quantize",
            {
                "action": "store_true",
                "help": "Generate a de-quantized model.",
            },
        ),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
17,791 | import argparse
from .utils import convert
The provided code snippet includes necessary dependencies for implementing the `configure_parser` function. Write a Python function `def configure_parser() -> argparse.ArgumentParser` to solve the following problem:
Configures and returns the argument parser for the script. Returns: argparse.ArgumentParser: Configured argument parser.
Here is the function:
def configure_parser() -> argparse.ArgumentParser:
    """Build the argument parser for the HF-to-MLX conversion script.

    Returns:
        argparse.ArgumentParser: Configured argument parser.
    """
    parser = argparse.ArgumentParser(
        description="Convert Hugging Face model to MLX format"
    )
    add = parser.add_argument
    add("--hf-path", type=str, help="Path to the Hugging Face model.")
    add("--mlx-path", type=str, default="mlx_model", help="Path to save the MLX model.")
    add("-q", "--quantize", help="Generate a quantized model.", action="store_true")
    add("--q-group-size", help="Group size for quantization.", type=int, default=64)
    add("--q-bits", help="Bits per weight for quantization.", type=int, default=4)
    add(
        "--dtype",
        help="Type to save the parameters, ignored if -q is given.",
        type=str,
        choices=["float16", "bfloat16", "float32"],
        default="float16",
    )
    add(
        "--upload-repo",
        help="The Hugging Face repo to upload the model to.",
        type=str,
        default=None,
    )
    return parser
17,792 | import os
from typing import Dict
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_unflatten
from .lora import LoRALinear
The provided code snippet includes necessary dependencies for implementing the `dequantize` function. Write a Python function `def dequantize(model: nn.Module) -> nn.Module` to solve the following problem:
Dequantize the quantized linear layers in the model. Args: model (nn.Module): The model with quantized linear layers. Returns: nn.Module: The model with dequantized layers.
Here is the function:
def dequantize(model: nn.Module) -> nn.Module:
    """
    Dequantize the quantized linear layers in the model.

    Args:
        model (nn.Module): The model with quantized linear layers.

    Returns:
        nn.Module: The model with dequantized layers.
    """
    replacements = []
    for name, module in model.named_modules():
        if not isinstance(module, nn.QuantizedLinear):
            continue
        has_bias = "bias" in module
        # Recover a full-precision (float16) weight matrix from the
        # packed quantized representation.
        dense_weight = mx.dequantize(
            module.weight,
            module.scales,
            module.biases,
            module.group_size,
            module.bits,
        ).astype(mx.float16)
        out_dims, in_dims = dense_weight.shape
        replacement = nn.Linear(in_dims, out_dims, bias=has_bias)
        replacement.weight = dense_weight
        if has_bias:
            replacement.bias = module.bias
        replacements.append((name, replacement))
    if replacements:
        model.update_modules(tree_unflatten(replacements))
    return model
return model | Dequantize the quantized linear layers in the model. Args: model (nn.Module): The model with quantized linear layers. Returns: nn.Module: The model with dequantized layers. |
17,793 | import os
from typing import Dict
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_unflatten
from .lora import LoRALinear
class LoRALinear(nn.Module):
    """Linear layer augmented with a trainable low-rank (LoRA) update.

    Wraps a regular or quantized ``nn.Linear`` (kept frozen) and adds the
    low-rank correction ``scale * (x @ lora_a) @ lora_b`` to its output.
    """

    # NOTE(review): called as LoRALinear.from_linear(lin, ...) with the layer
    # as the first argument (see linear_to_lora_layers); upstream presumably
    # marks this @staticmethod and the decorator was lost in extraction.
    # Accessing it on the class still behaves as a plain function in Python 3.
    def from_linear(
        linear: nn.Linear,
        r: int = 8,
        alpha: float = 16,
        dropout: float = 0.0,
        scale: float = 10.0,
    ):
        # TODO remove when input_dims and output_dims are attributes
        # on linear and quantized linear
        output_dims, input_dims = linear.weight.shape
        if isinstance(linear, nn.QuantizedLinear):
            # Quantized weights pack 32 // bits values per stored element.
            input_dims *= 32 // linear.bits
        lora_lin = LoRALinear(
            input_dims=input_dims,
            output_dims=output_dims,
            r=r,
            alpha=alpha,
            dropout=dropout,
            scale=scale,
        )
        # Replace the freshly initialized base layer with the given one.
        lora_lin.linear = linear
        return lora_lin

    def to_linear(self, de_quantize: bool = False):
        """Fuse the LoRA update into a single Linear.

        Args:
            de_quantize: If True, return a plain (dequantized) Linear even
                when the wrapped layer was quantized.
        """
        linear = self.linear
        bias = "bias" in linear
        weight = linear.weight
        is_quantized = isinstance(linear, nn.QuantizedLinear)

        # Use the same type as the linear weight if not quantized
        dtype = weight.dtype

        if is_quantized:
            dtype = mx.float16
            weight = mx.dequantize(
                weight,
                linear.scales,
                linear.biases,
                linear.group_size,
                linear.bits,
            )
        output_dims, input_dims = weight.shape
        fused_linear = nn.Linear(input_dims, output_dims, bias=bias)

        # Fused weight: W' = W + scale * B^T A^T, computed in `dtype`.
        lora_b = (self.scale * self.lora_b.T).astype(dtype)
        lora_a = self.lora_a.T.astype(dtype)
        fused_linear.weight = weight + lora_b @ lora_a
        if bias:
            fused_linear.bias = linear.bias

        if is_quantized and not de_quantize:
            # Re-quantize the fused layer with the original parameters.
            fused_linear = nn.QuantizedLinear.from_linear(
                fused_linear,
                linear.group_size,
                linear.bits,
            )

        return fused_linear

    def __init__(
        self,
        input_dims: int,
        output_dims: int,
        r: int = 8,
        alpha: float = 16,
        dropout: float = 0.0,
        scale: float = 10.0,
        bias: bool = False,
    ):
        super().__init__()

        # Regular linear layer weights
        self.linear = nn.Linear(input_dims, output_dims, bias=bias)
        self.dropout = nn.Dropout(p=dropout)

        # Scale for low-rank update
        self.scale = scale * (alpha / r)

        # Low rank lora weights
        # NOTE(review): `math.sqrt` requires `import math` at file scope —
        # the import is not visible in this excerpt; confirm it exists.
        scale = 1 / math.sqrt(input_dims)
        self.lora_a = mx.random.uniform(
            low=-scale,
            high=scale,
            shape=(input_dims, r),
        )
        self.lora_b = mx.zeros(shape=(r, output_dims))

    def __call__(self, x):
        # Run the frozen base layer in its native dtype.
        dtype = self.linear.weight.dtype
        if isinstance(self.linear, nn.QuantizedLinear):
            dtype = self.linear.scales.dtype
        y = self.linear(x.astype(dtype))
        # Low-rank correction, with dropout applied to the input.
        z = (self.dropout(x) @ self.lora_a) @ self.lora_b
        return y + self.scale * z
The provided code snippet includes necessary dependencies for implementing the `remove_lora_layers` function. Write a Python function `def remove_lora_layers(model: nn.Module) -> nn.Module` to solve the following problem:
Remove the LoRA layers from the model. Args: model (nn.Module): The model with LoRA layers. Returns: nn.Module: The model without LoRA layers.
Here is the function:
def remove_lora_layers(model: nn.Module) -> nn.Module:
"""
Remove the LoRA layers from the model.
Args:
model (nn.Module): The model with LoRA layers.
Returns:
nn.Module: The model without LoRA layers.
"""
reset_layers = []
for name, module in model.named_modules():
if isinstance(module, LoRALinear):
reset_layers.append((name, module.linear))
if len(reset_layers) > 0:
model.update_modules(tree_unflatten(reset_layers))
return model | Remove the LoRA layers from the model. Args: model (nn.Module): The model with LoRA layers. Returns: nn.Module: The model without LoRA layers. |
17,794 | import time
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
import mlx.core as mx
import mlx.nn as nn
import numpy as np
from mlx.utils import tree_flatten
def iterate_batches(dataset, tokenizer, batch_size, max_seq_length, train=False):
    """Yield batches of tokenized, padded sequences from ``dataset``.

    Args:
        dataset: Indexable collection of text samples.
        tokenizer: Tokenizer providing an ``encode`` method.
        batch_size (int): Number of sequences per batch.
        max_seq_length (int): Sequences longer than this are truncated.
        train (bool): If True, cycle over the data indefinitely; otherwise
            make a single pass.

    Yields:
        (inputs, targets, lengths): the batch shifted by one token for
        next-token prediction, plus the (possibly truncated) lengths.

    Raises:
        ValueError: If the dataset has fewer than ``batch_size`` samples.
            Previously this case silently yielded nothing (eval) or spun
            forever without yielding (train=True).
    """
    if batch_size > len(dataset):
        raise ValueError(
            f"Dataset must have at least batch_size={batch_size} "
            f"examples but only has {len(dataset)}."
        )

    # Sort by length so each batch groups similarly sized sequences,
    # minimizing padding waste.
    idx = sorted(range(len(dataset)), key=lambda idx: len(dataset[idx]))

    # Make the batches:
    batch_idx = [
        idx[i : i + batch_size] for i in range(0, len(idx) - batch_size + 1, batch_size)
    ]

    while True:
        # New random batch order each epoch.
        indices = np.random.permutation(len(batch_idx))
        for i in indices:
            # Encode batch
            batch = [tokenizer.encode(dataset[j]) for j in batch_idx[i]]
            lengths = [len(x) for x in batch]
            if max(lengths) > max_seq_length:
                print(
                    f"[WARNING] Some sequences are longer than {max_seq_length} tokens. "
                    f"The longest sentence {max(lengths)} will be truncated to {max_seq_length}. "
                    "Consider pre-splitting your data to save memory."
                )

            # Pad to the nearest multiple of 8 or the maximum length
            pad_to = 8
            max_length_in_batch = pad_to * ((max(lengths) + pad_to - 1) // pad_to)
            max_length_in_batch = min(max_length_in_batch, max_seq_length)

            batch_arr = np.zeros((batch_size, max_length_in_batch), np.int32)

            for j in range(batch_size):
                truncated_length = min(lengths[j], max_seq_length)
                batch_arr[j, :truncated_length] = batch[j][:truncated_length]
                # Update lengths to match truncated lengths
                lengths[j] = truncated_length
            batch = mx.array(batch_arr)
            # Inputs drop the last token; targets drop the first.
            yield batch[:, :-1], batch[:, 1:], mx.array(lengths)

        if not train:
            break
17,795 | import argparse
import json
import time
import uuid
import warnings
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import List, Literal, NamedTuple, Optional, Union
import mlx.core as mx
import mlx.nn as nn
from transformers import PreTrainedTokenizer
from .utils import generate_step, load
class StopCondition(NamedTuple):
    # Whether token generation should stop now.
    stop_met: bool
    # How many trailing tokens to drop from the output when a stop
    # sequence was matched (0 when nothing needs trimming).
    trim_length: int
The provided code snippet includes necessary dependencies for implementing the `stopping_criteria` function. Write a Python function `def stopping_criteria( tokens: List[int], stop_id_sequences: List[List[int]], eos_token_id: Union[int, None], ) -> StopCondition` to solve the following problem:
Determines whether the token generation should stop based on predefined conditions. Args: tokens (List[int]): The current sequence of generated tokens. stop_id_sequences (List[List[int]]): A list of integer lists, each representing a sequence of token IDs. If the end of the `tokens` list matches any of these sequences, the generation should stop. eos_token_id (Union[int, None]): The token ID that represents the end-of-sequence. If the last token in `tokens` matches this, the generation should stop. Returns: StopCondition: A named tuple indicating whether the stop condition has been met (`stop_met`) and how many tokens should be trimmed from the end if it has (`trim_length`).
Here is the function:
def stopping_criteria(
    tokens: List[int],
    stop_id_sequences: List[List[int]],
    eos_token_id: Union[int, None],
) -> StopCondition:
    """Decide whether token generation should stop.

    Generation stops when the last generated token equals ``eos_token_id``
    (trim 1 token), or when the tail of ``tokens`` matches any sequence in
    ``stop_id_sequences`` (trim the matched sequence's length).

    Returns:
        StopCondition: ``stop_met`` plus the number of trailing tokens to
        discard from the output.
    """
    if tokens and tokens[-1] == eos_token_id:
        return StopCondition(stop_met=True, trim_length=1)

    for stop_sequence in stop_id_sequences:
        tail_len = len(stop_sequence)
        if len(tokens) >= tail_len and tokens[-tail_len:] == stop_sequence:
            return StopCondition(stop_met=True, trim_length=tail_len)

    return StopCondition(stop_met=False, trim_length=0)
17,796 | import argparse
import json
import time
import uuid
import warnings
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import List, Literal, NamedTuple, Optional, Union
import mlx.core as mx
import mlx.nn as nn
from transformers import PreTrainedTokenizer
from .utils import generate_step, load
def convert_chat(messages: List[dict], role_mapping: Optional[dict] = None):
    """Flatten chat messages into a single prompt string.

    Each message contributes ``<role prefix><content><stop>``; the prompt
    ends with the assistant prefix (trailing whitespace stripped) so the
    model continues in the assistant role. Unknown roles get no prefix.
    """
    if role_mapping is None:
        role_mapping = {
            "system_prompt": "A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.",
            "system": "ASSISTANT's RULE: ",
            "user": "USER: ",
            "assistant": "ASSISTANT: ",
            "stop": "\n",
        }

    pieces = []
    for message in messages:
        pieces.append(role_mapping.get(message["role"], ""))
        pieces.append(message.get("content", ""))
        pieces.append(role_mapping.get("stop", ""))
    pieces.append(role_mapping.get("assistant", ""))
    return "".join(pieces).rstrip()
17,797 | import argparse
import json
import time
import uuid
import warnings
from http.server import BaseHTTPRequestHandler, HTTPServer
from typing import List, Literal, NamedTuple, Optional, Union
import mlx.core as mx
import mlx.nn as nn
from transformers import PreTrainedTokenizer
from .utils import generate_step, load
class APIHandler(BaseHTTPRequestHandler):
def __init__(self, *args, **kwargs):
def _set_completion_headers(self, status_code: int = 200):
def _set_stream_headers(self, status_code: int = 200):
def do_OPTIONS(self):
def do_POST(self):
def generate_response(
self,
text: str,
finish_reason: Union[Literal["length", "stop"], None],
prompt_token_count: Optional[int] = None,
completion_token_count: Optional[int] = None,
) -> dict:
def handle_completion(
self,
prompt: mx.array,
stop_id_sequences: List[List[int]],
):
def handle_stream(
self,
prompt: mx.array,
stop_id_sequences: List[List[int]],
):
def handle_chat_completions(self) -> mx.array:
def handle_text_completions(self) -> mx.array:
def run(host: str, port: int, server_class=HTTPServer, handler_class=APIHandler):
    """Start a blocking HTTP server that serves the completions API.

    Args:
        host: Interface to bind to.
        port: TCP port to listen on.
        server_class: HTTP server implementation (default: HTTPServer).
        handler_class: Request handler class (default: APIHandler).
    """
    server_address = (host, port)
    httpd = server_class(server_address, handler_class)
    # Surface that this server performs only minimal validation.
    warnings.warn(
        "mlx_lm.server is not recommended for production as "
        "it only implements basic security checks."
    )
    print(f"Starting httpd at {host} on port {port}...")
    httpd.serve_forever()  # blocks until the process is interrupted
17,798 | import argparse
import json
import math
import re
import types
from pathlib import Path
import mlx.optimizers as optim
import numpy as np
import yaml
from mlx.utils import tree_flatten
from .tuner.trainer import TrainingArgs, TrainingCallback, evaluate, train
from .tuner.utils import linear_to_lora_layers
from .utils import load
def build_parser():
    """Build the argparse parser for LoRA / QLoRA fine-tuning.

    Most options default to None so that values from a YAML config file
    (``--config``) can fill them in — presumably merged by the caller;
    confirm against the entry point. Argument registration order is
    preserved because it determines ``--help`` output.
    """
    parser = argparse.ArgumentParser(description="LoRA or QLoRA finetuning.")
    parser.add_argument(
        "--model",
        help="The path to the local model directory or Hugging Face repo.",
    )
    # Training args
    parser.add_argument(
        "--train",
        action="store_true",
        help="Do training",
    )
    parser.add_argument(
        "--data",
        type=str,
        help="Directory with {train, valid, test}.jsonl files",
    )
    parser.add_argument(
        "--lora-layers",
        type=int,
        help="Number of layers to fine-tune",
    )
    parser.add_argument("--batch-size", type=int, help="Minibatch size.")
    parser.add_argument("--iters", type=int, help="Iterations to train for.")
    parser.add_argument(
        "--val-batches",
        type=int,
        help="Number of validation batches, -1 uses the entire validation set.",
    )
    parser.add_argument("--learning-rate", type=float, help="Adam learning rate.")
    parser.add_argument(
        "--steps-per-report",
        type=int,
        help="Number of training steps between loss reporting.",
    )
    parser.add_argument(
        "--steps-per-eval",
        type=int,
        help="Number of training steps between validations.",
    )
    parser.add_argument(
        "--resume-adapter-file",
        type=str,
        help="Load path to resume training with the given adapter weights.",
    )
    parser.add_argument(
        "--adapter-file",
        type=str,
        help="Save/load path for the trained adapter weights.",
    )
    parser.add_argument(
        "--save-every",
        type=int,
        help="Save the model every N iterations.",
    )
    # Evaluation args
    parser.add_argument(
        "--test",
        action="store_true",
        help="Evaluate on the test set after training",
    )
    parser.add_argument(
        "--test-batches",
        type=int,
        help="Number of test set batches, -1 uses the entire test set.",
    )
    parser.add_argument(
        "--max-seq-length",
        type=int,
        help="Maximum sequence length.",
    )
    parser.add_argument(
        "-c",
        "--config",
        default=None,
        help="A YAML configuration file with the training options",
    )
    parser.add_argument(
        "--grad-checkpoint",
        action="store_true",
        help="Use gradient checkpointing to reduce memory use.",
    )
    parser.add_argument("--seed", type=int, default=0, help="The PRNG seed")
    return parser
17,799 | import argparse
import json
import math
import re
import types
from pathlib import Path
import mlx.optimizers as optim
import numpy as np
import yaml
from mlx.utils import tree_flatten
from .tuner.trainer import TrainingArgs, TrainingCallback, evaluate, train
from .tuner.utils import linear_to_lora_layers
from .utils import load
def load_dataset(args):
    """Load the train/valid/test JSONL splits from ``args.data``.

    Returns:
        (train, valid, test) Dataset triple.

    Raises:
        ValueError: If a split required by the requested mode
            (``args.train`` / ``args.test``) is missing or empty.
    """
    data_dir = Path(args.data)
    train, valid, test = (
        Dataset(data_dir / f"{split}.jsonl") for split in ("train", "valid", "test")
    )
    if args.train:
        if len(train) == 0:
            raise ValueError(
                "Training set not found or empty. Must provide training set for fine-tuning."
            )
        if len(valid) == 0:
            raise ValueError(
                "Validation set not found or empty. Must provide validation set for fine-tuning."
            )
    if args.test and len(test) == 0:
        raise ValueError(
            "Test set not found or empty. Must provide test set for evaluation."
        )
    return train, valid, test
def print_trainable_parameters(model):
    """Print the trainable parameter count and its share of all parameters."""

    def _millions(params):
        # Total element count across the (flattened) parameter tree, in M.
        return sum(v.size for _, v in tree_flatten(params)) / 10**6

    total_p = _millions(model.parameters())
    trainable_p = _millions(model.trainable_parameters())
    print(
        f"Trainable parameters: {(trainable_p * 100 / total_p):.3f}% "
        f"({trainable_p:.3f}M/{total_p:.3f}M)"
    )
@dataclass
class TrainingArgs:
    """Hyper-parameters controlling the fine-tuning loop.

    The ``@dataclass`` decorator is required: without it the ``field(...)``
    defaults are bare Field objects and the keyword constructor used by
    callers (e.g. ``TrainingArgs(batch_size=...)``) does not exist. The
    ``metadata["help"]`` strings document each option.
    """

    lora_layers: int = field(
        default=16, metadata={"help": "Number of layers to fine-tune"}
    )
    batch_size: int = field(default=4, metadata={"help": "Minibatch size."})
    iters: int = field(default=100, metadata={"help": "Iterations to train for."})
    val_batches: int = field(
        default=25,
        metadata={
            "help": "Number of validation batches, -1 uses the entire validation set."
        },
    )
    steps_per_report: int = field(
        default=10,
        metadata={"help": "Number of training steps between loss reporting."},
    )
    steps_per_eval: int = field(
        default=200, metadata={"help": "Number of training steps between validations."}
    )
    steps_per_save: int = field(
        default=100, metadata={"help": "Save the model every number steps"}
    )
    max_seq_length: int = field(
        default=2048, metadata={"help": "Maximum sequence length."}
    )
    adapter_file: str = field(
        default="adapter.npz",
        metadata={"help": "Save/load path for the trained adapter weights."},
    )
    grad_checkpoint: bool = field(
        default=False,
        metadata={"help": "Use gradient checkpointing to reduce memory use."},
    )
def evaluate(
    model,
    dataset,
    tokenizer,
    batch_size,
    num_batches,
    max_seq_length=2048,
    loss: callable = default_loss,
    iterate_batches: callable = iterate_batches,
):
    """Compute the per-token average loss over up to ``num_batches`` batches.

    Args:
        model: Model passed through to ``loss``.
        dataset: Data source for ``iterate_batches``.
        tokenizer: Tokenizer for ``iterate_batches``.
        batch_size: Sequences per batch.
        num_batches: Number of batches to evaluate; -1 evaluates the whole
            dataset (``iterate_batches`` makes a single pass when not
            training).
        max_seq_length: Truncation length for sequences.
        loss: Callable returning (mean_loss, token_count) for a batch.
        iterate_batches: Batch generator.

    Returns:
        Token-weighted mean loss over the evaluated batches.
    """
    all_losses = []
    ntokens = 0

    # -1 means "use the entire set": pair batches with an endless counter so
    # zip() stops only when the batch iterator is exhausted. Previously
    # range(-1) was empty, so num_batches=-1 evaluated nothing and then
    # divided by zero — contradicting the documented CLI semantics.
    index_iterator = iter(range(num_batches)) if num_batches != -1 else iter(int, 1)

    for it, batch in zip(
        index_iterator,
        iterate_batches(
            dataset=dataset,
            tokenizer=tokenizer,
            batch_size=batch_size,
            max_seq_length=max_seq_length,
        ),
    ):
        losses, toks = loss(model, *batch)
        # Weight each batch's mean loss by its token count so the final
        # average is per-token, not per-batch.
        all_losses.append((losses * toks).item())
        ntokens += toks.item()

    return np.sum(all_losses) / ntokens
class TrainingCallback:
    """Hooks invoked by the training loop to report progress.

    Subclass and override the methods to log metrics; the default
    implementations are no-ops.
    """

    def on_train_loss_report(self, train_info: dict):
        """Called to report training loss at specified intervals."""
        pass

    def on_val_loss_report(self, val_info: dict):
        """Called to report validation loss at specified intervals or the beginning."""
        pass
def train(
    model,
    tokenizer,
    optimizer,
    train_dataset,
    val_dataset,
    args: TrainingArgs = TrainingArgs(),
    loss: callable = default_loss,
    iterate_batches: callable = iterate_batches,
    training_callback: TrainingCallback = None,
):
    """Run the fine-tuning loop for ``args.iters`` steps.

    Reports training loss every ``args.steps_per_report`` steps, validates
    every ``args.steps_per_eval`` steps (and at step 0), and saves adapter
    checkpoints every ``args.steps_per_save`` steps plus a final save at
    the end.

    Args:
        model: Model to train (updated in place).
        tokenizer: Passed to ``iterate_batches``.
        optimizer: Optimizer applied to the loss gradients.
        train_dataset / val_dataset: Data sources.
        args: TrainingArgs hyper-parameters.
        loss: Callable returning (mean_loss, token_count) for a batch.
        iterate_batches: Batch generator.
        training_callback: Optional progress hooks.
    """
    print(f"Starting training..., iters: {args.iters}")

    def checkpoints_path(adapter_file) -> str:
        # Checkpoints live in a "checkpoints" dir next to the adapter file.
        checkpoints_path = Path("checkpoints")
        if Path(adapter_file).parent:
            checkpoints_path = Path(adapter_file).parent / "checkpoints"

        checkpoints_path.mkdir(parents=True, exist_ok=True)

        return str(checkpoints_path)

    # Create checkpoints directory if it does not exist
    adapter_path = checkpoints_path(args.adapter_file)

    if args.grad_checkpoint:
        # NOTE(review): only the first layer is passed to grad_checkpoint;
        # confirm that helper applies it to all layers.
        grad_checkpoint(model.layers[0])

    # Evaluated together after every step so parameter/optimizer updates
    # are materialized.
    state = [model.state, optimizer.state]

    def step(batch):
        # Forward and backward pass
        (lvalue, toks), grad = loss_value_and_grad(model, *batch)

        # Model update
        optimizer.update(model, grad)

        return lvalue, toks

    loss_value_and_grad = nn.value_and_grad(model, loss)

    losses = []
    n_tokens = 0
    trained_tokens = 0

    # Main training loop
    start = time.perf_counter()
    for it, batch in zip(
        range(args.iters),
        iterate_batches(
            dataset=train_dataset,
            tokenizer=tokenizer,
            batch_size=args.batch_size,
            max_seq_length=args.max_seq_length,
            train=True,
        ),
    ):
        lvalue, toks = step(batch)
        mx.eval(state, lvalue, toks)

        # Record loss
        losses.append(lvalue.item())
        n_tokens += toks.item()

        # Report training loss if needed
        if (it + 1) % args.steps_per_report == 0:
            train_loss = np.mean(losses)

            stop = time.perf_counter()
            learning_rate = optimizer.learning_rate.item()
            it_sec = args.steps_per_report / (stop - start)
            tokens_sec = float(n_tokens) / (stop - start)
            trained_tokens += n_tokens
            peak_mem = mx.metal.get_peak_memory() / 2**30
            print(
                f"Iter {it + 1}: Train loss {train_loss:.3f}, "
                f"Learning Rate {learning_rate:.3e}, "
                f"It/sec {it_sec:.3f}, "
                f"Tokens/sec {tokens_sec:.3f}, "
                f"Trained Tokens {trained_tokens}, "
                f"Peak mem {peak_mem:.3f} GB"
            )

            if training_callback is not None:
                train_info = {
                    "iteration": it + 1,
                    "train_loss": train_loss,
                    "learning_rate": learning_rate,
                    "iterations_per_second": it_sec,
                    "tokens_per_second": tokens_sec,
                    "trained_tokens": trained_tokens,
                    "peak_memory": peak_mem,
                }
                training_callback.on_train_loss_report(train_info)

            # Reset the report window.
            losses = []
            n_tokens = 0
            start = time.perf_counter()

        # Report validation loss if needed
        if it == 0 or (it + 1) % args.steps_per_eval == 0:
            stop = time.perf_counter()
            val_loss = evaluate(
                model=model,
                dataset=val_dataset,
                loss=loss,
                tokenizer=tokenizer,
                batch_size=args.batch_size,
                num_batches=args.val_batches,
                max_seq_length=args.max_seq_length,
                iterate_batches=iterate_batches,
            )
            val_time = time.perf_counter() - stop
            print(
                f"Iter {it + 1}: "
                f"Val loss {val_loss:.3f}, "
                f"Val took {val_time:.3f}s"
            )

            if training_callback is not None:
                val_info = {
                    "iteration": it + 1,
                    "val_loss": val_loss,
                    "val_time": val_time,
                }
                training_callback.on_val_loss_report(val_info)

            # Exclude validation time from the next throughput window.
            start = time.perf_counter()

        # Save adapter weights if needed
        if (it + 1) % args.steps_per_save == 0:
            checkpoint_adapter_file = (
                f"{adapter_path}/{it + 1}_{Path(args.adapter_file).name}"
            )
            save_adapter(model=model, adapter_file=checkpoint_adapter_file)
            print(f"Iter {it + 1}: Saved adapter weights to {checkpoint_adapter_file}.")

    # save final adapter weights
    save_adapter(model=model, adapter_file=args.adapter_file)
    print(f"Saved final adapter weights to {args.adapter_file}.")
def linear_to_lora_layers(
    model: nn.Module,
    num_lora_layers: int,
    config: Dict,
):
    """
    Convert some of the models linear layers to lora layers.

    Args:
        model (nn.Module): The neural network model.
        num_lora_layers (int): The number of blocks to convert to lora layers
            starting from the last layer.
        config (dict): More configuration parameters for LoRA, including the
            rank, alpha, scale, and optional layer keys.

    Raises:
        ValueError: If ``num_lora_layers`` exceeds the model depth, or the
            model type is unsupported and no explicit keys were given.
    """
    num_layers = len(model.layers)
    if num_lora_layers > num_layers:
        raise ValueError(
            f"Requested {num_lora_layers} LoRA layers "
            f"but the model only has {num_layers} layers."
        )

    to_lora = lambda lin: LoRALinear.from_linear(
        lin, r=config["rank"], alpha=config["alpha"], scale=config["scale"]
    )

    # Explicit keys in the config override the per-architecture defaults.
    keys = config.get("keys", None)
    if keys is not None:
        keys = set(keys)
    elif model.model_type in [
        "mistral",
        "llama",
        "phi",
        "mixtral",
        "stablelm",
        "qwen2",
        "gemma",
        "starcoder2",
        "cohere",
    ]:
        keys = set(["self_attn.q_proj", "self_attn.v_proj"])
        if model.model_type == "mixtral":
            keys.add("block_sparse_moe.gate")
    elif model.model_type == "olmo":
        keys = set(["att_proj"])
    elif model.model_type == "phi-msft":
        keys = set(["mixer.Wqkv", "moe.gate"])
    else:
        raise ValueError(f"Lora does not support {model.model_type}")

    # Only the last `num_lora_layers` blocks are adapted.
    for layer in model.layers[num_layers - num_lora_layers :]:
        # (Removed a dead `modules = l.named_modules()` statement whose
        # result was never used.)
        lora_layers = [(k, to_lora(m)) for k, m in layer.named_modules() if k in keys]
        layer.update_modules(tree_unflatten(lora_layers))
def load(
    path_or_hf_repo: str,
    tokenizer_config=None,
    adapter_file: Optional[str] = None,
    lazy: bool = False,
) -> Tuple[nn.Module, PreTrainedTokenizer]:
    """
    Load the model and tokenizer from a given path or a huggingface repository.

    Args:
        path_or_hf_repo (Path): The path or the huggingface repository to load the model from.
        tokenizer_config (dict, optional): Configuration parameters specifically for the tokenizer.
            Defaults to an empty dictionary. (The default is now a ``None``
            sentinel rather than a shared mutable ``{}`` — same behavior,
            without the mutable-default-argument hazard.)
        adapter_file (str, optional): Path to the adapter file. If provided, applies LoRA layers to the model.
            Defaults to None.
        lazy (bool): If False eval the model parameters to make sure they are
            loaded in memory before returning, otherwise they will be loaded
            when needed. Default: ``False``

    Returns:
        Tuple[nn.Module, PreTrainedTokenizer]: A tuple containing the loaded model and tokenizer.

    Raises:
        FileNotFoundError: If config file or safetensors are not found.
        ValueError: If model class or args class are not found.
    """
    if tokenizer_config is None:
        tokenizer_config = {}
    model_path = get_model_path(path_or_hf_repo)

    model = load_model(model_path, lazy)
    if adapter_file is not None:
        # Splice LoRA adapter weights into the base model.
        model = apply_lora_layers(model, adapter_file)
        model.eval()

    tokenizer = AutoTokenizer.from_pretrained(model_path, **tokenizer_config)
    return model, tokenizer
def run(args, training_callback: TrainingCallback = None):
    """Entry point: load a model, apply LoRA, then train and/or evaluate.

    Args:
        args: Parsed CLI/config namespace (see ``build_parser``).
        training_callback: Optional hooks for loss reporting.

    Raises:
        ValueError: If no adapter file exists after the (optional) training
            phase.
        NotImplementedError: If ``args.prompt`` is set — generation is not
            supported here.
    """
    np.random.seed(args.seed)

    print("Loading pretrained model")
    model, tokenizer = load(args.model)

    # Freeze all layers
    model.freeze()
    # Convert linear layers to lora layers and unfreeze in the process
    linear_to_lora_layers(model, args.lora_layers, args.lora_parameters)

    print_trainable_parameters(model)

    print("Loading datasets")
    train_set, valid_set, test_set = load_dataset(args)

    # Resume training the given adapters.
    if args.resume_adapter_file is not None:
        print(f"Loading pretrained adapters from {args.resume_adapter_file}")
        model.load_weights(args.resume_adapter_file, strict=False)

    if args.train:
        print("Training")
        # init training args
        training_args = TrainingArgs(
            batch_size=args.batch_size,
            iters=args.iters,
            val_batches=args.val_batches,
            steps_per_report=args.steps_per_report,
            steps_per_eval=args.steps_per_eval,
            steps_per_save=args.save_every,
            adapter_file=args.adapter_file,
            max_seq_length=args.max_seq_length,
            grad_checkpoint=args.grad_checkpoint,
        )

        model.train()
        opt = optim.Adam(learning_rate=args.learning_rate)
        # Train model
        train(
            model=model,
            tokenizer=tokenizer,
            args=training_args,
            optimizer=opt,
            train_dataset=train_set,
            val_dataset=valid_set,
            training_callback=training_callback,
        )

    # Load the LoRA adapter weights which we assume should exist by this point
    if not Path(args.adapter_file).is_file():
        raise ValueError(
            f"Adapter file {args.adapter_file} missing. "
            "Use --train to learn and save the adapters.npz."
        )
    model.load_weights(args.adapter_file, strict=False)

    if args.test:
        print("Testing")
        model.eval()
        test_loss = evaluate(
            model=model,
            dataset=test_set,
            tokenizer=tokenizer,
            batch_size=args.batch_size,
            num_batches=args.test_batches,
        )
        # Perplexity is exp of the per-token loss.
        test_ppl = math.exp(test_loss)
        print(f"Test loss {test_loss:.3f}, Test ppl {test_ppl:.3f}.")

    if args.prompt is not None:
        raise NotImplementedError(
            "Please use mlx_lm.generate with trained adapter for generation."
        )
17,800 | import argparse
import mlx.core as mx
from .utils import generate, load
DEFAULT_PROMPT = "hello"
DEFAULT_MAX_TOKENS = 100
DEFAULT_TEMP = 0.6
DEFAULT_TOP_P = 1.0
DEFAULT_SEED = 0
The provided code snippet includes necessary dependencies for implementing the `setup_arg_parser` function. Write a Python function `def setup_arg_parser()` to solve the following problem:
Set up and return the argument parser.
Here is the function:
def setup_arg_parser():
    """Set up and return the argument parser for the LLM inference CLI.

    Defaults come from the module-level DEFAULT_* constants. Argument
    registration order is preserved because it determines --help output.
    """
    parser = argparse.ArgumentParser(description="LLM inference script")
    parser.add_argument(
        "--model",
        type=str,
        default="mlx_model",
        help="The path to the local model directory or Hugging Face repo.",
    )
    parser.add_argument(
        "--adapter-file",
        type=str,
        help="Optional path for the trained adapter weights.",
    )
    parser.add_argument(
        "--trust-remote-code",
        action="store_true",
        help="Enable trusting remote code for tokenizer",
    )
    parser.add_argument(
        "--eos-token",
        type=str,
        default=None,
        help="End of sequence token for tokenizer",
    )
    parser.add_argument(
        "--prompt", default=DEFAULT_PROMPT, help="Message to be processed by the model"
    )
    parser.add_argument(
        "--max-tokens",
        "-m",
        type=int,
        default=DEFAULT_MAX_TOKENS,
        help="Maximum number of tokens to generate",
    )
    parser.add_argument(
        "--temp", type=float, default=DEFAULT_TEMP, help="Sampling temperature"
    )
    parser.add_argument(
        "--top-p", type=float, default=DEFAULT_TOP_P, help="Sampling top-p"
    )
    parser.add_argument("--seed", type=int, default=DEFAULT_SEED, help="PRNG seed")
    parser.add_argument(
        "--ignore-chat-template",
        action="store_true",
        help="Use the raw prompt without the tokenizer's chat template.",
    )
    parser.add_argument(
        "--colorize",
        action="store_true",
        help="Colorize output based on T[0] probability",
    )
    return parser
17,801 | import argparse
import mlx.core as mx
from .utils import generate, load
def colorprint(color, s):
def colorprint_by_t0(s, t0):
    """Print ``s`` colorized by the top-token probability ``t0``.

    Mapping: white for t0 > 0.95, green for t0 > 0.70, yellow for
    t0 > 0.30, red otherwise.
    """
    for threshold, color in ((0.95, "white"), (0.70, "green"), (0.30, "yellow")):
        if t0 > threshold:
            break
    else:
        color = "red"
    colorprint(color, s)
17,802 | from functools import partial
import mlx.core as mx
import mlx.nn as nn
def rms_norm(x, weight, eps):
    """RMS-normalize ``x`` over its last axis and scale by ``weight``.

    The reduction runs in float32 for numerical stability; the result is
    cast back to ``weight``'s dtype before scaling.
    """
    x32 = x.astype(mx.float32)
    inv_rms = mx.rsqrt(x32.square().mean(-1, keepdims=True) + eps)
    return weight * (x32 * inv_rms).astype(weight.dtype)
17,803 | from functools import partial
import mlx.core as mx
import mlx.nn as nn
The provided code snippet includes necessary dependencies for implementing the `ln_norm` function. Write a Python function `def ln_norm(x, eps, weight=None, bias=None)` to solve the following problem:
Layer normalization for input tensor x. Args: x (np.ndarray): Input tensor. eps (float, optional): Small value to avoid division by zero. weight (np.ndarray, optional): Weight tensor for normalization. bias (np.ndarray, optional): Bias tensor for normalization. Returns: np.ndarray: Normalized tensor.
Here is the function:
def ln_norm(x, eps, weight=None, bias=None):
    """Layer-normalize ``x`` over its last axis.

    The mean/variance reduction runs in float32 for stability, then the
    result is cast back to the input dtype before the optional affine
    transform.

    Args:
        x: Input tensor.
        eps: Small value added to the variance to avoid division by zero.
        weight: Optional per-feature scale.
        bias: Optional per-feature shift.

    Returns:
        The normalized (and optionally scaled/shifted) tensor.
    """
    orig_dtype = x.dtype
    x32 = x.astype(mx.float32)

    mean = mx.mean(x32, axis=-1, keepdims=True)
    variance = mx.var(x32, axis=-1, keepdims=True)
    normed = ((x32 - mean) * mx.rsqrt(variance + eps)).astype(orig_dtype)

    if weight is not None:
        normed = normed * weight
    if bias is not None:
        normed = normed + bias
    return normed
17,804 | from dataclasses import dataclass
from functools import partial
from typing import Dict, Optional, Tuple, Union
import mlx.core as mx
import mlx.nn as nn
from .base import BaseModelArgs
def rms_norm(x, weight, eps):
    # RMS-normalize in float32 for stability, then scale.
    # NOTE(review): unlike the plain variant that multiplies by `weight`
    # directly, this one scales by (1.0 + weight) — i.e. `weight` is an
    # offset from unit gain.
    x = x.astype(mx.float32)
    x = x * mx.rsqrt(x.square().mean(-1, keepdims=True) + eps)
    return (1.0 + weight) * x.astype(weight.dtype)
17,805 | import argparse
import time
from functools import partial
import mlx.core as mx
import mlx.nn as nn
import mlx.optimizers as optim
import resnet
from dataset import get_cifar10
def train_epoch(model, train_iter, optimizer, epoch):
    """Train ``model`` for one epoch over ``train_iter``.

    Returns:
        (mean_tr_loss, mean_tr_acc, samples_per_sec): per-epoch mean loss,
        mean top-1 accuracy, and mean throughput in images/second.
    """

    def train_step(model, inp, tgt):
        # Mean cross-entropy loss and top-1 accuracy for one batch.
        output = model(inp)
        loss = mx.mean(nn.losses.cross_entropy(output, tgt))
        acc = mx.mean(mx.argmax(output, axis=1) == tgt)
        return loss, acc

    losses = []
    accs = []
    samples_per_sec = []

    # Model and optimizer state are declared as compile inputs/outputs so
    # the compiled step can read and mutate them across calls.
    state = [model.state, optimizer.state]

    @partial(mx.compile, inputs=state, outputs=state)
    def step(inp, tgt):
        train_step_fn = nn.value_and_grad(model, train_step)
        (loss, acc), grads = train_step_fn(model, inp, tgt)
        optimizer.update(model, grads)
        return loss, acc

    for batch_counter, batch in enumerate(train_iter):
        x = mx.array(batch["image"])
        y = mx.array(batch["label"])
        tic = time.perf_counter()
        loss, acc = step(x, y)
        # Force evaluation so the timing below reflects real work.
        mx.eval(state)
        toc = time.perf_counter()
        loss = loss.item()
        acc = acc.item()
        losses.append(loss)
        accs.append(acc)
        throughput = x.shape[0] / (toc - tic)
        samples_per_sec.append(throughput)
        if batch_counter % 10 == 0:
            print(
                " | ".join(
                    (
                        f"Epoch {epoch:02d} [{batch_counter:03d}]",
                        f"Train loss {loss:.3f}",
                        f"Train acc {acc:.3f}",
                        f"Throughput: {throughput:.2f} images/second",
                    )
                )
            )

    mean_tr_loss = mx.mean(mx.array(losses))
    mean_tr_acc = mx.mean(mx.array(accs))
    samples_per_sec = mx.mean(mx.array(samples_per_sec))
    return mean_tr_loss, mean_tr_acc, samples_per_sec
17,806 | import argparse
import time
from functools import partial
import mlx.core as mx
import mlx.nn as nn
import mlx.optimizers as optim
import resnet
from dataset import get_cifar10
def eval_fn(model, inp, tgt):
    """Return the mean top-1 accuracy of ``model`` on one batch."""
    predictions = mx.argmax(model(inp), axis=1)
    return mx.mean(predictions == tgt)
def test_epoch(model, test_iter, epoch):
    """Evaluate ``model`` over one pass of ``test_iter``.

    Returns the mean per-batch accuracy. ``epoch`` is accepted for symmetry
    with train_epoch but not used here.
    """
    batch_accuracies = []
    for batch in test_iter:
        images = mx.array(batch["image"])
        labels = mx.array(batch["label"])
        batch_accuracies.append(eval_fn(model, images, labels).item())
    return mx.mean(mx.array(batch_accuracies))
17,807 | import numpy as np
from mlx.data.datasets import load_cifar10
def get_cifar10(batch_size, root=None):
    """Return (train_iter, test_iter) mlx-data streams for CIFAR-10.

    Train stream: shuffle, random horizontal flip, 4-pixel pad on both
    spatial dims + random 32x32 crop, per-channel normalization, batching,
    prefetch. Test stream: normalization and batching only.
    """
    tr = load_cifar10(root=root)

    # NOTE(review): these look like the standard ImageNet channel statistics
    # rather than CIFAR-10's own mean/std — confirm this is intentional.
    mean = np.array([0.485, 0.456, 0.406]).reshape((1, 1, 3))
    std = np.array([0.229, 0.224, 0.225]).reshape((1, 1, 3))

    def normalize(x):
        # Scale to [0, 1] then standardize per channel.
        x = x.astype("float32") / 255.0
        return (x - mean) / std

    # Order matters: pad before the random crop so crops can shift.
    tr_iter = (
        tr.shuffle()
        .to_stream()
        .image_random_h_flip("image", prob=0.5)
        .pad("image", 0, 4, 4, 0.0)
        .pad("image", 1, 4, 4, 0.0)
        .image_random_crop("image", 32, 32)
        .key_transform("image", normalize)
        .batch(batch_size)
        .prefetch(4, 4)
    )

    test = load_cifar10(root=root, train=False)
    test_iter = test.to_stream().key_transform("image", normalize).batch(batch_size)
    return tr_iter, test_iter
17,808 | from typing import Any
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_flatten
class Block(nn.Module):
    """
    Implements a ResNet block with two convolutional layers and a skip connection.
    As per the paper, CIFAR-10 uses Shortcut type-A skip connections. (See paper for details)
    """

    def __init__(self, in_dims, dims, stride=1):
        super().__init__()

        # conv1 may downsample (stride > 1); conv2 always preserves shape.
        self.conv1 = nn.Conv2d(
            in_dims, dims, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm(dims)

        self.conv2 = nn.Conv2d(
            dims, dims, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm(dims)

        if stride != 1:
            # Downsampling blocks need a shortcut on the skip path.
            # NOTE(review): ShortcutA is defined elsewhere in this file.
            self.shortcut = ShortcutA(dims)
        else:
            self.shortcut = None

    def __call__(self, x):
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        # Identity skip when shapes match, ShortcutA otherwise.
        if self.shortcut is None:
            out += x
        else:
            out += self.shortcut(x)
        out = nn.relu(out)
        return out
class ResNet(nn.Module):
    """CIFAR-10 ResNet: a conv stem followed by three residual stages.

    Stage widths are 16/32/64 channels; stages 2 and 3 halve the spatial
    resolution via a stride-2 first block.  Global average pooling feeds a
    single linear classifier head.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm(16)
        self.layer1 = self._make_layer(block, 16, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 16, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 32, 64, num_blocks[2], stride=2)
        self.linear = nn.Linear(64, num_classes)

    def _make_layer(self, block, in_dims, dims, num_blocks, stride):
        # Only the first block in a stage may down-sample; the remaining
        # blocks keep stride 1 and operate on `dims` channels throughout.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(in_dims, dims, s))
            in_dims = dims
        return nn.Sequential(*layers)

    def num_params(self):
        """Return the total number of trainable parameters."""
        return sum(p.size for _, p in tree_flatten(self.parameters()))

    def __call__(self, x):
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        # Global average pool over the spatial axes, then flatten.
        out = mx.mean(out, axis=[1, 2]).reshape(out.shape[0], -1)
        return self.linear(out)
def resnet20(**kwargs):
    """Build a 20-layer CIFAR ResNet (three stages of 3 blocks; depth 6n+2, n=3)."""
    stage_depths = [3, 3, 3]
    return ResNet(Block, stage_depths, **kwargs)
17,809 | from typing import Any
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_flatten
class Block(nn.Module):
    """Two-convolution residual block with an identity or type-A shortcut.

    The main branch is conv3x3 -> BatchNorm -> ReLU -> conv3x3 -> BatchNorm;
    the skip path is added back before the final ReLU.  When ``stride != 1``
    the spatial resolution changes, so a ``ShortcutA`` projection (type-A, as
    in the original ResNet paper) replaces the identity skip.

    NOTE(review): ``ShortcutA`` is referenced but not defined in this file
    excerpt — confirm it is in scope at import time.
    """

    def __init__(self, in_dims, dims, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(
            in_dims, dims, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm(dims)
        self.conv2 = nn.Conv2d(
            dims, dims, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm(dims)
        # A strided block shrinks the feature map, so the skip path needs a
        # matching projection; otherwise the plain identity suffices.
        self.shortcut = ShortcutA(dims) if stride != 1 else None

    def __call__(self, x):
        residual = x if self.shortcut is None else self.shortcut(x)
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + residual
        return nn.relu(out)
class ResNet(nn.Module):
    """CIFAR-10 ResNet: a conv stem followed by three residual stages.

    Stage widths are 16/32/64 channels; stages 2 and 3 halve the spatial
    resolution via a stride-2 first block.  Global average pooling feeds a
    single linear classifier head.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm(16)
        self.layer1 = self._make_layer(block, 16, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 16, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 32, 64, num_blocks[2], stride=2)
        self.linear = nn.Linear(64, num_classes)

    def _make_layer(self, block, in_dims, dims, num_blocks, stride):
        # Only the first block in a stage may down-sample; the remaining
        # blocks keep stride 1 and operate on `dims` channels throughout.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(in_dims, dims, s))
            in_dims = dims
        return nn.Sequential(*layers)

    def num_params(self):
        """Return the total number of trainable parameters."""
        return sum(p.size for _, p in tree_flatten(self.parameters()))

    def __call__(self, x):
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        # Global average pool over the spatial axes, then flatten.
        out = mx.mean(out, axis=[1, 2]).reshape(out.shape[0], -1)
        return self.linear(out)
def resnet32(**kwargs):
    """Build a 32-layer CIFAR ResNet (three stages of 5 blocks; depth 6n+2, n=5)."""
    stage_depths = [5, 5, 5]
    return ResNet(Block, stage_depths, **kwargs)
17,810 | from typing import Any
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_flatten
class Block(nn.Module):
    """Two-convolution residual block with an identity or type-A shortcut.

    The main branch is conv3x3 -> BatchNorm -> ReLU -> conv3x3 -> BatchNorm;
    the skip path is added back before the final ReLU.  When ``stride != 1``
    the spatial resolution changes, so a ``ShortcutA`` projection (type-A, as
    in the original ResNet paper) replaces the identity skip.

    NOTE(review): ``ShortcutA`` is referenced but not defined in this file
    excerpt — confirm it is in scope at import time.
    """

    def __init__(self, in_dims, dims, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(
            in_dims, dims, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm(dims)
        self.conv2 = nn.Conv2d(
            dims, dims, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm(dims)
        # A strided block shrinks the feature map, so the skip path needs a
        # matching projection; otherwise the plain identity suffices.
        self.shortcut = ShortcutA(dims) if stride != 1 else None

    def __call__(self, x):
        residual = x if self.shortcut is None else self.shortcut(x)
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + residual
        return nn.relu(out)
class ResNet(nn.Module):
    """CIFAR-10 ResNet: a conv stem followed by three residual stages.

    Stage widths are 16/32/64 channels; stages 2 and 3 halve the spatial
    resolution via a stride-2 first block.  Global average pooling feeds a
    single linear classifier head.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm(16)
        self.layer1 = self._make_layer(block, 16, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 16, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 32, 64, num_blocks[2], stride=2)
        self.linear = nn.Linear(64, num_classes)

    def _make_layer(self, block, in_dims, dims, num_blocks, stride):
        # Only the first block in a stage may down-sample; the remaining
        # blocks keep stride 1 and operate on `dims` channels throughout.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(in_dims, dims, s))
            in_dims = dims
        return nn.Sequential(*layers)

    def num_params(self):
        """Return the total number of trainable parameters."""
        return sum(p.size for _, p in tree_flatten(self.parameters()))

    def __call__(self, x):
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        # Global average pool over the spatial axes, then flatten.
        out = mx.mean(out, axis=[1, 2]).reshape(out.shape[0], -1)
        return self.linear(out)
def resnet44(**kwargs):
    """Build a 44-layer CIFAR ResNet (three stages of 7 blocks; depth 6n+2, n=7)."""
    stage_depths = [7, 7, 7]
    return ResNet(Block, stage_depths, **kwargs)
17,811 | from typing import Any
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_flatten
class Block(nn.Module):
    """Two-convolution residual block with an identity or type-A shortcut.

    The main branch is conv3x3 -> BatchNorm -> ReLU -> conv3x3 -> BatchNorm;
    the skip path is added back before the final ReLU.  When ``stride != 1``
    the spatial resolution changes, so a ``ShortcutA`` projection (type-A, as
    in the original ResNet paper) replaces the identity skip.

    NOTE(review): ``ShortcutA`` is referenced but not defined in this file
    excerpt — confirm it is in scope at import time.
    """

    def __init__(self, in_dims, dims, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(
            in_dims, dims, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm(dims)
        self.conv2 = nn.Conv2d(
            dims, dims, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm(dims)
        # A strided block shrinks the feature map, so the skip path needs a
        # matching projection; otherwise the plain identity suffices.
        self.shortcut = ShortcutA(dims) if stride != 1 else None

    def __call__(self, x):
        residual = x if self.shortcut is None else self.shortcut(x)
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + residual
        return nn.relu(out)
class ResNet(nn.Module):
    """CIFAR-10 ResNet: a conv stem followed by three residual stages.

    Stage widths are 16/32/64 channels; stages 2 and 3 halve the spatial
    resolution via a stride-2 first block.  Global average pooling feeds a
    single linear classifier head.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm(16)
        self.layer1 = self._make_layer(block, 16, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 16, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 32, 64, num_blocks[2], stride=2)
        self.linear = nn.Linear(64, num_classes)

    def _make_layer(self, block, in_dims, dims, num_blocks, stride):
        # Only the first block in a stage may down-sample; the remaining
        # blocks keep stride 1 and operate on `dims` channels throughout.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(in_dims, dims, s))
            in_dims = dims
        return nn.Sequential(*layers)

    def num_params(self):
        """Return the total number of trainable parameters."""
        return sum(p.size for _, p in tree_flatten(self.parameters()))

    def __call__(self, x):
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        # Global average pool over the spatial axes, then flatten.
        out = mx.mean(out, axis=[1, 2]).reshape(out.shape[0], -1)
        return self.linear(out)
def resnet56(**kwargs):
    """Build a 56-layer CIFAR ResNet (three stages of 9 blocks; depth 6n+2, n=9)."""
    stage_depths = [9, 9, 9]
    return ResNet(Block, stage_depths, **kwargs)
17,812 | from typing import Any
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_flatten
class Block(nn.Module):
    """Two-convolution residual block with an identity or type-A shortcut.

    The main branch is conv3x3 -> BatchNorm -> ReLU -> conv3x3 -> BatchNorm;
    the skip path is added back before the final ReLU.  When ``stride != 1``
    the spatial resolution changes, so a ``ShortcutA`` projection (type-A, as
    in the original ResNet paper) replaces the identity skip.

    NOTE(review): ``ShortcutA`` is referenced but not defined in this file
    excerpt — confirm it is in scope at import time.
    """

    def __init__(self, in_dims, dims, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(
            in_dims, dims, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm(dims)
        self.conv2 = nn.Conv2d(
            dims, dims, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm(dims)
        # A strided block shrinks the feature map, so the skip path needs a
        # matching projection; otherwise the plain identity suffices.
        self.shortcut = ShortcutA(dims) if stride != 1 else None

    def __call__(self, x):
        residual = x if self.shortcut is None else self.shortcut(x)
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + residual
        return nn.relu(out)
class ResNet(nn.Module):
    """CIFAR-10 ResNet: a conv stem followed by three residual stages.

    Stage widths are 16/32/64 channels; stages 2 and 3 halve the spatial
    resolution via a stride-2 first block.  Global average pooling feeds a
    single linear classifier head.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm(16)
        self.layer1 = self._make_layer(block, 16, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 16, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 32, 64, num_blocks[2], stride=2)
        self.linear = nn.Linear(64, num_classes)

    def _make_layer(self, block, in_dims, dims, num_blocks, stride):
        # Only the first block in a stage may down-sample; the remaining
        # blocks keep stride 1 and operate on `dims` channels throughout.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(in_dims, dims, s))
            in_dims = dims
        return nn.Sequential(*layers)

    def num_params(self):
        """Return the total number of trainable parameters."""
        return sum(p.size for _, p in tree_flatten(self.parameters()))

    def __call__(self, x):
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        # Global average pool over the spatial axes, then flatten.
        out = mx.mean(out, axis=[1, 2]).reshape(out.shape[0], -1)
        return self.linear(out)
def resnet110(**kwargs):
    """Build a 110-layer CIFAR ResNet (three stages of 18 blocks; depth 6n+2, n=18)."""
    stage_depths = [18, 18, 18]
    return ResNet(Block, stage_depths, **kwargs)
17,813 | from typing import Any
import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_flatten
class Block(nn.Module):
    """Two-convolution residual block with an identity or type-A shortcut.

    The main branch is conv3x3 -> BatchNorm -> ReLU -> conv3x3 -> BatchNorm;
    the skip path is added back before the final ReLU.  When ``stride != 1``
    the spatial resolution changes, so a ``ShortcutA`` projection (type-A, as
    in the original ResNet paper) replaces the identity skip.

    NOTE(review): ``ShortcutA`` is referenced but not defined in this file
    excerpt — confirm it is in scope at import time.
    """

    def __init__(self, in_dims, dims, stride=1):
        super().__init__()
        self.conv1 = nn.Conv2d(
            in_dims, dims, kernel_size=3, stride=stride, padding=1, bias=False
        )
        self.bn1 = nn.BatchNorm(dims)
        self.conv2 = nn.Conv2d(
            dims, dims, kernel_size=3, stride=1, padding=1, bias=False
        )
        self.bn2 = nn.BatchNorm(dims)
        # A strided block shrinks the feature map, so the skip path needs a
        # matching projection; otherwise the plain identity suffices.
        self.shortcut = ShortcutA(dims) if stride != 1 else None

    def __call__(self, x):
        residual = x if self.shortcut is None else self.shortcut(x)
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + residual
        return nn.relu(out)
class ResNet(nn.Module):
    """CIFAR-10 ResNet: a conv stem followed by three residual stages.

    Stage widths are 16/32/64 channels; stages 2 and 3 halve the spatial
    resolution via a stride-2 first block.  Global average pooling feeds a
    single linear classifier head.
    """

    def __init__(self, block, num_blocks, num_classes=10):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm(16)
        self.layer1 = self._make_layer(block, 16, 16, num_blocks[0], stride=1)
        self.layer2 = self._make_layer(block, 16, 32, num_blocks[1], stride=2)
        self.layer3 = self._make_layer(block, 32, 64, num_blocks[2], stride=2)
        self.linear = nn.Linear(64, num_classes)

    def _make_layer(self, block, in_dims, dims, num_blocks, stride):
        # Only the first block in a stage may down-sample; the remaining
        # blocks keep stride 1 and operate on `dims` channels throughout.
        layers = []
        for s in [stride] + [1] * (num_blocks - 1):
            layers.append(block(in_dims, dims, s))
            in_dims = dims
        return nn.Sequential(*layers)

    def num_params(self):
        """Return the total number of trainable parameters."""
        return sum(p.size for _, p in tree_flatten(self.parameters()))

    def __call__(self, x):
        out = nn.relu(self.bn1(self.conv1(x)))
        out = self.layer3(self.layer2(self.layer1(out)))
        # Global average pool over the spatial axes, then flatten.
        out = mx.mean(out, axis=[1, 2]).reshape(out.shape[0], -1)
        return self.linear(out)
def resnet1202(**kwargs):
    """Build a 1202-layer CIFAR ResNet (three stages of 200 blocks; depth 6n+2, n=200)."""
    stage_depths = [200, 200, 200]
    return ResNet(Block, stage_depths, **kwargs)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.