id
int64
0
190k
prompt
stringlengths
21
13.4M
docstring
stringlengths
1
12k
17,814
import argparse
import time
from functools import partial

import kwt
import mlx.core as mx
import mlx.data as dx
import mlx.nn as nn
import mlx.optimizers as optim
from mlx.data.datasets import load_speechcommands
from mlx.data.features import mfsc


def prepare_dataset(batch_size, split, root=None):
    """Build a prefetching stream of normalized MFSC features from Speech Commands.

    Parameters
    ----------
    batch_size : int
        Number of examples per batch.
    split : str
        Dataset split to load (e.g. "train", "test").
    root : str, optional
        Dataset root directory; defaults to the mlx-data cache location.

    Returns
    -------
    A shuffled, batched, prefetching mlx.data stream of {"audio", "label"} items.
    """

    def standardize(sample):
        # Zero-mean / unit-variance normalization per utterance.
        return (sample - sample.mean()) / sample.std()

    # 40 Mel-frequency bins at 16 kHz with 30 ms frames and 10 ms stride.
    featurizer = mfsc(
        40,
        16000,
        frame_size_ms=30,
        frame_stride_ms=10,
        high_freq=7600,
        low_freq=20,
    )

    dataset = load_speechcommands(split=split, root=root)
    stream = dataset.squeeze("audio")
    stream = stream.key_transform("audio", featurizer)
    stream = stream.key_transform("audio", standardize)
    stream = stream.shuffle().batch(batch_size).to_stream().prefetch(4, 4)
    return stream
null
17,815
import argparse
import time
from functools import partial

import kwt
import mlx.core as mx
import mlx.data as dx
import mlx.nn as nn
import mlx.optimizers as optim
from mlx.data.datasets import load_speechcommands
from mlx.data.features import mfsc


def train_epoch(model, train_iter, optimizer, epoch):
    """Run one training epoch over `train_iter`.

    Returns (mean loss, mean accuracy, mean throughput in samples/sec),
    each as a scalar mx.array.
    """

    def train_step(model, x, y):
        # Forward pass: mean cross-entropy loss and batch accuracy.
        output = model(x)
        loss = mx.mean(nn.losses.cross_entropy(output, y))
        acc = mx.mean(mx.argmax(output, axis=1) == y)
        return loss, acc

    # Model and optimizer state are captured so mx.compile can trace the
    # in-place parameter updates performed by optimizer.update.
    state = [model.state, optimizer.state]

    @partial(mx.compile, inputs=state, outputs=state)
    def step(x, y):
        (loss, acc), grads = nn.value_and_grad(model, train_step)(model, x, y)
        optimizer.update(model, grads)
        return loss, acc

    losses = []
    accs = []
    samples_per_sec = []

    model.train(True)
    for batch_counter, batch in enumerate(train_iter):
        x = mx.array(batch["audio"])
        y = mx.array(batch["label"])
        tic = time.perf_counter()
        loss, acc = step(x, y)
        # Force the lazy computation so the timing below is meaningful.
        mx.eval(state)
        toc = time.perf_counter()
        loss = loss.item()
        acc = acc.item()
        losses.append(loss)
        accs.append(acc)
        throughput = x.shape[0] / (toc - tic)
        samples_per_sec.append(throughput)
        if batch_counter % 25 == 0:
            print(
                " | ".join(
                    (
                        f"Epoch {epoch:02d} [{batch_counter:03d}]",
                        f"Train loss {loss:.3f}",
                        f"Train acc {acc:.3f}",
                        f"Throughput: {throughput:.2f} samples/second",
                    )
                )
            )

    mean_tr_loss = mx.mean(mx.array(losses))
    mean_tr_acc = mx.mean(mx.array(accs))
    samples_per_sec = mx.mean(mx.array(samples_per_sec))
    return mean_tr_loss, mean_tr_acc, samples_per_sec
null
17,816
import argparse
import time
from functools import partial

import kwt
import mlx.core as mx
import mlx.data as dx
import mlx.nn as nn
import mlx.optimizers as optim
from mlx.data.datasets import load_speechcommands
from mlx.data.features import mfsc


def eval_fn(model, x, y):
    """Batch accuracy: fraction of argmax predictions that match the labels."""
    predictions = mx.argmax(model(x), axis=1)
    return mx.mean(predictions == y)


def test_epoch(model, test_iter):
    """Evaluate `model` over `test_iter`.

    Returns (mean accuracy, mean throughput in samples/sec) as scalar mx.arrays.
    """
    model.train(False)
    accs = []
    throughput = []
    for batch in test_iter:
        inputs = mx.array(batch["audio"])
        targets = mx.array(batch["label"])
        tic = time.perf_counter()
        batch_acc = eval_fn(model, inputs, targets)
        accs.append(batch_acc.item())
        toc = time.perf_counter()
        throughput.append(inputs.shape[0] / (toc - tic))
    return mx.mean(mx.array(accs)), mx.mean(mx.array(throughput))
null
17,817
from typing import Any

import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_flatten


class KWT(nn.Module):
    """
    Implements the Keyword Transformer (KWT) [1] model.

    KWT is essentially a vision transformer [2] with minor modifications:
    - Instead of square patches, KWT uses rectangular patches -> a patch
      across frequency for every timestep
    - KWT modules apply layer normalization after attention/feedforward layers

    [1] https://arxiv.org/abs/2104.11178
    [2] https://arxiv.org/abs/2010.11929

    Parameters
    ----------
    input_res: tuple of ints
        Input resolution (time, frequency)
    patch_res: tuple of ints
        Patch resolution (time, frequency)
    num_classes: int
        Number of classes
    dim: int
        Model Embedding dimension
    depth: int
        Number of transformer layers
    heads: int
        Number of attention heads
    mlp_dim: int
        Feedforward hidden dimension
    pool: str
        Pooling type, either "cls" or "mean"
    in_channels: int, optional
        Number of input channels
    dropout: float, optional
        Dropout rate
    emb_dropout: float, optional
        Embedding dropout rate
    """

    def __init__(
        self,
        input_res,
        patch_res,
        num_classes,
        dim,
        depth,
        heads,
        mlp_dim,
        pool="mean",
        in_channels=1,
        dropout=0.0,
        emb_dropout=0.0,
    ):
        super().__init__()
        # One patch per (time, frequency) tile of the input spectrogram.
        self.num_patches = int(
            (input_res[0] / patch_res[0]) * (input_res[1] / patch_res[1])
        )
        self.dim = dim
        self.patch_embedding = nn.Conv2d(
            in_channels, dim, kernel_size=patch_res, stride=patch_res
        )
        # +1 position for the prepended class token.
        self.pos_embedding = mx.random.truncated_normal(
            -0.01,
            0.01,
            (self.num_patches + 1, dim),
        )
        self.cls_token = mx.random.truncated_normal(-0.01, 0.01, (dim,))
        self.dropout = nn.Dropout(emb_dropout)
        # NOTE(review): Transformer is defined elsewhere in this module.
        self.transformer = Transformer(dim, depth, heads, mlp_dim, dropout)
        self.pool = pool
        self.mlp_head = nn.Sequential(nn.LayerNorm(dim), nn.Linear(dim, num_classes))

    def num_params(self):
        # Total number of scalar parameters in the model.
        nparams = sum(x.size for k, x in tree_flatten(self.parameters()))
        return nparams

    def __call__(self, x):
        if x.ndim != 4:
            # Add a trailing channel axis so Conv2d sees (B, T, F, C).
            x = mx.expand_dims(x, axis=-1)
        x = self.patch_embedding(x)
        x = x.reshape(x.shape[0], -1, self.dim)
        assert x.shape[1] == self.num_patches
        cls_tokens = mx.broadcast_to(self.cls_token, (x.shape[0], 1, self.dim))
        x = mx.concatenate((cls_tokens, x), axis=1)
        x = x + self.pos_embedding
        x = self.dropout(x)
        x = self.transformer(x)
        # "mean": average over all tokens; otherwise use the class token.
        x = x.mean(axis=1) if self.pool == "mean" else x[:, 0]
        x = self.mlp_head(x)
        return x


def parse_kwt_args(**kwargs):
    # Pop shared KWT defaults; the remaining kwargs pass through to KWT.
    input_res = kwargs.pop("input_res", [98, 40])
    patch_res = kwargs.pop("patch_res", [1, 40])
    num_classes = kwargs.pop("num_classes", 35)
    emb_dropout = kwargs.pop("emb_dropout", 0.1)
    return input_res, patch_res, num_classes, emb_dropout, kwargs


def kwt1(**kwargs):
    # KWT-1: smallest configuration (dim 64, 1 head, MLP 256).
    input_res, patch_res, num_classes, emb_dropout, kwargs = parse_kwt_args(**kwargs)
    return KWT(
        input_res,
        patch_res,
        num_classes,
        dim=64,
        depth=12,
        heads=1,
        mlp_dim=256,
        emb_dropout=emb_dropout,
        **kwargs
    )
null
17,818
from typing import Any

import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_flatten


class KWT(nn.Module):
    """
    Implements the Keyword Transformer (KWT) [1] model.

    KWT is essentially a vision transformer [2] with minor modifications:
    - Instead of square patches, KWT uses rectangular patches -> a patch
      across frequency for every timestep
    - KWT modules apply layer normalization after attention/feedforward layers

    [1] https://arxiv.org/abs/2104.11178
    [2] https://arxiv.org/abs/2010.11929

    Parameters
    ----------
    input_res: tuple of ints
        Input resolution (time, frequency)
    patch_res: tuple of ints
        Patch resolution (time, frequency)
    num_classes: int
        Number of classes
    dim: int
        Model Embedding dimension
    depth: int
        Number of transformer layers
    heads: int
        Number of attention heads
    mlp_dim: int
        Feedforward hidden dimension
    pool: str
        Pooling type, either "cls" or "mean"
    in_channels: int, optional
        Number of input channels
    dropout: float, optional
        Dropout rate
    emb_dropout: float, optional
        Embedding dropout rate
    """

    def __init__(
        self,
        input_res,
        patch_res,
        num_classes,
        dim,
        depth,
        heads,
        mlp_dim,
        pool="mean",
        in_channels=1,
        dropout=0.0,
        emb_dropout=0.0,
    ):
        super().__init__()
        # One patch per (time, frequency) tile of the input spectrogram.
        self.num_patches = int(
            (input_res[0] / patch_res[0]) * (input_res[1] / patch_res[1])
        )
        self.dim = dim
        self.patch_embedding = nn.Conv2d(
            in_channels, dim, kernel_size=patch_res, stride=patch_res
        )
        # +1 position for the prepended class token.
        self.pos_embedding = mx.random.truncated_normal(
            -0.01,
            0.01,
            (self.num_patches + 1, dim),
        )
        self.cls_token = mx.random.truncated_normal(-0.01, 0.01, (dim,))
        self.dropout = nn.Dropout(emb_dropout)
        # NOTE(review): Transformer is defined elsewhere in this module.
        self.transformer = Transformer(dim, depth, heads, mlp_dim, dropout)
        self.pool = pool
        self.mlp_head = nn.Sequential(nn.LayerNorm(dim), nn.Linear(dim, num_classes))

    def num_params(self):
        # Total number of scalar parameters in the model.
        nparams = sum(x.size for k, x in tree_flatten(self.parameters()))
        return nparams

    def __call__(self, x):
        if x.ndim != 4:
            # Add a trailing channel axis so Conv2d sees (B, T, F, C).
            x = mx.expand_dims(x, axis=-1)
        x = self.patch_embedding(x)
        x = x.reshape(x.shape[0], -1, self.dim)
        assert x.shape[1] == self.num_patches
        cls_tokens = mx.broadcast_to(self.cls_token, (x.shape[0], 1, self.dim))
        x = mx.concatenate((cls_tokens, x), axis=1)
        x = x + self.pos_embedding
        x = self.dropout(x)
        x = self.transformer(x)
        # "mean": average over all tokens; otherwise use the class token.
        x = x.mean(axis=1) if self.pool == "mean" else x[:, 0]
        x = self.mlp_head(x)
        return x


def parse_kwt_args(**kwargs):
    # Pop shared KWT defaults; the remaining kwargs pass through to KWT.
    input_res = kwargs.pop("input_res", [98, 40])
    patch_res = kwargs.pop("patch_res", [1, 40])
    num_classes = kwargs.pop("num_classes", 35)
    emb_dropout = kwargs.pop("emb_dropout", 0.1)
    return input_res, patch_res, num_classes, emb_dropout, kwargs


def kwt2(**kwargs):
    # KWT-2: medium configuration (dim 128, 2 heads, MLP 512).
    input_res, patch_res, num_classes, emb_dropout, kwargs = parse_kwt_args(**kwargs)
    return KWT(
        input_res,
        patch_res,
        num_classes,
        dim=128,
        depth=12,
        heads=2,
        mlp_dim=512,
        emb_dropout=emb_dropout,
        **kwargs
    )
null
17,819
from typing import Any

import mlx.core as mx
import mlx.nn as nn
from mlx.utils import tree_flatten


class KWT(nn.Module):
    """
    Implements the Keyword Transformer (KWT) [1] model.

    KWT is essentially a vision transformer [2] with minor modifications:
    - Instead of square patches, KWT uses rectangular patches -> a patch
      across frequency for every timestep
    - KWT modules apply layer normalization after attention/feedforward layers

    [1] https://arxiv.org/abs/2104.11178
    [2] https://arxiv.org/abs/2010.11929

    Parameters
    ----------
    input_res: tuple of ints
        Input resolution (time, frequency)
    patch_res: tuple of ints
        Patch resolution (time, frequency)
    num_classes: int
        Number of classes
    dim: int
        Model Embedding dimension
    depth: int
        Number of transformer layers
    heads: int
        Number of attention heads
    mlp_dim: int
        Feedforward hidden dimension
    pool: str
        Pooling type, either "cls" or "mean"
    in_channels: int, optional
        Number of input channels
    dropout: float, optional
        Dropout rate
    emb_dropout: float, optional
        Embedding dropout rate
    """

    def __init__(
        self,
        input_res,
        patch_res,
        num_classes,
        dim,
        depth,
        heads,
        mlp_dim,
        pool="mean",
        in_channels=1,
        dropout=0.0,
        emb_dropout=0.0,
    ):
        super().__init__()
        # One patch per (time, frequency) tile of the input spectrogram.
        self.num_patches = int(
            (input_res[0] / patch_res[0]) * (input_res[1] / patch_res[1])
        )
        self.dim = dim
        self.patch_embedding = nn.Conv2d(
            in_channels, dim, kernel_size=patch_res, stride=patch_res
        )
        # +1 position for the prepended class token.
        self.pos_embedding = mx.random.truncated_normal(
            -0.01,
            0.01,
            (self.num_patches + 1, dim),
        )
        self.cls_token = mx.random.truncated_normal(-0.01, 0.01, (dim,))
        self.dropout = nn.Dropout(emb_dropout)
        # NOTE(review): Transformer is defined elsewhere in this module.
        self.transformer = Transformer(dim, depth, heads, mlp_dim, dropout)
        self.pool = pool
        self.mlp_head = nn.Sequential(nn.LayerNorm(dim), nn.Linear(dim, num_classes))

    def num_params(self):
        # Total number of scalar parameters in the model.
        nparams = sum(x.size for k, x in tree_flatten(self.parameters()))
        return nparams

    def __call__(self, x):
        if x.ndim != 4:
            # Add a trailing channel axis so Conv2d sees (B, T, F, C).
            x = mx.expand_dims(x, axis=-1)
        x = self.patch_embedding(x)
        x = x.reshape(x.shape[0], -1, self.dim)
        assert x.shape[1] == self.num_patches
        cls_tokens = mx.broadcast_to(self.cls_token, (x.shape[0], 1, self.dim))
        x = mx.concatenate((cls_tokens, x), axis=1)
        x = x + self.pos_embedding
        x = self.dropout(x)
        x = self.transformer(x)
        # "mean": average over all tokens; otherwise use the class token.
        x = x.mean(axis=1) if self.pool == "mean" else x[:, 0]
        x = self.mlp_head(x)
        return x


def parse_kwt_args(**kwargs):
    # Pop shared KWT defaults; the remaining kwargs pass through to KWT.
    input_res = kwargs.pop("input_res", [98, 40])
    patch_res = kwargs.pop("patch_res", [1, 40])
    num_classes = kwargs.pop("num_classes", 35)
    emb_dropout = kwargs.pop("emb_dropout", 0.1)
    return input_res, patch_res, num_classes, emb_dropout, kwargs


def kwt3(**kwargs):
    # KWT-3: largest configuration (dim 192, 3 heads, MLP 768).
    input_res, patch_res, num_classes, emb_dropout, kwargs = parse_kwt_args(**kwargs)
    return KWT(
        input_res,
        patch_res,
        num_classes,
        dim=192,
        depth=12,
        heads=3,
        mlp_dim=768,
        emb_dropout=emb_dropout,
        **kwargs
    )
null
17,820
from functools import partial

import matplotlib.pyplot as plt
import mlx.core as mx
import mlx.nn as nn
import mlx.optimizers as optim
import numpy as np
from flows import RealNVP
from sklearn import datasets, preprocessing
from tqdm import trange


def get_moons_dataset(n_samples=100_000, noise=0.06):
    """Get two moons dataset with given noise level."""
    # Sample the moons, then standardize each feature to zero mean / unit variance.
    samples, _ = datasets.make_moons(n_samples=n_samples, noise=noise)
    return preprocessing.StandardScaler().fit_transform(samples)
Get two moons dataset with given noise level.
17,821
import argparse
import copy
import hashlib
import json
import os
import urllib
import warnings
from dataclasses import asdict
from pathlib import Path
from typing import List

import mlx.core as mx
import mlx.nn as nn
import numpy as np
import torch
from mlx.utils import tree_flatten, tree_map, tree_unflatten
from tqdm import tqdm

from whisper import torch_whisper
from whisper.whisper import ModelDimensions, Whisper

# Official OpenAI Whisper checkpoint URLs. The path segment before the file
# name is the expected SHA256 of the checkpoint, used by _download to verify.
_MODELS = {
    "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt",
    "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt",
    "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt",
    "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt",
    "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt",
    "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt",
    "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt",
    "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt",
    "large-v1": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large-v1.pt",
    "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt",
    "large-v3": "https://openaipublic.azureedge.net/main/whisper/models/e5b1a55b89c1367dacf97e3e19bfd829a01529dbfdeefa8caeb59b3f1b81dadb/large-v3.pt",
    "large": "https://openaipublic.azureedge.net/main/whisper/models/e5b1a55b89c1367dacf97e3e19bfd829a01529dbfdeefa8caeb59b3f1b81dadb/large-v3.pt",
}

# Base85-encoded, gzip-compressed cross-attention head indices used for
# word-level timestamp alignment, one entry per official model.
_ALIGNMENT_HEADS = {
    "tiny.en": b"ABzY8J1N>@0{>%R00Bk>$p{7v037`oCl~+#00",
    "tiny": b"ABzY8bu8Lr0{>%RKn9Fp%m@SkK7Kt=7ytkO",
    "base.en": b"ABzY8;40c<0{>%RzzG;p*o+Vo09|#PsxSZm00",
    "base": b"ABzY8KQ!870{>%RzyTQH3`Q^yNP!>##QT-<FaQ7m",
    "small.en": b"ABzY8>?_)10{>%RpeA61k&I|OI3I$65C{;;pbCHh0B{qLQ;+}v00",
    "small": b"ABzY8DmU6=0{>%Rpa?J`kvJ6qF(V^F86#Xh7JUGMK}P<N0000",
    "medium.en": b"ABzY8usPae0{>%R7<zz_OvQ{)4kMa0BMw6u5rT}kRKX;$NfYBv00*Hl@qhsU00",
    "medium": b"ABzY8B0Jh+0{>%R7}kK1fFL7w6%<-Pf*t^=N)Qr&0RR9",
    "large-v1": b"ABzY8r9j$a0{>%R7#4sLmoOs{s)o3~84-RPdcFk!JR<kSfC2yj",
    "large-v2": b"ABzY8zd+h!0{>%R7=D0pU<_bnWW*tkYAhobTNnu$jnkEkXqp)j;w1Tzk)UH3X%SZd&fFZ2fC2yj",
    "large-v3": b"ABzY8gWO1E0{>%R7(9S+Kn!D~%ngiGaR?*L!iJG9p-nab0JQ=-{D1-g00",
    "large": b"ABzY8gWO1E0{>%R7(9S+Kn!D~%ngiGaR?*L!iJG9p-nab0JQ=-{D1-g00",
}


def _download(url: str, root: str) -> str:
    """Download `url` into `root` and return the local file path.

    The SHA256 embedded in the URL is verified both for an already-present
    file (which is reused when it matches, re-downloaded otherwise) and for
    the freshly downloaded bytes.

    Raises
    ------
    RuntimeError
        If the target path exists but is not a regular file, or if the
        downloaded file fails checksum verification.
    """
    os.makedirs(root, exist_ok=True)

    expected_sha256 = url.split("/")[-2]
    download_target = os.path.join(root, os.path.basename(url))

    if os.path.exists(download_target) and not os.path.isfile(download_target):
        raise RuntimeError(f"{download_target} exists and is not a regular file")

    if os.path.isfile(download_target):
        with open(download_target, "rb") as f:
            model_bytes = f.read()
        if hashlib.sha256(model_bytes).hexdigest() == expected_sha256:
            return download_target
        else:
            warnings.warn(
                f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file"
            )

    with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
        with tqdm(
            total=int(source.info().get("Content-Length")),
            ncols=80,
            unit="iB",
            unit_scale=True,
            unit_divisor=1024,
        ) as loop:
            while True:
                buffer = source.read(8192)
                if not buffer:
                    break
                output.write(buffer)
                loop.update(len(buffer))

    # Fix: read via a context manager (the original leaked the file handle
    # with a bare open(...).read()).
    with open(download_target, "rb") as f:
        model_bytes = f.read()
    if hashlib.sha256(model_bytes).hexdigest() != expected_sha256:
        raise RuntimeError(
            # Fix: removed the duplicated "not" from the original message.
            "Model has been downloaded but the SHA256 checksum does not match. Please retry loading the model."
        )

    return download_target


def available_models() -> List[str]:
    """Returns the names of available models"""
    return list(_MODELS.keys())


def load_torch_model(
    name_or_path: str,
    download_root: str = None,
) -> torch_whisper.Whisper:
    """
    Load a Whisper ASR model

    Parameters
    ----------
    name_or_path : str
        one of the official model names listed by `whisper.available_models()` or
        a local Pytorch checkpoint which is in the original OpenAI format
    download_root: str
        path to download the model files; by default, it uses "~/.cache/whisper"

    Returns
    -------
    model : Whisper
        The Whisper ASR model instance
    """
    if download_root is None:
        download_root = os.path.join(os.path.expanduser("~"), ".cache/whisper")

    # todo: accept alignment_heads of local Pytorch checkpoint
    alignment_heads = None
    if name_or_path in _MODELS:
        alignment_heads = _ALIGNMENT_HEADS[name_or_path]
        name_or_path = _download(_MODELS[name_or_path], download_root)
    elif not Path(name_or_path).is_file():
        raise RuntimeError(
            f"Model {name_or_path} is neither found in {available_models()} nor as a local path"
        )

    with open(name_or_path, "rb") as fp:
        checkpoint = torch.load(fp)
    dims = torch_whisper.ModelDimensions(**checkpoint["dims"])
    model = torch_whisper.Whisper(dims)
    model.load_state_dict(checkpoint["model_state_dict"])

    if alignment_heads is not None:
        model.set_alignment_heads(alignment_heads)

    return model
Load a Whisper ASR model Parameters ---------- name_or_path : str one of the official model names listed by `whisper.available_models()` or a local Pytorch checkpoint which is in the original OpenAI format download_root: str path to download the model files; by default, it uses "~/.cache/whisper" Returns ------- model : Whisper The Whisper ASR model instance
17,822
import argparse
import copy
import hashlib
import json
import os
import urllib
import warnings
from dataclasses import asdict
from pathlib import Path
from typing import List

import mlx.core as mx
import mlx.nn as nn
import numpy as np
import torch
from mlx.utils import tree_flatten, tree_map, tree_unflatten
from tqdm import tqdm

from whisper import torch_whisper
from whisper.whisper import ModelDimensions, Whisper


def convert(model, rules=None):
    """Recursively convert a PyTorch module/tensor tree into MLX parameters.

    Parameters
    ----------
    model
        A torch.Tensor, torch.nn.Module, or container thereof.
    rules : dict, optional
        Maps either a module type or a child-attribute name to a callable
        `(module, rules) -> params` that overrides the default conversion.

    Returns
    -------
    An mx.array for tensors, a list for ModuleLists, or a nested dict of
    parameters for modules.
    """
    params = {}
    # Fix: both `rules` lookups below guard against the default rules=None;
    # the original raised TypeError ("argument of type 'NoneType' is not
    # iterable") when convert was called on a module without a rules mapping.
    if rules is not None and type(model) in rules:
        out = rules[type(model)](model, rules)
        return out
    if isinstance(model, torch.Tensor):
        return mx.array(model.detach().numpy())
    if isinstance(model, torch.nn.ModuleList):
        return [convert(n, rules) for n in model.children()]
    if isinstance(model, torch.nn.Conv1d):
        # PyTorch Conv1d weight is (out, in, kernel); MLX expects the kernel
        # axis before the input-channel axis.
        return {
            "weight": convert(model.weight).transpose(0, 2, 1),
            "bias": convert(model.bias),
        }
    for k, n in model.named_children():
        if rules is not None and k in rules:
            params.update(rules[k](n, rules))
        else:
            params[k] = convert(n, rules)
    for k, p in model.named_parameters(recurse=False):
        params[k] = convert(p)
    return params


def torch_to_mlx(
    torch_model: torch_whisper.Whisper,
    dtype: mx.Dtype = mx.float16,
) -> Whisper:
    """Convert a PyTorch Whisper model into an MLX Whisper model.

    Weights are cast to `dtype`; alignment heads are carried over when the
    source model has them.
    """

    def convert_rblock(model, rules):
        # A ResidualAttentionBlock's MLP is an nn.Sequential; flatten its
        # first/last Linear layers into the names the MLX model expects.
        children = dict(model.named_children())
        mlp = list(children.pop("mlp").children())
        params = {
            "mlp1": convert(mlp[0], rules),
            "mlp2": convert(mlp[-1], rules),
        }
        for k, n in children.items():
            params[k] = convert(n, rules)
        return params

    rules = {
        torch_whisper.ResidualAttentionBlock: convert_rblock,
    }

    params = convert(torch_model, rules)
    mlx_model = Whisper(torch_model.dims, dtype)
    params = tree_map(lambda p: p.astype(dtype), params)
    mlx_model.update(params)

    if (alignment_heads := getattr(torch_model, "alignment_heads", None)) is not None:
        mlx_model.set_alignment_heads(alignment_heads.indices().T.numpy())

    return mlx_model
null
17,823
import argparse
import copy
import hashlib
import json
import os
import urllib
import warnings
from dataclasses import asdict
from pathlib import Path
from typing import List

import mlx.core as mx
import mlx.nn as nn
import numpy as np
import torch
from mlx.utils import tree_flatten, tree_map, tree_unflatten
from tqdm import tqdm

from whisper import torch_whisper
from whisper.whisper import ModelDimensions, Whisper


def upload_to_hub(path: str, name: str, torch_name_or_path: str):
    """Upload a converted MLX model folder to the Hugging Face Hub.

    Creates (or reuses) the `mlx-community/{name}` repo, writes a generated
    README model card into `path`, and uploads the whole folder.
    """
    # Imported lazily so huggingface_hub is only required when uploading.
    import os

    from huggingface_hub import HfApi, ModelCard, logging

    repo_id = f"mlx-community/{name}"

    # NOTE(review): the line layout of this model-card template was
    # reconstructed from a whitespace-flattened source — confirm against the
    # original file before relying on exact README formatting.
    text = f"""
---
library_name: mlx
---

# {name}
This model was converted to MLX format from [`{torch_name_or_path}`]().

## Use with mlx
```bash
git clone https://github.com/ml-explore/mlx-examples.git
cd mlx-examples/whisper/
pip install -r requirements.txt

>> import whisper
>> whisper.transcribe("FILE_NAME")
```
"""
    card = ModelCard(text)
    card.save(os.path.join(path, "README.md"))

    logging.set_verbosity_info()

    api = HfApi()
    api.create_repo(repo_id=repo_id, exist_ok=True)
    api.upload_folder(
        folder_path=path,
        repo_id=repo_id,
        repo_type="model",
    )
null
17,824
import argparse
import copy
import hashlib
import json
import os
import urllib
import warnings
from dataclasses import asdict
from pathlib import Path
from typing import List

import mlx.core as mx
import mlx.nn as nn
import numpy as np
import torch
from mlx.utils import tree_flatten, tree_map, tree_unflatten
from tqdm import tqdm

from whisper import torch_whisper
from whisper.whisper import ModelDimensions, Whisper


def quantize(weights, config, args):
    """Quantize Whisper weights and record the quantization settings.

    Returns a (quantized_weights, quantized_config) pair; the input config is
    not mutated.
    """
    quantized_config = copy.deepcopy(config)

    # Rebuild the model so QuantizedLinear can replace its Linear layers.
    model = Whisper(ModelDimensions(**config))
    model.update(tree_unflatten(list(tree_map(mx.array, weights).items())))

    # Quantize in place with the requested group size and bit width.
    nn.QuantizedLinear.quantize_module(model, args.q_group_size, args.q_bits)

    quantized_config["quantization"] = {
        "group_size": args.q_group_size,
        "bits": args.q_bits,
    }

    quantized_weights = dict(tree_flatten(model.parameters()))
    return quantized_weights, quantized_config
null
17,825
import base64
import gzip
from dataclasses import dataclass
from typing import Dict, Iterable, Optional

import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor, nn


def sinusoids(length, channels, max_timescale=10000):
    """Returns sinusoids for positional embedding"""
    assert channels % 2 == 0
    half = channels // 2
    # Geometric progression of timescales from 1 up to max_timescale.
    log_increment = np.log(max_timescale) / (half - 1)
    inv_timescales = torch.exp(-log_increment * torch.arange(half))
    # Outer product of positions and inverse timescales: (length, half).
    angles = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :]
    # First half of each row holds sines, second half cosines.
    return torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)
Returns sinusoids for positional embedding
17,826
import base64
import gzip
import math
from dataclasses import dataclass
from typing import Union

import mlx.core as mx
import mlx.nn as nn
import numpy as np

from .decoding import decode as decode_function
from .decoding import detect_language as detect_language_function


def sinusoids(length, channels, max_timescale=10000):
    """Returns sinusoids for positional embedding"""
    assert channels % 2 == 0
    half = channels // 2
    # Geometric progression of timescales from 1 up to max_timescale.
    log_increment = math.log(max_timescale) / (half - 1)
    inv_timescales = mx.exp(-log_increment * mx.arange(half))
    # Outer product of positions and inverse timescales: (length, half).
    angles = mx.arange(length)[:, None] * inv_timescales[None, :]
    # First half of each row holds sines, second half cosines.
    return mx.concatenate([mx.sin(angles), mx.cos(angles)], axis=1)
Returns sinusoids for positional embedding
17,827
import sys import warnings from typing import List, Optional, Tuple, Union import mlx.core as mx import numpy as np import tqdm from .audio import ( FRAMES_PER_SECOND, HOP_LENGTH, N_FRAMES, N_SAMPLES, SAMPLE_RATE, log_mel_spectrogram, pad_or_trim, ) from .decoding import DecodingOptions, DecodingResult from .load_models import load_model from .timing import add_word_timestamps from .tokenizer import LANGUAGES, get_tokenizer def _format_timestamp(seconds: float): assert seconds >= 0, "non-negative timestamp expected" milliseconds = round(seconds * 1000.0) hours = milliseconds // 3_600_000 milliseconds -= hours * 3_600_000 minutes = milliseconds // 60_000 milliseconds -= minutes * 60_000 seconds = milliseconds // 1_000 milliseconds -= seconds * 1_000 hours_marker = f"{hours:02d}:" if hours > 0 else "" return f"{hours_marker}{minutes:02d}:{seconds:02d}.{milliseconds:03d}" def _get_end(segments: List[dict]) -> Optional[float]: return next( (w["end"] for s in reversed(segments) for w in reversed(s["words"])), segments[-1]["end"] if segments else None, ) class ModelHolder: model = None model_path = None def get_model(cls, model_path: str, dtype: mx.Dtype): if cls.model is None or model_path != cls.model_path: cls.model = load_model(model_path, dtype=dtype) cls.model_path = model_path return cls.model SAMPLE_RATE = 16000 HOP_LENGTH = 160 N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE N_FRAMES = N_SAMPLES // HOP_LENGTH FRAMES_PER_SECOND = SAMPLE_RATE // HOP_LENGTH def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1): """ Pad or trim the audio array to N_SAMPLES, as expected by the encoder. 
""" if array.shape[axis] > length: sl = [slice(None)] * array.ndim sl[axis] = slice(0, length) array = array[tuple(sl)] if array.shape[axis] < length: pad_widths = [(0, 0)] * array.ndim pad_widths[axis] = (0, length - array.shape[axis]) pad_fn = mx.pad if isinstance(array, mx.array) else np.pad array = pad_fn(array, pad_widths) return array def log_mel_spectrogram( audio: Union[str, np.ndarray], n_mels: int = 80, padding: int = 0, ): """ Compute the log-Mel spectrogram of Parameters ---------- audio: Union[str, np.ndarray, mx.array], shape = (*) The path to audio or either a NumPy or mlx array containing the audio waveform in 16 kHz n_mels: int The number of Mel-frequency filters, only 80 is supported padding: int Number of zero samples to pad to the right Returns ------- mx.array, shape = (80, n_frames) An array that contains the Mel spectrogram """ device = mx.default_device() mx.set_default_device(mx.cpu) if not isinstance(audio, mx.array): if isinstance(audio, str): audio = load_audio(audio) audio = mx.array(audio) if padding > 0: audio = mx.pad(audio, (0, padding)) window = hanning(N_FFT) freqs = stft(audio, window, nperseg=N_FFT, noverlap=HOP_LENGTH) magnitudes = freqs[:-1, :].abs().square() filters = mel_filters(n_mels) mel_spec = magnitudes @ filters.T log_spec = mx.maximum(mel_spec, 1e-10).log10() log_spec = mx.maximum(log_spec, log_spec.max() - 8.0) log_spec = (log_spec + 4.0) / 4.0 mx.set_default_device(device) return log_spec class DecodingOptions: # whether to perform X->X "transcribe" or X->English "translate" task: str = "transcribe" # language that the audio is in; uses detected language if None language: Optional[str] = None # sampling-related options temperature: float = 0.0 sample_len: Optional[int] = None # maximum number of tokens to sample best_of: Optional[int] = None # number of independent sample trajectories, if t > 0 beam_size: Optional[int] = None # number of beams in beam search, if t == 0 patience: Optional[float] = None # patience in 
beam search (arxiv:2204.05424) # "alpha" in Google NMT, or None for length norm, when ranking generations # to select which to return among the beams or best-of-N samples length_penalty: Optional[float] = None # text or tokens to feed as the prompt or the prefix; for more info: # https://github.com/openai/whisper/discussions/117#discussioncomment-3727051 prompt: Optional[Union[str, List[int]]] = None # for the previous context prefix: Optional[Union[str, List[int]]] = None # to prefix the current context # list of tokens ids (or comma-separated token ids) to suppress # "-1" will suppress a set of symbols as defined in `tokenizer.non_speech_tokens()` suppress_tokens: Optional[Union[str, Iterable[int]]] = "-1" suppress_blank: bool = True # this will suppress blank outputs # timestamp sampling options without_timestamps: bool = False # use <|notimestamps|> to sample text tokens only max_initial_timestamp: Optional[float] = 1.0 # implementation details fp16: bool = True # use fp16 for most of the calculation class DecodingResult: audio_features: mx.array language: str language_probs: Optional[Dict[str, float]] = None tokens: List[int] = field(default_factory=list) text: str = "" avg_logprob: float = np.nan no_speech_prob: float = np.nan temperature: float = np.nan compression_ratio: float = np.nan def add_word_timestamps( *, segments: List[dict], model: "Whisper", tokenizer: Tokenizer, mel: mx.array, num_frames: int, prepend_punctuations: str = "\"'“¿([{-", append_punctuations: str = "\"'.。,,!!??::”)]}、", last_speech_timestamp: float, **kwargs, ): if len(segments) == 0: return text_tokens_per_segment = [ [token for token in segment["tokens"] if token < tokenizer.eot] for segment in segments ] text_tokens = list(itertools.chain.from_iterable(text_tokens_per_segment)) alignment = find_alignment(model, tokenizer, text_tokens, mel, num_frames, **kwargs) word_durations = np.array([t.end - t.start for t in alignment]) word_durations = word_durations[word_durations.nonzero()] 
median_duration = np.median(word_durations) if len(word_durations) > 0 else 0.0 median_duration = min(0.7, float(median_duration)) max_duration = median_duration * 2 # hack: truncate long words at sentence boundaries. # a better segmentation algorithm based on VAD should be able to replace this. if len(word_durations) > 0: sentence_end_marks = ".。!!??" # ensure words at sentence boundaries are not longer than twice the median word duration. for i in range(1, len(alignment)): if alignment[i].end - alignment[i].start > max_duration: if alignment[i].word in sentence_end_marks: alignment[i].end = alignment[i].start + max_duration elif alignment[i - 1].word in sentence_end_marks: alignment[i].start = alignment[i].end - max_duration merge_punctuations(alignment, prepend_punctuations, append_punctuations) time_offset = segments[0]["seek"] * HOP_LENGTH / SAMPLE_RATE word_index = 0 for segment, text_tokens in zip(segments, text_tokens_per_segment): saved_tokens = 0 words = [] while word_index < len(alignment) and saved_tokens < len(text_tokens): timing = alignment[word_index] if timing.word: words.append( dict( word=timing.word, start=round(time_offset + timing.start, 2), end=round(time_offset + timing.end, 2), probability=timing.probability, ) ) saved_tokens += len(timing.tokens) word_index += 1 # hack: truncate long words at segment boundaries. # a better segmentation algorithm based on VAD should be able to replace this. if len(words) > 0: # ensure the first and second word after a pause is not longer than # twice the median word duration. 
if words[0]["end"] - last_speech_timestamp > median_duration * 4 and ( words[0]["end"] - words[0]["start"] > max_duration or ( len(words) > 1 and words[1]["end"] - words[0]["start"] > max_duration * 2 ) ): if ( len(words) > 1 and words[1]["end"] - words[1]["start"] > max_duration ): boundary = max(words[1]["end"] / 2, words[1]["end"] - max_duration) words[0]["end"] = words[1]["start"] = boundary words[0]["start"] = max(0, words[0]["end"] - max_duration) # prefer the segment-level start timestamp if the first word is too long. if ( segment["start"] < words[0]["end"] and segment["start"] - 0.5 > words[0]["start"] ): words[0]["start"] = max( 0, min(words[0]["end"] - median_duration, segment["start"]) ) else: segment["start"] = words[0]["start"] # prefer the segment-level end timestamp if the last word is too long. if ( segment["end"] > words[-1]["start"] and segment["end"] + 0.5 < words[-1]["end"] ): words[-1]["end"] = max( words[-1]["start"] + median_duration, segment["end"] ) else: segment["end"] = words[-1]["end"] last_speech_timestamp = segment["end"] segment["words"] = words LANGUAGES = { "en": "english", "zh": "chinese", "de": "german", "es": "spanish", "ru": "russian", "ko": "korean", "fr": "french", "ja": "japanese", "pt": "portuguese", "tr": "turkish", "pl": "polish", "ca": "catalan", "nl": "dutch", "ar": "arabic", "sv": "swedish", "it": "italian", "id": "indonesian", "hi": "hindi", "fi": "finnish", "vi": "vietnamese", "he": "hebrew", "uk": "ukrainian", "el": "greek", "ms": "malay", "cs": "czech", "ro": "romanian", "da": "danish", "hu": "hungarian", "ta": "tamil", "no": "norwegian", "th": "thai", "ur": "urdu", "hr": "croatian", "bg": "bulgarian", "lt": "lithuanian", "la": "latin", "mi": "maori", "ml": "malayalam", "cy": "welsh", "sk": "slovak", "te": "telugu", "fa": "persian", "lv": "latvian", "bn": "bengali", "sr": "serbian", "az": "azerbaijani", "sl": "slovenian", "kn": "kannada", "et": "estonian", "mk": "macedonian", "br": "breton", "eu": "basque", "is": 
"icelandic", "hy": "armenian", "ne": "nepali", "mn": "mongolian", "bs": "bosnian", "kk": "kazakh", "sq": "albanian", "sw": "swahili", "gl": "galician", "mr": "marathi", "pa": "punjabi", "si": "sinhala", "km": "khmer", "sn": "shona", "yo": "yoruba", "so": "somali", "af": "afrikaans", "oc": "occitan", "ka": "georgian", "be": "belarusian", "tg": "tajik", "sd": "sindhi", "gu": "gujarati", "am": "amharic", "yi": "yiddish", "lo": "lao", "uz": "uzbek", "fo": "faroese", "ht": "haitian creole", "ps": "pashto", "tk": "turkmen", "nn": "nynorsk", "mt": "maltese", "sa": "sanskrit", "lb": "luxembourgish", "my": "myanmar", "bo": "tibetan", "tl": "tagalog", "mg": "malagasy", "as": "assamese", "tt": "tatar", "haw": "hawaiian", "ln": "lingala", "ha": "hausa", "ba": "bashkir", "jw": "javanese", "su": "sundanese", "yue": "cantonese", } def get_tokenizer( multilingual: bool, *, num_languages: int = 99, language: Optional[str] = None, task: Optional[str] = None, # Literal["transcribe", "translate", None] ) -> Tokenizer: if language is not None: language = language.lower() if language not in LANGUAGES: if language in TO_LANGUAGE_CODE: language = TO_LANGUAGE_CODE[language] else: raise ValueError(f"Unsupported language: {language}") if multilingual: encoding_name = "multilingual" language = language or "en" task = task or "transcribe" else: encoding_name = "gpt2" language = None task = None encoding = get_encoding(name=encoding_name, num_languages=num_languages) return Tokenizer( encoding=encoding, num_languages=num_languages, language=language, task=task ) The provided code snippet includes necessary dependencies for implementing the `transcribe` function. 
Write a Python function `def transcribe( audio: Union[str, np.ndarray, mx.array], *, path_or_hf_repo: str = "mlx-community/whisper-tiny", verbose: Optional[bool] = None, temperature: Union[float, Tuple[float, ...]] = (0.0, 0.2, 0.4, 0.6, 0.8, 1.0), compression_ratio_threshold: Optional[float] = 2.4, logprob_threshold: Optional[float] = -1.0, no_speech_threshold: Optional[float] = 0.6, condition_on_previous_text: bool = True, initial_prompt: Optional[str] = None, word_timestamps: bool = False, prepend_punctuations: str = "\"'“¿([{-", append_punctuations: str = "\"'.。,,!!??::”)]}、", clip_timestamps: Union[str, List[float]] = "0", hallucination_silence_threshold: Optional[float] = None, **decode_options, )` to solve the following problem: Transcribe an audio file using Whisper Parameters ---------- audio: Union[str, np.ndarray, mx.array] The path to the audio file to open, or the audio waveform path_or_hf_repo: str The localpath to the Whisper model or HF Hub repo with the MLX converted weights. verbose: bool Whether to display the text being decoded to the console. If True, displays all the details, If False, displays minimal details. If None, does not display anything temperature: Union[float, Tuple[float, ...]] Temperature for sampling. It can be a tuple of temperatures, which will be successively used upon failures according to either `compression_ratio_threshold` or `logprob_threshold`. 
compression_ratio_threshold: float If the gzip compression ratio is above this value, treat as failed logprob_threshold: float If the average log probability over sampled tokens is below this value, treat as failed no_speech_threshold: float If the no_speech probability is higher than this value AND the average log probability over sampled tokens is below `logprob_threshold`, consider the segment as silent condition_on_previous_text: bool if True, the previous output of the model is provided as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop, such as repetition looping or timestamps going out of sync. word_timestamps: bool Extract word-level timestamps using the cross-attention pattern and dynamic time warping, and include the timestamps for each word in each segment. prepend_punctuations: str If word_timestamps is True, merge these punctuation symbols with the next word append_punctuations: str If word_timestamps is True, merge these punctuation symbols with the previous word initial_prompt: Optional[str] Optional text to provide as a prompt for the first window. This can be used to provide, or "prompt-engineer" a context for transcription, e.g. custom vocabularies or proper nouns to make it more likely to predict those word correctly. decode_options: dict Keyword arguments to construct `DecodingOptions` instances clip_timestamps: Union[str, List[float]] Comma-separated list start,end,start,end,... timestamps (in seconds) of clips to process. The last end timestamp defaults to the end of the file. 
hallucination_silence_threshold: Optional[float] When word_timestamps is True, skip silent periods longer than this threshold (in seconds) when a possible hallucination is detected Returns ------- A dictionary containing the resulting text ("text") and segment-level details ("segments"), and the spoken language ("language"), which is detected when `decode_options["language"]` is None. Here is the function: def transcribe( audio: Union[str, np.ndarray, mx.array], *, path_or_hf_repo: str = "mlx-community/whisper-tiny", verbose: Optional[bool] = None, temperature: Union[float, Tuple[float, ...]] = (0.0, 0.2, 0.4, 0.6, 0.8, 1.0), compression_ratio_threshold: Optional[float] = 2.4, logprob_threshold: Optional[float] = -1.0, no_speech_threshold: Optional[float] = 0.6, condition_on_previous_text: bool = True, initial_prompt: Optional[str] = None, word_timestamps: bool = False, prepend_punctuations: str = "\"'“¿([{-", append_punctuations: str = "\"'.。,,!!??::”)]}、", clip_timestamps: Union[str, List[float]] = "0", hallucination_silence_threshold: Optional[float] = None, **decode_options, ): """ Transcribe an audio file using Whisper Parameters ---------- audio: Union[str, np.ndarray, mx.array] The path to the audio file to open, or the audio waveform path_or_hf_repo: str The localpath to the Whisper model or HF Hub repo with the MLX converted weights. verbose: bool Whether to display the text being decoded to the console. If True, displays all the details, If False, displays minimal details. If None, does not display anything temperature: Union[float, Tuple[float, ...]] Temperature for sampling. It can be a tuple of temperatures, which will be successively used upon failures according to either `compression_ratio_threshold` or `logprob_threshold`. 
compression_ratio_threshold: float If the gzip compression ratio is above this value, treat as failed logprob_threshold: float If the average log probability over sampled tokens is below this value, treat as failed no_speech_threshold: float If the no_speech probability is higher than this value AND the average log probability over sampled tokens is below `logprob_threshold`, consider the segment as silent condition_on_previous_text: bool if True, the previous output of the model is provided as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop, such as repetition looping or timestamps going out of sync. word_timestamps: bool Extract word-level timestamps using the cross-attention pattern and dynamic time warping, and include the timestamps for each word in each segment. prepend_punctuations: str If word_timestamps is True, merge these punctuation symbols with the next word append_punctuations: str If word_timestamps is True, merge these punctuation symbols with the previous word initial_prompt: Optional[str] Optional text to provide as a prompt for the first window. This can be used to provide, or "prompt-engineer" a context for transcription, e.g. custom vocabularies or proper nouns to make it more likely to predict those word correctly. decode_options: dict Keyword arguments to construct `DecodingOptions` instances clip_timestamps: Union[str, List[float]] Comma-separated list start,end,start,end,... timestamps (in seconds) of clips to process. The last end timestamp defaults to the end of the file. 
hallucination_silence_threshold: Optional[float] When word_timestamps is True, skip silent periods longer than this threshold (in seconds) when a possible hallucination is detected Returns ------- A dictionary containing the resulting text ("text") and segment-level details ("segments"), and the spoken language ("language"), which is detected when `decode_options["language"]` is None. """ dtype = mx.float16 if decode_options.get("fp16", True) else mx.float32 model = ModelHolder.get_model(path_or_hf_repo, dtype) # Pad 30-seconds of silence to the input audio, for slicing mel = log_mel_spectrogram(audio, n_mels=model.dims.n_mels, padding=N_SAMPLES) content_frames = mel.shape[-2] - N_FRAMES content_duration = float(content_frames * HOP_LENGTH / SAMPLE_RATE) if verbose: system_encoding = sys.getdefaultencoding() if system_encoding != "utf-8": make_safe = lambda x: x.encode(system_encoding, errors="replace").decode( system_encoding ) else: make_safe = lambda x: x if decode_options.get("language", None) is None: if not model.is_multilingual: decode_options["language"] = "en" else: if verbose: print( "Detecting language using up to the first 30 seconds. 
" "Use the `language` decoding option to specify the language" ) mel_segment = pad_or_trim(mel, N_FRAMES, axis=-2).astype(dtype) _, probs = model.detect_language(mel_segment) decode_options["language"] = max(probs, key=probs.get) if verbose is not None: print( f"Detected language: {LANGUAGES[decode_options['language']].title()}" ) language: str = decode_options["language"] task: str = decode_options.get("task", "transcribe") tokenizer = get_tokenizer( model.is_multilingual, num_languages=model.num_languages, language=language, task=task, ) if isinstance(clip_timestamps, str): clip_timestamps = [ float(ts) for ts in (clip_timestamps.split(",") if clip_timestamps else []) ] seek_points: List[int] = [round(ts * FRAMES_PER_SECOND) for ts in clip_timestamps] if len(seek_points) == 0: seek_points.append(0) if len(seek_points) % 2 == 1: seek_points.append(content_frames) seek_clips: List[Tuple[int, int]] = list(zip(seek_points[::2], seek_points[1::2])) punctuation = "\"'“¿([{-\"'.。,,!!??::”)]}、" if word_timestamps and task == "translate": warnings.warn("Word-level timestamps on translations may not be reliable.") def decode_with_fallback(segment: mx.array) -> DecodingResult: temperatures = ( [temperature] if isinstance(temperature, (int, float)) else temperature ) decode_result = None for t in temperatures: kwargs = {**decode_options} if t > 0: # disable beam_size and patience when t > 0 kwargs.pop("beam_size", None) kwargs.pop("patience", None) else: # disable best_of when t == 0 kwargs.pop("best_of", None) options = DecodingOptions(**kwargs, temperature=t) decode_result = model.decode(segment, options) needs_fallback = False if ( compression_ratio_threshold is not None and decode_result.compression_ratio > compression_ratio_threshold ): needs_fallback = True # too repetitive if ( logprob_threshold is not None and decode_result.avg_logprob < logprob_threshold ): needs_fallback = True # average log probability is too low if ( no_speech_threshold is not None and 
decode_result.no_speech_prob > no_speech_threshold ): needs_fallback = False # silence if not needs_fallback: break return decode_result clip_idx = 0 seek = seek_clips[clip_idx][0] input_stride = N_FRAMES // model.dims.n_audio_ctx # mel frames per output token: 2 time_precision = ( input_stride * HOP_LENGTH / SAMPLE_RATE ) # time per output token: 0.02 (seconds) all_tokens = [] all_segments = [] prompt_reset_since = 0 if initial_prompt is not None: initial_prompt_tokens = tokenizer.encode(" " + initial_prompt.strip()) all_tokens.extend(initial_prompt_tokens) else: initial_prompt_tokens = [] def new_segment( *, start: float, end: float, tokens: mx.array, result: DecodingResult ): tokens = tokens.tolist() text_tokens = [token for token in tokens if token < tokenizer.eot] return { "seek": seek, "start": start, "end": end, "text": tokenizer.decode(text_tokens), "tokens": tokens, "temperature": result.temperature, "avg_logprob": result.avg_logprob, "compression_ratio": result.compression_ratio, "no_speech_prob": result.no_speech_prob, } # show the progress bar when verbose is False (if True, transcribed text will be printed) with tqdm.tqdm( total=content_frames, unit="frames", disable=verbose is not False ) as pbar: last_speech_timestamp = 0.0 # NOTE: This loop is obscurely flattened to make the diff readable. # A later commit should turn this into a simpler nested loop. 
# for seek_clip_start, seek_clip_end in seek_clips: # while seek < seek_clip_end while clip_idx < len(seek_clips): seek_clip_start, seek_clip_end = seek_clips[clip_idx] if seek < seek_clip_start: seek = seek_clip_start if seek >= seek_clip_end: clip_idx += 1 if clip_idx < len(seek_clips): seek = seek_clips[clip_idx][0] continue time_offset = float(seek * HOP_LENGTH / SAMPLE_RATE) window_end_time = float((seek + N_FRAMES) * HOP_LENGTH / SAMPLE_RATE) segment_size = min(N_FRAMES, content_frames - seek, seek_clip_end - seek) mel_segment = mel[seek : seek + segment_size] segment_duration = segment_size * HOP_LENGTH / SAMPLE_RATE mel_segment = pad_or_trim(mel_segment, N_FRAMES, axis=-2).astype(dtype) decode_options["prompt"] = all_tokens[prompt_reset_since:] result: DecodingResult = decode_with_fallback(mel_segment) tokens = np.array(result.tokens) if no_speech_threshold is not None: # no voice activity check should_skip = result.no_speech_prob > no_speech_threshold if ( logprob_threshold is not None and result.avg_logprob > logprob_threshold ): # don't skip if the logprob is high enough, despite the no_speech_prob should_skip = False if should_skip: seek += segment_size # fast-forward to the next segment boundary continue previous_seek = seek current_segments = [] # anomalous words are very long/short/improbable def word_anomaly_score(word: dict) -> float: probability = word.get("probability", 0.0) duration = word["end"] - word["start"] score = 0.0 if probability < 0.15: score += 1.0 if duration < 0.133: score += (0.133 - duration) * 15 if duration > 2.0: score += duration - 2.0 return score def is_segment_anomaly(segment: Optional[dict]) -> bool: if segment is None or not segment["words"]: return False words = [w for w in segment["words"] if w["word"] not in punctuation] words = words[:8] score = sum(word_anomaly_score(w) for w in words) return score >= 3 or score + 0.01 >= len(words) def next_words_segment(segments: List[dict]) -> Optional[dict]: return next((s for s 
in segments if s["words"]), None) timestamp_tokens = tokens >= tokenizer.timestamp_begin single_timestamp_ending = timestamp_tokens[-2:].tolist() == [False, True] consecutive = np.where( np.logical_and(timestamp_tokens[:-1], timestamp_tokens[1:]) )[0] consecutive += 1 if len(consecutive) > 0: # if the output contains two consecutive timestamp tokens slices = consecutive.tolist() if single_timestamp_ending: slices.append(len(tokens)) last_slice = 0 for current_slice in slices: sliced_tokens = tokens[last_slice:current_slice] start_timestamp_pos = ( sliced_tokens[0].item() - tokenizer.timestamp_begin ) end_timestamp_pos = ( sliced_tokens[-1].item() - tokenizer.timestamp_begin ) current_segments.append( new_segment( start=time_offset + start_timestamp_pos * time_precision, end=time_offset + end_timestamp_pos * time_precision, tokens=sliced_tokens, result=result, ) ) last_slice = current_slice if single_timestamp_ending: # single timestamp at the end means no speech after the last timestamp. seek += segment_size else: # otherwise, ignore the unfinished segment and seek to the last timestamp last_timestamp_pos = ( tokens[last_slice - 1].item() - tokenizer.timestamp_begin ) seek += last_timestamp_pos * input_stride else: duration = segment_duration timestamps = tokens[timestamp_tokens.nonzero()[0]] if ( len(timestamps) > 0 and timestamps[-1].item() != tokenizer.timestamp_begin ): # no consecutive timestamps but it has a timestamp; use the last one. 
last_timestamp_pos = ( timestamps[-1].item() - tokenizer.timestamp_begin ) duration = last_timestamp_pos * time_precision current_segments.append( new_segment( start=time_offset, end=time_offset + duration, tokens=tokens, result=result, ) ) seek += segment_size if word_timestamps: add_word_timestamps( segments=current_segments, model=model, tokenizer=tokenizer, mel=mel_segment, num_frames=segment_size, prepend_punctuations=prepend_punctuations, append_punctuations=append_punctuations, last_speech_timestamp=last_speech_timestamp, ) if not single_timestamp_ending: last_word_end = _get_end(current_segments) if last_word_end is not None and last_word_end > time_offset: seek = round(last_word_end * FRAMES_PER_SECOND) # skip silence before possible hallucinations if hallucination_silence_threshold is not None: threshold = hallucination_silence_threshold if not single_timestamp_ending: last_word_end = _get_end(current_segments) if last_word_end is not None and last_word_end > time_offset: remaining_duration = window_end_time - last_word_end if remaining_duration > threshold: seek = round(last_word_end * FRAMES_PER_SECOND) else: seek = previous_seek + segment_size # if first segment might be a hallucination, skip leading silence first_segment = next_words_segment(current_segments) if first_segment is not None and is_segment_anomaly(first_segment): gap = first_segment["start"] - time_offset if gap > threshold: seek = previous_seek + round(gap * FRAMES_PER_SECOND) continue # skip silence before any possible hallucination that is surrounded # by silence or more hallucinations hal_last_end = last_speech_timestamp for si in range(len(current_segments)): segment = current_segments[si] if not segment["words"]: continue if is_segment_anomaly(segment): next_segment = next_words_segment( current_segments[si + 1 :] ) if next_segment is not None: hal_next_start = next_segment["words"][0]["start"] else: hal_next_start = time_offset + segment_duration silence_before = ( segment["start"] 
- hal_last_end > threshold or segment["start"] < threshold or segment["start"] - time_offset < 2.0 ) silence_after = ( hal_next_start - segment["end"] > threshold or is_segment_anomaly(next_segment) or window_end_time - segment["end"] < 2.0 ) if silence_before and silence_after: seek = round( max(time_offset + 1, segment["start"]) * FRAMES_PER_SECOND ) if content_duration - segment["end"] < threshold: seek = content_frames current_segments[si:] = [] break hal_last_end = segment["end"] last_word_end = _get_end(current_segments) if last_word_end is not None: last_speech_timestamp = last_word_end if verbose: for segment in current_segments: start, end, text = segment["start"], segment["end"], segment["text"] line = f"[{_format_timestamp(start)} --> {_format_timestamp(end)}] {text}" print(make_safe(line)) # if a segment is instantaneous or does not contain text, clear it for i, segment in enumerate(current_segments): if segment["start"] == segment["end"] or segment["text"].strip() == "": segment["text"] = "" segment["tokens"] = [] segment["words"] = [] all_segments.extend( [ {"id": i, **segment} for i, segment in enumerate( current_segments, start=len(all_segments) ) ] ) all_tokens.extend( [token for segment in current_segments for token in segment["tokens"]] ) if not condition_on_previous_text or result.temperature > 0.5: # do not feed the prompt tokens if a high temperature was used prompt_reset_since = len(all_tokens) # update progress bar pbar.update(min(content_frames, seek) - previous_seek) return dict( text=tokenizer.decode(all_tokens[len(initial_prompt_tokens) :]), segments=all_segments, language=language, )
Transcribe an audio file using Whisper Parameters ---------- audio: Union[str, np.ndarray, mx.array] The path to the audio file to open, or the audio waveform path_or_hf_repo: str The localpath to the Whisper model or HF Hub repo with the MLX converted weights. verbose: bool Whether to display the text being decoded to the console. If True, displays all the details, If False, displays minimal details. If None, does not display anything temperature: Union[float, Tuple[float, ...]] Temperature for sampling. It can be a tuple of temperatures, which will be successively used upon failures according to either `compression_ratio_threshold` or `logprob_threshold`. compression_ratio_threshold: float If the gzip compression ratio is above this value, treat as failed logprob_threshold: float If the average log probability over sampled tokens is below this value, treat as failed no_speech_threshold: float If the no_speech probability is higher than this value AND the average log probability over sampled tokens is below `logprob_threshold`, consider the segment as silent condition_on_previous_text: bool if True, the previous output of the model is provided as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop, such as repetition looping or timestamps going out of sync. word_timestamps: bool Extract word-level timestamps using the cross-attention pattern and dynamic time warping, and include the timestamps for each word in each segment. prepend_punctuations: str If word_timestamps is True, merge these punctuation symbols with the next word append_punctuations: str If word_timestamps is True, merge these punctuation symbols with the previous word initial_prompt: Optional[str] Optional text to provide as a prompt for the first window. This can be used to provide, or "prompt-engineer" a context for transcription, e.g. 
custom vocabularies or proper nouns to make it more likely to predict those words correctly. decode_options: dict Keyword arguments to construct `DecodingOptions` instances clip_timestamps: Union[str, List[float]] Comma-separated list start,end,start,end,... timestamps (in seconds) of clips to process. The last end timestamp defaults to the end of the file. hallucination_silence_threshold: Optional[float] When word_timestamps is True, skip silent periods longer than this threshold (in seconds) when a possible hallucination is detected Returns ------- A dictionary containing the resulting text ("text") and segment-level details ("segments"), and the spoken language ("language"), which is detected when `decode_options["language"]` is None.
17,828
import json from pathlib import Path import mlx.core as mx import mlx.nn as nn from huggingface_hub import snapshot_download from mlx.utils import tree_unflatten from . import whisper def load_model( path_or_hf_repo: str, dtype: mx.Dtype = mx.float32, ) -> whisper.Whisper: model_path = Path(path_or_hf_repo) if not model_path.exists(): model_path = Path(snapshot_download(repo_id=path_or_hf_repo)) with open(str(model_path / "config.json"), "r") as f: config = json.loads(f.read()) config.pop("model_type", None) quantization = config.pop("quantization", None) model_args = whisper.ModelDimensions(**config) weights = mx.load(str(model_path / "weights.npz")) weights = tree_unflatten(list(weights.items())) model = whisper.Whisper(model_args, dtype) if quantization is not None: nn.QuantizedLinear.quantize_module(model, **quantization) model.update(weights) mx.eval(model.parameters()) return model
null
17,829
argparse import os import subprocess import sys import time import mlx.core as mx from whisper import audio, decoding, load_models, transcribe def parse_arguments(): parser = argparse.ArgumentParser(description="Benchmark script.") parser.add_argument( "--mlx-dir", type=str, default="mlx_models", help="The folder of MLX models", ) parser.add_argument( "--all", action="store_true", help="Use all available models, i.e. tiny,small,medium,large-v3", ) parser.add_argument( "-m", "--models", type=str, help="Specify models as a comma-separated list (e.g., tiny,small,medium)", ) return parser.parse_args()
null
17,830
import os import subprocess import sys import time import mlx.core as mx from whisper import audio, decoding, load_models, transcribe def timer(fn, *args): for _ in range(5): fn(*args) num_its = 10 tic = time.perf_counter() for _ in range(num_its): fn(*args) toc = time.perf_counter() return (toc - tic) / num_its
null
17,831
import os import subprocess import sys import time import mlx.core as mx from whisper import audio, decoding, load_models, transcribe audio_file = "whisper/assets/ls_test.flac" def feats(n_mels: int = 80): data = audio.load_audio(audio_file) data = audio.pad_or_trim(data) mels = audio.log_mel_spectrogram(data, n_mels) mx.eval(mels) return mels
null
17,832
import os import subprocess import sys import time import mlx.core as mx from whisper import audio, decoding, load_models, transcribe def model_forward(model, mels, tokens): logits = model(mels, tokens) mx.eval(logits) return logits
null
17,833
import os import subprocess import sys import time import mlx.core as mx from whisper import audio, decoding, load_models, transcribe def decode(model, mels): return decoding.decode(model, mels)
null
17,834
import os import subprocess import sys import time import mlx.core as mx from whisper import audio, decoding, load_models, transcribe audio_file = "whisper/assets/ls_test.flac" def everything(model_path): return transcribe(audio_file, path_or_hf_repo=model_path)
null
17,835
import argparse import time from functools import partial from pathlib import Path import dataset import mlx.core as mx import mlx.nn as nn import mlx.optimizers as optim import numpy as np import vae from mlx.utils import tree_flatten from PIL import Image def loss_fn(model, X): X_recon, mu, logvar = model(X) # Reconstruction loss recon_loss = nn.losses.mse_loss(X_recon, X, reduction="sum") # KL divergence between encoder distribution and standard normal: kl_div = -0.5 * mx.sum(1 + logvar - mu.square() - logvar.exp()) # Total loss return recon_loss + kl_div
null
17,836
import argparse import time from functools import partial from pathlib import Path import dataset import mlx.core as mx import mlx.nn as nn import mlx.optimizers as optim import numpy as np import vae from mlx.utils import tree_flatten from PIL import Image def grid_image_from_batch(image_batch, num_rows): """ Generate a grid image from a batch of images. Assumes input has shape (B, H, W, C). """ B, H, W, _ = image_batch.shape num_cols = B // num_rows # Calculate the size of the output grid image grid_height = num_rows * H grid_width = num_cols * W # Normalize and convert to the desired data type image_batch = np.array(image_batch * 255).astype(np.uint8) # Reshape the batch of images into a 2D grid grid_image = image_batch.reshape(num_rows, num_cols, H, W, -1) grid_image = grid_image.swapaxes(1, 2) grid_image = grid_image.reshape(grid_height, grid_width, -1) # Convert the grid to a PIL Image return Image.fromarray(grid_image.squeeze()) def reconstruct(model, batch, out_file): # Reconstruct a single batch only images = mx.array(batch["image"]) images_recon = model(images)[0] paired_images = mx.stack([images, images_recon]).swapaxes(0, 1).flatten(0, 1) grid_image = grid_image_from_batch(paired_images, num_rows=16) grid_image.save(out_file)
null
17,837
import argparse import time from functools import partial from pathlib import Path import dataset import mlx.core as mx import mlx.nn as nn import mlx.optimizers as optim import numpy as np import vae from mlx.utils import tree_flatten from PIL import Image def grid_image_from_batch(image_batch, num_rows): """ Generate a grid image from a batch of images. Assumes input has shape (B, H, W, C). """ B, H, W, _ = image_batch.shape num_cols = B // num_rows # Calculate the size of the output grid image grid_height = num_rows * H grid_width = num_cols * W # Normalize and convert to the desired data type image_batch = np.array(image_batch * 255).astype(np.uint8) # Reshape the batch of images into a 2D grid grid_image = image_batch.reshape(num_rows, num_cols, H, W, -1) grid_image = grid_image.swapaxes(1, 2) grid_image = grid_image.reshape(grid_height, grid_width, -1) # Convert the grid to a PIL Image return Image.fromarray(grid_image.squeeze()) def generate( model, out_file, num_samples=128, ): # Sample from the latent distribution: z = mx.random.normal([num_samples, model.num_latent_dims]) # Decode the latent vectors to images: images = model.decode(z) # Save all images in a single file grid_image = grid_image_from_batch(images, num_rows=8) grid_image.save(out_file)
null
17,838
import math import mlx.core as mx import mlx.nn as nn def upsample_nearest(x, scale: int = 2): B, H, W, C = x.shape x = mx.broadcast_to(x[:, :, None, :, None, :], (B, H, scale, W, scale, C)) x = x.reshape(B, H * scale, W * scale, C) return x
null
17,839
from mlx.data.datasets import load_mnist def mnist(batch_size, img_size, root=None): # load train and test sets using mlx-data load_fn = load_mnist tr = load_fn(root=root, train=True) test = load_fn(root=root, train=False) # number of image channels is 1 for MNIST num_img_channels = 1 # normalize to [0,1] def normalize(x): return x.astype("float32") / 255.0 # iterator over training set tr_iter = ( tr.shuffle() .to_stream() .image_resize("image", h=img_size[0], w=img_size[1]) .key_transform("image", normalize) .batch(batch_size) .prefetch(4, 4) ) # iterator over test set test_iter = ( test.to_stream() .image_resize("image", h=img_size[0], w=img_size[1]) .key_transform("image", normalize) .batch(batch_size) ) return tr_iter, test_iter
null
17,840
import math import time from functools import partial import datasets import mlx.core as mx import mlx.nn as nn import mlx.optimizers as optim import numpy as np from mlx.utils import tree_flatten def to_samples(context_size, dataset): tokens = dataset.size window_size = context_size + 1 # include target samples = tokens - window_size + 1 X = np.lib.stride_tricks.as_strided( dataset, shape=(samples, window_size), strides=(dataset.itemsize, dataset.itemsize), ) return X[:, :-1], X[:, 1:] def iterate_batches(batch_size, context_size, dataset): inputs, targets = to_samples(context_size, dataset) s = 0 while True: if s == 0: # Reset permutation: perm = np.random.permutation(inputs.shape[0]) ids = perm[s : s + batch_size] yield inputs[ids], targets[ids] s += batch_size if s >= inputs.shape[0]: s = 0
null
17,841
import io import itertools import os import zipfile from urllib import request import numpy as np def wikitext(dataset="2", save_dir="/tmp"): def ptb(save_dir="/tmp"): def load_dataset(dataname): if dataname == "ptb": return ptb() elif dataname == "wikitext2": return wikitext(dataset="2") else: return wikitext(dataset="103")
null
17,842
import argparse import time from functools import partial import mlx.core as mx import mlx.nn as nn import mlx.optimizers as optim import numpy as np import mnist def loss_fn(model, X, y): return nn.losses.cross_entropy(model(X), y, reduction="mean")
null
17,843
import argparse import time from functools import partial import mlx.core as mx import mlx.nn as nn import mlx.optimizers as optim import numpy as np import mnist def batch_iterate(batch_size, X, y): perm = mx.array(np.random.permutation(y.size)) for s in range(0, y.size, batch_size): ids = perm[s : s + batch_size] yield X[ids], y[ids]
null
17,844
import gzip import os import pickle from urllib import request import numpy as np def mnist( save_dir="/tmp", base_url="http://yann.lecun.com/exdb/mnist/", filename="mnist.pkl" ): def fashion_mnist(save_dir="/tmp"): return mnist( save_dir, base_url="http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/", filename="fashion_mnist.pkl", )
null
17,845
import argparse from time import perf_counter_ns from typing import List, Optional, Tuple import mlx.core as mx import mlx.nn as nn import numpy as np from mlx.utils import tree_map, tree_unflatten from transformers import AutoTokenizer, T5Config The provided code snippet includes necessary dependencies for implementing the `_relative_position_bucket` function. Write a Python function `def _relative_position_bucket( relative_position, bidirectional=True, num_buckets=32, max_distance=128 )` to solve the following problem: Adapted from HF Tensorflow: https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) Here is the function: def _relative_position_bucket( relative_position, bidirectional=True, num_buckets=32, max_distance=128 ): """ Adapted from HF Tensorflow: https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. 
the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets) """ relative_buckets = 0 if bidirectional: num_buckets //= 2 relative_buckets += (relative_position > 0).astype(mx.int16) * num_buckets relative_position = mx.abs(relative_position) else: relative_position = -mx.minimum( relative_position, mx.zeros_like(relative_position) ) # now relative_position is in the range [0, inf) # half of the buckets are for exact increments in positions max_exact = num_buckets // 2 is_small = relative_position < max_exact # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance scale = (num_buckets - max_exact) / np.log(max_distance / max_exact) relative_position_if_large = max_exact + ( mx.log(relative_position.astype(mx.float32) / max_exact) * scale ).astype(mx.int16) relative_position_if_large = mx.minimum(relative_position_if_large, num_buckets - 1) relative_buckets += mx.where( is_small, relative_position, relative_position_if_large ) return relative_buckets
Adapted from HF Tensorflow: https://github.com/huggingface/transformers/blob/main/src/transformers/models/t5/modeling_t5.py Translate relative position to a bucket number for relative attention. The relative position is defined as memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for small absolute relative_position and larger buckets for larger absolute relative_positions. All relative positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket. This should allow for more graceful generalization to longer sequences than the model has been trained on Args: relative_position: an int32 Tensor bidirectional: a boolean - whether the attention is bidirectional num_buckets: an integer max_distance: an integer Returns: a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
17,846
import argparse from time import perf_counter_ns from typing import List, Optional, Tuple import mlx.core as mx import mlx.nn as nn import numpy as np from mlx.utils import tree_map, tree_unflatten from transformers import AutoTokenizer, T5Config class T5(nn.Module): def __init__(self, config: T5Config): def encode(self, inputs: mx.array): def decode( self, inputs: mx.array, memory: mx.array, cache=None, ): def __call__( self, inputs: mx.array, decoder_inputs: mx.array, ): class Tokenizer: def __init__(self, config: T5Config): def eos_id(self) -> int: def decoder_start_id(self) -> int: def encode(self, s: str) -> mx.array: def decode(self, t: List[int], with_sep: bool = True) -> str: def generate(prompt: str, model: T5, tokenizer: Tokenizer, temp: Optional[float] = 0.0): def sample(logits): if temp == 0: return mx.argmax(logits, axis=-1) else: return mx.random.categorical(logits * (1 / temp)) prompt = tokenizer.encode(prompt) decoder_inputs = mx.array([tokenizer.decoder_start_id]) memory = model.encode(prompt) cache = None y = decoder_inputs while True: logits, cache = model.decode(y[None], memory, cache=cache) y = sample(logits[:, -1, :]) yield y.squeeze()
null
17,847
import argparse from time import perf_counter_ns from typing import List, Optional, Tuple import mlx.core as mx import mlx.nn as nn import numpy as np from mlx.utils import tree_map, tree_unflatten from transformers import AutoTokenizer, T5Config class T5(nn.Module): def __init__(self, config: T5Config): self.wte = nn.Embedding(config.vocab_size, config.d_model) self.encoder = TransformerEncoder(config) self.decoder = TransformerDecoder(config) self.tie_word_embeddings = config.tie_word_embeddings if not self.tie_word_embeddings: self.lm_head = OutputHead(config) self.model_dim = config.d_model def encode(self, inputs: mx.array): return self.encoder(self.wte(inputs)) def decode( self, inputs: mx.array, memory: mx.array, cache=None, ): inputs = self.wte(inputs) T = inputs.shape[1] if T > 1: mask = nn.MultiHeadAttention.create_additive_causal_mask(T) mask = mask.astype(inputs.dtype) else: mask = None y, cache = self.decoder( inputs, memory=memory, mask=mask, memory_mask=None, cache=cache ) if not self.tie_word_embeddings: y = self.lm_head(y) else: y *= self.model_dim**-0.5 y = y @ self.wte.weight.T return y, cache def __call__( self, inputs: mx.array, decoder_inputs: mx.array, ): return self.decode(decoder_inputs, self.encode(inputs))[0] class Tokenizer: def __init__(self, config: T5Config): self._decoder_start_id = config.decoder_start_token_id self._tokenizer = AutoTokenizer.from_pretrained( args.model, legacy=False, model_max_length=getattr(config, "n_positions", 512), ) def eos_id(self) -> int: return self._tokenizer.eos_token_id def decoder_start_id(self) -> int: return self._decoder_start_id def encode(self, s: str) -> mx.array: return mx.array( self._tokenizer( s, return_tensors="np", return_attention_mask=False, )["input_ids"] ) def decode(self, t: List[int], with_sep: bool = True) -> str: tokens = self._tokenizer.convert_ids_to_tokens(t) return "".join(t.replace("▁", " " if with_sep else "") for t in tokens) def load_model(model_name: str, dtype: str = 
"float16"): config = T5Config.from_pretrained(args.model) dtype = getattr(mx, dtype) model = T5(config) file_name = model_name.replace("/", "-") weights = mx.load(f"{file_name}.npz") weights = tree_unflatten(list(weights.items())) weights = tree_map(lambda p: p.astype(dtype), weights) model.update(weights) mx.eval(model.parameters()) return model, Tokenizer(config)
null
17,848
import argparse from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, T5EncoderModel def embed(t5_model: str): batch = [ "translate English to German: That is good.", "This is an example of T5 working on MLX.", ] tokenizer = AutoTokenizer.from_pretrained(t5_model) torch_model = T5EncoderModel.from_pretrained(t5_model) torch_tokens = tokenizer(batch, return_tensors="pt", padding=True) torch_forward = torch_model(**torch_tokens, output_hidden_states=True) torch_output = torch_forward.last_hidden_state.detach().numpy() print("\n TF BERT:") for input_str, embedding in list(zip(batch, torch_output)): print("Input:", input_str) print(embedding) print()
null
17,849
import argparse from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, T5EncoderModel def generate(t5_model: str): prompt = "translate English to German: As much as six inches of rain could fall in the New York City region through Monday morning, and officials warned of flooding along the coast." tokenizer = AutoTokenizer.from_pretrained(t5_model) torch_model = AutoModelForSeq2SeqLM.from_pretrained(t5_model) torch_tokens = tokenizer(prompt, return_tensors="pt", padding=True).input_ids outputs = torch_model.generate(torch_tokens, do_sample=False, max_length=512) print(tokenizer.decode(outputs[0], skip_special_tokens=True))
null
17,851
import argparse import numpy from transformers import BertModel def replace_key(key: str) -> str: key = key.replace(".layer.", ".layers.") key = key.replace(".self.key.", ".key_proj.") key = key.replace(".self.query.", ".query_proj.") key = key.replace(".self.value.", ".value_proj.") key = key.replace(".attention.output.dense.", ".attention.out_proj.") key = key.replace(".attention.output.LayerNorm.", ".ln1.") key = key.replace(".output.LayerNorm.", ".ln2.") key = key.replace(".intermediate.dense.", ".linear1.") key = key.replace(".output.dense.", ".linear2.") key = key.replace(".LayerNorm.", ".norm.") key = key.replace("pooler.dense.", "pooler.") return key def convert(bert_model: str, mlx_model: str) -> None: model = BertModel.from_pretrained(bert_model) # save the tensors tensors = { replace_key(key): tensor.numpy() for key, tensor in model.state_dict().items() } numpy.savez(mlx_model, **tensors)
null
17,852
import argparse from dataclasses import dataclass from pathlib import Path from typing import List, Optional, Tuple import mlx.core as mx import mlx.nn as nn import numpy import numpy as np from mlx.utils import tree_unflatten from transformers import BertTokenizer def load_model(bert_model: str, weights_path: str) -> Tuple[Bert, BertTokenizer]: if not Path(weights_path).exists(): raise ValueError(f"No model weights found in {weights_path}") # create and update the model model = Bert(model_configs[bert_model]) model.load_weights(weights_path) tokenizer = BertTokenizer.from_pretrained(bert_model) return model, tokenizer def run(bert_model: str, mlx_model: str, batch: List[str]): model, tokenizer = load_model(bert_model, mlx_model) tokens = tokenizer(batch, return_tensors="np", padding=True) tokens = {key: mx.array(v) for key, v in tokens.items()} return model(**tokens)
null
17,853
from typing import Tuple from image_processor import CLIPImageProcessor from model import CLIPModel from tokenizer import CLIPTokenizer class CLIPImageProcessor: """ A simple port of https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/image_processing_clip.py. """ def __init__( self, crop_size: int = 224, do_center_crop: bool = True, do_normalize: bool = True, do_resize: bool = True, image_mean: List[float] = [0.48145466, 0.4578275, 0.40821073], image_std: List[float] = [0.26862954, 0.26130258, 0.27577711], size: int = 224, **kwargs ) -> None: self.crop_size = crop_size self.do_center_crop = do_center_crop self.do_normalize = do_normalize self.do_resize = do_resize self.image_mean = mx.array(image_mean) self.image_std = mx.array(image_std) self.size = size def __call__(self, images: List[Image]) -> mx.array: return mx.concatenate( [self._preprocess(image)[None] for image in images], axis=0 ) def _preprocess(self, image: Image) -> mx.array: if self.do_resize: image = resize(image, self.size) if self.do_center_crop: image = center_crop(image, (self.crop_size, self.crop_size)) image = mx.array(np.array(image)) image = rescale(image) if self.do_normalize: image = normalize(image, self.image_mean, self.image_std) return image def from_pretrained(path: str): path = Path(path) with open(path / "preprocessor_config.json", encoding="utf-8") as f: config = json.load(f) return CLIPImageProcessor(**config) class CLIPModel(nn.Module): def __init__(self, config: CLIPConfig): self.text_model = ClipTextModel(config.text_config) self.vision_model = ClipVisionModel(config.vision_config) text_embed_dim = config.text_config.hidden_size vision_embed_dim = config.vision_config.hidden_size projection_dim = config.projection_dim self.visual_projection = nn.Linear(vision_embed_dim, projection_dim, bias=False) self.text_projection = nn.Linear(text_embed_dim, projection_dim, bias=False) self.logit_scale = mx.array(0.0) def get_text_features(self, x: mx.array) 
-> mx.array: return self.text_projection(self.text_model(x).pooler_output) def get_image_features(self, x: mx.array) -> mx.array: return self.visual_projection(self.vision_model(x).pooler_output) def __call__( self, input_ids: Optional[mx.array] = None, pixel_values: Optional[mx.array] = None, return_loss=False, ) -> CLIPModelOutput: if input_ids is not None: text_model_output = self.text_model(input_ids) text_embeds = self.text_projection(text_model_output.pooler_output) text_embeds = text_embeds / LA.norm(text_embeds, axis=-1, keepdims=True) else: text_embeds = None text_model_output = None if pixel_values is not None: vision_model_output = self.vision_model(pixel_values) image_embeds = self.visual_projection(vision_model_output.pooler_output) image_embeds = image_embeds / LA.norm(image_embeds, axis=-1, keepdims=True) else: image_embeds = None vision_model_output = None if return_loss and (input_ids is None or pixel_values is None): raise ValueError("Must provide text and image inputs to compute loss.") if return_loss: logit_scale = mx.exp(self.logit_scale) logits = (text_embeds @ image_embeds.T) * logit_scale loss = clip_loss(logits) else: loss = None return CLIPModelOutput( loss=loss, text_embeds=text_embeds, image_embeds=image_embeds, vision_model_output=vision_model_output, text_model_output=text_model_output, ) def from_pretrained(path: str): path = Path(path) with open(path / "config.json", "r") as fid: config = json.load(fid) text_config = config["text_config"] text_config = CLIPTextConfig( num_hidden_layers=text_config["num_hidden_layers"], hidden_size=text_config["hidden_size"], intermediate_size=text_config["intermediate_size"], num_attention_heads=text_config["num_attention_heads"], max_position_embeddings=text_config["max_position_embeddings"], vocab_size=text_config["vocab_size"], layer_norm_eps=text_config["layer_norm_eps"], ) vision_config = config["vision_config"] vision_config = CLIPVisionConfig( 
num_hidden_layers=vision_config["num_hidden_layers"], hidden_size=vision_config["hidden_size"], intermediate_size=vision_config["intermediate_size"], num_attention_heads=vision_config["num_attention_heads"], num_channels=3, image_size=vision_config["image_size"], patch_size=vision_config["patch_size"], layer_norm_eps=vision_config["layer_norm_eps"], ) config = CLIPConfig( text_config=text_config, vision_config=vision_config, projection_dim=config["projection_dim"], ) model = CLIPModel(config) weight_files = glob.glob(str(path / "*.safetensors")) if not weight_files: logging.error(f"No safetensors found in {path}") raise FileNotFoundError(f"No safetensors found in {path}") weights = {} for wf in weight_files: weights.update(mx.load(wf)) weights = model.sanitize(weights) model.load_weights(list(weights.items())) return model def sanitize(weights): sanitized_weights = {} for k, v in weights.items(): if "position_ids" in k: # Remove unused position_ids continue elif "patch_embedding.weight" in k: # pytorch conv2d expects the weight tensor to be of shape [out_channels, in_channels, kH, KW] # mlx conv2d expects the weight tensor to be of shape [out_channels, kH, KW, in_channels] sanitized_weights[k] = v.transpose(0, 2, 3, 1) else: sanitized_weights[k] = v return sanitized_weights class CLIPTokenizer: """A simple port of CLIPTokenizer from https://github.com/huggingface/transformers/ .""" def __init__(self, bpe_ranks, vocab): self.bpe_ranks = bpe_ranks self.vocab = vocab self.pat = regex.compile( r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", regex.IGNORECASE, ) self._cache = {self.bos: self.bos, self.eos: self.eos} def bos(self): return "<|startoftext|>" def bos_token(self): return self.vocab[self.bos] def eos(self): return "<|endoftext|>" def eos_token(self): return self.vocab[self.eos] def bpe(self, text): if text in self._cache: return self._cache[text] unigrams = list(text[:-1]) + [text[-1] + "</w>"] unique_bigrams 
= set(zip(unigrams, unigrams[1:])) if not unique_bigrams: return unigrams # In every iteration try to merge the two most likely bigrams. If none # was merged we are done. # # Ported from https://github.com/huggingface/transformers/blob/main/src/transformers/models/clip/tokenization_py while unique_bigrams: bigram = min( unique_bigrams, key=lambda pair: self.bpe_ranks.get(pair, float("inf")) ) if bigram not in self.bpe_ranks: break new_unigrams = [] skip = False for a, b in zip(unigrams, unigrams[1:]): if skip: skip = False continue if (a, b) == bigram: new_unigrams.append(a + b) skip = True else: new_unigrams.append(a) if not skip: new_unigrams.append(b) unigrams = new_unigrams unique_bigrams = set(zip(unigrams, unigrams[1:])) self._cache[text] = unigrams return unigrams def __call__(self, *args: Any, **kwargs: Any) -> Any: return self.tokenize(*args, **kwargs) def tokenize(self, text, prepend_bos=True, append_eos=True) -> mx.array: if isinstance(text, list): return mx.array([self.tokenize(t, prepend_bos, append_eos) for t in text]) # Lower case, cleanup, and split. Hugging Face does a much, # more thorough job here but this should suffice for 95% of # cases. 
clean_text = regex.sub(r"\s+", " ", text.lower()) tokens = regex.findall(self.pat, clean_text) # Split the tokens according to the byte-pair merge file bpe_tokens = [ti for t in tokens for ti in self.bpe(t)] # Map to token ids and return tokens = [] if prepend_bos: tokens.append(self.bos_token) tokens.extend(self.vocab[t] for t in bpe_tokens) if append_eos: tokens.append(self.eos_token) return mx.array(tokens) def from_pretrained(path: str): path = Path(path) with open(path / "vocab.json", encoding="utf-8") as f: vocab = json.load(f) with open(path / "merges.txt", encoding="utf-8") as f: bpe_merges = f.read().strip().split("\n")[1 : 49152 - 256 - 2 + 1] bpe_merges = [tuple(m.split()) for m in bpe_merges] bpe_ranks = dict(map(reversed, enumerate(bpe_merges))) return CLIPTokenizer(bpe_ranks, vocab) def load(model_dir: str) -> Tuple[CLIPModel, CLIPTokenizer, CLIPImageProcessor]: model = CLIPModel.from_pretrained(model_dir) tokenizer = CLIPTokenizer.from_pretrained(model_dir) img_processor = CLIPImageProcessor.from_pretrained(model_dir) return model, tokenizer, img_processor
null
17,854
import argparse import json import shutil from pathlib import Path from typing import Any, Dict, Union import mlx.core as mx import torch from huggingface_hub import snapshot_download def make_shards(weights: dict, max_file_size_gb: int = 5) -> list: max_file_size_bytes = max_file_size_gb << 30 shards = [] shard, shard_size = {}, 0 for k, v in weights.items(): if shard_size + v.nbytes > max_file_size_bytes: shards.append(shard) shard, shard_size = {}, 0 shard[k] = v shard_size += v.nbytes shards.append(shard) return shards The provided code snippet includes necessary dependencies for implementing the `save_weights` function. Write a Python function `def save_weights(save_path: Union[str, Path], weights: Dict[str, Any]) -> None` to solve the following problem: Save model weights into specified directory. Here is the function: def save_weights(save_path: Union[str, Path], weights: Dict[str, Any]) -> None: """Save model weights into specified directory.""" if isinstance(save_path, str): save_path = Path(save_path) save_path.mkdir(parents=True, exist_ok=True) shards = make_shards(weights) shards_count = len(shards) shard_file_format = ( "model-{:05d}-of-{:05d}.safetensors" if shards_count > 1 else "model.safetensors" ) total_size = sum(v.nbytes for v in weights.values()) index_data = {"metadata": {"total_size": total_size}, "weight_map": {}} for i, shard in enumerate(shards): shard_name = shard_file_format.format(i + 1, shards_count) shard_path = save_path / shard_name mx.save_safetensors(str(shard_path), shard) for weight_name in shard.keys(): index_data["weight_map"][weight_name] = shard_name index_data["weight_map"] = { k: index_data["weight_map"][k] for k in sorted(index_data["weight_map"]) } with open(save_path / "model.safetensors.index.json", "w") as f: json.dump( index_data, f, indent=4, )
Save model weights into specified directory.
17,855
import argparse import json import shutil from pathlib import Path from typing import Any, Dict, Union import mlx.core as mx import torch from huggingface_hub import snapshot_download def get_model_path(path_or_hf_repo: str) -> Path: model_path = Path(path_or_hf_repo) if not model_path.exists(): model_path = Path( snapshot_download( repo_id=path_or_hf_repo, allow_patterns=[ "*.bin", "*.json", "*.txt", ], ) ) return model_path
null
17,856
import argparse import json import shutil from pathlib import Path from typing import Any, Dict, Union import mlx.core as mx import torch from huggingface_hub import snapshot_download def torch_to_mx(a: torch.Tensor, *, dtype: str) -> mx.array: # bfloat16 is not numpy convertible. Upcast to float32 to avoid precision loss a = a.to(torch.float32) if dtype == "bfloat16" else a.to(getattr(torch, dtype)) return mx.array(a.numpy(), getattr(mx, dtype))
null
17,857
def resize(image: Image, short_size: int) -> Image:
    """Resize *image* so that its shorter side equals ``short_size``.

    The aspect ratio is preserved; the image is returned unchanged when the
    shorter side already matches.
    """
    width, height = image.size
    shorter = min(width, height)
    longer = max(width, height)
    if shorter == short_size:
        return image
    scaled_long = int(short_size * longer / shorter)
    if width <= height:
        target = (short_size, scaled_long)
    else:
        target = (scaled_long, short_size)
    return image.resize(target)
Resize so the smaller side equals short_size
17,858
def center_crop(image: Image, size: Tuple[int, int]) -> Image:
    """Crop a centered ``(crop_height, crop_width) = size`` region from *image*.

    Raises:
        ValueError: if either crop dimension is odd.
    """
    if size[0] % 2 != 0 or size[1] % 2 != 0:
        raise ValueError("Only even crop sizes supported.")
    crop_height, crop_width = size
    width, height = image.size
    top = (height - crop_height) // 2
    left = (width - crop_width) // 2
    # PIL crop box is (left, top, right, bottom).
    return image.crop((left, top, left + crop_width, top + crop_height))
null
17,859
def rescale(image: mx.array) -> mx.array:
    """Map uint8 pixel values in [0, 255] to float32 values in [0, 1]."""
    scale = 1 / 255.0
    return image.astype(mx.float32) * scale
null
17,860
def normalize(image: mx.array, mean: mx.array, std: mx.array) -> mx.array:
    """Standardize *image*: subtract *mean* then divide by *std* elementwise."""
    centered = image - mean
    return centered / std
null
17,861
def quick_gelu(x: mx.array) -> mx.array:
    """
    A fast GELU approximation https://github.com/hendrycks/GELUs
    """
    gate = mx.sigmoid(1.702 * x)
    return x * gate
A fast GELU approximation https://github.com/hendrycks/GELUs
17,862
def clip_loss(logits: mx.array) -> mx.array:
    """Symmetric cross-entropy loss over an (N, M) similarity matrix.

    Targets are the diagonal indices (``arange``), i.e. row i should score
    highest at column i and vice versa; the two directional losses are
    averaged.
    """
    n_rows, n_cols = logits.shape
    caption_loss = cross_entropy(logits, mx.arange(n_rows), reduction="mean")
    image_loss = cross_entropy(logits.T, mx.arange(n_cols), reduction="mean")
    return 0.5 * (caption_loss + image_loss)
null
17,863
import glob import json import logging from pathlib import Path from typing import Generator import mlx.core as mx import mlx.nn as nn import models.llama as llama import models.mixtral as mixtral import models.phi2 as phi2 import transformers from huggingface_hub import snapshot_download def load(path_or_hf_repo: str): def fetch_from_hub(hf_path: str): model_path = snapshot_download( repo_id=hf_path, allow_patterns=["*.json", "*.safetensors", "tokenizer.model"], ) weight_files = glob.glob(f"{model_path}/*.safetensors") if len(weight_files) == 0: raise FileNotFoundError("No safetensors found in {}".format(model_path)) weights = {} for wf in weight_files: weights.update(mx.load(wf).items()) config = transformers.AutoConfig.from_pretrained(hf_path) tokenizer = transformers.AutoTokenizer.from_pretrained( hf_path, ) return weights, config.to_dict(), tokenizer
null
17,864
import glob import json import logging from pathlib import Path from typing import Generator import mlx.core as mx import mlx.nn as nn import models.llama as llama import models.mixtral as mixtral import models.phi2 as phi2 import transformers from huggingface_hub import snapshot_download def load(path_or_hf_repo: str): # If the path exists, it will try to load model form it # otherwise download and cache from the hf_repo and cache model_path = Path(path_or_hf_repo) if not model_path.exists(): model_path = Path( snapshot_download( repo_id=path_or_hf_repo, allow_patterns=["*.json", "*.safetensors", "tokenizer.model"], ) ) with open(model_path / "config.json", "r") as f: config = json.loads(f.read()) quantization = config.get("quantization", None) weight_files = glob.glob(str(model_path / "*.safetensors")) if len(weight_files) == 0: raise FileNotFoundError("No safetensors found in {}".format(model_path)) weights = {} for wf in weight_files: weights.update(mx.load(wf).items()) model_class, model_args_class = _get_classes(config=config) model_args = model_args_class.from_dict(config) model = model_class(model_args) if quantization is not None: nn.QuantizedLinear.quantize_module( model, **quantization, linear_class_predicate=lambda m: isinstance(m, nn.Linear) and m.weight.shape[0] != 8, ) model.load_weights(list(weights.items())) mx.eval(model.parameters()) tokenizer = transformers.AutoTokenizer.from_pretrained(model_path) return model, tokenizer, config def upload_to_hub(path: str, name: str, hf_path: str): import os from huggingface_hub import HfApi, ModelCard, logging repo_id = f"mlx-community/{name}" card = ModelCard.load(hf_path) card.data.tags = ["mlx"] if card.data.tags is None else card.data.tags + ["mlx"] card.text = f""" # {name} This model was converted to MLX format from [`{hf_path}`](). Refer to the [original model card](https://huggingface.co/{hf_path}) for more details on the model. 
## Use with mlx ```bash pip install mlx git clone https://github.com/ml-explore/mlx-examples.git cd mlx-examples/llms/hf_llm python generate.py --model {repo_id} --prompt "My name is" ``` """ card.save(os.path.join(path, "README.md")) logging.set_verbosity_info() api = HfApi() api.create_repo(repo_id=repo_id, exist_ok=True) api.upload_folder( folder_path=path, repo_id=repo_id, repo_type="model", )
null
17,865
def make_shards(weights: dict, max_file_size_gibibyte: int = 15):
    """Split *weights* into shard dicts of at most the given size in GiB.

    Args:
        weights: Mapping of parameter name -> array (anything with ``.nbytes``).
        max_file_size_gibibyte: Per-shard cap in gibibytes.

    Returns:
        A non-empty list of dicts preserving insertion order.
    """
    max_file_size_bytes = max_file_size_gibibyte << 30
    shards = []
    shard, shard_size = {}, 0
    for k, v in weights.items():
        # Flush only when the current shard is non-empty; the original code
        # emitted a spurious empty shard if the first tensor alone exceeded
        # the cap.
        if shard and shard_size + v.nbytes > max_file_size_bytes:
            shards.append(shard)
            shard, shard_size = {}, 0
        shard[k] = v
        shard_size += v.nbytes
    shards.append(shard)
    return shards


def save_model(save_dir: str, weights, tokenizer, config):
    """Write sharded safetensors weights, tokenizer files and config.json.

    Args:
        save_dir: Output directory (created if missing).
        weights: Mapping of parameter name -> array.
        tokenizer: Tokenizer exposing ``save_pretrained``.
        config: JSON-serializable model configuration.
    """
    save_dir = Path(save_dir)
    save_dir.mkdir(parents=True, exist_ok=True)

    shards = make_shards(weights, max_file_size_gibibyte=5)
    shards_count = len(shards)
    shard_file_format = (
        "model-{:05d}-of-{:05d}.safetensors" if shards_count > 1 else "model.safetensors"
    )

    for i, shard in enumerate(shards):
        shard_name = shard_file_format.format(i + 1, shards_count)
        mx.save_safetensors(str(save_dir / shard_name), shard)

    tokenizer.save_pretrained(save_dir)

    with open(save_dir / "config.json", "w") as fid:
        json.dump(config, fid, indent=4)
null
17,866
def generate(
    prompt: mx.array, model: nn.Module, temp: float = 0.0
) -> Generator[mx.array, None, None]:
    """
    Generate text based on the given prompt and model.

    Args:
        prompt (mx.array): The input prompt.
        model (nn.Module): The model to use for generation.
        temp (float): The temperature for sampling. If temp is 0, use max
            sampling.

    Yields:
        mx.array: The generated text.
    """

    def pick_token(logits: mx.array) -> mx.array:
        if temp == 0:
            return mx.argmax(logits, axis=-1)
        return mx.random.categorical(logits * (1 / temp))

    tokens = prompt
    cache = None
    # Infinite stream: the caller decides when to stop consuming tokens.
    while True:
        logits, cache = model(tokens[None], cache=cache)
        tokens = pick_token(logits[:, -1, :])
        yield tokens
Generate text based on the given prompt and model. Args: prompt (mx.array): The input prompt. model (nn.Module): The model to use for generation. temp (float): The temperature for sampling. If temp is 0, use max sampling. Yields: mx.array: The generated text.
17,867
def quantize(weights, config, args):
    """Quantize a model's linear layers and return (weights, config).

    Rebuilds the model from *weights*/*config*, applies group quantization
    with ``args.q_group_size`` / ``args.q_bits``, and records the settings
    under the ``"quantization"`` key of the returned config copy.
    """
    quantized_config = copy.deepcopy(config)

    # Rebuild the model so QuantizedLinear can walk its modules.
    model_class, model_args_class = utils._get_classes(config=config)
    model = model_class(model_args_class.from_dict(config))
    model.load_weights(list(weights.items()))

    # Quantize every nn.Linear except those with 8 output rows.
    nn.QuantizedLinear.quantize_module(
        model,
        args.q_group_size,
        args.q_bits,
        linear_class_predicate=lambda m: isinstance(m, nn.Linear)
        and m.weight.shape[0] != 8,
    )

    quantized_config["quantization"] = {
        "group_size": args.q_group_size,
        "bits": args.q_bits,
    }
    quantized_weights = dict(tree_flatten(model.parameters()))
    return quantized_weights, quantized_config
null
17,868
import glob import inspect import json import math from dataclasses import dataclass from pathlib import Path from typing import Dict, List, Optional, Tuple, Union import mlx.core as mx import mlx.nn as nn import numpy as np from huggingface_hub import snapshot_download from transformers import AutoTokenizer class ModelArgs: hidden_size: int num_hidden_layers: int intermediate_size: int num_attention_heads: int rms_norm_eps: float vocab_size: int num_key_value_heads: int = None rope_theta: float = 10000 rope_traditional: bool = False model_type: str = None rope_scaling: Optional[Dict[str, Union[float, str]]] = None def __post_init__(self): if self.num_key_value_heads is None: self.num_key_value_heads = self.num_attention_heads if self.rope_scaling: required_keys = {"factor", "type"} if not all(key in self.rope_scaling for key in required_keys): raise ValueError(f"rope_scaling must contain keys {required_keys}") if self.rope_scaling["type"] != "linear": raise ValueError("rope_scaling 'type' currently only supports 'linear'") def from_dict(cls, params): return cls( **{ k: v for k, v in params.items() if k in inspect.signature(cls).parameters } ) class Model(nn.Module): def __init__(self, args: ModelArgs): super().__init__() self.model = LlamaModel(args) self.lm_head = nn.Linear(args.hidden_size, args.vocab_size, bias=False) def __call__( self, inputs: mx.array, cache=None, ): out, cache = self.model(inputs, cache) return self.lm_head(out), cache def load(path_or_hf_repo: str): # If the path exists, it will try to load model form it # otherwise download and cache from the hf_repo and cache model_path = Path(path_or_hf_repo) if not model_path.exists(): model_path = Path( snapshot_download( repo_id=path_or_hf_repo, allow_patterns=["*.json", "*.safetensors", "tokenizer.model"], ) ) with open(model_path / "config.json", "r") as f: config = json.loads(f.read()) quantization = config.get("quantization", None) model_args = ModelArgs.from_dict(config) weight_files = 
glob.glob(str(model_path / "*.safetensors")) if len(weight_files) == 0: raise FileNotFoundError("No safetensors found in {}".format(model_path)) weights = {} for wf in weight_files: weights.update(mx.load(wf).items()) model = Model(model_args) if quantization is not None: nn.QuantizedLinear.quantize_module( model, **quantization, linear_class_predicate=lambda m: isinstance(m, nn.Linear) and m.weight.shape[0] != 8, ) model.load_weights(list(weights.items())) mx.eval(model.parameters()) tokenizer = AutoTokenizer.from_pretrained(model_path) return model, tokenizer, config
null
17,869
import glob import inspect import json import math from dataclasses import dataclass from pathlib import Path from typing import Dict, List, Optional, Tuple, Union import mlx.core as mx import mlx.nn as nn import numpy as np from huggingface_hub import snapshot_download from transformers import AutoTokenizer class Model(nn.Module): def __init__(self, args: ModelArgs): def __call__( self, inputs: mx.array, cache=None, ): def generate(prompt: mx.array, model: Model, temp: float = 0.0): def sample(logits): if temp == 0: return mx.argmax(logits, axis=-1) else: return mx.random.categorical(logits * (1 / temp)) y = prompt cache = None while True: logits, cache = model(y[None], cache=cache) logits = logits[:, -1, :] y = sample(logits) yield y
null
17,870
class WikiSQL:
    """WikiSQL split rendered as text prompts.

    Each item is a string of the form
    ``<s>{table description}\\nQ: {question}\\nA: {SQL}</s>``.
    """

    def __init__(self, dataset, save_dir="/tmp"):
        # Validate the requested split name.
        valid_sets = ("train", "dev", "test")
        if dataset not in valid_sets:
            raise ValueError(f"Dataset must be in {valid_sets}, got {dataset}")

        data_dir = os.path.join(save_dir, "wikisql")
        self._maybe_download(data_dir)
        self._parse_tables(os.path.join(data_dir, f"data/{dataset}.tables.jsonl"))
        self._parse_queries(os.path.join(data_dir, f"data/{dataset}.jsonl"))

    def _maybe_download(self, data_dir):
        # Download and extract the upstream archive only on first use.
        if not os.path.exists(data_dir):
            import io
            import tarfile
            from urllib import request

            url = "https://raw.githubusercontent.com/salesforce/WikiSQL/master/data.tar.bz2"
            r = request.urlopen(url)
            with tarfile.open(fileobj=io.BytesIO(r.read())) as tf:
                tf.extractall(data_dir)

    def _parse_tables(self, tables):
        # Index table metadata (columns, types, rendered description) by id.
        self._tables = {}
        with open(tables) as f:
            for line in f:
                table = json.loads(line)
                self._tables[table["id"]] = {
                    "columns": table["header"],
                    "types": table["types"],
                    "desc": f"table: {table['id']}\ncolumns: {', '.join(table['header'])}",
                }

    def _parse_queries(self, queries):
        # Render every (question, sql) pair into one training string.
        self._queries = []
        with open(queries) as f:
            for line in f:
                query = json.loads(line)
                table = self._tables[query["table_id"]]
                question = query["question"]
                answer = self.query_to_text(
                    query["sql"], query["table_id"], table["columns"], table["types"]
                )
                self._queries.append(
                    f"<s>{table['desc']}\nQ: {question}\nA: {answer}</s>"
                )

    def query_to_text(self, query, table, columns, types):
        """Render a structured WikiSQL query dict as a SQL string."""
        aggregation_ops = ["", "MAX", "MIN", "COUNT", "SUM", "AVG"]
        condition_ops = ["=", ">", "<", "OP"]
        column = columns[query["sel"]]
        aggregation = (aggregation_ops[query["agg"]] + " ") if query["agg"] > 0 else ""
        sql = f"SELECT {aggregation}{column} FROM {table}"
        conditions = query["conds"]
        if conditions:
            cs = []
            for i, o, v in conditions:
                column = columns[i]
                op = condition_ops[o]
                # Text-typed columns need quoted literals.
                if types[i] == "text":
                    value = f"'{v}'"
                else:
                    value = v
                cs.append(f"{column} {op} {value}")
            sql += " WHERE " + " AND ".join(cs)
        return sql

    def __getitem__(self, idx):
        return self._queries[idx]

    def __len__(self):
        return len(self._queries)


def load():
    """
    Load all three splits of the WikiSQL dataset.
    """
    return (WikiSQL(dn) for dn in ["train", "dev", "test"])
Load all three splits of the WikiSQL dataset.
17,871
def build_parser():
    """Build the CLI argument parser for LoRA/QLoRA fine-tuning and generation."""
    parser = argparse.ArgumentParser(description="LoRA or QLoRA finetuning.")
    parser.add_argument(
        "--model",
        default="mlx_model",
        help="The path to the local model directory or Hugging Face repo.",
    )
    # Generation args
    parser.add_argument(
        "--max-tokens",
        "-m",
        type=int,
        default=100,
        help="The maximum number of tokens to generate",
    )
    parser.add_argument(
        "--temp", type=float, default=0.8, help="The sampling temperature"
    )
    parser.add_argument(
        "--prompt",
        "-p",
        type=str,
        help="The prompt for generation",
        default=None,
    )
    # Training args
    parser.add_argument(
        "--train",
        action="store_true",
        help="Do training",
    )
    parser.add_argument(
        "--data",
        type=str,
        default="data/",
        help="Directory with {train, valid, test}.jsonl files",
    )
    parser.add_argument(
        "--lora-layers",
        type=int,
        default=16,
        help="Number of layers to fine-tune",
    )
    parser.add_argument("--batch-size", type=int, default=4, help="Minibatch size.")
    parser.add_argument(
        "--iters", type=int, default=1000, help="Iterations to train for."
    )
    parser.add_argument(
        "--val-batches",
        type=int,
        default=25,
        help="Number of validation batches, -1 uses the entire validation set.",
    )
    parser.add_argument(
        "--learning-rate", type=float, default=1e-5, help="Adam learning rate."
    )
    parser.add_argument(
        "--steps-per-report",
        type=int,
        default=10,
        help="Number of training steps between loss reporting.",
    )
    parser.add_argument(
        "--steps-per-eval",
        type=int,
        default=200,
        help="Number of training steps between validations.",
    )
    parser.add_argument(
        "--resume-adapter-file",
        type=str,
        default=None,
        help="Load path to resume training with the given adapter weights.",
    )
    parser.add_argument(
        "--adapter-file",
        type=str,
        default="adapters.npz",
        help="Save/load path for the trained adapter weights.",
    )
    parser.add_argument(
        "--save-every",
        type=int,
        default=100,
        help="Save the model every N iterations.",
    )
    parser.add_argument(
        "--test",
        action="store_true",
        help="Evaluate on the test set after training",
    )
    parser.add_argument(
        "--test-batches",
        type=int,
        default=500,
        help="Number of test set batches, -1 uses the entire test set.",
    )
    parser.add_argument("--seed", type=int, default=0, help="The PRNG seed")
    return parser
null
17,872
class Dataset:
    """
    Light-weight wrapper to hold lines from a jsonl file
    """

    def __init__(self, path: Path, key: str = "text"):
        # NOTE(review): when the file is missing, _data is None, so __len__
        # raises TypeError instead of returning 0 — confirm this is intended
        # (load() below relies on len() for emptiness checks).
        if not path.exists():
            self._data = None
        else:
            with open(path, "r") as fid:
                self._data = [json.loads(l) for l in fid]
        self._key = key

    def __getitem__(self, idx: int):
        return self._data[idx][self._key]

    def __len__(self):
        return len(self._data)


def train(model, train_set, val_set, optimizer, loss, tokenizer, args):
    """Run the fine-tuning loop with periodic validation and checkpointing."""
    # Create value and grad function for loss
    loss_value_and_grad = nn.value_and_grad(model, loss)

    losses = []
    n_tokens = 0

    # Main training loop
    start = time.perf_counter()
    for it, batch in zip(
        range(args.iters),
        iterate_batches(train_set, tokenizer, args.batch_size, train=True),
    ):
        # Forward and backward pass
        (lvalue, toks), grad = loss_value_and_grad(model, *batch)

        # Model update
        optimizer.update(model, grad)
        mx.eval(model.parameters(), optimizer.state, lvalue)

        # Record loss
        losses.append(lvalue.item())
        n_tokens += toks.item()

        # Report training loss if needed
        if (it + 1) % args.steps_per_report == 0:
            train_loss = np.mean(losses)
            stop = time.perf_counter()
            print(
                f"Iter {it + 1}: Train loss {train_loss:.3f}, "
                f"It/sec {args.steps_per_report / (stop - start):.3f}, "
                f"Tokens/sec {float(n_tokens) / (stop - start):.3f}"
            )
            # Reset the running stats and timer after each report.
            losses = []
            n_tokens = 0
            start = time.perf_counter()

        # Report validation loss if needed
        if it == 0 or (it + 1) % args.steps_per_eval == 0:
            stop = time.perf_counter()
            val_loss = evaluate(
                model, val_set, loss, tokenizer, args.batch_size, args.val_batches
            )
            print(
                f"Iter {it + 1}: "
                f"Val loss {val_loss:.3f}, "
                f"Val took {(time.perf_counter() - stop):.3f}s"
            )

            # Exclude validation time from the throughput numbers.
            start = time.perf_counter()

        # Save adapter weights if needed
        if (it + 1) % args.save_every == 0:
            mx.savez(
                args.adapter_file, **dict(tree_flatten(model.trainable_parameters()))
            )
            print(f"Iter {it + 1}: Saved adapter weights to {args.adapter_file}.")


def load(args):
    """Load train/valid/test jsonl splits and validate the required ones."""

    def load_and_check(name):
        dataset_path = Path(args.data) / f"{name}.jsonl"
        try:
            return Dataset(dataset_path)
        except Exception as e:
            print(f"Unable to build dataset {dataset_path} ({e})")
            raise

    names = ("train", "valid", "test")
    train, valid, test = (load_and_check(n) for n in names)

    if args.train and len(train) == 0:
        raise ValueError(
            "Training set not found or empty. Must provide training set for fine-tuning."
        )
    if args.train and len(valid) == 0:
        raise ValueError(
            "Validation set not found or empty. Must provide validation set for fine-tuning."
        )
    if args.test and len(test) == 0:
        raise ValueError(
            "Test set not found or empty. Must provide test set for evaluation."
        )
    return train, valid, test
null
17,873
def loss(model, inputs, targets, lengths):
    """Masked mean cross-entropy over the valid (non-padding) positions.

    Returns:
        (loss, ntoks): mean token loss and the number of unmasked tokens.
    """
    # Run model on inputs
    logits, _ = model(inputs)
    logits = logits.astype(mx.float32)

    # True where the position index lies inside the sequence's real length.
    positions = mx.arange(inputs.shape[1])[None, :]
    length_mask = positions < lengths[:, None]

    ce = nn.losses.cross_entropy(logits, targets) * length_mask
    ntoks = length_mask.sum()
    return ce.sum() / ntoks, ntoks
null
17,874
def generate(model, prompt, tokenizer, args):
    """Stream a completion for *prompt* to stdout, then print the remainder.

    Tokens come from ``lora_utils.generate`` until EOS or ``args.max_tokens``;
    partially decoded text is flushed incrementally as it stabilizes.
    """
    print(prompt, end="", flush=True)
    prompt = mx.array(tokenizer.encode(prompt))
    tokens = []
    # Number of characters of the decoded string already printed.
    skip = 0
    for token, n in zip(
        lora_utils.generate(prompt, model, args.temp),
        range(args.max_tokens),
    ):
        if token == tokenizer.eos_token_id:
            break
        tokens.append(token.item())
        s = tokenizer.decode(tokens)
        # Hold back the last character — presumably because it may still
        # change once the next token extends the decode; TODO confirm.
        if len(s) - skip > 1:
            print(s[skip:-1], end="", flush=True)
            skip = len(s) - 1
    print(tokenizer.decode(tokens)[skip:], flush=True)
    print("=" * 10)
    if len(tokens) == 0:
        print("No tokens generated for this prompt")
        return
null
17,875
def parse_arguments():
    """Build and parse CLI arguments for LLaVA image-to-text generation."""
    parser = argparse.ArgumentParser(
        description="Generate text from an image using a model."
    )
    parser.add_argument(
        "--model",
        type=str,
        default="llava-hf/llava-1.5-7b-hf",
        help="The path to the local model directory or Hugging Face repo.",
    )
    parser.add_argument(
        "--image",
        type=str,
        default="http://images.cocodataset.org/val2017/000000039769.jpg",
        help="URL or path of the image to process.",
    )
    parser.add_argument(
        "--prompt",
        type=str,
        default="USER: <image>\nWhat are these?\nASSISTANT:",
        help="Message to be processed by the model.",
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=100,
        help="Maximum number of tokens to generate.",
    )
    parser.add_argument(
        "--temp", type=float, default=0.3, help="Temperature for sampling."
    )
    return parser.parse_args()
null
17,876
def load_image(image_source):
    """Open an image from an http(s) URL or a local file path.

    Raises:
        ValueError: if the source is neither a loadable URL nor an existing
            file, or if opening the image fails.
    """
    is_url = image_source.startswith(("http://", "https://"))
    if is_url:
        try:
            response = requests.get(image_source, stream=True)
            response.raise_for_status()
            return Image.open(response.raw)
        except Exception as e:
            raise ValueError(
                f"Failed to load image from URL: {image_source} with error {e}"
            )
    if Path(image_source).is_file():
        try:
            return Image.open(image_source)
        except IOError as e:
            raise ValueError(f"Failed to load image {image_source} with error: {e}")
    raise ValueError(
        f"The image {image_source} must be a valid URL or existing file."
    )


def prepare_inputs(processor, image, prompt):
    """Tokenize *prompt* and preprocess *image* into MLX arrays."""
    if isinstance(image, str):
        image = load_image(image)
    processed = processor(prompt, image, return_tensors="np")
    input_ids = mx.array(processed["input_ids"])
    pixel_values = mx.array(processed["pixel_values"])
    return input_ids, pixel_values
null
17,877
def load_model(model_path):
    """Return the (processor, model) pair loaded from *model_path*."""
    return (
        AutoProcessor.from_pretrained(model_path),
        LlavaModel.from_pretrained(model_path),
    )
null
17,878
def sample(logits, temperature=0.0):
    """Greedy argmax at temperature 0; otherwise temperature sampling."""
    if temperature == 0:
        return mx.argmax(logits, axis=-1)
    return mx.random.categorical(logits * (1 / temperature))


def generate_text(input_ids, pixel_values, model, processor, max_tokens, temperature):
    """Autoregressively decode up to *max_tokens* tokens and return the text.

    The first forward pass primes the cache with the multimodal prompt; the
    loop then feeds one token at a time through the language model.
    """
    logits, cache = model(input_ids, pixel_values)
    y = sample(logits[:, -1, :], temperature=temperature)
    tokens = [y.item()]

    for _ in range(max_tokens - 1):
        logits, cache = model.language_model(y[None], cache=cache)
        y = sample(logits[:, -1, :], temperature)
        token = y.item()
        if token == processor.tokenizer.eos_token_id:
            break
        tokens.append(token)

    return processor.tokenizer.decode(tokens)
null
17,879
def eval_fn(x, y):
    """Accuracy: fraction of rows whose argmax over axis 1 equals the label."""
    predicted = mx.argmax(x, axis=1)
    return mx.mean(predicted == y)
null
17,880
def loss_fn(y_hat, y, weight_decay=0.0, parameters=None):
    """Cross-entropy loss with optional L2 weight regularization.

    Args:
        y_hat: Predicted logits.
        y: Integer class labels.
        weight_decay: L2 penalty coefficient; 0.0 disables regularization.
        parameters: Model parameter tree; required when weight_decay != 0.
    """
    l = mx.mean(nn.losses.cross_entropy(y_hat, y))

    if weight_decay != 0.0:
        # Identity comparison is the idiomatic None check (was `!= None`).
        assert parameters is not None, "Model parameters missing for L2 reg."

        l2_reg = sum(mx.sum(p[1] ** 2) for p in tree_flatten(parameters)).sqrt()
        return l + weight_decay * l2_reg
    return l


def forward_fn(gcn, x, adj, y, train_mask, weight_decay):
    """Run the GCN and return (training loss on masked nodes, all logits)."""
    y_hat = gcn(x, adj)
    loss = loss_fn(y_hat[train_mask], y[train_mask], weight_decay, gcn.parameters())
    return loss, y_hat
null
17,881
def train_val_test_mask():
    """Splits the loaded dataset into train/validation/test sets."""
    # Fixed index ranges into the node ordering — presumably matching the
    # dataset produced by load_data; confirm against the datasets module.
    train_ids = mx.arange(140)
    val_ids = mx.arange(200, 500)
    test_ids = mx.arange(500, 1500)
    return train_ids, val_ids, test_ids
Splits the loaded dataset into train/validation/test sets.
17,882
def download_cora():
    """Downloads the cora dataset into a local cora folder."""
    url = "https://linqs-data.soe.ucsc.edu/public/lbc/cora.tgz"
    extract_to = "."
    # Already downloaded and extracted: nothing to do.
    if os.path.exists(os.path.join(extract_to, "cora")):
        return

    response = requests.get(url, stream=True)
    if response.status_code == 200:
        file_path = os.path.join(extract_to, url.split("/")[-1])
        # Write the file to local disk
        with open(file_path, "wb") as file:
            file.write(response.raw.read())
        # Extract the .tgz file
        with tarfile.open(file_path, "r:gz") as tar:
            tar.extractall(path=extract_to)
        print(f"Cora dataset extracted to {extract_to}")
        os.remove(file_path)


def enumerate_labels(labels):
    """Converts the labels from the original string form to the integer
    [0:MaxLabels-1]
    """
    # Sort the unique labels so the label -> id assignment is deterministic
    # across runs (iterating a bare set() yields an arbitrary order).
    label_map = {v: e for e, v in enumerate(sorted(set(labels)))}
    labels = np.array([label_map[label] for label in labels])
    return labels


def normalize_adjacency(adj):
    """Normalizes the adjacency matrix according to the paper by Kipf et al.
    https://arxiv.org/abs/1609.02907
    """
    adj = adj + sparse.eye(adj.shape[0])
    node_degrees = np.array(adj.sum(1))
    node_degrees = np.power(node_degrees, -0.5).flatten()
    # Replace inf/nan produced by zero-degree nodes.
    node_degrees[np.isinf(node_degrees)] = 0.0
    node_degrees[np.isnan(node_degrees)] = 0.0
    degree_matrix = sparse.diags(node_degrees, dtype=np.float32)

    adj = degree_matrix @ adj @ degree_matrix
    return adj


def load_data(config):
    """Loads the Cora graph data into MLX array format."""
    print("Loading Cora dataset...")

    # Download dataset files
    download_cora()

    # Graph nodes
    raw_nodes_data = np.genfromtxt(config.nodes_path, dtype="str")
    raw_node_ids = raw_nodes_data[:, 0].astype(
        "int32"
    )  # unique identifier of each node
    raw_node_labels = raw_nodes_data[:, -1]
    labels_enumerated = enumerate_labels(raw_node_labels)  # target labels as integers
    node_features = sparse.csr_matrix(raw_nodes_data[:, 1:-1], dtype="float32")

    # Edges
    ids_ordered = {raw_id: order for order, raw_id in enumerate(raw_node_ids)}
    raw_edges_data = np.genfromtxt(config.edges_path, dtype="int32")
    edges_ordered = np.array(
        list(map(ids_ordered.get, raw_edges_data.flatten())), dtype="int32"
    ).reshape(raw_edges_data.shape)

    # Adjacency matrix
    adj = sparse.coo_matrix(
        (np.ones(edges_ordered.shape[0]), (edges_ordered[:, 0], edges_ordered[:, 1])),
        shape=(labels_enumerated.shape[0], labels_enumerated.shape[0]),
        dtype=np.float32,
    )

    # Make the adjacency matrix symmetric
    adj = adj + adj.T.multiply(adj.T > adj)
    adj = normalize_adjacency(adj)

    # Convert to mlx array
    features = mx.array(node_features.toarray(), mx.float32)
    labels = mx.array(labels_enumerated, mx.int32)
    adj = mx.array(adj.toarray())

    print("Dataset loaded.")
    return features, labels, adj
Loads the Cora graph data into MLX array format.
17,883
def is_request_from_worker(request):
    """True when the request carries a valid worker auth token.

    With no ``X-Auth-Token`` header the request is rejected. In DEBUG mode
    any header value is accepted; otherwise its SHA-256 hex digest must
    match ``settings.WORKER_AUTH_TOKEN_HASH``.
    """
    token = request.META.get('HTTP_X_AUTH_TOKEN')
    if token is None:
        return False
    if settings.DEBUG:
        return True
    digest = hashlib.sha256(token.encode()).hexdigest()
    return digest == settings.WORKER_AUTH_TOKEN_HASH
null
17,884
def show_toolbar(request):
    """Always enable the debug toolbar, regardless of the request."""
    return True
null
17,885
import hashlib import itertools import uuid from collections import OrderedDict from datetime import timedelta from django.conf import settings from django.core.cache import cache from django.db import models from django.db.models.signals import post_save from django.db.models.constraints import UniqueConstraint, CheckConstraint from django.dispatch import receiver from django.forms import model_to_dict from django.utils import timezone from django.utils.translation import gettext_lazy as _ def binary_upload_path(instance, filename): return f"{settings.UPLOAD_COMPILED_PATH}/{instance.hash}"
null
17,886
import hashlib import itertools import uuid from collections import OrderedDict from datetime import timedelta from django.conf import settings from django.core.cache import cache from django.db import models from django.db.models.signals import post_save from django.db.models.constraints import UniqueConstraint, CheckConstraint from django.dispatch import receiver from django.forms import model_to_dict from django.utils import timezone from django.utils.translation import gettext_lazy as _ def decompilation_upload_path(instance, filename): ctx = hashlib.sha256() for data in instance.decompiled_file.chunks(1024): ctx.update(data) return f"{settings.UPLOAD_DECOMPILED_PATH}/{ctx.hexdigest()}"
null
17,887
import hashlib import itertools import uuid from collections import OrderedDict from datetime import timedelta from django.conf import settings from django.core.cache import cache from django.db import models from django.db.models.signals import post_save from django.db.models.constraints import UniqueConstraint, CheckConstraint from django.dispatch import receiver from django.forms import model_to_dict from django.utils import timezone from django.utils.translation import gettext_lazy as _ class Binary(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) file = models.FileField(upload_to=binary_upload_path, max_length=255) created = models.DateTimeField('Compile Date', default=timezone.now, editable=False) hash = models.CharField(max_length=128, editable=False, unique=True, blank=False, null=False) featured = models.BooleanField(default=False) featured_name = models.TextField(max_length=128, null=True) class Meta: verbose_name_plural = "binaries" def __str__(self): return f'Binary: {self.hash}' class Decompiler(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) name = models.CharField(max_length=255) version = models.CharField('Version Major.minor.patch', max_length=255) revision = models.CharField('Specific revision label', max_length=255, blank=True) url = models.URLField(max_length=255) last_health_check = models.DateTimeField(default=timezone.now, editable=False) featured = models.BooleanField('Featured on homepage', default=False) created = models.DateTimeField(default=timezone.now, editable=False) class Meta: constraints = [ UniqueConstraint(fields=['name', 'version', 'revision', 'url'], name='unique_decompiler_info') ] def __str__(self): if len(self.revision) > 0: return f'Decompiler: {self.name} {self.version} {self.revision[:8]}' else: return f'Decompiler: {self.name} {self.version}' def __lt__(self, other): if not isinstance(other, (Decompiler,)): return False this_version = 
list(itertools.chain(*[v.split('-') for v in self.version.split('.')])) other_version = list(itertools.chain(*[v.split('-') for v in other.version.split('.')])) for i in range(min(len(this_version), len(other_version))): try: if int(this_version[i]) < int(other_version[i]): return True elif int(this_version[i]) > int(other_version[i]): return False except ValueError: if this_version[i] < other_version[i]: return True elif this_version[i] > other_version[i]: return False if len(this_version) < len(other_version): return True if len(this_version) > len(other_version): return False if self.last_health_check < other.last_health_check: return True if self.last_health_check > other.last_health_check: return False return False def healthy_latest_versions(cls): latest_versions = {} for decompiler in Decompiler.objects.filter(last_health_check__gte=timezone.now() - HEALTHY_CUTOFF): if decompiler.name not in latest_versions or latest_versions[decompiler.name] < decompiler: latest_versions[decompiler.name] = decompiler return latest_versions def healthy(self): return self.last_health_check >= (timezone.now() - HEALTHY_CUTOFF) class DecompilationRequest(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) binary = models.ForeignKey(Binary, related_name='decompilation_requests', on_delete=models.CASCADE) decompiler = models.ForeignKey(Decompiler, related_name='decompilation_requests', on_delete=models.CASCADE, null=True, editable=False) created = models.DateTimeField(default=timezone.now, editable=False) last_attempted = models.DateTimeField(default='0001-01-01 00:00:00', editable=False) def __str__(self): return f'<Decompilation Request: {self.id}>' class Meta: constraints = [ UniqueConstraint(fields=['binary', 'decompiler'], name='unique_binary_decompiler') ] ordering = ['created'] def get_queue(): queue_info = cache.get('request_queue_info') if queue_info is None: unfulfilled = DecompilationRequest.objects.filter( 
decompiler__last_health_check__gte=timezone.now() - HEALTHY_CUTOFF, ) queue = OrderedDict() for d in sorted(Decompiler.healthy_latest_versions().values(), key=lambda d: d.id): decompiler_queue = unfulfilled.filter(decompiler__id=d.id) oldest_unfinished = decompiler_queue.first() if oldest_unfinished is not None: oldest_unfinished = oldest_unfinished.created queue[str(d.id)] = { 'decompiler': model_to_dict(d), 'oldest_unfinished': oldest_unfinished, 'queue_length': decompiler_queue.count() } oldest_unfinished = unfulfilled.first() if oldest_unfinished is not None: oldest_unfinished = oldest_unfinished.created general_queue = { 'oldest_unfinished': oldest_unfinished, 'queue_length': unfulfilled.count() } queue_info = { 'general': general_queue, 'per_decompiler': queue, } cache.set('request_queue_info', queue_info, timeout=5) return queue_info class Decompilation(models.Model): id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False) binary = models.ForeignKey(Binary, related_name='decompilations', on_delete=models.CASCADE, editable=False) #TODO: rename to contents decompiled_file = models.FileField(upload_to=decompilation_upload_path, max_length=255, null=True) decompiler = models.ForeignKey(Decompiler, related_name='decompilations', null=True, on_delete=models.SET_NULL, editable=False) error = models.TextField('Error Message', null=True) created = models.DateTimeField('Decompile Date', default=timezone.now, editable=False) analysis_time = models.FloatField(default=0) def __str__(self): return f'<Decompilation: {self.id}>' class Meta: constraints = [ UniqueConstraint(fields=['binary', 'decompiler'], name='unique_binary_decompilation'), CheckConstraint(check=( models.Q(decompiled_file='', error__isnull=False) | (~models.Q(decompiled_file='') & models.Q(error__isnull=True)) ), name='decompiled_file_or_error' ) ] def succeeded(self) -> bool: return not self.failed def failed(self) -> bool: return self.error is not None or self.decompiled_file is None 
def rerun_binary_decompilation(binary: Binary, decompiler: Decompiler): # Delete any pending requests for the binary+decompiler and add one to the queue try: existing_req = binary.decompilation_requests.get(decompiler=decompiler) existing_req.delete() except DecompilationRequest.DoesNotExist: pass try: existing_decomp = binary.decompilations.get(decompiler=decompiler) existing_decomp.delete() except Decompilation.DoesNotExist: pass DecompilationRequest.objects.create(binary=binary, decompiler=decompiler)
null
17,888
from django.db import migrations def populate_completed(apps, schema_editor): DecompilationRequest = apps.get_model('explorer', 'decompilationrequest') DecompilationRequest.objects.exclude(decompilation=None).update(completed=True)
null
17,889
import os import shutil import subprocess import sys import tempfile from pathlib import Path DEWOLF_INSTALL = Path(os.getenv("DEWOLF_INSTALL_PATH", "/home/decompiler_user/dewolf")) def version(): p = subprocess.check_output(['git', 'describe', '--tags', '--abbrev=0', 'HEAD'], cwd=str(DEWOLF_INSTALL)) ver = p.strip().decode() if ver[0] == 'v': ver = ver[1:] print(ver) p = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=str(DEWOLF_INSTALL)) hash = p.strip().decode() print(hash)
null
17,890
import os import shutil import subprocess import sys import tempfile from pathlib import Path RETDEC_DECOMPILER = RETDEC_INSTALL / 'retdec-decompiler' def version(): proc = subprocess.run([RETDEC_DECOMPILER, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # RetDec version : v4.0-415-g05c9b113 # Commit hash : 05c9b11351d3e82012d823fa3709f940033768cf # Build date : 2022-04-13T20:37:02Z output = proc.stdout.decode() lines = output.split('\n') version_lines = [l for l in lines if l.startswith('RetDec version : ')] commit_lines = [l for l in lines if l.startswith('Commit hash : ')] assert len(version_lines) == 1 assert len(commit_lines) == 1 version = version_lines[0][18:] assert version[0] == 'v' version = version[1:] # strip second hyphen and beyond cause we don't care version = '-'.join(version.split('-')[:2]) revision = commit_lines[0][18:26] # 8 chars is enough print(version) print(revision)
null
17,891
import argparse import gzip import shlex import signal from dataclasses import dataclass, asdict import logging import os import resource import subprocess import sys import threading import time import traceback import requests def set_limits(soft_mem, hard_mem): resource.setrlimit(resource.RLIMIT_AS, (soft_mem, hard_mem))
null
17,892
import os import shutil import subprocess import sys import tempfile from pathlib import Path SNOWMAN_NOCODE = SNOWMAN_INSTALL / 'nocode' def version(): proc = subprocess.run([SNOWMAN_NOCODE, '--help'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Version: v0.1.3-13-g6fed71c output = proc.stdout.decode() lines = output.split('\n') version_lines = [l for l in lines if l.startswith('Version: ')] assert len(version_lines) == 1 revision = version_lines[0][-8:] version = version_lines[0][10:-9] print(version) print(revision)
null
17,893
import os import shutil import subprocess import sys import tempfile from pathlib import Path REKO_DECOMPILE = REKO_INSTALL / 'reko' def version(): proc = subprocess.run([REKO_DECOMPILE, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # Reko decompiler version 0.11.5.0 (git:36c3481) output = proc.stdout.decode().strip() assert 'Reko decompiler version ' in output version = output.split(' ')[3] revision = output.split(' ')[4] assert '(git:' in revision revision = revision[5:-1] print(version) print(revision)
null
17,894
import sys import tempfile from typing import List import angr from angr.analyses import CFGFast, Decompiler from angr.knowledge_plugins import Function import warnings def decompile(): conts = sys.stdin.buffer.read() t = tempfile.NamedTemporaryFile() t.write(conts) t.flush() p = angr.Project(t.name, auto_load_libs=False, load_debug_info=False) cfg: CFGFast = p.analyses.CFGFast( normalize=True, resolve_indirect_jumps=True, data_references=True, ) p.analyses.CompleteCallingConventions( cfg=cfg.model, recover_variables=True, analyze_callsites=True ) funcs_to_decompile: List[Function] = [ func for func in cfg.functions.values() if not func.is_plt and not func.is_simprocedure and not func.alignment ] for func in funcs_to_decompile: try: decompiler: Decompiler = p.analyses.Decompiler(func, cfg=cfg.model) if decompiler.codegen is None: print(f"// No decompilation output for function {func.name}\n") continue print(decompiler.codegen.text) except Exception as e: print(f"Exception thrown decompiling function {func.name}: {e}")
null
17,895
import re import os import subprocess import sys from pathlib import Path def relyze_cli_run(params): def version(): success, ver = relyze_cli_run(['/version']) if not success: return 1 match = re.findall(r'\s(\d+\.\d+\.\d+)\s', ver) if len(match) == 0: return 1 print(match[0]) print() return 0
null
17,896
import os import shutil import subprocess import sys import tempfile from pathlib import Path BOOMERANG_CLI = BOOMERANG_INSTALL / 'boomerang-cli' def version(): proc = subprocess.run([BOOMERANG_CLI, '--version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) # boomerang-cli v0.5.2 output = proc.stdout.decode() assert output.startswith('boomerang-cli ') version = output.split(' ')[1] assert version.startswith('v') version = version[1:] print(version) print()
null
17,897
import os import shutil import subprocess import sys import tempfile from pathlib import Path RECSTUDIO_CLI = RECSTUDIO_INSTALL / 'RecCLI' def version(): with open(RECSTUDIO_CLI, 'rb') as f: # <h3>Welcome to RecStudio 4.1</h3> conts = f.read() assert b'<h3>Welcome to RecStudio ' in conts start = conts.find(b'<h3>Welcome to RecStudio ') + len(b'<h3>Welcome to RecStudio ') end = conts.find(b'</h3>', start) version = conts[start:end].decode() print(version) print()
null
17,898
import os import shutil import subprocess import sys import tempfile from pathlib import Path IDA_IDAT = IDA_INSTALL / 'idat' IDA_VERSION_PY = IDA_INSTALL / 'version.py' def version(): logpath = Path(os.getcwd()) / 'ida.log' try: # TODO: Is there a way to do this without creating an idb? with tempfile.TemporaryDirectory() as tmp: dummy_path = Path(tmp) / 'dummy' with open(dummy_path, 'wb') as dummy_file: dummy_file.write(b'\x00' * 256) subprocess.run([str(IDA_IDAT), '-A', '-a', f'-S{IDA_VERSION_PY}', f'-L{logpath}', str(dummy_path)]) version = open(dummy_path.parent / 'version.txt').read().strip() except Exception as e: with open(logpath, 'r') as f: print(f.read(), file=sys.stderr) raise e print(version) print() # Not given
null
17,899
from __future__ import print_function import ida_ida import ida_auto import ida_loader import ida_hexrays import ida_idp import ida_entry import idautils import os.path def init_hexrays(): ALL_DECOMPILERS = { ida_idp.PLFM_386: "hexrays", ida_idp.PLFM_ARM: "hexarm", ida_idp.PLFM_PPC: "hexppc", ida_idp.PLFM_MIPS: "hexmips", } cpu = ida_idp.ph.id decompiler = ALL_DECOMPILERS.get(cpu, None) if not decompiler: print("No known decompilers for architecture with ID: %d" % ida_idp.ph.id) return False if ida_ida.inf_is_64bit(): if cpu == ida_idp.PLFM_386: decompiler = "hexx64" else: decompiler += "64" if ida_loader.load_plugin(decompiler) and ida_hexrays.init_hexrays_plugin(): return True else: print('Couldn\'t load or initialize decompiler: "%s"' % decompiler) return False
null
17,900
import os import sys import shutil import argparse import subprocess def eprint(*args, **kwargs): print(*args, file=sys.stderr, **kwargs)
null
17,901
import os import sys import shutil import argparse import subprocess def delete_files(path, exts): for ext in exts: tmpfile = path + ext if os.path.exists(tmpfile): os.unlink(tmpfile)
null
17,902
import os import sys import shutil import argparse import subprocess platforms_32 = [HEX_X86, HEX_ARM, HEX_PPC, HEX_MIPS ] platforms_64 = [HEX_X64, HEX_ARM64, HEX_PPC64, HEX_MIPS64] def get_bitness(efd, path): # check if the input file is decompilable, and its bitness p = subprocess.run([efd, '-z', path]) exit_code = p.returncode if exit_code >= 64 and exit_code != 255: exit_code -= 64 for plfm in platforms_64: if exit_code & plfm: return 64 for plfm in platforms_32: if exit_code & plfm: return 32 return 0
null
17,903
import argparse import os import secrets import subprocess import sys from pathlib import Path DATA_DIR = BASE_DIR / 'db_data' MEDIA_DIR = BASE_DIR / 'media' STATICFILES_DIR = BASE_DIR / 'staticfiles' def _generate_secrets(force=False): if not SECRETS_DIR.exists(): SECRETS_DIR.mkdir() for secret_name in REQUIRED_SECRETS: secret_path = SECRETS_DIR / secret_name if secret_path.exists() and not force: print(f"Secret {secret_name} already exists, skipping...") continue print(f"Generating secret {secret_name}...") secret_path.touch(mode=0o600) secret_path.write_text(secrets.token_hex(32)) def init_server(args): if not DATA_DIR.exists(): DATA_DIR.mkdir() if not MEDIA_DIR.exists(): MEDIA_DIR.mkdir() if not STATICFILES_DIR.exists(): STATICFILES_DIR.mkdir() _generate_secrets(args.force)
null
17,904
import argparse import os import secrets import subprocess import sys from pathlib import Path BASE_COMPOSE_FILE = BASE_DIR / 'docker-compose.yml' PROD_COMPOSE_FILE = BASE_DIR / 'docker-compose.prod.yml' DEV_COMPOSE_FILE = BASE_DIR / 'docker-compose.dev.yml' DECOMPILERS = [ ('angr', 'angr'), ('boomerang', 'Boomerang'), ('ghidra', 'Ghidra'), ('recstudio', 'REC Studio'), ('reko', 'Reko'), ('retdec', 'RetDec'), ('snowman', 'Snowman') ] DECOMPILERS.sort(key=lambda d: d[0]) def build_server(args): config_files = f'-f {BASE_COMPOSE_FILE}' if args.prod: config_files += f' -f {PROD_COMPOSE_FILE}' else: config_files += f' -f {DEV_COMPOSE_FILE}' services = [ 'traefik', 'database', 'explorer' ] for d in DECOMPILERS: if getattr(args, d[0]): services.append(d[0]) cmd = f"docker-compose {config_files} build" subprocess.run(cmd.split(' ') + services, check=True)
null
17,905
import argparse import os import secrets import subprocess import sys from pathlib import Path BASE_COMPOSE_FILE = BASE_DIR / 'docker-compose.yml' PROD_COMPOSE_FILE = BASE_DIR / 'docker-compose.prod.yml' DEV_COMPOSE_FILE = BASE_DIR / 'docker-compose.dev.yml' S3_COMPOSE_FILE = BASE_DIR / 'docker-compose.s3.yml' def start_server(args): config_files = f'-c {BASE_COMPOSE_FILE}' if args.prod: config_files += f' -c {PROD_COMPOSE_FILE}' else: config_files += f' -c {DEV_COMPOSE_FILE}' env = os.environ.copy() env.update({ 'LETSENCRYPT_ACME_EMAIL': args.acme_email, 'DOMAIN': args.domain, 'REPLICAS': str(args.replicas), 'IMAGE_NAME': os.environ.get('IMAGE_NAME', 'decompiler_explorer') }) if 'DECOMPILER_TIMEOUT' in os.environ: env['DECOMPILER_TIMEOUT'] = os.environ['DECOMPILER_TIMEOUT'] elif args.timeout is not None: env['DECOMPILER_TIMEOUT'] = args.timeout if args.s3: config_files += f' -c {S3_COMPOSE_FILE}' env["AWS_STORAGE_BUCKET_NAME"] = args.s3_bucket if args.debug: env['DEBUG'] = '1' cmd = f"docker stack deploy {config_files} --with-registry-auth --prune dogbolt" subprocess.run(cmd.split(' '), env=env, check=True)
null
17,906
import argparse import os import secrets import subprocess import sys from pathlib import Path def stop_server(): cmd = f"docker stack rm dogbolt" subprocess.run(cmd.split(' '), check=True)
null
17,907
import datasets def simple_accuracy(preds, labels): return (preds == labels).mean()
null
17,908
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets def compute_exact(a_gold, a_pred): def compute_em(predictions, references): scores = [any(compute_exact(ref, pred) for ref in refs) for pred, refs in zip(predictions, references)] return (sum(scores) / len(scores)) * 100
null
17,909
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets def SARIsent(ssent, csent, rsents): numref = len(rsents) s1grams = ssent.split(" ") c1grams = csent.split(" ") s2grams = [] c2grams = [] s3grams = [] c3grams = [] s4grams = [] c4grams = [] r1gramslist = [] r2gramslist = [] r3gramslist = [] r4gramslist = [] for rsent in rsents: r1grams = rsent.split(" ") r2grams = [] r3grams = [] r4grams = [] r1gramslist.append(r1grams) for i in range(0, len(r1grams) - 1): if i < len(r1grams) - 1: r2gram = r1grams[i] + " " + r1grams[i + 1] r2grams.append(r2gram) if i < len(r1grams) - 2: r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] r3grams.append(r3gram) if i < len(r1grams) - 3: r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3] r4grams.append(r4gram) r2gramslist.append(r2grams) r3gramslist.append(r3grams) r4gramslist.append(r4grams) for i in range(0, len(s1grams) - 1): if i < len(s1grams) - 1: s2gram = s1grams[i] + " " + s1grams[i + 1] s2grams.append(s2gram) if i < len(s1grams) - 2: s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] s3grams.append(s3gram) if i < len(s1grams) - 3: s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3] s4grams.append(s4gram) for i in range(0, len(c1grams) - 1): if i < len(c1grams) - 1: c2gram = c1grams[i] + " " + c1grams[i + 1] c2grams.append(c2gram) if i < len(c1grams) - 2: c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] c3grams.append(c3gram) if i < len(c1grams) - 3: c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3] c4grams.append(c4gram) (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref) (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref) (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref) 
(keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref) avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4 avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4 avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4 finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3 return finalscore def normalize(sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True): # Normalization is requried for the ASSET dataset (one of the primary # datasets in sentence simplification) to allow using space # to split the sentence. Even though Wiki-Auto and TURK datasets, # do not require normalization, we do it for consistency. # Code adapted from the EASSE library [1] written by the authors of the ASSET dataset. # [1] https://github.com/feralvam/easse/blob/580bba7e1378fc8289c663f864e0487188fe8067/easse/utils/preprocessing.py#L7 if lowercase: sentence = sentence.lower() if tokenizer in ["13a", "intl"]: if version.parse(sacrebleu.__version__).major >= 2: normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence) else: normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence) elif tokenizer == "moses": normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False) elif tokenizer == "penn": normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True) else: normalized_sent = sentence if not return_str: normalized_sent = normalized_sent.split() return normalized_sent def compute_sari(sources, predictions, references): if not (len(sources) == len(predictions) == len(references)): raise ValueError("Sources length must match predictions and references lengths.") sari_score = 0 for src, pred, refs in zip(sources, predictions, references): sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs]) sari_score = sari_score / len(predictions) return 100 * sari_score
null
17,910
import re import string from collections import Counter import sacrebleu import sacremoses from packaging import version import datasets def compute_sacrebleu( predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False, use_effective_order=False, ): references_per_prediction = len(references[0]) if any(len(refs) != references_per_prediction for refs in references): raise ValueError("Sacrebleu requires the same number of references for each prediction") transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)] output = sacrebleu.corpus_bleu( predictions, transformed_references, smooth_method=smooth_method, smooth_value=smooth_value, force=force, lowercase=lowercase, use_effective_order=use_effective_order, ) return output.score
null
17,911
import coval from coval.conll import reader, util from coval.eval import evaluator import datasets logger = datasets.logging.get_logger(__name__) def get_coref_infos( key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc" ): key_doc_lines = {doc: key_lines} sys_doc_lines = {doc: sys_lines} doc_coref_infos = {} key_nested_coref_num = 0 sys_nested_coref_num = 0 key_removed_nested_clusters = 0 sys_removed_nested_clusters = 0 key_singletons_num = 0 sys_singletons_num = 0 key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons) key_singletons_num += singletons_num if NP_only or min_span: key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span) sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons) sys_singletons_num += singletons_num if NP_only or min_span: sys_clusters = reader.set_annotated_parse_trees(sys_clusters, key_doc_lines[doc], NP_only, min_span) if remove_nested: nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons) key_nested_coref_num += nested_mentions key_removed_nested_clusters += removed_clusters nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons) sys_nested_coref_num += nested_mentions sys_removed_nested_clusters += removed_clusters sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters) key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters) doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster) if remove_nested: logger.info( "Number of removed nested coreferring mentions in the key " f"annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}" ) logger.info( "Number of resulting singleton clusters in the key " f"annotation: 
{key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}" ) if not keep_singletons: logger.info( f"{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system " "files, respectively" ) return doc_coref_infos def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span): doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span) output_scores = {} conll = 0 conll_subparts_num = 0 for name, metric in metrics: recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1) if name in ["muc", "bcub", "ceafe"]: conll += f1 conll_subparts_num += 1 output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1}) logger.info( name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}", ) if conll_subparts_num == 3: conll = (conll / 3) * 100 logger.info(f"CoNLL score: {conll:.2f}") output_scores.update({"conll_score": conll}) return output_scores
null
17,912
import coval from coval.conll import reader, util from coval.eval import evaluator import datasets def check_gold_parse_annotation(key_lines): has_gold_parse = False for line in key_lines: if not line.startswith("#"): if len(line.split()) > 6: parse_col = line.split()[5] if not parse_col == "-": has_gold_parse = True break else: break return has_gold_parse
null
17,913
from typing import List from packaging import version from sklearn.metrics import f1_score import datasets from datasets.config import PY_VERSION def simple_accuracy(preds, labels): return float((preds == labels).mean()) def f1_and_simple_accuracy(preds, labels): return { "f1": float(f1_score(y_true=labels, y_pred=preds, average="macro")), "accuracy": simple_accuracy(preds, labels), }
null
17,914
from typing import List from packaging import version from sklearn.metrics import f1_score import datasets from datasets.config import PY_VERSION def bleu( preds, labels, smooth_method="exp", smooth_value=None, force=False, lowercase=False, tokenize=None, use_effective_order=False, ): # xtreme-s can only have one label labels = [[label] for label in labels] preds = list(preds) try: import sacrebleu as scb except ImportError: raise ValueError( "sacrebleu has to be installed in order to apply the bleu metric for covost2." "You can install it via `pip install sacrebleu`." ) if version.parse(scb.__version__) < version.parse("1.4.12"): raise ImportWarning( "To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n" 'You can install it with `pip install "sacrebleu>=1.4.12"`.' ) references_per_prediction = len(labels[0]) if any(len(refs) != references_per_prediction for refs in labels): raise ValueError("Sacrebleu requires the same number of references for each prediction") transformed_references = [[refs[i] for refs in labels] for i in range(references_per_prediction)] output = scb.corpus_bleu( preds, transformed_references, smooth_method=smooth_method, smooth_value=smooth_value, force=force, lowercase=lowercase, use_effective_order=use_effective_order, **({"tokenize": tokenize} if tokenize else {}), ) return {"bleu": output.score}
null