hf_public_repos/candle/candle-pyo3/py_src/candle/functional/__init__.py
# Generated content DO NOT EDIT
from .. import functional

avg_pool2d = functional.avg_pool2d
gelu = functional.gelu
max_pool2d = functional.max_pool2d
relu = functional.relu
silu = functional.silu
softmax = functional.softmax
tanh = functional.tanh
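A minimal usage sketch for these re-exports (illustrative only; it assumes a built `candle` wheel is importable and that the functions behave like their pyo3 namesakes, with `softmax` taking a `dim` argument):

import candle
from candle import functional as F

t = candle.randn((2, 4))
print(F.relu(t).shape)         # element-wise ReLU, shape preserved: (2, 4)
print(F.softmax(t, -1).shape)  # softmax over the last dimension: (2, 4)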
hf_public_repos/candle/candle-pyo3/_additional_typing/__init__.py
from typing import Union, Sequence


class Tensor:
    """
    This contains the type hints for the magic methods of the `candle.Tensor` class.
    """

    def __add__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Add a scalar to a tensor or two tensors together.
        """
        pass

    def __radd__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Add a scalar to a tensor or two tensors together.
        """
        pass

    def __sub__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Subtract a scalar from a tensor or one tensor from another.
        """
        pass

    def __truediv__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Divide a tensor by a scalar or one tensor by another.
        """
        pass

    def __mul__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Multiply a tensor by a scalar or one tensor by another.
        """
        pass

    def __rmul__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Multiply a tensor by a scalar or one tensor by another.
        """
        pass

    def __richcmp__(self, rhs: Union["Tensor", "Scalar"], op) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __getitem__(self, index: Union["Index", "Tensor", Sequence["Index"]]) -> "Tensor":
        """
        Return a slice of a tensor.
        """
        pass

    def __eq__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __ne__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __lt__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __le__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __gt__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass

    def __ge__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Compare a tensor with a scalar or one tensor with another.
        """
        pass
hf_public_repos/candle/candle-pyo3/_additional_typing/README.md
This Python module contains external type hints for certain `candle` classes. This is only necessary for `magic` methods, e.g. `__add__`, as their text signature can't be set via pyo3. The classes in this module will be parsed by the `stub.py` script and interleaved with the signatures of the actual pyo3 `candle.candle` module.
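For illustration, a hypothetical excerpt of what the interleaved stub for `Tensor.__add__` could look like after `stub.py` runs (the class and hint mirror `_additional_typing/__init__.py` above; the exact output format is an assumption):

```python
# Hypothetical excerpt of a generated stub after interleaving (illustrative only):
class Tensor:
    def __add__(self, rhs: Union["Tensor", "Scalar"]) -> "Tensor":
        """
        Add a scalar to a tensor or two tensors together.
        """
        ...
```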
hf_public_repos/candle/candle-pyo3/tests/native/test_tensor.py
import candle
from candle import Tensor
from candle.utils import cuda_is_available
from candle.testing import assert_equal
import pytest


def test_tensor_can_be_constructed():
    t = Tensor(42.0)
    assert t.values() == 42.0


def test_tensor_can_be_constructed_from_list():
    t = Tensor([3.0, 1, 4, 1, 5, 9, 2, 6])
    assert t.values() == [3.0, 1, 4, 1, 5, 9, 2, 6]


def test_tensor_can_be_constructed_from_list_of_lists():
    t = Tensor([[3.0, 1, 4, 1], [5, 9, 2, 6]])
    assert t.values() == [[3.0, 1, 4, 1], [5, 9, 2, 6]]


def test_tensor_can_be_quantized():
    t = candle.randn((16, 256))
    for format in [
        "q4_0",
        "q4_1",
        "q5_0",
        "q5_1",
        "q8_0",
        "q2k",
        "q3k",
        "q4k",
        "q5k",
        "q8k",
    ]:
        for formatted_format in [format.upper(), format.lower()]:
            quant_t = t.quantize(formatted_format)
            assert quant_t.ggml_dtype.lower() == format.lower()
            assert quant_t.shape == t.shape


def test_tensor_can_be_indexed():
    t = Tensor([[3.0, 1, 4, 1], [5, 9, 2, 6]])
    assert t[0].values() == [3.0, 1.0, 4.0, 1.0]
    assert t[1].values() == [5.0, 9.0, 2.0, 6.0]
    assert t[-1].values() == [5.0, 9.0, 2.0, 6.0]
    assert t[-2].values() == [3.0, 1.0, 4.0, 1.0]


def test_tensor_can_be_sliced():
    t = Tensor([3.0, 1, 4, 10, 5, 9, 2, 6])
    assert t[0:4].values() == [3.0, 1.0, 4.0, 10.0]
    assert t[4:8].values() == [5.0, 9.0, 2.0, 6.0]
    assert t[-4:].values() == [5.0, 9.0, 2.0, 6.0]
    assert t[:-4].values() == [3.0, 1.0, 4.0, 10.0]
    assert t[-4:-2].values() == [5.0, 9.0]
    assert t[...].values() == t.values()


def test_tensor_can_be_sliced_2d():
    t = Tensor([[3.0, 1, 4, 1], [5, 9, 2, 6]])
    assert t[:, 0].values() == [3.0, 5]
    assert t[:, 1].values() == [1.0, 9.0]
    assert t[0, 0].values() == 3.0
    assert t[:, -1].values() == [1.0, 6.0]
    assert t[:, -4].values() == [3.0, 5]
    assert t[..., 0].values() == [3.0, 5]


def test_tensor_can_be_sliced_3d():
    t = Tensor([[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]]])
    assert t[:, :, 0].values() == [[1, 5], [9, 13]]
    assert t[:, :, 0:2].values() == [[[1, 2], [5, 6]], [[9, 10], [13, 14]]]
    assert t[:, 0, 0].values() == [1, 9]
    assert t[..., 0].values() == [[1, 5], [9, 13]]
    assert t[..., 0:2].values() == [[[1, 2], [5, 6]], [[9, 10], [13, 14]]]


def assert_bool(t: Tensor, expected: bool):
    assert t.shape == ()
    assert str(t.dtype) == str(candle.u8)
    assert bool(t.values()) == expected


def test_tensor_supports_equality_operations_with_scalars():
    t = Tensor(42.0)
    assert_bool(t == 42.0, True)
    assert_bool(t == 43.0, False)
    assert_bool(t != 42.0, False)
    assert_bool(t != 43.0, True)
    assert_bool(t > 41.0, True)
    assert_bool(t > 42.0, False)
    assert_bool(t >= 41.0, True)
    assert_bool(t >= 42.0, True)
    assert_bool(t < 43.0, True)
    assert_bool(t < 42.0, False)
    assert_bool(t <= 43.0, True)
    assert_bool(t <= 42.0, True)


def test_tensor_supports_equality_operations_with_tensors():
    t = Tensor(42.0)
    same = Tensor(42.0)
    other = Tensor(43.0)
    assert_bool(t == same, True)
    assert_bool(t == other, False)
    assert_bool(t != same, False)
    assert_bool(t != other, True)
    assert_bool(t > same, False)
    assert_bool(t > other, False)
    assert_bool(t >= same, True)
    assert_bool(t >= other, False)
    assert_bool(t < same, False)
    assert_bool(t < other, True)
    assert_bool(t <= same, True)
    assert_bool(t <= other, True)


def test_tensor_equality_operations_can_broadcast():
    # Create a decoder attention mask as a test case
    # e.g.
    # [[1,0,0]
    #  [1,1,0]
    #  [1,1,1]]
    mask_cond = candle.Tensor([0, 1, 2])
    mask = mask_cond < (mask_cond + 1).reshape((3, 1))
    assert mask.shape == (3, 3)
    assert_equal(mask, Tensor([[1, 0, 0], [1, 1, 0], [1, 1, 1]]).to_dtype(candle.u8))


def test_tensor_can_be_hashed():
    t = Tensor(42.0)
    other = Tensor(42.0)
    # Hash should represent a unique tensor
    assert hash(t) != hash(other)
    assert hash(t) == hash(t)


def test_tensor_can_be_expanded_with_none():
    t = candle.rand((12, 12))
    b = t[None]
    assert b.shape == (1, 12, 12)
    c = t[:, None, None, :]
    assert c.shape == (12, 1, 1, 12)
    d = t[None, :, None, :]
    assert d.shape == (1, 12, 1, 12)
    e = t[None, None, :, :]
    assert e.shape == (1, 1, 12, 12)
    f = t[:, :, None]
    assert f.shape == (12, 12, 1)


def test_tensor_can_be_index_via_tensor():
    t = candle.Tensor([[1, 2, 1, 2], [3, 4, 3, 4], [5, 6, 5, 6]])
    indexed = t[candle.Tensor([0, 2])]
    assert indexed.shape == (2, 4)
    assert indexed.values() == [[1, 2, 1, 2], [5, 6, 5, 6]]
    indexed = t[:, candle.Tensor([0, 2])]
    assert indexed.shape == (3, 2)
    assert indexed.values() == [[1, 1], [3, 3], [5, 5]]


def test_tensor_can_be_index_via_list():
    t = candle.Tensor([[1, 2, 1, 2], [3, 4, 3, 4], [5, 6, 5, 6]])
    indexed = t[[0, 2]]
    assert indexed.shape == (2, 4)
    assert indexed.values() == [[1, 2, 1, 2], [5, 6, 5, 6]]
    indexed = t[:, [0, 2]]
    assert indexed.shape == (3, 2)
    assert indexed.values() == [[1, 1], [3, 3], [5, 5]]


def test_tensor_can_be_cast_via_to():
    t = Tensor(42.0)
    assert str(t.dtype) == str(candle.f32)
    t_new_args = t.to(candle.f64)
    assert str(t_new_args.dtype) == str(candle.f64)
    t_new_kwargs = t.to(dtype=candle.f64)
    assert str(t_new_kwargs.dtype) == str(candle.f64)
    pytest.raises(TypeError, lambda: t.to("not a dtype"))
    pytest.raises(TypeError, lambda: t.to(dtype="not a dtype"))
    pytest.raises(TypeError, lambda: t.to(candle.f64, "not a dtype"))
    pytest.raises(TypeError, lambda: t.to())
    pytest.raises(ValueError, lambda: t.to(candle.f16, dtype=candle.f64))
    pytest.raises(ValueError, lambda: t.to(candle.f16, candle.f16))
    other = Tensor(42.0).to(candle.f64)
    t_new_other_args = t.to(other)
    assert str(t_new_other_args.dtype) == str(candle.f64)
    t_new_other_kwargs = t.to(other=other)
    assert str(t_new_other_kwargs.dtype) == str(candle.f64)


@pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available")
def test_tensor_can_be_moved_via_to():
    t = Tensor(42.0)
    assert t.device == "cpu"
    t_new_args = t.to("cuda")
    assert t_new_args.device == "cuda"
    t_new_kwargs = t.to(device="cuda")
    assert t_new_kwargs.device == "cuda"
    pytest.raises(TypeError, lambda: t.to("not a device"))
    pytest.raises(TypeError, lambda: t.to(device="not a device"))
    pytest.raises(TypeError, lambda: t.to("cuda", "not a device"))
    pytest.raises(TypeError, lambda: t.to())
    pytest.raises(ValueError, lambda: t.to("cuda", device="cpu"))
    pytest.raises(ValueError, lambda: t.to("cuda", "cuda"))
    other = Tensor(42.0).to("cuda")
    t_new_other_args = t.to(other)
    assert t_new_other_args.device == "cuda"
    t_new_other_kwargs = t.to(other=other)
    assert t_new_other_kwargs.device == "cuda"


@pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available")
def test_tensor_can_be_moved_and_cast_via_to():
    t = Tensor(42.0)
    assert t.device == "cpu"
    assert str(t.dtype) == str(candle.f32)
    t_new_args = t.to("cuda", candle.f64)
    assert t_new_args.device == "cuda"
    assert str(t_new_args.dtype) == str(candle.f64)
    t_new_kwargs = t.to(device="cuda", dtype=candle.f64)
    assert t_new_kwargs.device == "cuda"
    assert str(t_new_kwargs.dtype) == str(candle.f64)
    other = Tensor(42.0).to("cuda").to(candle.f64)
    t_new_other_args = t.to(other)
    assert t_new_other_args.device == "cuda"
    assert str(t_new_other_args.dtype) == str(candle.f64)
    t_new_other_kwargs = t.to(other=other)
    assert t_new_other_kwargs.device == "cuda"
    assert str(t_new_other_kwargs.dtype) == str(candle.f64)


def test_tensor_can_be_added():
    t = Tensor(42.0)
    result = t + t
    assert result.values() == 84.0
    result = t + 2.0
    assert result.values() == 44.0
    a = candle.rand((3, 1, 4))
    b = candle.rand((2, 1))
    c_native = a.broadcast_add(b)
    c = a + b
    assert c.shape == (3, 2, 4)
    assert c.values() == c_native.values()
    with pytest.raises(ValueError):
        d = candle.rand((3, 4, 5))
        e = candle.rand((4, 6))
        f = d + e


def test_tensor_can_be_subtracted():
    t = Tensor(42.0)
    result = t - t
    assert result.values() == 0
    result = t - 2.0
    assert result.values() == 40.0
    a = candle.rand((3, 1, 4))
    b = candle.rand((2, 1))
    c_native = a.broadcast_sub(b)
    c = a - b
    assert c.shape == (3, 2, 4)
    assert c.values() == c_native.values()
    with pytest.raises(ValueError):
        d = candle.rand((3, 4, 5))
        e = candle.rand((4, 6))
        f = d - e


def test_tensor_can_be_multiplied():
    t = Tensor(42.0)
    result = t * t
    assert result.values() == 1764.0
    result = t * 2.0
    assert result.values() == 84.0
    a = candle.rand((3, 1, 4))
    b = candle.rand((2, 1))
    c_native = a.broadcast_mul(b)
    c = a * b
    assert c.shape == (3, 2, 4)
    assert c.values() == c_native.values()
    with pytest.raises(ValueError):
        d = candle.rand((3, 4, 5))
        e = candle.rand((4, 6))
        f = d * e


def test_tensor_can_be_divided():
    t = Tensor(42.0)
    result = t / t
    assert result.values() == 1.0
    result = t / 2.0
    assert result.values() == 21.0
    a = candle.rand((3, 1, 4))
    b = candle.rand((2, 1))
    c_native = a.broadcast_div(b)
    c = a / b
    assert c.shape == (3, 2, 4)
    assert c.values() == c_native.values()
    with pytest.raises(ValueError):
        d = candle.rand((3, 4, 5))
        e = candle.rand((4, 6))
        f = d / e
hf_public_repos/candle/candle-pyo3/tests/native/test_utils.py
import candle
from candle import Tensor, QTensor
from candle.utils import load_safetensors, save_gguf, load_gguf, save_safetensors
from pathlib import Path

TEST_DIR = Path(__file__).parent.parent / "_workdir"
TEST_DIR.mkdir(exist_ok=True)


def test_can_roundtrip_safetensors():
    tensors = {
        "a": candle.randn((16, 256)),
        "b": candle.randn((16, 16)),
    }

    file = str(TEST_DIR / "test.safetensors")
    save_safetensors(file, tensors)
    loaded_tensors = load_safetensors(file)
    assert set(tensors.keys()) == set(loaded_tensors.keys())
    for key in tensors.keys():
        assert tensors[key].values() == loaded_tensors[key].values(), "Values are not equal"
        assert tensors[key].shape == loaded_tensors[key].shape, "Shapes are not equal"
        assert str(tensors[key].dtype) == str(loaded_tensors[key].dtype), "Dtypes are not equal"


def test_can_roundtrip_gguf():
    metadata = {
        "a": 1,
        "b": "foo",
        "c": [1, 2, 3],
        "d": [[1, 2], [3, 4]],
    }

    tensors = {
        "a": candle.randn((16, 256)).quantize("q4_0"),
        "b": candle.randn((16, 16)).quantize("f32"),
    }

    file = str(TEST_DIR / "test.gguf")
    save_gguf(file, tensors, metadata)
    loaded_tensors, loaded_metadata = load_gguf(file)

    assert set(metadata.keys()) == set(loaded_metadata.keys())
    for key in metadata.keys():
        assert metadata[key] == loaded_metadata[key]

    assert set(tensors.keys()) == set(loaded_tensors.keys())
    for key in tensors.keys():
        assert tensors[key].dequantize().values() == loaded_tensors[key].dequantize().values(), "Values are not equal"
        assert tensors[key].shape == loaded_tensors[key].shape, "Shapes are not equal"
        assert str(tensors[key].ggml_dtype) == str(loaded_tensors[key].ggml_dtype), "Dtypes are not equal"
hf_public_repos/candle/candle-pyo3/tests/native/test_shape.py
from candle import Tensor
from candle import rand
import pytest


def test_absolute_shapes_are_valid():
    a = rand((10, 20))
    assert a.shape == (10, 20)

    b = rand(10, 20)
    assert b.shape == (10, 20)

    pytest.raises(OverflowError, lambda: rand((10, 20, -1)))
    pytest.raises(OverflowError, lambda: rand(-1, 20))
    pytest.raises(TypeError, lambda: rand("foo", True))


def test_relative_shapes_are_valid():
    a = rand(10, 20)
    a = a.reshape((1, -1))
    assert a.shape == (1, 200)

    b = rand(10, 20)
    b = b.reshape(-1, 1)
    assert b.shape == (200, 1)

    c = rand(10, 20)
    pytest.raises(TypeError, lambda: c.reshape(1, "foo"))
    pytest.raises(ValueError, lambda: c.reshape(1, -2))
    pytest.raises(ValueError, lambda: c.reshape((-2, 1)))
    pytest.raises(ValueError, lambda: c.reshape((0, 1)))
    pytest.raises(ValueError, lambda: c.reshape((1, -1, -1)))
hf_public_repos/candle/candle-pyo3/tests/bindings/test_module.py
import candle
from candle import Tensor, QTensor
from candle.nn import Module, Linear
from candle.utils import cuda_is_available
import pytest


def test_module_can_be_constructed():
    class A(Module):
        pass

    a = A()
    assert a is not None
    assert len(list(a.buffers())) == 0


def test_module_registers_tensors():
    class A(Module):
        def __init__(self):
            super().__init__()
            self.t = Tensor(42.0)

    a = A()
    named_buffers = dict(a.named_buffers())
    assert len(named_buffers) == 1
    assert "t" in named_buffers


def test_module_registers_submodules():
    class A(Module):
        def __init__(self):
            super().__init__()
            self.linear = Linear(10, 20)

    a = A()
    named_modules = dict(a.named_modules())
    named_buffers = dict(a.named_buffers())
    assert len(named_buffers) == 2
    assert "linear" in named_modules
    assert "linear.weight" in named_buffers
    assert "linear.bias" in named_buffers


def test_module_can_dump_statedict():
    class A(Module):
        def __init__(self):
            super().__init__()
            self.linear = Linear(10, 20)
            self.t = Tensor(42.0)

    a = A()
    state_dict = a.state_dict()
    assert hasattr(state_dict, "_metadata")
    assert "t" in state_dict
    assert "linear.weight" in state_dict
    assert "linear.bias" in state_dict
    assert len(state_dict) == 3


def test_module_can_load_statedict():
    class A(Module):
        def __init__(self):
            super().__init__()
            self.linear = Linear(10, 20)
            self.t = Tensor(42.0)

    statedict = {
        "linear.weight": candle.ones((20, 10)),
        "linear.bias": candle.zeros((20,)),
        "t": Tensor(42.0),
    }

    a = A()
    a.load_state_dict(statedict)


def test_module_throws_on_shape_mismatch():
    class A(Module):
        def __init__(self):
            super().__init__()
            self.t = Tensor(42.0)

    statedict = {
        "t": candle.ones((20,)),
    }

    a = A()
    with pytest.raises(RuntimeError) as excinfo:
        a.load_state_dict(statedict)
    assert "size mismatch" in str(excinfo.value)


def test_module_throws_on_missing_key():
    class A(Module):
        def __init__(self):
            super().__init__()
            self.t = Tensor(42.0)

    statedict = {
        "not_t": Tensor(42.0),
    }

    a = A()
    with pytest.raises(RuntimeError) as excinfo:
        a.load_state_dict(statedict)
    assert 'Missing key(s) in state_dict: "t".' in str(excinfo.value)


def test_module_can_load_quantized_tensors():
    class A(Module):
        def __init__(self):
            super().__init__()
            self.t = candle.randn((16, 256))
            self._quantizable_buffers.add("t")

    statedict = {
        "t": candle.ones((16, 256)).quantize("q4_0"),
    }

    a = A()
    a.load_state_dict(statedict)
    assert isinstance(a.t, QTensor)
    assert a.t.ggml_dtype == "Q4_0"


def test_module_dequantizes_tensors_automatically():
    class A(Module):
        def __init__(self):
            super().__init__()
            self.t = candle.randn((16, 256))

    statedict = {
        "t": candle.ones((16, 256)).quantize("q4_0"),
    }

    a = A()
    a.load_state_dict(statedict)
    assert isinstance(a.t, Tensor)


@pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available")
def test_module_can_be_moved_to_cuda():
    class A(Module):
        def __init__(self):
            super().__init__()
            self.t = candle.randn((16, 256))

    a = A()
    a.cuda()
    assert a.t.device == "cuda"


@pytest.mark.skipif(not cuda_is_available(), reason="CUDA is not available")
def test_module_can_be_moved_from_cuda_to_cpu():
    class A(Module):
        def __init__(self):
            super().__init__()
            self.t = candle.randn((16, 256))

    a = A()
    a.cuda()
    assert a.t.device == "cuda"
    a.cpu()
    assert a.t.device == "cpu"
hf_public_repos/candle/candle-pyo3/tests/bindings/test_linear.py
import candle
from candle import Tensor
from candle.nn import Linear


def test_linear_layer_can_be_constructed():
    linear = Linear(10, 10)
    assert linear is not None


def test_linear_layer_can_forward_a_singular_input():
    linear = Linear(384, 1536)
    input_tensor = candle.randn((8, 384))
    output = linear.forward(input_tensor)
    assert output.shape == (8, 1536)


def test_linear_layer_can_forward_a_batched_input():
    linear = Linear(384, 1536)
    input_tensor = candle.randn((16, 8, 384))
    output = linear.forward(input_tensor)
    assert output.shape == (16, 8, 1536)


def test_quantized_linear_layer_can_forward_a_singular_input():
    linear = Linear(384, 1536)
    linear.weight = linear.weight.quantize("q4_0")
    input_tensor = candle.randn((8, 384))
    output = linear.forward(input_tensor)
    assert output.shape == (8, 1536)


def test_quantized_linear_layer_can_forward_a_batched_input():
    linear = Linear(384, 1536)
    linear.weight = linear.weight.quantize("q4_0")
    input_tensor = candle.randn((16, 8, 384))
    output = linear.forward(input_tensor)
    assert output.shape == (16, 8, 1536)
hf_public_repos/candle/candle-pyo3/tests/bindings/test_testing.py
import candle
from candle import Tensor
from candle.testing import assert_equal, assert_almost_equal
import pytest


@pytest.mark.parametrize("dtype", [candle.f32, candle.f64, candle.f16, candle.u32, candle.u8, candle.i64])
def test_assert_equal_asserts_correctly(dtype: candle.DType):
    a = Tensor([1, 2, 3]).to(dtype)
    b = Tensor([1, 2, 3]).to(dtype)
    assert_equal(a, b)

    with pytest.raises(AssertionError):
        assert_equal(a, b + 1)


@pytest.mark.parametrize("dtype", [candle.f32, candle.f64, candle.f16, candle.u32, candle.u8, candle.i64])
def test_assert_almost_equal_asserts_correctly(dtype: candle.DType):
    a = Tensor([1, 2, 3]).to(dtype)
    b = Tensor([1, 2, 3]).to(dtype)
    assert_almost_equal(a, b)

    with pytest.raises(AssertionError):
        assert_almost_equal(a, b + 1)

    assert_almost_equal(a, b + 1, atol=20)
    assert_almost_equal(a, b + 1, rtol=20)

    with pytest.raises(AssertionError):
        assert_almost_equal(a, b + 1, atol=0.9)

    with pytest.raises(AssertionError):
        assert_almost_equal(a, b + 1, rtol=0.1)
hf_public_repos/candle/candle-pyo3/src/onnx.rs
use std::collections::HashMap;

use crate::utils::wrap_err;
use crate::{PyDType, PyTensor};
use candle_onnx::eval::{dtype, get_tensor, simple_eval};
use candle_onnx::onnx::tensor_proto::DataType;
use candle_onnx::onnx::tensor_shape_proto::dimension::Value;
use candle_onnx::onnx::type_proto::{Tensor as ONNXTensor, Value as ONNXValue};
use candle_onnx::onnx::{ModelProto, ValueInfoProto};
use pyo3::exceptions::PyValueError;
use pyo3::prelude::*;
use pyo3::types::{PyList, PyTuple};

#[derive(Clone, Debug)]
#[pyclass(name = "ONNXTensorDescription")]
/// A wrapper around an ONNX tensor description.
pub struct PyONNXTensorDescriptor(ONNXTensor);

#[pymethods]
impl PyONNXTensorDescriptor {
    #[getter]
    /// The data type of the tensor.
    /// &RETURNS&: DType
    fn dtype(&self) -> PyResult<PyDType> {
        match DataType::try_from(self.0.elem_type) {
            Ok(dt) => match dtype(dt) {
                Some(dt) => Ok(PyDType(dt)),
                None => Err(PyValueError::new_err(format!(
                    "unsupported 'value' data-type {dt:?}"
                ))),
            },
            type_ => Err(PyValueError::new_err(format!(
                "unsupported input type {type_:?}"
            ))),
        }
    }

    #[getter]
    /// The shape of the tensor.
    /// &RETURNS&: Tuple[Union[int,str,Any]]
    fn shape(&self, py: Python) -> PyResult<Py<PyTuple>> {
        let shape = PyList::empty(py);
        if let Some(d) = &self.0.shape {
            for dim in d.dim.iter() {
                if let Some(value) = &dim.value {
                    match value {
                        Value::DimValue(v) => shape.append(*v)?,
                        Value::DimParam(s) => shape.append(s.clone())?,
                    };
                } else {
                    return Err(PyValueError::new_err("None value in shape"));
                }
            }
        }
        Ok(shape.to_tuple().into())
    }

    fn __repr__(&self, py: Python) -> String {
        match (self.shape(py), self.dtype()) {
            (Ok(shape), Ok(dtype)) => format!(
                "TensorDescriptor[shape: {:?}, dtype: {:?}]",
                shape.to_string(),
                dtype.__str__()
            ),
            (Err(_), Err(_)) => "TensorDescriptor[shape: unknown, dtype: unknown]".to_string(),
            (Err(_), Ok(dtype)) => format!(
                "TensorDescriptor[shape: unknown, dtype: {:?}]",
                dtype.__str__()
            ),
            (Ok(shape), Err(_)) => format!(
                "TensorDescriptor[shape: {:?}, dtype: unknown]",
                shape.to_string()
            ),
        }
    }

    fn __str__(&self, py: Python) -> String {
        self.__repr__(py)
    }
}

#[derive(Clone, Debug)]
#[pyclass(name = "ONNXModel")]
/// A wrapper around an ONNX model.
pub struct PyONNXModel(ModelProto);

fn extract_tensor_descriptions(
    value_infos: &[ValueInfoProto],
) -> HashMap<String, PyONNXTensorDescriptor> {
    let mut map = HashMap::new();
    for value_info in value_infos.iter() {
        let input_type = match &value_info.r#type {
            Some(input_type) => input_type,
            None => continue,
        };
        let input_type = match &input_type.value {
            Some(input_type) => input_type,
            None => continue,
        };

        let tensor_type: &ONNXTensor = match input_type {
            ONNXValue::TensorType(tt) => tt,
            _ => continue,
        };
        map.insert(
            value_info.name.to_string(),
            PyONNXTensorDescriptor(tensor_type.clone()),
        );
    }
    map
}

#[pymethods]
impl PyONNXModel {
    #[new]
    #[pyo3(text_signature = "(self, path:str)")]
    /// Load an ONNX model from the given path.
    fn new(path: String) -> PyResult<Self> {
        let model: ModelProto = candle_onnx::read_file(path).map_err(wrap_err)?;
        Ok(PyONNXModel(model))
    }

    #[getter]
    /// The version of the IR this model targets.
    /// &RETURNS&: int
    fn ir_version(&self) -> i64 {
        self.0.ir_version
    }

    #[getter]
    /// The producer of the model.
    /// &RETURNS&: str
    fn producer_name(&self) -> String {
        self.0.producer_name.clone()
    }

    #[getter]
    /// The version of the producer of the model.
    /// &RETURNS&: str
    fn producer_version(&self) -> String {
        self.0.producer_version.clone()
    }

    #[getter]
    /// The domain of the operator set of the model.
    /// &RETURNS&: str
    fn domain(&self) -> String {
        self.0.domain.clone()
    }

    #[getter]
    /// The version of the model.
    /// &RETURNS&: int
    fn model_version(&self) -> i64 {
        self.0.model_version
    }

    #[getter]
    /// The doc string of the model.
    /// &RETURNS&: str
    fn doc_string(&self) -> String {
        self.0.doc_string.clone()
    }

    /// Get the weights of the model.
    /// &RETURNS&: Dict[str, Tensor]
    fn initializers(&self) -> PyResult<HashMap<String, PyTensor>> {
        let mut map = HashMap::new();
        if let Some(graph) = self.0.graph.as_ref() {
            for tensor_description in graph.initializer.iter() {
                let tensor = get_tensor(tensor_description, tensor_description.name.as_str())
                    .map_err(wrap_err)?;
                map.insert(tensor_description.name.to_string(), PyTensor(tensor));
            }
        }
        Ok(map)
    }

    #[getter]
    /// The inputs of the model.
    /// &RETURNS&: Optional[Dict[str, ONNXTensorDescription]]
    fn inputs(&self) -> Option<HashMap<String, PyONNXTensorDescriptor>> {
        if let Some(graph) = self.0.graph.as_ref() {
            return Some(extract_tensor_descriptions(&graph.input));
        }
        None
    }

    #[getter]
    /// The outputs of the model.
    /// &RETURNS&: Optional[Dict[str, ONNXTensorDescription]]
    fn outputs(&self) -> Option<HashMap<String, PyONNXTensorDescriptor>> {
        if let Some(graph) = self.0.graph.as_ref() {
            return Some(extract_tensor_descriptions(&graph.output));
        }
        None
    }

    #[pyo3(text_signature = "(self, inputs:Dict[str,Tensor])")]
    /// Run the model on the given inputs.
    /// &RETURNS&: Dict[str,Tensor]
    fn run(&self, inputs: HashMap<String, PyTensor>) -> PyResult<HashMap<String, PyTensor>> {
        let unwrapped_tensors = inputs.into_iter().map(|(k, v)| (k.clone(), v.0)).collect();
        let result = simple_eval(&self.0, unwrapped_tensors).map_err(wrap_err)?;

        Ok(result
            .into_iter()
            .map(|(k, v)| (k.clone(), PyTensor(v)))
            .collect())
    }
}
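A short usage sketch of these bindings from Python (illustrative only; it assumes the crate was built with the `onnx` feature, that the classes are exposed as `candle.onnx.ONNXModel`, and that "model.onnx" and the input name/shape are placeholder values):

import candle
from candle.onnx import ONNXModel  # assumption: module path in the built wheel

model = ONNXModel("model.onnx")  # placeholder path
print(model.ir_version, model.producer_name)
for name, desc in (model.inputs or {}).items():
    print(name, desc.shape, desc.dtype)  # shape may contain symbolic dims as strings

# `run` maps input names to tensors and returns output tensors keyed by name.
inputs = {"input": candle.zeros((1, 3, 224, 224))}  # hypothetical input name/shape
outputs = model.run(inputs)
print({name: t.shape for name, t in outputs.items()})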
hf_public_repos/candle/candle-pyo3/src/lib.rs
#![allow(clippy::redundant_closure_call)] use pyo3::exceptions::{PyTypeError, PyValueError}; use pyo3::prelude::*; use pyo3::pyclass::CompareOp; use pyo3::types::{IntoPyDict, PyDict, PyTuple}; use pyo3::ToPyObject; use std::collections::hash_map::DefaultHasher; use std::hash::{Hash, Hasher}; use std::os::raw::c_long; use std::sync::Arc; use half::{bf16, f16}; #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use ::candle::{quantized::QTensor, DType, Device, Module, Tensor, WithDType}; mod utils; use utils::wrap_err; mod shape; use shape::{PyShape, PyShapeWithHole}; #[cfg(feature = "onnx")] mod onnx; #[derive(Clone, Debug)] #[pyclass(name = "Tensor")] /// A `candle` tensor. struct PyTensor(Tensor); impl std::ops::Deref for PyTensor { type Target = Tensor; fn deref(&self) -> &Self::Target { &self.0 } } #[derive(Clone, Copy, Debug, PartialEq, Eq)] #[pyclass(name = "DType")] /// A `candle` dtype. struct PyDType(DType); #[pymethods] impl PyDType { fn __repr__(&self) -> String { format!("{:?}", self.0) } fn __str__(&self) -> String { self.__repr__() } } impl PyDType { fn from_pyobject(ob: PyObject, py: Python<'_>) -> PyResult<Self> { use std::str::FromStr; if let Ok(dtype) = ob.extract::<&str>(py) { let dtype = DType::from_str(dtype) .map_err(|_| PyTypeError::new_err(format!("invalid dtype '{dtype}'")))?; Ok(Self(dtype)) } else { ob.extract(py) } } } static CUDA_DEVICE: std::sync::Mutex<Option<Device>> = std::sync::Mutex::new(None); static METAL_DEVICE: std::sync::Mutex<Option<Device>> = std::sync::Mutex::new(None); #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum PyDevice { Cpu, Cuda, Metal, } impl PyDevice { fn from_device(device: &Device) -> Self { match device { Device::Cpu => Self::Cpu, Device::Cuda(_) => Self::Cuda, Device::Metal(_) => Self::Metal, } } fn as_device(&self) -> PyResult<Device> { match self { Self::Cpu => Ok(Device::Cpu), Self::Cuda => { let mut device = CUDA_DEVICE.lock().unwrap(); if let Some(device) = device.as_ref() { return Ok(device.clone()); }; let d = Device::new_cuda(0).map_err(wrap_err)?; *device = Some(d.clone()); Ok(d) } Self::Metal => { let mut device = METAL_DEVICE.lock().unwrap(); if let Some(device) = device.as_ref() { return Ok(device.clone()); }; let d = Device::new_metal(0).map_err(wrap_err)?; *device = Some(d.clone()); Ok(d) } } } } impl<'source> FromPyObject<'source> for PyDevice { fn extract(ob: &'source PyAny) -> PyResult<Self> { let device: &str = ob.extract()?; let device = match device { "cpu" => PyDevice::Cpu, "cuda" => PyDevice::Cuda, _ => Err(PyTypeError::new_err(format!("invalid device '{device}'")))?, }; Ok(device) } } impl ToPyObject for PyDevice { fn to_object(&self, py: Python<'_>) -> PyObject { let str = match self { PyDevice::Cpu => "cpu", PyDevice::Cuda => "cuda", PyDevice::Metal => "metal", }; str.to_object(py) } } trait PyWithDType: WithDType { fn to_py(&self, py: Python<'_>) -> PyObject; } macro_rules! 
pydtype { ($ty:ty, $conv:expr) => { impl PyWithDType for $ty { fn to_py(&self, py: Python<'_>) -> PyObject { $conv(*self).to_object(py) } } }; } pydtype!(i64, |v| v); pydtype!(u8, |v| v); pydtype!(u32, |v| v); pydtype!(f16, f32::from); pydtype!(bf16, f32::from); pydtype!(f32, |v| v); pydtype!(f64, |v| v); fn actual_index(t: &Tensor, dim: usize, index: i64) -> ::candle::Result<usize> { let dim = t.dim(dim)?; if 0 <= index { let index = index as usize; if dim <= index { ::candle::bail!("index {index} is too large for tensor dimension {dim}") } Ok(index) } else { if (dim as i64) < -index { ::candle::bail!("index {index} is too low for tensor dimension {dim}") } Ok((dim as i64 + index) as usize) } } fn actual_dim(t: &Tensor, dim: i64) -> ::candle::Result<usize> { let rank = t.rank(); if 0 <= dim { let dim = dim as usize; if rank <= dim { ::candle::bail!("dimension index {dim} is too large for tensor rank {rank}") } Ok(dim) } else { if (rank as i64) < -dim { ::candle::bail!("dimension index {dim} is too low for tensor rank {rank}") } Ok((rank as i64 + dim) as usize) } } // TODO: Something similar to this should probably be a part of candle core. trait MapDType { type Output; fn f<T: PyWithDType>(&self, t: &Tensor) -> PyResult<Self::Output>; fn map(&self, t: &Tensor) -> PyResult<Self::Output> { match t.dtype() { DType::U8 => self.f::<u8>(t), DType::U32 => self.f::<u32>(t), DType::I64 => self.f::<i64>(t), DType::BF16 => self.f::<bf16>(t), DType::F16 => self.f::<f16>(t), DType::F32 => self.f::<f32>(t), DType::F64 => self.f::<f64>(t), } } } enum Indexer { Index(usize), Slice(usize, usize), Elipsis, Expand, IndexSelect(Tensor), } #[derive(Clone, Debug)] struct TorchTensor(PyObject); impl<'source> pyo3::FromPyObject<'source> for TorchTensor { fn extract(ob: &'source PyAny) -> PyResult<Self> { let numpy_value: PyObject = ob.getattr("numpy")?.call0()?.extract()?; Ok(TorchTensor(numpy_value)) } } #[pymethods] impl PyTensor { #[new] #[pyo3(text_signature = "(self, data:_ArrayLike)")] // TODO: Handle arbitrary input dtype and shape. /// Creates a new tensor from a Python value. The value can be a scalar or array-like object. fn new(py: Python<'_>, data: PyObject) -> PyResult<Self> { use Device::Cpu; let tensor = if let Ok(vs) = data.extract::<u32>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<i64>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<f32>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<u32>>(py) { let len = vs.len(); Tensor::from_vec(vs, len, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<i64>>(py) { let len = vs.len(); Tensor::from_vec(vs, len, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<f32>>(py) { let len = vs.len(); Tensor::from_vec(vs, len, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<u32>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<i64>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<f32>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<Vec<u32>>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<Vec<i64>>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? } else if let Ok(vs) = data.extract::<Vec<Vec<Vec<f32>>>>(py) { Tensor::new(vs, &Cpu).map_err(wrap_err)? 
} else if let Ok(TorchTensor(numpy)) = data.extract::<TorchTensor>(py) { return PyTensor::new(py, numpy); } else { let ty = data.as_ref(py).get_type(); Err(PyTypeError::new_err(format!( "incorrect type {ty} for tensor" )))? }; Ok(Self(tensor)) } /// Gets the tensor's data as a Python scalar or array-like object. /// &RETURNS&: _ArrayLike fn values(&self, py: Python<'_>) -> PyResult<PyObject> { struct M<'a>(Python<'a>); impl<'a> MapDType for M<'a> { type Output = PyObject; fn f<T: PyWithDType>(&self, t: &Tensor) -> PyResult<Self::Output> { match t.rank() { 0 => Ok(t.to_scalar::<T>().map_err(wrap_err)?.to_py(self.0)), 1 => { let v = t.to_vec1::<T>().map_err(wrap_err)?; let v = v.iter().map(|v| v.to_py(self.0)).collect::<Vec<_>>(); Ok(v.to_object(self.0)) } 2 => { let v = t.to_vec2::<T>().map_err(wrap_err)?; let v = v .iter() .map(|v| v.iter().map(|v| v.to_py(self.0)).collect()) .collect::<Vec<Vec<_>>>(); Ok(v.to_object(self.0)) } 3 => { let v = t.to_vec3::<T>().map_err(wrap_err)?; let v = v .iter() .map(|v| { v.iter() .map(|v| v.iter().map(|v| v.to_py(self.0)).collect()) .collect() }) .collect::<Vec<Vec<Vec<_>>>>(); Ok(v.to_object(self.0)) } n => Err(PyTypeError::new_err(format!( "TODO: conversion to PyObject is not handled for rank {n}" )))?, } } } // TODO: Handle arbitrary shapes. M(py).map(self) } /// Converts candle's tensor to pytorch's tensor /// &RETURNS&: torch.Tensor fn to_torch(&self, py: Python<'_>) -> PyResult<PyObject> { let candle_values = self.values(py)?; let torch_tensor: PyObject = py .import("torch")? .getattr("tensor")? .call1((candle_values,))? .extract()?; Ok(torch_tensor) } #[getter] /// Gets the tensor's shape. /// &RETURNS&: Tuple[int] fn shape(&self, py: Python<'_>) -> PyObject { PyTuple::new(py, self.0.dims()).to_object(py) } #[getter] /// Gets the tensor's element count. /// &RETURNS&: int fn nelement(&self) -> usize { self.0.elem_count() } #[getter] /// Gets the tensor's strides. /// &RETURNS&: Tuple[int] fn stride(&self, py: Python<'_>) -> PyObject { PyTuple::new(py, self.0.stride()).to_object(py) } #[getter] /// Gets the tensor's dtype. /// &RETURNS&: DType fn dtype(&self) -> PyDType { PyDType(self.0.dtype()) } #[getter] /// Gets the tensor's device. /// &RETURNS&: Device fn device(&self, py: Python<'_>) -> PyObject { PyDevice::from_device(self.0.device()).to_object(py) } #[getter] /// Gets the tensor's rank. /// &RETURNS&: int fn rank(&self) -> usize { self.0.rank() } fn __repr__(&self) -> String { format!("{}", self.0) } fn __str__(&self) -> String { self.__repr__() } /// Performs the `abs` operation on the tensor. /// &RETURNS&: Tensor fn abs(&self) -> PyResult<Self> { Ok(PyTensor(self.0.abs().map_err(wrap_err)?)) } /// Performs the `sin` operation on the tensor. /// &RETURNS&: Tensor fn sin(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sin().map_err(wrap_err)?)) } /// Performs the `cos` operation on the tensor. /// &RETURNS&: Tensor fn cos(&self) -> PyResult<Self> { Ok(PyTensor(self.0.cos().map_err(wrap_err)?)) } /// Performs the `log` operation on the tensor. /// &RETURNS&: Tensor fn log(&self) -> PyResult<Self> { Ok(PyTensor(self.0.log().map_err(wrap_err)?)) } /// Squares the tensor. /// &RETURNS&: Tensor fn sqr(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sqr().map_err(wrap_err)?)) } /// Calculates the square root of the tensor. /// &RETURNS&: Tensor fn sqrt(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sqrt().map_err(wrap_err)?)) } /// Get the `recip` of the tensor. 
/// &RETURNS&: Tensor fn recip(&self) -> PyResult<Self> { Ok(PyTensor(self.0.recip().map_err(wrap_err)?)) } /// Performs the `exp` operation on the tensor. /// &RETURNS&: Tensor fn exp(&self) -> PyResult<Self> { Ok(PyTensor(self.0.exp().map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, p:float)")] /// Performs the `pow` operation on the tensor with the given exponent. /// &RETURNS&: Tensor fn powf(&self, p: f64) -> PyResult<Self> { Ok(PyTensor(self.0.powf(p).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor, dim:int)")] /// Select values for the input tensor at the target indexes across the specified dimension. /// /// The `indexes` is argument is an int tensor with a single dimension. /// The output has the same number of dimension as the `self` input. The target dimension of /// the output has length the length of `indexes` and the values are taken from `self` using /// the index from `indexes`. Other dimensions have the same number of elements as the input /// tensor. /// &RETURNS&: Tensor fn index_select(&self, rhs: &Self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.index_select(rhs, dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Performs a matrix multiplication between the two tensors. /// &RETURNS&: Tensor fn matmul(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.matmul(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Adds the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_add(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_add(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Subtracts the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_sub(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_sub(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Multiplies the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_mul(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_mul(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, rhs:Tensor)")] /// Divides the two tensors, while broadcasting the right-hand-side tensor to match the shape of the left-hand-side tensor. /// &RETURNS&: Tensor fn broadcast_div(&self, rhs: &Self) -> PyResult<Self> { Ok(PyTensor(self.0.broadcast_div(rhs).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, on_true:Tensor, on_false:Tensor)")] /// Returns a tensor with the same shape as the input tensor, the values are taken from /// `on_true` if the input tensor value is not zero, and `on_false` at the positions where the /// input tensor is equal to zero. /// &RETURNS&: Tensor fn where_cond(&self, on_true: &Self, on_false: &Self) -> PyResult<Self> { Ok(PyTensor( self.0.where_cond(on_true, on_false).map_err(wrap_err)?, )) } #[getter] /// Index a tensor. /// &RETURNS&: Tensor fn __getitem__(&self, py: Python, idx: PyObject) -> PyResult<Self> { let mut indexers: Vec<Indexer> = vec![]; let dims = self.0.shape().dims(); fn to_absolute_index(index: isize, current_dim: usize, dims: &[usize]) -> PyResult<usize> { // Convert a relative index to an absolute index e.g. 
tensor[-1] -> tensor[0] let actual_index = if index < 0 { dims[current_dim] as isize + index } else { index }; // Check that the index is in range if actual_index < 0 || actual_index >= dims[current_dim] as isize { return Err(PyValueError::new_err(format!( "index out of range for dimension '{i}' with indexer '{value}'", i = current_dim, value = index ))); } Ok(actual_index as usize) } fn extract_indexer( py_indexer: &PyAny, current_dim: usize, dims: &[usize], index_argument_count: usize, ) -> PyResult<(Indexer, usize)> { if let Ok(index) = py_indexer.extract() { // Handle a single index e.g. tensor[0] or tensor[-1] Ok(( Indexer::Index(to_absolute_index(index, current_dim, dims)?), current_dim + 1, )) } else if let Ok(slice) = py_indexer.downcast::<pyo3::types::PySlice>() { // Handle a single slice e.g. tensor[0:1] or tensor[0:-1] let index = slice.indices(dims[current_dim] as c_long)?; Ok(( Indexer::Slice(index.start as usize, index.stop as usize), current_dim + 1, )) } else if let Ok(tensor) = py_indexer.extract::<PyTensor>() { // Handle a tensor as indices e.g. tensor[tensor([0,1])] let t = tensor.0; if t.rank() != 1 { return Err(PyTypeError::new_err( "multi-dimensional tensor indexing is not supported", )); } Ok((Indexer::IndexSelect(t), current_dim + 1)) } else if let Ok(list) = py_indexer.downcast::<pyo3::types::PyList>() { // Handle a list of indices e.g. tensor[[0,1]] let mut indexes = vec![]; for item in list.iter() { let index = item.extract::<i64>()?; indexes.push(index); } Ok(( Indexer::IndexSelect( Tensor::from_vec(indexes, list.len(), &Device::Cpu).map_err(wrap_err)?, ), current_dim + 1, )) } else if py_indexer.is_ellipsis() { // Handle '...' e.g. tensor[..., 0] if current_dim > 0 { return Err(PyTypeError::new_err( "Ellipsis ('...') can only be used at the start of an indexing operation", )); } Ok((Indexer::Elipsis, dims.len() - (index_argument_count - 1))) } else if py_indexer.is_none() { // Handle None e.g. tensor[None, 0] Ok((Indexer::Expand, current_dim)) } else { Err(PyTypeError::new_err(format!( "unsupported indexer {}", py_indexer ))) } } if let Ok(tuple) = idx.downcast::<pyo3::types::PyTuple>(py) { let not_none_count: usize = tuple.iter().filter(|x| !x.is_none()).count(); if not_none_count > dims.len() { return Err(PyValueError::new_err("provided too many indices")); } let mut current_dim = 0; for item in tuple.iter() { let (indexer, new_current_dim) = extract_indexer(item, current_dim, dims, not_none_count)?; current_dim = new_current_dim; indexers.push(indexer); } } else { let (indexer, _) = extract_indexer(idx.downcast::<PyAny>(py)?, 0, dims, 1)?; indexers.push(indexer); } let mut x = self.0.clone(); let mut current_dim = 0; // Apply the indexers for indexer in indexers.iter() { x = match indexer { Indexer::Index(n) => x .narrow(current_dim, *n, 1) .map_err(wrap_err)? 
.squeeze(current_dim) .map_err(wrap_err)?, Indexer::Slice(start, stop) => { let out = x .narrow(current_dim, *start, stop.saturating_sub(*start)) .map_err(wrap_err)?; current_dim += 1; out } Indexer::Elipsis => { // Elipsis is a special case, it means that all remaining dimensions should be selected => advance the current_dim to the last dimension we have indexers for current_dim += dims.len() - (indexers.len() - 1); x } Indexer::Expand => { // Expand is a special case, it means that a new dimension should be added => unsqueeze and advance the current_dim let out = x.unsqueeze(current_dim).map_err(wrap_err)?; current_dim += 1; out } Indexer::IndexSelect(indexes) => { let out = x .index_select( &indexes.to_device(x.device()).map_err(wrap_err)?, current_dim, ) .map_err(wrap_err)?; current_dim += 1; out } } } Ok(Self(x)) } /// Add two tensors. /// &RETURNS&: Tensor fn __add__(&self, rhs: &PyAny) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_add(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 + rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for add"))? }; Ok(Self(tensor)) } fn __radd__(&self, rhs: &PyAny) -> PyResult<Self> { self.__add__(rhs) } /// Multiply two tensors. /// &RETURNS&: Tensor fn __mul__(&self, rhs: &PyAny) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_mul(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 * rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for mul"))? }; Ok(Self(tensor)) } fn __rmul__(&self, rhs: &PyAny) -> PyResult<Self> { self.__mul__(rhs) } /// Subtract two tensors. /// &RETURNS&: Tensor fn __sub__(&self, rhs: &PyAny) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_sub(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 - rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for sub"))? }; Ok(Self(tensor)) } /// Divide two tensors. /// &RETURNS&: Tensor fn __truediv__(&self, rhs: &PyAny) -> PyResult<Self> { let tensor = if let Ok(rhs) = rhs.extract::<Self>() { self.0.broadcast_div(&rhs.0).map_err(wrap_err)? } else if let Ok(rhs) = rhs.extract::<f64>() { (&self.0 / rhs).map_err(wrap_err)? } else { Err(PyTypeError::new_err("unsupported rhs for div"))? }; Ok(Self(tensor)) } /// Rich-compare two tensors. /// &RETURNS&: Tensor fn __richcmp__(&self, rhs: &PyAny, op: CompareOp) -> PyResult<Self> { let compare = |lhs: &Tensor, rhs: &Tensor| { let t = match op { CompareOp::Eq => lhs.eq(rhs), CompareOp::Ne => lhs.ne(rhs), CompareOp::Lt => lhs.lt(rhs), CompareOp::Le => lhs.le(rhs), CompareOp::Gt => lhs.gt(rhs), CompareOp::Ge => lhs.ge(rhs), }; Ok(PyTensor(t.map_err(wrap_err)?)) }; if let Ok(rhs) = rhs.extract::<PyTensor>() { if self.0.shape() == rhs.0.shape() { compare(&self.0, &rhs.0) } else { // We broadcast manually here because `candle.cmp` does not support automatic broadcasting let broadcast_shape = self .0 .shape() .broadcast_shape_binary_op(rhs.0.shape(), "cmp") .map_err(wrap_err)?; let broadcasted_lhs = self.0.broadcast_as(&broadcast_shape).map_err(wrap_err)?; let broadcasted_rhs = rhs.0.broadcast_as(&broadcast_shape).map_err(wrap_err)?; compare(&broadcasted_lhs, &broadcasted_rhs) } } else if let Ok(rhs) = rhs.extract::<f64>() { let scalar_tensor = Tensor::new(rhs, self.0.device()) .map_err(wrap_err)? .to_dtype(self.0.dtype()) .map_err(wrap_err)? 
.broadcast_as(self.0.shape()) .map_err(wrap_err)?; compare(&self.0, &scalar_tensor) } else { return Err(PyTypeError::new_err("unsupported rhs for __richcmp__")); } } fn __hash__(&self) -> u64 { // we have overridden __richcmp__ => py03 wants us to also override __hash__ // we simply hash the address of the tensor let mut hasher = DefaultHasher::new(); let pointer = &self.0 as *const Tensor; let address = pointer as usize; address.hash(&mut hasher); hasher.finish() } #[pyo3(signature=(*shape), text_signature = "(self, *shape:Shape)")] /// Reshapes the tensor to the given shape. /// &RETURNS&: Tensor fn reshape(&self, shape: PyShapeWithHole) -> PyResult<Self> { Ok(PyTensor( self.0 .reshape(shape.to_absolute(&self.0)?) .map_err(wrap_err)?, )) } #[pyo3(signature=(*shape), text_signature = "(self, *shape:Shape)")] /// Broadcasts the tensor to the given shape. /// &RETURNS&: Tensor fn broadcast_as(&self, shape: PyShapeWithHole) -> PyResult<Self> { Ok(PyTensor( self.0 .broadcast_as(shape.to_absolute(&self.0)?) .map_err(wrap_err)?, )) } #[pyo3(signature=(*shape), text_signature = "(self, *shape:Shape)")] /// Broadcasts the tensor to the given shape, adding new dimensions on the left. /// &RETURNS&: Tensor fn broadcast_left(&self, shape: PyShapeWithHole) -> PyResult<Self> { Ok(PyTensor( self.0 .broadcast_left(shape.to_absolute(&self.0)?) .map_err(wrap_err)?, )) } #[pyo3(text_signature = "(self, dim:int)")] /// Creates a new tensor with the specified dimension removed if its size was one. /// &RETURNS&: Tensor fn squeeze(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.squeeze(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Creates a new tensor with a dimension of size one inserted at the specified position. /// &RETURNS&: Tensor fn unsqueeze(&self, dim: usize) -> PyResult<Self> { Ok(PyTensor(self.0.unsqueeze(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, index:int)")] /// Gets the value at the specified index. /// &RETURNS&: Tensor fn get(&self, index: i64) -> PyResult<Self> { let index = actual_index(self, 0, index).map_err(wrap_err)?; Ok(PyTensor(self.0.get(index).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim1:int, dim2:int)")] /// Returns a tensor that is a transposed version of the input, the given dimensions are swapped. /// &RETURNS&: Tensor fn transpose(&self, dim1: usize, dim2: usize) -> PyResult<Self> { Ok(PyTensor(self.0.transpose(dim1, dim2).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int, start:int, len:int)")] /// Returns a new tensor that is a narrowed version of the input, the dimension `dim` /// ranges from `start` to `start + len`. /// &RETURNS&: Tensor fn narrow(&self, dim: i64, start: i64, len: usize) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; let start = actual_index(self, dim, start).map_err(wrap_err)?; Ok(PyTensor(self.0.narrow(dim, start, len).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Returns the indices of the maximum value(s) across the selected dimension. /// &RETURNS&: Tensor fn argmax_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.argmax_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Returns the indices of the minimum value(s) across the selected dimension. 
/// &RETURNS&: Tensor fn argmin_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.argmin_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Gathers the maximum value across the selected dimension. /// &RETURNS&: Tensor fn max_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.max_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] /// Gathers the minimum value across the selected dimension. /// &RETURNS&: Tensor fn min_keepdim(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.min_keepdim(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:Union[int, List[int]])")] /// Returns the sum of all elements in the input tensor. The sum is performed over all the input dimensions. /// &RETURNS&: Tensor fn sum_keepdim(&self, dims: PyObject, py: Python<'_>) -> PyResult<Self> { let dims = if let Ok(dim) = dims.extract::<usize>(py) { vec![dim] } else { dims.extract::<Vec<usize>>(py)? }; Ok(PyTensor( self.0.sum_keepdim(dims.as_slice()).map_err(wrap_err)?, )) } /// Returns the sum of the tensor. /// &RETURNS&: Tensor fn sum_all(&self) -> PyResult<Self> { Ok(PyTensor(self.0.sum_all().map_err(wrap_err)?)) } /// Returns the mean of the tensor. /// &RETURNS&: Tensor fn mean_all(&self) -> PyResult<Self> { let elements = self.0.elem_count(); let sum = self.0.sum_all().map_err(wrap_err)?; let mean = (sum / elements as f64).map_err(wrap_err)?; Ok(PyTensor(mean)) } #[pyo3(text_signature = "(self, dim:int)")] /// Flattens the tensor on the dimension indexes from `dim` (inclusive) to the last dimension. /// &RETURNS&: Tensor fn flatten_from(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.flatten_from(dim).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, dim:int)")] ///Flattens the tensor on the dimension indexes from `0` to `dim` (inclusive). /// &RETURNS&: Tensor fn flatten_to(&self, dim: i64) -> PyResult<Self> { let dim = actual_dim(self, dim).map_err(wrap_err)?; Ok(PyTensor(self.0.flatten_to(dim).map_err(wrap_err)?)) } /// Flattens the tensor into a 1D tensor. /// &RETURNS&: Tensor fn flatten_all(&self) -> PyResult<Self> { Ok(PyTensor(self.0.flatten_all().map_err(wrap_err)?)) } /// Transposes the tensor. /// &RETURNS&: Tensor fn t(&self) -> PyResult<Self> { Ok(PyTensor(self.0.t().map_err(wrap_err)?)) } /// Makes the tensor contiguous in memory. /// &RETURNS&: Tensor fn contiguous(&self) -> PyResult<Self> { Ok(PyTensor(self.0.contiguous().map_err(wrap_err)?)) } /// Returns true if the tensor is contiguous in C order. /// &RETURNS&: bool fn is_contiguous(&self) -> bool { self.0.is_contiguous() } /// Returns true if the tensor is contiguous in Fortran order. /// &RETURNS&: bool fn is_fortran_contiguous(&self) -> bool { self.0.is_fortran_contiguous() } /// Detach the tensor from the computation graph. /// &RETURNS&: Tensor fn detach(&self) -> PyResult<Self> { Ok(PyTensor(self.0.detach().map_err(wrap_err)?)) } /// Returns a copy of the tensor. /// &RETURNS&: Tensor fn copy(&self) -> PyResult<Self> { Ok(PyTensor(self.0.copy().map_err(wrap_err)?)) } #[pyo3(signature = (*args, **kwargs), text_signature = "(self, *args, **kwargs)")] /// Performs Tensor dtype and/or device conversion. 
/// &RETURNS&: Tensor fn to(&self, args: &PyTuple, kwargs: Option<&PyDict>) -> PyResult<Self> { let mut device: Option<PyDevice> = None; let mut dtype: Option<PyDType> = None; let mut other: Option<PyTensor> = None; fn handle_duplicates<T>( opt: &mut Option<T>, extraction_result: PyResult<T>, err_msg: &'static str, ) -> PyResult<()> { if let Ok(sucessfull_extraction) = extraction_result { if opt.is_some() { return Err(PyValueError::new_err(err_msg)); } *opt = Some(sucessfull_extraction); } Ok(()) } //handle args for arg in args.iter() { if arg.extract::<PyDevice>().is_ok() { handle_duplicates( &mut device, arg.extract::<PyDevice>(), "cannot specify multiple devices", )?; } else if arg.extract::<PyDType>().is_ok() { handle_duplicates( &mut dtype, arg.extract::<PyDType>(), "cannot specify multiple dtypes", )?; } else if arg.extract::<PyTensor>().is_ok() { handle_duplicates( &mut other, arg.extract::<PyTensor>(), "cannot specify multiple output tensors", )?; } else { return Err(PyTypeError::new_err(format!( "unsupported argument type `{:#?}`", arg.get_type().name() ))); } } if let Some(kwargs) = kwargs { if let Ok(Some(any)) = kwargs.get_item("dtype") { handle_duplicates( &mut dtype, any.extract::<PyDType>(), "cannot specify multiple dtypes", )?; } if let Ok(Some(any)) = kwargs.get_item("device") { handle_duplicates( &mut device, any.extract::<PyDevice>(), "cannot specify multiple devices", )?; } if let Ok(Some(any)) = kwargs.get_item("other") { handle_duplicates( &mut other, any.extract::<PyTensor>(), "cannot specify multiple output tensors", )?; } } if let Some(other) = other { if device.is_some() { return Err(PyValueError::new_err( "cannot specify both an output tensor and a device", )); } if dtype.is_some() { return Err(PyValueError::new_err( "cannot specify both an output tensor and a dtype", )); } dtype = Some(other.dtype()); device = Some(PyDevice::from_device(other.0.device())); } let result = match (device, dtype) { (Some(device), Some(dtype)) => self .0 .to_device(&device.as_device()?) .map_err(wrap_err)? .to_dtype(dtype.0) .map_err(wrap_err)?, (Some(device), None) => self.0.to_device(&device.as_device()?).map_err(wrap_err)?, (None, Some(dtype)) => self.0.to_dtype(dtype.0).map_err(wrap_err)?, (None, None) => { return Err(PyTypeError::new_err("No valide dtype or device specified")) } }; Ok(PyTensor(result)) } #[pyo3(text_signature = "(self, dtype:Union[str,DType])")] /// Convert the tensor to a new dtype. /// &RETURNS&: Tensor fn to_dtype(&self, dtype: PyObject, py: Python<'_>) -> PyResult<Self> { let dtype = PyDType::from_pyobject(dtype, py)?; Ok(PyTensor(self.0.to_dtype(dtype.0).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, device:Union[str,Device])")] /// Move the tensor to a new device. /// &RETURNS&: Tensor fn to_device(&self, device: PyDevice) -> PyResult<Self> { let device = device.as_device()?; Ok(PyTensor(self.0.to_device(&device).map_err(wrap_err)?)) } #[pyo3(text_signature = "(self, quantized_dtype:str)")] /// Quantize the tensor. 
/// &RETURNS&: QTensor fn quantize(&self, quantized_dtype: &str) -> PyResult<PyQTensor> { use ::candle::quantized; let res = match quantized_dtype.to_lowercase().as_str() { "q2k" => quantized::QTensor::quantize::<quantized::k_quants::BlockQ2K>(self), "q3k" => quantized::QTensor::quantize::<quantized::k_quants::BlockQ3K>(self), "q4_0" => quantized::QTensor::quantize::<quantized::k_quants::BlockQ4_0>(self), "q4_1" => quantized::QTensor::quantize::<quantized::k_quants::BlockQ4_1>(self), "q4k" => quantized::QTensor::quantize::<quantized::k_quants::BlockQ4K>(self), "q5_0" => quantized::QTensor::quantize::<quantized::k_quants::BlockQ5_0>(self), "q5_1" => quantized::QTensor::quantize::<quantized::k_quants::BlockQ5_1>(self), "q5k" => quantized::QTensor::quantize::<quantized::k_quants::BlockQ5K>(self), "q6k" => quantized::QTensor::quantize::<quantized::k_quants::BlockQ6K>(self), "q8_0" => quantized::QTensor::quantize::<quantized::k_quants::BlockQ8_0>(self), "q8_1" => quantized::QTensor::quantize::<quantized::k_quants::BlockQ8_1>(self), "q8k" => quantized::QTensor::quantize::<quantized::k_quants::BlockQ8K>(self), "f16" => quantized::QTensor::quantize::<f16>(self), "f32" => quantized::QTensor::quantize::<f32>(self), dt => { return Err(PyErr::new::<PyValueError, _>(format!( "unknown quantized-dtype {dt}" ))) } }; Ok(PyQTensor(Arc::new(res.map_err(wrap_err)?))) } } #[pyfunction] #[pyo3(text_signature = "(tensors:List[Tensor], dim:int )")] /// Concatenate the tensors across one axis. /// &RETURNS&: Tensor fn cat(tensors: Vec<PyTensor>, dim: i64) -> PyResult<PyTensor> { if tensors.is_empty() { return Err(PyErr::new::<PyValueError, _>("empty input to cat")); } let dim = actual_dim(&tensors[0], dim).map_err(wrap_err)?; let tensors = tensors.into_iter().map(|t| t.0).collect::<Vec<_>>(); let tensor = Tensor::cat(&tensors, dim).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(text_signature = "(tensors:List[Tensor], dim:int)")] /// Stack the tensors along a new axis. /// &RETURNS&: Tensor fn stack(tensors: Vec<PyTensor>, dim: usize) -> PyResult<PyTensor> { let tensors = tensors.into_iter().map(|t| t.0).collect::<Vec<_>>(); let tensor = Tensor::stack(&tensors, dim).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(text_signature = "(data:_ArrayLike)")] /// Creates a new tensor from a Python value. The value can be a scalar or array-like object. /// &RETURNS&: Tensor fn tensor(py: Python<'_>, data: PyObject) -> PyResult<PyTensor> { PyTensor::new(py, data) } #[pyfunction] #[pyo3(signature = (*shape,device=None), text_signature = "(*shape:Shape, device:Optional[Device]=None)")] /// Creates a new tensor with random values. /// &RETURNS&: Tensor fn rand(_py: Python<'_>, shape: PyShape, device: Option<PyDevice>) -> PyResult<PyTensor> { let device = device.unwrap_or(PyDevice::Cpu).as_device()?; let tensor = Tensor::rand(0f32, 1f32, shape, &device).map_err(wrap_err)?; Ok(PyTensor(tensor)) } #[pyfunction] #[pyo3(signature = (*shape,device=None), text_signature = "(*shape:Shape, device:Optional[Device]=None)")] /// Creates a new tensor with random values from a normal distribution. 
/// &RETURNS&: Tensor
fn randn(_py: Python<'_>, shape: PyShape, device: Option<PyDevice>) -> PyResult<PyTensor> {
    let device = device.unwrap_or(PyDevice::Cpu).as_device()?;
    let tensor = Tensor::randn(0f32, 1f32, shape, &device).map_err(wrap_err)?;
    Ok(PyTensor(tensor))
}

#[pyfunction]
#[pyo3(signature = (*shape, dtype=None, device=None),text_signature = "(*shape:Shape, dtype:Optional[DType]=None, device:Optional[Device]=None)")]
/// Creates a new tensor filled with ones.
/// &RETURNS&: Tensor
fn ones(
    py: Python<'_>,
    shape: PyShape,
    dtype: Option<PyObject>,
    device: Option<PyDevice>,
) -> PyResult<PyTensor> {
    let dtype = match dtype {
        None => DType::F32,
        Some(dtype) => PyDType::from_pyobject(dtype, py)?.0,
    };
    let device = device.unwrap_or(PyDevice::Cpu).as_device()?;
    let tensor = Tensor::ones(shape, dtype, &device).map_err(wrap_err)?;
    Ok(PyTensor(tensor))
}

#[pyfunction]
#[pyo3(signature = (*shape, dtype=None, device=None), text_signature = "(*shape:Shape, dtype:Optional[DType]=None, device:Optional[Device]=None)")]
/// Creates a new tensor filled with zeros.
/// &RETURNS&: Tensor
fn zeros(
    py: Python<'_>,
    shape: PyShape,
    dtype: Option<PyObject>,
    device: Option<PyDevice>,
) -> PyResult<PyTensor> {
    let dtype = match dtype {
        None => DType::F32,
        Some(dtype) => PyDType::from_pyobject(dtype, py)?.0,
    };
    let device = device.unwrap_or(PyDevice::Cpu).as_device()?;
    let tensor = Tensor::zeros(shape, dtype, &device).map_err(wrap_err)?;
    Ok(PyTensor(tensor))
}

#[derive(Debug, Clone)]
#[pyclass(name = "QTensor")]
/// A quantized tensor.
struct PyQTensor(Arc<QTensor>);

impl std::ops::Deref for PyQTensor {
    type Target = QTensor;

    fn deref(&self) -> &Self::Target {
        self.0.as_ref()
    }
}

#[pymethods]
impl PyQTensor {
    #[getter]
    /// Gets the tensor's quantized dtype.
    /// &RETURNS&: str
    fn ggml_dtype(&self) -> String {
        format!("{:?}", self.0.dtype())
    }

    #[getter]
    /// Gets the rank of the tensor.
    /// &RETURNS&: int
    fn rank(&self) -> usize {
        self.0.rank()
    }

    #[getter]
    /// Gets the shape of the tensor.
    /// &RETURNS&: Tuple[int]
    fn shape(&self, py: Python<'_>) -> PyObject {
        PyTuple::new(py, self.0.shape().dims()).to_object(py)
    }

    fn __repr__(&self) -> String {
        format!("{:?}", self.0)
    }

    fn __str__(&self) -> String {
        self.__repr__()
    }

    /// Dequantizes the tensor.
    /// &RETURNS&: Tensor
    fn dequantize(&self) -> PyResult<PyTensor> {
        let tensor = self.0.dequantize(&Device::Cpu).map_err(wrap_err)?;
        Ok(PyTensor(tensor))
    }

    #[pyo3(text_signature = "(self, lhs:Tensor)")]
    /// Performs a quantized matrix multiplication, with the quantized tensor as the right hand side.
    /// &RETURNS&: Tensor
    fn matmul_t(&self, lhs: &PyTensor) -> PyResult<PyTensor> {
        let qmatmul = ::candle::quantized::QMatMul::from_arc(self.0.clone()).map_err(wrap_err)?;
        let res = qmatmul.forward(lhs).map_err(wrap_err)?;
        Ok(PyTensor(res))
    }
}

#[pyfunction]
#[pyo3(text_signature = "(path:Union[str,PathLike])")]
/// Loads a safetensors file. Returns a dictionary mapping tensor names to tensors.
/// &RETURNS&: Dict[str,Tensor]
fn load_safetensors(path: &str, py: Python<'_>) -> PyResult<PyObject> {
    let res = ::candle::safetensors::load(path, &Device::Cpu).map_err(wrap_err)?;
    let res = res
        .into_iter()
        .map(|(key, value)| (key, PyTensor(value).into_py(py)))
        .collect::<Vec<_>>();
    Ok(res.into_py_dict(py).to_object(py))
}

#[pyfunction]
#[pyo3(text_signature = "(path:Union[str,PathLike], tensors:Dict[str,Tensor])")]
/// Saves a dictionary of tensors to a safetensors file.
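/// Sketch of the round-trip with `load_safetensors` above, assuming `t` is a Tensor:
/// `candle.utils.save_safetensors("model.safetensors", {"w": t})`
/// `tensors = candle.utils.load_safetensors("model.safetensors")`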
/// &RETURNS&: None
fn save_safetensors(
    path: &str,
    tensors: std::collections::HashMap<String, PyTensor>,
) -> PyResult<()> {
    let tensors = tensors
        .into_iter()
        .map(|(s, t)| (s, t.0))
        .collect::<std::collections::HashMap<_, _>>();
    ::candle::safetensors::save(&tensors, path).map_err(wrap_err)
}

#[pyfunction]
#[pyo3(text_signature = "(path:Union[str,PathLike])")]
/// Load a GGML file. Returns a tuple of three objects: a dictionary mapping tensor names to tensors,
/// a dictionary mapping hyperparameter names to hyperparameter values, and a vocabulary.
/// &RETURNS&: Tuple[Dict[str,QTensor], Dict[str,Any], List[str]]
fn load_ggml(path: &str, py: Python<'_>) -> PyResult<(PyObject, PyObject, PyObject)> {
    let mut file = std::fs::File::open(path)?;
    let ggml = ::candle::quantized::ggml_file::Content::read(&mut file).map_err(wrap_err)?;
    let tensors = ggml
        .tensors
        .into_iter()
        .map(|(key, qtensor)| Ok((key, PyQTensor(Arc::new(qtensor)).into_py(py))))
        .collect::<::candle::Result<Vec<_>>>()
        .map_err(wrap_err)?;
    let tensors = tensors.into_py_dict(py).to_object(py);
    let hparams = [
        ("n_vocab", ggml.hparams.n_vocab),
        ("n_embd", ggml.hparams.n_embd),
        ("n_mult", ggml.hparams.n_mult),
        ("n_head", ggml.hparams.n_head),
        ("n_layer", ggml.hparams.n_layer),
        ("n_rot", ggml.hparams.n_rot),
        ("ftype", ggml.hparams.ftype),
    ];
    let hparams = hparams.into_py_dict(py).to_object(py);
    let vocab = ggml
        .vocab
        .token_score_pairs
        .iter()
        .map(|(bytes, _)| String::from_utf8_lossy(bytes.as_slice()).to_string())
        .collect::<Vec<String>>()
        .to_object(py);
    Ok((tensors, hparams, vocab))
}

#[pyfunction]
#[pyo3(text_signature = "(path:Union[str,PathLike])")]
/// Loads a GGUF file. Returns a tuple of two dictionaries: the first maps tensor names to tensors,
/// and the second maps metadata keys to metadata values.
/// &RETURNS&: Tuple[Dict[str,QTensor], Dict[str,Any]]
fn load_gguf(path: &str, py: Python<'_>) -> PyResult<(PyObject, PyObject)> {
    use ::candle::quantized::gguf_file;
    fn gguf_value_to_pyobject(v: &gguf_file::Value, py: Python<'_>) -> PyResult<PyObject> {
        let v: PyObject = match v {
            gguf_file::Value::U8(x) => x.into_py(py),
            gguf_file::Value::I8(x) => x.into_py(py),
            gguf_file::Value::U16(x) => x.into_py(py),
            gguf_file::Value::I16(x) => x.into_py(py),
            gguf_file::Value::U32(x) => x.into_py(py),
            gguf_file::Value::I32(x) => x.into_py(py),
            gguf_file::Value::U64(x) => x.into_py(py),
            gguf_file::Value::I64(x) => x.into_py(py),
            gguf_file::Value::F32(x) => x.into_py(py),
            gguf_file::Value::F64(x) => x.into_py(py),
            gguf_file::Value::Bool(x) => x.into_py(py),
            gguf_file::Value::String(x) => x.into_py(py),
            gguf_file::Value::Array(x) => {
                let list = pyo3::types::PyList::empty(py);
                for elem in x.iter() {
                    list.append(gguf_value_to_pyobject(elem, py)?)?;
                }
                list.into()
            }
        };
        Ok(v)
    }
    let mut file = std::fs::File::open(path)?;
    let gguf = gguf_file::Content::read(&mut file).map_err(wrap_err)?;
    let tensors = gguf
        .tensor_infos
        .keys()
        .map(|key| {
            let qtensor = gguf.tensor(&mut file, key)?;
            Ok((key, PyQTensor(Arc::new(qtensor)).into_py(py)))
        })
        .collect::<::candle::Result<Vec<_>>>()
        .map_err(wrap_err)?;
    let tensors = tensors.into_py_dict(py).to_object(py);
    let metadata = gguf
        .metadata
        .iter()
        .map(|(key, value)| Ok((key, gguf_value_to_pyobject(value, py)?)))
        .collect::<PyResult<Vec<_>>>()?
        .into_py_dict(py)
        .to_object(py);
    Ok((tensors, metadata))
}

#[pyfunction]
#[pyo3(
    text_signature = "(path:Union[str,PathLike], tensors:Dict[str,QTensor], metadata:Dict[str,Any])"
)]
/// Save quantized tensors and metadata to a GGUF file.
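/// Hypothetical Python usage, assuming `qt` is a QTensor produced by `quantize`:
/// `candle.utils.save_gguf("model.gguf", {"w": qt}, {"description": "demo"})`
/// &RETURNS&: None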
fn save_gguf(path: &str, tensors: PyObject, metadata: PyObject, py: Python<'_>) -> PyResult<()> {
    use ::candle::quantized::gguf_file;

    fn pyobject_to_gguf_value(v: &PyAny, py: Python<'_>) -> PyResult<gguf_file::Value> {
        let v: gguf_file::Value = if let Ok(x) = v.extract::<u8>() {
            gguf_file::Value::U8(x)
        } else if let Ok(x) = v.extract::<i8>() {
            gguf_file::Value::I8(x)
        } else if let Ok(x) = v.extract::<u16>() {
            gguf_file::Value::U16(x)
        } else if let Ok(x) = v.extract::<i16>() {
            gguf_file::Value::I16(x)
        } else if let Ok(x) = v.extract::<u32>() {
            gguf_file::Value::U32(x)
        } else if let Ok(x) = v.extract::<i32>() {
            gguf_file::Value::I32(x)
        } else if let Ok(x) = v.extract::<u64>() {
            gguf_file::Value::U64(x)
        } else if let Ok(x) = v.extract::<i64>() {
            gguf_file::Value::I64(x)
        } else if let Ok(x) = v.extract::<f32>() {
            gguf_file::Value::F32(x)
        } else if let Ok(x) = v.extract::<f64>() {
            gguf_file::Value::F64(x)
        } else if let Ok(x) = v.extract::<bool>() {
            gguf_file::Value::Bool(x)
        } else if let Ok(x) = v.extract::<String>() {
            gguf_file::Value::String(x)
        } else if let Ok(x) = v.extract::<Vec<PyObject>>() {
            let x = x
                .into_iter()
                .map(|f| pyobject_to_gguf_value(f.as_ref(py), py))
                .collect::<PyResult<Vec<_>>>()?;
            gguf_file::Value::Array(x)
        } else {
            return Err(PyErr::new::<PyValueError, _>(format!(
                "unsupported type {:?}",
                v
            )));
        };
        Ok(v)
    }

    let tensors = tensors
        .extract::<&PyDict>(py)
        .map_err(|_| PyErr::new::<PyValueError, _>("expected a dict"))?
        .iter()
        .map(|(key, value)| {
            Ok((
                key.extract::<String>()
                    .map_err(|_| PyErr::new::<PyValueError, _>("keys must be strings"))?,
                value.extract::<PyQTensor>()?.0,
            ))
        })
        .collect::<PyResult<Vec<_>>>()?;

    let metadata = metadata
        .extract::<&PyDict>(py)
        .map_err(|_| PyErr::new::<PyValueError, _>("expected a dict"))?
        .iter()
        .map(|(key, value)| {
            Ok((
                key.extract::<String>()
                    .map_err(|_| PyErr::new::<PyValueError, _>("keys must be strings"))?,
                pyobject_to_gguf_value(value, py)?,
            ))
        })
        .collect::<PyResult<Vec<_>>>()?;

    let converted_metadata: Vec<_> = metadata
        .iter()
        .map(|(name, value)| (name.as_str(), value))
        .collect();

    let converted_tensors: Vec<_> = tensors
        .iter()
        .map(|(name, tensor)| (name.as_str(), tensor.as_ref()))
        .collect();

    let mut file = std::fs::File::create(path)?;

    gguf_file::write(&mut file, &converted_metadata, &converted_tensors).map_err(wrap_err)
}

#[pyfunction]
/// Returns true if the 'cuda' backend is available.
/// &RETURNS&: bool
fn cuda_is_available() -> bool {
    ::candle::utils::cuda_is_available()
}

#[pyfunction]
/// Returns true if candle was compiled with 'accelerate' support.
/// &RETURNS&: bool
fn has_accelerate() -> bool {
    ::candle::utils::has_accelerate()
}

#[pyfunction]
/// Returns true if candle was compiled with MKL support.
/// &RETURNS&: bool
fn has_mkl() -> bool {
    ::candle::utils::has_mkl()
}

#[pyfunction]
/// Returns the number of threads used by candle.
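/// This typically honors the `RAYON_NUM_THREADS` environment variable when it
/// is set (an assumption based on candle's rayon-backed CPU backend).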
/// &RETURNS&: int
fn get_num_threads() -> usize {
    ::candle::utils::get_num_threads()
}

fn candle_utils(_py: Python<'_>, m: &PyModule) -> PyResult<()> {
    m.add_function(wrap_pyfunction!(cuda_is_available, m)?)?;
    m.add_function(wrap_pyfunction!(get_num_threads, m)?)?;
    m.add_function(wrap_pyfunction!(has_accelerate, m)?)?;
    m.add_function(wrap_pyfunction!(has_mkl, m)?)?;
    m.add_function(wrap_pyfunction!(load_ggml, m)?)?;
    m.add_function(wrap_pyfunction!(load_gguf, m)?)?;
    m.add_function(wrap_pyfunction!(save_gguf, m)?)?;
    m.add_function(wrap_pyfunction!(load_safetensors, m)?)?;
    m.add_function(wrap_pyfunction!(save_safetensors, m)?)?;
    Ok(())
}

#[pyfunction]
#[pyo3(text_signature = "(tensor:Tensor, dim:int)")]
/// Applies the Softmax function to a given tensor.
/// &RETURNS&: Tensor
fn softmax(tensor: PyTensor, dim: i64) -> PyResult<PyTensor> {
    let dim = actual_dim(&tensor, dim).map_err(wrap_err)?;
    let sm = candle_nn::ops::softmax(&tensor.0, dim).map_err(wrap_err)?;
    Ok(PyTensor(sm))
}

#[pyfunction]
#[pyo3(signature = (tensor, ksize, *, stride=1), text_signature = "(tensor:Tensor, ksize:int, stride:int=1)")]
/// Applies the 2d avg-pool function to a given tensor.
/// &RETURNS&: Tensor
fn avg_pool2d(tensor: PyTensor, ksize: usize, stride: usize) -> PyResult<PyTensor> {
    let tensor = tensor
        .avg_pool2d_with_stride(ksize, stride)
        .map_err(wrap_err)?;
    Ok(PyTensor(tensor))
}

#[pyfunction]
#[pyo3(signature = (tensor, ksize, *, stride=1), text_signature = "(tensor:Tensor, ksize:int, stride:int=1)")]
/// Applies the 2d max-pool function to a given tensor.
/// &RETURNS&: Tensor
fn max_pool2d(tensor: PyTensor, ksize: usize, stride: usize) -> PyResult<PyTensor> {
    let tensor = tensor
        .max_pool2d_with_stride(ksize, stride)
        .map_err(wrap_err)?;
    Ok(PyTensor(tensor))
}

#[pyfunction]
#[pyo3(text_signature = "(tensor:Tensor)")]
/// Applies the Sigmoid Linear Unit (SiLU) function to a given tensor.
/// &RETURNS&: Tensor
fn silu(tensor: PyTensor) -> PyResult<PyTensor> {
    let s = candle_nn::ops::silu(&tensor.0).map_err(wrap_err)?;
    Ok(PyTensor(s))
}

#[pyfunction]
#[pyo3(text_signature = "(tensor:Tensor)")]
/// Applies the Gaussian Error Linear Unit (GELU) function to a given tensor.
/// &RETURNS&: Tensor
fn gelu(tensor: PyTensor) -> PyResult<PyTensor> {
    let s = tensor.0.gelu_erf().map_err(wrap_err)?;
    Ok(PyTensor(s))
}

#[pyfunction]
#[pyo3(text_signature = "(tensor:Tensor)")]
/// Applies the Rectified Linear Unit (ReLU) function to a given tensor.
/// &RETURNS&: Tensor
fn relu(tensor: PyTensor) -> PyResult<PyTensor> {
    let s = tensor.0.relu().map_err(wrap_err)?;
    Ok(PyTensor(s))
}

#[pyfunction]
#[pyo3(text_signature = "(tensor:Tensor)")]
/// Applies the tanh function to a given tensor.
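/// Python usage via the functional module, e.g. `y = candle.functional.tanh(x)`.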
/// &RETURNS&: Tensor fn tanh(tensor: PyTensor) -> PyResult<PyTensor> { let s = tensor.0.tanh().map_err(wrap_err)?; Ok(PyTensor(s)) } fn candle_functional_m(_py: Python<'_>, m: &PyModule) -> PyResult<()> { m.add_function(wrap_pyfunction!(silu, m)?)?; m.add_function(wrap_pyfunction!(softmax, m)?)?; m.add_function(wrap_pyfunction!(max_pool2d, m)?)?; m.add_function(wrap_pyfunction!(avg_pool2d, m)?)?; m.add_function(wrap_pyfunction!(gelu, m)?)?; m.add_function(wrap_pyfunction!(relu, m)?)?; m.add_function(wrap_pyfunction!(tanh, m)?)?; Ok(()) } #[cfg(feature = "onnx")] fn candle_onnx_m(_py: Python<'_>, m: &PyModule) -> PyResult<()> { use onnx::{PyONNXModel, PyONNXTensorDescriptor}; m.add_class::<PyONNXModel>()?; m.add_class::<PyONNXTensorDescriptor>()?; Ok(()) } #[pymodule] fn candle(py: Python<'_>, m: &PyModule) -> PyResult<()> { let utils = PyModule::new(py, "utils")?; candle_utils(py, utils)?; m.add_submodule(utils)?; let nn = PyModule::new(py, "functional")?; candle_functional_m(py, nn)?; m.add_submodule(nn)?; #[cfg(feature = "onnx")] { let onnx = PyModule::new(py, "onnx")?; candle_onnx_m(py, onnx)?; m.add_submodule(onnx)?; } m.add_class::<PyTensor>()?; m.add_class::<PyQTensor>()?; m.add_class::<PyDType>()?; m.add("u8", PyDType(DType::U8))?; m.add("u32", PyDType(DType::U32))?; m.add("i64", PyDType(DType::I64))?; m.add("bf16", PyDType(DType::BF16))?; m.add("f16", PyDType(DType::F16))?; m.add("f32", PyDType(DType::F32))?; m.add("f64", PyDType(DType::F64))?; m.add_function(wrap_pyfunction!(cat, m)?)?; m.add_function(wrap_pyfunction!(ones, m)?)?; m.add_function(wrap_pyfunction!(rand, m)?)?; m.add_function(wrap_pyfunction!(randn, m)?)?; m.add_function(wrap_pyfunction!(tensor, m)?)?; m.add_function(wrap_pyfunction!(stack, m)?)?; m.add_function(wrap_pyfunction!(zeros, m)?)?; Ok(()) }
0
hf_public_repos/candle/candle-pyo3
hf_public_repos/candle/candle-pyo3/src/shape.rs
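//! Conversions from Python tuples to candle shapes. `PyShape` accepts only
//! strictly positive dimensions, while `PyShapeWithHole` additionally allows a
//! single `-1` placeholder that is resolved against a tensor's element count.
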
use ::candle::Tensor;
use pyo3::prelude::*;

#[derive(Clone, Debug)]
/// Represents an absolute shape e.g. (1, 2, 3)
pub struct PyShape(Vec<usize>);

impl<'source> pyo3::FromPyObject<'source> for PyShape {
    fn extract(ob: &'source PyAny) -> PyResult<Self> {
        if ob.is_none() {
            return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(
                "Shape cannot be None",
            ));
        }

        let tuple = ob.downcast::<pyo3::types::PyTuple>()?;
        if tuple.len() == 1 {
            let first_element = tuple.get_item(0)?;
            let dims: Vec<usize> = pyo3::FromPyObject::extract(first_element)?;
            Ok(PyShape(dims))
        } else {
            let dims: Vec<usize> = pyo3::FromPyObject::extract(tuple)?;
            Ok(PyShape(dims))
        }
    }
}

impl From<PyShape> for ::candle::Shape {
    fn from(val: PyShape) -> Self {
        val.0.into()
    }
}

#[derive(Clone, Debug)]
/// Represents a shape with a hole in it e.g. (1, -1, 3)
pub struct PyShapeWithHole(Vec<isize>);

impl<'source> pyo3::FromPyObject<'source> for PyShapeWithHole {
    fn extract(ob: &'source PyAny) -> PyResult<Self> {
        if ob.is_none() {
            return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(
                "Shape cannot be None",
            ));
        }

        let tuple = ob.downcast::<pyo3::types::PyTuple>()?;
        let dims: Vec<isize> = if tuple.len() == 1 {
            let first_element = tuple.get_item(0)?;
            pyo3::FromPyObject::extract(first_element)?
        } else {
            pyo3::FromPyObject::extract(tuple)?
        };

        // Ensure we have only positive numbers and at most one "hole" (-1)
        let negative_ones = dims.iter().filter(|&&x| x == -1).count();
        let any_invalid_dimensions = dims.iter().any(|&x| x < -1 || x == 0);
        if negative_ones > 1 || any_invalid_dimensions {
            return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(format!(
                "Invalid dimension in shape: {:?}",
                dims
            )));
        }

        Ok(PyShapeWithHole(dims))
    }
}

impl PyShapeWithHole {
    /// Returns `true` if the shape is absolute e.g. (1, 2, 3)
    pub fn is_absolute(&self) -> bool {
        self.0.iter().all(|x| *x > 0)
    }

    /// Convert a relative shape to an absolute shape e.g. (1, -1) -> (1, 12)
    pub fn to_absolute(&self, t: &Tensor) -> PyResult<PyShape> {
        if self.is_absolute() {
            return Ok(PyShape(
                self.0.iter().map(|x| *x as usize).collect::<Vec<usize>>(),
            ));
        }

        // Divide out all explicit dimensions first so that the hole resolves
        // correctly regardless of its position, e.g. (-1, 2) on a 12-element
        // tensor yields (6, 2) rather than (12, 2).
        let mut elements = t.elem_count();
        for dim in self.0.iter() {
            if *dim > 0 {
                elements /= *dim as usize;
            }
        }

        let mut new_dims: Vec<usize> = vec![];
        for dim in self.0.iter() {
            if *dim > 0 {
                new_dims.push(*dim as usize);
            } else if *dim == -1 {
                new_dims.push(elements);
            } else {
                return Err(PyErr::new::<pyo3::exceptions::PyValueError, _>(format!(
                    "Invalid dimension in shape: {}",
                    dim
                )));
            }
        }
        Ok(PyShape(new_dims))
    }
}
0
hf_public_repos/candle/candle-pyo3
hf_public_repos/candle/candle-pyo3/src/utils.rs
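//! Shared helper for the bindings: candle errors are surfaced to Python as
//! `ValueError`s via `wrap_err`.
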
use pyo3::exceptions::PyValueError; use pyo3::prelude::*; pub fn wrap_err(err: ::candle::Error) -> PyErr { PyErr::new::<PyValueError, _>(format!("{err:?}")) }
0
hf_public_repos/candle
hf_public_repos/candle/.cargo/config.toml
[build] rustflags = ["-C", "target-cpu=native"] [target.wasm32-unknown-unknown] rustflags = ["-C", "target-feature=+simd128"] [target.x86_64-apple-darwin] rustflags = ["-C", "target-feature=-avx,-avx2"]
0
hf_public_repos/candle
hf_public_repos/candle/candle-onnx/Cargo.toml
[package] name = "candle-onnx" version = "0.3.1" edition = "2021" description = "ONNX support for Candle" repository = "https://github.com/huggingface/candle" keywords = ["blas", "tensor", "machine-learning"] categories = ["science"] license = "MIT OR Apache-2.0" [dependencies] candle = { path = "../candle-core", version = "0.3.1", package = "candle-core" } candle-nn = { path = "../candle-nn", version = "0.3.1" } prost = "0.12.1" [build-dependencies] prost-build = "0.12.1" [dev-dependencies] anyhow = { version = "1", features = ["backtrace"] } clap = { version = "4.2.4", features = ["derive"] }
0
hf_public_repos/candle
hf_public_repos/candle/candle-onnx/build.rs
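// Build-time codegen: compiles the vendored ONNX schema into Rust types via
// prost. This requires a `protoc` binary on the PATH (see the README's FAQ).
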
use std::io::Result; fn main() -> Result<()> { prost_build::compile_protos(&["src/onnx.proto3"], &["src/"])?; Ok(()) }
0
hf_public_repos/candle
hf_public_repos/candle/candle-onnx/README.md
# candle-onnx

This crate adds ONNX support to candle.

## FAQ

#### Missing protoc installation when compiling candle-onnx

prost-build, a dependency of candle-onnx, no longer bundles the `protoc` binary. This can cause the following error when attempting to compile candle-onnx:

```
error: failed to run custom build command for `candle-onnx`

Caused by:
// (...)
Could not find `protoc` installation and this build crate cannot proceed without this knowledge.
```

To fix this issue, install protoc on your system and make it available in your system `PATH`. See the [protoc documentation](https://grpc.io/docs/protoc-installation/) for more information.
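For example, on Debian-based systems the `protobuf-compiler` package typically provides `protoc`.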
0
hf_public_repos/candle/candle-onnx
hf_public_repos/candle/candle-onnx/tests/ops.rs
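//! Per-operator tests for `simple_eval`: each test hand-builds a minimal
//! `ModelProto`, feeds named input tensors, and checks the evaluated output.
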
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use candle::{Device, Result, Tensor}; use candle_onnx::onnx::{GraphProto, ModelProto, NodeProto, ValueInfoProto}; use std::collections::HashMap; const INPUT_X: &str = "x"; const INPUT_Y: &str = "y"; const OUTPUT_Z: &str = "z"; fn create_model_proto_with_graph(graph: Option<GraphProto>) -> ModelProto { ModelProto { metadata_props: vec![], training_info: vec![], functions: vec![], ir_version: 0, opset_import: vec![], producer_name: "".to_string(), producer_version: "".to_string(), domain: "".to_string(), model_version: 0, doc_string: "".to_string(), graph, } } #[test] fn test_evaluation_fails_without_defined_graph() -> Result<()> { let manual_graph = create_model_proto_with_graph(None); let inputs: HashMap<String, Tensor> = HashMap::new(); match candle_onnx::simple_eval(&manual_graph, inputs) { Err(err) => assert_eq!(err.to_string(), "no graph defined in proto"), Ok(_) => panic!("Expected an error due to undefined graph"), } Ok(()) } // "Add" #[test] fn test_add_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Add".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string(), INPUT_Y.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?); inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let first = z .to_vec1::<f64>()? .to_vec() .get(0) .expect("Failed to get first element") .clone(); assert_eq!(first, 4.0f64); Ok(()) } // "Sub" #[test] fn test_sub_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Sub".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string(), INPUT_Y.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?); inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let first = z .to_vec1::<f64>()? 
.to_vec() .get(0) .expect("Failed to get first element") .clone(); assert_eq!(first, 0.0f64); Ok(()) } // "Mul" #[test] fn test_mul_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Mul".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string(), INPUT_Y.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?); inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let first = z .to_vec1::<f64>()? .to_vec() .get(0) .expect("Failed to get first element") .clone(); assert_eq!(first, 4.0f64); Ok(()) } // "Div" #[test] fn test_div_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Div".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string(), INPUT_Y.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?); inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let first = z .to_vec1::<f64>()? 
.to_vec() .get(0) .expect("Failed to get first element") .clone(); assert_eq!(first, 1.0f64); Ok(()) } // "Equal" #[test] fn test_equal_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Equal".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string(), INPUT_Y.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?); inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let first = z.to_dtype(candle::DType::U8)?.to_vec1::<u8>()?.to_vec()[0]; assert_eq!(first, 1); Ok(()) } // "Not" #[test] fn test_not_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Not".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), Tensor::new(&[0.], &Device::Cpu)?); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let first = z.to_dtype(candle::DType::U8)?.to_vec1::<u8>()?.to_vec()[0]; assert_eq!(first, 1); Ok(()) } // "MatMul" #[test] fn test_matmul_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "MatMul".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string(), INPUT_Y.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert( INPUT_X.to_string(), Tensor::from_vec( // vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu, )?, ); inputs.insert( INPUT_Y.to_string(), Tensor::from_vec( // vec![5.0f32, 6.0f32, 7.0f32, 8.0f32], &[2, 2], &Device::Cpu, )?, ); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!(results, vec![vec![19.0, 22.0], vec![43.0, 50.0]]); Ok(()) } // "Reshape" #[test] fn test_reshape_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: 
vec![NodeProto { op_type: "Reshape".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string(), INPUT_Y.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec( // vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu, )?; let y = Tensor::from_vec( // vec![4i64], &[1], &Device::Cpu, )?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); inputs.insert(INPUT_Y.to_string(), y); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec1::<f32>()?; assert_eq!(results, vec![1.0, 2.0, 3.0, 4.0]); Ok(()) } // "LogSoftmax" #[test] fn test_logsoftmax_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "LogSoftmax".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec( // vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu, )?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!( results, vec![vec![0.26894143, 0.7310586], vec![0.26894143, 0.7310586]] ); Ok(()) } // "Softmax" #[test] fn test_softmax_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Softmax".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec( // vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu, )?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = 
candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!( results, vec![vec![0.26894143, 0.7310586], vec![0.26894143, 0.7310586]] ); Ok(()) } // "Transpose" #[test] fn test_transpose_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Transpose".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec( // vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu, )?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!(results, vec![vec![1.0, 3.0], vec![2.0, 4.0]]); Ok(()) } // "Dropout" #[test] fn test_dropout_operation() -> Result<()> { let manual_graph = create_model_proto_with_graph(Some(GraphProto { node: vec![NodeProto { op_type: "Dropout".to_string(), domain: "".to_string(), attribute: vec![], input: vec![INPUT_X.to_string()], output: vec![OUTPUT_Z.to_string()], name: "".to_string(), doc_string: "".to_string(), }], name: "".to_string(), initializer: vec![], input: vec![ ValueInfoProto { name: INPUT_X.to_string(), doc_string: "".to_string(), r#type: None, }, ValueInfoProto { name: INPUT_Y.to_string(), doc_string: "".to_string(), r#type: None, }, ], output: vec![ValueInfoProto { name: OUTPUT_Z.to_string(), doc_string: "".to_string(), r#type: None, }], value_info: vec![], doc_string: "".to_string(), sparse_initializer: vec![], quantization_annotation: vec![], })); let x = Tensor::from_vec( // vec![1.0f32, 2.0f32, 3.0f32, 4.0f32], &[2, 2], &Device::Cpu, )?; let mut inputs: HashMap<String, Tensor> = HashMap::new(); inputs.insert(INPUT_X.to_string(), x); let eval = candle_onnx::simple_eval(&manual_graph, inputs)?; assert_eq!(eval.len(), 1); let z = eval.get(OUTPUT_Z).expect("Output 'z' not found"); let results = z.to_vec2::<f32>()?; assert_eq!(results, vec![vec![1.0, 2.0], vec![3.0, 4.0]]); Ok(()) } // Below are ops that are implemented but not tested yet // "MaxPool" // #[test] // "AveragePool" // #[test] // "BatchNormalization" // #[test] // "Squeeze" // #[test] // "ConstantOfShape" // #[test] // "Unsqueeze" // #[test] // "Clip" // #[test] // "Gather" // #[test] // "Shape" // #[test] // "Conv" // #[test] // "Concat" // #[test] // "Abs" // #[test] // "Cos" // #[test] // "Sin" // #[test] // "Neg" // #[test] // "Erf" // #[test] // "Tanh" // #[test] // "Sigmoid" // #[test] // "Gelu" // #[test] // "Relu" // #[test] // "Constant" // #[test] // "Cast" // #[test]
0
hf_public_repos/candle/candle-onnx
hf_public_repos/candle/candle-onnx/src/lib.rs
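//! ONNX support for candle. The protobuf types are generated at build time
//! from `onnx.proto3` (see `build.rs`); `simple_eval` then interprets a model
//! graph directly against candle tensors.
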
use candle::Result; use prost::Message; pub mod onnx { include!(concat!(env!("OUT_DIR"), "/onnx.rs")); } pub mod eval; pub use eval::{dtype, simple_eval}; pub fn read_file<P: AsRef<std::path::Path>>(p: P) -> Result<onnx::ModelProto> { let buf = std::fs::read(p)?; onnx::ModelProto::decode(buf.as_slice()).map_err(candle::Error::wrap) }
0
hf_public_repos/candle/candle-onnx
hf_public_repos/candle/candle-onnx/src/eval.rs
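//! A direct interpreter for ONNX graphs: nodes are visited in their
//! (topologically sorted) file order and every output tensor is materialized
//! eagerly into a `HashMap<String, Tensor>`.
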
use crate::onnx; use crate::onnx::attribute_proto::AttributeType; use crate::onnx::tensor_proto::DataType; use candle::{bail, DType, Device, Result, Tensor}; use std::collections::HashMap; pub type Value = Tensor; pub fn dtype(dt: DataType) -> Option<DType> { match dt { DataType::Uint8 => Some(DType::U8), DataType::Uint32 => Some(DType::U32), DataType::Int64 => Some(DType::I64), DataType::Float16 => Some(DType::F16), DataType::Float => Some(DType::F32), DataType::Double => Some(DType::F64), _ => None, } } trait Attr { const TYPE: AttributeType; fn get(attr: &onnx::AttributeProto) -> Result<&Self>; } impl Attr for i64 { const TYPE: AttributeType = AttributeType::Int; fn get(attr: &onnx::AttributeProto) -> Result<&Self> { Ok(&attr.i) } } impl Attr for f32 { const TYPE: AttributeType = AttributeType::Float; fn get(attr: &onnx::AttributeProto) -> Result<&Self> { Ok(&attr.f) } } impl Attr for [i64] { const TYPE: AttributeType = AttributeType::Ints; fn get(attr: &onnx::AttributeProto) -> Result<&Self> { Ok(attr.ints.as_slice()) } } impl Attr for str { const TYPE: AttributeType = AttributeType::String; fn get(attr: &onnx::AttributeProto) -> Result<&Self> { std::str::from_utf8(&attr.s).map_err(candle::Error::wrap) } } fn get_attr_<'a>(node: &'a onnx::NodeProto, name: &str) -> Result<&'a onnx::AttributeProto> { match node.attribute.iter().find(|attr| attr.name == name) { None => { bail!( "cannot find the '{name}' attribute in '{}' for {}", node.op_type, node.name ) } Some(dt) => Ok(dt), } } fn get_attr<'a, T: Attr + ?Sized>(node: &'a onnx::NodeProto, name: &str) -> Result<&'a T> { let attr = get_attr_(node, name)?; if attr.r#type() != T::TYPE { bail!( "unsupported type {:?} for '{name}' attribute in '{}' for {}", attr.r#type, node.op_type, node.name ) } T::get(attr) } fn get_attr_opt<'a, T: Attr + ?Sized>( node: &'a onnx::NodeProto, name: &str, ) -> Result<Option<&'a T>> { match node.attribute.iter().find(|attr| attr.name == name) { None => Ok(None), Some(attr) => { if attr.r#type() != T::TYPE { bail!( "unsupported type {:?} for '{name}' attribute in '{}' for {}", attr.r#type, node.op_type, node.name ) } let val = T::get(attr)?; Ok(Some(val)) } } } pub fn get_tensor(t: &onnx::TensorProto, name: &str) -> Result<Tensor> { let dims: Vec<usize> = t.dims.iter().map(|&x| x as usize).collect(); match DataType::try_from(t.data_type) { Ok(DataType::Int32) => { if t.int32_data.is_empty() { let len = t.raw_data.len() / 4; let data: &[i32] = unsafe { std::slice::from_raw_parts(t.raw_data.as_ptr() as *const i32, len) }; let data = data.iter().map(|v| *v as i64).collect::<Vec<_>>(); Tensor::from_vec(data, len, &Device::Cpu) } else { let data = t.int32_data.iter().map(|v| *v as i64).collect::<Vec<_>>(); Tensor::from_vec(data, t.int32_data.len(), &Device::Cpu) } } Ok(dt) => match dtype(dt) { Some(dt) => { if dt == DType::F32 && !t.float_data.is_empty() { Tensor::from_slice(&t.float_data, dims.as_slice(), &Device::Cpu) } else if dt == DType::F64 && !t.double_data.is_empty() { Tensor::from_slice(&t.double_data, dims.as_slice(), &Device::Cpu) } else if dt == DType::I64 && !t.int64_data.is_empty() { Tensor::from_slice(&t.int64_data, dims.as_slice(), &Device::Cpu) } else { Tensor::from_raw_buffer( t.raw_data.as_slice(), dt, dims.as_slice(), &Device::Cpu, ) } } None => { bail!("unsupported 'value' data-type {dt:?} for {name}") } }, Err(_) => { bail!("unsupported 'value' data-type {} for {name}", t.data_type,) } } } // This function provides a direct evaluation of the proto. 
// Longer-term, we should first convert the proto to an intermediate representation of the compute // graph so as to make multiple evaluations more efficient. // An example upside of this would be to remove intermediary values when they are not needed // anymore. pub fn simple_eval( model: &onnx::ModelProto, inputs: HashMap<String, Value>, ) -> Result<HashMap<String, Value>> { let graph = match &model.graph { None => bail!("no graph defined in proto"), Some(graph) => graph, }; let mut values = inputs; for t in graph.initializer.iter() { let tensor = get_tensor(t, t.name.as_str())?; values.insert(t.name.to_string(), tensor); } for input in graph.input.iter() { let input_type = match &input.r#type { Some(input_type) => input_type, None => continue, }; let input_type = match &input_type.value { Some(input_type) => input_type, None => continue, }; let tensor_type = match input_type { onnx::type_proto::Value::TensorType(tt) => tt, _ => continue, }; let tensor = match values.get(&input.name) { None => bail!("missing input {}", input.name), Some(tensor) => tensor, }; let dt = match DataType::try_from(tensor_type.elem_type) { Ok(dt) => match dtype(dt) { Some(dt) => dt, None => { bail!("unsupported 'value' data-type {dt:?} for {}", input.name) } }, type_ => bail!("unsupported input type {type_:?}"), }; match &tensor_type.shape { None => continue, Some(shape) => { if shape.dim.len() != tensor.rank() { bail!( "unexpected rank for {}, got {:?}, expected {:?}", input.name, shape.dim, tensor.shape() ) } for (idx, (d, &dim)) in shape.dim.iter().zip(tensor.dims().iter()).enumerate() { match &d.value { Some(onnx::tensor_shape_proto::dimension::Value::DimValue(v)) => { if *v as usize != dim { bail!( "unexpected dim {idx} for {}, got {:?}, expected {:?}", input.name, shape.dim, tensor.shape() ) } } // We do not check equality constraints for the DimParam dimensions for now. Some(onnx::tensor_shape_proto::dimension::Value::DimParam(_)) | None => (), } } } }; if dt != tensor.dtype() { bail!( "unexpected dtype for {}, got {:?}, expected {dt:?}", input.name, tensor.dtype() ) } } // The nodes are topologically sorted so we can just process them in order. for node in graph.node.iter() { let get = |input_name: &str| match values.get(input_name) { Some(value) => Ok(value), None => bail!("cannot find {input_name} for op {}", node.name), }; // TODO: Validate node.input for each operator. 
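        // Each arm below reads its inputs from `values`, applies the matching
        // candle op, and stores the result under the node's first output name.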
match node.op_type.as_str() { "Add" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_add(input1)?; values.insert(node.output[0].clone(), output); } "Sub" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_sub(input1)?; values.insert(node.output[0].clone(), output); } "Mul" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_mul(input1)?; values.insert(node.output[0].clone(), output); } "Div" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_div(input1)?; values.insert(node.output[0].clone(), output); } "Equal" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_eq(input1)?; values.insert(node.output[0].clone(), output); } "Not" => { let xs = get(&node.input[0])?; let xs = xs.eq(&xs.zeros_like()?)?; values.insert(node.output[0].clone(), xs); } "MatMul" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?; let output = input0.broadcast_matmul(input1)?; values.insert(node.output[0].clone(), output); } "Reshape" => { let input0 = get(&node.input[0])?; let input1 = get(&node.input[1])?.to_vec1::<i64>()?; // TODO: Check that there is at most a single -1 or 0, handle other neg values. let mut other_than_minus1 = 1usize; for &v in input1.iter() { if v != -1 && v != 0 { other_than_minus1 *= v as usize } } let input1 = input1 .iter() .enumerate() .map(|(idx, &v)| match v { -1 => Ok(input0.elem_count() / other_than_minus1), 0 => input0.dim(idx), _ => Ok(v as usize), }) .collect::<Result<Vec<usize>>>()?; let output = input0.reshape(input1)?; values.insert(node.output[0].clone(), output); } "LogSoftmax" => { let input = get(&node.input[0])?; let output = match get_attr_opt::<i64>(node, "axis")? { None => candle_nn::ops::softmax_last_dim(input)?, Some(&axis) => { let axis = input.normalize_axis(axis)?; candle_nn::ops::log_softmax(input, axis)? } }; values.insert(node.output[0].clone(), output); } "Softmax" => { let input = get(&node.input[0])?; let output = match get_attr_opt::<i64>(node, "axis")? { None => candle_nn::ops::softmax_last_dim(input)?, Some(&axis) => { let axis = input.normalize_axis(axis)?; candle_nn::ops::softmax(input, axis)? } }; values.insert(node.output[0].clone(), output); } "Transpose" => { let input = get(&node.input[0])?; let output = match get_attr_opt::<[i64]>(node, "perm")? { None => input.t()?, Some(perm) => { let perm = perm.iter().map(|&v| v as usize).collect::<Vec<_>>(); input.permute(perm)? } }; values.insert(node.output[0].clone(), output); } "Dropout" => { let input = get(&node.input[0])?; // Do not apply dropout at the moment, consider that we're only doing inference. 
values.insert(node.output[0].clone(), input.clone()); } "MaxPool" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#MaxPool let dilations = get_attr_opt::<[i64]>(node, "dilations")?; let kernel_shape = get_attr::<[i64]>(node, "kernel_shape")?; let pads = get_attr_opt::<[i64]>(node, "pads")?; let strides = get_attr_opt::<[i64]>(node, "strides")?; let auto_pad = get_attr_opt::<str>(node, "auto_pad")?; match auto_pad { None | Some("NOTSET") => (), Some(s) => bail!("unsupported auto_pad {s}"), }; if let Some(d) = dilations { if d.iter().any(|&v| v != 1) { bail!("MaxPool with dilation != 1, {dilations:?}") } } if let Some(d) = pads { if d.iter().any(|&v| v != 0) { bail!("MaxPool with pads != 0, {pads:?}") } } let xs = get(&node.input[0])?; let (k1, k2) = match kernel_shape { [k1, k2] => (*k1 as usize, *k2 as usize), _ => bail!("only 2d MaxPool is supported, kernel shape {kernel_shape:?}"), }; let ys = match strides { None => xs.max_pool2d((k1, k2))?, Some([s1, s2]) => { xs.max_pool2d_with_stride((k1, k2), (*s1 as usize, *s2 as usize))? } Some(strides) => bail!("only 2d MaxPool is supported, strides {strides:?}"), }; values.insert(node.output[0].clone(), ys); } "AveragePool" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#AveragePool let dilations = get_attr_opt::<[i64]>(node, "dilations")?; let kernel_shape = get_attr::<[i64]>(node, "kernel_shape")?; let pads = get_attr_opt::<[i64]>(node, "pads")?; let strides = get_attr_opt::<[i64]>(node, "strides")?; let auto_pad = get_attr_opt::<str>(node, "auto_pad")?; match auto_pad { None | Some("NOTSET") => (), Some(s) => bail!("unsupported auto_pad {s}"), }; if let Some(d) = dilations { if d.iter().any(|&v| v != 1) { bail!("AvgPool with dilation != 1, {dilations:?}") } } if let Some(d) = pads { if d.iter().any(|&v| v != 0) { bail!("AvgPool with pads != 0, {pads:?}") } } let xs = get(&node.input[0])?; let (k1, k2) = match kernel_shape { [k1, k2] => (*k1 as usize, *k2 as usize), _ => bail!("only 2d AvgPool is supported, kernel shape {kernel_shape:?}"), }; let ys = match strides { None => xs.avg_pool2d((k1, k2))?, Some([s1, s2]) => { xs.avg_pool2d_with_stride((k1, k2), (*s1 as usize, *s2 as usize))? } Some(strides) => bail!("only 2d AvgPool is supported, strides {strides:?}"), }; values.insert(node.output[0].clone(), ys); } "BatchNormalization" => { let training_mode = get_attr_opt::<i64>(node, "training_mode")?; if training_mode.copied().unwrap_or(0) != 0 { bail!("training mode is not supported for BatchNorm") } let eps = get_attr_opt::<f32>(node, "epsilon")? .copied() .unwrap_or(1e-5); let xs = get(&node.input[0])?; let weight = get(&node.input[1])?; let bias = get(&node.input[2])?; let running_mean = get(&node.input[3])?; let running_var = get(&node.input[4])?; let target_shape: Vec<usize> = xs .dims() .iter() .enumerate() .map(|(idx, v)| if idx == 1 { *v } else { 1 }) .collect(); let target_shape = target_shape.as_slice(); let xs = xs .broadcast_sub(&running_mean.reshape(target_shape)?)? .broadcast_div(&(running_var.reshape(target_shape)? + eps as f64)?.sqrt()?)?; let weight = weight.reshape(target_shape)?; let bias = bias.reshape(target_shape)?; let xs = xs.broadcast_mul(&weight)?.broadcast_add(&bias)?; values.insert(node.output[0].clone(), xs); } "Squeeze" => { let xs = get(&node.input[0])?; let mut axes = if node.input.len() <= 1 { // contract all the dimensions with size 1 except the batch dim. 
xs.dims() .iter() .enumerate() .flat_map(|(idx, &s)| if s == 1 && idx > 0 { Some(idx) } else { None }) .collect() } else { get(&node.input[1])? .to_vec1::<i64>()? .iter() .map(|&i| xs.normalize_axis(i)) .collect::<Result<Vec<_>>>()? }; axes.sort(); let mut xs = xs.clone(); for &axis in axes.iter().rev() { xs = xs.squeeze(axis)? } values.insert(node.output[0].clone(), xs); } "ConstantOfShape" => { let dims = get(&node.input[0])?; let shape = dims .to_vec1::<i64>()? .into_iter() .map(|v| v as usize) .collect::<Vec<_>>(); let xs = Tensor::zeros(shape, DType::F32, dims.device())?; values.insert(node.output[0].clone(), xs); } "Unsqueeze" => { let xs = get(&node.input[0])?; let axes = match get_attr_opt::<[i64]>(node, "axes")? { Some(axis) => axis.to_vec(), None => get(&node.input[1])?.to_vec1::<i64>()?, }; let mut axes = axes .iter() .map(|&i| { if i == xs.rank() as i64 { Ok(xs.rank()) } else { xs.normalize_axis(i) } }) .collect::<Result<Vec<_>>>()?; axes.sort(); let mut xs = xs.clone(); for &axis in axes.iter().rev() { xs = xs.unsqueeze(axis)? } values.insert(node.output[0].clone(), xs); } "Clip" => { let xs = get(&node.input[0])?; let xs = if node.input.len() >= 2 { let mins = get(&node.input[1])?; xs.broadcast_maximum(mins)? } else { xs.clone() }; let xs = if node.input.len() >= 3 { let maxs = get(&node.input[2])?; xs.broadcast_minimum(maxs)? } else { xs.clone() }; values.insert(node.output[0].clone(), xs); } "Gather" => { let xs = get(&node.input[0])?; let indices = get(&node.input[1])?; let axis = get_attr_opt::<i64>(node, "axis")?.copied().unwrap_or(0); let axis = xs.normalize_axis(axis)?; // TODO: Provide an op to handle the ONNX generalized gather op ideally in a // differentiable way. let xs = if indices.rank() == 0 { let index = indices.to_vec0::<i64>()? as usize; xs.narrow(axis, index, 1)?.squeeze(axis)? } else { todo!("implement gather for {xs:?} {indices:?} axis {axis}") }; values.insert(node.output[0].clone(), xs); } "Shape" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Shape let xs = get(&node.input[0])?; let start = get_attr_opt::<i64>(node, "start")?.copied().unwrap_or(0); let end = get_attr_opt::<i64>(node, "end")?.copied().unwrap_or(-1); let start = xs.normalize_axis(start)?; let end = xs.normalize_axis(end)?; let mut dims = vec![]; for idx in start..=end { dims.push(xs.dim(idx)? as i64) } let dims = Tensor::from_vec(dims, xs.rank(), xs.device())?; values.insert(node.output[0].clone(), dims); } "Conv" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Conv let dilations = get_attr_opt::<[i64]>(node, "dilations")?; let groups = get_attr_opt::<i64>(node, "group")?.copied().unwrap_or(1); let _kernel_shape = get_attr_opt::<[i64]>(node, "kernel_shape")?; let pads = get_attr_opt::<[i64]>(node, "pads")?; let strides = get_attr_opt::<[i64]>(node, "strides")?; let auto_pad = get_attr_opt::<str>(node, "auto_pad")?; match auto_pad { None | Some("NOTSET") => (), Some(s) => bail!("unsupported auto_pad {s}"), }; let xs = get(&node.input[0])?; let ws = get(&node.input[1])?; let ys = match ws.rank() { 3 => { let (pads, xs) = match pads { None => (0, xs.clone()), Some([p]) => (*p as usize, xs.clone()), Some([p1, p2]) => { if p1 != p2 { (0usize, xs.pad_with_zeros(2, *p1 as usize, *p2 as usize)?) 
} else { (*p1 as usize, xs.clone()) } } Some(pads) => { bail!("more pads than expected in conv1d {pads:?} {}", node.name) } }; let strides = match strides { None => 1, Some([p]) => *p as usize, Some(s) => { bail!("more strides than expected in conv1d {s:?} {}", node.name) } }; let dilations = match dilations { None => 1, Some([p]) => *p as usize, Some(s) => { bail!("more dilations than expected in conv1d {s:?} {}", node.name) } }; xs.conv1d(ws, pads, strides, dilations, groups as usize)? } 4 => { let (pads, xs) = match pads { None => (0, xs.clone()), Some([p]) => (*p as usize, xs.clone()), Some(&[p1, p2, p3, p4]) => { let p1 = p1 as usize; let p2 = p2 as usize; let p3 = p3 as usize; let p4 = p4 as usize; if p1 != p2 || p1 != p3 || p1 != p4 { (0, xs.pad_with_zeros(2, p1, p3)?.pad_with_zeros(3, p2, p4)?) } else { (p1, xs.clone()) } } Some(pads) => { bail!("more pads than expected in conv2d {pads:?} {}", node.name) } }; let strides = match strides { None => 1, Some([p]) => *p as usize, Some([p1, p2]) => { if p1 != p2 { bail!( "strides have to be the same on both axis {pads:?} {}", node.name ) } *p1 as usize } Some(s) => { bail!("more strides than expected in conv2d {s:?} {}", node.name) } }; let dilations = match dilations { None => 1, Some([p]) => *p as usize, Some([p1, p2]) => { if p1 != p2 { bail!( "dilations have to be the same on both axis {pads:?} {}", node.name ) } *p1 as usize } Some(s) => { bail!("more dilations than expected in conv2d {s:?} {}", node.name) } }; xs.conv2d(ws, pads, strides, dilations, groups as usize)? } rank => bail!( "unsupported rank for weight matrix {rank} in conv {}", node.name ), }; let ys = if node.input.len() > 2 { let bs = get(&node.input[2])?; let mut bs_shape = vec![1; ys.rank()]; bs_shape[1] = bs.elem_count(); ys.broadcast_add(&bs.reshape(bs_shape)?)? 
} else { ys }; values.insert(node.output[0].clone(), ys); } "Concat" => { // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Concat let inputs = node .input .iter() .map(|n| Ok(get(n.as_str())?.clone())) .collect::<Result<Vec<Value>>>()?; let axis: i64 = *get_attr(node, "axis")?; if inputs.is_empty() { bail!("empty concat") }; let axis = inputs[0].normalize_axis(axis)?; let output = Tensor::cat(&inputs, axis)?; values.insert(node.output[0].clone(), output); } "Abs" => { let input = get(&node.input[0])?; let output = input.abs()?; values.insert(node.output[0].clone(), output); } "Cos" => { let input = get(&node.input[0])?; let output = input.cos()?; values.insert(node.output[0].clone(), output); } "Sin" => { let input = get(&node.input[0])?; let output = input.sin()?; values.insert(node.output[0].clone(), output); } "Neg" => { let input = get(&node.input[0])?; let output = input.neg()?; values.insert(node.output[0].clone(), output); } "Erf" => { let input = get(&node.input[0])?; let output = input.erf()?; values.insert(node.output[0].clone(), output); } "Tanh" => { let input = get(&node.input[0])?; let output = input.tanh()?; values.insert(node.output[0].clone(), output); } "Sigmoid" => { let input = get(&node.input[0])?; let output = candle_nn::ops::sigmoid(input)?; values.insert(node.output[0].clone(), output); } "Gelu" => { let input = get(&node.input[0])?; let output = input.gelu_erf()?; values.insert(node.output[0].clone(), output); } "Relu" => { let input = get(&node.input[0])?; let output = input.relu()?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Constant "Constant" => { let value = match node.attribute.iter().find(|attr| attr.name == "value") { None => { // TODO: support sparse_value etc. bail!("cannot find 'value' attr in 'Constant' for {}", node.name) } Some(value) => value, }; let output = match value.r#type() { AttributeType::Tensor => { let t = value.t.as_ref().unwrap(); get_tensor(t, &node.name)? } rtype => bail!("unsupported 'value' type {rtype:?} for {}", node.name), }; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#Cast "Cast" => { let input = get(&node.input[0])?; let dt: i64 = *get_attr(node, "to")?; let dtype = match DataType::try_from(dt as i32) { Ok(DataType::Int32) => DType::I64, Ok(dt) => match dtype(dt) { Some(dt) => dt, None => { bail!("unsupported 'to' value {dt:?} for cast {}", node.name) } }, Err(_) => { bail!("unsupported 'to' value {dt:?} for cast {}", node.name) } }; let output = input.to_dtype(dtype)?; values.insert(node.output[0].clone(), output); } // https://github.com/onnx/onnx/blob/main/docs/Operators.md#CumSum "CumSum" => { let exclusive = get_attr_opt::<i64>(node, "exclusive")? .copied() .unwrap_or(0); let reverse = get_attr_opt::<i64>(node, "reverse")?.copied().unwrap_or(0); if exclusive != 0 { bail!("only exclusive == 0 is supported in CumSum") } if reverse != 0 { bail!("only reverse == 0 is supported in CumSum") } let input = get(&node.input[0])?; let axis = get(&node.input[1])? .to_dtype(DType::U32)? .to_vec0::<u32>()?; let output = input.cumsum(axis as usize)?; values.insert(node.output[0].clone(), output); } op_type => bail!("unsupported op_type {op_type} for op {node:?}"), } } graph .output .iter() .map(|output| match values.remove(&output.name) { None => bail!("cannot find output {}", output.name), Some(value) => Ok((output.name.clone(), value)), }) .collect() }
0
hf_public_repos/candle/candle-onnx
hf_public_repos/candle/candle-onnx/src/onnx.proto3
//
// WARNING: This file is automatically generated! Please edit onnx.in.proto.
//

// SPDX-License-Identifier: Apache-2.0

syntax = "proto3";

package onnx;

// Overview
//
// ONNX is an open specification that is comprised of the following components:
//
// 1) A definition of an extensible computation graph model.
// 2) Definitions of standard data types.
// 3) Definitions of built-in operators.
//
// This document describes the syntax of models and their computation graphs,
// as well as the standard data types. Together, they are referred to as the ONNX
// Intermediate Representation, or 'IR' for short.
//
// The normative semantic specification of the ONNX IR is found in docs/IR.md.
// Definitions of the built-in neural network operators may be found in docs/Operators.md.

// Notes
//
// Protobuf compatibility
//
// To simplify framework compatibility, ONNX is defined using the subset of protobuf
// that is compatible with both protobuf v2 and v3. This means that we do not use any
// protobuf features that are only available in one of the two versions.
//
// Here are the most notable contortions we have to carry out to work around
// these limitations:
//
//   - No 'map' (added protobuf 3.0). We instead represent mappings as lists
//     of key-value pairs, where order does not matter and duplicates
//     are not allowed.

// Versioning
//
// ONNX versioning is specified in docs/IR.md and elaborated on in docs/Versioning.md
//
// To be compatible with both proto2 and proto3, we will use a version number
// that is not defined by the default value but an explicit enum number.
enum Version {
  // proto3 requires the first enum value to be zero.
  // We add this just to appease the compiler.
  _START_VERSION = 0;
  // The version field is always serialized and we will use it to store the
  // version that the graph is generated from. This helps us set up version
  // control.
  // For the IR, we are using simple numbers starting with 0x00000001,
  // which was the version we published on Oct 10, 2017.
  IR_VERSION_2017_10_10 = 0x0000000000000001;

  // IR_VERSION 2 published on Oct 30, 2017
  // - Added type discriminator to AttributeProto to support proto3 users
  IR_VERSION_2017_10_30 = 0x0000000000000002;

  // IR VERSION 3 published on Nov 3, 2017
  // - For operator versioning:
  //    - Added new message OperatorSetIdProto
  //    - Added opset_import in ModelProto
  // - For vendor extensions, added domain in NodeProto
  IR_VERSION_2017_11_3 = 0x0000000000000003;

  // IR VERSION 4 published on Jan 22, 2019
  // - Relax constraint that initializers should be a subset of graph inputs
  // - Add type BFLOAT16
  IR_VERSION_2019_1_22 = 0x0000000000000004;

  // IR VERSION 5 published on March 18, 2019
  // - Add message TensorAnnotation.
  // - Add quantization annotation in GraphProto to map tensor with its scale and zero point quantization parameters.
  IR_VERSION_2019_3_18 = 0x0000000000000005;

  // IR VERSION 6 published on Sep 19, 2019
  // - Add support for sparse tensor constants stored in model.
  //   - Add message SparseTensorProto
  //   - Add sparse initializers
  IR_VERSION_2019_9_19 = 0x0000000000000006;

  // IR VERSION 7 published on May 8, 2020
  // - Add support to allow function body graph to rely on multiple external operator sets.
  // - Add a list to promote inference graph's initializers to global and
  //   mutable variables. Global variables are visible in all graphs of the
  //   stored models.
  // - Add message TrainingInfoProto to store initialization
  //   method and training algorithm. The execution of TrainingInfoProto
  //   can modify the values of mutable variables.
  // - Implicitly add inference graph into each TrainingInfoProto's algorithm.
  IR_VERSION_2020_5_8 = 0x0000000000000007;

  // IR VERSION 8 published on July 30, 2021
  // Introduce TypeProto.SparseTensor
  // Introduce TypeProto.Optional
  // Added a list of FunctionProtos local to the model
  // Deprecated since_version and operator status from FunctionProto
  IR_VERSION_2021_7_30 = 0x0000000000000008;

  // IR VERSION 9 published on May 5, 2023
  // Added AttributeProto to FunctionProto so that default attribute values can be set.
  // Added FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ.
  IR_VERSION = 0x0000000000000009;
}

// Attributes
//
// A named attribute containing either singular float, integer, string, graph,
// and tensor values, or repeated float, integer, string, graph, and tensor values.
// An AttributeProto MUST contain the name field, and *only one* of the
// following content fields, effectively enforcing a C/C++ union equivalent.
message AttributeProto {
  reserved 12, 16 to 19;
  reserved "v";

  // Note: this enum is structurally identical to the OpSchema::AttrType
  // enum defined in schema.h. If you rev one, you likely need to rev the other.
  enum AttributeType {
    UNDEFINED = 0;
    FLOAT = 1;
    INT = 2;
    STRING = 3;
    TENSOR = 4;
    GRAPH = 5;
    SPARSE_TENSOR = 11;
    TYPE_PROTO = 13;

    FLOATS = 6;
    INTS = 7;
    STRINGS = 8;
    TENSORS = 9;
    GRAPHS = 10;
    SPARSE_TENSORS = 12;
    TYPE_PROTOS = 14;
  }

  // The name field MUST be present for this version of the IR.
  string name = 1;           // namespace Attribute

  // if ref_attr_name is not empty, ref_attr_name is the attribute name in parent function.
  // In this case, this AttributeProto does not contain data, and it's a reference of attribute
  // in parent scope.
  // NOTE: This should ONLY be used in function (sub-graph). It's invalid to be used in main graph.
  string ref_attr_name = 21;

  // A human-readable documentation for this attribute. Markdown is allowed.
  string doc_string = 13;

  // The type field MUST be present for this version of the IR.
  // For 0.0.1 versions of the IR, this field was not defined, and
  // implementations needed to use has_field heuristics to determine
  // which value field was in use. For IR_VERSION 0.0.2 or later, this
  // field MUST be set and match the f|i|s|t|... field in use. This
  // change was made to accommodate proto3 implementations.
  AttributeType type = 20;   // discriminator that indicates which field below is in use

  // Exactly ONE of the following fields must be present for this version of the IR
  float f = 2;               // float
  int64 i = 3;               // int
  bytes s = 4;               // UTF-8 string
  TensorProto t = 5;         // tensor value
  GraphProto g = 6;          // graph
  SparseTensorProto sparse_tensor = 22;  // sparse tensor value
  // Do not use field below, it's deprecated.
  // optional ValueProto v = 12;         // value - subsumes everything but graph
  TypeProto tp = 14;         // type proto

  repeated float floats = 7;          // list of floats
  repeated int64 ints = 8;            // list of ints
  repeated bytes strings = 9;         // list of UTF-8 strings
  repeated TensorProto tensors = 10;  // list of tensors
  repeated GraphProto graphs = 11;    // list of graphs
  repeated SparseTensorProto sparse_tensors = 23; // list of sparse tensors
  repeated TypeProto type_protos = 15;// list of type protos
}

// Defines information on value, including the name, the type, and
// the shape of the value.
message ValueInfoProto {
  // This field MUST be present in this version of the IR.
  string name = 1;     // namespace Value
  // This field MUST be present in this version of the IR for
  // inputs and outputs of the top-level graph.
  TypeProto type = 2;
  // A human-readable documentation for this value. Markdown is allowed.
  string doc_string = 3;
}

// Nodes
//
// Computation graphs are made up of a DAG of nodes, which represent what is
// commonly called a "layer" or "pipeline stage" in machine learning frameworks.
//
// For example, it can be a node of type "Conv" that takes in an image, a filter
// tensor and a bias tensor, and produces the convolved output.
message NodeProto {
  repeated string input = 1;    // namespace Value
  repeated string output = 2;   // namespace Value

  // An optional identifier for this node in a graph.
  // This field MAY be absent in this version of the IR.
  string name = 3;     // namespace Node

  // The symbolic identifier of the Operator to execute.
  string op_type = 4;  // namespace Operator
  // The domain of the OperatorSet that specifies the operator named by op_type.
  string domain = 7;   // namespace Domain

  // Additional named attributes.
  repeated AttributeProto attribute = 5;

  // A human-readable documentation for this node. Markdown is allowed.
  string doc_string = 6;
}

// Training information
// TrainingInfoProto stores information for training a model.
// In particular, this defines two functionalities: an initialization-step
// and a training-algorithm-step. Initialization resets the model
// back to its original state as if no training has been performed.
// Training algorithm improves the model based on input data.
//
// The semantics of the initialization-step is that the initializers
// in ModelProto.graph and in TrainingInfoProto.algorithm are first
// initialized as specified by the initializers in the graph, and then
// updated by the "initialization_binding" in every instance in
// ModelProto.training_info.
//
// The field "algorithm" defines a computation graph which represents a
// training algorithm's step. After the execution of a
// TrainingInfoProto.algorithm, the initializers specified by "update_binding"
// may be immediately updated. If the targeted training algorithm contains
// consecutive update steps (such as block coordinate descent methods),
// the user needs to create a TrainingInfoProto for each step.
message TrainingInfoProto {
  // This field describes a graph to compute the initial tensors
  // upon starting the training process. Initialization graph has no input
  // and can have multiple outputs. Usually, trainable tensors in neural
  // networks are randomly initialized. To achieve that, for each tensor,
  // the user can put a random number operator such as RandomNormal or
  // RandomUniform in TrainingInfoProto.initialization.node and assign its
  // random output to the specific tensor using "initialization_binding".
  // This graph can also set the initializers in "algorithm" in the same
  // TrainingInfoProto; a use case is resetting the number of training
  // iterations to zero.
  //
  // By default, this field is an empty graph and its evaluation does not
  // produce any output. Thus, no initializer would be changed by default.
  GraphProto initialization = 1;

  // This field represents a training algorithm step. Given required inputs,
  // it computes outputs to update initializers in its own or inference graph's
  // initializer lists. In general, this field contains loss node, gradient node,
  // optimizer node, increment of iteration count.
  //
  // An execution of the training algorithm step is performed by executing the
  // graph obtained by combining the inference graph (namely "ModelProto.graph")
  // and the "algorithm" graph. That is, the actual
  // input/initializer/output/node/value_info/sparse_initializer list of
  // the training graph is the concatenation of
  // "ModelProto.graph.input/initializer/output/node/value_info/sparse_initializer"
  // and "algorithm.input/initializer/output/node/value_info/sparse_initializer"
  // in that order. This combined graph must satisfy the normal ONNX conditions.
  // Now, let's provide a visualization of graph combination for clarity.
  // Let the inference graph (i.e., "ModelProto.graph") be
  //    tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d
  // and the "algorithm" graph be
  //    tensor_d -> Add -> tensor_e
  // The combination process results in
  //    tensor_a, tensor_b -> MatMul -> tensor_c -> Sigmoid -> tensor_d -> Add -> tensor_e
  //
  // Notice that an input of a node in the "algorithm" graph may reference the
  // output of a node in the inference graph (but not the other way round). Also, an
  // inference node cannot reference inputs of "algorithm". With these restrictions,
  // the inference graph can always be run independently without training information.
  //
  // By default, this field is an empty graph and its evaluation does not
  // produce any output. Evaluating the default training step never
  // updates any initializers.
  GraphProto algorithm = 2;

  // This field specifies the bindings from the outputs of "initialization" to
  // some initializers in "ModelProto.graph.initializer" and
  // the "algorithm.initializer" in the same TrainingInfoProto.
  // See "update_binding" below for details.
  //
  // By default, this field is empty and no initializer would be changed
  // by the execution of "initialization".
  repeated StringStringEntryProto initialization_binding = 3;

  // Gradient-based training is usually an iterative procedure. In one gradient
  // descent iteration, we apply
  //
  // x = x - r * g
  //
  // where "x" is the optimized tensor, "r" stands for learning rate, and "g" is
  // gradient of "x" with respect to a chosen loss. To avoid adding assignments
  // into the training graph, we split the update equation into
  //
  // y = x - r * g
  // x = y
  //
  // The user needs to save "y = x - r * g" into TrainingInfoProto.algorithm. To
  // tell that "y" should be assigned to "x", the field "update_binding" may
  // contain a key-value pair of strings, "x" (key of StringStringEntryProto)
  // and "y" (value of StringStringEntryProto).
  // For a neural network with multiple trainable (mutable) tensors, there can
  // be multiple key-value pairs in "update_binding".
  //
  // The initializers appearing as keys in "update_binding" are considered
  // mutable variables. This implies some behaviors
  // as described below.
  //
  //  1. We have only unique keys in all "update_binding"s so that two
  //     variables may not have the same name. This ensures that one
  //     variable is assigned up to once.
  //  2. The keys must appear in names of "ModelProto.graph.initializer" or
  //     "TrainingInfoProto.algorithm.initializer".
  //  3. The values must be output names of "algorithm" or "ModelProto.graph.output".
  //  4. Mutable variables are initialized to the value specified by the
  //     corresponding initializer, and then potentially updated by
  //     "initializer_binding"s and "update_binding"s in "TrainingInfoProto"s.
  //
  // This field usually contains names of trainable tensors
  // (in ModelProto.graph), optimizer states such as momentums in advanced
  // stochastic gradient methods (in TrainingInfoProto.graph),
  // and number of training iterations (in TrainingInfoProto.graph).
  //
  // By default, this field is empty and no initializer would be changed
  // by the execution of "algorithm".
  repeated StringStringEntryProto update_binding = 4;
}

// Models
//
// ModelProto is a top-level file/container format for bundling an ML model and
// associating its computation graph with metadata.
//
// The semantics of the model are described by the associated GraphProto's.
message ModelProto {
  // The version of the IR this model targets. See Version enum above.
  // This field MUST be present.
  int64 ir_version = 1;

  // The OperatorSets this model relies on.
  // All ModelProtos MUST have at least one entry that
  // specifies which version of the ONNX OperatorSet is
  // being imported.
  //
  // All nodes in the ModelProto's graph will bind against the operator
  // with the same-domain/same-op_type operator with the HIGHEST version
  // in the referenced operator sets.
  repeated OperatorSetIdProto opset_import = 8;

  // The name of the framework or tool used to generate this model.
  // This field SHOULD be present to indicate which implementation/tool/framework
  // emitted the model.
  string producer_name = 2;

  // The version of the framework or tool used to generate this model.
  // This field SHOULD be present to indicate which implementation/tool/framework
  // emitted the model.
  string producer_version = 3;

  // Domain name of the model.
  // We use reverse domain names as name space indicators. For example:
  // `com.facebook.fair` or `com.microsoft.cognitiveservices`
  //
  // Together with `model_version` and GraphProto.name, this forms the unique identity of
  // the graph.
  string domain = 4;

  // The version of the graph encoded. See Version enum below.
  int64 model_version = 5;

  // A human-readable documentation for this model. Markdown is allowed.
  string doc_string = 6;

  // The parameterized graph that is evaluated to execute the model.
  GraphProto graph = 7;

  // Named metadata values; keys should be distinct.
  repeated StringStringEntryProto metadata_props = 14;

  // Training-specific information. Sequentially executing all stored
  // `TrainingInfoProto.algorithm`s and assigning their outputs following
  // the corresponding `TrainingInfoProto.update_binding`s is one training
  // iteration. Similarly, to initialize the model
  // (as if training hasn't happened), the user should sequentially execute
  // all stored `TrainingInfoProto.initialization`s and assign their outputs
  // using `TrainingInfoProto.initialization_binding`s.
  //
  // If this field is empty, the training behavior of the model is undefined.
  repeated TrainingInfoProto training_info = 20;

  // A list of function protos local to the model.
  //
  // Name of the function "FunctionProto.name" should be unique within the domain "FunctionProto.domain".
  // In case of any conflicts the behavior (whether the model local functions are given higher priority,
  // or standard operator sets are given higher priority, or this is treated as an error) is defined by
  // the runtimes.
  //
  // The operator sets imported by FunctionProto should be compatible with the ones
  // imported by ModelProto and other model local FunctionProtos.
  // For example, if the same operator set, say 'A', is imported by a FunctionProto and the
  // ModelProto, or by two FunctionProtos, the imported versions may differ, but the
  // operator schema returned for an (op_type, domain, version) combination must be the
  // same for every node in the function body.
  //
  // One FunctionProto can reference other FunctionProtos in the model, however, recursive
  // references are not allowed.
  repeated FunctionProto functions = 25;
};

// StringStringEntryProto follows the pattern for cross-proto-version maps.
// See https://developers.google.com/protocol-buffers/docs/proto3#maps
message StringStringEntryProto {
  string key = 1;
  string value = 2;
};

message TensorAnnotation {
  string tensor_name = 1;
  // <key, value> pairs to annotate tensor specified by <tensor_name> above.
  // The keys used in the mapping below must be pre-defined in ONNX spec.
  // For example, for 8-bit linear quantization case, 'SCALE_TENSOR', 'ZERO_POINT_TENSOR' will be pre-defined as
  // quantization parameter keys.
  repeated StringStringEntryProto quant_parameter_tensor_names = 2;
}

// Graphs
//
// A graph defines the computational logic of a model and is comprised of a parameterized
// list of nodes that form a directed acyclic graph based on their inputs and outputs.
// This is the equivalent of the "network" or "graph" in many deep learning
// frameworks.
message GraphProto {
  // The nodes in the graph, sorted topologically.
  repeated NodeProto node = 1;

  // The name of the graph.
  string name = 2;   // namespace Graph

  // A list of named tensor values, used to specify constant inputs of the graph.
  // Each initializer (both TensorProto as well as SparseTensorProto) MUST have a name.
  // The name MUST be unique across both initializer and sparse_initializer,
  // but the name MAY also appear in the input list.
  repeated TensorProto initializer = 5;

  // Initializers (see above) stored in sparse format.
  repeated SparseTensorProto sparse_initializer = 15;

  // A human-readable documentation for this graph. Markdown is allowed.
  string doc_string = 10;

  // The inputs and outputs of the graph.
  repeated ValueInfoProto input = 11;
  repeated ValueInfoProto output = 12;

  // Information for the values in the graph. The ValueInfoProto.name's
  // must be distinct. It is optional for a value to appear in value_info list.
  repeated ValueInfoProto value_info = 13;

  // This field carries information to indicate the mapping among a tensor and its
  // quantization parameter tensors. For example:
  // For tensor 'a', it may have {'SCALE_TENSOR', 'a_scale'} and {'ZERO_POINT_TENSOR', 'a_zero_point'} annotated,
  // which means, tensor 'a_scale' and tensor 'a_zero_point' are scale and zero point of tensor 'a' in the model.
  repeated TensorAnnotation quantization_annotation = 14;

  reserved 3, 4, 6 to 9;
  reserved "ir_version", "producer_version", "producer_tag", "domain";
}

// Tensors
//
// A serialized tensor value.
message TensorProto {
  enum DataType {
    UNDEFINED = 0;
    // Basic types.
    FLOAT = 1;   // float
    UINT8 = 2;   // uint8_t
    INT8 = 3;    // int8_t
    UINT16 = 4;  // uint16_t
    INT16 = 5;   // int16_t
    INT32 = 6;   // int32_t
    INT64 = 7;   // int64_t
    STRING = 8;  // string
    BOOL = 9;    // bool

    // IEEE754 half-precision floating-point format (16 bits wide).
    // This format has 1 sign bit, 5 exponent bits, and 10 mantissa bits.
    FLOAT16 = 10;

    DOUBLE = 11;
    UINT32 = 12;
    UINT64 = 13;
    COMPLEX64 = 14;     // complex with float32 real and imaginary components
    COMPLEX128 = 15;    // complex with float64 real and imaginary components

    // Non-IEEE floating-point format based on IEEE754 single-precision
    // floating-point number truncated to 16 bits.
    // This format has 1 sign bit, 8 exponent bits, and 7 mantissa bits.
    BFLOAT16 = 16;

    // Non-IEEE floating-point format based on papers
    // FP8 Formats for Deep Learning, https://arxiv.org/abs/2209.05433,
    // 8-bit Numerical Formats For Deep Neural Networks, https://arxiv.org/pdf/2206.02915.pdf.
    // Operators supporting FP8 are Cast, CastLike, QuantizeLinear, DequantizeLinear.
    // The computation usually happens inside a block quantize / dequantize
    // fused by the runtime.
    FLOAT8E4M3FN = 17;    // float 8, mostly used for coefficients, supports nan, not inf
    FLOAT8E4M3FNUZ = 18;  // float 8, mostly used for coefficients, supports nan, not inf, no negative zero
    FLOAT8E5M2 = 19;      // follows IEEE 754, supports nan, inf, mostly used for gradients
    FLOAT8E5M2FNUZ = 20;  // follows IEEE 754, supports nan, inf, mostly used for gradients, no negative zero

    // Future extensions go here.
  }

  // The shape of the tensor.
  repeated int64 dims = 1;

  // The data type of the tensor.
  // This field MUST have a valid TensorProto.DataType value
  int32 data_type = 2;

  // For very large tensors, we may want to store them in chunks, in which
  // case the following fields will specify the segment that is stored in
  // the current TensorProto.
  message Segment {
    int64 begin = 1;
    int64 end = 2;
  }
  Segment segment = 3;

  // Tensor content must be organized in row-major order.
  //
  // Depending on the data_type field, exactly one of the fields below with
  // name ending in _data is used to store the elements of the tensor.

  // For float and complex64 values
  // Complex64 tensors are encoded as a single array of floats,
  // with the real components appearing in odd numbered positions,
  // and the corresponding imaginary component appearing in the
  // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
  // is encoded as [1.0, 2.0, 3.0, 4.0]).
  // When this field is present, the data_type field MUST be FLOAT or COMPLEX64.
  repeated float float_data = 4 [packed = true];

  // For int32, uint8, int8, uint16, int16, bool, float8, and float16 values
  // float16 and float8 values must be bit-wise converted to an uint16_t prior
  // to writing to the buffer.
  // When this field is present, the data_type field MUST be
  // INT32, INT16, INT8, UINT16, UINT8, BOOL, FLOAT16, BFLOAT16, FLOAT8E4M3FN, FLOAT8E4M3FNUZ, FLOAT8E5M2, FLOAT8E5M2FNUZ
  repeated int32 int32_data = 5 [packed = true];

  // For strings.
  // Each element of string_data is a UTF-8 encoded Unicode
  // string. No trailing null, no leading BOM. The protobuf "string"
  // scalar type is not used to match ML community conventions.
  // When this field is present, the data_type field MUST be STRING
  repeated bytes string_data = 6;

  // For int64.
  // When this field is present, the data_type field MUST be INT64
  repeated int64 int64_data = 7 [packed = true];

  // Optionally, a name for the tensor.
  string name = 8; // namespace Value

  // A human-readable documentation for this tensor. Markdown is allowed.
  string doc_string = 12;

  // Serializations can either use one of the fields above, or use this
  // raw bytes field. The only exception is the string case, where one is
  // required to store the content in the repeated bytes string_data field.
  //
  // When this raw_data field is used to store tensor value, elements MUST
  // be stored as fixed-width, in little-endian order.
  // Floating-point data types MUST be stored in IEEE 754 format.
  // Complex64 elements must be written as two consecutive FLOAT values, real component first.
  // Complex128 elements must be written as two consecutive DOUBLE values, real component first.
  // Boolean type MUST be written one byte per tensor element (00000001 for true, 00000000 for false).
  //
  // Note: the advantage of a specific field rather than the raw_data field is
  // that in some cases (e.g. int data), protobuf does a better packing via
  // variable length storage, and may lead to smaller binary footprint.
  // When this field is present, the data_type field MUST NOT be STRING or UNDEFINED
  bytes raw_data = 9;

  // Data can be stored inside the protobuf file using type-specific fields or raw_data.
  // Alternatively, raw bytes data can be stored in an external file, using the external_data field.
  // external_data stores key-value pairs describing data location. Recognized keys are:
  // - "location" (required) - POSIX filesystem path relative to the directory where the ONNX
  //                           protobuf model was stored
  // - "offset" (optional) - position of byte at which stored data begins. Integer stored as string.
  //                         Offset values SHOULD be multiples of 4096 (page size) to enable mmap support.
  // - "length" (optional) - number of bytes containing data. Integer stored as string.
  // - "checksum" (optional) - SHA1 digest of the file specified under the 'location' key.
  repeated StringStringEntryProto external_data = 13;

  // Location of the data for this tensor. MUST be one of:
  // - DEFAULT - data stored inside the protobuf message. Data is stored in raw_data (if set) otherwise in type-specified field.
  // - EXTERNAL - data stored in an external location as described by external_data field.
  enum DataLocation {
    DEFAULT = 0;
    EXTERNAL = 1;
  }

  // If value not set, data is stored in raw_data (if set) otherwise in type-specified field.
  DataLocation data_location = 14;

  // For double
  // Complex128 tensors are encoded as a single array of doubles,
  // with the real components appearing in odd numbered positions,
  // and the corresponding imaginary component appearing in the
  // subsequent even numbered position. (e.g., [1.0 + 2.0i, 3.0 + 4.0i]
  // is encoded as [1.0, 2.0, 3.0, 4.0]).
  // When this field is present, the data_type field MUST be DOUBLE or COMPLEX128
  repeated double double_data = 10 [packed = true];

  // For uint64 and uint32 values
  // When this field is present, the data_type field MUST be
  // UINT32 or UINT64
  repeated uint64 uint64_data = 11 [packed = true];
}

// A serialized sparse-tensor value
message SparseTensorProto {
  // The sequence of non-default values are encoded as a tensor of shape [NNZ].
  // The default-value is zero for numeric tensors, and empty-string for string tensors.
  // values must have a non-empty name present which serves as a name for SparseTensorProto
  // when used in sparse_initializer list.
  TensorProto values = 1;

  // The indices of the non-default values, which may be stored in one of two formats.
  // (a) Indices can be a tensor of shape [NNZ, rank] with the [i,j]-th value
  // corresponding to the j-th index of the i-th value (in the values tensor).
  // (b) Indices can be a tensor of shape [NNZ], in which case the i-th value
  // must be the linearized-index of the i-th value (in the values tensor).
  // The linearized-index can be converted into an index tuple (k_1,...,k_rank)
  // using the shape provided below.
  // The indices must appear in ascending order without duplication.
  // In the first format, the ordering is lexicographic-ordering:
  // e.g., index-value [1,4] must appear before [2,1]
  TensorProto indices = 2;

  // The shape of the underlying dense-tensor: [dim_1, dim_2, ... dim_rank]
  repeated int64 dims = 3;
}

// Defines a tensor shape. A dimension can be either an integer value
// or a symbolic variable. A symbolic variable represents an unknown
// dimension.
message TensorShapeProto {
  message Dimension {
    oneof value {
      int64 dim_value = 1;
      string dim_param = 2;   // namespace Shape
    };
    // Standard denotation can optionally be used to denote tensor
    // dimensions with standard semantic descriptions to ensure
    // that operations are applied to the correct axis of a tensor.
    // Refer to https://github.com/onnx/onnx/blob/main/docs/DimensionDenotation.md#denotation-definition
    // for pre-defined dimension denotations.
    string denotation = 3;
  };
  repeated Dimension dim = 1;
}

// Types
//
// The standard ONNX data types.
message TypeProto {

  message Tensor {
    // This field MUST NOT have the value of UNDEFINED
    // This field MUST have a valid TensorProto.DataType value
    // This field MUST be present for this version of the IR.
    int32 elem_type = 1;
    TensorShapeProto shape = 2;
  }

  // repeated T
  message Sequence {
    // The type and optional shape of each element of the sequence.
    // This field MUST be present for this version of the IR.
    TypeProto elem_type = 1;
  };

  // map<K,V>
  message Map {
    // This field MUST have a valid TensorProto.DataType value
    // This field MUST be present for this version of the IR.
    // This field MUST refer to an integral type ([U]INT{8|16|32|64}) or STRING
    int32 key_type = 1;
    // This field MUST be present for this version of the IR.
    TypeProto value_type = 2;
  };

  // wrapper for Tensor, Sequence, or Map
  message Optional {
    // The type and optional shape of the element wrapped.
    // This field MUST be present for this version of the IR.
    // Possible values correspond to OptionalProto.DataType enum
    TypeProto elem_type = 1;
  };

  message SparseTensor {
    // This field MUST NOT have the value of UNDEFINED
    // This field MUST have a valid TensorProto.DataType value
    // This field MUST be present for this version of the IR.
    int32 elem_type = 1;
    TensorShapeProto shape = 2;
  }

  oneof value {
    // The type of a tensor.
    Tensor tensor_type = 1;

    // NOTE: DNN-only implementations of ONNX MAY elect to not support non-tensor values
    //       as input and output to graphs and nodes. These types are needed to naturally
    //       support classical ML operators. DNN operators SHOULD restrict their input
    //       and output types to tensors.

    // The type of a sequence.
    Sequence sequence_type = 4;

    // The type of a map.
    Map map_type = 5;

    // The type of an optional.
    Optional optional_type = 9;

    // Type of the sparse tensor
    SparseTensor sparse_tensor_type = 8;
  }

  // An optional denotation can be used to denote the whole
  // type with a standard semantic description as to what is
  // stored inside. Refer to https://github.com/onnx/onnx/blob/main/docs/TypeDenotation.md#type-denotation-definition
  // for pre-defined type denotations.
  string denotation = 6;
}

// Operator Sets
//
// OperatorSets are uniquely identified by a (domain, opset_version) pair.
message OperatorSetIdProto {
  // The domain of the operator set being identified.
  // The empty string ("") or absence of this field implies the operator
  // set that is defined as part of the ONNX specification.
  // This field MUST be present in this version of the IR when referring to any other operator set.
  string domain = 1;

  // The version of the operator set being identified.
  // This field MUST be present in this version of the IR.
  int64 version = 2;
}

// Operator/function status.
enum OperatorStatus {
  EXPERIMENTAL = 0;
  STABLE = 1;
}

message FunctionProto {
  // The name of the function, similar usage of op_type in OperatorProto.
  // Combined with FunctionProto.domain, this forms the unique identity of
  // the FunctionProto.
  string name = 1;

  // Deprecated since IR Version 8
  // optional int64 since_version = 2;
  reserved 2;
  reserved "since_version";

  // Deprecated since IR Version 8
  // optional OperatorStatus status = 3;
  reserved 3;
  reserved "status";

  // The inputs and outputs of the function.
  repeated string input = 4;
  repeated string output = 5;

  // The attribute parameters of the function.
  // It is for function parameters without default values.
  repeated string attribute = 6;

  // The attribute protos of the function.
  // It is for function attributes with default values.
  // A function attribute shall be represented either as
  // a string attribute or an AttributeProto, not both.
  repeated AttributeProto attribute_proto = 11;

  // The nodes in the function.
  repeated NodeProto node = 7;
  // A human-readable documentation for this function. Markdown is allowed.
  string doc_string = 8;

  // The OperatorSets this function body (graph) relies on.
  //
  // All nodes in the function body (graph) will bind against the operator
  // with the same-domain/same-op_type operator with the HIGHEST version
  // in the referenced operator sets. This means at most one version can be relied
  // upon for one domain.
  //
  // The operator sets imported by FunctionProto should be compatible with the ones
  // imported by ModelProto. For example, if the same operator set, say 'A', is imported
  // by a FunctionProto and the ModelProto, the imported versions may differ, but the
  // operator schema returned for an (op_type, domain, version) combination must be the same.
  repeated OperatorSetIdProto opset_import = 9;

  // The domain which this function belongs to. Combined with FunctionProto.name, this forms the unique identity of
  // the FunctionProto.
  string domain = 10;
}

// For using protobuf-lite
option optimize_for = LITE_RUNTIME;
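For orientation, the schema above boils down to: a ModelProto carries an ir_version, at least one opset_import, and a GraphProto of NodeProtos. A minimal consumption sketch in Rust follows; it assumes candle-onnx exposes the prost-generated types as candle_onnx::onnx and provides read_file / simple_eval helpers with roughly these signatures, so treat the names and types as assumptions to verify against the crate version you depend on, not a fixed API.

// Hypothetical sketch: load a serialized ModelProto and evaluate it.
use std::collections::HashMap;
use candle_core::{DType, Device, Tensor};

fn run(path: &str) -> anyhow::Result<()> {
    // Assumed helper: deserializes the file into an onnx::ModelProto.
    let model = candle_onnx::read_file(path)?;
    // prost represents message-typed fields as Option<_>, hence as_ref().
    let graph = model.graph.as_ref().expect("no graph in ModelProto");
    println!("ir_version: {}, graph: {}", model.ir_version, graph.name);
    // Feed one input tensor, keyed by the graph's declared input name;
    // the (1, 3, 224, 224) shape is an illustrative placeholder.
    let input = Tensor::zeros((1, 3, 224, 224), DType::F32, &Device::Cpu)?;
    let inputs: HashMap<String, Tensor> =
        HashMap::from([(graph.input[0].name.clone(), input)]);
    // Assumed helper: walks graph.node in topological order and returns the outputs.
    let outputs = candle_onnx::simple_eval(&model, inputs)?;
    for (name, t) in outputs.iter() {
        println!("{name}: {:?}", t.shape());
    }
    Ok(())
}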
0
hf_public_repos/candle
hf_public_repos/candle/candle-core/Cargo.toml
[package]
name = "candle-core"
version.workspace = true
edition.workspace = true
description.workspace = true
repository.workspace = true
keywords.workspace = true
categories.workspace = true
license.workspace = true
readme = "README.md"

[dependencies]
accelerate-src = { workspace = true, optional = true }
byteorder = { workspace = true }
candle-kernels = { path = "../candle-kernels", version = "0.3.1", optional = true }
candle-metal-kernels = { path = "../candle-metal-kernels", version = "0.3.1", optional = true }
metal = { workspace = true, optional = true }
cudarc = { workspace = true, optional = true }
gemm = { workspace = true }
half = { workspace = true }
intel-mkl-src = { workspace = true, optional = true }
libc = { workspace = true, optional = true }
memmap2 = { workspace = true }
num-traits = { workspace = true }
num_cpus = { workspace = true }
rand = { workspace = true }
rand_distr = { workspace = true }
rayon = { workspace = true }
safetensors = { workspace = true }
thiserror = { workspace = true }
yoke = { workspace = true }
zip = { workspace = true }

[dev-dependencies]
anyhow = { workspace = true }
clap = { workspace = true }

[features]
default = []
cuda = ["cudarc", "dep:candle-kernels"]
cudnn = ["cuda", "cudarc/cudnn"]
mkl = ["dep:libc", "dep:intel-mkl-src"]
accelerate = ["dep:libc", "dep:accelerate-src"]
metal = ["dep:metal", "dep:candle-metal-kernels"]
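The optional cuda / metal features above gate the corresponding backends at compile time; at runtime a program can still probe which backends were actually built in. A minimal sketch, assuming the cuda_is_available / metal_is_available helpers in candle_core::utils keep their current names:

use candle_core::{Device, Result};

// Prefer an accelerator when the corresponding cargo feature was compiled in,
// falling back to the CPU backend otherwise. Without the `cuda` or `metal`
// features, the availability checks simply return false.
fn pick_device() -> Result<Device> {
    if candle_core::utils::cuda_is_available() {
        Device::new_cuda(0)
    } else if candle_core::utils::metal_is_available() {
        Device::new_metal(0)
    } else {
        Ok(Device::Cpu)
    }
}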
0
hf_public_repos/candle
hf_public_repos/candle/candle-core/LICENSE
Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. 
This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives. Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
0
hf_public_repos/candle
hf_public_repos/candle/candle-core/README.md
# candle

Minimalist ML framework for Rust
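The README is terse, so here is the kind of hello-world the crate supports, mirroring the example from the top-level candle README: build two tensors on the CPU and multiply them.

use candle_core::{Device, Tensor};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let device = Device::Cpu;
    // Two random matrices, then a (2, 3) x (3, 4) -> (2, 4) matmul.
    let a = Tensor::randn(0f32, 1., (2, 3), &device)?;
    let b = Tensor::randn(0f32, 1., (3, 4), &device)?;
    let c = a.matmul(&b)?;
    println!("{c}");
    Ok(())
}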
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/custom_op_tests.rs
use candle_core::backend::BackendStorage;
use candle_core::cpu_backend;
use candle_core::test_utils::to_vec1_round;
use candle_core::{CpuStorage, CustomOp1, DType, Device, Error, Layout, Result, Shape, Tensor};

fn fwd<T: num_traits::Float>(v: T, alpha: f64) -> T {
    if v.is_sign_positive() {
        v
    } else {
        let alpha = T::from(alpha).unwrap_or(T::nan());
        (v.exp() - T::one()) * alpha
    }
}

struct Elu {
    alpha: f64,
}

impl CustomOp1 for Elu {
    fn name(&self) -> &'static str {
        "elu"
    }

    fn cpu_fwd(&self, s: &CpuStorage, l: &Layout) -> Result<(CpuStorage, Shape)> {
        let storage = candle_core::map_dtype!(
            "elu",
            s,
            |s| cpu_backend::unary_map(s, l, |v| fwd(v, self.alpha)),
            (BF16, F16, F32, F64)
        );
        Ok((storage, l.shape().clone()))
    }
}

#[test]
fn custom_op1_no_backward() -> Result<()> {
    let cpu = &Device::Cpu;
    let t = Tensor::arange(0u32, 12u32, cpu)?.to_dtype(DType::F32)?;
    let t = (t - 5.)?;
    let elu_t = t.apply_op1_no_bwd(&Elu { alpha: 1. })?;
    assert_eq!(
        to_vec1_round(&elu_t, 4)?,
        &[-0.9933, -0.9817, -0.9502, -0.8647, -0.6321, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
    );
    Ok(())
}

// Define a similar struct as Elu but with backward support.
fn bwd<T: num_traits::Float>(v: T, alpha: f64) -> T {
    if v.is_sign_positive() {
        T::one()
    } else {
        let alpha = T::from(alpha).unwrap_or(T::nan());
        v.exp() * alpha
    }
}

struct EluBackward {
    alpha: f64,
}

impl CustomOp1 for EluBackward {
    fn name(&self) -> &'static str {
        "elu-bwd"
    }

    fn cpu_fwd(&self, s: &CpuStorage, l: &Layout) -> Result<(CpuStorage, Shape)> {
        let storage = candle_core::map_dtype!(
            "elu-bwd",
            s,
            |s| cpu_backend::unary_map(s, l, |v| bwd(v, self.alpha)),
            (BF16, F16, F32, F64)
        );
        Ok((storage, l.shape().clone()))
    }
}

struct EluWithBackward(Elu);

impl EluWithBackward {
    fn new(alpha: f64) -> Self {
        Self(Elu { alpha })
    }
}

impl CustomOp1 for EluWithBackward {
    fn name(&self) -> &'static str {
        "elu"
    }

    fn cpu_fwd(&self, s: &CpuStorage, l: &Layout) -> Result<(CpuStorage, Shape)> {
        self.0.cpu_fwd(s, l)
    }

    fn bwd(&self, arg: &Tensor, _res: &Tensor, grad_res: &Tensor) -> Result<Option<Tensor>> {
        let alpha = self.0.alpha;
        let bwd = arg.apply_op1(EluBackward { alpha })?;
        Ok(Some(grad_res.mul(&bwd)?))
    }
}

#[test]
fn custom_op1_with_backward() -> Result<()> {
    let cpu = &Device::Cpu;
    let t = candle_core::Var::new(&[-2f32, 0f32, 2f32], cpu)?;
    let elu_t = t.apply_op1(EluWithBackward::new(2.))?;
    assert_eq!(to_vec1_round(&elu_t, 4)?, &[-1.7293, 0.0, 2.0]);

    let grads = elu_t.backward()?;
    let grad_x = grads.get(&t).unwrap();
    assert_eq!(to_vec1_round(grad_x, 4)?, [0.2707, 1.0, 1.0]);

    Ok(())
}
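As a sanity check on the values asserted in custom_op1_with_backward (with $\alpha = 2$), the op computes

$$\mathrm{elu}_\alpha(x) = \begin{cases} x & x \ge 0 \\ \alpha\,(e^{x} - 1) & x < 0 \end{cases}, \qquad \frac{d}{dx}\,\mathrm{elu}_\alpha(x) = \begin{cases} 1 & x \ge 0 \\ \alpha\, e^{x} & x < 0 \end{cases}$$

so at $x = -2$: $2\,(e^{-2} - 1) \approx -1.7293$ and $2\,e^{-2} \approx 0.2707$, matching the expected forward value and gradient in the test.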
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/grad_tests.rs
use anyhow::{Context, Result};
use candle_core::{test_device, test_utils, Device, Shape, Tensor, Var};

fn simple_grad(device: &Device) -> Result<()> {
    let x = Var::new(&[3f32, 1., 4.], device)?;
    let x = x.as_tensor();
    let y = (((x * x)? + x * 5f64)? + 4f64)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(x.to_vec1::<f32>()?, [3., 1., 4.]);
    // y = x^2 + 5.x + 4
    assert_eq!(y.to_vec1::<f32>()?, [28., 10., 40.]);
    // dy/dx = 2.x + 5
    assert_eq!(grad_x.to_vec1::<f32>()?, [11., 7., 13.]);
    Ok(())
}

fn sum_grad(device: &Device) -> Result<()> {
    let x = Var::new(&[3f32, 1., 4.], device)?;
    let x = x.as_tensor();
    let y = (x.sqr()?.sum_keepdim(0)? * 2.)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [52.]);
    // y = 2.x^2 so dy/dx = 4.x
    assert_eq!(grad_x.to_vec1::<f32>()?, &[12., 4., 16.]);

    // Same test as before but squeezing on the last dimension.
    let y = (x.sqr()?.sum_keepdim(0)? * 2.)?.squeeze(0)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_scalar::<f32>()?, 52.);
    // y = 2.x^2 so dy/dx = 4.x
    assert_eq!(grad_x.to_vec1::<f32>()?, &[12., 4., 16.]);
    Ok(())
}

fn matmul_grad(device: &Device) -> Result<()> {
    let data: Vec<_> = (0..12).map(|i| i as f32).collect();
    let x = Var::from_slice(&data, (2, 2, 3), device)?;
    let data: Vec<_> = (0..12).map(|i| i as f32).collect();
    let y = Var::from_slice(&data, (2, 3, 2), device)?;
    let c = x.matmul(&y)?;
    let grads = c.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    let grad_y = grads.get(&y).context("no grad for y")?;
    assert_eq!(grad_x.shape(), &Shape::from((2, 2, 3)));
    assert_eq!(grad_y.shape(), &Shape::from((2, 3, 2)));
    assert_eq!(
        &*grad_x.to_vec3::<f32>()?,
        &[
            [[1., 5., 9.], [1., 5., 9.]],
            [[13., 17., 21.], [13., 17., 21.]]
        ]
    );
    assert_eq!(
        &*grad_y.to_vec3::<f32>()?,
        &[
            [[3., 3.], [5., 5.], [7., 7.]],
            [[15., 15.], [17., 17.], [19., 19.]]
        ]
    );
    Ok(())
}

// The simplest gradient descent, using scalar variable.
fn grad_descent(device: &Device) -> Result<()> {
    let x = Var::new(0f32, device)?;
    let learning_rate = 0.1;
    for _step in 0..100 {
        let xt = x.as_tensor();
        let c = ((xt - 4.2)? * (xt - 4.2)?)?;
        let grads = c.backward()?;
        let x_grad = grads.get(&x).context("no grad for x")?;
        x.set(&(xt - x_grad * learning_rate)?)?
    }
    assert_eq!(x.to_scalar::<f32>()?, 4.199999);
    Ok(())
}

fn unary_grad(device: &Device) -> Result<()> {
    let x = Var::new(&[3f32, 1., 4., 0.15], device)?;
    let x = x.as_tensor();
    let y = (x.log()? + 1.)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [2.0986, 1.0, 2.3863, -0.8971]
    );
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [0.3333, 1.0, 0.25, 6.6667]
    );
    let y = x.exp()?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(
        y.to_vec1::<f32>()?,
        [20.085537, 2.7182817, 54.59815, 1.1618342]
    );
    assert_eq!(
        grad_x.to_vec1::<f32>()?,
        [20.085537, 2.7182817, 54.59815, 1.1618342]
    );
    let y = x.exp()?.sqr()?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(
        y.to_vec1::<f32>()?,
        [403.4288, 7.3890557, 2980.9578, 1.3498588]
    );
    // exp(x)^2 = exp(2*x)
    assert_eq!(
        grad_x.to_vec1::<f32>()?,
        [806.8576, 14.778111, 5961.9155, 2.6997175]
    );
    let y = x.sin()?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [0.1411, 0.8415, -0.7568, 0.1494],
    );
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [-0.99, 0.5403, -0.6536, 0.9888],
    );
    let y = x.cos()?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [-0.99, 0.5403, -0.6536, 0.9888],
    );
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [-0.1411, -0.8415, 0.7568, -0.1494],
    );
    let y = x.sqr()?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [9.0, 1.0, 16.0, 0.0225]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [6.0, 2.0, 8.0, 0.3]);
    let y = x.sqr()?.sqrt()?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [3.0, 1.0, 4.0, 0.15]);
    assert_eq!(test_utils::to_vec1_round(grad_x, 4)?, [1.0, 1.0, 1.0, 1.0]);
    let y = x.neg()?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [-3.0, -1.0, -4.0, -0.15]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [-1.0, -1.0, -1.0, -1.0]);
    let y = x.affine(0.2, 1.)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [1.6, 1.2, 1.8, 1.03]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [0.2, 0.2, 0.2, 0.2]);
    let y = Tensor::new(1f32, device)?.broadcast_div(x)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [0.3333, 1.0, 0.25, 6.6667]
    );
    assert_eq!(
        grad_x.to_vec1::<f32>()?,
        [-0.11111111, -1.0, -0.0625, -44.444443],
    );
    let y = x.broadcast_div(&Tensor::new(0.5f32, device)?)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [6., 2., 8., 0.3]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [2., 2., 2., 2.]);

    let x = Var::new(&[3f32, 1., 4., 0.15], device)?;
    let y = x.powf(2.5)?;
    let grads = y.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(test_utils::to_vec1_round(&y, 2)?, [15.59, 1.0, 32.0, 0.01]);
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 2)?,
        [12.99, 2.5, 20.0, 0.15]
    );

    let y = x.tanh()?;
    let grads = y.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(test_utils::to_vec1_round(&y, 2)?, [1.0, 0.76, 1.0, 0.15]);
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 2)?,
        [0.01, 0.42, 0.0, 0.98],
    );

    // testing compared to pytorch nn.GELU(approximate = 'tanh')
    let y = x.gelu()?;
    let grads = y.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [2.9964, 0.8412, 3.9999, 0.0839]
    );
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [1.0116, 1.0830, 1.0003, 0.6188],
    );

    // Testing compared to pytorch torch.erf
    //
    // import torch
    // x = torch.tensor([3.0, 1.0, 4.0, 0.15], requires_grad=True)
    // y = x.erf()
    // print(y)
    // loss = y.sum()
    // loss.backward()
    // print(x.grad)
    let y = x.erf()?;
    let grads = y.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(test_utils::to_vec1_round(&y, 4)?, [1.0, 0.8427, 1.0, 0.168]);
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [0.0001, 0.4151, 0.0, 1.1033],
    );

    // Testing compared to pytorch nn.GELU(approximate = 'none')
    //
    // import torch
    // import torch.nn.functional as F
    // x = torch.tensor([3.0, 1.0, 4.0, 0.15], requires_grad=True)
    // y = F.gelu(x, approximate='none')
    // print(y)
    // loss = y.sum()
    // loss.backward()
    // print(x.grad)
    let y = x.gelu_erf()?;
    let grads = y.backward()?;
    let grad_x = grads.get(&x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [2.9960, 0.8413, 3.9999, 0.0839]
    );
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [1.0119, 1.0833, 1.0005, 0.6188],
    );

    // Testing compared to pytorch elu
    //
    // import torch
    // import torch.nn.functional as F
    // x = torch.tensor([-1.0, 0.0, -2.0, 3.0], requires_grad=True)
    // y = F.elu(x, alpha=2.0)
    // print(y)
    // loss = y.sum()
    // loss.backward()
    // print(x.grad)
    let elu_x = Var::new(&[-1.0f32, 0., -2., 3.], device)?;
    let y = elu_x.elu(2.)?;
    let grads = y.backward()?;
    let grad_x = grads.get(&elu_x).context("no grad for x")?;
    assert_eq!(
        test_utils::to_vec1_round(&y, 4)?,
        [-1.2642, 0.0000, -1.7293, 3.0000]
    );
    assert_eq!(
        test_utils::to_vec1_round(grad_x, 4)?,
        [0.7358, 2.0000, 0.2707, 1.0000]
    );
    Ok(())
}

fn binary_grad(device: &Device) -> Result<()> {
    let x = Var::new(&[3f32, 1., -4., -1.], device)?;
    let x = x.as_tensor();
    // leaky relu
    let y = x.maximum(&(x * 0.1)?)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(x.to_vec1::<f32>()?, [3., 1., -4., -1.]);
    assert_eq!(y.to_vec1::<f32>()?, [3., 1., -0.4, -0.1]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [1., 1., 0.1, 0.1]);

    let y = x.minimum(&(x * 0.1)?)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [0.3, 0.1, -4., -1.]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [0.1, 0.1, 1., 1.]);

    // This one is easy to mess up, we want the gradient to be one as it is the identity function.
    let y = x.minimum(x)?;
    let grads = y.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    assert_eq!(y.to_vec1::<f32>()?, [3., 1., -4., -1.]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [1., 1., 1., 1.]);

    let x_var = Var::new(&[3f32, 1., -4., -1., 5., 9.], device)?;
    let x = x_var.as_tensor();
    let y_var = Var::new(&[2f32, 7., 1.], device)?;
    let y = y_var.as_tensor();

    let ss = x
        .reshape((2, 3))?
        .slice_scatter0(&y.reshape((1, 3))?, 1)?
        .sqr()?;
    let grads = ss.backward()?;
    let grad_x = grads.get(x).context("no grad for x")?;
    let grad_y = grads.get(y).context("no grad for y")?;
    assert_eq!(ss.to_vec2::<f32>()?, [[9., 1., 16.], [4., 49., 1.]]);
    assert_eq!(grad_x.to_vec1::<f32>()?, [6.0, 2.0, -8.0, 0.0, 0.0, 0.0]);
    assert_eq!(grad_y.to_vec1::<f32>()?, [4.0, 14.0, 2.0]);
    Ok(())
}

test_device!(
    simple_grad,
    simple_grad_cpu,
    simple_grad_gpu,
    simple_grad_metal
);
test_device!(sum_grad, sum_grad_cpu, sum_grad_gpu, sum_grad_metal);
test_device!(
    matmul_grad,
    matmul_grad_cpu,
    matmul_grad_gpu,
    matmul_grad_metal
);
test_device!(
    grad_descent,
    grad_descent_cpu,
    grad_descent_gpu,
    grad_descent_metal
);
test_device!(unary_grad, unary_grad_cpu, unary_grad_gpu, unary_grad_metal);
test_device!(
    binary_grad,
    binary_grad_cpu,
    binary_grad_gpu,
    binary_grad_metal
);
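All of the gradient tests above follow the same three-step pattern: wrap the input in a Var, build the output tensor, call backward, then look the gradient up in the returned store. A minimal self-contained instance of that pattern, using only calls that already appear in this file:

use candle_core::{Device, Result, Var};

fn main() -> Result<()> {
    // f(x) = x^2, so df/dx = 2x.
    let x = Var::new(&[1f32, 2., 3.], &Device::Cpu)?;
    let y = x.as_tensor().sqr()?;
    let grads = y.backward()?;
    let grad_x = grads.get(&x).expect("no grad for x");
    assert_eq!(grad_x.to_vec1::<f32>()?, [2., 4., 6.]);
    Ok(())
}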
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/pool_tests.rs
use candle_core::{test_device, test_utils, Device, IndexOp, Result, Tensor};

// https://github.com/huggingface/candle/issues/364
fn avg_pool2d(dev: &Device) -> Result<()> {
    let data: Vec<f32> = vec![
        1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
    ];
    let t = Tensor::from_vec(data, (1, 1, 4, 4), dev)?;
    let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?;
    assert_eq!(pool.to_vec2::<f32>()?, [[0.5f32, 1.], [1., 1.]]);

    let data: Vec<f32> = vec![
        1., 2., 1., 3., 0., 0., 1., 1., 1., 1., 1., 1., 5., 1., 1., 1.,
    ];
    let t = Tensor::from_vec(data, (1, 1, 2, 8), dev)?;
    let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?;
    assert_eq!(pool.to_vec2::<f32>()?, [[5. / 4., 6. / 4., 6. / 4., 1.]]);
    Ok(())
}

fn max_pool2d(dev: &Device) -> Result<()> {
    let data: Vec<f32> = vec![
        1., 2., 1., 3., 0., 0., 1., 1., 1., 1., 1., 1., 5., 1., 1., 1.,
    ];
    let t = Tensor::from_vec(data, (1, 1, 4, 4), dev)?;
    let pool = t.max_pool2d(2)?.squeeze(0)?.squeeze(0)?;
    assert_eq!(pool.to_vec2::<f32>()?, [[2f32, 3.], [5., 1.]]);

    let t = t.reshape((1, 1, 2, 8))?;
    let pool = t.max_pool2d(2)?.squeeze(0)?.squeeze(0)?;
    assert_eq!(pool.to_vec2::<f32>()?, [[2.0, 3.0, 5.0, 1.0]]);
    Ok(())
}

/* This test corresponds to the following PyTorch script.
import torch
torch.manual_seed(4242)

t = torch.randn((1, 2, 4, 4))
print(t.flatten())
res = torch.nn.functional.avg_pool2d(t, 2)
print(res)
*/
fn avg_pool2d_pytorch(dev: &Device) -> Result<()> {
    let t = Tensor::new(
        &[
            0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997,
            3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843,
            0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013,
            -0.6836, 0.2477, 1.3127,
        ],
        dev,
    )?
    .reshape((1, 2, 4, 4))?;
    let pool = t.avg_pool2d(2)?.squeeze(0)?;
    assert_eq!(
        test_utils::to_vec3_round(&pool, 4)?,
        [
            [[-1.1926, -0.0395], [0.2688, 0.1871]],
            [[0.1835, -0.1606], [0.6249, 0.3217]]
        ]
    );
    let pool = t.avg_pool2d(3)?.squeeze(0)?;
    assert_eq!(
        test_utils::to_vec3_round(&pool, 4)?,
        [[[0.085]], [[0.0078]]]
    );

    let t = t.reshape((1, 1, 4, 8))?;
    let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?;
    assert_eq!(
        test_utils::to_vec2_round(&pool, 4)?,
        [
            [0.7745, 0.0276, -1.6983, 0.12],
            [0.3542, 0.1625, 0.4542, -0.0014]
        ]
    );
    Ok(())
}

fn upsample_nearest2d(dev: &Device) -> Result<()> {
    let t = Tensor::arange(0f32, 6f32, dev)?.reshape((1, 1, 2, 3))?;
    let upsampled = t.upsample_nearest2d(4, 6)?.i(0)?.i(0)?;
    assert_eq!(
        t.i(0)?.i(0)?.to_vec2::<f32>()?,
        [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]
    );
    assert_eq!(
        upsampled.to_vec2::<f32>()?,
        [
            [0.0, 0.0, 1.0, 1.0, 2.0, 2.0],
            [0.0, 0.0, 1.0, 1.0, 2.0, 2.0],
            [3.0, 3.0, 4.0, 4.0, 5.0, 5.0],
            [3.0, 3.0, 4.0, 4.0, 5.0, 5.0]
        ]
    );
    Ok(())
}

test_device!(avg_pool2d, avg_pool2d_cpu, avg_pool2d_gpu, avg_pool2d_metal);
test_device!(
    avg_pool2d_pytorch,
    avg_pool2d_pytorch_cpu,
    avg_pool2d_pytorch_gpu,
    avg_pool2d_pytorch_metal
);
test_device!(max_pool2d, max_pool2d_cpu, max_pool2d_gpu, max_pool2d_metal);
test_device!(
    upsample_nearest2d,
    upsample_nearest2d_cpu,
    upsample_nearest2d_gpu,
    upsample_nearest2d_metal
);
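A quick hand check of the first avg_pool2d expectation: a 2x2 kernel (with the matching stride) splits the 4x4 input into four disjoint windows, and only the top-left window $\begin{pmatrix}1 & 1\\ 0 & 0\end{pmatrix}$ contains zeros:

$$\frac{1 + 1 + 0 + 0}{4} = 0.5$$

while the other three windows are all ones and each averages to $1$, giving the asserted output $\begin{pmatrix}0.5 & 1\\ 1 & 1\end{pmatrix}$.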
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/layout_tests.rs
use candle::{test_device, Device, IndexOp, Result, Tensor}; use candle_core as candle; fn contiguous(device: &Device) -> Result<()> { let tensor = Tensor::arange(0u32, 24u32, device)?.reshape((2, 3, 4))?; assert_eq!( tensor.to_vec3::<u32>()?, &[ [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]] ] ); assert_eq!( tensor.t()?.contiguous()?.to_vec3::<u32>()?, &[ [[0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11]], [[12, 16, 20], [13, 17, 21], [14, 18, 22], [15, 19, 23]] ] ); assert_eq!( tensor.transpose(0, 1)?.contiguous()?.to_vec3::<u32>()?, &[ [[0, 1, 2, 3], [12, 13, 14, 15]], [[4, 5, 6, 7], [16, 17, 18, 19]], [[8, 9, 10, 11], [20, 21, 22, 23]] ] ); assert_eq!( tensor.transpose(0, 1)?.flatten_all()?.to_vec1::<u32>()?, &[0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7, 16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23] ); assert_eq!( tensor .i(1..)? .transpose(0, 1)? .contiguous()? .to_vec3::<u32>()?, &[[[12, 13, 14, 15]], [[16, 17, 18, 19]], [[20, 21, 22, 23]]] ); assert_eq!( tensor.transpose(0, 2)?.contiguous()?.to_vec3::<u32>()?, &[ [[0, 12], [4, 16], [8, 20]], [[1, 13], [5, 17], [9, 21]], [[2, 14], [6, 18], [10, 22]], [[3, 15], [7, 19], [11, 23]] ] ); Ok(()) } test_device!(contiguous, contiguous_cpu, contiguous_gpu, contiguous_metal); #[test] fn strided_blocks() -> Result<()> { use candle::Device::Cpu; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; match tensor.strided_blocks() { candle::StridedBlocks::SingleBlock { start_offset, len } => { assert_eq!(start_offset, 0); assert_eq!(len, 24); } candle::StridedBlocks::MultipleBlocks { .. } => { panic!("unexpected block structure") } }; let tensor = Tensor::arange(0u32, 26u32, &Cpu)? .i(2..)? .reshape((2, 3, 4))?; match tensor.strided_blocks() { candle::StridedBlocks::SingleBlock { start_offset, len } => { assert_eq!(start_offset, 2); assert_eq!(len, 24); } candle::StridedBlocks::MultipleBlocks { .. } => { panic!("unexpected block structure") } }; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; let tensor = tensor.i(1)?; match tensor.strided_blocks() { candle::StridedBlocks::SingleBlock { start_offset, len } => { assert_eq!(start_offset, 12); assert_eq!(len, 12); } candle::StridedBlocks::MultipleBlocks { .. } => { panic!("unexpected block structure") } }; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; let tensor = tensor.i((.., 1))?; match tensor.strided_blocks() { candle::StridedBlocks::SingleBlock { start_offset, len } => { assert_eq!(start_offset, 0); assert_eq!(len, 8); assert_eq!(tensor.to_vec2::<u32>()?, &[[4, 5, 6, 7], [16, 17, 18, 19]]); } candle::StridedBlocks::MultipleBlocks { .. } => { panic!("unexpected block structure") } }; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; match tensor.t()?.strided_blocks() { candle::StridedBlocks::SingleBlock { .. } => { panic!("unexpected block structure") } candle::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { assert_eq!(block_len, 1); assert_eq!( block_start_index.collect::<Vec<_>>(), &[ 0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11, 12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23 ] ) } }; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; match tensor.transpose(0, 1)?.strided_blocks() { candle::StridedBlocks::SingleBlock { .. 
} => { panic!("unexpected block structure") } candle::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { assert_eq!(block_len, 4); assert_eq!( block_start_index.collect::<Vec<_>>(), &[0, 12, 4, 16, 8, 20] ) } }; Ok(()) }
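
// A hedged sketch, not from the upstream suite, tying the two ideas above together:
// a transpose flips `is_contiguous()`, and calling `.contiguous()` copies the data
// back into a single dense block. Shapes here are illustrative.
#[test]
fn transpose_contiguity_sketch() -> Result<()> {
    let t = Tensor::arange(0u32, 6u32, &Device::Cpu)?.reshape((2, 3))?;
    assert!(t.is_contiguous());
    let tt = t.t()?;
    assert!(!tt.is_contiguous());
    assert!(tt.contiguous()?.is_contiguous());
    Ok(())
}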
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/display_tests.rs
use anyhow::Result; use candle_core::{DType, Device::Cpu, Tensor}; #[test] fn display_scalar() -> Result<()> { let t = Tensor::new(1234u32, &Cpu)?; let s = format!("{t}"); assert_eq!(&s, "[1234]\nTensor[[], u32]"); let t = t.to_dtype(DType::F32)?.neg()?; let s = format!("{}", (&t / 10.0)?); assert_eq!(&s, "[-123.4000]\nTensor[[], f32]"); let s = format!("{}", (&t / 1e8)?); assert_eq!(&s, "[-1.2340e-5]\nTensor[[], f32]"); let s = format!("{}", (&t * 1e8)?); assert_eq!(&s, "[-1.2340e11]\nTensor[[], f32]"); let s = format!("{}", (&t * 0.)?); assert_eq!(&s, "[0.]\nTensor[[], f32]"); Ok(()) } #[test] fn display_vector() -> Result<()> { let t = Tensor::new::<&[u32; 0]>(&[], &Cpu)?; let s = format!("{t}"); assert_eq!(&s, "[]\nTensor[[0], u32]"); let t = Tensor::new(&[0.1234567, 1.0, -1.2, 4.1, f64::NAN], &Cpu)?; let s = format!("{t}"); assert_eq!( &s, "[ 0.1235, 1.0000, -1.2000, 4.1000, NaN]\nTensor[[5], f64]" ); let t = (Tensor::ones(50, DType::F32, &Cpu)? * 42.)?; let s = format!("\n{t}"); let expected = r#" [42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42.] Tensor[[50], f32]"#; assert_eq!(&s, expected); let t = (Tensor::ones(11000, DType::F32, &Cpu)? * 42.)?; let s = format!("{t}"); assert_eq!( &s, "[42., 42., 42., ..., 42., 42., 42.]\nTensor[[11000], f32]" ); Ok(()) } #[test] fn display_multi_dim() -> Result<()> { let t = (Tensor::ones((200, 100), DType::F32, &Cpu)? * 42.)?; let s = format!("\n{t}"); let expected = r#" [[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]] Tensor[[200, 100], f32]"#; assert_eq!(&s, expected); let t = t.reshape(&[2, 1, 1, 100, 100])?; let t = format!("\n{t}"); let expected = r#" [[[[[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]]]], [[[[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]]]]] Tensor[[2, 1, 1, 100, 100], f32]"#; assert_eq!(&t, expected); Ok(()) }
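
// A hedged sketch, not from the upstream suite: whatever the value formatting,
// the Display output above always ends with a `Tensor[[shape], dtype]` summary
// line, which can be checked without pinning down the exact value layout.
#[test]
fn display_summary_line() -> Result<()> {
    let t = Tensor::new(&[1u32, 2, 3], &Cpu)?;
    let s = format!("{t}");
    assert!(s.ends_with("Tensor[[3], u32]"));
    Ok(())
}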
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/quantized_tests.rs
use candle_core::{ quantized::{self, GgmlDType}, test_utils::to_vec2_round, Device, Module, Result, Tensor, }; use quantized::{k_quants, GgmlType}; use rand::prelude::*; const GGML_TEST_SIZE: usize = 32 * 128; const GGML_MAX_QUANTIZATION_TOTAL_ERROR: f32 = 0.002; const GGML_MAX_QUANTIZATION_TOTAL_ERROR_2BITS: f32 = 0.0075; const GGML_MAX_QUANTIZATION_TOTAL_ERROR_3BITS: f32 = 0.0040; const GGML_MAX_DOT_PRODUCT_ERROR: f32 = 0.02; #[test] fn quantized_matmul() -> Result<()> { let cpu = &Device::Cpu; let (m, k, n) = (3, 64, 4); let lhs = (0..(m * k)).map(|v| v as f32).collect::<Vec<_>>(); let tensor_lhs = Tensor::from_slice(&lhs, (m, k), cpu)?; let mut dst = vec![42.; 3 * 4]; let mut rhs_t = vec![k_quants::BlockQ4_0::zeros(); 8]; let rhs = (0..(k * n)).map(|v| v as f32).collect::<Vec<_>>(); let tensor_rhs = Tensor::from_slice(&rhs, (n, k), cpu)?.t()?; k_quants::BlockQ4_0::from_float(&rhs, &mut rhs_t)?; k_quants::matmul((m, k, n), &lhs, &rhs_t, &mut dst)?; assert_eq!( dst.iter().map(|x| x.round()).collect::<Vec<_>>(), &[ 85120.0, 214562.0, 345455.0, 474748.0, 213475.0, 604465.0, 1000686.0, 1388317.0, 341876.0, 994283.0, 1655709.0, 2301518.0 ] ); let mm = tensor_lhs.matmul(&tensor_rhs)?; assert_eq!( mm.to_vec2::<f32>()?, &[ [85344.0, 214368.0, 343392.0, 472416.0], [214368.0, 605536.0, 996704.0, 1387872.0], [343392.0, 996704.0, 1650016.0, 2303328.0] ] ); let qtensor = quantized::QTensor::new(rhs_t, (4, 64))?; let matmul = quantized::QMatMul::from_qtensor(qtensor)?; let res = matmul.forward(&tensor_lhs)?; assert_eq!( to_vec2_round(&res, 0)?, &[ [85120.0, 214562.0, 345455.0, 474748.0], [213475.0, 604465.0, 1000686.0, 1388317.0], [341876.0, 994283.0, 1655709.0, 2301518.0] ] ); Ok(()) } #[test] fn quantized_matmul_neg() -> Result<()> { let cpu = &Device::Cpu; let (m, k, n) = (3, 64, 4); let lhs = (0..(m * k)) .map(|v| v as f32 - (m * k) as f32 / 2.0) .collect::<Vec<_>>(); let tensor_lhs = Tensor::from_slice(&lhs, (m, k), cpu)?; let mut dst = vec![42.; 3 * 4]; let mut rhs_t = vec![k_quants::BlockQ4_0::zeros(); 8]; let rhs = (0..k * n) .map(|v| v as f32 - (k * n) as f32 / 3.0) .collect::<Vec<_>>(); let tensor_rhs = Tensor::from_slice(&rhs, (n, k), cpu)?.t()?; k_quants::BlockQ4_0::from_float(&rhs, &mut rhs_t)?; k_quants::matmul((m, k, n), &lhs, &rhs_t, &mut dst)?; assert_eq!( dst.iter().map(|x| x.round()).collect::<Vec<_>>(), &[ 243524.0, -19596.0, -285051.0, -549815.0, 23777.0, 21651.0, 19398.0, 18367.0, -196472.0, 63012.0, 324585.0, 587902.0 ] ); let mm = tensor_lhs.matmul(&tensor_rhs)?; assert_eq!( to_vec2_round(&mm, 0)?, &[ [244064.0, -20128.0, -284320.0, -548512.0], [23563.0, 21515.0, 19467.0, 17419.0], [-196939.0, 63157.0, 323253.0, 583349.0] ] ); let qtensor = quantized::QTensor::new(rhs_t, (4, 64))?; let matmul = quantized::QMatMul::from_qtensor(qtensor)?; let res = matmul.forward(&tensor_lhs)?; assert_eq!( to_vec2_round(&res, 0)?, &[ [243524.0, -19596.0, -285051.0, -549815.0], [23777.0, 21651.0, 19398.0, 18367.0], [-196472.0, 63012.0, 324585.0, 587902.0] ] ); Ok(()) } #[test] fn quantize_q4_0() -> Result<()> { use k_quants::BlockQ4_0; let src = (0..32 * 4).map(|v| v as f32).collect::<Vec<_>>(); let mut dst = vec![0f32; 32 * 4]; let mut quant = vec![BlockQ4_0::zeros(); 4]; BlockQ4_0::from_float(&src, &mut quant)?; BlockQ4_0::to_float(&quant, dst.as_mut_slice())?; assert_eq!( dst, &[ -0.0, -0.0, 3.875, 3.875, 3.875, 3.875, 7.75, 7.75, 7.75, 7.75, 11.625, 11.625, 11.625, 11.625, 15.5, 15.5, 15.5, 15.5, 19.375, 19.375, 19.375, 19.375, 23.25, 23.25, 23.25, 23.25, 27.125, 27.125, 27.125, 27.125, 
31.0, 31.0, 31.5, 31.5, 31.5, 31.5, 39.375, 39.375, 39.375, 39.375, 39.375, 39.375, 39.375, 39.375, 47.25, 47.25, 47.25, 47.25, 47.25, 47.25, 47.25, 47.25, 55.125, 55.125, 55.125, 55.125, 55.125, 55.125, 55.125, 55.125, 63.0, 63.0, 63.0, 63.0, 59.375, 59.375, 71.25, 71.25, 71.25, 71.25, 71.25, 71.25, 71.25, 71.25, 71.25, 71.25, 71.25, 71.25, 83.125, 83.125, 83.125, 83.125, 83.125, 83.125, 83.125, 83.125, 83.125, 83.125, 83.125, 83.125, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0, 95.25, 95.25, 95.25, 95.25, 95.25, 95.25, 95.25, 95.25, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 127.0, 127.0, 127.0, 127.0, 127.0, 127.0, 127.0, 127.0 ] ); ggml_quantization_error_test::<BlockQ4_0>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?; Ok(()) } #[test] fn quantize_q4_1() -> Result<()> { use k_quants::BlockQ4_1; let src = (0..32 * 4).map(|v| v as f32).collect::<Vec<_>>(); let mut dst = vec![0f32; 32 * 4]; let mut quant = vec![BlockQ4_1::zeros(); 4]; BlockQ4_1::from_float(&src, &mut quant)?; BlockQ4_1::to_float(&quant, dst.as_mut_slice())?; assert_eq!( round_vector(&dst), &[ 0.0, 0.0, 2.066, 2.066, 4.133, 4.133, 6.199, 6.199, 8.266, 8.266, 10.332, 10.332, 12.398, 12.398, 14.465, 14.465, 16.531, 16.531, 18.598, 18.598, 20.664, 20.664, 22.73, 22.73, 24.797, 24.797, 26.863, 26.863, 28.93, 28.93, 30.996, 30.996, 32.0, 32.0, 34.066, 34.066, 36.133, 36.133, 38.199, 38.199, 40.266, 40.266, 42.332, 42.332, 44.398, 44.398, 46.465, 46.465, 48.531, 48.531, 50.598, 50.598, 52.664, 52.664, 54.73, 54.73, 56.797, 56.797, 58.863, 58.863, 60.93, 60.93, 62.996, 62.996, 64.0, 64.0, 66.066, 66.066, 68.133, 68.133, 70.199, 70.199, 72.266, 72.266, 74.332, 74.332, 76.398, 76.398, 78.465, 78.465, 80.531, 80.531, 82.598, 82.598, 84.664, 84.664, 86.73, 86.73, 88.797, 88.797, 90.863, 90.863, 92.93, 92.93, 94.996, 94.996, 96.0, 96.0, 98.066, 98.066, 100.133, 100.133, 102.199, 102.199, 104.266, 104.266, 106.332, 106.332, 108.398, 108.398, 110.465, 110.465, 112.531, 112.531, 114.598, 114.598, 116.664, 116.664, 118.73, 118.73, 120.797, 120.797, 122.863, 122.863, 124.93, 124.93, 126.996, 126.996 ] ); ggml_quantization_error_test::<BlockQ4_1>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?; Ok(()) } #[test] fn quantize_q5_0() -> Result<()> { use k_quants::BlockQ5_0; let src = (0..32 * 4).map(|v| v as f32).collect::<Vec<_>>(); let mut dst = vec![0f32; 32 * 4]; let mut quant = vec![BlockQ5_0::zeros(); 4]; BlockQ5_0::from_float(&src, &mut quant)?; BlockQ5_0::to_float(&quant, dst.as_mut_slice())?; assert_eq!( round_vector(&dst), &[ -0.0, 1.938, 1.938, 3.875, 3.875, 5.813, 5.813, 7.75, 7.75, 9.688, 9.688, 11.625, 11.625, 13.563, 13.563, 15.5, 15.5, 17.438, 17.438, 19.375, 19.375, 21.313, 21.313, 23.25, 23.25, 25.188, 25.188, 27.125, 27.125, 29.063, 29.063, 31.0, 31.5, 31.5, 35.438, 35.438, 35.438, 35.438, 39.375, 39.375, 39.375, 39.375, 43.313, 43.313, 43.313, 43.313, 47.25, 47.25, 47.25, 47.25, 51.188, 51.188, 51.188, 51.188, 55.125, 55.125, 55.125, 55.125, 59.063, 59.063, 59.063, 59.063, 63.0, 63.0, 65.313, 65.313, 65.313, 65.313, 65.313, 71.25, 71.25, 71.25, 71.25, 71.25, 71.25, 77.188, 77.188, 77.188, 77.188, 77.188, 77.188, 83.125, 83.125, 83.125, 83.125, 83.125, 83.125, 89.063, 89.063, 89.063, 89.063, 89.063, 89.063, 95.0, 95.0, 95.0, 95.25, 95.25, 95.25, 95.25, 103.188, 103.188, 103.188, 103.188, 103.188, 103.188, 103.188, 103.188, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 119.063, 119.063, 119.063, 119.063, 119.063, 119.063, 119.063, 
119.063, 127.0, 127.0, 127.0, 127.0 ] ); ggml_quantization_error_test::<BlockQ5_0>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?; Ok(()) } #[test] fn quantize_q5_1() -> Result<()> { use k_quants::BlockQ5_1; let src = (0..32 * 4).map(|v| v as f32).collect::<Vec<_>>(); let mut dst = vec![0f32; 32 * 4]; let mut quant = vec![BlockQ5_1::zeros(); 4]; BlockQ5_1::from_float(&src, &mut quant)?; BlockQ5_1::to_float(&quant, dst.as_mut_slice())?; assert_eq!( dst, &[ 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0, 44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0, 58.0, 59.0, 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0, 72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 84.0, 85.0, 86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0, 100.0, 101.0, 102.0, 103.0, 104.0, 105.0, 106.0, 107.0, 108.0, 109.0, 110.0, 111.0, 112.0, 113.0, 114.0, 115.0, 116.0, 117.0, 118.0, 119.0, 120.0, 121.0, 122.0, 123.0, 124.0, 125.0, 126.0, 127.0 ] ); ggml_quantization_error_test::<BlockQ5_1>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?; Ok(()) } /// Generates a small test vector ranging from -`bound` to `bound` with `size` steps fn get_test_vector(bound: f32, size: usize) -> (Vec<f32>, Vec<f32>) { assert!( size % crate::quantized::k_quants::QK_K == 0, "size must be a multiple of {}", crate::quantized::k_quants::QK_K ); let src = (0..size) .map(|v| (v as f32 - size as f32 / 2.) * bound / (size as f32 / 2.)) .collect::<Vec<_>>(); let dst = vec![0f32; size]; assert_eq!([src[0], src[size / 2]], [-bound, 0.0]); (src, dst) } /// Round a vector fn round_vector(values: &[f32]) -> Vec<f32> { values .iter() .map(|x| (1000. * x).round() / 1000.) .collect::<Vec<_>>() } fn compare_with_error(values: &[f32], expected: &[f32], tolerance: f32) { for (i, (value, expected_value)) in values.iter().zip(expected.iter()).enumerate() { let difference = (value - expected_value).abs(); assert!( difference < tolerance, "Error at index {}: value = {}, expected = {}. 
Difference = {} exceeds tolerance = {}.",
            i,
            value,
            expected_value,
            difference,
            tolerance
        );
    }
}

/// Creates a vector similarly to the one used in GGML unit tests: https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L26-L30
fn create_ggml_like_vector(offset: f32) -> Vec<f32> {
    (0..GGML_TEST_SIZE)
        .map(|i| 0.1 + 2.0 * (i as f32 + offset).cos())
        .collect()
}

/// Calculates the root mean square error between two vectors
fn calculate_rmse(a: &[f32], b: &[f32]) -> f32 {
    assert_eq!(a.len(), b.len());
    let sum = a
        .iter()
        .zip(b)
        .map(|(a, b)| (a - b).powi(2))
        .sum::<f32>()
        .sqrt();
    sum / a.len() as f32
}

/// Mirrors the GGML quantization unit test: https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L43-L50
fn ggml_quantization_error_test<T: GgmlType>(max_error: f32) -> Result<()> {
    let src = create_ggml_like_vector(0.0);
    let mut dst = vec![0.0; GGML_TEST_SIZE];
    let _quant = quantize_roundtrip::<T>(src.as_slice(), dst.as_mut_slice())?;
    let error = calculate_rmse(src.as_slice(), dst.as_slice());
    if error > max_error {
        candle_core::bail!(
            "Quantization error {} exceeds max error {}",
            error,
            max_error
        );
    }
    Ok(())
}

fn quantize_roundtrip<T: GgmlType>(src: &[f32], dst: &mut [f32]) -> Result<Vec<T>> {
    let mut quant = vec![T::zeros(); src.len() / T::BLCK_SIZE];
    T::from_float(src, &mut quant)?;
    T::to_float(&quant, dst)?;
    Ok(quant)
}

#[test]
fn quantize_q2k() -> Result<()> {
    use k_quants::BlockQ2K;
    let (src, mut dst) = get_test_vector(0.5, 1024);
    let _quant = quantize_roundtrip::<BlockQ2K>(src.as_slice(), dst.as_mut_slice())?;
    compare_with_error(dst.as_slice(), src.as_slice(), 0.1);

    // Test some specific values
    assert_eq!(
        [src[0], src[128], src[256], src[512], src[800], src[1023]],
        [-0.5, -0.375, -0.25, 0.0, 0.28125, 0.49902344]
    );
    let dst = round_vector(&dst);
    assert_eq!(
        [dst[0], dst[128], dst[256], dst[512], dst[800], dst[1023]],
        [-0.499, -0.366, -0.249, 0.0, 0.295, 0.492]
    );

    let (src_big, mut dst_big) = get_test_vector(128.0, 1024);
    let _quant_big = quantize_roundtrip::<BlockQ2K>(src_big.as_slice(), dst_big.as_mut_slice())?;
    compare_with_error(dst_big.as_slice(), src_big.as_slice(), 6.0);

    ggml_quantization_error_test::<BlockQ2K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR_2BITS)?;
    Ok(())
}

#[test]
fn quantize_q3k() -> Result<()> {
    use k_quants::BlockQ3K;
    let (src, mut dst) = get_test_vector(0.5, 1024);
    let _quant = quantize_roundtrip::<BlockQ3K>(src.as_slice(), dst.as_mut_slice())?;
    compare_with_error(dst.as_slice(), src.as_slice(), 0.03);

    // Test some specific values
    assert_eq!(
        [src[0], src[128], src[256], src[512], src[800], src[1023]],
        [-0.5, -0.375, -0.25, 0.0, 0.28125, 0.49902344]
    );
    let dst = round_vector(&dst);
    assert_eq!(
        [dst[0], dst[128], dst[256], dst[512], dst[800], dst[1023]],
        [-0.493, -0.37, -0.243, -0.0, 0.292, 0.492]
    );

    let (src_big, mut dst_big) = get_test_vector(128.0, 1024);
    let _quant_big = quantize_roundtrip::<BlockQ3K>(src_big.as_slice(), dst_big.as_mut_slice())?;
    compare_with_error(dst_big.as_slice(), src_big.as_slice(), 3.5);

    ggml_quantization_error_test::<BlockQ3K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR_3BITS)?;
    Ok(())
}

#[test]
fn quantize_q4k() -> Result<()> {
    use k_quants::BlockQ4K;
    let (src, mut dst) = get_test_vector(0.5, 1024);
    let _quant = quantize_roundtrip::<BlockQ4K>(src.as_slice(), dst.as_mut_slice())?;
    compare_with_error(dst.as_slice(), src.as_slice(), 0.017);

    // Test some specific values
    assert_eq!(
        [src[0], src[128], src[256], src[512], src[800], src[1023]],
        [-0.5, -0.375, -0.25, 0.0, 0.28125, 0.49902344]
    );
    let dst =
round_vector(&dst); assert_eq!( [dst[0], dst[128], dst[256], dst[512], dst[800], dst[1023]], [-0.5, -0.373, -0.25, 0.0, 0.288, 0.498] ); let (src_big, mut dst_big) = get_test_vector(128.0, 1024); let _quant_big = quantize_roundtrip::<BlockQ4K>(src_big.as_slice(), dst_big.as_mut_slice())?; compare_with_error(dst_big.as_slice(), src_big.as_slice(), 4.5); ggml_quantization_error_test::<BlockQ4K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?; Ok(()) } #[test] fn quantize_q5k() -> Result<()> { use k_quants::BlockQ5K; let (src, mut dst) = get_test_vector(0.5, 1024); let _quant = quantize_roundtrip::<BlockQ5K>(src.as_slice(), dst.as_mut_slice())?; compare_with_error(dst.as_slice(), src.as_slice(), 0.008); // Test some specific values assert_eq!( [src[0], src[128], src[256], src[512], src[800], src[1023]], [-0.5, -0.375, -0.25, 0.0, 0.28125, 0.49902344] ); let dst = round_vector(&dst); assert_eq!( [dst[0], dst[128], dst[256], dst[512], dst[800], dst[1023]], [-0.499, -0.372, -0.249, 0.001, 0.279, 0.499] ); let (src_big, mut dst_big) = get_test_vector(128.0, 1024); let _quant_big = quantize_roundtrip::<BlockQ5K>(src_big.as_slice(), dst_big.as_mut_slice())?; compare_with_error(dst_big.as_slice(), src_big.as_slice(), 2.5); ggml_quantization_error_test::<BlockQ5K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?; Ok(()) } #[test] fn quantize_q6k() -> Result<()> { use k_quants::BlockQ6K; let (src, mut dst) = get_test_vector(0.5, 1024); let _quant = quantize_roundtrip::<BlockQ6K>(src.as_slice(), dst.as_mut_slice())?; compare_with_error(dst.as_slice(), src.as_slice(), 0.008); // Test some specific values assert_eq!( [src[0], src[128], src[256], src[512], src[800], src[1023]], [-0.5, -0.375, -0.25, 0.0, 0.28125, 0.49902344] ); let dst = round_vector(&dst); assert_eq!( [dst[0], dst[128], dst[256], dst[512], dst[800], dst[1023]], [-0.497, -0.372, -0.25, -0.0, 0.284, 0.5] ); let (src_big, mut dst_big) = get_test_vector(128.0, 1024); let _quant_big = quantize_roundtrip::<BlockQ6K>(src_big.as_slice(), dst_big.as_mut_slice())?; compare_with_error(dst_big.as_slice(), src_big.as_slice(), 2.0); ggml_quantization_error_test::<BlockQ6K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?; Ok(()) } #[test] fn quantize_q8k() -> Result<()> { use k_quants::BlockQ8K; let (src, mut dst) = get_test_vector(0.5, 1024); let _quant = quantize_roundtrip::<BlockQ8K>(src.as_slice(), dst.as_mut_slice())?; compare_with_error(dst.as_slice(), src.as_slice(), 0.003); // Test some specific values assert_eq!( [src[0], src[128], src[256], src[512], src[800], src[1023]], [-0.5, -0.375, -0.25, 0.0, 0.28125, 0.49902344] ); let dst = round_vector(&dst); assert_eq!( [dst[0], dst[128], dst[256], dst[512], dst[800], dst[1023]], [-0.5, -0.375, -0.25, -0.0, 0.281, 0.499] ); let (src_big, mut dst_big) = get_test_vector(128.0, 1024); let _quant_big = quantize_roundtrip::<BlockQ8K>(src_big.as_slice(), dst_big.as_mut_slice())?; compare_with_error(dst_big.as_slice(), src_big.as_slice(), 0.6); ggml_quantization_error_test::<BlockQ8K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?; Ok(()) } /// Very simple dot product implementation fn vec_dot_reference(a: &[f32], b: &[f32]) -> f32 { a.iter().zip(b).map(|(a, b)| a * b).sum() } /// Returns the error achieved by the GGML matmul unit test. 
fn ggml_reference_matmul_error(dtype: GgmlDType) -> Result<f32> {
    let err = match dtype {
        GgmlDType::F16 => 0.000010,
        GgmlDType::Q2K => 0.004086,
        GgmlDType::Q3K => 0.016148,
        GgmlDType::Q4K => 0.002425,
        GgmlDType::Q5K => 0.000740,
        GgmlDType::Q6K => 0.000952,
        GgmlDType::Q4_0 => 0.001143,
        GgmlDType::Q4_1 => 0.007784,
        GgmlDType::Q5_0 => 0.001353,
        GgmlDType::Q5_1 => 0.001363,
        GgmlDType::Q8_0 => 0.000092,
        // Not from the ggml repo.
        GgmlDType::Q8K => 0.00065,
        _ => candle_core::bail!("No GGML results for quantization type {dtype:?}",),
    };
    Ok(err)
}

/// Mirrors the GGML matmul unit test: https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L76-L91
fn ggml_matmul_error_test<T: GgmlType>() -> Result<()> {
    let a = create_ggml_like_vector(0.0);
    let b = create_ggml_like_vector(1.0);
    let length = a.len();
    let mut a_quant = vec![T::zeros(); length / T::BLCK_SIZE];
    let mut b_quant = vec![T::VecDotType::zeros(); length / T::VecDotType::BLCK_SIZE];
    T::from_float(&a, &mut a_quant)?;
    T::VecDotType::from_float(&b, &mut b_quant)?;

    let result = T::vec_dot(length, &a_quant, &b_quant)?;
    let result_unopt = T::vec_dot_unopt(length, &a_quant, &b_quant)?;
    let reference_result = vec_dot_reference(&a, &b);

    if (result - result_unopt).abs() / length as f32 > 1e-6 {
        candle_core::bail!(
            "the opt and unopt vec-dot returned different values, opt {result}, unopt {result_unopt}"
        )
    }

    let error = (result - reference_result).abs() / length as f32;
    let ggml_error = ggml_reference_matmul_error(T::DTYPE)?;

    if !error.is_finite() || error > GGML_MAX_DOT_PRODUCT_ERROR {
        candle_core::bail!(
            "Dot product error {error} exceeds max error {GGML_MAX_DOT_PRODUCT_ERROR}",
        );
    }

    // We diverge slightly from GGML due to different rounding behavior / f16 to f32
    // conversions, so we allow a slightly higher error threshold.
    const ERROR_LENIENCY: f32 = 0.00001;
    if error - ERROR_LENIENCY > ggml_error {
        candle_core::bail!(
            "Dot product error {} exceeds ggml reference error {}",
            error,
            ggml_error
        );
    }
    Ok(())
}

/// Generates random tensors of size `m x k` and `n x k` and calculates their expected matrix multiplication result.
fn get_random_tensors( m: usize, k: usize, n: usize, device: &Device, ) -> Result<(Tensor, Tensor, Tensor)> { let mut rng = StdRng::seed_from_u64(314159265358979); let lhs = (0..m * k) .map(|_| rng.gen::<f32>() - 0.5) .collect::<Vec<_>>(); let rhs = (0..n * k) .map(|_| rng.gen::<f32>() - 0.5) .collect::<Vec<_>>(); let lhs = Tensor::from_vec(lhs, (m, k), device)?; let rhs = Tensor::from_vec(rhs, (n, k), device)?; let mm = lhs.matmul(&rhs.t()?)?; Ok((lhs, rhs, mm)) } #[test] fn quantized_matmul_q2k() -> Result<()> { use k_quants::BlockQ2K; let cpu = &Device::Cpu; let (m, k, n) = (11, 512, 21); let (lhs, rhs, mm) = get_random_tensors(m, k, n, cpu)?; assert_eq!(mm.dims(), [m, n]); let dst = mm.flatten_all()?.to_vec1::<f32>()?; let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]); assert_eq!(dst, [1.262, 1.513, -0.208, 1.702]); let rhs = quantized::QTensor::quantize::<BlockQ2K>(&rhs)?; let rhs = quantized::QMatMul::from_qtensor(rhs)?; let mm = rhs.forward(&lhs)?; assert_eq!(mm.dims(), [m, n]); let dst = mm.flatten_all()?.to_vec1::<f32>()?; let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]); assert_eq!(dst, [0.916, 0.422, 0.215, 1.668]); ggml_matmul_error_test::<BlockQ2K>()?; Ok(()) } #[test] fn quantized_matmul_q3k() -> Result<()> { use k_quants::BlockQ3K; let cpu = &Device::Cpu; let (m, k, n) = (11, 512, 21); let (lhs, rhs, mm) = get_random_tensors(m, k, n, cpu)?; assert_eq!(mm.dims(), [m, n]); let dst = mm.flatten_all()?.to_vec1::<f32>()?; let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]); assert_eq!(dst, [1.262, 1.513, -0.208, 1.702]); let rhs = quantized::QTensor::quantize::<BlockQ3K>(&rhs)?; let rhs = quantized::QMatMul::from_qtensor(rhs)?; let mm = rhs.forward(&lhs)?; assert_eq!(mm.dims(), [m, n]); let dst = mm.flatten_all()?.to_vec1::<f32>()?; let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]); assert_eq!(dst, [1.029, 1.418, -0.314, 1.495]); ggml_matmul_error_test::<BlockQ3K>()?; Ok(()) } #[test] fn quantized_matmul_q4k() -> Result<()> { use k_quants::BlockQ4K; let cpu = &Device::Cpu; let (m, k, n) = (11, 512, 21); let (lhs, rhs, mm) = get_random_tensors(m, k, n, cpu)?; assert_eq!(mm.dims(), [m, n]); let dst = mm.flatten_all()?.to_vec1::<f32>()?; let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]); assert_eq!(dst, [1.262, 1.513, -0.208, 1.702]); let rhs = quantized::QTensor::quantize::<BlockQ4K>(&rhs)?; let rhs = quantized::QMatMul::from_qtensor(rhs)?; let mm = rhs.forward(&lhs)?; assert_eq!(mm.dims(), [m, n]); let dst = mm.flatten_all()?.to_vec1::<f32>()?; let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]); assert_eq!(dst, [1.125, 1.435, -0.201, 1.589]); ggml_matmul_error_test::<BlockQ4K>()?; Ok(()) } #[test] fn quantized_matmul_q5k() -> Result<()> { use k_quants::BlockQ5K; let cpu = &Device::Cpu; let (m, k, n) = (11, 512, 21); let (lhs, rhs, mm) = get_random_tensors(m, k, n, cpu)?; assert_eq!(mm.dims(), [m, n]); let dst = mm.flatten_all()?.to_vec1::<f32>()?; let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]); assert_eq!(dst, [1.262, 1.513, -0.208, 1.702]); let rhs = quantized::QTensor::quantize::<BlockQ5K>(&rhs)?; let rhs = quantized::QMatMul::from_qtensor(rhs)?; let mm = rhs.forward(&lhs)?; assert_eq!(mm.dims(), [m, n]); let dst = mm.flatten_all()?.to_vec1::<f32>()?; let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 
1]]); assert_eq!(dst, [1.192, 1.491, -0.18, 1.743]); //Expected: 0.000740408897 ggml_matmul_error_test::<BlockQ5K>()?; Ok(()) } #[test] fn quantized_matmul_q6k() -> Result<()> { use k_quants::BlockQ6K; let cpu = &Device::Cpu; let (m, k, n) = (11, 512, 21); let (lhs, rhs, mm) = get_random_tensors(m, k, n, cpu)?; assert_eq!(mm.dims(), [m, n]); let dst = mm.flatten_all()?.to_vec1::<f32>()?; let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]); assert_eq!(dst, [1.262, 1.513, -0.208, 1.702]); let rhs = quantized::QTensor::quantize::<BlockQ6K>(&rhs)?; let rhs = quantized::QMatMul::from_qtensor(rhs)?; let mm = rhs.forward(&lhs)?; assert_eq!(mm.dims(), [m, n]); let dst = mm.flatten_all()?.to_vec1::<f32>()?; let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]); assert_eq!(dst, [1.324, 1.49, -0.164, 1.741]); ggml_matmul_error_test::<BlockQ6K>()?; Ok(()) } #[test] fn quantized_matmul_q8k() -> Result<()> { use k_quants::BlockQ8K; let cpu = &Device::Cpu; let (m, k, n) = (11, 512, 21); let (lhs, rhs, mm) = get_random_tensors(m, k, n, cpu)?; assert_eq!(mm.dims(), [m, n]); let dst = mm.flatten_all()?.to_vec1::<f32>()?; let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]); assert_eq!(dst, [1.262, 1.513, -0.208, 1.702]); let rhs = quantized::QTensor::quantize::<BlockQ8K>(&rhs)?; let rhs = quantized::QMatMul::from_qtensor(rhs)?; let mm = rhs.forward(&lhs)?; assert_eq!(mm.dims(), [m, n]); let dst = mm.flatten_all()?.to_vec1::<f32>()?; let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]); assert_eq!(dst, [1.266, 1.504, -0.204, 1.7]); ggml_matmul_error_test::<BlockQ8K>()?; Ok(()) }
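
// A hedged sketch, not from the upstream suite, showing the whole-tensor
// counterpart of quantize_roundtrip above: quantize a Tensor into a QTensor,
// dequantize it back, and reuse compare_with_error. The shape, values, and the
// 0.1 tolerance are illustrative (merely loose enough for Q8_0 blocks).
#[test]
fn qtensor_roundtrip_q8_0() -> Result<()> {
    use k_quants::BlockQ8_0;
    let cpu = &Device::Cpu;
    let src = (0..64).map(|v| v as f32 / 4.0).collect::<Vec<_>>();
    // The last dimension must be a multiple of the 32-element block size.
    let t = Tensor::from_vec(src.clone(), (2, 32), cpu)?;
    let qt = quantized::QTensor::quantize::<BlockQ8_0>(&t)?;
    let back = qt.dequantize(cpu)?.flatten_all()?.to_vec1::<f32>()?;
    compare_with_error(&back, &src, 0.1);
    Ok(())
}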
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/tensor_tests.rs
use candle_core::{test_device, test_utils, DType, Device, IndexOp, Result, Tensor}; fn zeros(device: &Device) -> Result<()> { let tensor = Tensor::zeros((5, 2), DType::F32, device)?; let (dim1, dim2) = tensor.dims2()?; assert_eq!(dim1, 5); assert_eq!(dim2, 2); Ok(()) } fn ones(device: &Device) -> Result<()> { assert_eq!( Tensor::ones((2, 3), DType::U8, device)?.to_vec2::<u8>()?, [[1, 1, 1], [1, 1, 1]], ); assert_eq!( Tensor::ones((2, 3), DType::U32, device)?.to_vec2::<u32>()?, [[1, 1, 1], [1, 1, 1]], ); assert_eq!( Tensor::ones((2, 3), DType::I64, device)?.to_vec2::<i64>()?, [[1, 1, 1], [1, 1, 1]], ); assert_eq!( Tensor::ones((2, 3), DType::F32, device)?.to_vec2::<f32>()?, [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ); assert_eq!( Tensor::ones((2, 3), DType::F64, device)?.to_vec2::<f64>()?, [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]], ); Ok(()) } fn arange(device: &Device) -> Result<()> { assert_eq!( Tensor::arange(0u8, 5u8, device)?.to_vec1::<u8>()?, [0, 1, 2, 3, 4], ); assert_eq!( Tensor::arange_step(0u8, 5u8, 2, device)?.to_vec1::<u8>()?, [0, 2, 4], ); assert_eq!( Tensor::arange_step(0u8, 5u8, 3, device)?.to_vec1::<u8>()?, [0, 3], ); assert_eq!( Tensor::arange_step(5i64, 0i64, -1, device)?.to_vec1::<i64>()?, [5, 4, 3, 2, 1], ); Ok(()) } fn add_mul(device: &Device) -> Result<()> { let tensor = Tensor::new(&[3f32, 1., 4.], device)?; let dim1 = tensor.dims1()?; assert_eq!(dim1, 3); let content: Vec<f32> = tensor.to_vec1()?; assert_eq!(content, [3., 1., 4.]); let tensor = Tensor::add(&tensor, &tensor)?; let content: Vec<f32> = tensor.to_vec1()?; assert_eq!(content, [6., 2., 8.]); let tensor = Tensor::mul(&tensor, &tensor)?; let content: Vec<f32> = tensor.to_vec1()?; assert_eq!(content, [36., 4., 64.]); Ok(()) } fn tensor_2d(device: &Device) -> Result<()> { let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let tensor = Tensor::new(data, device)?; let dims = tensor.dims2()?; assert_eq!(dims, (2, 5)); let content: Vec<Vec<f32>> = tensor.to_vec2()?; assert_eq!(content, data); Ok(()) } fn clamp(device: &Device) -> Result<()> { let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let tensor = Tensor::new(data, device)?; let tensor = tensor.clamp(1.5, 6.2)?; assert_eq!( tensor.to_vec2::<f32>()?, [[3.0, 1.5, 4.0, 1.5, 5.0], [2.0, 1.5, 6.2, 6.2, 2.0]], ); Ok(()) } fn unary_op(device: &Device) -> Result<()> { let data = &[[-3f32, 1., 4., -0.1, 0.5], [2.7, -1.8, -0.28, 1.8, 2.8]]; let tensor = Tensor::new(data, device)?; assert_eq!( test_utils::to_vec2_round(&tensor.gelu()?, 4)?, [ [-0.0036, 0.8412, 3.9999, -0.046, 0.3457], [2.6911, -0.0647, -0.1091, 1.7353, 2.7933] ] ); assert_eq!( test_utils::to_vec2_round(&tensor.gelu_erf()?, 4)?, [ [-0.004, 0.8413, 3.9999, -0.046, 0.3457], [2.6906, -0.0647, -0.1091, 1.7353, 2.7928] ] ); assert_eq!( test_utils::to_vec2_round(&tensor.erf()?, 4)?, [ [-1.0, 0.8427, 1.0, -0.1125, 0.5205], [0.9999, -0.9891, -0.3079, 0.9891, 0.9999] ] ); assert_eq!( test_utils::to_vec2_round(&tensor.ceil()?, 4)?, [[-3.0, 1.0, 4.0, -0.0, 1.0], [3.0, -1.0, -0.0, 2.0, 3.0]] ); assert_eq!( test_utils::to_vec2_round(&tensor.floor()?, 4)?, [[-3.0, 1.0, 4.0, -1.0, 0.0], [2.0, -2.0, -1.0, 1.0, 2.0]] ); assert_eq!( test_utils::to_vec2_round(&tensor.round()?, 4)?, [[-3.0, 1.0, 4.0, -0.0, 1.0], [3.0, -2.0, -0.0, 2.0, 3.0]] ); let tensor = Tensor::new(&[2997.9246, 314.15926f32], device)?; assert_eq!( test_utils::to_vec1_round(&tensor.round_to(2)?, 4)?, [2997.92, 314.16] ); assert_eq!( test_utils::to_vec1_round(&tensor.round_to(-2)?, 4)?, [3000.0, 300.] 
); Ok(()) } fn binary_op(device: &Device) -> Result<()> { let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let tensor1 = Tensor::new(data, device)?; let data2 = &[[5f32, 5., 5., 5., 5.], [2., 1., 7., 8., 2.]]; let tensor2 = Tensor::new(data2, device)?; let tensor = (&tensor1 + (&tensor1 * &tensor1)? / (&tensor1 + &tensor2))?; let dims = tensor.dims2()?; assert_eq!(dims, (2, 5)); let content: Vec<Vec<f32>> = tensor.to_vec2()?; assert_eq!(content[0], [4.125, 1.1666666, 5.7777777, 1.1666666, 7.5]); assert_eq!(content[1], [3.0, 1.5, 10.5, 12.0, 3.0]); #[allow(clippy::eq_op)] let tensor = (&tensor - &tensor)?; let content: Vec<Vec<f32>> = tensor.to_vec2()?; assert_eq!(content[0], [0., 0., 0., 0., 0.]); let min = tensor1.minimum(&(&tensor2 * 0.5)?)?; let max = tensor1.maximum(&(&tensor2 * 0.5)?)?; assert_eq!( min.to_vec2::<f32>()?, [[2.5, 1.0, 2.5, 1.0, 2.5], [1.0, 0.5, 3.5, 4.0, 1.0]], ); assert_eq!( max.to_vec2::<f32>()?, [[3.0, 2.5, 4.0, 2.5, 5.0], [2.0, 1.0, 7.0, 8.0, 2.0]] ); Ok(()) } fn transpose(device: &Device) -> Result<()> { let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let tensor = Tensor::new(data, device)?.t()?; let dims = tensor.dims2()?; assert_eq!(dims, (5, 2)); assert_eq!( tensor.to_vec2::<f32>()?, &[[3f32, 2.], [1., 1.], [4., 7.], [1., 8.], [5., 2.]] ); assert_eq!(tensor.t()?.to_vec2::<f32>()?, data); assert_eq!(tensor.contiguous()?.t()?.to_vec2::<f32>()?, data); assert_eq!(((tensor + 1.)?.t()? - 1.)?.to_vec2::<f32>()?, data); Ok(()) } fn var(device: &Device) -> Result<()> { // Values taken from https://pytorch.org/docs/stable/generated/torch.var.html let data = &[ [0.2035f32, 1.2959, 1.8101, -0.4644], [1.5027, -0.3270, 0.5905, 0.6538], [-1.5745, 1.3330, -0.5596, -0.6548], [0.1264, -0.5080, 1.6420, 0.1992], ]; let tensor = Tensor::new(data, device)?; assert_eq!( test_utils::to_vec2_round(&tensor.var_keepdim(1)?, 4)?, &[[1.0631], [0.559], [1.4893], [0.8258]] ); Ok(()) } fn sum(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.sum_keepdim(2)?.to_vec3::<u32>()?, &[[[8], [15]], [[10], [18]]] ); assert_eq!( tensor.sum_keepdim(0)?.to_vec3::<u32>()?, &[[[5, 2, 11], [9, 7, 17]]], ); assert_eq!(tensor.sum_keepdim((0, 2, 1))?.to_vec3::<u32>()?, &[[[51]]],); assert_eq!( tensor.t()?.sum_keepdim(1)?.t()?.to_vec3::<u32>()?, &[[[8], [15]], [[10], [18]]] ); assert_eq!( tensor.sum_keepdim((2, 1))?.to_vec3::<u32>()?, &[[[8 + 15]], [[10 + 18]]] ); let data: Vec<u32> = (0..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.sum_keepdim(0)?.to_vec1::<u32>()?, &[7998000]); let tensor = tensor.reshape((2000, 2))?; assert_eq!(tensor.sum_keepdim((0, 1))?.to_vec2::<u32>()?, &[[7998000]]); assert_eq!( tensor.sum_keepdim(0)?.sum_keepdim(1)?.to_vec2::<u32>()?, &[[7998000]] ); assert_eq!( tensor.sum_keepdim(1)?.sum_keepdim(0)?.to_vec2::<u32>()?, &[[7998000]] ); assert_eq!( tensor.sum_keepdim(0)?.to_vec2::<u32>()?, &[[3998000, 4000000]] ); // Make the tensor non contiguous. 
let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!(tensor.sum_keepdim((0, 1))?.to_vec2::<u32>()?, &[[7998000]]); assert_eq!( tensor.sum_keepdim(0)?.sum_keepdim(1)?.to_vec2::<u32>()?, &[[7998000]] ); assert_eq!( tensor.sum_keepdim(1)?.sum_keepdim(0)?.to_vec2::<u32>()?, &[[7998000]] ); assert_eq!( tensor.sum_keepdim(0)?.to_vec2::<u32>()?, &[[3998000, 4000000]] ); let t1 = tensor.reshape((200, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor.sum_keepdim((0, 1, 2))?.to_vec3::<u32>()?, &[[[7998000]]] ); assert_eq!( tensor .sum_keepdim(0)? .sum_keepdim(2)? .sum_keepdim(1)? .to_vec3::<u32>()?, &[[[7998000]]] ); assert_eq!( tensor .sum_keepdim(0)? .sum_keepdim((1, 2))? .to_vec3::<u32>()?, &[[[7998000]]] ); assert_eq!( tensor .sum_keepdim(1)? .sum_keepdim((0, 2))? .to_vec3::<u32>()?, &[[[7998000]]] ); assert_eq!( tensor.sum_keepdim(0)?.to_vec3::<u32>()?, &[[ [398000, 398200, 398400, 398600], [398800, 399000, 399200, 399400], [399600, 399800, 400000, 400200], [400400, 400600, 400800, 401000], [401200, 401400, 401600, 401800] ]] ); } Ok(()) } fn min(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.min_keepdim(2)?.to_vec3::<u32>()?, &[[[1], [1]], [[1], [2]]] ); assert_eq!( tensor.min_keepdim(0)?.to_vec3::<u32>()?, &[[[2, 1, 4], [1, 2, 8]]], ); let data: Vec<u32> = (200..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.min_keepdim(0)?.to_vec1::<u32>()?, &[200]); let tensor = tensor.reshape((1900, 2))?; assert_eq!( tensor.min_keepdim(0)?.min_keepdim(1)?.to_vec2::<u32>()?, &[[200]] ); assert_eq!( tensor.min_keepdim(1)?.min_keepdim(0)?.to_vec2::<u32>()?, &[[200]] ); assert_eq!(tensor.min_keepdim(0)?.to_vec2::<u32>()?, &[[200, 201]]); // Make the tensor non contiguous. let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!( tensor.min_keepdim(0)?.min_keepdim(1)?.to_vec2::<u32>()?, &[[200]] ); assert_eq!( tensor.min_keepdim(1)?.min_keepdim(0)?.to_vec2::<u32>()?, &[[200]] ); assert_eq!(tensor.min_keepdim(0)?.to_vec2::<u32>()?, &[[200, 201]]); let t1 = tensor.reshape((190, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor .min_keepdim(0)? .min_keepdim(2)? .min_keepdim(1)? .to_vec3::<u32>()?, &[[[200]]] ); assert_eq!( tensor.min_keepdim(0)?.to_vec3::<u32>()?, &[[ [200, 201, 202, 203], [204, 205, 206, 207], [208, 209, 210, 211], [212, 213, 214, 215], [216, 217, 218, 219] ]] ); } Ok(()) } fn max(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.max_keepdim(2)?.to_vec3::<u32>()?, &[[[4], [9]], [[7], [8]]] ); assert_eq!( tensor.max_keepdim(0)?.to_vec3::<u32>()?, &[[[3, 1, 7], [8, 5, 9]]], ); let data: Vec<u32> = (200..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.max_keepdim(0)?.to_vec1::<u32>()?, &[3999]); let tensor = tensor.reshape((1900, 2))?; assert_eq!( tensor.max_keepdim(0)?.max_keepdim(1)?.to_vec2::<u32>()?, &[[3999]] ); assert_eq!( tensor.max_keepdim(1)?.max_keepdim(0)?.to_vec2::<u32>()?, &[[3999]] ); assert_eq!(tensor.max_keepdim(0)?.to_vec2::<u32>()?, &[[3998, 3999]]); // Make the tensor non contiguous. 
let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!( tensor.max_keepdim(0)?.max_keepdim(1)?.to_vec2::<u32>()?, &[[3999]] ); assert_eq!( tensor.max_keepdim(1)?.max_keepdim(0)?.to_vec2::<u32>()?, &[[3999]] ); assert_eq!(tensor.max_keepdim(0)?.to_vec2::<u32>()?, &[[3998, 3999]]); let t1 = tensor.reshape((190, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor .max_keepdim(0)? .max_keepdim(2)? .max_keepdim(1)? .to_vec3::<u32>()?, &[[[3999]]] ); assert_eq!( tensor.max_keepdim(0)?.to_vec3::<u32>()?, &[[ [3980, 3981, 3982, 3983], [3984, 3985, 3986, 3987], [3988, 3989, 3990, 3991], [3992, 3993, 3994, 3995], [3996, 3997, 3998, 3999] ]] ); } Ok(()) } fn argmin(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.argmin_keepdim(2)?.to_vec3::<u32>()?, &[[[1], [0]], [[1], [1]]] ); assert_eq!( tensor.argmin_keepdim(0)?.to_vec3::<u32>()?, &[[[1, 0, 0], [0, 1, 1]]], ); let data: Vec<u32> = (200..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.argmin_keepdim(0)?.to_vec1::<u32>()?, &[0]); let tensor = tensor.reshape((1900, 2))?; assert_eq!( tensor .argmin_keepdim(0)? .argmin_keepdim(1)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!( tensor .argmin_keepdim(1)? .argmin_keepdim(0)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!(tensor.argmin_keepdim(0)?.to_vec2::<u32>()?, &[[0, 0]]); // Make the tensor non contiguous. let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!( tensor .argmin_keepdim(0)? .argmin_keepdim(1)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!( tensor .argmin_keepdim(1)? .argmin_keepdim(0)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!(tensor.argmin_keepdim(0)?.to_vec2::<u32>()?, &[[0, 0]]); let t1 = tensor.reshape((190, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor .argmin_keepdim(0)? .argmin_keepdim(2)? .argmin_keepdim(1)? .to_vec3::<u32>()?, &[[[0]]] ); assert_eq!( tensor.argmin_keepdim(0)?.to_vec3::<u32>()?, &[[ [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], ]] ); } Ok(()) } fn argmax(device: &Device) -> Result<()> { let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.argmax_keepdim(2)?.to_vec3::<u32>()?, &[[[2], [2]], [[2], [0]]] ); assert_eq!( tensor.argmax_keepdim(0)?.to_vec3::<u32>()?, &[[[0, 0, 1], [1, 0, 0]]], ); let data: Vec<u32> = (200..4000u32).collect(); let tensor = Tensor::new(data.as_slice(), device)?; assert_eq!(tensor.argmax_keepdim(0)?.to_vec1::<u32>()?, &[3799]); let tensor = tensor.reshape((1900, 2))?; assert_eq!( tensor .argmax_keepdim(0)? .argmax_keepdim(1)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!( tensor .argmax_keepdim(1)? .argmax_keepdim(0)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!(tensor.argmax_keepdim(0)?.to_vec2::<u32>()?, &[[1899, 1899]]); // Make the tensor non contiguous. let tensor = tensor.t()?.contiguous()?.t()?; assert_eq!( tensor .argmax_keepdim(0)? .argmax_keepdim(1)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!( tensor .argmax_keepdim(1)? .argmax_keepdim(0)? .to_vec2::<u32>()?, &[[0]] ); assert_eq!(tensor.argmax_keepdim(0)?.to_vec2::<u32>()?, &[[1899, 1899]]); let t1 = tensor.reshape((190, 5, 4))?; let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?; for tensor in [t1, t2] { assert_eq!( tensor .argmax_keepdim(0)? .argmax_keepdim(2)? .argmax_keepdim(1)? 
.to_vec3::<u32>()?, &[[[0]]] ); assert_eq!( tensor.argmax_keepdim(0)?.to_vec3::<u32>()?, &[[ [189, 189, 189, 189], [189, 189, 189, 189], [189, 189, 189, 189], [189, 189, 189, 189], [189, 189, 189, 189], ]] ); } Ok(()) } fn narrow(device: &Device) -> Result<()> { let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.narrow(2, 1, 2)?.to_vec3::<f32>()?, &[[[1.0, 4.0], [5.0, 9.0]], [[1.0, 7.0], [2.0, 8.0]]], ); assert_eq!( tensor.narrow(1, 1, 1)?.to_vec3::<f32>()?, &[[[1.0, 5.0, 9.0]], [[8.0, 2.0, 8.0]]], ); assert_eq!( tensor.narrow(0, 0, 1)?.to_vec3::<f32>()?, &[[[3.0, 1.0, 4.0], [1.0, 5.0, 9.0]]], ); assert_eq!( tensor.narrow(0, 1, 1)?.to_vec3::<f32>()?, &[[[2.0, 1.0, 7.0], [8.0, 2.0, 8.0]]], ); // The following has been checked against PyTorch via: // import torch // t = torch.tensor([[[3., 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]]) // t.transpose(-1, -2).narrow(1, 1, 2) assert_eq!( tensor.t()?.narrow(1, 1, 2)?.to_vec3::<f32>()?, &[[[1.0, 5.0], [4.0, 9.0]], [[1.0, 2.0], [7.0, 8.0]]], ); Ok(()) } fn broadcast(device: &Device) -> Result<()> { let data = &[3f32, 1., 4.]; let tensor = Tensor::new(data, device)?; assert_eq!( tensor.broadcast_left((3, 1))?.to_vec3::<f32>()?, &[[[3.0, 1.0, 4.0]], [[3.0, 1.0, 4.0]], [[3.0, 1.0, 4.0]]] ); Ok(()) } fn cat(device: &Device) -> Result<()> { // 1D let t1 = Tensor::new(&[3f32, 1., 4.], device)?; let t2 = Tensor::new(&[1f32, 5., 9., 2.], device)?; let t3 = Tensor::new(&[6f32, 5., 3., 5., 8., 9.], device)?; assert_eq!(Tensor::cat(&[&t1], 0)?.to_vec1::<f32>()?, [3f32, 1., 4.],); assert_eq!( Tensor::cat(&[&t1, &t2], 0)?.to_vec1::<f32>()?, [3f32, 1., 4., 1., 5., 9., 2.], ); assert_eq!( Tensor::cat(&[&t1, &t2, &t3], 0)?.to_vec1::<f32>()?, [3f32, 1., 4., 1., 5., 9., 2., 6., 5., 3., 5., 8., 9.], ); // 2D let data = &[[3f32, 1., 4., 1., 5.], [2., 7., 1., 8., 2.]]; let t1 = Tensor::new(data, device)?; let data2 = &[[5f32, 5., 5., 5., 5.], [2., 7., 1., 8., 2.]]; let t2 = Tensor::new(data2, device)?; assert_eq!( Tensor::cat(&[&t1, &t2], 0)?.to_vec2::<f32>()?, [ [3.0, 1.0, 4.0, 1.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0], [5.0, 5.0, 5.0, 5.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0] ] ); // PyTorch equivalent: // import torch // t1 = torch.tensor([[3, 1, 4, 1, 5], [2, 7, 1, 8, 2]]) // t2 = torch.tensor([[5]*5, [2, 7, 1, 8, 2]]) // torch.cat([t1.t(), t2.t()], dim=1).t() assert_eq!( Tensor::cat(&[&t1.t()?, &t2.t()?], 1)? .t()? 
.to_vec2::<f32>()?, [ [3.0, 1.0, 4.0, 1.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0], [5.0, 5.0, 5.0, 5.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0] ] ); assert_eq!( Tensor::cat(&[&t1, &t2], 1)?.to_vec2::<f32>()?, [ [3.0, 1.0, 4.0, 1.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], [2.0, 7.0, 1.0, 8.0, 2.0, 2.0, 7.0, 1.0, 8.0, 2.0] ] ); Ok(()) } fn embeddings(device: &Device) -> Result<()> { let ids = Tensor::new(&[0u32, 2u32, 1u32], device)?; let t = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], device)?; let hs = t.embedding(&ids)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]); let hs = t.index_select(&ids, 0)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]); Ok(()) } fn cmp(device: &Device) -> Result<()> { let t1 = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], device)?; let t2 = Tensor::new(&[[1f32, 0f32], [3f32, 3f32], [4f32, 7f32]], device)?; assert_eq!(t1.eq(&t2)?.to_vec2::<u8>()?, &[[0, 0], [0, 1], [1, 0]]); assert_eq!(t1.ne(&t2)?.to_vec2::<u8>()?, &[[1, 1], [1, 0], [0, 1]]); assert_eq!(t1.le(&t2)?.to_vec2::<u8>()?, &[[1, 0], [1, 1], [1, 1]]); assert_eq!(t1.lt(&t2)?.to_vec2::<u8>()?, &[[1, 0], [1, 0], [0, 1]]); assert_eq!(t1.gt(&t2)?.to_vec2::<u8>()?, &[[0, 1], [0, 0], [0, 0]]); assert_eq!(t1.ge(&t2)?.to_vec2::<u8>()?, &[[0, 1], [0, 1], [1, 0]]); Ok(()) } fn index_select(device: &Device) -> Result<()> { let ids = Tensor::new(&[0u32, 2u32, 1u32], device)?; let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); let hs = t.index_select(&ids, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [0.0, 2.0, 1.0], [3.0, 5.0, 4.0], [6.0, 8.0, 7.0], [9.0, 11.0, 10.0] ] ); let hs = t.index_select(&ids, 0)?; assert_eq!( hs.to_vec2::<f32>()?, &[[0.0, 1.0, 2.0], [6.0, 7.0, 8.0], [3.0, 4.0, 5.0]] ); // Prior to https://github.com/huggingface/candle/pull/1022 // There would be a bug where the last values in the result tensor would be set to 0. let ids = Tensor::new(&[0u32, 2u32, 1u32, 0u32, 2u32, 1u32], device)?; let hs = t.index_select(&ids, 0)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [6.0, 7.0, 8.0], [3.0, 4.0, 5.0], [0.0, 1.0, 2.0], [6.0, 7.0, 8.0], [3.0, 4.0, 5.0], ] ); // Test when selecting dim > 0 with ids size different from elem count of // target dim in source/input. 
let ids = Tensor::new(&[1u32, 0u32, 1u32], device)?; let t = Tensor::arange(1f32, 5f32, device)?.reshape((2, 2))?; assert_eq!(t.to_vec2::<f32>()?, &[[1.0, 2.0], [3.0, 4.0]]); let hs = t.index_select(&ids, 1)?; assert_eq!(hs.to_vec2::<f32>()?, &[[2.0, 1.0, 2.0], [4.0, 3.0, 4.0]]); Ok(()) } fn index_add(device: &Device) -> Result<()> { let ids = Tensor::new(&[0u32, 1u32, 1u32], device)?; let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); let init = Tensor::ones((4, 2), DType::F32, device)?; let hs = init.index_add(&ids, &t, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[[1.0, 4.0], [4.0, 10.0], [7.0, 16.0], [10.0, 22.0]], ); let init = Tensor::zeros((4, 2), DType::F32, device)?; let ids = Tensor::new(&[1u32, 0u32, 0u32], device)?; let hs = init.index_add(&ids, &t, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[[3.0, 0.0], [9.0, 3.0], [15.0, 6.0], [21.0, 9.0]], ); let init = Tensor::zeros((6, 3), DType::F32, device)?; let ids = Tensor::new(&[5u32, 0u32, 1u32, 0u32], device)?; let hs = init.index_add(&ids, &t, 0)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [12.0, 14.0, 16.0], [6.0, 7.0, 8.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 1.0, 2.0] ] ); Ok(()) } fn slice_scatter(device: &Device) -> Result<()> { let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); let src = Tensor::arange(100f32, 106f32, device)?.reshape((2, 3))?; assert_eq!( t.slice_scatter0(&src, 0)?.to_vec2::<f32>()?, &[ [100.0, 101.0, 102.0], [103.0, 104.0, 105.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); assert_eq!( t.slice_scatter0(&src, 1)?.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [100.0, 101.0, 102.0], [103.0, 104.0, 105.0], [9.0, 10.0, 11.0] ] ); assert_eq!( t.slice_scatter0(&src, 2)?.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [100.0, 101.0, 102.0], [103.0, 104.0, 105.0], ] ); Ok(()) } fn scatter_add(device: &Device) -> Result<()> { let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); let ids = Tensor::new(&[[0u32, 1, 2], [3, 4, 0], [3, 3, 1], [2, 0, 4]], device)?; let init = Tensor::ones((4, 5), DType::F32, device)?; let hs = init.scatter_add(&ids, &t, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [1.0, 2.0, 3.0, 1.0, 1.0], [6.0, 1.0, 1.0, 4.0, 5.0], [1.0, 9.0, 1.0, 14.0, 1.0], [11.0, 1.0, 10.0, 1.0, 12.0] ] ); let init = Tensor::ones((6, 3), DType::F32, device)?; let hs = init.scatter_add(&ids, &t, 0)?; assert_eq!( hs.to_vec2::<f32>()?, &[ [1.0, 11.0, 6.0], [1.0, 2.0, 9.0], [10.0, 1.0, 3.0], [10.0, 8.0, 1.0], [1.0, 5.0, 12.0], [1.0, 1.0, 1.0] ] ); Ok(()) } fn gather(device: &Device) -> Result<()> { let ids = Tensor::new(&[[0u32], [2u32], [1u32], [0u32]], device)?; let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?; assert_eq!( t.to_vec2::<f32>()?, &[ [0.0, 1.0, 2.0], [3.0, 4.0, 5.0], [6.0, 7.0, 8.0], [9.0, 10.0, 11.0] ] ); let hs = t.gather(&ids, 1)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0], [5.0], [7.0], [9.0]]); let ids = Tensor::new( &[[0u32, 0u32], [2u32, 0u32], [1u32, 1u32], [0u32, 2u32]], device, )?; let hs = t.gather(&ids, 1)?; assert_eq!( hs.to_vec2::<f32>()?, &[[0.0, 0.0], [5.0, 3.0], [7.0, 7.0], [9.0, 11.0]] ); let ids = Tensor::new(&[[0u32, 2u32, 0u32]], device)?; let hs = t.gather(&ids, 0)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 7.0, 
2.0]]); let ids = Tensor::new(&[[0u32, 2u32, 0u32], [0u32, 1u32, 1u32]], device)?; let hs = t.gather(&ids, 0)?; assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 7.0, 2.0], [0.0, 4.0, 5.0]]); Ok(()) } fn matmul(device: &Device) -> Result<()> { let data = vec![1.0f32, 2.0, 3.0, 4.0]; let a = Tensor::from_slice(&data, (2, 2), device)?; let data = vec![1.0f32, 2.0, 3.0, 4.0]; let b = Tensor::from_slice(&data, (2, 2), device)?; let c = a.matmul(&b)?; assert_eq!(c.to_vec2::<f32>()?, &[[7.0f32, 10.0], [15.0, 22.0]]); let data = vec![1.0f32, 2.0]; let a = Tensor::from_slice(&data, (2, 1), device)?; let data = vec![3.0f32, 4.0]; let b = Tensor::from_slice(&data, (1, 2), device)?; let c = a.matmul(&b)?; assert_eq!(c.to_vec2::<f32>()?, &[&[3.0, 4.0], &[6.0, 8.0]]); let data: Vec<_> = (0..6).map(|i| i as f32).collect(); let a = Tensor::from_slice(&data, (2, 3), device)?; let data: Vec<_> = (0..6).map(|i| (i + 2) as f32).collect(); let b = Tensor::from_slice(&data, (3, 2), device)?; let c = a.matmul(&b)?; assert_eq!(c.to_vec2::<f32>()?, &[&[16., 19.], &[52., 64.]]); let data: Vec<_> = (0..12).map(|i| i as f32).collect(); let a = Tensor::from_slice(&data, (2, 2, 3), device)?; let data: Vec<_> = (0..12).map(|i| (i + 2) as f32).collect(); let b = Tensor::from_slice(&data, (2, 3, 2), device)?; let expected = [[[16., 19.], [52., 64.]], [[214., 235.], [304., 334.]]]; let c = a.matmul(&b)?; assert_eq!(c.to_vec3::<f32>()?, &expected); // Also perform the matmul on contiguous transposed versions. let a_tt = a.t()?.contiguous()?.t()?; assert!(!a_tt.is_contiguous()); assert_eq!(a.dims(), a_tt.dims()); assert_eq!(a_tt.stride(), &[6, 1, 2]); let b_tt = b.t()?.contiguous()?.t()?; assert!(!b_tt.is_contiguous()); assert_eq!(b.dims(), b_tt.dims()); assert_eq!(b_tt.stride(), &[6, 1, 3]); assert_eq!(a_tt.matmul(&b)?.to_vec3::<f32>()?, &expected); assert_eq!(a.matmul(&b_tt)?.to_vec3::<f32>()?, &expected); assert_eq!(a_tt.matmul(&b_tt)?.to_vec3::<f32>()?, &expected); Ok(()) } fn broadcast_matmul(device: &Device) -> Result<()> { let lhs = Tensor::randn(0f32, 1f32, (3, 1, 4, 5), device)?; let rhs = Tensor::randn(0f32, 1f32, (6, 5, 2), device)?; let out = lhs.broadcast_matmul(&rhs)?; assert_eq!(out.dims(), &[3, 6, 4, 2]); for idx1 in 0..3 { for idx2 in 0..6 { let out = out.i((idx1, idx2))?; let lhs = lhs.i((idx1, 0))?; let rhs = rhs.i(idx2)?; let out2 = lhs.matmul(&rhs); let sum_diff2 = (out - out2)?.sqr()?.sum_all()?; // With cuda, we see errors of up to ~1e-12. assert!(sum_diff2.to_vec0::<f32>()? 
< 1e-6) } } Ok(()) } fn broadcasting(device: &Device) -> Result<()> { let t1 = Tensor::arange(0f32, 24f32, device)?.reshape((4, 2, 3))?; let t2 = Tensor::new(&[100f32, 200f32], device)?; let s = t1.broadcast_add(&t2.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[100.0, 101.0, 102.0], [203.0, 204.0, 205.0]], [[106.0, 107.0, 108.0], [209.0, 210.0, 211.0]], [[112.0, 113.0, 114.0], [215.0, 216.0, 217.0]], [[118.0, 119.0, 120.0], [221.0, 222.0, 223.0]] ] ); let s = t1.t()?.broadcast_add(&t2)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[100.0, 203.0], [101.0, 204.0], [102.0, 205.0]], [[106.0, 209.0], [107.0, 210.0], [108.0, 211.0]], [[112.0, 215.0], [113.0, 216.0], [114.0, 217.0]], [[118.0, 221.0], [119.0, 222.0], [120.0, 223.0]] ] ); let s = t1.broadcast_sub(&t2.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[-100.0, -99.0, -98.0], [-197.0, -196.0, -195.0]], [[-94.0, -93.0, -92.0], [-191.0, -190.0, -189.0]], [[-88.0, -87.0, -86.0], [-185.0, -184.0, -183.0]], [[-82.0, -81.0, -80.0], [-179.0, -178.0, -177.0]] ] ); let s = t1.t()?.broadcast_sub(&t2)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[-100.0, -197.0], [-99.0, -196.0], [-98.0, -195.0]], [[-94.0, -191.0], [-93.0, -190.0], [-92.0, -189.0]], [[-88.0, -185.0], [-87.0, -184.0], [-86.0, -183.0]], [[-82.0, -179.0], [-81.0, -178.0], [-80.0, -177.0]] ] ); // Test a narrowed version as this uses a layout start_offset. let t1 = t1.i(2..)?; let s = t1.broadcast_add(&t2.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[112.0, 113.0, 114.0], [215.0, 216.0, 217.0]], [[118.0, 119.0, 120.0], [221.0, 222.0, 223.0]] ] ); let s = t1.t()?.broadcast_add(&t2)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[112.0, 215.0], [113.0, 216.0], [114.0, 217.0]], [[118.0, 221.0], [119.0, 222.0], [120.0, 223.0]] ] ); let s = t1.broadcast_sub(&t2.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[-88.0, -87.0, -86.0], [-185.0, -184.0, -183.0]], [[-82.0, -81.0, -80.0], [-179.0, -178.0, -177.0]] ] ); let s = t1.t()?.broadcast_sub(&t2)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[-88.0, -185.0], [-87.0, -184.0], [-86.0, -183.0]], [[-82.0, -179.0], [-81.0, -178.0], [-80.0, -177.0]] ] ); let t3 = Tensor::new(1f32, device)?.broadcast_div(&t2)?; let s = t1.broadcast_mul(&t2.reshape((2, 1))?)?; let s_div = t1.broadcast_div(&t3.reshape((2, 1))?)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[1200.0, 1300.0, 1400.0], [3000.0, 3200.0, 3400.0]], [[1800.0, 1900.0, 2000.0], [4200.0, 4400.0, 4600.0]] ] ); assert_eq!(s.to_vec3::<f32>()?, s_div.to_vec3::<f32>()?,); let s = t1.t()?.broadcast_mul(&t2)?; let s_div = t1.t()?.broadcast_div(&t3)?; assert_eq!( s.to_vec3::<f32>()?, &[ [[1200.0, 3000.0], [1300.0, 3200.0], [1400.0, 3400.0]], [[1800.0, 4200.0], [1900.0, 4400.0], [2000.0, 4600.0]] ] ); assert_eq!(s.to_vec3::<f32>()?, s_div.to_vec3::<f32>()?,); Ok(()) } fn randn(device: &Device) -> Result<()> { let tensor = Tensor::randn(0f32, 1f32, (5, 3), device)?; assert_eq!(tensor.dims(), [5, 3]); let tensor = Tensor::rand(0f32, 1f32, (5, 3), device)?; assert_eq!(tensor.dims(), [5, 3]); Ok(()) } test_device!(zeros, zeros_cpu, zeros_gpu, zeros_metal); test_device!(ones, ones_cpu, ones_gpu, ones_metal); test_device!(arange, arange_cpu, arange_gpu, arange_metal); test_device!(add_mul, add_mul_cpu, add_mul_gpu, add_mul_metal); test_device!(tensor_2d, tensor_2d_cpu, tensor_2d_gpu, tensor_2d_metal); test_device!(narrow, narrow_cpu, narrow_gpu, narrow_metal); test_device!(broadcast, broadcast_cpu, broadcast_gpu, broadcast_metal); test_device!(cat, cat_cpu, cat_gpu, cat_metal); test_device!(sum, 
sum_cpu, sum_gpu, sum_metal); test_device!(min, min_cpu, min_gpu, min_metal); test_device!(max, max_cpu, max_gpu, max_metal); test_device!(argmax, argmax_cpu, argmax_gpu, argmax_metal); test_device!(argmin, argmin_cpu, argmin_gpu, argmin_metal); test_device!(transpose, transpose_cpu, transpose_gpu, transpose_metal); test_device!(unary_op, unary_op_cpu, unary_op_gpu, unary_op_metal); test_device!(binary_op, binary_op_cpu, binary_op_gpu, binary_op_metal); test_device!(embeddings, embeddings_cpu, embeddings_gpu, embeddings_metal); test_device!(cmp, cmp_cpu, cmp_gpu, cmp_metal); test_device!(matmul, matmul_cpu, matmul_gpu, matmul_metal); test_device!( broadcast_matmul, broadcast_matmul_cpu, broadcast_matmul_gpu, broadcast_matmul_metal ); test_device!( broadcasting, broadcasting_cpu, broadcasting_gpu, broadcasting_metal ); test_device!( index_select, index_select_cpu, index_select_gpu, index_select_metal ); test_device!(index_add, index_add_cpu, index_add_gpu, index_add_metal); test_device!(gather, gather_cpu, gather_gpu, gather_metal); test_device!( scatter_add, scatter_add_cpu, scatter_add_gpu, scatter_add_metal ); test_device!( slice_scatter, slice_scatter_cpu, slice_scatter_gpu, slice_scatter_metal ); test_device!(randn, randn_cpu, randn_gpu, randn_metal); test_device!(clamp, clamp_cpu, clamp_gpu, clamp_metal); test_device!(var, var_cpu, var_gpu, var_metal); // There was originally a bug on the CPU implementation for randn // https://github.com/huggingface/candle/issues/381 #[test] fn randn_hasneg() -> Result<()> { let t = Tensor::randn(0f32, 1f32, 200, &Device::Cpu)?.to_vec1::<f32>()?; if t.iter().all(|&v| v >= 0.) { candle_core::bail!("all values in tensors are non-negative") } Ok(()) } #[test] fn pad_with_same() -> Result<()> { let t = Tensor::arange(1f32, 5f32, &Device::Cpu)?.reshape((2, 2))?; let t0 = t.pad_with_same(0, 1, 2)?; assert_eq!( t0.to_vec2::<f32>()?, [[1.0, 2.0], [1.0, 2.0], [3.0, 4.0], [3.0, 4.0], [3.0, 4.0]] ); let t1 = t.pad_with_same(1, 1, 2)?; assert_eq!( t1.to_vec2::<f32>()?, [[1.0, 1.0, 2.0, 2.0, 2.0], [3.0, 3.0, 4.0, 4.0, 4.0]] ); Ok(()) } #[test] fn i64_abs() -> Result<()> { let t = Tensor::new(&[-42i64, 1337], &Device::Cpu)?; let t = t.abs()?; assert_eq!(t.to_vec1::<i64>()?, [42, 1337]); Ok(()) } #[test] fn tril_triu_eye() -> Result<()> { let t = Tensor::tril2(4, DType::F32, &Device::Cpu)?; assert_eq!( t.to_vec2::<f32>()?, [ [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 1.0, 1.0] ], ); let t = Tensor::triu2(4, DType::F32, &Device::Cpu)?; assert_eq!( t.to_vec2::<f32>()?, [ [1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0] ] ); let t = Tensor::eye(4, DType::F32, &Device::Cpu)?; assert_eq!( t.to_vec2::<f32>()?, [ [1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0] ] ); Ok(()) } #[test] fn cumsum() -> Result<()> { let t = &[3f32, 1., 4., 1., 5.]; let t = Tensor::new(t, &Device::Cpu)?; assert_eq!(t.cumsum(0)?.to_vec1::<f32>()?, [3., 4., 8., 9., 14.]); let t = t.unsqueeze(1)?; assert_eq!( t.cumsum(0)?.to_vec2::<f32>()?, [[3.0], [4.0], [8.0], [9.0], [14.0]] ); assert_eq!( t.cumsum(1)?.to_vec2::<f32>()?, [[3.0], [1.0], [4.0], [1.0], [5.0]] ); let t = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]]; let t = Tensor::new(t, &Device::Cpu)?; assert_eq!( t.cumsum(1)?.to_vec2::<f32>()?, [[3.0, 4.0, 8.0, 9.0, 14.0], [2.0, 3.0, 10.0, 18.0, 20.0]], ); assert_eq!( t.cumsum(0)?.to_vec2::<f32>()?, [[3.0, 1.0, 4.0, 1.0, 5.0], [5.0, 2.0, 11.0, 9.0, 7.0]] ); Ok(()) }
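// A supplementary sketch (not part of the original suite): `cumsum` should agree
// with a manual prefix sum computed on the host, using only APIs exercised above.
#[test]
fn cumsum_matches_manual_prefix_sum() -> Result<()> {
    let xs = [3f32, 1., 4., 1., 5., 9., 2., 6.];
    let t = Tensor::new(&xs, &Device::Cpu)?;
    let got = t.cumsum(0)?.to_vec1::<f32>()?;
    let mut acc = 0f32;
    let want: Vec<f32> = xs
        .iter()
        .map(|&v| {
            acc += v;
            acc
        })
        .collect();
    assert_eq!(got, want);
    Ok(())
}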
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/conv_tests.rs
use anyhow::Result; use candle_core::{test_device, test_utils, Device, IndexOp, Tensor}; /* This test is based on the following script. import torch torch.manual_seed(4242) t = torch.randn((1, 4, 5)) w = torch.randn((2, 4, 3)) print(t.flatten()) print(w.flatten()) res = torch.nn.functional.conv1d(t, w) print(res.flatten()) res = torch.nn.functional.conv1d(t, w, padding=1) print(res.flatten()) w_t = w.transpose(0, 1) res = torch.nn.functional.conv_transpose1d(t, w_t) print(res.shape) print(res) */ fn conv1d(dev: &Device) -> Result<()> { let t = Tensor::new( &[ 0.4056f32, -0.8689, -0.0773, -1.5630, 1.2279, -0.9287, -1.7030, 0.1370, 0.1866, 0.4145, 1.8025, -0.1536, 2.2013, -0.6836, 0.2477, 1.3127, -0.6957, 0.3278, -1.0124, 0.5599, ], dev, )? .reshape((1, 4, 5))?; let w = Tensor::new( &[ -0.8404f32, -0.3490, 0.0130, 1.3123, 0.1763, -1.9249, 1.4270, 0.9421, 0.8670, -0.7181, -1.1111, 0.8869, -1.2429, 1.8357, 1.6052, -1.3844, 0.3951, -1.2036, 0.6686, 1.6261, -0.6451, -0.0840, -1.4247, 0.5512, ], dev, )? .reshape((2, 4, 3))?; let res = t.conv1d(&w, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 2, 3]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [2.6357, -1.3336, 4.1393, -1.1784, 3.5675, 0.5069] ); let res = t.conv1d(&w, /*padding*/ 1, 1, 1, 1)?; assert_eq!(res.dims(), [1, 2, 5]); // Same as pytorch default padding: use zeros. assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [2.4509, 2.6357, -1.3336, 4.1393, 0.5657, 1.8091, -1.1784, 3.5675, 0.5069, 3.3352] ); if dev.is_cpu() { let res = t.conv_transpose1d(&w.transpose(0, 1)?, 0, 0, 1, 1)?; assert_eq!(res.dims(), [1, 2, 7]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [ 0.0699, -1.2899, 8.3018, 5.5873, 2.4572, -2.6143, -0.0706, 1.8765, 4.8318, 1.1538, 4.7076, -5.9745, -0.8276, 1.621 ], ); } Ok(()) } fn conv1d_small(dev: &Device) -> Result<()> { let t = Tensor::new(&[0.4056f32, -0.8689, -0.0773, -1.5630], dev)?.reshape((1, 1, 4))?; let w = Tensor::new(&[1f32, 0., 0.], dev)?.reshape((1, 1, 3))?; let res = t.conv1d(&w, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 1, 2]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [0.4056, -0.8689] ); let res = t.conv1d(&w, /*padding*/ 1, 1, 1, 1)?; assert_eq!(res.dims(), [1, 1, 4]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [0.0, 0.4056, -0.8689, -0.0773], ); Ok(()) } /* This test is based on the following script. 
import torch torch.manual_seed(4242) t = torch.randn((1, 4, 5, 5)) w = torch.randn((2, 4, 3, 3)) print(t.flatten()) print(w.flatten()) res = torch.nn.functional.conv2d(t, w) print(res.flatten()) w_t = w.transpose(0, 1) res = torch.nn.functional.conv_transpose2d(t, w_t) print(res.shape) print(res) res = torch.nn.functional.conv2d(t, w, dilation=2) print(res.shape) print(res[0]) res = torch.nn.functional.conv_transpose2d(t, w_t, dilation=2) print(res.shape) print(res) */ fn conv2d(dev: &Device) -> Result<()> { let t = Tensor::new( &[ 0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997, 3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013, -0.6836, 0.2477, 1.3127, -0.2260, 0.2622, -1.2974, -0.8140, -0.8404, -0.3490, 0.0130, 1.3123, 1.7569, -0.3956, -1.8255, 0.1727, -0.3538, 2.6941, 1.0529, 0.4219, -0.2071, 1.1586, 0.4717, 0.3865, -0.5690, -0.5010, -0.1310, 0.7796, 0.6630, -0.2021, 2.6090, 0.2049, 0.6466, -0.5042, -0.0603, -1.6538, -1.2429, 1.8357, 1.6052, -1.3844, 0.3323, -1.3712, 0.9634, -0.4799, -0.6451, -0.0840, -1.4247, 0.5512, -0.1747, -0.5509, -0.3742, 0.3790, -0.4431, -0.4720, -0.7890, 0.2620, 0.7875, 0.5377, -0.6779, -0.8088, 1.9098, 1.2006, -0.8000, -0.4983, 1.5480, 0.8265, -0.1025, 0.5138, 0.5748, 0.3821, -0.4607, 0.0085, ], dev, )?; let w = Tensor::new( &[ -0.9325f32, 0.6451, -0.8537, 0.2378, 0.8764, -0.1832, 0.2987, -0.6488, -0.2273, -2.4184, -0.1192, -0.4821, -0.5079, -0.5766, -2.4729, 1.6734, 0.4558, 0.2851, 1.1514, -0.9013, 1.0662, -0.1817, -0.0259, 0.1709, 0.5367, 0.7513, 0.8086, -2.2586, -0.5027, 0.9141, -1.3086, -1.3343, -1.5669, -0.1657, 0.7958, 0.1432, 0.3896, -0.4501, 0.1667, 0.0714, -0.0952, 1.2970, -0.1674, -0.3178, 1.0677, 0.3060, 0.7080, 0.1914, 1.1679, -0.3602, 1.9265, -1.8626, -0.5112, -0.0982, 0.2621, 0.6565, 0.5908, 1.0089, -0.1646, 1.8032, -0.6286, 0.2016, -0.3370, 1.2555, 0.8009, -0.6488, -0.4652, -1.5685, 1.5860, 0.5583, 0.4623, 0.6026, ], dev, )?; let t = t.reshape((1, 4, 5, 5))?; let w = w.reshape((2, 4, 3, 3))?; let res = t.conv2d(&w, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 2, 3, 3]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [ -4.2812, 2.0923, 5.2187, 7.5184, 0.752, -14.9426, 10.0087, 4.391, 0.2918, 1.6715, 10.389, 3.6023, -4.2808, 0.2672, 5.3646, -5.2023, -2.1955, -9.4075 ] ); let res = t.conv_transpose2d(&w.transpose(0, 1)?, 0, 0, 1, 1)?; assert_eq!(res.dims(), [1, 2, 7, 7]); assert_eq!( test_utils::to_vec3_round(&res.i(0)?, 4)?, [ [ [-1.9918, 2.6797, -0.4599, -1.6037, 1.4131, -2.4012, 2.9277], [1.8016, -3.5361, 1.0757, 3.5395, -8.2168, -3.2023, 0.5375], [0.8243, 1.8675, 7.8929, -4.0746, -6.4415, 5.1139, 1.6889], [0.2722, 8.9679, 3.3477, 1.8514, -4.2896, -3.8228, -7.5632], [-8.5412, -5.8142, -7.1587, -1.6095, 0.4651, 0.2748, -2.0985], [2.0833, -0.6482, -12.1692, -4.1284, -2.9765, -0.0656, -4.5114], [5.307, 2.6957, 2.3087, 1.0478, 0.7808, -1.1519, -0.9579] ], [ [1.089, 0.1872, -0.6408, -0.9897, 0.8503, 1.1019, -0.9211], [-0.1741, -0.2915, 4.2472, 1.9417, 1.65, 0.6303, -4.7131], [1.6555, 2.4026, -2.9293, 2.9953, 0.5328, 3.5873, -0.9621], [-1.4289, -3.2787, 4.1747, -6.0341, -4.6341, -5.7945, 4.142], [7.5973, 6.4431, 5.9872, 2.1639, -8.6566, 3.3143, -3.4059], [-0.8775, -3.048, 11.6543, 0.6442, 2.3218, -0.4765, 1.1516], [-5.5423, -2.5188, 1.0754, -0.0563, -2.9386, -1.1504, 1.0171] ] ] ); // Dilations. 
let res = t.conv2d(&w, 0, 1, 2, 1)?; assert_eq!(res.dims(), [1, 2, 1, 1]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [2.45, -2.3504], ); // Transpose and dilations. let res = t.conv_transpose2d(&w.transpose(0, 1)?, 0, 0, 1, 2)?; assert_eq!(res.dims(), [1, 2, 9, 9]); assert_eq!( test_utils::to_vec3_round(&res.i(0)?, 4)?, [ [ [-1.9918, 3.1652, -0.6778, -4.3442, 4.4351, 0.6652, -3.0124, -0.6031, 2.9277], [2.7036, -1.7156, -0.3969, 1.0516, 1.6381, -2.8886, -0.205, 2.4682, -1.0499], [-0.9459, 3.1631, 3.707, -4.8369, -8.5166, -1.4496, -2.7559, -3.2698, 1.4376], [-0.2157, 3.7786, -2.0252, -4.2633, 3.6731, -1.5142, 5.9391, -0.2622, -0.141], [-6.8121, -3.1744, 1.5945, 3.0637, -9.6088, 1.4446, 2.9489, -3.0082, -7.3822], [0.2371, 3.3303, 0.3861, 2.2646, -4.6784, 4.1235, -0.0109, 0.3176, -0.03], [-2.5339, -2.9564, -3.4518, -4.4594, -9.1873, -1.9709, -0.4676, 0.51, -3.5024], [4.007, 0.3067, -2.2954, 1.1105, -0.1992, 1.6372, -2.9268, 0.2807, -1.2787], [5.307, 1.1317, 1.3518, 0.9049, 3.8116, -0.4075, -0.8874, -0.2241, -0.9579] ], [ [1.089, -0.6483, 0.0726, -0.4752, -1.3283, 1.7103, 1.0703, 0.1076, -0.9211], [-0.8629, 0.1376, 0.3202, 2.0955, 0.9696, 2.8988, -1.0012, 1.5049, -0.1278], [1.9286, -1.5255, -2.9563, 2.4589, 3.3611, -0.6951, 0.3525, -1.7724, -5.9861], [1.1226, 2.1561, 3.6417, 4.7546, -0.692, 4.4126, -5.1902, 6.0805, 2.3185], [1.0111, 0.3604, 0.6432, -3.6605, 7.9517, -9.2955, -5.2988, -3.7803, -2.0642], [3.3172, -1.7967, -3.6576, -2.0942, 1.3158, 0.112, -1.7405, 2.9167, 0.7957], [5.1001, 1.8995, -1.8639, 1.1262, 9.9629, 2.683, -3.6319, -1.1607, 0.5856], [-4.8445, -0.5642, 4.2317, 0.0856, 1.2267, -0.5712, 1.736, 1.0997, 0.6908], [-5.5423, -1.1831, -1.2176, 0.0843, 0.0446, -0.7545, -2.4798, -0.0827, 1.0171] ] ] ); Ok(()) } /* This test is based on the following script. 
import torch torch.manual_seed(4242) t = torch.randn((1, 2, 3, 3)) w = torch.randn((1, 2, 1, 1)) print(t.flatten()) print(w.flatten()) res = torch.nn.functional.conv2d(t, w) print(res.flatten()) w_t = w.transpose(0, 1) res = torch.nn.functional.conv_transpose2d(t, w_t) print(res.shape) print(res.flatten()) t_t = w.transpose(0, 1) res = torch.nn.functional.conv_transpose2d(t_t, w) print(res.shape) print(res.flatten()) */ fn conv2d_small(dev: &Device) -> Result<()> { let t = Tensor::new( &[ 0.4056f32, -0.8689, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.1866, 0.4145, -0.6266, 0.3529, 2.2013, -0.6836, 0.2477, 1.3127, -0.6957, 0.3278, ], dev, )?; let w = Tensor::new(&[-0.9259f32, 1.3017], dev)?; let t = t.reshape((1, 2, 3, 3))?; let w = w.reshape((1, 2, 1, 1))?; let res = t.conv2d(&w, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 1, 3, 3]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [0.164, -0.0111, -0.1742, 2.6437, -2.0268, 1.1823, 3.2855, -1.0324, 0.2539] ); let res = t.conv2d(&w, 2, 1, 1, 1)?; assert_eq!(res.dims(), [1, 1, 7, 7]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.1640, -0.0111, -0.1742, 0.0000, 0.0000, 0.0000, 0.0000, 2.6437, -2.0268, 1.1823, 0.0000, 0.0000, 0.0000, 0.0000, 3.2855, -1.0324, 0.2539, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000 ] ); let res = t.conv_transpose2d(&w.transpose(0, 1)?, 0, 0, 1, 1)?; assert_eq!(res.dims(), [1, 1, 3, 3]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [0.164, -0.0111, -0.1742, 2.6437, -2.0268, 1.1823, 3.2855, -1.0324, 0.2539], ); let res = t.transpose(0, 1)?.conv_transpose2d(&w, 0, 0, 1, 1)?; assert_eq!(res.dims(), [2, 2, 3, 3]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [ -0.3755, 0.8045, -0.6336, -0.2218, -1.1369, 0.8599, 1.5768, -0.1268, -0.1728, 0.528, -1.131, 0.8908, 0.3118, 1.5984, -1.2089, -2.2168, 0.1783, 0.2429, -0.3838, 0.5802, -0.3268, -2.0382, 0.6329, -0.2293, -1.2154, 0.6441, -0.3035, 0.5396, -0.8156, 0.4594, 2.8654, -0.8898, 0.3224, 1.7087, -0.9056, 0.4267 ] ); Ok(()) } fn conv2d_smaller(dev: &Device) -> Result<()> { let t = Tensor::new( &[ 0.4056f32, -0.8689, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.1866, ], dev, )?; let w = Tensor::new(&[1f32, 1., 1., 1., 1., 1., 1., 1., 1.], dev)?; let t = t.reshape((1, 1, 3, 3))?; let w = w.reshape((1, 1, 3, 3))?; let res = t.conv2d(&w, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 1, 1, 1]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [-0.6197] ); Ok(()) } /* This test is based on the following script. 
import torch torch.manual_seed(4242) t = torch.randn((1, 2, 4, 2)) w = torch.randn((1, 2, 1, 1)) print(t.flatten()) print(w.flatten()) res = torch.nn.functional.conv2d(t, w) print(res.flatten()) */ fn conv2d_non_square(dev: &Device) -> Result<()> { let t = Tensor::new( &[ 0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997, 3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, ], dev, )?; let w = Tensor::new(&[-1.1351f32, 1.3841], dev)?; let t = t.reshape((1, 2, 4, 2))?; let w = w.reshape((1, 2, 1, 1))?; let res = t.conv2d(&w, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 1, 4, 2]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [0.2312, 5.2238, 2.3772, 1.9076, 2.0256, -0.5776, -1.6028, -1.467] ); Ok(()) } /* import torch torch.manual_seed(4242) t = torch.randn((1, 4, 5, 5), requires_grad=True) w = torch.randn((2, 4, 3, 3), requires_grad=True) print(t.flatten()) print(w.flatten()) res = torch.nn.functional.conv2d(t, w) print(res.flatten()) loss = (res ** 2).sum() print(loss) loss.backward() print(t.grad.shape) print(t.grad.flatten()) print(w.grad.shape) print(w.grad.flatten()) t.grad.zero_() w.grad.zero_() res = torch.nn.functional.conv2d(t, w, stride=2) print(res.flatten()) loss = (res ** 2).sum() print(loss) loss.backward() print(t.grad.shape) print(t.grad[0]) print(w.grad.shape) print(w.grad[0]) */ fn conv2d_grad(dev: &Device) -> Result<()> { use candle_core::Var; let t = Var::from_slice( &[ 0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997, 3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013, -0.6836, 0.2477, 1.3127, -0.2260, 0.2622, -1.2974, -0.8140, -0.8404, -0.3490, 0.0130, 1.3123, 1.7569, -0.3956, -1.8255, 0.1727, -0.3538, 2.6941, 1.0529, 0.4219, -0.2071, 1.1586, 0.4717, 0.3865, -0.5690, -0.5010, -0.1310, 0.7796, 0.6630, -0.2021, 2.6090, 0.2049, 0.6466, -0.5042, -0.0603, -1.6538, -1.2429, 1.8357, 1.6052, -1.3844, 0.3323, -1.3712, 0.9634, -0.4799, -0.6451, -0.0840, -1.4247, 0.5512, -0.1747, -0.5509, -0.3742, 0.3790, -0.4431, -0.4720, -0.7890, 0.2620, 0.7875, 0.5377, -0.6779, -0.8088, 1.9098, 1.2006, -0.8000, -0.4983, 1.5480, 0.8265, -0.1025, 0.5138, 0.5748, 0.3821, -0.4607, 0.0085, ], (1, 4, 5, 5), dev, )?; let w = Var::from_slice( &[ -0.9325f32, 0.6451, -0.8537, 0.2378, 0.8764, -0.1832, 0.2987, -0.6488, -0.2273, -2.4184, -0.1192, -0.4821, -0.5079, -0.5766, -2.4729, 1.6734, 0.4558, 0.2851, 1.1514, -0.9013, 1.0662, -0.1817, -0.0259, 0.1709, 0.5367, 0.7513, 0.8086, -2.2586, -0.5027, 0.9141, -1.3086, -1.3343, -1.5669, -0.1657, 0.7958, 0.1432, 0.3896, -0.4501, 0.1667, 0.0714, -0.0952, 1.2970, -0.1674, -0.3178, 1.0677, 0.3060, 0.7080, 0.1914, 1.1679, -0.3602, 1.9265, -1.8626, -0.5112, -0.0982, 0.2621, 0.6565, 0.5908, 1.0089, -0.1646, 1.8032, -0.6286, 0.2016, -0.3370, 1.2555, 0.8009, -0.6488, -0.4652, -1.5685, 1.5860, 0.5583, 0.4623, 0.6026, ], (2, 4, 3, 3), dev, )?; let res = t.conv2d(&w, 0, 1, 1, 1)?; let loss = res.sqr()?.sum_all()?; assert_eq!(test_utils::to_vec0_round(&loss, 2)?, 741.12f32); let grads = loss.backward()?; let grad_t = grads.get(&t).unwrap(); let grad_w = grads.get(&w).unwrap(); assert_eq!(grad_t.dims(), [1, 4, 5, 5]); assert_eq!(grad_w.dims(), [2, 4, 3, 3]); assert_eq!( test_utils::to_vec1_round(&grad_t.flatten_all()?, 2)?, [ 9.29, -2.84, -5.71, 3.38, -7.71, -19.15, 7.02, 29.1, 9.34, 34.73, -22.87, 24.35, -39.88, -14.01, 21.08, 9.94, 13.63, -34.68, 11.21, -6.26, 7.72, -6.32, -16.64, 
-1.08, -20.22, 21.73, -0.37, -4.06, 5.82, -3.65, -30.73, 14.55, 87.7, 31.6, 4.53, -89.78, -75.37, -57.43, -7.56, 92.96, 18.79, -4.63, -159.75, -42.47, -47.26, 52.88, 37.32, 49.0, 12.82, 2.01, -8.98, 20.18, 16.62, 12.06, 15.38, 20.0, 2.57, -15.22, 72.62, -10.75, 2.25, -31.2, 3.75, -0.2, 9.76, -0.68, 5.21, -40.44, -22.59, -61.61, 17.28, 20.41, 37.55, 5.23, 6.81, 23.54, 23.62, -9.99, -9.13, 4.87, -35.06, -26.1, 63.48, 25.81, -39.21, -70.68, -46.96, 2.33, 41.81, 82.42, -28.63, -11.78, -35.33, -10.28, -28.57, -9.13, 7.21, -9.05, -9.62, -11.25 ] ); assert_eq!( test_utils::to_vec1_round(&grad_w.flatten_all()?, 2)?, [ -28.92, -22.88, -141.23, 73.35, 61.07, 47.81, -20.0, -73.71, -41.82, -13.59, 21.5, 28.72, 28.57, -46.85, -90.19, 143.61, 16.68, 7.43, 18.88, -90.81, -20.29, 54.79, 82.63, 22.94, 77.81, -16.39, -13.2, 9.34, -40.39, -26.62, 5.33, -60.91, 9.09, -59.37, 7.08, 58.64, 5.55, 20.52, 2.5, -17.25, -6.8, 22.21, 30.15, -7.52, -37.46, 5.67, 22.58, 9.03, 47.05, 17.61, 37.31, -98.13, -14.61, -4.8, -6.36, 44.69, 23.34, 8.37, -13.52, 80.05, -34.24, -16.36, -12.31, 1.92, -33.62, -14.1, -49.23, -7.39, 11.5, -9.98, 9.66, 29.6 ] ); // Same as before but with stride. let res = t.conv2d(&w, 0, 2, 1, 1)?; let loss = res.sqr()?.sum_all()?; assert_eq!(test_utils::to_vec0_round(&loss, 2)?, 277.16f32); let grads = loss.backward()?; let grad_t = grads.get(&t).unwrap(); let grad_w = grads.get(&w).unwrap(); assert_eq!(grad_t.dims(), [1, 4, 5, 5]); assert_eq!(grad_w.dims(), [2, 4, 3, 3]); assert_eq!( test_utils::to_vec3_round(&grad_t.i(0)?, 2)?, [ [ [9.29, -7.03, 0.94, 3.49, -7.71], [-1.8, -7.82, 8.9, 8.46, 7.43], [-25.84, 22.09, -19.27, -0.22, 1.69], [4.02, 18.53, -18.37, 2.3, -24.51], [7.72, -9.68, -12.34, 5.6, -20.22] ], [ [21.73, 3.39, -18.27, 3.86, -3.65], [8.25, 3.73, 30.73, -8.61, -11.93], [-72.15, -15.36, -17.53, -12.32, -1.61], [-22.32, -7.79, -91.82, 6.44, -37.69], [52.88, 14.44, 42.75, 9.88, 2.01] ], [ [-8.98, 9.91, 6.75, -4.68, 15.38], [4.93, -0.33, 9.94, -1.46, 14.78], [13.62, -30.63, 3.96, -3.58, -4.48], [-14.13, 1.19, -34.43, 3.08, -33.83], [17.28, 12.94, 31.83, -3.35, 6.81] ], [ [23.54, 6.98, -24.52, 0.52, 4.87], [9.65, 6.18, 1.71, -25.23, -4.93], [-54.99, -23.66, 3.19, -3.73, 18.58], [-21.35, -10.39, -39.88, 28.73, -30.76], [-9.13, 11.12, -14.0, -8.23, -11.25] ] ] ); assert_eq!( test_utils::to_vec3_round(&grad_w.i(0)?, 2)?, [ [ [28.34, -7.91, -45.75], [21.03, 3.86, 29.86], [0.72, -36.58, -35.28] ], [ [-16.04, 11.53, -16.38], [29.62, -16.32, -48.35], [57.5, 28.29, 25.81] ], [ [2.93, -19.6, 1.57], [27.15, 53.88, -24.64], [12.74, -22.6, -26.2] ], [ [-0.18, -14.86, -6.82], [-19.55, -2.72, 45.9], [-2.54, 36.97, 27.11] ] ] ); // Replicate the issue from https://github.com/huggingface/candle/issues/1212 let res = t.i((.., .., 0..4, 0..4))?.conv2d(&w, 0, 2, 1, 1)?; let loss = res.sqr()?.sum_all()?; assert_eq!(test_utils::to_vec0_round(&loss, 2)?, 21.12f32); let grads = loss.backward()?; let grad_t = grads.get(&t).unwrap(); let grad_w = grads.get(&w).unwrap(); assert_eq!(grad_t.dims(), [1, 4, 5, 5]); assert_eq!(grad_w.dims(), [2, 4, 3, 3]); assert_eq!( test_utils::to_vec3_round(&grad_t.i(0)?, 2)?, [ [ [9.29, -7.03, 7.87, 0.0, 0.0], [-1.8, -7.82, 5.9, 0.0, 0.0], [-3.12, 4.49, 5.52, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0] ], [ [21.73, 3.39, 4.77, 0.0, 0.0], [8.25, 3.73, 27.61, 0.0, 0.0], [-20.55, -5.61, -2.77, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0] ], [ [-8.98, 9.91, -7.15, 0.0, 0.0], [4.93, -0.33, 4.56, 0.0, 0.0], [-6.7, -5.76, -8.05, 0.0, 0.0], [0.0, 0.0, 
0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0] ], [ [23.54, 6.98, -10.0, 0.0, 0.0], [9.65, 6.18, 18.72, 0.0, 0.0], [3.29, -5.27, 0.79, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0] ] ] ); assert_eq!( test_utils::to_vec3_round(&grad_w.i(0)?, 2)?, [ [ [-3.47, 7.44, 0.66], [12.89, -3.4, -9.29], [-14.16, -0.83, 7.14] ], [ [-3.23, 5.37, -3.02], [-2.12, -11.24, 1.94], [6.97, 7.2, 2.99] ], [ [-4.04, -3.31, 4.87], [-6.68, -5.68, 1.73], [-5.54, 4.32, 0.52] ], [[-4.72, 1.5, 4.72], [3.79, 4.04, 6.76], [-4.6, 5.8, 6.93]] ] ); Ok(()) } test_device!(conv1d, conv1d_cpu, conv1d_gpu, conv1d_metal); test_device!( conv1d_small, conv1d_small_cpu, conv1d_small_gpu, conv1d_small_metal ); test_device!(conv2d, conv2d_cpu, conv2d_gpu, conv2d_metal); test_device!( conv2d_non_square, conv2d_non_square_cpu, conv2d_non_square_gpu, conv2d_non_square_metal ); test_device!( conv2d_small, conv2d_small_cpu, conv2d_small_gpu, conv2d_small_metal ); test_device!( conv2d_smaller, conv2d_smaller_cpu, conv2d_smaller_gpu, conv2d_smaller_metal ); test_device!( conv2d_grad, conv2d_grad_cpu, conv2d_grad_gpu, conv2_grad_metal );
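// A supplementary sketch (illustrative only, not wired into `test_device!`): a
// 1x1 convolution is a per-pixel mix of the input channels, so it must agree
// with an explicit matmul over the channel dimension. Shapes and values here
// are made up for the example.
#[allow(dead_code)]
fn conv2d_1x1_as_matmul(dev: &Device) -> Result<()> {
    let t = Tensor::arange(0f32, 18f32, dev)?.reshape((1, 2, 3, 3))?;
    let w = Tensor::new(&[0.5f32, 2.0], dev)?.reshape((1, 2, 1, 1))?;
    let conv = t.conv2d(&w, 0, 1, 1, 1)?.flatten_all()?;
    // (out_channels=1, in_channels=2) x (in_channels=2, h*w=9)
    let mm = w.reshape((1, 2))?.matmul(&t.reshape((2, 9))?)?.flatten_all()?;
    assert_eq!(conv.to_vec1::<f32>()?, mm.to_vec1::<f32>()?);
    Ok(())
}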
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/npy.py
import numpy as np

x = np.arange(10)

# Write a npy file.
np.save("test.npy", x)

# Write multiple values to a npz file.
values = { "x": x, "x_plus_one": x + 1 }
np.savez("test.npz", **values)
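# A short companion sketch (not in the original script): load the files back to
# see exactly what the Rust tests in serialization_tests.rs will read.
loaded = np.load("test.npy")
assert loaded.tolist() == list(range(10))
with np.load("test.npz") as data:
    assert data["x"].tolist() == list(range(10))
    assert data["x_plus_one"].tolist() == list(range(1, 11))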
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/serialization_tests.rs
use candle_core::{DType, Result, Tensor};

#[test]
fn npy() -> Result<()> {
    let npy = Tensor::read_npy("tests/test.npy")?;
    assert_eq!(
        npy.to_dtype(DType::U8)?.to_vec1::<u8>()?,
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    );
    Ok(())
}

#[test]
fn npz() -> Result<()> {
    let npz = Tensor::read_npz("tests/test.npz")?;
    assert_eq!(npz.len(), 2);
    assert_eq!(npz[0].0, "x");
    assert_eq!(npz[1].0, "x_plus_one")
;
    assert_eq!(
        npz[1].1.to_dtype(DType::U8)?.to_vec1::<u8>()?,
        [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    );
    Ok(())
}
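// A supplementary sketch (not part of the original file): the shape of the
// array written by tests/npy.py. The tests above convert to u8 before
// comparing because the on-disk integer width depends on the platform that
// ran the numpy script.
#[test]
fn npy_shape() -> Result<()> {
    let npy = Tensor::read_npy("tests/test.npy")?;
    assert_eq!(npy.dims(), &[10]);
    Ok(())
}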
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/indexing_tests.rs
use anyhow::Result;
use candle_core::{Device, IndexOp, Tensor};

#[test]
fn integer_index() -> Result<()> {
    let dev = Device::Cpu;
    let tensor = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((2, 3))?;
    let result = tensor.i(1)?;
    assert_eq!(result.dims(), &[3]);
    assert_eq!(result.to_vec1::<u32>()?, &[3, 4, 5]);
    let result = tensor.i((.., 2))?;
    assert_eq!(result.dims(), &[2]);
    assert_eq!(result.to_vec1::<u32>()?, &[2, 5]);
    Ok(())
}

#[test]
fn range_index() -> Result<()> {
    let dev = Device::Cpu;
    // RangeFull
    let tensor = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((2, 3))?;
    let result = tensor.i(..)?;
    assert_eq!(result.dims(), &[2, 3]);
    assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]);
    // Range
    let tensor = Tensor::arange(0u32, 4 * 3, &dev)?.reshape((4, 3))?;
    let result = tensor.i(1..3)?;
    assert_eq!(result.dims(), &[2, 3]);
    assert_eq!(result.to_vec2::<u32>()?, &[[3, 4, 5], [6, 7, 8]]);
    // RangeFrom
    let result = tensor.i(2..)?;
    assert_eq!(result.dims(), &[2, 3]);
    assert_eq!(result.to_vec2::<u32>()?, &[[6, 7, 8], [9, 10, 11]]);
    // RangeTo
    let result = tensor.i(..2)?;
    assert_eq!(result.dims(), &[2, 3]);
    assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]);
    // RangeInclusive
    let result = tensor.i(1..=2)?;
    assert_eq!(result.dims(), &[2, 3]);
    assert_eq!(result.to_vec2::<u32>()?, &[[3, 4, 5], [6, 7, 8]]);
    // RangeTo
    let result = tensor.i(..1)?;
    assert_eq!(result.dims(), &[1, 3]);
    assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2]]);
    // RangeToInclusive
    let result = tensor.i(..=1)?;
    assert_eq!(result.dims(), &[2, 3]);
    assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]);
    // Empty range
    let result = tensor.i(1..1)?;
    assert_eq!(result.dims(), &[0, 3]);
    let empty: [[u32; 3]; 0] = [];
    assert_eq!(result.to_vec2::<u32>()?, &empty);
    // Similar to PyTorch, allow empty ranges when the computed length is negative.
    #[allow(clippy::reversed_empty_ranges)]
    let result = tensor.i(1..0)?;
    assert_eq!(result.dims(), &[0, 3]);
    let empty: [[u32; 3]; 0] = [];
    assert_eq!(result.to_vec2::<u32>()?, &empty);
    Ok(())
}

#[test]
fn index_3d() -> Result<()> {
    let tensor = Tensor::from_iter(0..24u32, &Device::Cpu)?.reshape((2, 3, 4))?;
    assert_eq!(tensor.i((0, 0, 0))?.to_scalar::<u32>()?, 0);
    assert_eq!(tensor.i((1, 0, 0))?.to_scalar::<u32>()?, 12);
    assert_eq!(tensor.i((0, 1, 0))?.to_scalar::<u32>()?, 4);
    assert_eq!(tensor.i((0, 1, 3))?.to_scalar::<u32>()?, 7);
    assert_eq!(tensor.i((0..2, 0, 0))?.to_vec1::<u32>()?, &[0, 12]);
    assert_eq!(
        tensor.i((0..2, .., 0))?.to_vec2::<u32>()?,
        &[[0, 4, 8], [12, 16, 20]]
    );
    assert_eq!(
        tensor.i((..2, .., 3))?.to_vec2::<u32>()?,
        &[[3, 7, 11], [15, 19, 23]]
    );
    assert_eq!(tensor.i((1, .., 3))?.to_vec1::<u32>()?, &[15, 19, 23]);
    Ok(())
}

#[test]
fn slice_assign() -> Result<()> {
    let dev = Device::Cpu;
    let tensor = Tensor::arange(0u32, 4 * 5, &dev)?.reshape((4, 5))?;
    let src = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((3, 2))?;
    let out = tensor.slice_assign(&[1..4, 3..5], &src)?;
    assert_eq!(
        out.to_vec2::<u32>()?,
        &[
            [0, 1, 2, 3, 4],
            [5, 6, 7, 0, 1],
            [10, 11, 12, 2, 3],
            [15, 16, 17, 4, 5]
        ]
    );
    let out = tensor.slice_assign(&[0..3, 0..2], &src)?;
    assert_eq!(
        out.to_vec2::<u32>()?,
        &[
            [0, 1, 2, 3, 4],
            [2, 3, 7, 8, 9],
            [4, 5, 12, 13, 14],
            [15, 16, 17, 18, 19]
        ]
    );
    Ok(())
}
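// A supplementary sketch (not from the original file): chaining `i` one
// dimension at a time should match passing a tuple index, given the `index_3d`
// behaviour above.
#[test]
fn chained_integer_index() -> Result<()> {
    let tensor = Tensor::from_iter(0..24u32, &Device::Cpu)?.reshape((2, 3, 4))?;
    let via_tuple = tensor.i((1, 2, 3))?.to_scalar::<u32>()?;
    let via_chain = tensor.i(1)?.i(2)?.i(3)?.to_scalar::<u32>()?;
    assert_eq!(via_tuple, via_chain);
    assert_eq!(via_tuple, 23);
    Ok(())
}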
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/examples/cuda_basics.rs
#[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use anyhow::Result; use candle_core::{Device, Tensor}; fn main() -> Result<()> { let device = Device::new_cuda(0)?; let in_t = Tensor::rand(-1f32, 1f32, (1, 3, 12, 7), &device)?; let k_t = Tensor::rand(-1f32, 1f32, (6, 3, 1, 1), &device)?; let out_t = in_t.conv2d(&k_t, 0, 1, 1, 1)?; println!("{out_t}"); let in_t = in_t.to_device(&Device::Cpu)?; let k_t = k_t.to_device(&Device::Cpu)?; let out_t2 = in_t.conv2d(&k_t, 0, 1, 1, 1)?; let diff = (out_t.to_device(&Device::Cpu)? - out_t2)? .sqr()? .sum_all()?; println!("{diff}"); let t = Tensor::randn(0f32, 1f32, (2, 4, 96, 96), &device)?; let w = Tensor::randn(0f32, 1f32, (320, 4, 3, 3), &device)?; let res = t.conv2d(&w, 1, 1, 1, 1)?; println!("{res:?}"); Ok(()) }
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/examples/tensor-tools.rs
use candle_core::quantized::{gguf_file, k_quants, QTensor}; use candle_core::{Device, Result, Tensor}; use clap::{Parser, Subcommand, ValueEnum}; use rayon::prelude::*; #[derive(ValueEnum, Debug, Clone)] enum QuantizationMode { /// The default quantization includes all 2d tensors, except the output tensor which always /// uses Q6_K. Llama, } impl QuantizationMode { fn quantize( &self, name: &str, tensor: QTensor, default: fn(&Tensor) -> Result<QTensor>, ) -> Result<QTensor> { match self { Self::Llama => { // Same behavior as the llama.cpp quantization. let should_quantize = name.ends_with(".weight") && tensor.rank() == 2; if should_quantize { let tensor = tensor.dequantize(&Device::Cpu)?; if name == "output.weight" { QTensor::quantize::<k_quants::BlockQ6K>(&tensor) } else { default(&tensor) } } else { Ok(tensor) } } } } } #[derive(ValueEnum, Debug, Clone)] enum Quantization { #[value(name = "q4_0")] Q4_0, #[value(name = "q4_1")] Q4_1, #[value(name = "q5_0")] Q5_0, #[value(name = "q5_1")] Q5_1, #[value(name = "q8_0")] Q8_0, #[value(name = "q8_1")] Q8_1, Q2k, Q3k, Q4k, Q5k, Q6k, Q8k, F16, F32, } #[derive(ValueEnum, Debug, Clone)] enum Format { Safetensors, Npz, Ggml, Gguf, Pth, Pickle, } impl Format { fn infer<P: AsRef<std::path::Path>>(p: P) -> Option<Self> { p.as_ref() .extension() .and_then(|e| e.to_str()) .and_then(|e| match e { // We don't infer any format for .bin as it can be used for ggml/gguf or pytorch. "safetensors" | "safetensor" => Some(Self::Safetensors), "npz" => Some(Self::Npz), "pth" | "pt" => Some(Self::Pth), "ggml" => Some(Self::Ggml), "gguf" => Some(Self::Gguf), _ => None, }) } } #[derive(Subcommand, Debug, Clone)] enum Command { Ls { files: Vec<std::path::PathBuf>, /// The file format to use, if unspecified infer from the file extension. #[arg(long, value_enum)] format: Option<Format>, /// Enable verbose mode. #[arg(short, long)] verbose: bool, }, Quantize { /// The input file, in gguf format. in_file: Vec<std::path::PathBuf>, /// The output file, in gguf format. #[arg(long)] out_file: std::path::PathBuf, /// The quantization schema to apply. #[arg(long, value_enum)] quantization: Quantization, /// Which tensor to quantize. #[arg(long, value_enum, default_value_t = QuantizationMode::Llama)] mode: QuantizationMode, }, } #[derive(Parser, Debug, Clone)] struct Args { #[command(subcommand)] command: Command, } fn run_ls(file: &std::path::PathBuf, format: Option<Format>, verbose: bool) -> Result<()> { let format = match format { Some(format) => format, None => match Format::infer(file) { Some(format) => format, None => { println!( "{file:?}: cannot infer format from file extension, use the --format flag" ); return Ok(()); } }, }; match format { Format::Npz => { let tensors = candle_core::npy::NpzTensors::new(file)?; let mut names = tensors.names(); names.sort(); for name in names { let shape_dtype = match tensors.get_shape_and_dtype(name) { Ok((shape, dtype)) => format!("[{shape:?}; {dtype:?}]"), Err(err) => err.to_string(), }; println!("{name}: {shape_dtype}") } } Format::Safetensors => { let tensors = unsafe { candle_core::safetensors::MmapedSafetensors::new(file)? 
}; let mut tensors = tensors.tensors(); tensors.sort_by(|a, b| a.0.cmp(&b.0)); for (name, view) in tensors.iter() { let dtype = view.dtype(); let dtype = match candle_core::DType::try_from(dtype) { Ok(dtype) => format!("{dtype:?}"), Err(_) => format!("{dtype:?}"), }; let shape = view.shape(); println!("{name}: [{shape:?}; {dtype}]") } } Format::Pth => { let mut tensors = candle_core::pickle::read_pth_tensor_info(file, verbose)?; tensors.sort_by(|a, b| a.name.cmp(&b.name)); for tensor_info in tensors.iter() { println!( "{}: [{:?}; {:?}]", tensor_info.name, tensor_info.layout.shape(), tensor_info.dtype, ); if verbose { println!(" {:?}", tensor_info); } } } Format::Pickle => { let file = std::fs::File::open(file)?; let mut reader = std::io::BufReader::new(file); let mut stack = candle_core::pickle::Stack::empty(); stack.read_loop(&mut reader)?; for (i, obj) in stack.stack().iter().enumerate() { println!("{i} {obj:?}"); } } Format::Ggml => { let mut file = std::fs::File::open(file)?; let content = candle_core::quantized::ggml_file::Content::read(&mut file)?; let mut tensors = content.tensors.into_iter().collect::<Vec<_>>(); tensors.sort_by(|a, b| a.0.cmp(&b.0)); for (name, qtensor) in tensors.iter() { println!("{name}: [{:?}; {:?}]", qtensor.shape(), qtensor.dtype()); } } Format::Gguf => { let mut file = std::fs::File::open(file)?; let content = gguf_file::Content::read(&mut file)?; if verbose { let mut metadata = content.metadata.into_iter().collect::<Vec<_>>(); metadata.sort_by(|a, b| a.0.cmp(&b.0)); println!("metadata entries ({})", metadata.len()); for (key, value) in metadata.iter() { println!(" {key}: {value:?}"); } } let mut tensors = content.tensor_infos.into_iter().collect::<Vec<_>>(); tensors.sort_by(|a, b| a.0.cmp(&b.0)); for (name, info) in tensors.iter() { println!("{name}: [{:?}; {:?}]", info.shape, info.ggml_dtype); } } } Ok(()) } fn run_quantize_safetensors( in_files: &[std::path::PathBuf], out_file: std::path::PathBuf, q: Quantization, ) -> Result<()> { let mut out_file = std::fs::File::create(out_file)?; let mut tensors = std::collections::HashMap::new(); for in_file in in_files.iter() { let in_tensors = candle_core::safetensors::load(in_file, &Device::Cpu)?; tensors.extend(in_tensors) } println!("tensors: {}", tensors.len()); let quantize_fn = match q { Quantization::Q4_0 => QTensor::quantize::<k_quants::BlockQ4_0>, Quantization::Q4_1 => QTensor::quantize::<k_quants::BlockQ4_1>, Quantization::Q5_0 => QTensor::quantize::<k_quants::BlockQ5_0>, Quantization::Q5_1 => QTensor::quantize::<k_quants::BlockQ5_1>, Quantization::Q8_0 => QTensor::quantize::<k_quants::BlockQ8_0>, Quantization::Q8_1 => QTensor::quantize::<k_quants::BlockQ8_1>, Quantization::Q2k => QTensor::quantize::<k_quants::BlockQ2K>, Quantization::Q3k => QTensor::quantize::<k_quants::BlockQ3K>, Quantization::Q4k => QTensor::quantize::<k_quants::BlockQ4K>, Quantization::Q5k => QTensor::quantize::<k_quants::BlockQ5K>, Quantization::Q6k => QTensor::quantize::<k_quants::BlockQ6K>, Quantization::Q8k => QTensor::quantize::<k_quants::BlockQ8K>, Quantization::F16 => QTensor::quantize::<half::f16>, Quantization::F32 => QTensor::quantize::<f32>, }; let block_size = match q { Quantization::Q4_0 => k_quants::QK4_0, Quantization::Q4_1 => k_quants::QK4_1, Quantization::Q5_0 => k_quants::QK5_0, Quantization::Q5_1 => k_quants::QK5_1, Quantization::Q8_0 => k_quants::QK8_0, Quantization::Q8_1 => k_quants::QK8_1, Quantization::Q2k | Quantization::Q3k | Quantization::Q4k | Quantization::Q5k | Quantization::Q6k | Quantization::Q8k => 
k_quants::QK_K, Quantization::F16 | Quantization::F32 => 1, }; let qtensors = tensors .into_par_iter() .map(|(name, tensor)| { let should_quantize = tensor.rank() == 2 && tensor.dim(1)? % block_size == 0; println!(" quantizing {name} {tensor:?} {should_quantize}"); let tensor = if should_quantize { quantize_fn(&tensor)? } else { QTensor::quantize::<f32>(&tensor)? }; Ok((name, tensor)) }) .collect::<Result<Vec<_>>>()?; let qtensors = qtensors .iter() .map(|(k, v)| (k.as_str(), v)) .collect::<Vec<_>>(); gguf_file::write(&mut out_file, &[], &qtensors)?; Ok(()) } fn run_quantize( in_files: &[std::path::PathBuf], out_file: std::path::PathBuf, q: Quantization, qmode: QuantizationMode, ) -> Result<()> { if in_files.is_empty() { candle_core::bail!("no specified input files") } if let Some(extension) = out_file.extension() { if extension == "safetensors" { candle_core::bail!("the generated file cannot use the safetensors extension") } } if let Some(extension) = in_files[0].extension() { if extension == "safetensors" { return run_quantize_safetensors(in_files, out_file, q); } } if in_files.len() != 1 { candle_core::bail!("only a single in-file can be used when quantizing gguf files") } // Open the out file early so as to fail directly on missing directories etc. let mut out_file = std::fs::File::create(out_file)?; let mut in_ = std::fs::File::open(&in_files[0])?; let content = gguf_file::Content::read(&mut in_)?; println!("tensors: {}", content.tensor_infos.len()); let quantize_fn = match q { Quantization::Q4_0 => QTensor::quantize::<k_quants::BlockQ4_0>, Quantization::Q4_1 => QTensor::quantize::<k_quants::BlockQ4_1>, Quantization::Q5_0 => QTensor::quantize::<k_quants::BlockQ5_0>, Quantization::Q5_1 => QTensor::quantize::<k_quants::BlockQ5_1>, Quantization::Q8_0 => QTensor::quantize::<k_quants::BlockQ8_0>, Quantization::Q8_1 => QTensor::quantize::<k_quants::BlockQ8_1>, Quantization::Q2k => QTensor::quantize::<k_quants::BlockQ2K>, Quantization::Q3k => QTensor::quantize::<k_quants::BlockQ3K>, Quantization::Q4k => QTensor::quantize::<k_quants::BlockQ4K>, Quantization::Q5k => QTensor::quantize::<k_quants::BlockQ5K>, Quantization::Q6k => QTensor::quantize::<k_quants::BlockQ6K>, Quantization::Q8k => QTensor::quantize::<k_quants::BlockQ8K>, Quantization::F16 => QTensor::quantize::<half::f16>, Quantization::F32 => QTensor::quantize::<f32>, }; let qtensors = content .tensor_infos .par_iter() .map(|(name, _)| { println!(" quantizing {name}"); let mut in_file = std::fs::File::open(&in_files[0])?; let tensor = content.tensor(&mut in_file, name)?; let tensor = qmode.quantize(name, tensor, quantize_fn)?; Ok((name, tensor)) }) .collect::<Result<Vec<_>>>()?; let qtensors = qtensors .iter() .map(|(k, v)| (k.as_str(), v)) .collect::<Vec<_>>(); let metadata = content .metadata .iter() .map(|(k, v)| (k.as_str(), v)) .collect::<Vec<_>>(); gguf_file::write(&mut out_file, metadata.as_slice(), &qtensors)?; Ok(()) } fn main() -> anyhow::Result<()> { let args = Args::parse(); match args.command { Command::Ls { files, format, verbose, } => { let multiple_files = files.len() > 1; for file in files.iter() { if multiple_files { println!("--- {file:?} ---"); } run_ls(file, format.clone(), verbose)? } } Command::Quantize { in_file, out_file, quantization, mode, } => run_quantize(&in_file, out_file, quantization, mode)?, } Ok(()) }
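// Illustrative invocations of this example binary (file names are
// hypothetical):
//
//   cargo run --example tensor-tools --release -- ls model.safetensors --verbose
//   cargo run --example tensor-tools --release -- quantize model.gguf \
//       --quantization q4k --out-file model-q4k.gguf
//
// `ls` infers the format from the extension unless `--format` is given, and
// `quantize` defaults to the llama quantization mode defined above.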
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/examples/cuda_sum_benchmark.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use std::str::FromStr; use anyhow::Result; use candle_core::{Device, Tensor}; fn cos_sin(n: usize, device: &Device) -> Result<Tensor> { let thetas: Vec<_> = (0..n).map(|i| (i as f32 / n as f32)).collect(); let xs: Vec<_> = thetas.iter().map(|t| t.cos().abs()).collect(); let ys: Vec<_> = thetas.iter().map(|t| t.sin().abs()).collect(); let xs = Tensor::from_vec(xs, (n, 1), device)?; let ys = Tensor::from_vec(ys, (1, n), device)?; let ys = Tensor::cat(&[&ys, &ys, &ys, &ys, &ys, &ys], 1)?; Ok(xs.matmul(&ys)?) } fn main() -> Result<()> { let device = Device::new_cuda(0)?; let args = std::env::args().collect::<Vec<String>>(); let n = if args.len() < 2 { 2000usize } else { usize::from_str(&args[1])? }; let xys_cpu = cos_sin(n, &Device::Cpu)?; let xys = cos_sin(n, &device)?; println!("{xys_cpu:?} {xys:?}"); let sum_keepdim_cpu = xys_cpu.sum_keepdim(1)?; println!("{sum_keepdim_cpu}"); let sum_keepdim = xys.sum_keepdim(1)?; println!("{sum_keepdim}"); let start = std::time::Instant::now(); let n_iters = 100; let mut v = 0f32; for _i in 0..n_iters { let sum_keepdim = xys.sum_keepdim(1)?; let sum_keepdim = sum_keepdim.sum_keepdim(0)?; let sum_keepdim: f32 = sum_keepdim.reshape(&[])?.to_scalar()?; v += sum_keepdim; } let elapsed = start.elapsed(); if v > 0. { println!( "ran {n_iters} iterations, time per iter: {:?} ({v})", elapsed.div_f64(n_iters as f64) ); } Ok(()) }
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/examples/basics.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Result; use candle_core::{Device, Tensor}; fn main() -> Result<()> { let a = Tensor::new(&[[0.0f32, 1.0, 2.0], [3.0, 4.0, 5.0]], &Device::Cpu)?; let b = Tensor::new(&[[88.0f32, 99.0]], &Device::Cpu)?; let new_a = a.slice_scatter(&b, 1, 2)?; assert_eq!(a.to_vec2::<f32>()?, [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]); assert_eq!(new_a.to_vec2::<f32>()?, [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]); Ok(()) }
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/display.rs
/// Pretty printing of tensors /// This implementation should be in line with the PyTorch version. /// https://github.com/pytorch/pytorch/blob/7b419e8513a024e172eae767e24ec1b849976b13/torch/_tensor_str.py use crate::{DType, Result, Tensor, WithDType}; use half::{bf16, f16}; impl Tensor { fn fmt_dt<T: WithDType + std::fmt::Display>( &self, f: &mut std::fmt::Formatter, ) -> std::fmt::Result { let device_str = match self.device().location() { crate::DeviceLocation::Cpu => "".to_owned(), crate::DeviceLocation::Cuda { gpu_id } => { format!(", cuda:{}", gpu_id) } crate::DeviceLocation::Metal { gpu_id } => { format!(", metal:{}", gpu_id) } }; write!(f, "Tensor[")?; match self.dims() { [] => { if let Ok(v) = self.to_scalar::<T>() { write!(f, "{v}")? } } [s] if *s < 10 => { if let Ok(vs) = self.to_vec1::<T>() { for (i, v) in vs.iter().enumerate() { if i > 0 { write!(f, ", ")?; } write!(f, "{v}")?; } } } dims => { write!(f, "dims ")?; for (i, d) in dims.iter().enumerate() { if i > 0 { write!(f, ", ")?; } write!(f, "{d}")?; } } } write!(f, "; {}{}]", self.dtype().as_str(), device_str) } } impl std::fmt::Debug for Tensor { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { match self.dtype() { DType::U8 => self.fmt_dt::<u8>(f), DType::U32 => self.fmt_dt::<u32>(f), DType::I64 => self.fmt_dt::<i64>(f), DType::BF16 => self.fmt_dt::<bf16>(f), DType::F16 => self.fmt_dt::<f16>(f), DType::F32 => self.fmt_dt::<f32>(f), DType::F64 => self.fmt_dt::<f64>(f), } } } /// Options for Tensor pretty printing pub struct PrinterOptions { precision: usize, threshold: usize, edge_items: usize, line_width: usize, sci_mode: Option<bool>, } static PRINT_OPTS: std::sync::Mutex<PrinterOptions> = std::sync::Mutex::new(PrinterOptions::const_default()); impl PrinterOptions { // We cannot use the default trait as it's not const. const fn const_default() -> Self { Self { precision: 4, threshold: 1000, edge_items: 3, line_width: 80, sci_mode: None, } } } pub fn set_print_options(options: PrinterOptions) { *PRINT_OPTS.lock().unwrap() = options } pub fn set_print_options_default() { *PRINT_OPTS.lock().unwrap() = PrinterOptions::const_default() } pub fn set_print_options_short() { *PRINT_OPTS.lock().unwrap() = PrinterOptions { precision: 2, threshold: 1000, edge_items: 2, line_width: 80, sci_mode: None, } } pub fn set_print_options_full() { *PRINT_OPTS.lock().unwrap() = PrinterOptions { precision: 4, threshold: usize::MAX, edge_items: 3, line_width: 80, sci_mode: None, } } struct FmtSize { current_size: usize, } impl FmtSize { fn new() -> Self { Self { current_size: 0 } } fn final_size(self) -> usize { self.current_size } } impl std::fmt::Write for FmtSize { fn write_str(&mut self, s: &str) -> std::fmt::Result { self.current_size += s.len(); Ok(()) } } trait TensorFormatter { type Elem: WithDType; fn fmt<T: std::fmt::Write>(&self, v: Self::Elem, max_w: usize, f: &mut T) -> std::fmt::Result; fn max_width(&self, to_display: &Tensor) -> usize { let mut max_width = 1; if let Ok(vs) = to_display.flatten_all().and_then(|t| t.to_vec1()) { for &v in vs.iter() { let mut fmt_size = FmtSize::new(); let _res = self.fmt(v, 1, &mut fmt_size); max_width = usize::max(max_width, fmt_size.final_size()) } } max_width } fn write_newline_indent(i: usize, f: &mut std::fmt::Formatter) -> std::fmt::Result { writeln!(f)?; for _ in 0..i { write!(f, " ")? 
} Ok(()) } fn fmt_tensor( &self, t: &Tensor, indent: usize, max_w: usize, summarize: bool, po: &PrinterOptions, f: &mut std::fmt::Formatter, ) -> std::fmt::Result { let dims = t.dims(); let edge_items = po.edge_items; write!(f, "[")?; match dims { [] => { if let Ok(v) = t.to_scalar::<Self::Elem>() { self.fmt(v, max_w, f)? } } [v] if summarize && *v > 2 * edge_items => { if let Ok(vs) = t .narrow(0, 0, edge_items) .and_then(|t| t.to_vec1::<Self::Elem>()) { for v in vs.into_iter() { self.fmt(v, max_w, f)?; write!(f, ", ")?; } } write!(f, "...")?; if let Ok(vs) = t .narrow(0, v - edge_items, edge_items) .and_then(|t| t.to_vec1::<Self::Elem>()) { for v in vs.into_iter() { write!(f, ", ")?; self.fmt(v, max_w, f)?; } } } [_] => { let elements_per_line = usize::max(1, po.line_width / (max_w + 2)); if let Ok(vs) = t.to_vec1::<Self::Elem>() { for (i, v) in vs.into_iter().enumerate() { if i > 0 { if i % elements_per_line == 0 { write!(f, ",")?; Self::write_newline_indent(indent, f)? } else { write!(f, ", ")?; } } self.fmt(v, max_w, f)? } } } _ => { if summarize && dims[0] > 2 * edge_items { for i in 0..edge_items { match t.get(i) { Ok(t) => self.fmt_tensor(&t, indent + 1, max_w, summarize, po, f)?, Err(e) => write!(f, "{e:?}")?, } write!(f, ",")?; Self::write_newline_indent(indent, f)? } write!(f, "...")?; Self::write_newline_indent(indent, f)?; for i in dims[0] - edge_items..dims[0] { match t.get(i) { Ok(t) => self.fmt_tensor(&t, indent + 1, max_w, summarize, po, f)?, Err(e) => write!(f, "{e:?}")?, } if i + 1 != dims[0] { write!(f, ",")?; Self::write_newline_indent(indent, f)? } } } else { for i in 0..dims[0] { match t.get(i) { Ok(t) => self.fmt_tensor(&t, indent + 1, max_w, summarize, po, f)?, Err(e) => write!(f, "{e:?}")?, } if i + 1 != dims[0] { write!(f, ",")?; Self::write_newline_indent(indent, f)? } } } } } write!(f, "]")?; Ok(()) } } struct FloatFormatter<S: WithDType> { int_mode: bool, sci_mode: bool, precision: usize, _phantom: std::marker::PhantomData<S>, } impl<S> FloatFormatter<S> where S: WithDType + num_traits::Float + std::fmt::Display, { fn new(t: &Tensor, po: &PrinterOptions) -> Result<Self> { let mut int_mode = true; let mut sci_mode = false; // Rather than containing all values, this should only include // values that end up being displayed according to [threshold]. let values = t .flatten_all()? .to_vec1()? .into_iter() .filter(|v: &S| v.is_finite() && !v.is_zero()) .collect::<Vec<_>>(); if !values.is_empty() { let mut nonzero_finite_min = S::max_value(); let mut nonzero_finite_max = S::min_value(); for &v in values.iter() { let v = v.abs(); if v < nonzero_finite_min { nonzero_finite_min = v } if v > nonzero_finite_max { nonzero_finite_max = v } } for &value in values.iter() { if value.ceil() != value { int_mode = false; break; } } if let Some(v1) = S::from(1000.) 
{ if let Some(v2) = S::from(1e8) { if let Some(v3) = S::from(1e-4) { sci_mode = nonzero_finite_max / nonzero_finite_min > v1 || nonzero_finite_max > v2 || nonzero_finite_min < v3 } } } } match po.sci_mode { None => {} Some(v) => sci_mode = v, } Ok(Self { int_mode, sci_mode, precision: po.precision, _phantom: std::marker::PhantomData, }) } } impl<S> TensorFormatter for FloatFormatter<S> where S: WithDType + num_traits::Float + std::fmt::Display + std::fmt::LowerExp, { type Elem = S; fn fmt<T: std::fmt::Write>(&self, v: Self::Elem, max_w: usize, f: &mut T) -> std::fmt::Result { if self.sci_mode { write!( f, "{v:width$.prec$e}", v = v, width = max_w, prec = self.precision ) } else if self.int_mode { if v.is_finite() { write!(f, "{v:width$.0}.", v = v, width = max_w - 1) } else { write!(f, "{v:max_w$.0}") } } else { write!( f, "{v:width$.prec$}", v = v, width = max_w, prec = self.precision ) } } } struct IntFormatter<S: WithDType> { _phantom: std::marker::PhantomData<S>, } impl<S: WithDType> IntFormatter<S> { fn new() -> Self { Self { _phantom: std::marker::PhantomData, } } } impl<S> TensorFormatter for IntFormatter<S> where S: WithDType + std::fmt::Display, { type Elem = S; fn fmt<T: std::fmt::Write>(&self, v: Self::Elem, max_w: usize, f: &mut T) -> std::fmt::Result { write!(f, "{v:max_w$}") } } fn get_summarized_data(t: &Tensor, edge_items: usize) -> Result<Tensor> { let dims = t.dims(); if dims.is_empty() { Ok(t.clone()) } else if dims.len() == 1 { if dims[0] > 2 * edge_items { Tensor::cat( &[ t.narrow(0, 0, edge_items)?, t.narrow(0, dims[0] - edge_items, edge_items)?, ], 0, ) } else { Ok(t.clone()) } } else if dims[0] > 2 * edge_items { let mut vs: Vec<_> = (0..edge_items) .map(|i| get_summarized_data(&t.get(i)?, edge_items)) .collect::<Result<Vec<_>>>()?; for i in (dims[0] - edge_items)..dims[0] { vs.push(get_summarized_data(&t.get(i)?, edge_items)?) 
} Tensor::cat(&vs, 0) } else { let vs: Vec<_> = (0..dims[0]) .map(|i| get_summarized_data(&t.get(i)?, edge_items)) .collect::<Result<Vec<_>>>()?; Tensor::cat(&vs, 0) } } impl std::fmt::Display for Tensor { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { let po = PRINT_OPTS.lock().unwrap(); let summarize = self.elem_count() > po.threshold; let to_display = if summarize { match get_summarized_data(self, po.edge_items) { Ok(v) => v, Err(err) => return write!(f, "{err:?}"), } } else { self.clone() }; match self.dtype() { DType::U8 => { let tf: IntFormatter<u8> = IntFormatter::new(); let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } DType::U32 => { let tf: IntFormatter<u32> = IntFormatter::new(); let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } DType::I64 => { let tf: IntFormatter<i64> = IntFormatter::new(); let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } DType::BF16 => { if let Ok(tf) = FloatFormatter::<bf16>::new(&to_display, &po) { let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } } DType::F16 => { if let Ok(tf) = FloatFormatter::<f16>::new(&to_display, &po) { let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } } DType::F64 => { if let Ok(tf) = FloatFormatter::<f64>::new(&to_display, &po) { let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } } DType::F32 => { if let Ok(tf) = FloatFormatter::<f32>::new(&to_display, &po) { let max_w = tf.max_width(&to_display); tf.fmt_tensor(self, 1, max_w, summarize, &po, f)?; writeln!(f)?; } } }; let device_str = match self.device().location() { crate::DeviceLocation::Cpu => "".to_owned(), crate::DeviceLocation::Cuda { gpu_id } => { format!(", cuda:{}", gpu_id) } crate::DeviceLocation::Metal { gpu_id } => { format!(", metal:{}", gpu_id) } }; write!( f, "Tensor[{:?}, {}{}]", self.dims(), self.dtype().as_str(), device_str ) } }
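// A small usage sketch (a test added for illustration, not in the original
// module): above the `threshold` print option, the `Display` implementation
// switches to the summarized form with an ellipsis between the edge items.
#[cfg(test)]
mod print_options_sketch {
    use super::*;
    use crate::Device;

    #[test]
    fn large_tensors_are_summarized() -> Result<()> {
        // 100 * 100 elements > the default threshold of 1000.
        let t = Tensor::zeros((100, 100), DType::F32, &Device::Cpu)?;
        let s = format!("{t}");
        assert!(s.contains("..."));
        Ok(())
    }
}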
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/layout.rs
use crate::{Error, Result, Shape};

#[derive(Debug, PartialEq, Eq, Clone)]
pub struct Layout {
    shape: Shape,
    // The strides are given in number of elements and not in bytes.
    stride: Vec<usize>,
    start_offset: usize,
}

impl Layout {
    pub fn new(shape: Shape, stride: Vec<usize>, start_offset: usize) -> Self {
        Self {
            shape,
            stride,
            start_offset,
        }
    }

    pub fn contiguous_with_offset<S: Into<Shape>>(shape: S, start_offset: usize) -> Self {
        let shape = shape.into();
        let stride = shape.stride_contiguous();
        Self {
            shape,
            stride,
            start_offset,
        }
    }

    pub fn contiguous<S: Into<Shape>>(shape: S) -> Self {
        Self::contiguous_with_offset(shape, 0)
    }

    pub fn dims(&self) -> &[usize] {
        self.shape.dims()
    }

    pub fn shape(&self) -> &Shape {
        &self.shape
    }

    pub fn stride(&self) -> &[usize] {
        &self.stride
    }

    pub fn start_offset(&self) -> usize {
        self.start_offset
    }

    /// Returns the appropriate start and stop offset if the data is stored in a C
    /// contiguous (aka row major) way.
    pub fn contiguous_offsets(&self) -> Option<(usize, usize)> {
        if self.is_contiguous() {
            let start_o = self.start_offset;
            Some((start_o, start_o + self.shape.elem_count()))
        } else {
            None
        }
    }

    /// Returns true if the data is stored in a C contiguous (aka row major) way.
    /// Note that this does not imply that the start offset is 0 or that there are no extra
    /// elements at the end of the storage.
    pub fn is_contiguous(&self) -> bool {
        self.shape.is_contiguous(&self.stride)
    }

    /// Returns true if the data is stored in a Fortran contiguous (aka column major) way.
    pub fn is_fortran_contiguous(&self) -> bool {
        self.shape.is_fortran_contiguous(&self.stride)
    }

    pub(crate) fn narrow(&self, dim: usize, start: usize, len: usize) -> Result<Self> {
        let dims = self.shape().dims();
        if dim >= dims.len() {
            Err(Error::DimOutOfRange {
                shape: self.shape().clone(),
                dim: dim as i32,
                op: "narrow",
            }
            .bt())?
        }
        if start + len > dims[dim] {
            Err(Error::NarrowInvalidArgs {
                shape: self.shape.clone(),
                dim,
                start,
                len,
                msg: "start + len > dim_len",
            }
            .bt())?
        }
        let mut dims = dims.to_vec();
        dims[dim] = len;
        Ok(Self {
            shape: Shape::from(dims),
            stride: self.stride.clone(),
            start_offset: self.start_offset + self.stride[dim] * start,
        })
    }

    pub(crate) fn transpose(&self, dim1: usize, dim2: usize) -> Result<Self> {
        let rank = self.shape.rank();
        if rank <= dim1 || rank <= dim2 {
            Err(Error::UnexpectedNumberOfDims {
                expected: usize::max(dim1, dim2),
                got: rank,
                shape: self.shape().clone(),
            }
            .bt())?
        }
        let mut stride = self.stride().to_vec();
        let mut dims = self.shape().dims().to_vec();
        dims.swap(dim1, dim2);
        stride.swap(dim1, dim2);
        Ok(Self {
            shape: Shape::from(dims),
            stride,
            start_offset: self.start_offset,
        })
    }

    pub(crate) fn permute(&self, idxs: &[usize]) -> Result<Self> {
        let is_permutation =
            idxs.len() == self.shape.rank() && (0..idxs.len()).all(|i| idxs.contains(&i));
        if !is_permutation {
            crate::bail!(
                "dimension mismatch in permute, tensor {:?}, dims: {:?}",
                self.dims(),
                idxs
            )
        }
        let stride = self.stride();
        let dims = self.shape().dims();
        let mut perm_stride = stride.to_vec();
        let mut perm_dims = dims.to_vec();
        for (i, &idx) in idxs.iter().enumerate() {
            perm_stride[i] = stride[idx];
            perm_dims[i] = dims[idx];
        }
        Ok(Self {
            shape: Shape::from(perm_dims),
            stride: perm_stride,
            start_offset: self.start_offset,
        })
    }

    pub fn broadcast_as<S: Into<Shape>>(&self, shape: S) -> Result<Self> {
        let shape = shape.into();
        if shape.rank() < self.shape().rank() {
            return Err(Error::BroadcastIncompatibleShapes {
                src_shape: self.shape().clone(),
                dst_shape: shape,
            }
            .bt());
        }
        let added_dims = shape.rank() - self.shape().rank();
        let mut stride = vec![0; added_dims];
        for (&dst_dim, (&src_dim, &src_stride)) in shape.dims()[added_dims..]
            .iter()
            .zip(self.dims().iter().zip(self.stride()))
        {
            let s = if dst_dim == src_dim {
                src_stride
            } else if src_dim != 1 {
                return Err(Error::BroadcastIncompatibleShapes {
                    src_shape: self.shape().clone(),
                    dst_shape: shape,
                }
                .bt());
            } else {
                0
            };
            stride.push(s)
        }
        Ok(Self {
            shape,
            stride,
            start_offset: self.start_offset,
        })
    }

    pub(crate) fn strided_index(&self) -> crate::StridedIndex {
        crate::StridedIndex::from_layout(self)
    }

    pub(crate) fn strided_blocks(&self) -> crate::StridedBlocks {
        let mut block_len = 1;
        let mut contiguous_dims = 0; // These are counted from the right.
        for (&stride, &dim) in self.stride().iter().zip(self.dims().iter()).rev() {
            if stride != block_len {
                break;
            }
            block_len *= dim;
            contiguous_dims += 1;
        }
        let index_dims = self.dims().len() - contiguous_dims;
        if index_dims == 0 {
            crate::StridedBlocks::SingleBlock {
                start_offset: self.start_offset,
                len: block_len,
            }
        } else {
            let block_start_index = crate::StridedIndex::new(
                &self.dims()[..index_dims],
                &self.stride[..index_dims],
                self.start_offset,
            );
            crate::StridedBlocks::MultipleBlocks {
                block_start_index,
                block_len,
            }
        }
    }

    // Returns the contiguous offsets with broadcast if applicable.
    pub(crate) fn offsets_b(&self) -> Option<ContiguousOffsetsWithBroadcast> {
        let mut left_broadcast = 1;
        let mut right_broadcast = 1;
        let strides = self.stride();
        let dims = self.dims();
        let mut start_cont = 0;
        let mut end_cont = dims.len();
        for (&s, &d) in strides.iter().zip(dims.iter()) {
            if s != 0 {
                break;
            }
            start_cont += 1;
            left_broadcast *= d;
        }
        if start_cont == dims.len() {
            return Some(ContiguousOffsetsWithBroadcast {
                start: self.start_offset,
                len: 1,
                left_broadcast,
                right_broadcast: 1,
            });
        }
        for (&s, &d) in strides.iter().zip(dims.iter()).rev() {
            if s != 0 {
                break;
            }
            end_cont -= 1;
            right_broadcast *= d;
        }
        // Check that the inner dims are contiguous
        let strides = &strides[start_cont..end_cont];
        let dims = &dims[start_cont..end_cont];
        let mut len = 1;
        for (&stride, &dim) in strides.iter().zip(dims.iter()).rev() {
            if stride != len {
                return None;
            }
            len *= dim;
        }
        Some(ContiguousOffsetsWithBroadcast {
            start: self.start_offset,
            len,
            left_broadcast,
            right_broadcast,
        })
    }
}

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ContiguousOffsetsWithBroadcast {
    pub start: usize,
    pub len: usize,
    pub left_broadcast: usize,
    pub right_broadcast: usize,
}
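// A short illustrative test (a sketch, not from the original file): broadcasting
// a size-1 dimension is represented by a stride of 0, which is the property the
// iteration helpers above rely on.
#[cfg(test)]
mod broadcast_stride_sketch {
    use super::*;

    #[test]
    fn broadcast_as_sets_zero_stride() -> Result<()> {
        let layout = Layout::contiguous((3, 1)).broadcast_as((3, 4))?;
        assert_eq!(layout.stride(), &[1, 0]);
        assert!(!layout.is_contiguous());
        Ok(())
    }
}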
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/mkl.rs
#![allow(dead_code)] use libc::{c_char, c_double, c_float, c_int}; mod ffi { use super::*; extern "C" { pub fn vsTanh(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdTanh(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsExp(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdExp(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsLn(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdLn(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsSin(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdSin(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsCos(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdCos(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsSqrt(n: c_int, a: *const c_float, y: *mut c_float); pub fn vdSqrt(n: c_int, a: *const c_double, y: *mut c_double); pub fn vsAdd(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdAdd(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsSub(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdSub(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsMul(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdMul(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsDiv(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdDiv(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsFmax(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdFmax(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn vsFmin(n: c_int, a: *const c_float, b: *const c_float, y: *mut c_float); pub fn vdFmin(n: c_int, a: *const c_double, b: *const c_double, y: *mut c_double); pub fn sgemm_( transa: *const c_char, transb: *const c_char, m: *const c_int, n: *const c_int, k: *const c_int, alpha: *const c_float, a: *const c_float, lda: *const c_int, b: *const c_float, ldb: *const c_int, beta: *const c_float, c: *mut c_float, ldc: *const c_int, ); pub fn dgemm_( transa: *const c_char, transb: *const c_char, m: *const c_int, n: *const c_int, k: *const c_int, alpha: *const c_double, a: *const c_double, lda: *const c_int, b: *const c_double, ldb: *const c_int, beta: *const c_double, c: *mut c_double, ldc: *const c_int, ); pub fn hgemm_( transa: *const c_char, transb: *const c_char, m: *const c_int, n: *const c_int, k: *const c_int, alpha: *const half::f16, a: *const half::f16, lda: *const c_int, b: *const half::f16, ldb: *const c_int, beta: *const half::f16, c: *mut half::f16, ldc: *const c_int, ); } } #[allow(clippy::too_many_arguments)] #[inline] pub unsafe fn sgemm( transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: f32, a: &[f32], lda: i32, b: &[f32], ldb: i32, beta: f32, c: &mut [f32], ldc: i32, ) { ffi::sgemm_( &(transa as c_char), &(transb as c_char), &m, &n, &k, &alpha, a.as_ptr(), &lda, b.as_ptr(), &ldb, &beta, c.as_mut_ptr(), &ldc, ) } #[allow(clippy::too_many_arguments)] #[inline] pub unsafe fn dgemm( transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: f64, a: &[f64], lda: i32, b: &[f64], ldb: i32, beta: f64, c: &mut [f64], ldc: i32, ) { ffi::dgemm_( &(transa as c_char), &(transb as c_char), &m, &n, &k, &alpha, a.as_ptr(), &lda, b.as_ptr(), &ldb, &beta, c.as_mut_ptr(), &ldc, ) } #[allow(clippy::too_many_arguments)] #[inline] pub unsafe fn hgemm( transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: half::f16, a: &[half::f16], lda: i32, b: &[half::f16], ldb: i32, beta: 
half::f16, c: &mut [half::f16], ldc: i32, ) { ffi::hgemm_( &(transa as c_char), &(transb as c_char), &m, &n, &k, &alpha, a.as_ptr(), &lda, b.as_ptr(), &ldb, &beta, c.as_mut_ptr(), &ldc, ) } #[inline] pub fn vs_exp(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsExp(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_exp(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdExp(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_ln(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsLn(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_ln(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdLn(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_sin(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsSin(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_sin(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdSin(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_cos(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsCos(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_cos(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdCos(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_sqrt(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsSqrt(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_sqrt(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdSqrt(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_sqr(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsMul(a_len as i32, a.as_ptr(), a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_sqr(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vdMul(a_len as i32, a.as_ptr(), a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_tanh(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vsTanh(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_tanh(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { 
ffi::vdTanh(a_len as i32, a.as_ptr(), y.as_mut_ptr()) } } // The vector functions from mkl can be performed in place by using the same array for input and // output. // https://www.intel.com/content/www/us/en/docs/onemkl/developer-reference-c/2023-2/vector-mathematical-functions.html #[inline] pub fn vs_tanh_inplace(y: &mut [f32]) { unsafe { ffi::vsTanh(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vd_tanh_inplace(y: &mut [f64]) { unsafe { ffi::vdTanh(y.len() as i32, y.as_ptr(), y.as_mut_ptr()) } } #[inline] pub fn vs_gelu(vs: &[f32], ys: &mut [f32]) { for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = (2.0f32 / std::f32::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v) } vs_tanh_inplace(ys); for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = 0.5 * v * (1.0 + *y) } } #[inline] pub fn vd_gelu(vs: &[f64], ys: &mut [f64]) { for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = (2.0f64 / std::f64::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v) } vd_tanh_inplace(ys); for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = 0.5 * v * (1.0 + *y) } } macro_rules! binary_op { ($fn_name:ident, $ty:ty, $mkl_name:ident) => { #[inline] pub fn $fn_name(a: &[$ty], b: &[$ty], y: &mut [$ty]) { let a_len = a.len(); let b_len = b.len(); let y_len = y.len(); if a_len != y_len || b_len != y_len { panic!( "{} a,b,y len mismatch {a_len} {b_len} {y_len}", stringify!($fn_name) ); } unsafe { ffi::$mkl_name(a_len as i32, a.as_ptr(), b.as_ptr(), y.as_mut_ptr()) } } }; } binary_op!(vs_add, f32, vsAdd); binary_op!(vd_add, f64, vdAdd); binary_op!(vs_sub, f32, vsSub); binary_op!(vd_sub, f64, vdSub); binary_op!(vs_mul, f32, vsMul); binary_op!(vd_mul, f64, vdMul); binary_op!(vs_div, f32, vsDiv); binary_op!(vd_div, f64, vdDiv); binary_op!(vs_max, f32, vsFmax); binary_op!(vd_max, f64, vdFmax); binary_op!(vs_min, f32, vsFmin); binary_op!(vd_min, f64, vdFmin);
0
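A brief usage sketch for the wrappers above (illustrative only; it assumes the crate is built with MKL linked in so the `vs*`/`vd*` symbols resolve, and `softplus_via_mkl` is a hypothetical helper, not part of the file). Note that `vs_gelu`/`vd_gelu` implement the tanh approximation gelu(x) ≈ 0.5·x·(1 + tanh(√(2/π)·(x + 0.044715·x³))) on top of the in-place tanh.

// Hypothetical helper built on the wrappers above; not part of this file.
fn softplus_via_mkl(xs: &[f32]) -> Vec<f32> {
    // The wrappers panic if input and output lengths differ, so size the
    // output buffer to match before calling into MKL.
    let mut ys = vec![0f32; xs.len()];
    vs_exp(xs, &mut ys); // ys[i] = exp(xs[i]), computed by vsExp
    for y in ys.iter_mut() {
        // softplus(x) = ln(1 + exp(x)), finishing the computation in Rust.
        *y = (1.0 + *y).ln();
    }
    ys
}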
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/dtype.rs
//! Types for elements that can be stored and manipulated using tensors.
#![allow(clippy::redundant_closure_call)]
use crate::backend::BackendStorage;
use crate::{CpuStorage, Error, Result};

/// The different types of elements allowed in tensors.
#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)]
pub enum DType {
    /// Unsigned 8-bit integer.
    U8,
    /// Unsigned 32-bit integer.
    U32,
    /// Signed 64-bit integer.
    I64,
    /// Brain floating-point using half precision (16 bits).
    BF16,
    /// Floating-point using half precision (16 bits).
    F16,
    /// Floating-point using single precision (32 bits).
    F32,
    /// Floating-point using double precision (64 bits).
    F64,
}

#[derive(Debug, PartialEq, Eq)]
pub struct DTypeParseError;

impl std::str::FromStr for DType {
    type Err = DTypeParseError;
    fn from_str(s: &str) -> std::result::Result<Self, Self::Err> {
        match s {
            "u8" => Ok(Self::U8),
            "u32" => Ok(Self::U32),
            "i64" => Ok(Self::I64),
            "bf16" => Ok(Self::BF16),
            "f16" => Ok(Self::F16),
            "f32" => Ok(Self::F32),
            "f64" => Ok(Self::F64),
            _ => Err(DTypeParseError),
        }
    }
}

impl DType {
    /// String representation for dtypes.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::U8 => "u8",
            Self::U32 => "u32",
            Self::I64 => "i64",
            Self::BF16 => "bf16",
            Self::F16 => "f16",
            Self::F32 => "f32",
            Self::F64 => "f64",
        }
    }

    /// The size used by each element in bytes, i.e. 1 for `U8`, 4 for `F32`.
    pub fn size_in_bytes(&self) -> usize {
        match self {
            Self::U8 => 1,
            Self::U32 => 4,
            Self::I64 => 8,
            Self::BF16 => 2,
            Self::F16 => 2,
            Self::F32 => 4,
            Self::F64 => 8,
        }
    }

    pub fn is_int(&self) -> bool {
        match self {
            Self::U8 | Self::U32 | Self::I64 => true,
            Self::BF16 | Self::F16 | Self::F32 | Self::F64 => false,
        }
    }

    pub fn is_float(&self) -> bool {
        match self {
            Self::U8 | Self::U32 | Self::I64 => false,
            Self::BF16 | Self::F16 | Self::F32 | Self::F64 => true,
        }
    }
}

pub trait WithDType:
    Sized
    + Copy
    + num_traits::NumAssign
    + std::cmp::PartialOrd
    + std::fmt::Display
    + 'static
    + Send
    + Sync
    + crate::cpu::kernels::VecOps
{
    const DTYPE: DType;

    fn from_f64(v: f64) -> Self;
    fn to_f64(self) -> f64;

    fn to_cpu_storage_owned(data: Vec<Self>) -> CpuStorage;

    fn to_cpu_storage(data: &[Self]) -> CpuStorage {
        Self::to_cpu_storage_owned(data.to_vec())
    }

    fn cpu_storage_as_slice(s: &CpuStorage) -> Result<&[Self]>;
    fn cpu_storage_data(s: CpuStorage) -> Result<Vec<Self>>;
}

macro_rules! with_dtype {
    ($ty:ty, $dtype:ident, $from_f64:expr, $to_f64:expr) => {
        impl WithDType for $ty {
            const DTYPE: DType = DType::$dtype;

            fn from_f64(v: f64) -> Self {
                $from_f64(v)
            }

            fn to_f64(self) -> f64 {
                $to_f64(self)
            }

            fn to_cpu_storage_owned(data: Vec<Self>) -> CpuStorage {
                CpuStorage::$dtype(data)
            }

            fn cpu_storage_data(s: CpuStorage) -> Result<Vec<Self>> {
                match s {
                    CpuStorage::$dtype(data) => Ok(data),
                    _ => Err(Error::UnexpectedDType {
                        expected: DType::$dtype,
                        got: s.dtype(),
                        msg: "unexpected dtype",
                    }
                    .bt()),
                }
            }

            fn cpu_storage_as_slice(s: &CpuStorage) -> Result<&[Self]> {
                match s {
                    CpuStorage::$dtype(data) => Ok(data),
                    _ => Err(Error::UnexpectedDType {
                        expected: DType::$dtype,
                        got: s.dtype(),
                        msg: "unexpected dtype",
                    }
                    .bt()),
                }
            }
        }
    };
}

use half::{bf16, f16};

with_dtype!(u8, U8, |v: f64| v as u8, |v: u8| v as f64);
with_dtype!(u32, U32, |v: f64| v as u32, |v: u32| v as f64);
with_dtype!(i64, I64, |v: f64| v as i64, |v: i64| v as f64);
with_dtype!(f16, F16, f16::from_f64, f16::to_f64);
with_dtype!(bf16, BF16, bf16::from_f64, bf16::to_f64);
with_dtype!(f32, F32, |v: f64| v as f32, |v: f32| v as f64);
with_dtype!(f64, F64, |v: f64| v, |v: f64| v);

pub trait IntDType: WithDType {
    fn is_true(&self) -> bool;
    fn as_usize(&self) -> usize;
}

impl IntDType for i64 {
    fn is_true(&self) -> bool {
        *self != 0
    }
    fn as_usize(&self) -> usize {
        *self as usize
    }
}

impl IntDType for u32 {
    fn is_true(&self) -> bool {
        *self != 0
    }
    fn as_usize(&self) -> usize {
        *self as usize
    }
}

impl IntDType for u8 {
    fn is_true(&self) -> bool {
        *self != 0
    }
    fn as_usize(&self) -> usize {
        *self as usize
    }
}

pub trait FloatDType: WithDType {}

impl FloatDType for f16 {}
impl FloatDType for bf16 {}
impl FloatDType for f32 {}
impl FloatDType for f64 {}
0
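A small illustrative snippet exercising the helpers above (a sketch, not part of the file):

// Sketch: round-tripping a dtype through its string form and querying its
// properties, relying only on the impls defined above.
use std::str::FromStr;

fn dtype_properties() -> std::result::Result<(), DTypeParseError> {
    let dt = DType::from_str("bf16")?;
    assert_eq!(dt.as_str(), "bf16");
    assert_eq!(dt.size_in_bytes(), 2); // bf16 elements take two bytes
    assert!(dt.is_float() && !dt.is_int());
    // WithDType links Rust element types to DType values at compile time.
    assert_eq!(<f32 as WithDType>::DTYPE, DType::F32);
    Ok(())
}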
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/device.rs
use crate::backend::BackendDevice; use crate::cpu_backend::CpuDevice; use crate::{CpuStorage, DType, Result, Shape, Storage, WithDType}; /// A `DeviceLocation` represents a physical device whereas multiple `Device` /// can live on the same location (typically for cuda devices). #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum DeviceLocation { Cpu, Cuda { gpu_id: usize }, Metal { gpu_id: usize }, } #[derive(Debug, Clone)] pub enum Device { Cpu, Cuda(crate::CudaDevice), Metal(crate::MetalDevice), } pub trait NdArray { fn shape(&self) -> Result<Shape>; fn to_cpu_storage(&self) -> CpuStorage; } impl<S: WithDType> NdArray for S { fn shape(&self) -> Result<Shape> { Ok(Shape::from(())) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage(&[*self]) } } impl<S: WithDType, const N: usize> NdArray for &[S; N] { fn shape(&self) -> Result<Shape> { Ok(Shape::from(self.len())) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage(self.as_slice()) } } impl<S: WithDType> NdArray for &[S] { fn shape(&self) -> Result<Shape> { Ok(Shape::from(self.len())) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage(self) } } impl<S: WithDType, const N: usize, const M: usize> NdArray for &[[S; N]; M] { fn shape(&self) -> Result<Shape> { Ok(Shape::from((M, N))) } fn to_cpu_storage(&self) -> CpuStorage { S::to_cpu_storage_owned(self.concat()) } } impl<S: WithDType, const N1: usize, const N2: usize, const N3: usize> NdArray for &[[[S; N3]; N2]; N1] { fn shape(&self) -> Result<Shape> { Ok(Shape::from((N1, N2, N3))) } fn to_cpu_storage(&self) -> CpuStorage { let mut vec = Vec::with_capacity(N1 * N2 * N3); for i1 in 0..N1 { for i2 in 0..N2 { vec.extend(self[i1][i2]) } } S::to_cpu_storage_owned(vec) } } impl<S: WithDType, const N1: usize, const N2: usize, const N3: usize, const N4: usize> NdArray for &[[[[S; N4]; N3]; N2]; N1] { fn shape(&self) -> Result<Shape> { Ok(Shape::from((N1, N2, N3, N4))) } fn to_cpu_storage(&self) -> CpuStorage { let mut vec = Vec::with_capacity(N1 * N2 * N3 * N4); for i1 in 0..N1 { for i2 in 0..N2 { for i3 in 0..N3 { vec.extend(self[i1][i2][i3]) } } } S::to_cpu_storage_owned(vec) } } impl<S: NdArray> NdArray for Vec<S> { fn shape(&self) -> Result<Shape> { if self.is_empty() { crate::bail!("empty array") } let shape0 = self[0].shape()?; let n = self.len(); for v in self.iter() { let shape = v.shape()?; if shape != shape0 { crate::bail!("two elements have different shapes {shape:?} {shape0:?}") } } Ok(Shape::from([[n].as_slice(), shape0.dims()].concat())) } fn to_cpu_storage(&self) -> CpuStorage { // This allocates intermediary memory and shouldn't be necessary. 
let storages = self.iter().map(|v| v.to_cpu_storage()).collect::<Vec<_>>(); CpuStorage::concat(storages.as_slice()).unwrap() } } impl Device { pub fn new_cuda(ordinal: usize) -> Result<Self> { Ok(Self::Cuda(crate::CudaDevice::new(ordinal)?)) } pub fn new_metal(ordinal: usize) -> Result<Self> { Ok(Self::Metal(crate::MetalDevice::new(ordinal)?)) } pub fn set_seed(&self, seed: u64) -> Result<()> { match self { Self::Cpu => CpuDevice.set_seed(seed), Self::Cuda(c) => c.set_seed(seed), Self::Metal(m) => m.set_seed(seed), } } pub fn same_device(&self, rhs: &Self) -> bool { match (self, rhs) { (Self::Cpu, Self::Cpu) => true, (Self::Cuda(lhs), Self::Cuda(rhs)) => lhs.same_device(rhs), (Self::Metal(lhs), Self::Metal(rhs)) => lhs.same_device(rhs), _ => false, } } pub fn location(&self) -> DeviceLocation { match self { Self::Cpu => DeviceLocation::Cpu, Self::Cuda(device) => device.location(), Device::Metal(device) => device.location(), } } pub fn is_cpu(&self) -> bool { matches!(self, Self::Cpu) } pub fn is_cuda(&self) -> bool { matches!(self, Self::Cuda(_)) } pub fn is_metal(&self) -> bool { matches!(self, Self::Metal(_)) } pub fn cuda_if_available(ordinal: usize) -> Result<Self> { if crate::utils::cuda_is_available() { Self::new_cuda(ordinal) } else { Ok(Self::Cpu) } } pub(crate) fn rand_uniform_f64( &self, lo: f64, up: f64, shape: &Shape, dtype: DType, ) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.rand_uniform(shape, dtype, lo, up)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { // TODO: Remove the special case if we start supporting generating f16/bf16 directly. if dtype == DType::F16 || dtype == DType::BF16 { let storage = device.rand_uniform(shape, DType::F32, lo, up)?; Storage::Cuda(storage).to_dtype(&crate::Layout::contiguous(shape), dtype) } else { let storage = device.rand_uniform(shape, dtype, lo, up)?; Ok(Storage::Cuda(storage)) } } Device::Metal(_device) => { // let storage = device.rand_uniform(shape, dtype, lo, up)?; // Ok(Storage::Metal(storage)) crate::bail!("Metal rand_uniform not implemented") } } } pub(crate) fn rand_uniform<T: crate::FloatDType>( &self, lo: T, up: T, shape: &Shape, ) -> Result<Storage> { self.rand_uniform_f64(lo.to_f64(), up.to_f64(), shape, T::DTYPE) } pub(crate) fn rand_normal_f64( &self, mean: f64, std: f64, shape: &Shape, dtype: DType, ) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.rand_normal(shape, dtype, mean, std)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { // TODO: Remove the special case if we start supporting generating f16/bf16 directly. 
if dtype == DType::F16 || dtype == DType::BF16 { let storage = device.rand_normal(shape, DType::F32, mean, std)?; Storage::Cuda(storage).to_dtype(&crate::Layout::contiguous(shape), dtype) } else { let storage = device.rand_normal(shape, dtype, mean, std)?; Ok(Storage::Cuda(storage)) } } Device::Metal(device) => { let storage = device.rand_normal(shape, dtype, mean, std)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn rand_normal<T: crate::FloatDType>( &self, mean: T, std: T, shape: &Shape, ) -> Result<Storage> { self.rand_normal_f64(mean.to_f64(), std.to_f64(), shape, T::DTYPE) } pub(crate) fn ones(&self, shape: &Shape, dtype: DType) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.ones_impl(shape, dtype)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { let storage = device.ones_impl(shape, dtype)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = device.ones_impl(shape, dtype)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn zeros(&self, shape: &Shape, dtype: DType) -> Result<Storage> { match self { Device::Cpu => { let storage = CpuDevice.zeros_impl(shape, dtype)?; Ok(Storage::Cpu(storage)) } Device::Cuda(device) => { let storage = device.zeros_impl(shape, dtype)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = device.zeros_impl(shape, dtype)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn storage<A: NdArray>(&self, array: A) -> Result<Storage> { match self { Device::Cpu => Ok(Storage::Cpu(array.to_cpu_storage())), Device::Cuda(device) => { let storage = array.to_cpu_storage(); let storage = device.storage_from_cpu_storage(&storage)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = array.to_cpu_storage(); let storage = device.storage_from_cpu_storage(&storage)?; Ok(Storage::Metal(storage)) } } } pub(crate) fn storage_owned<S: WithDType>(&self, data: Vec<S>) -> Result<Storage> { match self { Device::Cpu => Ok(Storage::Cpu(S::to_cpu_storage_owned(data))), Device::Cuda(device) => { let storage = S::to_cpu_storage_owned(data); let storage = device.storage_from_cpu_storage(&storage)?; Ok(Storage::Cuda(storage)) } Device::Metal(device) => { let storage = S::to_cpu_storage_owned(data); let storage = device.storage_from_cpu_storage(&storage)?; Ok(Storage::Metal(storage)) } } } }
0
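A typical device-selection sketch using the public pieces above (illustrative; it assumes the crate is consumed as `candle_core`, and `pick_device` is a hypothetical helper):

// Prefer GPU 0 when CUDA is compiled in and a device is present; otherwise
// fall back to the CPU backend, mirroring cuda_if_available above.
fn pick_device() -> candle_core::Result<candle_core::Device> {
    let device = candle_core::Device::cuda_if_available(0)?;
    if device.is_cuda() {
        // Reseed the per-device RNG so rand_uniform/rand_normal are
        // reproducible; on CUDA this rebuilds the curand generator.
        device.set_seed(42)?;
    }
    Ok(device)
}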
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/convert.rs
//! Implement conversion traits for tensors
use crate::{DType, Device, Error, Tensor, WithDType};
use half::{bf16, f16, slice::HalfFloatSliceExt};
use std::convert::TryFrom;

impl<T: WithDType> TryFrom<&Tensor> for Vec<T> {
    type Error = Error;
    fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> {
        tensor.to_vec1::<T>()
    }
}

impl<T: WithDType> TryFrom<&Tensor> for Vec<Vec<T>> {
    type Error = Error;
    fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> {
        tensor.to_vec2::<T>()
    }
}

impl<T: WithDType> TryFrom<&Tensor> for Vec<Vec<Vec<T>>> {
    type Error = Error;
    fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> {
        tensor.to_vec3::<T>()
    }
}

impl<T: WithDType> TryFrom<Tensor> for Vec<T> {
    type Error = Error;
    fn try_from(tensor: Tensor) -> Result<Self, Self::Error> {
        Vec::<T>::try_from(&tensor)
    }
}

impl<T: WithDType> TryFrom<Tensor> for Vec<Vec<T>> {
    type Error = Error;
    fn try_from(tensor: Tensor) -> Result<Self, Self::Error> {
        Vec::<Vec<T>>::try_from(&tensor)
    }
}

impl<T: WithDType> TryFrom<Tensor> for Vec<Vec<Vec<T>>> {
    type Error = Error;
    fn try_from(tensor: Tensor) -> Result<Self, Self::Error> {
        Vec::<Vec<Vec<T>>>::try_from(&tensor)
    }
}

impl<T: WithDType> TryFrom<&[T]> for Tensor {
    type Error = Error;
    fn try_from(v: &[T]) -> Result<Self, Self::Error> {
        Tensor::from_slice(v, v.len(), &Device::Cpu)
    }
}

impl<T: WithDType> TryFrom<Vec<T>> for Tensor {
    type Error = Error;
    fn try_from(v: Vec<T>) -> Result<Self, Self::Error> {
        let len = v.len();
        Tensor::from_vec(v, len, &Device::Cpu)
    }
}

macro_rules! from_tensor {
    ($typ:ident) => {
        impl TryFrom<&Tensor> for $typ {
            type Error = Error;
            fn try_from(tensor: &Tensor) -> Result<Self, Self::Error> {
                tensor.to_scalar::<$typ>()
            }
        }

        impl TryFrom<Tensor> for $typ {
            type Error = Error;
            fn try_from(tensor: Tensor) -> Result<Self, Self::Error> {
                $typ::try_from(&tensor)
            }
        }

        impl TryFrom<$typ> for Tensor {
            type Error = Error;
            fn try_from(v: $typ) -> Result<Self, Self::Error> {
                Tensor::new(v, &Device::Cpu)
            }
        }
    };
}

from_tensor!(f64);
from_tensor!(f32);
from_tensor!(f16);
from_tensor!(bf16);
from_tensor!(i64);
from_tensor!(u32);
from_tensor!(u8);

impl Tensor {
    pub fn write_bytes<W: std::io::Write>(&self, f: &mut W) -> crate::Result<()> {
        use byteorder::{LittleEndian, WriteBytesExt};
        let vs = self.flatten_all()?;
        match self.dtype() {
            DType::BF16 => {
                let vs = vs.to_vec1::<bf16>()?;
                for &v in vs.reinterpret_cast() {
                    f.write_u16::<LittleEndian>(v)?
                }
            }
            DType::F16 => {
                let vs = vs.to_vec1::<f16>()?;
                for &v in vs.reinterpret_cast() {
                    f.write_u16::<LittleEndian>(v)?
                }
            }
            DType::F32 => {
                // TODO: Avoid using a buffer when data is already on the CPU.
                for v in vs.to_vec1::<f32>()? {
                    f.write_f32::<LittleEndian>(v)?
                }
            }
            DType::F64 => {
                for v in vs.to_vec1::<f64>()? {
                    f.write_f64::<LittleEndian>(v)?
                }
            }
            DType::U32 => {
                for v in vs.to_vec1::<u32>()? {
                    f.write_u32::<LittleEndian>(v)?
                }
            }
            DType::I64 => {
                for v in vs.to_vec1::<i64>()? {
                    f.write_i64::<LittleEndian>(v)?
                }
            }
            DType::U8 => {
                let vs = vs.to_vec1::<u8>()?;
                f.write_all(&vs)?;
            }
        }
        Ok(())
    }
}
0
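An illustrative round-trip through the conversion impls above (a sketch, assuming the crate is consumed as `candle_core`; `conversion_roundtrip` is a hypothetical helper):

use std::convert::TryFrom;

fn conversion_roundtrip() -> candle_core::Result<()> {
    // Vec<T> -> Tensor builds a rank-1 CPU tensor via Tensor::from_vec.
    let t = candle_core::Tensor::try_from(vec![1f32, 2.0, 3.0])?;
    // &Tensor -> Vec<T> delegates to to_vec1 and checks dtype and rank.
    let v = Vec::<f32>::try_from(&t)?;
    assert_eq!(v, vec![1f32, 2.0, 3.0]);
    // Scalar types round-trip through to_scalar via the from_tensor! macro.
    let s = f32::try_from(candle_core::Tensor::try_from(2f32)?)?;
    assert_eq!(s, 2f32);
    Ok(())
}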
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/scalar.rs
use crate::{Result, Tensor, WithDType};

pub enum TensorScalar {
    Tensor(Tensor),
    Scalar(Tensor),
}

pub trait TensorOrScalar {
    fn to_tensor_scalar(self) -> Result<TensorScalar>;
}

impl TensorOrScalar for &Tensor {
    fn to_tensor_scalar(self) -> Result<TensorScalar> {
        Ok(TensorScalar::Tensor(self.clone()))
    }
}

impl<T: WithDType> TensorOrScalar for T {
    fn to_tensor_scalar(self) -> Result<TensorScalar> {
        let scalar = Tensor::new(self, &crate::Device::Cpu)?;
        Ok(TensorScalar::Scalar(scalar))
    }
}
0
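A sketch of how `TensorOrScalar` gets consumed by binary ops (the function name is illustrative, not crate API):

// Normalize a right-hand side that may be a tensor or a plain number.
fn rhs_as_tensor<T: TensorOrScalar>(rhs: T) -> Result<Tensor> {
    match rhs.to_tensor_scalar()? {
        // Borrowed tensors are cloned; candle tensors share their storage,
        // so the clone is cheap.
        TensorScalar::Tensor(t) => Ok(t),
        // Scalars become rank-0 CPU tensors, ready for broadcasting.
        TensorScalar::Scalar(s) => Ok(s),
    }
}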
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/error.rs
use crate::{DType, DeviceLocation, Layout, MetalError, Shape}; #[derive(Debug, Clone)] pub struct MatMulUnexpectedStriding { pub lhs_l: Layout, pub rhs_l: Layout, pub bmnk: (usize, usize, usize, usize), pub msg: &'static str, } /// Main library error type. #[derive(thiserror::Error, Debug)] pub enum Error { // === DType Errors === #[error("{msg}, expected: {expected:?}, got: {got:?}")] UnexpectedDType { msg: &'static str, expected: DType, got: DType, }, #[error("dtype mismatch in {op}, lhs: {lhs:?}, rhs: {rhs:?}")] DTypeMismatchBinaryOp { lhs: DType, rhs: DType, op: &'static str, }, #[error("unsupported dtype {0:?} for op {1}")] UnsupportedDTypeForOp(DType, &'static str), // === Dimension Index Errors === #[error("{op}: dimension index {dim} out of range for shape {shape:?}")] DimOutOfRange { shape: Shape, dim: i32, op: &'static str, }, #[error("{op}: duplicate dim index {dims:?} for shape {shape:?}")] DuplicateDimIndex { shape: Shape, dims: Vec<usize>, op: &'static str, }, // === Shape Errors === #[error("unexpected rank, expected: {expected}, got: {got} ({shape:?})")] UnexpectedNumberOfDims { expected: usize, got: usize, shape: Shape, }, #[error("{msg}, expected: {expected:?}, got: {got:?}")] UnexpectedShape { msg: String, expected: Shape, got: Shape, }, #[error( "Shape mismatch, got buffer of size {buffer_size} which is compatible with shape {shape:?}" )] ShapeMismatch { buffer_size: usize, shape: Shape }, #[error("shape mismatch in {op}, lhs: {lhs:?}, rhs: {rhs:?}")] ShapeMismatchBinaryOp { lhs: Shape, rhs: Shape, op: &'static str, }, #[error("shape mismatch in cat for dim {dim}, shape for arg 1: {first_shape:?} shape for arg {n}: {nth_shape:?}")] ShapeMismatchCat { dim: usize, first_shape: Shape, n: usize, nth_shape: Shape, }, #[error("Cannot divide tensor of shape {shape:?} equally along dim {dim} into {n_parts}")] ShapeMismatchSplit { shape: Shape, dim: usize, n_parts: usize, }, #[error("{op} can only be performed on a single dimension")] OnlySingleDimension { op: &'static str, dims: Vec<usize> }, #[error("empty tensor for {op}")] EmptyTensor { op: &'static str }, // === Device Errors === #[error("device mismatch in {op}, lhs: {lhs:?}, rhs: {rhs:?}")] DeviceMismatchBinaryOp { lhs: DeviceLocation, rhs: DeviceLocation, op: &'static str, }, // === Op Specific Errors === #[error("narrow invalid args {msg}: {shape:?}, dim: {dim}, start: {start}, len:{len}")] NarrowInvalidArgs { shape: Shape, dim: usize, start: usize, len: usize, msg: &'static str, }, #[error("conv1d invalid args {msg}: inp: {inp_shape:?}, k: {k_shape:?}, pad: {padding}, stride: {stride}")] Conv1dInvalidArgs { inp_shape: Shape, k_shape: Shape, padding: usize, stride: usize, msg: &'static str, }, #[error("{op} invalid index {index} with dim size {size}")] InvalidIndex { op: &'static str, index: usize, size: usize, }, #[error("cannot broadcast {src_shape:?} to {dst_shape:?}")] BroadcastIncompatibleShapes { src_shape: Shape, dst_shape: Shape }, #[error("cannot set variable {msg}")] CannotSetVar { msg: &'static str }, // Box indirection to avoid large variant. 
#[error("{0:?}")] MatMulUnexpectedStriding(Box<MatMulUnexpectedStriding>), #[error("{op} only supports contiguous tensors")] RequiresContiguous { op: &'static str }, #[error("{op} expects at least one tensor")] OpRequiresAtLeastOneTensor { op: &'static str }, #[error("{op} expects at least two tensors")] OpRequiresAtLeastTwoTensors { op: &'static str }, #[error("backward is not supported for {op}")] BackwardNotSupported { op: &'static str }, // === Other Errors === #[error("the candle crate has not been built with cuda support")] NotCompiledWithCudaSupport, #[error("the candle crate has not been built with metal support")] NotCompiledWithMetalSupport, #[error("cannot find tensor {path}")] CannotFindTensor { path: String }, // === Wrapped Errors === #[error(transparent)] Cuda(Box<dyn std::error::Error + Send + Sync>), #[error("Metal error {0}")] Metal(#[from] MetalError), #[error(transparent)] TryFromIntError(#[from] core::num::TryFromIntError), #[error("npy/npz error {0}")] Npy(String), /// Zip file format error. #[error(transparent)] Zip(#[from] zip::result::ZipError), /// Integer parse error. #[error(transparent)] ParseInt(#[from] std::num::ParseIntError), /// I/O error. #[error(transparent)] Io(#[from] std::io::Error), /// SafeTensor error. #[error(transparent)] SafeTensor(#[from] safetensors::SafeTensorError), #[error("unsupported safetensor dtype {0:?}")] UnsupportedSafeTensorDtype(safetensors::Dtype), /// Arbitrary errors wrapping. #[error(transparent)] Wrapped(Box<dyn std::error::Error + Send + Sync>), /// Adding path information to an error. #[error("path: {path:?} {inner}")] WithPath { inner: Box<Self>, path: std::path::PathBuf, }, #[error("{inner}\n{backtrace}")] WithBacktrace { inner: Box<Self>, backtrace: Box<std::backtrace::Backtrace>, }, /// User generated error message, typically created via `bail!`. #[error("{0}")] Msg(String), } pub type Result<T> = std::result::Result<T, Error>; impl Error { pub fn wrap(err: impl std::error::Error + Send + Sync + 'static) -> Self { Self::Wrapped(Box::new(err)).bt() } pub fn msg(err: impl std::error::Error + Send + Sync + 'static) -> Self { Self::Msg(err.to_string()).bt() } pub fn bt(self) -> Self { let backtrace = std::backtrace::Backtrace::capture(); match backtrace.status() { std::backtrace::BacktraceStatus::Disabled | std::backtrace::BacktraceStatus::Unsupported => self, _ => Self::WithBacktrace { inner: Box::new(self), backtrace: Box::new(backtrace), }, } } pub fn with_path<P: AsRef<std::path::Path>>(self, p: P) -> Self { Self::WithPath { inner: Box::new(self), path: p.as_ref().to_path_buf(), } } } #[macro_export] macro_rules! bail { ($msg:literal $(,)?) => { return Err($crate::Error::Msg(format!($msg).into()).bt()) }; ($err:expr $(,)?) => { return Err($crate::Error::Msg(format!($err).into()).bt()) }; ($fmt:expr, $($arg:tt)*) => { return Err($crate::Error::Msg(format!($fmt, $($arg)*).into()).bt()) }; } pub fn zip<T, U>(r1: Result<T>, r2: Result<U>) -> Result<(T, U)> { match (r1, r2) { (Ok(r1), Ok(r2)) => Ok((r1, r2)), (Err(e), _) => Err(e), (_, Err(e)) => Err(e), } }
0
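An illustrative in-crate sketch combining the error helpers above (`load_nonempty` is hypothetical):

// Combines bail!, the #[from] io::Error conversion, and with_path context.
fn load_nonempty(path: &std::path::Path) -> Result<Vec<u8>> {
    let bytes = std::fs::read(path).map_err(|e| Error::from(e).with_path(path))?;
    if bytes.is_empty() {
        // bail! wraps the message in Error::Msg and captures a backtrace.
        crate::bail!("no data found in {path:?}")
    }
    Ok(bytes)
}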
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/cuda_backend.rs
use crate::backend::{BackendDevice, BackendStorage}; use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT}; use crate::{CpuStorage, DType, Layout, Result, Shape, WithDType}; pub use candle_kernels as kernels; pub use cudarc; use cudarc::cublas::{Gemm, GemmConfig, StridedBatchedConfig}; use cudarc::driver::{ CudaFunction, CudaSlice, DevicePtr, DeviceRepr, DeviceSlice, LaunchAsync, LaunchConfig, ValidAsZeroBits, }; use half::{bf16, f16}; use std::sync::{Arc, Mutex}; /// cudarc related errors #[derive(thiserror::Error, Debug)] pub enum CudaError { #[error(transparent)] Cuda(#[from] cudarc::driver::DriverError), #[error(transparent)] Compiler(#[from] cudarc::nvrtc::CompileError), #[error(transparent)] Cublas(#[from] cudarc::cublas::result::CublasError), #[error(transparent)] Curand(#[from] cudarc::curand::result::CurandError), #[error("missing kernel '{module_name}'")] MissingKernel { module_name: String }, #[error("unsupported dtype {dtype:?} for {op}")] UnsupportedDtype { dtype: DType, op: &'static str }, #[error("internal error '{0}'")] InternalError(&'static str), #[error("matmul is only supported for contiguous tensors lstride: {lhs_stride:?} rstride: {rhs_stride:?} mnk: {mnk:?}")] MatMulNonContiguous { lhs_stride: Vec<usize>, rhs_stride: Vec<usize>, mnk: (usize, usize, usize), }, #[error("{msg}, expected: {expected:?}, got: {got:?}")] UnexpectedDType { msg: &'static str, expected: DType, got: DType, }, #[error("{cuda} when loading {module_name}")] Load { cuda: cudarc::driver::DriverError, module_name: String, }, } impl From<CudaError> for crate::Error { fn from(val: CudaError) -> Self { crate::Error::Cuda(Box::new(val)).bt() } } /// Unique identifier for cuda devices. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct DeviceId(usize); impl DeviceId { fn new() -> Self { // https://users.rust-lang.org/t/idiomatic-rust-way-to-generate-unique-id/33805 use std::sync::atomic; static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(1); Self(COUNTER.fetch_add(1, atomic::Ordering::Relaxed)) } } struct CudaRng(cudarc::curand::CudaRng); unsafe impl Send for CudaRng {} #[derive(Clone)] pub struct CudaDevice { id: DeviceId, device: Arc<cudarc::driver::CudaDevice>, blas: Arc<cudarc::cublas::CudaBlas>, curand: Arc<Mutex<CudaRng>>, } impl std::fmt::Debug for CudaDevice { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "CudaDevice({:?})", self.id) } } impl std::ops::Deref for CudaDevice { type Target = Arc<cudarc::driver::CudaDevice>; fn deref(&self) -> &Self::Target { &self.device } } pub trait WrapErr<O> { fn w(self) -> std::result::Result<O, crate::Error>; } impl<O, E: Into<CudaError>> WrapErr<O> for std::result::Result<O, E> { fn w(self) -> std::result::Result<O, crate::Error> { self.map_err(|e| crate::Error::Cuda(Box::new(e.into()))) } } impl CudaDevice { pub fn cuda_device(&self) -> Arc<cudarc::driver::CudaDevice> { self.device.clone() } pub fn id(&self) -> DeviceId { self.id } fn const_impl(&self, v: f64, shape: &Shape, dtype: DType) -> Result<CudaStorage> { let elem_count = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(elem_count as u32); let slice = match dtype { DType::U8 => { // SAFETY: Set later by running the fill kernel. let data = unsafe { self.alloc::<u8>(elem_count) }.w()?; let func = self.get_or_load_func("fill_u8", kernels::FILL)?; let params = (&data, v as u8, elem_count); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::U8(data) } DType::U32 => { // SAFETY: Set later by running the fill kernel. 
let data = unsafe { self.alloc::<u32>(elem_count) }.w()?; let func = self.get_or_load_func("fill_u32", kernels::FILL)?; let params = (&data, v as u32, elem_count); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::U32(data) } DType::I64 => { // SAFETY: Set later by running the fill kernel. let data = unsafe { self.alloc::<i64>(elem_count) }.w()?; let func = self.get_or_load_func("fill_i64", kernels::FILL)?; let params = (&data, v as i64, elem_count); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::I64(data) } DType::BF16 => { // SAFETY: Set later by running the fill kernel. let data = unsafe { self.alloc::<bf16>(elem_count) }.w()?; let func = self.get_or_load_func("fill_bf16", kernels::FILL)?; let params = (&data, bf16::from_f64(v), elem_count); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::BF16(data) } DType::F16 => { // SAFETY: Set later by running the fill kernel. let data = unsafe { self.alloc::<f16>(elem_count) }.w()?; let func = self.get_or_load_func("fill_f16", kernels::FILL)?; let params = (&data, f16::from_f64(v), elem_count); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::F16(data) } DType::F32 => { // SAFETY: Set later by running the fill kernel. let data = unsafe { self.alloc::<f32>(elem_count) }.w()?; let func = self.get_or_load_func("fill_f32", kernels::FILL)?; let params = (&data, v as f32, elem_count); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::F32(data) } DType::F64 => { // SAFETY: Set later by running the fill kernel. let data = unsafe { self.alloc::<f64>(elem_count) }.w()?; let func = self.get_or_load_func("fill_f64", kernels::FILL)?; let params = (&data, v, elem_count); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::F64(data) } }; Ok(CudaStorage { slice, device: self.clone(), }) } pub fn get_or_load_func(&self, module_name: &str, ptx: &'static str) -> Result<CudaFunction> { if !self.has_func(module_name, module_name) { // Leaking the string here is a bit sad but we need a &'static str and this is only // done once per kernel name. let static_module_name = Box::leak(module_name.to_string().into_boxed_str()); self.load_ptx(ptx.into(), module_name, &[static_module_name]) .map_err(|cuda| CudaError::Load { cuda, module_name: module_name.to_string(), }) .w()?; } self.get_func(module_name, module_name) // Clippy recommends this `ok_or` rather than `ok_or_else` so hopefully the compiler is // able to only build the error value if needed. .ok_or(CudaError::MissingKernel { module_name: module_name.to_string(), }) .w() } } impl BackendDevice for CudaDevice { type Storage = CudaStorage; fn new(ordinal: usize) -> Result<Self> { let device = cudarc::driver::CudaDevice::new(ordinal).w()?; let blas = cudarc::cublas::CudaBlas::new(device.clone()).w()?; let curand = cudarc::curand::CudaRng::new(299792458, device.clone()).w()?; Ok(Self { id: DeviceId::new(), device, blas: Arc::new(blas), curand: Arc::new(Mutex::new(CudaRng(curand))), }) } fn set_seed(&self, seed: u64) -> Result<()> { // We do not call set_seed but instead create a new curand object. This ensures that the // state will be identical and the same random numbers will be generated. 
let mut curand = self.curand.lock().unwrap(); curand.0 = cudarc::curand::CudaRng::new(seed, self.device.clone()).w()?; Ok(()) } fn location(&self) -> crate::DeviceLocation { crate::DeviceLocation::Cuda { gpu_id: self.device.ordinal(), } } fn same_device(&self, rhs: &Self) -> bool { self.id == rhs.id } fn zeros_impl(&self, shape: &Shape, dtype: DType) -> Result<CudaStorage> { let elem_count = shape.elem_count(); let slice = match dtype { DType::U8 => { let data = self.alloc_zeros::<u8>(elem_count).w()?; CudaStorageSlice::U8(data) } DType::U32 => { let data = self.alloc_zeros::<u32>(elem_count).w()?; CudaStorageSlice::U32(data) } DType::I64 => { let data = self.alloc_zeros::<i64>(elem_count).w()?; CudaStorageSlice::I64(data) } DType::BF16 => { let data = self.alloc_zeros::<bf16>(elem_count).w()?; CudaStorageSlice::BF16(data) } DType::F16 => { let data = self.alloc_zeros::<f16>(elem_count).w()?; CudaStorageSlice::F16(data) } DType::F32 => { let data = self.alloc_zeros::<f32>(elem_count).w()?; CudaStorageSlice::F32(data) } DType::F64 => { let data = self.alloc_zeros::<f64>(elem_count).w()?; CudaStorageSlice::F64(data) } }; Ok(CudaStorage { slice, device: self.clone(), }) } fn rand_uniform(&self, shape: &Shape, dtype: DType, lo: f64, up: f64) -> Result<CudaStorage> { let elem_count = shape.elem_count(); let curand = self.curand.lock().unwrap(); let slice = match dtype { // TODO: Add support for F16 and BF16 though this is likely to require some upstream // cudarc changes. DType::U8 | DType::U32 | DType::I64 | DType::F16 | DType::BF16 => { Err(CudaError::UnsupportedDtype { dtype, op: "rand_uniform", }) .w()? } DType::F32 => { let mut data = unsafe { self.alloc::<f32>(elem_count) }.w()?; curand.0.fill_with_uniform(&mut data).w()?; CudaStorageSlice::F32(data) } DType::F64 => { let mut data = unsafe { self.alloc::<f64>(elem_count) }.w()?; curand.0.fill_with_uniform(&mut data).w()?; CudaStorageSlice::F64(data) } }; let slice = if lo == 0. && up == 1.0 { slice } else { let layout = Layout::contiguous(shape); Affine(up - lo, lo).map(&slice, self, &layout)? }; Ok(CudaStorage { slice, device: self.clone(), }) } fn rand_normal(&self, shape: &Shape, dtype: DType, mean: f64, std: f64) -> Result<CudaStorage> { // TODO: Add support for F16 and BF16 though this is likely to require some upstream // cudarc changes. let elem_count = shape.elem_count(); let curand = self.curand.lock().unwrap(); // curand can only generate an odd number of values. // https://github.com/huggingface/candle/issues/734 let elem_count_round = if elem_count % 2 == 1 { elem_count + 1 } else { elem_count }; let slice = match dtype { DType::U8 | DType::U32 | DType::I64 | DType::F16 | DType::BF16 => { Err(CudaError::UnsupportedDtype { dtype, op: "rand_normal", }) .w()? 
} DType::F32 => { let mut data = unsafe { self.alloc::<f32>(elem_count_round) }.w()?; curand .0 .fill_with_normal(&mut data, mean as f32, std as f32) .w()?; CudaStorageSlice::F32(data) } DType::F64 => { let mut data = unsafe { self.alloc::<f64>(elem_count_round) }.w()?; curand.0.fill_with_normal(&mut data, mean, std).w()?; CudaStorageSlice::F64(data) } }; Ok(CudaStorage { slice, device: self.clone(), }) } fn ones_impl(&self, shape: &Shape, dtype: DType) -> Result<CudaStorage> { self.const_impl(1., shape, dtype) } fn storage_from_cpu_storage(&self, storage: &CpuStorage) -> Result<CudaStorage> { let slice = match storage { CpuStorage::U8(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::U8(data) } CpuStorage::U32(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::U32(data) } CpuStorage::I64(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::I64(data) } CpuStorage::BF16(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::BF16(data) } CpuStorage::F16(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::F16(data) } CpuStorage::F32(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::F32(data) } CpuStorage::F64(storage) => { let data = self.htod_sync_copy(storage).w()?; CudaStorageSlice::F64(data) } }; Ok(CudaStorage { slice, device: self.clone(), }) } } #[derive(Debug)] pub enum CudaStorageSlice { U8(CudaSlice<u8>), U32(CudaSlice<u32>), I64(CudaSlice<i64>), BF16(CudaSlice<bf16>), F16(CudaSlice<f16>), F32(CudaSlice<f32>), F64(CudaSlice<f64>), } type S = CudaStorageSlice; pub trait Map1 { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>>; fn map(&self, s: &S, d: &CudaDevice, l: &Layout) -> Result<S> { let out = match s { S::U8(s) => S::U8(self.f(s, d, l)?), S::U32(s) => S::U32(self.f(s, d, l)?), S::I64(s) => S::I64(self.f(s, d, l)?), S::BF16(s) => S::BF16(self.f(s, d, l)?), S::F16(s) => S::F16(self.f(s, d, l)?), S::F32(s) => S::F32(self.f(s, d, l)?), S::F64(s) => S::F64(self.f(s, d, l)?), }; Ok(out) } } pub trait Map2 { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src1: &CudaSlice<T>, layout1: &Layout, src2: &CudaSlice<T>, layout2: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>>; fn map(&self, s1: &S, l1: &Layout, s2: &S, l2: &Layout, d: &CudaDevice) -> Result<S> { let out = match (s1, s2) { (S::U8(s1), S::U8(s2)) => S::U8(self.f(s1, l1, s2, l2, d)?), (S::U32(s1), S::U32(s2)) => S::U32(self.f(s1, l1, s2, l2, d)?), (S::I64(s1), S::I64(s2)) => S::I64(self.f(s1, l1, s2, l2, d)?), (S::BF16(s1), S::BF16(s2)) => S::BF16(self.f(s1, l1, s2, l2, d)?), (S::F16(s1), S::F16(s2)) => S::F16(self.f(s1, l1, s2, l2, d)?), (S::F32(s1), S::F32(s2)) => S::F32(self.f(s1, l1, s2, l2, d)?), (S::F64(s1), S::F64(s2)) => S::F64(self.f(s1, l1, s2, l2, d)?), _ => Err(CudaError::InternalError("dtype mismatch in binary op"))?, }; Ok(out) } } pub trait Map2InPlace { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, dst: &mut CudaSlice<T>, dst_shape: &Shape, src: &CudaSlice<T>, src_l: &Layout, dev: &CudaDevice, ) -> Result<()>; fn map( &self, dst: &mut S, dst_s: &Shape, src: &S, src_l: &Layout, d: &CudaDevice, ) -> Result<()> { match (dst, src) { (S::U8(dst), S::U8(src)) => self.f(dst, dst_s, src, src_l, d), (S::U32(dst), S::U32(src)) => self.f(dst, dst_s, src, src_l, d), (S::I64(dst), S::I64(src)) => self.f(dst, dst_s, src, src_l, d), (S::BF16(dst), 
S::BF16(src)) => self.f(dst, dst_s, src, src_l, d), (S::F16(dst), S::F16(src)) => self.f(dst, dst_s, src, src_l, d), (S::F32(dst), S::F32(src)) => self.f(dst, dst_s, src, src_l, d), (S::F64(dst), S::F64(src)) => self.f(dst, dst_s, src, src_l, d), _ => Err(CudaError::InternalError("dtype mismatch in binary op"))?, } } } pub trait Map1Any { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits, W: Fn(CudaSlice<T>) -> S>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, wrap: W, ) -> Result<S>; fn map(&self, s: &S, d: &CudaDevice, l: &Layout) -> Result<S> { let out = match s { S::U8(s) => self.f(s, d, l, S::U8)?, S::U32(s) => self.f(s, d, l, S::U32)?, S::I64(s) => self.f(s, d, l, S::I64)?, S::BF16(s) => self.f(s, d, l, S::BF16)?, S::F16(s) => self.f(s, d, l, S::F16)?, S::F32(s) => self.f(s, d, l, S::F32)?, S::F64(s) => self.f(s, d, l, S::F64)?, }; Ok(out) } } pub trait Map2Any { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src1: &CudaSlice<T>, layout1: &Layout, src2: &CudaSlice<T>, layout2: &Layout, dev: &CudaDevice, ) -> Result<S>; fn map(&self, s1: &S, l1: &Layout, s2: &S, l2: &Layout, d: &CudaDevice) -> Result<S> { let out = match (s1, s2) { (S::U8(s1), S::U8(s2)) => self.f(s1, l1, s2, l2, d)?, (S::U32(s1), S::U32(s2)) => self.f(s1, l1, s2, l2, d)?, (S::I64(s1), S::I64(s2)) => self.f(s1, l1, s2, l2, d)?, (S::BF16(s1), S::BF16(s2)) => self.f(s1, l1, s2, l2, d)?, (S::F16(s1), S::F16(s2)) => self.f(s1, l1, s2, l2, d)?, (S::F32(s1), S::F32(s2)) => self.f(s1, l1, s2, l2, d)?, (S::F64(s1), S::F64(s2)) => self.f(s1, l1, s2, l2, d)?, _ => Err(CudaError::InternalError("dtype mismatch in binary op")).w()?, }; Ok(out) } } struct Clone; impl Map1 for Clone { fn f<T: DeviceRepr>( &self, s: &CudaSlice<T>, _: &CudaDevice, _: &Layout, ) -> Result<CudaSlice<T>> { s.try_clone().w() } } pub fn kernel_name<T: WithDType>(root: &str) -> String { let dtype = T::DTYPE.as_str(); format!("{root}_{dtype}") } struct Affine(f64, f64); impl Map1 for Affine { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let dims = shape.dims(); let el = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(el as u32); let ds = dev.htod_copy([dims, layout.stride()].concat()).w()?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>("affine"), kernels::AFFINE)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(el) }.w()?; let params = ( el, dims.len(), &ds, src, &out, T::from_f64(self.0), T::from_f64(self.1), ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct Elu(f64); impl Map1 for Elu { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let dims = shape.dims(); let el = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(el as u32); let ds = dev.htod_copy([dims, layout.stride()].concat()).w()?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>("uelu"), kernels::UNARY)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(el) }.w()?; let params = (el, dims.len(), &ds, T::from_f64(self.0), src, &out); // SAFETY: ffi. 
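// (added note, not in the original source: the params tuple above is
// marshalled positionally by cudarc's launch, so its order must match the
// uelu kernel's parameter list in candle-kernels.)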
unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct Im2Col1D { l_k: usize, stride: usize, dilation: usize, padding: usize, } impl Im2Col1D { fn l_out(&self, l: usize) -> usize { (l + 2 * self.padding - self.dilation * (self.l_k - 1) - 1) / self.stride + 1 } } impl Map1 for Im2Col1D { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let dims = shape.dims(); let l_out = self.l_out(dims[2]); let dst_el = dims[0] * l_out * dims[1] * self.l_k; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let ds = dev.htod_copy([dims, layout.stride()].concat()).w()?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>("im2col1d"), kernels::CONV)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(dst_el) }.w()?; let params = ( dst_el, l_out, self.l_k, self.stride, self.padding, self.dilation, &ds, src, &dst, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } } struct Im2Col { h_k: usize, w_k: usize, stride: usize, dilation: usize, padding: usize, } impl Im2Col { fn hw_out(&self, h: usize, w: usize) -> (usize, usize) { let h_out = (h + 2 * self.padding - self.dilation * (self.h_k - 1) - 1) / self.stride + 1; let w_out = (w + 2 * self.padding - self.dilation * (self.w_k - 1) - 1) / self.stride + 1; (h_out, w_out) } } impl Map1 for Im2Col { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let dims = shape.dims(); let (h_out, w_out) = self.hw_out(dims[2], dims[3]); let dst_el = dims[0] * h_out * w_out * dims[1] * self.h_k * self.w_k; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let ds = dev.htod_copy([dims, layout.stride()].concat()).w()?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>("im2col"), kernels::CONV)?; // SAFETY: Set later by running the kernel. let dst = unsafe { dev.alloc::<T>(dst_el) }.w()?; let params = ( dst_el, h_out, w_out, self.h_k, self.w_k, self.stride, self.padding, self.dilation, &ds, src, &dst, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(dst) } } struct Powf(f64); impl Map1 for Powf { fn f<T: DeviceRepr + WithDType>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let dims = shape.dims(); let el = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(el as u32); let ds = dev.htod_copy([dims, layout.stride()].concat()).w()?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>("upowf"), kernels::UNARY)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(el) }.w()?; let params = (el, dims.len(), &ds, T::from_f64(self.0), src, &out); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct Sum<'a>(&'a [usize]); impl<'a> Map1 for Sum<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let src_dims = shape.dims(); let el = shape.elem_count(); let mut dst_el = el; for &sum_dim in self.0.iter() { dst_el /= src_dims[sum_dim]; } let mut sum_dims = self.0.to_vec(); // Sort the sum_dims as they have to be processed from left to right when converting the // indexes. 
sum_dims.sort(); let sum_dims_l: Vec<usize> = sum_dims.iter().map(|&d| src_dims[d]).collect(); let sum_dims_s: Vec<usize> = sum_dims .iter() .map(|&d| src_dims[d + 1..].iter().product::<usize>()) .collect(); let cfg = LaunchConfig::for_num_elems(el as u32); let ds = dev .htod_copy([src_dims, layout.stride(), &sum_dims_l, &sum_dims_s].concat()) .w()?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>("sum"), kernels::REDUCE)?; let out = dev.alloc_zeros::<T>(dst_el).w()?; let params = (el, src_dims.len(), sum_dims.len(), &ds, src, &out); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct FastReduce<'a>(&'a [usize], ReduceOp); impl<'a> Map1Any for FastReduce<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits, W: Fn(CudaSlice<T>) -> S>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, wrap: W, ) -> Result<S> { let src_stride = layout.stride(); let src_dims = layout.shape().dims(); let src_el: usize = src_dims.iter().product(); // Source dims and strides with the sum dims at the end. let mut dims = vec![]; let mut stride = vec![]; let mut dst_el: usize = 1; for (dim_idx, &d) in src_dims.iter().enumerate() { if !self.0.contains(&dim_idx) { dst_el *= d; dims.push(d); stride.push(src_stride[dim_idx]); } } for &dim_idx in self.0.iter() { dims.push(src_dims[dim_idx]); stride.push(src_stride[dim_idx]); } let el_to_sum_per_block = src_el / dst_el; // The reduction loop requires the shared array to be properly initialized and for // this we want the number of threads to be a power of two. let block_dim = usize::min(1024, el_to_sum_per_block).next_power_of_two(); let cfg = LaunchConfig { // TODO: Maybe use grid_y if the output is too large? // TODO: Specialized implementation when reducing on no or all dimensions or when // reducing only aggregate a small number of elements together. grid_dim: (dst_el as u32, 1, 1), block_dim: (block_dim as u32, 1, 1), shared_mem_bytes: 0, }; let ds = dev .htod_copy([dims.as_slice(), stride.as_slice()].concat()) .w()?; let src = &src.slice(layout.start_offset()..); let (name, check_empty, return_index) = match self.1 { ReduceOp::Sum => ("fast_sum", false, false), ReduceOp::Min => ("fast_min", true, false), ReduceOp::Max => ("fast_max", true, false), ReduceOp::ArgMin => ("fast_argmin", true, true), ReduceOp::ArgMax => ("fast_argmax", true, true), }; if check_empty && layout.shape().elem_count() == 0 { Err(crate::Error::EmptyTensor { op: "reduce" }.bt())? } let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::REDUCE)?; if return_index { // SAFETY: filled in by the follow up kernel. let out = unsafe { dev.alloc::<u32>(dst_el) }.w()?; let params = (src_el, el_to_sum_per_block, src_dims.len(), &ds, src, &out); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(S::U32(out)) } else { // SAFETY: filled in by the follow up kernel. let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let params = (src_el, el_to_sum_per_block, src_dims.len(), &ds, src, &out); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(wrap(out)) } } } impl<U: UnaryOpT> Map1 for U { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src: &CudaSlice<T>, dev: &CudaDevice, layout: &Layout, ) -> Result<CudaSlice<T>> { let shape = layout.shape(); let dims = shape.dims(); let el_count = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(el_count as u32); let ds = dev.htod_copy([dims, layout.stride()].concat()).w()?; let src = &src.slice(layout.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>(U::KERNEL), kernels::UNARY)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(el_count) }.w()?; let params = (el_count, dims.len(), &ds, src, &out); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct IndexSelect<'a>(&'a CudaStorage, &'a Layout, usize); impl<'a> Map1 for IndexSelect<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src: &CudaSlice<T>, dev: &CudaDevice, src_l: &Layout, ) -> Result<CudaSlice<T>> { let ids_l = &self.1; let (name, ids) = match &self.0.slice { CudaStorageSlice::U32(slice) => { ("is_u32", *slice.slice(ids_l.start_offset()..).device_ptr()) } CudaStorageSlice::U8(slice) => { ("is_u8", *slice.slice(ids_l.start_offset()..).device_ptr()) } CudaStorageSlice::I64(slice) => { ("is_i64", *slice.slice(ids_l.start_offset()..).device_ptr()) } _ => Err(CudaError::UnexpectedDType { msg: "index_select ids should be u8 or u32", expected: DType::U32, got: self.0.dtype(), }) .w()?, }; let ids_shape = ids_l.shape(); let ids_dims = ids_shape.dims(); let ds = dev.htod_copy([ids_dims, ids_l.stride()].concat()).w()?; let src = match src_l.contiguous_offsets() { Some((o1, o2)) => src.slice(o1..o2), None => Err(crate::Error::RequiresContiguous { op: "index-select" }.bt())?, }; let left_size: usize = src_l.dims()[..self.2].iter().product(); let right_size: usize = src_l.dims()[self.2 + 1..].iter().product(); let src_dim_size = src_l.dims()[self.2]; let ids_dim_size = ids_shape.elem_count(); let dst_el = ids_shape.elem_count() * left_size * right_size; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::INDEXING)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let params = ( dst_el, ids_dims.len(), &ds, ids, &src, &out, left_size, src_dim_size, ids_dim_size, right_size, ); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct Gather<'a>(&'a CudaStorage, &'a Layout, usize); impl<'a> Map1 for Gather<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, src: &CudaSlice<T>, dev: &CudaDevice, src_l: &Layout, ) -> Result<CudaSlice<T>> { let ids = &self.0; let ids_l = &self.1; let dim = self.2; let (ids_o1, ids_o2) = match ids_l.contiguous_offsets() { Some(o12) => o12, None => Err(crate::Error::RequiresContiguous { op: "gather" }.bt())?, }; let (name, ids) = match &ids.slice { CudaStorageSlice::U32(slice) => { ("gather_u32", *slice.slice(ids_o1..ids_o2).device_ptr()) } CudaStorageSlice::U8(slice) => ("gather_u8", *slice.slice(ids_o1..ids_o2).device_ptr()), CudaStorageSlice::I64(slice) => { ("gather_i64", *slice.slice(ids_o1..ids_o2).device_ptr()) } _ => Err(CudaError::UnexpectedDType { msg: "gather ids should be u8/u32/i64", expected: DType::U32, got: ids.dtype(), })?, }; let el = ids_l.shape().elem_count(); let cfg = LaunchConfig::for_num_elems(el as u32); let src = match src_l.contiguous_offsets() { Some((o1, o2)) => src.slice(o1..o2), None => Err(crate::Error::RequiresContiguous { op: "gather" }.bt())?, }; let left_sz: usize = src_l.dims()[..dim].iter().product(); let right_sz: usize = src_l.dims()[dim + 1..].iter().product(); let src_dim_sz = src_l.dims()[dim]; let ids_dim_sz = ids_l.dims()[dim]; let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::INDEXING)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(el) }.w()?; let params = ( el, ids, &src, &out, left_sz, src_dim_sz, ids_dim_sz, right_sz, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct IndexAdd<'a>(&'a CudaStorage, &'a Layout, usize); impl<'a> Map2InPlace for IndexAdd<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, dst: &mut CudaSlice<T>, dst_shape: &Shape, src: &CudaSlice<T>, src_l: &Layout, dev: &CudaDevice, ) -> Result<()> { let ids = &self.0; let ids_l = &self.1; let dim = self.2; let (ids_o1, ids_o2) = match ids_l.contiguous_offsets() { Some(o12) => o12, None => Err(crate::Error::RequiresContiguous { op: "index-add" }.bt())?, }; let (name, ids) = match &ids.slice { CudaStorageSlice::U32(slice) => ("ia_u32", *slice.slice(ids_o1..ids_o2).device_ptr()), CudaStorageSlice::I64(slice) => ("ia_i64", *slice.slice(ids_o1..ids_o2).device_ptr()), CudaStorageSlice::U8(slice) => ("ia_u8", *slice.slice(ids_o1..ids_o2).device_ptr()), _ => Err(CudaError::UnexpectedDType { msg: "index-add ids should be u8/u32/i64", expected: DType::U32, got: ids.dtype(), })?, }; let src = match src_l.contiguous_offsets() { Some((o1, o2)) => src.slice(o1..o2), None => Err(crate::Error::RequiresContiguous { op: "index-add" }.bt())?, }; let left_sz: usize = src_l.dims()[..dim].iter().product(); let right_sz: usize = src_l.dims()[dim + 1..].iter().product(); let src_dim_sz = src_l.dims()[dim]; let dst_dim_sz = dst_shape.dims()[dim]; let ids_dim_sz = ids_l.dims()[0]; let cfg = LaunchConfig::for_num_elems((left_sz * right_sz) as u32); let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::INDEXING)?; // SAFETY: Set later by running the kernel. let params = ( ids, ids_dim_sz, &src, dst, left_sz, src_dim_sz, dst_dim_sz, right_sz, ); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(()) } } struct ScatterAdd<'a>(&'a CudaStorage, &'a Layout, usize); impl<'a> Map2InPlace for ScatterAdd<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, dst: &mut CudaSlice<T>, dst_shape: &Shape, src: &CudaSlice<T>, src_l: &Layout, dev: &CudaDevice, ) -> Result<()> { let ids = &self.0; let ids_l = &self.1; let dim = self.2; let (ids_o1, ids_o2) = match ids_l.contiguous_offsets() { Some(o12) => o12, None => Err(crate::Error::RequiresContiguous { op: "scatter-add" }.bt())?, }; let (name, ids) = match &ids.slice { CudaStorageSlice::U32(slice) => ("sa_u32", *slice.slice(ids_o1..ids_o2).device_ptr()), CudaStorageSlice::I64(slice) => ("sa_i64", *slice.slice(ids_o1..ids_o2).device_ptr()), CudaStorageSlice::U8(slice) => ("sa_u8", *slice.slice(ids_o1..ids_o2).device_ptr()), _ => Err(CudaError::UnexpectedDType { msg: "scatter-add ids should be u8/u32/i64", expected: DType::U32, got: ids.dtype(), })?, }; let src = match src_l.contiguous_offsets() { Some((o1, o2)) => src.slice(o1..o2), None => Err(crate::Error::RequiresContiguous { op: "scatter-add" }.bt())?, }; let left_sz: usize = src_l.dims()[..dim].iter().product(); let right_sz: usize = src_l.dims()[dim + 1..].iter().product(); let src_dim_sz = src_l.dims()[dim]; let dst_dim_sz = dst_shape.dims()[dim]; let cfg = LaunchConfig::for_num_elems((left_sz * right_sz) as u32); let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::INDEXING)?; // SAFETY: Set later by running the kernel. let params = (ids, &src, dst, left_sz, src_dim_sz, dst_dim_sz, right_sz); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(()) } } struct Conv1D<'a>(&'a crate::conv::ParamsConv1D); impl<'a> Map2 for Conv1D<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, inp: &CudaSlice<T>, inp_l: &Layout, k: &CudaSlice<T>, k_l: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { // Kernel shape: (c_out, c_in_k, k_size) // Input shape: (b_size, c_in, l_in) or (c_in, l_in) let p = &self.0; let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(k_l.start_offset()..); let shape = inp_l.shape(); let dims = shape.dims(); let el = shape.elem_count(); let l_out = p.l_out(); let dst_el = p.c_out * l_out * p.b_size; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let func = dev.get_or_load_func(&kernel_name::<T>("conv1d"), kernels::CONV)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let ds = if dims.len() == 3 { [dims, inp_l.stride(), k_l.dims(), k_l.stride()].concat() } else if dims.len() == 2 { [&[1], dims, &[1], inp_l.stride(), k_l.dims(), k_l.stride()].concat() } else { crate::bail!("unexpected input shape for conv1d {dims:?}") }; let ds = dev.htod_copy(ds).w()?; let params = ( el, l_out, p.stride, p.padding, p.dilation, &ds, inp, k, &out, ); // SAFETY: ffi. 
unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct Conv2D<'a>(&'a crate::conv::ParamsConv2D); impl<'a> Map2 for Conv2D<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, inp: &CudaSlice<T>, inp_l: &Layout, k: &CudaSlice<T>, k_l: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { // Kernel shape: (c_out, c_in_k, h_k, w_k) // Input shape: (b_size, c_in, h_in, w_in) let p = &self.0; let (out_w, out_h) = (p.out_w(), p.out_h()); let dst_el = p.c_out * out_w * out_h * p.b_size; let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(k_l.start_offset()..); let shape = inp_l.shape(); let dims = shape.dims(); let el = shape.elem_count(); // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let func = dev.get_or_load_func(&kernel_name::<T>("conv2d"), kernels::CONV)?; let ds = if dims.len() == 4 { [dims, inp_l.stride(), k_l.dims(), k_l.stride()].concat() } else { crate::bail!("unexpected input shape for conv2d {dims:?}") }; let ds = dev.htod_copy(ds).w()?; let params = ( el, out_w, out_h, p.stride, p.padding, p.dilation, &ds, inp, k, &out, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct ConvTranspose2D<'a>(&'a crate::conv::ParamsConvTranspose2D); impl<'a> Map2 for ConvTranspose2D<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, inp: &CudaSlice<T>, inp_l: &Layout, k: &CudaSlice<T>, k_l: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { // Kernel shape: (c_in_k, c_out, h_k, w_k) // Input shape: (b_size, c_in, h_in, w_in) let p = &self.0; let (out_w, out_h) = (p.out_w(), p.out_h()); let dst_el = p.c_out * out_w * out_h * p.b_size; let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(k_l.start_offset()..); let shape = inp_l.shape(); let dims = shape.dims(); let el = shape.elem_count(); // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let func = dev.get_or_load_func(&kernel_name::<T>("conv_transpose2d"), kernels::CONV)?; let ds = if dims.len() == 4 { [dims, inp_l.stride(), k_l.dims(), k_l.stride()].concat() } else { crate::bail!("unexpected input shape for conv_transpose2d {dims:?}") }; let ds = dev.htod_copy(ds).w()?; let params = ( el, out_w, out_h, p.stride, p.padding, p.output_padding, p.dilation, &ds, inp, k, &out, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } enum PoolOp { Max, Avg, } struct Pool2D { w_k: usize, h_k: usize, w_stride: usize, h_stride: usize, op: PoolOp, } impl Map1 for Pool2D { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, inp: &CudaSlice<T>, dev: &CudaDevice, inp_l: &Layout, ) -> Result<CudaSlice<T>> { // Input shape: (b_size, c, h, w) let inp = &inp.slice(inp_l.start_offset()..); let shape = inp_l.shape(); let dims = shape.dims(); let ds = if dims.len() == 4 { [dims, inp_l.stride()].concat() } else { crate::bail!("unexpected input shape for pool {dims:?}") }; let el = shape.elem_count(); let out_w = (dims[2] - self.w_k) / self.w_stride + 1; let out_h = (dims[3] - self.h_k) / self.h_stride + 1; let dst_el = out_w * out_h * dims[0] * dims[1]; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let kname = match self.op { PoolOp::Max => "max_pool2d", PoolOp::Avg => "avg_pool2d", }; let func = dev.get_or_load_func(&kernel_name::<T>(kname), kernels::CONV)?; // SAFETY: Set later by running the kernel. 
let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let ds = dev.htod_copy(ds).w()?; let params = ( el, self.w_k, self.h_k, self.w_stride, self.h_stride, &ds, inp, &out, ); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct UpsampleNearest2D(usize, usize); impl Map1 for UpsampleNearest2D { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, inp: &CudaSlice<T>, dev: &CudaDevice, inp_l: &Layout, ) -> Result<CudaSlice<T>> { // Input shape: (b_size, c, h, w) let inp = &inp.slice(inp_l.start_offset()..); let shape = inp_l.shape(); let dims = shape.dims(); let ds = if dims.len() == 4 { [dims, inp_l.stride()].concat() } else { crate::bail!("unexpected input shape for upsample {dims:?}") }; let (out_w, out_h) = (self.0, self.1); let dst_el = out_w * out_h * dims[0] * dims[1]; let cfg = LaunchConfig::for_num_elems(dst_el as u32); let func = dev.get_or_load_func(&kernel_name::<T>("upsample_nearest2d"), kernels::CONV)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(dst_el) }.w()?; let ds = dev.htod_copy(ds).w()?; let scale_w = dims[2] as f64 / out_w as f64; let scale_h = dims[3] as f64 / out_h as f64; let params = (out_w, out_h, scale_w, scale_h, &ds, inp, &out); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct WhereCond<'a>(&'a CudaStorage, &'a Layout); impl<'a> Map2 for WhereCond<'a> { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, t: &CudaSlice<T>, layout_t: &Layout, f: &CudaSlice<T>, layout_f: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let ids_l = &self.1; let (ids, name) = match &self.0.slice { CudaStorageSlice::U8(slice) => { let ptr = *slice.slice(ids_l.start_offset()..).device_ptr(); (ptr, "where_u8") } CudaStorageSlice::U32(slice) => { let ptr = *slice.slice(ids_l.start_offset()..).device_ptr(); (ptr, "where_u32") } CudaStorageSlice::I64(slice) => { let ptr = *slice.slice(ids_l.start_offset()..).device_ptr(); (ptr, "where_i64") } _ => Err(CudaError::UnexpectedDType { msg: "where conditions should be u8/u32/i64", expected: DType::U32, got: self.0.dtype(), }) .w()?, }; let shape = ids_l.shape(); let dims = shape.dims(); let el = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(el as u32); let ds = dev .htod_copy([dims, ids_l.stride(), layout_t.stride(), layout_f.stride()].concat()) .w()?; let t = &t.slice(layout_t.start_offset()..); let f = &f.slice(layout_f.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::TERNARY)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<T>(el) }.w()?; let params = (el, dims.len(), &ds, ids, t, f, &out); // SAFETY: ffi unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } impl<U: crate::op::BinaryOpT> Map2 for U { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, lhs: &CudaSlice<T>, lhs_l: &Layout, rhs: &CudaSlice<T>, rhs_l: &Layout, dev: &CudaDevice, ) -> Result<CudaSlice<T>> { let shape = lhs_l.shape(); let dims = shape.dims(); let elem_count = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(elem_count as u32); let dims_and_strides = dev .htod_copy([dims, lhs_l.stride(), rhs_l.stride()].concat()) .w()?; let lhs = &lhs.slice(lhs_l.start_offset()..); let rhs = &rhs.slice(rhs_l.start_offset()..); let func = dev.get_or_load_func(&kernel_name::<T>(U::KERNEL), kernels::BINARY)?; // SAFETY: Set later by running the kernel. 
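// `alloc` hands back uninitialized device memory, hence the `unsafe`: the
// "set later" note means the kernel launched below writes all `elem_count`
// slots before the buffer is ever read.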
let out = unsafe { dev.alloc::<T>(elem_count) }.w()?; let params = (elem_count, dims.len(), &dims_and_strides, lhs, rhs, &out); // SAFETY: ffi unsafe { func.launch(cfg, params) }.w()?; Ok(out) } } struct Cmp(CmpOp); impl Map2Any for Cmp { fn f<T: DeviceRepr + WithDType + ValidAsZeroBits>( &self, lhs: &CudaSlice<T>, lhs_l: &Layout, rhs: &CudaSlice<T>, rhs_l: &Layout, dev: &CudaDevice, ) -> Result<S> { let shape = lhs_l.shape(); let dims = shape.dims(); let elem_count = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(elem_count as u32); let dims_and_strides = dev .htod_copy([dims, lhs_l.stride(), rhs_l.stride()].concat()) .w()?; let lhs = &lhs.slice(lhs_l.start_offset()..); let rhs = &rhs.slice(rhs_l.start_offset()..); let name = match self.0 { CmpOp::Eq => "eq", CmpOp::Ne => "ne", CmpOp::Lt => "lt", CmpOp::Le => "le", CmpOp::Gt => "gt", CmpOp::Ge => "ge", }; let func = dev.get_or_load_func(&kernel_name::<T>(name), kernels::BINARY)?; // SAFETY: Set later by running the kernel. let out = unsafe { dev.alloc::<u8>(elem_count) }.w()?; let params = (elem_count, dims.len(), &dims_and_strides, lhs, rhs, &out); // SAFETY: ffi unsafe { func.launch(cfg, params) }.w()?; Ok(S::U8(out)) } } fn slice_src_and_dst<'a, T>( src: &'a CudaSlice<T>, src_l: &Layout, dst: &'a mut CudaSlice<T>, dst_offset: usize, ) -> ( cudarc::driver::CudaView<'a, T>, cudarc::driver::CudaViewMut<'a, T>, ) { let src_offset = src_l.start_offset(); let to_copy = dst .len() .saturating_sub(dst_offset) .min(src.len().saturating_sub(src_offset)); let src = src.slice(src_offset..src_offset + to_copy); let dst = dst.slice_mut(dst_offset..dst_offset + to_copy); (src, dst) } #[derive(Debug)] pub struct CudaStorage { pub slice: CudaStorageSlice, pub device: CudaDevice, } pub trait CudaDType: Sized { fn as_cuda_slice(s: &CudaStorage) -> Result<&CudaSlice<Self>>; fn wrap_cuda_slice(s: CudaSlice<Self>, dev: CudaDevice) -> CudaStorage; } macro_rules! 
cuda_dtype { ($ty:ty, $dtype:ident) => { impl CudaDType for $ty { fn as_cuda_slice(s: &CudaStorage) -> Result<&CudaSlice<Self>> { match &s.slice { CudaStorageSlice::$dtype(data) => Ok(&data), _ => Err(crate::Error::UnexpectedDType { expected: DType::$dtype, got: s.dtype(), msg: "unexpected dtype", } .bt()), } } fn wrap_cuda_slice(slice: CudaSlice<Self>, device: CudaDevice) -> CudaStorage { let slice = CudaStorageSlice::$dtype(slice); CudaStorage { slice, device } } } }; } cuda_dtype!(u8, U8); cuda_dtype!(u32, U32); cuda_dtype!(i64, I64); cuda_dtype!(f16, F16); cuda_dtype!(bf16, BF16); cuda_dtype!(f32, F32); cuda_dtype!(f64, F64); impl CudaStorage { pub fn wrap_cuda_slice<T: CudaDType>(slice: CudaSlice<T>, device: CudaDevice) -> CudaStorage { T::wrap_cuda_slice(slice, device) } pub fn as_cuda_slice<T: CudaDType>(&self) -> Result<&CudaSlice<T>> { T::as_cuda_slice(self) } } fn gemm_config<T>( alpha: T, beta: T, (b, m, n, k): (usize, usize, usize, usize), lhs_l: &Layout, rhs_l: &Layout, ) -> Result<StridedBatchedConfig<T>> { // https://docs.nvidia.com/cuda/cublas/index.html#cublas-t-gemm use cudarc::cublas::sys::cublasOperation_t; let lhs_stride = lhs_l.stride(); let rhs_stride = rhs_l.stride(); let rhs_m1 = rhs_stride[rhs_stride.len() - 1]; let rhs_m2 = rhs_stride[rhs_stride.len() - 2]; let lhs_m1 = lhs_stride[lhs_stride.len() - 1]; let lhs_m2 = lhs_stride[lhs_stride.len() - 2]; // The a tensor has dims batching, k, n (rhs) let (lda, transa) = if rhs_m1 == 1 && rhs_m2 == n { (n as i32, cublasOperation_t::CUBLAS_OP_N) } else if rhs_m1 == k && rhs_m2 == 1 { (k as i32, cublasOperation_t::CUBLAS_OP_T) } else { Err(CudaError::MatMulNonContiguous { lhs_stride: lhs_stride.to_vec(), rhs_stride: rhs_stride.to_vec(), mnk: (m, n, k), })? }; // The b tensor has dims batching, m, k (lhs) let (ldb, transb) = if lhs_m1 == 1 && lhs_m2 == k { (k as i32, cublasOperation_t::CUBLAS_OP_N) } else if lhs_m1 == m && lhs_m2 == 1 { (m as i32, cublasOperation_t::CUBLAS_OP_T) } else { Err(CudaError::MatMulNonContiguous { lhs_stride: lhs_stride.to_vec(), rhs_stride: rhs_stride.to_vec(), mnk: (m, n, k), })? 
}; // The setup below was copied from: // https://github.com/lebedov/scikit-cuda/blob/7e7300474286019c917a6c8a4bca59405c64fbce/tests/test_cublas.py#L531 let gemm = GemmConfig { alpha, beta, m: n as i32, n: m as i32, k: k as i32, lda, ldb, ldc: n as i32, transa, transb, }; let stride_b: usize = match lhs_stride[..lhs_stride.len() - 2] { [s1, stride] if s1 == stride * lhs_l.dims()[1] => stride, [stride] => stride, [] => m * k, _ => Err(CudaError::MatMulNonContiguous { lhs_stride: lhs_stride.to_vec(), rhs_stride: rhs_stride.to_vec(), mnk: (m, n, k), })?, }; let stride_a: usize = match rhs_stride[..rhs_stride.len() - 2] { [s1, stride] if s1 == stride * rhs_l.dims()[1] => stride, [stride] => stride, [] => n * k, _ => Err(CudaError::MatMulNonContiguous { lhs_stride: lhs_stride.to_vec(), rhs_stride: rhs_stride.to_vec(), mnk: (m, n, k), })?, }; Ok(StridedBatchedConfig { batch_size: b as i32, gemm, stride_a: stride_a as i64, stride_b: stride_b as i64, stride_c: (m * n) as i64, }) } impl BackendStorage for CudaStorage { type Device = CudaDevice; fn try_clone(&self, layout: &Layout) -> Result<Self> { let slice = Clone.map(&self.slice, self.device(), layout)?; let device = self.device.clone(); Ok(Self { slice, device }) } fn dtype(&self) -> DType { match self.slice { CudaStorageSlice::U8(_) => DType::U8, CudaStorageSlice::U32(_) => DType::U32, CudaStorageSlice::I64(_) => DType::I64, CudaStorageSlice::BF16(_) => DType::BF16, CudaStorageSlice::F16(_) => DType::F16, CudaStorageSlice::F32(_) => DType::F32, CudaStorageSlice::F64(_) => DType::F64, } } fn device(&self) -> &CudaDevice { &self.device } fn to_dtype(&self, layout: &Layout, dtype: DType) -> Result<Self> { let shape = layout.shape(); let dims = shape.dims(); let el = shape.elem_count(); let cfg = LaunchConfig::for_num_elems(el as u32); let dev = self.device(); let ds = dev.htod_copy([dims, layout.stride()].concat()).w()?; let start_o = layout.start_offset(); // This returns an i64 rather than a &i64, this is useful to get around some temporary // lifetime issue and is safe as long as self.slice does not go out of scope before inp // is used. 
let inp = match &self.slice { CudaStorageSlice::U8(inp) => *inp.slice(start_o..).device_ptr(), CudaStorageSlice::U32(inp) => *inp.slice(start_o..).device_ptr(), CudaStorageSlice::I64(inp) => *inp.slice(start_o..).device_ptr(), CudaStorageSlice::BF16(inp) => *inp.slice(start_o..).device_ptr(), CudaStorageSlice::F16(inp) => *inp.slice(start_o..).device_ptr(), CudaStorageSlice::F32(inp) => *inp.slice(start_o..).device_ptr(), CudaStorageSlice::F64(inp) => *inp.slice(start_o..).device_ptr(), }; let inp = &inp; let kernel_name = format!("cast_{}_{}", self.dtype().as_str(), dtype.as_str()); let func = dev.get_or_load_func(&kernel_name, kernels::CAST)?; let slice = match dtype { DType::U8 => { let out = unsafe { dev.alloc::<u8>(el) }.w()?; let params = (el, dims.len(), &ds, *inp, &out); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::U8(out) } DType::U32 => { let out = unsafe { dev.alloc::<u32>(el) }.w()?; let params = (el, dims.len(), &ds, *inp, &out); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::U32(out) } DType::I64 => { let out = unsafe { dev.alloc::<i64>(el) }.w()?; let params = (el, dims.len(), &ds, *inp, &out); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::I64(out) } DType::BF16 => { let out = unsafe { dev.alloc::<bf16>(el) }.w()?; let params = (el, dims.len(), &ds, *inp, &out); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::BF16(out) } DType::F16 => { let out = unsafe { dev.alloc::<f16>(el) }.w()?; let params = (el, dims.len(), &ds, *inp, &out); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::F16(out) } DType::F32 => { let out = unsafe { dev.alloc::<f32>(el) }.w()?; let params = (el, dims.len(), &ds, *inp, &out); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::F32(out) } DType::F64 => { let out = unsafe { dev.alloc::<f64>(el) }.w()?; let params = (el, dims.len(), &ds, *inp, &out); unsafe { func.launch(cfg, params) }.w()?; CudaStorageSlice::F64(out) } }; Ok(Self { slice, device: dev.clone(), }) } fn affine(&self, layout: &Layout, mul: f64, add: f64) -> Result<Self> { let device = self.device().clone(); let slice = Affine(mul, add).map(&self.slice, &device, layout)?; Ok(Self { slice, device }) } fn powf(&self, layout: &Layout, e: f64) -> Result<Self> { let device = self.device().clone(); let slice = Powf(e).map(&self.slice, &device, layout)?; Ok(Self { slice, device }) } fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> { let device = self.device().clone(); let slice = Elu(alpha).map(&self.slice, &device, layout)?; Ok(Self { slice, device }) } fn reduce_op(&self, op: ReduceOp, layout: &Layout, sum_dims: &[usize]) -> Result<Self> { let device = self.device().clone(); let slice = FastReduce(sum_dims, op).map(&self.slice, &device, layout)?; Ok(Self { slice, device }) } fn cmp(&self, op: CmpOp, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout) -> Result<Self> { let device = self.device().clone(); let slice = Cmp(op).map(&self.slice, lhs_l, &rhs.slice, rhs_l, &device)?; Ok(Self { slice, device }) } fn unary_impl<U: UnaryOpT>(&self, layout: &Layout) -> Result<Self> { let device = self.device().clone(); let slice = U::V.map(&self.slice, &device, layout)?; Ok(Self { slice, device }) } fn binary_impl<B: BinaryOpT>( &self, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { let device = self.device().clone(); let slice = B::V.map(&self.slice, lhs_l, &rhs.slice, rhs_l, &device)?; Ok(Self { slice, device }) } fn to_cpu_storage(&self) -> Result<CpuStorage> { match &self.slice { CudaStorageSlice::U8(slice) => { 
let dev = slice.device(); let cpu_storage = dev.dtoh_sync_copy(slice).w()?; Ok(CpuStorage::U8(cpu_storage)) } CudaStorageSlice::U32(slice) => { let dev = slice.device(); let cpu_storage = dev.dtoh_sync_copy(slice).w()?; Ok(CpuStorage::U32(cpu_storage)) } CudaStorageSlice::I64(slice) => { let dev = slice.device(); let cpu_storage = dev.dtoh_sync_copy(slice).w()?; Ok(CpuStorage::I64(cpu_storage)) } CudaStorageSlice::BF16(slice) => { let dev = slice.device(); let cpu_storage = dev.dtoh_sync_copy(slice).w()?; Ok(CpuStorage::BF16(cpu_storage)) } CudaStorageSlice::F16(slice) => { let dev = slice.device(); let cpu_storage = dev.dtoh_sync_copy(slice).w()?; Ok(CpuStorage::F16(cpu_storage)) } CudaStorageSlice::F32(slice) => { let dev = slice.device(); let cpu_storage = dev.dtoh_sync_copy(slice).w()?; Ok(CpuStorage::F32(cpu_storage)) } CudaStorageSlice::F64(slice) => { let dev = slice.device(); let cpu_storage = dev.dtoh_sync_copy(slice).w()?; Ok(CpuStorage::F64(cpu_storage)) } } } fn where_cond( &self, layout: &Layout, t: &Self, t_l: &Layout, f: &Self, f_l: &Layout, ) -> Result<Self> { let device = self.device().clone(); let slice = WhereCond(self, layout).map(&t.slice, t_l, &f.slice, f_l, &device)?; Ok(Self { slice, device }) } fn conv1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv1D, ) -> Result<Self> { const USE_IM2COL_CONV1D: bool = true; let device = self.device().clone(); if !USE_IM2COL_CONV1D { let slice = Conv1D(params).map(&self.slice, l, &kernel.slice, kernel_l, &device)?; return Ok(Self { slice, device }); } let col = Im2Col1D { l_k: params.k_size, stride: params.stride, dilation: params.dilation, padding: params.padding, } .map(&self.slice, &device, l)?; let col = Self { slice: col, device }; let l_out = params.l_out(); let b = params.b_size; let n = params.c_out; let k = params.k_size * params.c_in; let m = l_out; let col_l = Layout::contiguous((b, m, k)); let res = if kernel_l.is_contiguous() { let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? } else { // Make the kernel contiguous if not already the case. let mut kernel_c = self.device().zeros_impl(kernel_l.shape(), kernel.dtype())?; kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?; let kernel_l = Layout::contiguous((1, n, k)) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(&kernel_c, (b, m, n, k), &col_l, &kernel_l)?
}; let res_l = Layout::contiguous((b, l_out, n)).transpose(1, 2)?; let mut res_t = self.device().zeros_impl(res_l.shape(), res.dtype())?; res.copy_strided_src(&mut res_t, 0, &res_l)?; Ok(res_t) } fn conv_transpose1d( &self, _: &Layout, _: &Self, _: &Layout, _: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self> { todo!() } #[cfg(not(feature = "cudnn"))] fn conv2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv2D, ) -> Result<Self> { const USE_IM2COL_CONV2D: bool = true; let device = self.device().clone(); if !USE_IM2COL_CONV2D { let slice = Conv2D(params).map(&self.slice, l, &kernel.slice, kernel_l, &device)?; return Ok(Self { slice, device }); } let col = Im2Col { h_k: params.k_h, w_k: params.k_w, stride: params.stride, dilation: params.dilation, padding: params.padding, } .map(&self.slice, &device, l)?; let col = Self { slice: col, device }; let h_out = params.out_h(); let w_out = params.out_w(); let b = params.b_size; let n = params.c_out; let k = params.k_h * params.k_w * params.c_in; let m = h_out * w_out; let col_l = Layout::contiguous((b, m, k)); let res = if kernel_l.is_contiguous() { let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? } else { // Make the kernel contiguous if not already the case. let mut kernel_c = self.device().zeros_impl(kernel_l.shape(), kernel.dtype())?; kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?; let kernel_l = Layout::contiguous((1, n, k)) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(&kernel_c, (b, m, n, k), &col_l, &kernel_l)? }; let res_l = Layout::contiguous((b, h_out, w_out, n)) .transpose(1, 2)?
.transpose(1, 3)?; let mut res_t = self.device().zeros_impl(res_l.shape(), res.dtype())?; res.copy_strided_src(&mut res_t, 0, &res_l)?; Ok(res_t) } #[cfg(feature = "cudnn")] fn conv2d( &self, inp_l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv2D, ) -> Result<Self> { let device = self.device().clone(); if !kernel_l.is_contiguous() { let slice = Conv2D(params).map(&self.slice, inp_l, &kernel.slice, kernel_l, &device)?; return Ok(Self { slice, device }); } let (out_w, out_h) = (params.out_w(), params.out_h()); let dst_el = params.c_out * out_w * out_h * params.b_size; let slice = match (&self.slice, &kernel.slice) { (S::U8(inp), S::U8(k)) => { let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(kernel_l.start_offset()..); let mut out = unsafe { device.alloc::<u8>(dst_el) }.w()?; crate::cudnn::launch_conv2d::<u8>(inp, inp_l, k, &mut out, params, &device) .map_err(crate::Error::wrap)?; S::U8(out) } (S::BF16(inp), S::BF16(k)) => { let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(kernel_l.start_offset()..); let mut out = unsafe { device.alloc::<bf16>(dst_el) }.w()?; crate::cudnn::launch_conv2d::<bf16>(inp, inp_l, k, &mut out, params, &device) .map_err(crate::Error::wrap)?; S::BF16(out) } (S::F16(inp), S::F16(k)) => { let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(kernel_l.start_offset()..); let mut out = unsafe { device.alloc::<f16>(dst_el) }.w()?; crate::cudnn::launch_conv2d::<f16>(inp, inp_l, k, &mut out, params, &device) .map_err(crate::Error::wrap)?; S::F16(out) } (S::F32(inp), S::F32(k)) => { let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(kernel_l.start_offset()..); let mut out = unsafe { device.alloc::<f32>(dst_el) }.w()?; crate::cudnn::launch_conv2d::<f32>(inp, inp_l, k, &mut out, params, &device) .map_err(crate::Error::wrap)?; S::F32(out) } (S::F64(inp), S::F64(k)) => { let inp = &inp.slice(inp_l.start_offset()..); let k = &k.slice(kernel_l.start_offset()..); let mut out = unsafe { device.alloc::<f64>(dst_el) }.w()?; crate::cudnn::launch_conv2d::<f64>(inp, inp_l, k, &mut out, params, &device) .map_err(crate::Error::wrap)?; S::F64(out) } (S::U32(_), S::U32(_)) => Err(CudaError::InternalError("conv2d does not support u32"))?, (S::I64(_), S::I64(_)) => Err(CudaError::InternalError("conv2d does not support i64"))?, _ => Err(CudaError::InternalError("dtype mismatch in conv2d"))?, }; Ok(Self { slice, device }) } fn conv_transpose2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self> { let device = self.device().clone(); let slice = ConvTranspose2D(params).map(&self.slice, l, &kernel.slice, kernel_l, &device)?; Ok(Self { slice, device }) } fn avg_pool2d(&self, l: &Layout, k: (usize, usize), stride: (usize, usize)) -> Result<Self> { let device = self.device().clone(); let slice = Pool2D { w_k: k.0, h_k: k.1, w_stride: stride.0, h_stride: stride.1, op: PoolOp::Avg, } .map(&self.slice, &device, l)?; Ok(Self { slice, device }) } fn max_pool2d(&self, l: &Layout, k: (usize, usize), stride: (usize, usize)) -> Result<Self> { let device = self.device().clone(); let slice = Pool2D { w_k: k.0, h_k: k.1, w_stride: stride.0, h_stride: stride.1, op: PoolOp::Max, } .map(&self.slice, &device, l)?; Ok(Self { slice, device }) } fn upsample_nearest1d(&self, _: &Layout, _out_sz: usize) -> Result<Self> { crate::bail!("upsample-nearest1d is not supported on cuda") } fn upsample_nearest2d(&self, l: &Layout, out_w: usize, out_h: usize) -> Result<Self> { let device = 
self.device().clone(); let slice = UpsampleNearest2D(out_w, out_h).map(&self.slice, &device, l)?; Ok(Self { slice, device }) } fn index_select(&self, ids: &Self, l: &Layout, ids_l: &Layout, dim: usize) -> Result<Self> { let device = self.device().clone(); let slice = IndexSelect(ids, ids_l, dim).map(&self.slice, &device, l)?; Ok(Self { slice, device }) } fn gather(&self, l: &Layout, ids: &Self, ids_l: &Layout, dim: usize) -> Result<Self> { let device = self.device().clone(); let slice = Gather(ids, ids_l, dim).map(&self.slice, &device, l)?; Ok(Self { slice, device }) } fn scatter_add( &self, l: &Layout, ids: &Self, ids_l: &Layout, src: &Self, src_l: &Layout, dim: usize, ) -> Result<Self> { let device = self.device().clone(); let mut acc = device.zeros_impl(l.shape(), self.dtype())?; self.copy_strided_src(&mut acc, 0, l)?; ScatterAdd(ids, ids_l, dim).map(&mut acc.slice, l.shape(), &src.slice, src_l, &device)?; Ok(acc) } fn index_add( &self, l: &Layout, ids: &Self, ids_l: &Layout, src: &Self, src_l: &Layout, dim: usize, ) -> Result<Self> { let device = self.device().clone(); let mut acc = device.zeros_impl(l.shape(), self.dtype())?; self.copy_strided_src(&mut acc, 0, l)?; IndexAdd(ids, ids_l, dim).map(&mut acc.slice, l.shape(), &src.slice, src_l, &device)?; Ok(acc) } fn matmul( &self, rhs: &Self, (b, m, n, k): (usize, usize, usize, usize), lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { let elem_count = b * m * n; let dev = &self.device; let slice = match (&self.slice, &rhs.slice) { (CudaStorageSlice::BF16(lhs), CudaStorageSlice::BF16(rhs)) => { let lhs = &lhs.slice(lhs_l.start_offset()..); let rhs = &rhs.slice(rhs_l.start_offset()..); let cfg = gemm_config(bf16::ONE, bf16::ZERO, (b, m, n, k), lhs_l, rhs_l)?; let mut out = unsafe { dev.alloc::<bf16>(elem_count) }.w()?; unsafe { self.device .blas .gemm_strided_batched(cfg, rhs, lhs, &mut out) } .w()?; CudaStorageSlice::BF16(out) } (CudaStorageSlice::F16(lhs), CudaStorageSlice::F16(rhs)) => { let lhs = &lhs.slice(lhs_l.start_offset()..); let rhs = &rhs.slice(rhs_l.start_offset()..); let cfg = gemm_config(f16::ONE, f16::ZERO, (b, m, n, k), lhs_l, rhs_l)?; let mut out = unsafe { dev.alloc::<f16>(elem_count) }.w()?; unsafe { self.device .blas .gemm_strided_batched(cfg, rhs, lhs, &mut out) } .w()?; CudaStorageSlice::F16(out) } (CudaStorageSlice::F32(lhs), CudaStorageSlice::F32(rhs)) => { let lhs = &lhs.slice(lhs_l.start_offset()..); let rhs = &rhs.slice(rhs_l.start_offset()..); let cfg = gemm_config(1., 0., (b, m, n, k), lhs_l, rhs_l)?; let mut out = unsafe { dev.alloc::<f32>(elem_count) }.w()?; unsafe { self.device .blas .gemm_strided_batched(cfg, rhs, lhs, &mut out) } .w()?; CudaStorageSlice::F32(out) } (CudaStorageSlice::F64(lhs), CudaStorageSlice::F64(rhs)) => { let lhs = &lhs.slice(lhs_l.start_offset()..); let rhs = &rhs.slice(rhs_l.start_offset()..); let cfg = gemm_config(1., 0., (b, m, n, k), lhs_l, rhs_l)?; let mut out = unsafe { dev.alloc::<f64>(elem_count) }.w()?; unsafe { self.device .blas .gemm_strided_batched(cfg, rhs, lhs, &mut out) } .w()?; CudaStorageSlice::F64(out) } _ => Err(CudaError::InternalError("dtype mismatch in matmul op"))?, }; let device = dev.clone(); Ok(Self { slice, device }) } fn copy_strided_src(&self, dst: &mut Self, dst_offset: usize, src_l: &Layout) -> Result<()> { let src_shape = src_l.shape(); let dims = src_shape.dims(); let el_count = src_shape.elem_count(); if el_count == 0 { return Ok(()); } let cfg = LaunchConfig::for_num_elems(el_count as u32); let dev = &self.device; let ds = dev.htod_copy([dims, 
src_l.stride()].concat()).w()?; match (&self.slice, &mut dst.slice) { (CudaStorageSlice::BF16(src), CudaStorageSlice::BF16(dst)) => { let (src, mut dst) = slice_src_and_dst(src, src_l, dst, dst_offset); if src_l.is_contiguous() { dev.dtod_copy(&src, &mut dst).w()? } else { let func = dev.get_or_load_func("ucopy_bf16", kernels::UNARY)?; // SAFETY: Set later by running the kernel. let params = (el_count, dims.len(), &ds, &src, &mut dst); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()? } } (CudaStorageSlice::F16(src), CudaStorageSlice::F16(dst)) => { let (src, mut dst) = slice_src_and_dst(src, src_l, dst, dst_offset); if src_l.is_contiguous() { dev.dtod_copy(&src, &mut dst).w()? } else { let func = dev.get_or_load_func("ucopy_f16", kernels::UNARY)?; // SAFETY: Set later by running the kernel. let params = (el_count, dims.len(), &ds, &src, &mut dst); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()? } } (CudaStorageSlice::F32(src), CudaStorageSlice::F32(dst)) => { let (src, mut dst) = slice_src_and_dst(src, src_l, dst, dst_offset); if src_l.is_contiguous() { dev.dtod_copy(&src, &mut dst).w()? } else { let func = dev.get_or_load_func("ucopy_f32", kernels::UNARY)?; // SAFETY: Set later by running the kernel. let params = (el_count, dims.len(), &ds, &src, &mut dst); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()? } } (CudaStorageSlice::U8(src), CudaStorageSlice::U8(dst)) => { let (src, mut dst) = slice_src_and_dst(src, src_l, dst, dst_offset); if src_l.is_contiguous() { dev.dtod_copy(&src, &mut dst).w()? } else { let func = dev.get_or_load_func("ucopy_u8", kernels::UNARY)?; // SAFETY: Set later by running the kernel. let params = (el_count, dims.len(), &ds, &src, &mut dst); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()? } } (CudaStorageSlice::U32(src), CudaStorageSlice::U32(dst)) => { let (src, mut dst) = slice_src_and_dst(src, src_l, dst, dst_offset); if src_l.is_contiguous() { dev.dtod_copy(&src, &mut dst).w()? } else { let func = dev.get_or_load_func("ucopy_u32", kernels::UNARY)?; // SAFETY: Set later by running the kernel. let params = (el_count, dims.len(), &ds, &src, &mut dst); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()? } } (CudaStorageSlice::I64(src), CudaStorageSlice::I64(dst)) => { let (src, mut dst) = slice_src_and_dst(src, src_l, dst, dst_offset); if src_l.is_contiguous() { dev.dtod_copy(&src, &mut dst).w()? } else { let func = dev.get_or_load_func("ucopy_i64", kernels::UNARY)?; // SAFETY: Set later by running the kernel. let params = (el_count, dims.len(), &ds, &src, &mut dst); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()? } } (CudaStorageSlice::F64(src), CudaStorageSlice::F64(dst)) => { let (src, mut dst) = slice_src_and_dst(src, src_l, dst, dst_offset); if src_l.is_contiguous() { dev.dtod_copy(&src, &mut dst).w()? } else { let func = dev.get_or_load_func("ucopy_f64", kernels::UNARY)?; // SAFETY: Set later by running the kernel. let params = (el_count, dims.len(), &ds, &src, &mut dst); // SAFETY: ffi. unsafe { func.launch(cfg, params) }.w()?; } } _ => Err(CudaError::InternalError( "dtype mismatch in copy_strided op", ))?, } Ok(()) } }
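// Illustrative sketch (not part of the original source): cuBLAS is
// column-major while candle tensors are row-major, so `gemm_config` requests
// C^T = B^T * A^T instead of C = A * B. That is why `m` and `n` are swapped
// in the `GemmConfig` and why the `matmul` implementation above passes `rhs`
// before `lhs` to `gemm_strided_batched`.
#[cfg(test)]
mod gemm_config_sketch {
    use super::*;

    #[test]
    fn swaps_m_and_n_for_row_major() -> Result<()> {
        // Contiguous (b, m, k) x (b, k, n) product with b=2, m=3, n=5, k=4.
        let lhs_l = Layout::contiguous((2, 3, 4));
        let rhs_l = Layout::contiguous((2, 4, 5));
        let cfg = gemm_config(1f32, 0f32, (2, 3, 5, 4), &lhs_l, &rhs_l)?;
        assert_eq!(cfg.gemm.m, 5); // the caller's n
        assert_eq!(cfg.gemm.n, 3); // the caller's m
        assert_eq!(cfg.gemm.k, 4);
        assert_eq!(cfg.stride_c, 15); // each output matrix holds m * n elements
        Ok(())
    }
}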
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/backend.rs
use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT}; use crate::{CpuStorage, DType, Layout, Result, Shape}; pub trait BackendStorage: Sized { type Device: BackendDevice; fn try_clone(&self, _: &Layout) -> Result<Self>; fn dtype(&self) -> DType; fn device(&self) -> &Self::Device; // Maybe this should return a Cow instead so that no copy is done on the cpu case. fn to_cpu_storage(&self) -> Result<CpuStorage>; fn affine(&self, _: &Layout, _: f64, _: f64) -> Result<Self>; fn powf(&self, _: &Layout, _: f64) -> Result<Self>; fn elu(&self, _: &Layout, _: f64) -> Result<Self>; fn reduce_op(&self, _: ReduceOp, _: &Layout, _: &[usize]) -> Result<Self>; fn cmp(&self, _: CmpOp, _: &Self, _: &Layout, _: &Layout) -> Result<Self>; fn to_dtype(&self, _: &Layout, _: DType) -> Result<Self>; fn unary_impl<B: UnaryOpT>(&self, _: &Layout) -> Result<Self>; fn binary_impl<B: BinaryOpT>(&self, _: &Self, _: &Layout, _: &Layout) -> Result<Self>; fn where_cond(&self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout) -> Result<Self>; fn conv1d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConv1D, ) -> Result<Self>; fn conv_transpose1d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self>; fn conv2d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConv2D, ) -> Result<Self>; fn conv_transpose2d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self>; fn avg_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self>; fn max_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self>; fn upsample_nearest1d(&self, _: &Layout, _: usize) -> Result<Self>; fn upsample_nearest2d(&self, _: &Layout, _: usize, _: usize) -> Result<Self>; fn gather(&self, _: &Layout, _: &Self, _: &Layout, _: usize) -> Result<Self>; fn scatter_add( &self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout, _: usize, ) -> Result<Self>; fn index_select(&self, _: &Self, _: &Layout, _: &Layout, _: usize) -> Result<Self>; fn index_add( &self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout, _: usize, ) -> Result<Self>; fn matmul( &self, _: &Self, _: (usize, usize, usize, usize), _: &Layout, _: &Layout, ) -> Result<Self>; fn copy_strided_src(&self, _: &mut Self, _: usize, _: &Layout) -> Result<()>; } pub trait BackendDevice: Sized + std::fmt::Debug + Clone { type Storage: BackendStorage; // TODO: Make the usize generic and part of a generic DeviceLocation. fn new(_: usize) -> Result<Self>; fn location(&self) -> crate::DeviceLocation; fn same_device(&self, _: &Self) -> bool; fn zeros_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage>; fn ones_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage>; fn storage_from_cpu_storage(&self, _: &CpuStorage) -> Result<Self::Storage>; fn rand_uniform(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage>; fn rand_normal(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage>; fn set_seed(&self, _: u64) -> Result<()>; }
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/lib.rs
//! ML framework for Rust //! //! ```rust //! use candle_core::{Tensor, DType, Device}; //! # use candle_core::Error; //! # fn main() -> Result<(), Error>{ //! //! let a = Tensor::arange(0f32, 6f32, &Device::Cpu)?.reshape((2, 3))?; //! let b = Tensor::arange(0f32, 12f32, &Device::Cpu)?.reshape((3, 4))?; //! //! let c = a.matmul(&b)?; //! # Ok(())} //! ``` //! //! ## Features //! //! - Simple syntax (looks and feels like PyTorch) //! - CPU and Cuda backends (and M1 support) //! - Enable serverless (CPU), small and fast deployments //! - Model training //! - Distributed computing (NCCL) //! - Models out of the box (Llama, Whisper, Falcon, ...) //! //! ## FAQ //! //! - Why Candle? //! //! Candle stems from the need to reduce binary size in order to *enable serverless* //! deployments, by making the whole engine much smaller than PyTorch's very large library volume. //! //! And simply *removing Python* from production workloads. //! Python can really add overhead in more complex workflows and the [GIL](https://www.backblaze.com/blog/the-python-gil-past-present-and-future/) is a notorious source of headaches. //! //! Rust is cool, and a lot of the HF ecosystem already has Rust crates, like [safetensors](https://github.com/huggingface/safetensors) and [tokenizers](https://github.com/huggingface/tokenizers). #[cfg(feature = "accelerate")] mod accelerate; pub mod backend; pub mod backprop; mod conv; mod convert; pub mod cpu; pub mod cpu_backend; #[cfg(feature = "cuda")] pub mod cuda_backend; #[cfg(feature = "cudnn")] pub mod cudnn; mod device; pub mod display; mod dtype; mod dummy_cuda_backend; mod dummy_metal_backend; pub mod error; mod indexer; pub mod layout; #[cfg(feature = "metal")] pub mod metal_backend; #[cfg(feature = "mkl")] mod mkl; pub mod npy; mod op; pub mod pickle; pub mod quantized; pub mod safetensors; pub mod scalar; pub mod shape; mod storage; mod strided_index; mod tensor; pub mod test_utils; pub mod utils; mod variable; pub use cpu_backend::CpuStorage; pub use device::{Device, DeviceLocation}; pub use dtype::{DType, FloatDType, IntDType, WithDType}; pub use error::{Error, Result}; pub use indexer::IndexOp; pub use layout::Layout; pub use op::{CustomOp1, CustomOp2, CustomOp3}; pub use shape::{Shape, D}; pub use storage::Storage; pub use strided_index::{StridedBlocks, StridedIndex}; pub use tensor::{Tensor, TensorId}; pub use variable::Var; #[cfg(feature = "cuda")] pub use cuda_backend::{CudaDevice, CudaStorage}; #[cfg(not(feature = "cuda"))] pub use dummy_cuda_backend::{CudaDevice, CudaStorage}; #[cfg(feature = "metal")] pub use metal_backend::{MetalDevice, MetalError, MetalStorage}; #[cfg(not(feature = "metal"))] pub use dummy_metal_backend::{MetalDevice, MetalError, MetalStorage}; #[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; pub trait ToUsize2 { fn to_usize2(self) -> (usize, usize); } impl ToUsize2 for usize { fn to_usize2(self) -> (usize, usize) { (self, self) } } impl ToUsize2 for (usize, usize) { fn to_usize2(self) -> (usize, usize) { self } } // A simple trait defining a module with forward method using a single argument. pub trait Module { fn forward(&self, xs: &Tensor) -> Result<Tensor>; } impl<T: Fn(&Tensor) -> Result<Tensor>> Module for T { fn forward(&self, xs: &Tensor) -> Result<Tensor> { self(xs) } } // A trait defining a module with forward method using a single tensor argument and a flag to // separate the training and evaluation behaviors.
pub trait ModuleT { fn forward_t(&self, xs: &Tensor, train: bool) -> Result<Tensor>; } impl<M: Module> ModuleT for M { fn forward_t(&self, xs: &Tensor, _train: bool) -> Result<Tensor> { self.forward(xs) } }
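// Illustrative sketch (not part of the original source): thanks to the two
// blanket impls above, any closure from `&Tensor` to `Result<Tensor>` is a
// `Module`, and every `Module` is a `ModuleT` that ignores the `train` flag.
#[cfg(test)]
mod module_sketch {
    use super::*;

    #[test]
    fn closure_as_module() -> Result<()> {
        let xs = Tensor::arange(0f32, 4f32, &Device::Cpu)?;
        let double = |t: &Tensor| t.affine(2., 0.);
        let ys = double.forward(&xs)?; // via the blanket `Module` impl
        let ys_t = double.forward_t(&xs, true)?; // via the blanket `ModuleT` impl
        assert_eq!(ys.to_vec1::<f32>()?, ys_t.to_vec1::<f32>()?);
        Ok(())
    }
}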
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/dummy_metal_backend.rs
#![allow(dead_code)] use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT}; use crate::{CpuStorage, DType, Error, Layout, Result, Shape}; #[derive(Debug, Clone)] pub struct MetalDevice; #[derive(Debug)] pub struct MetalStorage; #[derive(thiserror::Error, Debug)] pub enum MetalError { #[error("{0}")] Message(String), } impl From<String> for MetalError { fn from(e: String) -> Self { MetalError::Message(e) } } macro_rules! fail { () => { unimplemented!("metal support has not been enabled, add `metal` feature to enable.") }; } impl crate::backend::BackendStorage for MetalStorage { type Device = MetalDevice; fn try_clone(&self, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn dtype(&self) -> DType { fail!() } fn device(&self) -> &Self::Device { fail!() } fn to_cpu_storage(&self) -> Result<CpuStorage> { Err(Error::NotCompiledWithMetalSupport) } fn affine(&self, _: &Layout, _: f64, _: f64) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn powf(&self, _: &Layout, _: f64) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn elu(&self, _: &Layout, _: f64) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn reduce_op(&self, _: ReduceOp, _: &Layout, _: &[usize]) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn cmp(&self, _: CmpOp, _: &Self, _: &Layout, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn to_dtype(&self, _: &Layout, _: DType) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn unary_impl<B: UnaryOpT>(&self, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn binary_impl<B: BinaryOpT>(&self, _: &Self, _: &Layout, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn where_cond(&self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn conv1d( &self, _: &Layout, _: &Self, _: &Layout, _: &crate::conv::ParamsConv1D, ) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn conv_transpose1d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn conv2d( &self, _: &Layout, _: &Self, _: &Layout, _: &crate::conv::ParamsConv2D, ) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn conv_transpose2d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn index_select(&self, _: &Self, _: &Layout, _: &Layout, _: usize) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn gather(&self, _: &Layout, _: &Self, _: &Layout, _: usize) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn scatter_add( &self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout, _: usize, ) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn index_add( &self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout, _: usize, ) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn matmul( &self, _: &Self, _: (usize, usize, usize, usize), _: &Layout, _: &Layout, ) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn copy_strided_src(&self, _: &mut Self, _: usize, _: &Layout) -> Result<()> { Err(Error::NotCompiledWithMetalSupport) } fn avg_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn max_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, 
usize)) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn upsample_nearest1d(&self, _: &Layout, _: usize) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn upsample_nearest2d(&self, _: &Layout, _: usize, _: usize) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } } impl crate::backend::BackendDevice for MetalDevice { type Storage = MetalStorage; fn new(_: usize) -> Result<Self> { Err(Error::NotCompiledWithMetalSupport) } fn set_seed(&self, _: u64) -> Result<()> { Err(Error::NotCompiledWithMetalSupport) } fn location(&self) -> crate::DeviceLocation { fail!() } fn same_device(&self, _: &Self) -> bool { fail!() } fn zeros_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage> { Err(Error::NotCompiledWithMetalSupport) } fn ones_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage> { Err(Error::NotCompiledWithMetalSupport) } fn storage_from_cpu_storage(&self, _: &CpuStorage) -> Result<Self::Storage> { Err(Error::NotCompiledWithMetalSupport) } fn rand_uniform(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage> { Err(Error::NotCompiledWithMetalSupport) } fn rand_normal(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage> { Err(Error::NotCompiledWithMetalSupport) } }
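// Illustrative sketch (not part of the original source): when candle is built
// without the `metal` feature, this stub is what gets re-exported, so every
// entry point fails fast with `Error::NotCompiledWithMetalSupport` instead of
// panicking deep inside a model.
#[cfg(test)]
mod dummy_metal_sketch {
    use super::*;
    use crate::backend::BackendDevice;

    #[test]
    fn metal_device_fails_without_the_feature() {
        assert!(MetalDevice::new(0).is_err());
    }
}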
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/test_utils.rs
use crate::{Result, Tensor}; #[macro_export] macro_rules! test_device { // TODO: Switch to generating the two last arguments automatically once concat_idents is // stable. https://github.com/rust-lang/rust/issues/29599 ($fn_name: ident, $test_cpu: ident, $test_cuda: ident, $test_metal: ident) => { #[test] fn $test_cpu() -> Result<()> { $fn_name(&Device::Cpu) } #[cfg(feature = "cuda")] #[test] fn $test_cuda() -> Result<()> { $fn_name(&Device::new_cuda(0)?) } #[cfg(feature = "metal")] #[test] fn $test_metal() -> Result<()> { $fn_name(&Device::new_metal(0)?) } }; } pub fn to_vec0_round(t: &Tensor, digits: i32) -> Result<f32> { let b = 10f32.powi(digits); let t = t.to_vec0::<f32>()?; Ok(f32::round(t * b) / b) } pub fn to_vec1_round(t: &Tensor, digits: i32) -> Result<Vec<f32>> { let b = 10f32.powi(digits); let t = t.to_vec1::<f32>()?; let t = t.iter().map(|t| f32::round(t * b) / b).collect(); Ok(t) } pub fn to_vec2_round(t: &Tensor, digits: i32) -> Result<Vec<Vec<f32>>> { let b = 10f32.powi(digits); let t = t.to_vec2::<f32>()?; let t = t .iter() .map(|t| t.iter().map(|t| f32::round(t * b) / b).collect()) .collect(); Ok(t) } pub fn to_vec3_round(t: &Tensor, digits: i32) -> Result<Vec<Vec<Vec<f32>>>> { let b = 10f32.powi(digits); let t = t.to_vec3::<f32>()?; let t = t .iter() .map(|t| { t.iter() .map(|t| t.iter().map(|t| f32::round(t * b) / b).collect()) .collect() }) .collect(); Ok(t) }
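// Illustrative usage sketch (not part of the original source): the rounding
// helpers keep float comparisons stable across backends by truncating
// low-order noise before `assert_eq!`.
#[cfg(test)]
mod rounding_sketch {
    use super::*;
    use crate::Device;

    #[test]
    fn to_vec2_round_truncates_noise() -> Result<()> {
        let t = Tensor::new(&[[0.123456f32, 0.987654]], &Device::Cpu)?;
        assert_eq!(to_vec2_round(&t, 3)?, vec![vec![0.123, 0.988]]);
        Ok(())
    }
}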
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/backprop.rs
use crate::op::{BinaryOp, Op, ReduceOp, UnaryOp}; use crate::{Error, Result, Tensor, TensorId}; use std::collections::HashMap; // arg has been reduced to node via reduce_dims, expand it back to arg. // This has to handle keepdims. fn broadcast_back(arg: &Tensor, node: &Tensor, reduced_dims: &[usize]) -> Result<Tensor> { if arg.rank() == node.rank() { // keepdim = true node.broadcast_as(arg.shape()) } else { // keepdim = false // first expand the reduced dims. node.reshape(reduced_dims)?.broadcast_as(arg.shape()) } } thread_local! { static CANDLE_GRAD_DO_NOT_DETACH: bool = { match std::env::var("CANDLE_GRAD_DO_NOT_DETACH") { Ok(s) => { !s.is_empty() && s != "0" }, Err(_) => false, } } } impl Tensor { /// Return all the nodes that lead to this value in a topologically sorted vec, the first /// elements having dependencies on the latter ones, e.g. the first element if any is the /// argument. /// This assumes that the op graph is a DAG. fn sorted_nodes(&self) -> Vec<&Tensor> { // The vec of sorted nodes is passed as an owned value rather than a mutable reference // to get around some lifetime limitations. fn walk<'a>( node: &'a Tensor, nodes: Vec<&'a Tensor>, already_seen: &mut HashMap<TensorId, bool>, ) -> (bool, Vec<&'a Tensor>) { if let Some(&tg) = already_seen.get(&node.id()) { return (tg, nodes); } let mut track_grad = false; let mut nodes = if node.is_variable() { // Do not call recursively on the "leaf" nodes. track_grad = true; nodes } else if node.dtype().is_int() { nodes } else if let Some(op) = node.op() { match op { Op::IndexAdd(t1, t2, t3, _) | Op::ScatterAdd(t1, t2, t3, _) | Op::CustomOp3(t1, t2, t3, _) | Op::WhereCond(t1, t2, t3) => { let (tg, nodes) = walk(t1, nodes, already_seen); track_grad |= tg; let (tg, nodes) = walk(t2, nodes, already_seen); track_grad |= tg; let (tg, nodes) = walk(t3, nodes, already_seen); track_grad |= tg; nodes } Op::Conv1D { arg: lhs, kernel: rhs, .. } | Op::ConvTranspose1D { arg: lhs, kernel: rhs, .. } | Op::Conv2D { arg: lhs, kernel: rhs, .. } | Op::ConvTranspose2D { arg: lhs, kernel: rhs, .. } | Op::CustomOp2(lhs, rhs, _) | Op::Binary(lhs, rhs, _) | Op::Gather(lhs, rhs, _) | Op::IndexSelect(lhs, rhs, _) | Op::Matmul(lhs, rhs) | Op::SliceScatter0(lhs, rhs, _) => { let (tg, nodes) = walk(lhs, nodes, already_seen); track_grad |= tg; let (tg, nodes) = walk(rhs, nodes, already_seen); track_grad |= tg; nodes } Op::Cat(args, _) => args.iter().fold(nodes, |nodes, arg| { let (tg, nodes) = walk(arg, nodes, already_seen); track_grad |= tg; nodes }), Op::Affine { arg, mul, .. } => { if *mul == 0. { nodes } else { let (tg, nodes) = walk(arg, nodes, already_seen); track_grad |= tg; nodes } } Op::Unary(_node, UnaryOp::Ceil) | Op::Unary(_node, UnaryOp::Floor) | Op::Unary(_node, UnaryOp::Round) => nodes, Op::Reshape(node) | Op::UpsampleNearest1D(node) | Op::UpsampleNearest2D(node) | Op::AvgPool2D { arg: node, .. } | Op::MaxPool2D { arg: node, .. 
} | Op::Copy(node) | Op::Broadcast(node) | Op::Cmp(node, _) | Op::Reduce(node, ReduceOp::Min | ReduceOp::Sum | ReduceOp::Max, _) | Op::ToDevice(node) | Op::Transpose(node, _, _) | Op::Permute(node, _) | Op::Narrow(node, _, _, _) | Op::Unary(node, _) | Op::Elu(node, _) | Op::Powf(node, _) | Op::CustomOp1(node, _) => { let (tg, nodes) = walk(node, nodes, already_seen); track_grad |= tg; nodes } Op::ToDType(node) => { if node.dtype().is_float() { let (tg, nodes) = walk(node, nodes, already_seen); track_grad |= tg; nodes } else { nodes } } Op::Reduce(_, ReduceOp::ArgMin | ReduceOp::ArgMax, _) => nodes, } } else { nodes }; already_seen.insert(node.id(), track_grad); if track_grad { nodes.push(node); } (track_grad, nodes) } let (_tg, mut nodes) = walk(self, vec![], &mut HashMap::new()); nodes.reverse(); nodes } pub fn backward(&self) -> Result<GradStore> { let sorted_nodes = self.sorted_nodes(); let mut grads = GradStore::new(); grads.insert(self, self.ones_like()?.contiguous()?); for node in sorted_nodes.iter() { if node.is_variable() { continue; } let grad = grads .remove(node) .expect("candle internal error - grad not populated"); // https://github.com/huggingface/candle/issues/1241 // Ideally, we would make these operations in place where possible to ensure that we // do not have to allocate too often. Here we just call `.detach` to avoid computing // the backprop graph of the backprop itself. This would be an issue for second order // derivatives but these are out of scope at the moment. let do_not_detach = CANDLE_GRAD_DO_NOT_DETACH.with(|b| *b); let grad = if do_not_detach { grad } else { grad.detach()? }; if let Some(op) = node.op() { match op { Op::Binary(lhs, rhs, BinaryOp::Add) => { let lhs_sum_grad = grads.or_insert(lhs)?; *lhs_sum_grad = lhs_sum_grad.add(&grad)?; let rhs_sum_grad = grads.or_insert(rhs)?; *rhs_sum_grad = rhs_sum_grad.add(&grad)?; } Op::Binary(lhs, rhs, BinaryOp::Sub) => { let lhs_sum_grad = grads.or_insert(lhs)?; *lhs_sum_grad = lhs_sum_grad.add(&grad)?; let rhs_sum_grad = grads.or_insert(rhs)?; *rhs_sum_grad = rhs_sum_grad.sub(&grad)?; } Op::Binary(lhs, rhs, BinaryOp::Mul) => { let lhs_grad = grad.mul(rhs)?; let lhs_sum_grad = grads.or_insert(lhs)?; *lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?; let rhs_grad = grad.mul(lhs)?; let rhs_sum_grad = grads.or_insert(rhs)?; *rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?; } Op::Binary(lhs, rhs, BinaryOp::Div) => { let lhs_grad = grad.div(rhs)?; let lhs_sum_grad = grads.or_insert(lhs)?; *lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?; let rhs_grad = grad.mul(lhs)?.div(&rhs.sqr()?)?; let rhs_sum_grad = grads.or_insert(rhs)?; *rhs_sum_grad = rhs_sum_grad.sub(&rhs_grad)?; } Op::Binary(lhs, rhs, BinaryOp::Minimum) | Op::Binary(lhs, rhs, BinaryOp::Maximum) => { let mask_lhs = node.eq(lhs)?.to_dtype(grad.dtype())?; let mask_rhs = node.eq(rhs)?.to_dtype(grad.dtype())?; // If both masks are 1 on the same point, we want to scale the // gradient by 0.5 rather than 1.
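// e.g. if lhs = rhs = node = 3.0 at some coordinate then both masks are 1
// there, each denominator below is 1 + 1 = 2, and each operand receives
// grad / 2, so the total gradient flowing back is still `grad`.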
let lhs_grad = mask_lhs.mul(&grad)?.div(&(&mask_rhs + 1.)?)?; let lhs_sum_grad = grads.or_insert(lhs)?; *lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?; let rhs_grad = mask_rhs.mul(&grad)?.div(&(&mask_lhs + 1.)?)?; let rhs_sum_grad = grads.or_insert(rhs)?; *rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?; } Op::WhereCond(pred, t, f) => { let zeros = grad.zeros_like()?; let t_sum_grad = grads.or_insert(t)?; let t_grad = pred.where_cond(&grad, &zeros)?; *t_sum_grad = t_sum_grad.add(&t_grad)?; let f_sum_grad = grads.or_insert(f)?; let f_grad = pred.where_cond(&zeros, &grad)?; *f_sum_grad = f_sum_grad.add(&f_grad)?; } Op::Conv1D { arg, kernel, padding, stride, dilation, } => { // The output height for conv_transpose1d is: // (l_in - 1) * stride - 2 * padding + dilation * (k_size - 1) + out_padding + 1 let grad_l_in = grad.dim(2)?; let k_size = kernel.dim(2)?; let out_size = (grad_l_in - 1) * stride + dilation * (k_size - 1) + 1 - 2 * padding; let out_padding = arg.dim(2)? - out_size; let grad_arg = grad.conv_transpose1d( kernel, *padding, out_padding, *stride, *dilation, )?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad_arg)?; let grad_kernel = arg .transpose(0, 1)? .conv1d(&grad.transpose(0, 1)?, *padding, *dilation, *stride, 1)? .transpose(0, 1)?; let sum_grad = grads.or_insert(kernel)?; let (_, _, k0) = kernel.dims3()?; let (_, _, g_k0) = grad_kernel.dims3()?; let grad_kernel = if g_k0 != k0 { grad_kernel.narrow(2, 0, k0)? } else { grad_kernel }; *sum_grad = sum_grad.add(&grad_kernel)?; } Op::Conv2D { arg, kernel, padding, stride, dilation, } => { // The output height for conv_transpose2d is: // (i_h - 1) * stride - 2 * padding + dilation * (k_h - 1) + out_padding + 1 let grad_h = grad.dim(2)?; let k_h = kernel.dim(2)?; let out_size = (grad_h - 1) * stride + dilation * (k_h - 1) + 1 - 2 * padding; let out_padding = arg.dim(2)? - out_size; let grad_arg = grad.conv_transpose2d( kernel, *padding, out_padding, *stride, *dilation, )?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad_arg)?; let grad_kernel = arg .transpose(0, 1)? .conv2d(&grad.transpose(0, 1)?, *padding, *dilation, *stride, 1)? .transpose(0, 1)?; let sum_grad = grads.or_insert(kernel)?; let (_, _, k0, k1) = kernel.dims4()?; let (_, _, g_k0, g_k1) = grad_kernel.dims4()?; let grad_kernel = if g_k0 != k0 || g_k1 != k1 { grad_kernel.narrow(2, 0, k0)?.narrow(3, 0, k1)? } else { grad_kernel }; *sum_grad = sum_grad.add(&grad_kernel)?; } Op::ConvTranspose1D { .. } => Err(Error::BackwardNotSupported { op: "conv-transpose1d", })?, Op::ConvTranspose2D { .. } => Err(Error::BackwardNotSupported { op: "conv-transpose2d", })?, Op::AvgPool2D { arg, kernel_size, stride, } => { if kernel_size != stride { crate::bail!("backward not supported for avgpool2d if ksize {kernel_size:?} != stride {stride:?}") } let (_n, _c, h, w) = arg.dims4()?; let grad_arg = grad.upsample_nearest2d(h, w)?; let grad_arg = (grad_arg * (1f64 / (kernel_size.0 * kernel_size.1) as f64))?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad_arg)?; } Op::MaxPool2D { arg, kernel_size, stride, } => { if kernel_size != stride { crate::bail!("backward not supported for maxpool2d if ksize {kernel_size:?} != stride {stride:?}") } let (_n, _c, h, w) = arg.dims4()?; // For computing the max-pool gradient, we compute a mask where a 1 means // that the element is the maximum, then we apply this mask to the // upsampled gradient (taking into account that multiple max may exist so // we scale the gradient for this case). 
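// e.g. for a 2x2 window [[5, 5], [1, 2]] the mask is [[1, 1], [0, 0]] and its
// average is 0.5, so each of the two argmax positions receives grad * 0.5 and
// the window's total incoming gradient is preserved.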
let node_upsampled = node.upsample_nearest2d(h, w)?; let mask = arg.eq(&node_upsampled)?.to_dtype(arg.dtype())?; let avg = mask.avg_pool2d_with_stride(*kernel_size, *stride)?; let grad_arg = ((grad * avg)?.upsample_nearest2d(h, w)? * mask)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad_arg)?; } Op::UpsampleNearest1D { .. } => Err(Error::BackwardNotSupported { op: "upsample-nearest1d", })?, Op::UpsampleNearest2D { .. } => Err(Error::BackwardNotSupported { op: "upsample-nearest2d", })?, Op::SliceScatter0(lhs, rhs, start_rhs) => { let rhs_sum_grad = grads.or_insert(rhs)?; let rhs_grad = grad.narrow(0, *start_rhs, rhs.dim(0)?)?; *rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?; let lhs_sum_grad = grads.or_insert(lhs)?; let lhs_grad = grad.slice_scatter0(&rhs.zeros_like()?, *start_rhs)?; *lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)? } Op::Gather(arg, indexes, dim) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.scatter_add(indexes, &grad, *dim)?; } Op::ScatterAdd(init, indexes, src, dim) => { let init_sum_grad = grads.or_insert(init)?; *init_sum_grad = init_sum_grad.add(&grad)?; let src_grad = grad.gather(indexes, *dim)?; let src_sum_grad = grads.or_insert(src)?; *src_sum_grad = src_sum_grad.add(&src_grad)?; } Op::IndexAdd(init, indexes, src, dim) => { let init_sum_grad = grads.or_insert(init)?; *init_sum_grad = init_sum_grad.add(&grad)?; let src_grad = grad.index_select(indexes, *dim)?; let src_sum_grad = grads.or_insert(src)?; *src_sum_grad = src_sum_grad.add(&src_grad)?; } Op::IndexSelect(arg, indexes, dim) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.index_add(indexes, &grad, *dim)?; } Op::Matmul(lhs, rhs) => { // Skipping checks, the op went ok, we can skip // the matmul size checks for now. let lhs_grad = grad.matmul(&rhs.t()?)?; let lhs_sum_grad = grads.or_insert(lhs)?; *lhs_sum_grad = lhs_sum_grad.add(&lhs_grad)?; let rhs_grad = lhs.t()?.matmul(&grad)?; let rhs_sum_grad = grads.or_insert(rhs)?; *rhs_sum_grad = rhs_sum_grad.add(&rhs_grad)?; } Op::Cat(args, dim) => { let mut start_idx = 0; for arg in args { let len = arg.dims()[*dim]; let arg_grad = grad.narrow(*dim, start_idx, len)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)?; start_idx += len; } } Op::Broadcast(arg) => { let arg_dims = arg.dims(); let node_dims = node.dims(); // The number of dims that have been inserted on the left. let left_dims = node_dims.len() - arg_dims.len(); let mut sum_dims: Vec<usize> = (0..left_dims).collect(); for (dim, (node_dim, arg_dim)) in node_dims[left_dims..] .iter() .zip(arg_dims.iter()) .enumerate() { if node_dim != arg_dim { sum_dims.push(dim + left_dims) } } let mut arg_grad = grad.sum_keepdim(sum_dims.as_slice())?; for _i in 0..left_dims { arg_grad = arg_grad.squeeze(0)? 
} let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad.broadcast_as(sum_grad.dims())?)?; } Op::Reduce(arg, ReduceOp::Sum, reduced_dims) => { let grad = broadcast_back(arg, &grad, reduced_dims)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad)?; } Op::Cmp(_args, _) => {} Op::Reduce(arg, ReduceOp::Max, reduced_dims) => { let node = broadcast_back(arg, node, reduced_dims)?; let grad = broadcast_back(arg, &grad, reduced_dims)?; let grad = node.eq(arg)?.to_dtype(grad.dtype())?.mul(&grad)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad.broadcast_as(sum_grad.dims())?)?; } Op::Reduce(arg, ReduceOp::Min, reduced_dims) => { let node = broadcast_back(arg, node, reduced_dims)?; let grad = broadcast_back(arg, &grad, reduced_dims)?; let grad = node.eq(arg)?.to_dtype(grad.dtype())?.mul(&grad)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad.broadcast_as(sum_grad.dims())?)?; } Op::ToDType(arg) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad.to_dtype(arg.dtype())?)? } Op::Copy(arg) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&grad)? } Op::Affine { arg, mul, .. } => { let arg_grad = grad.affine(*mul, 0.)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } Op::Unary(arg, UnaryOp::Log) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&(grad / arg)?)? } Op::Unary(arg, UnaryOp::Sin) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&(&grad * arg.cos())?)? } Op::Unary(arg, UnaryOp::Cos) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.sub(&(&grad * arg.sin())?)? } Op::Unary(arg, UnaryOp::Tanh) => { let sum_grad = grads.or_insert(arg)?; let minus_dtanh = (node.sqr()? - 1.)?; *sum_grad = sum_grad.sub(&(&grad * &minus_dtanh)?)? } Op::Unary(arg, UnaryOp::Abs) => { let sum_grad = grads.or_insert(arg)?; let ones = arg.ones_like()?; let abs_grad = arg.ge(&arg.zeros_like()?)?.where_cond(&ones, &ones.neg()?); *sum_grad = sum_grad.add(&(&grad * abs_grad)?)? } Op::Unary(arg, UnaryOp::Exp) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&(&grad * *node)?)? } Op::Unary(arg, UnaryOp::Neg) => { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.sub(&grad)? } Op::Unary(arg, UnaryOp::Recip) => { let sum_grad = grads.or_insert(arg)?; let grad = (grad / arg.sqr()?)?; *sum_grad = sum_grad.sub(&grad)? } &Op::Narrow(ref arg, dim, start_idx, len) => { let arg_dims = arg.dims(); let left_pad = if start_idx == 0 { None } else { let mut dims = arg_dims.to_vec(); dims[dim] = start_idx; Some(Tensor::zeros(dims, grad.dtype(), grad.device())?) }; let right_pad = arg_dims[dim] - start_idx - len; let right_pad = if right_pad == 0 { None } else { let mut dims = arg_dims.to_vec(); dims[dim] = right_pad; Some(Tensor::zeros(dims, grad.dtype(), grad.device())?) }; let arg_grad = match (left_pad, right_pad) { (None, None) => grad, (Some(l), None) => Tensor::cat(&[&l, &grad], dim)?, (None, Some(r)) => Tensor::cat(&[&grad, &r], dim)?, (Some(l), Some(r)) => Tensor::cat(&[&l, &grad, &r], dim)?, }; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } Op::Reduce(_, ReduceOp::ArgMin, _) => {} Op::Reduce(_, ReduceOp::ArgMax, _) => {} Op::Reshape(arg) => { let arg_grad = grad.reshape(arg.dims())?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? 
} Op::Unary(_, UnaryOp::Ceil) => Err(Error::BackwardNotSupported { op: "ceil" })?, Op::Unary(_, UnaryOp::Floor) => { Err(Error::BackwardNotSupported { op: "floor" })? } Op::Unary(_, UnaryOp::Round) => { Err(Error::BackwardNotSupported { op: "round" })? } Op::Unary(arg, UnaryOp::Gelu) => { let sum_grad = grads.or_insert(arg)?; let cube = arg.powf(3.)?; let tanh = (0.0356774 * &cube + (0.797885 * arg)?)?.tanh()?; let gelu_grad = (((0.5 * &tanh)? + (0.0535161 * cube + (0.398942 * arg)?)? * (1. - tanh.powf(2.)?))? + 0.5)?; *sum_grad = sum_grad.add(&(&grad * gelu_grad)?)? } Op::Unary(arg, UnaryOp::Erf) => { let sum_grad = grads.or_insert(arg)?; // d/dx erf(x) = 2/sqrt(pi) * e^(-x^2) let erf_grad = (2. / std::f64::consts::PI.sqrt()) * (arg.sqr()?.neg()?).exp()?; *sum_grad = sum_grad.add(&(&grad * erf_grad)?)? } Op::Unary(arg, UnaryOp::GeluErf) => { let sum_grad = grads.or_insert(arg)?; // d/dx gelu_erf(x) = 0.5 + 0.398942 e^(-x^2/2) x + 0.5 erf(x/sqrt(2)) let neg_half_square = (arg.sqr()?.neg()? / 2.)?; let scaled_exp_arg = (0.398942 * neg_half_square.exp()? * arg)?; let arg_scaled_sqrt = (arg / 2f64.sqrt())?; let erf_scaled_sqrt = (0.5 * arg_scaled_sqrt.erf()?)?; let gelu_erf_grad = (0.5 + scaled_exp_arg + erf_scaled_sqrt)?; *sum_grad = sum_grad.add(&(&grad * gelu_erf_grad)?)?; } Op::Unary(arg, UnaryOp::Relu) => { let sum_grad = grads.or_insert(arg)?; let relu_grad = arg.ge(&arg.zeros_like()?)?.to_dtype(arg.dtype())?; *sum_grad = sum_grad.add(&(&grad * relu_grad)?)? } Op::Elu(arg, alpha) => { // d/dx elu(x) = 1 for x > 0, alpha * e^x for x <= 0 let sum_grad = grads.or_insert(arg)?; let zeros = arg.zeros_like()?; let positive_mask = arg.gt(&zeros)?.to_dtype(arg.dtype())?; let negative_mask = arg.le(&zeros)?.to_dtype(arg.dtype())?; let negative_exp_mask = ((negative_mask * arg.exp())? * *alpha)?; let combined_mask = (positive_mask + negative_exp_mask)?; *sum_grad = sum_grad.add(&(grad * combined_mask)?)? } Op::Powf(arg, e) => { let arg_grad = (&(grad * arg.powf(e - 1.)?)? * *e)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } Op::CustomOp1(arg, c) => { if let Some(arg_grad) = c.bwd(arg, node, &grad)? { let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } } Op::CustomOp2(arg1, arg2, c) => { let (arg_grad1, arg_grad2) = c.bwd(arg1, arg2, node, &grad)?; if let Some(arg_grad1) = arg_grad1 { let sum_grad = grads.or_insert(arg1)?; *sum_grad = sum_grad.add(&arg_grad1)? } if let Some(arg_grad2) = arg_grad2 { let sum_grad = grads.or_insert(arg2)?; *sum_grad = sum_grad.add(&arg_grad2)? } } Op::CustomOp3(arg1, arg2, arg3, c) => { let (arg_grad1, arg_grad2, arg_grad3) = c.bwd(arg1, arg2, arg3, node, &grad)?; if let Some(arg_grad1) = arg_grad1 { let sum_grad = grads.or_insert(arg1)?; *sum_grad = sum_grad.add(&arg_grad1)? } if let Some(arg_grad2) = arg_grad2 { let sum_grad = grads.or_insert(arg2)?; *sum_grad = sum_grad.add(&arg_grad2)? } if let Some(arg_grad3) = arg_grad3 { let sum_grad = grads.or_insert(arg3)?; *sum_grad = sum_grad.add(&arg_grad3)? } } Op::Unary(arg, UnaryOp::Sqr) => { let arg_grad = arg.mul(&grad)?.affine(2., 0.)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } Op::Unary(arg, UnaryOp::Sqrt) => { let arg_grad = grad.div(node)?.affine(0.5, 0.)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } Op::ToDevice(arg) => { let sum_grad = grads.or_insert(arg)?; let arg_grad = grad.to_device(sum_grad.device())?; *sum_grad = sum_grad.add(&arg_grad)? 
} Op::Transpose(arg, dim1, dim2) => { let arg_grad = grad.transpose(*dim1, *dim2)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } Op::Permute(arg, dims) => { let mut inv_dims = vec![0; dims.len()]; for (i, &dim_idx) in dims.iter().enumerate() { inv_dims[dim_idx] = i } let arg_grad = grad.permute(inv_dims)?; let sum_grad = grads.or_insert(arg)?; *sum_grad = sum_grad.add(&arg_grad)? } }; } } Ok(grads) } } #[derive(Debug)] pub struct GradStore(HashMap<TensorId, Tensor>); impl GradStore { fn new() -> Self { GradStore(HashMap::new()) } pub fn get_id(&self, id: TensorId) -> Option<&Tensor> { self.0.get(&id) } pub fn get(&self, tensor: &Tensor) -> Option<&Tensor> { self.0.get(&tensor.id()) } pub fn remove(&mut self, tensor: &Tensor) -> Option<Tensor> { self.0.remove(&tensor.id()) } pub fn insert(&mut self, tensor: &Tensor, grad: Tensor) -> Option<Tensor> { self.0.insert(tensor.id(), grad) } fn or_insert(&mut self, tensor: &Tensor) -> Result<&mut Tensor> { use std::collections::hash_map::Entry; let grad = match self.0.entry(tensor.id()) { Entry::Occupied(entry) => entry.into_mut(), Entry::Vacant(entry) => { let grad = tensor.zeros_like()?; entry.insert(grad) } }; Ok(grad) } }
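#[cfg(test)]
mod backprop_sketch {
    // A minimal sketch of how the backward pass above is typically driven:
    // build a graph from a `Var`, call `backward`, then look the gradient up
    // in the returned `GradStore`.
    use crate::{Device, Var};

    #[test]
    fn sqr_grad() -> crate::Result<()> {
        let x = Var::new(&[3f32], &Device::Cpu)?;
        // y = x^2, so dy/dx = 2x = 6 at x = 3.
        let y = x.sqr()?;
        let grads = y.backward()?;
        let dx = grads.get(&x).expect("no gradient for x");
        assert_eq!(dx.to_vec1::<f32>()?, [6f32]);
        Ok(())
    }
}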
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/cudnn.rs
use crate::WithDType; use cudarc; use cudarc::cudnn::safe::{Conv2dForward, Cudnn}; use cudarc::driver::{CudaSlice, CudaView, DeviceRepr, ValidAsZeroBits}; use std::cell::RefCell; use std::collections::HashMap; use std::sync::Arc; // The cudnn handles are stored per thread here rather than on the CudaDevice as they are neither // send nor sync. thread_local! { static CUDNN: RefCell<HashMap<crate::cuda_backend::DeviceId, Arc<Cudnn>>> = HashMap::new().into(); } impl From<cudarc::cudnn::CudnnError> for crate::Error { fn from(err: cudarc::cudnn::CudnnError) -> Self { crate::Error::wrap(err) } } impl From<cudarc::driver::DriverError> for crate::Error { fn from(err: cudarc::driver::DriverError) -> Self { crate::Error::wrap(err) } } pub(crate) fn launch_conv2d< T: DeviceRepr + WithDType + ValidAsZeroBits + cudarc::cudnn::CudnnDataType, >( src: &CudaView<T>, src_l: &crate::Layout, filter: &CudaView<T>, dst: &mut CudaSlice<T>, params: &crate::conv::ParamsConv2D, dev: &crate::cuda_backend::CudaDevice, ) -> crate::Result<()> { use crate::conv::CudnnFwdAlgo as CandleAlgo; use cudarc::cudnn::sys::cudnnConvolutionFwdAlgo_t as A; let device_id = dev.id(); let cudnn = CUDNN.with(|cudnn| { if let Some(cudnn) = cudnn.borrow().get(&device_id) { return Ok(cudnn.clone()); } let c = Cudnn::new(dev.cuda_device()); if let Ok(c) = &c { cudnn.borrow_mut().insert(device_id, c.clone()); } c })?; let conv = cudnn.create_conv2d::<T>( /* pad */ [params.padding as i32, params.padding as i32], /* stride */ [params.stride as i32, params.stride as i32], /* dilation */ [params.dilation as i32, params.dilation as i32], cudarc::cudnn::sys::cudnnConvolutionMode_t::CUDNN_CROSS_CORRELATION, )?; let x_shape = [ params.b_size as i32, params.c_in as i32, params.i_h as i32, params.i_w as i32, ]; // Note that `src` already starts at the proper offset. let x = if src_l.is_contiguous() { cudnn.create_4d_tensor( cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, x_shape, )? } else { let s = src_l.stride(); cudnn.create_4d_tensor_ex( x_shape, [s[0] as i32, s[1] as i32, s[2] as i32, s[3] as i32], )? 
}; let w = cudnn.create_4d_filter( cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, [ params.c_out as i32, params.c_in as i32, params.k_h as i32, params.k_w as i32, ], )?; let (w_out, h_out) = (params.out_w() as i32, params.out_h() as i32); let y = cudnn.create_4d_tensor( cudarc::cudnn::sys::cudnnTensorFormat_t::CUDNN_TENSOR_NCHW, [params.b_size as i32, params.c_out as i32, h_out, w_out], )?; let conv2d = Conv2dForward { conv: &conv, x: &x, w: &w, y: &y, }; let alg = match params.cudnn_fwd_algo { None => conv2d.pick_algorithm()?, Some(CandleAlgo::ImplicitGemm) => A::CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM, Some(CandleAlgo::ImplicitPrecompGemm) => { A::CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM } Some(CandleAlgo::Gemm) => A::CUDNN_CONVOLUTION_FWD_ALGO_GEMM, Some(CandleAlgo::Direct) => A::CUDNN_CONVOLUTION_FWD_ALGO_DIRECT, Some(CandleAlgo::Fft) => A::CUDNN_CONVOLUTION_FWD_ALGO_FFT, Some(CandleAlgo::FftTiling) => A::CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING, Some(CandleAlgo::Winograd) => A::CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD, Some(CandleAlgo::WinogradNonFused) => A::CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED, Some(CandleAlgo::Count) => A::CUDNN_CONVOLUTION_FWD_ALGO_COUNT, }; let workspace_size = conv2d.get_workspace_size(alg)?; let mut workspace = dev.cuda_device().alloc_zeros::<u8>(workspace_size)?; unsafe { conv2d.launch::<CudaSlice<u8>, _, _, _>( alg, Some(&mut workspace), (T::one(), T::zero()), src, filter, dst, )?; } Ok(()) }
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/npy.rs
//! Numpy support for tensors.
//!
//! The spec for the npy format can be found in
//! [npy-format](https://docs.scipy.org/doc/numpy-1.14.2/neps/npy-format.html).
//! The functions from this module can be used to read tensors from npy/npz files
//! or write tensors to these files. A npy file contains a single tensor (unnamed)
//! whereas a npz file can contain multiple named tensors. npz files are zip
//! archives that can optionally be compressed.
//!
//! These two formats are easy to use in Python using the numpy library.
//!
//! ```python
//! import numpy as np
//! x = np.arange(10)
//!
//! # Write a npy file.
//! np.save("test.npy", x)
//!
//! # Read a value from the npy file.
//! x = np.load("test.npy")
//!
//! # Write multiple values to a npz file.
//! values = { "x": x, "x_plus_one": x + 1 }
//! np.savez("test.npz", **values)
//!
//! # Load multiple values from a npz file.
//! values = np.load("test.npz")
//! ```
use crate::{DType, Device, Error, Result, Shape, Tensor};
use byteorder::{LittleEndian, ReadBytesExt};
use half::{bf16, f16, slice::HalfFloatSliceExt};
use std::collections::HashMap;
use std::fs::File;
use std::io::{BufReader, Read, Write};
use std::path::Path;

const NPY_MAGIC_STRING: &[u8] = b"\x93NUMPY";
const NPY_SUFFIX: &str = ".npy";

fn read_header<R: Read>(reader: &mut R) -> Result<String> {
    let mut magic_string = vec![0u8; NPY_MAGIC_STRING.len()];
    reader.read_exact(&mut magic_string)?;
    if magic_string != NPY_MAGIC_STRING {
        return Err(Error::Npy("magic string mismatch".to_string()));
    }
    let mut version = [0u8; 2];
    reader.read_exact(&mut version)?;
    let header_len_len = match version[0] {
        1 => 2,
        2 => 4,
        otherwise => return Err(Error::Npy(format!("unsupported version {otherwise}"))),
    };
    let mut header_len = vec![0u8; header_len_len];
    reader.read_exact(&mut header_len)?;
    let header_len = header_len
        .iter()
        .rev()
        .fold(0_usize, |acc, &v| 256 * acc + v as usize);
    let mut header = vec![0u8; header_len];
    reader.read_exact(&mut header)?;
    Ok(String::from_utf8_lossy(&header).to_string())
}

#[derive(Debug, PartialEq)]
struct Header {
    descr: DType,
    fortran_order: bool,
    shape: Vec<usize>,
}

impl Header {
    fn shape(&self) -> Shape {
        Shape::from(self.shape.as_slice())
    }

    fn to_string(&self) -> Result<String> {
        let fortran_order = if self.fortran_order { "True" } else { "False" };
        let mut shape = self
            .shape
            .iter()
            .map(|x| x.to_string())
            .collect::<Vec<_>>()
            .join(",");
        let descr = match self.descr {
            DType::BF16 => Err(Error::Npy("bf16 is not supported".into()))?,
            DType::F16 => "f2",
            DType::F32 => "f4",
            DType::F64 => "f8",
            DType::I64 => "i8",
            DType::U32 => "u4",
            DType::U8 => "u1",
        };
        if !shape.is_empty() {
            shape.push(',')
        }
        Ok(format!(
            "{{'descr': '<{descr}', 'fortran_order': {fortran_order}, 'shape': ({shape}), }}"
        ))
    }

    // Hacky parser for the npy header, a typical example would be:
    // {'descr': '<f8', 'fortran_order': False, 'shape': (128,), }
    fn parse(header: &str) -> Result<Header> {
        let header =
            header.trim_matches(|c: char| c == '{' || c == '}' || c == ',' || c.is_whitespace());
        let mut parts: Vec<String> = vec![];
        let mut start_index = 0usize;
        let mut cnt_parenthesis = 0i64;
        for (index, c) in header.chars().enumerate() {
            match c {
                '(' => cnt_parenthesis += 1,
                ')' => cnt_parenthesis -= 1,
                ',' => {
                    if cnt_parenthesis == 0 {
                        parts.push(header[start_index..index].to_owned());
                        start_index = index + 1;
                    }
                }
                _ => {}
            }
        }
        parts.push(header[start_index..].to_owned());
        let mut part_map: HashMap<String, String> = HashMap::new();
        for part in parts.iter() {
            let part = part.trim();
            if !part.is_empty() {
                match part.split(':').collect::<Vec<_>>().as_slice() {
                    [key, value] => {
                        let key = key.trim_matches(|c: char| c == '\'' || c.is_whitespace());
                        let value = value.trim_matches(|c: char| c == '\'' || c.is_whitespace());
                        let _ = part_map.insert(key.to_owned(), value.to_owned());
                    }
                    _ => return Err(Error::Npy(format!("unable to parse header {header}"))),
                }
            }
        }
        let fortran_order = match part_map.get("fortran_order") {
            None => false,
            Some(fortran_order) => match fortran_order.as_ref() {
                "False" => false,
                "True" => true,
                _ => return Err(Error::Npy(format!("unknown fortran_order {fortran_order}"))),
            },
        };
        let descr = match part_map.get("descr") {
            None => return Err(Error::Npy("no descr in header".to_string())),
            Some(descr) => {
                if descr.is_empty() {
                    return Err(Error::Npy("empty descr".to_string()));
                }
                // '>' denotes a big-endian layout, which is not supported here.
                if descr.starts_with('>') {
                    return Err(Error::Npy(format!("big-endian descr {descr}")));
                }
                // the only supported types in tensor are:
                //     float64, float32, float16,
                //     complex64, complex128,
                //     int64, int32, int16, int8,
                //     uint8, and bool.
                match descr.trim_matches(|c: char| c == '=' || c == '<' || c == '|') {
                    "e" | "f2" => DType::F16,
                    "f" | "f4" => DType::F32,
                    "d" | "f8" => DType::F64,
                    // "i" | "i4" => DType::S32,
                    "q" | "i8" => DType::I64,
                    // "h" | "i2" => DType::S16,
                    // "b" | "i1" => DType::S8,
                    "B" | "u1" => DType::U8,
                    "I" | "u4" => DType::U32,
                    "?" | "b1" => DType::U8,
                    // "F" | "F4" => DType::C64,
                    // "D" | "F8" => DType::C128,
                    descr => return Err(Error::Npy(format!("unrecognized descr {descr}"))),
                }
            }
        };
        let shape = match part_map.get("shape") {
            None => return Err(Error::Npy("no shape in header".to_string())),
            Some(shape) => {
                let shape = shape.trim_matches(|c: char| c == '(' || c == ')' || c == ',');
                if shape.is_empty() {
                    vec![]
                } else {
                    shape
                        .split(',')
                        .map(|v| v.trim().parse::<usize>())
                        .collect::<std::result::Result<Vec<_>, _>>()?
                }
            }
        };
        Ok(Header {
            descr,
            fortran_order,
            shape,
        })
    }
}

impl Tensor {
    // TODO: Add the possibility to read directly to a device?
    pub(crate) fn from_reader<R: std::io::Read>(
        shape: Shape,
        dtype: DType,
        reader: &mut R,
    ) -> Result<Self> {
        let elem_count = shape.elem_count();
        match dtype {
            DType::BF16 => {
                let mut data_t = vec![bf16::ZERO; elem_count];
                reader.read_u16_into::<LittleEndian>(data_t.reinterpret_cast_mut())?;
                Tensor::from_vec(data_t, shape, &Device::Cpu)
            }
            DType::F16 => {
                let mut data_t = vec![f16::ZERO; elem_count];
                reader.read_u16_into::<LittleEndian>(data_t.reinterpret_cast_mut())?;
                Tensor::from_vec(data_t, shape, &Device::Cpu)
            }
            DType::F32 => {
                let mut data_t = vec![0f32; elem_count];
                reader.read_f32_into::<LittleEndian>(&mut data_t)?;
                Tensor::from_vec(data_t, shape, &Device::Cpu)
            }
            DType::F64 => {
                let mut data_t = vec![0f64; elem_count];
                reader.read_f64_into::<LittleEndian>(&mut data_t)?;
                Tensor::from_vec(data_t, shape, &Device::Cpu)
            }
            DType::U8 => {
                let mut data_t = vec![0u8; elem_count];
                reader.read_exact(&mut data_t)?;
                Tensor::from_vec(data_t, shape, &Device::Cpu)
            }
            DType::U32 => {
                let mut data_t = vec![0u32; elem_count];
                reader.read_u32_into::<LittleEndian>(&mut data_t)?;
                Tensor::from_vec(data_t, shape, &Device::Cpu)
            }
            DType::I64 => {
                let mut data_t = vec![0i64; elem_count];
                reader.read_i64_into::<LittleEndian>(&mut data_t)?;
                Tensor::from_vec(data_t, shape, &Device::Cpu)
            }
        }
    }

    /// Reads a npy file and returns the stored multi-dimensional array as a tensor.
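    ///
    /// A minimal usage sketch (the file name is illustrative):
    /// ```no_run
    /// # fn main() -> candle_core::Result<()> {
    /// let t = candle_core::Tensor::read_npy("test.npy")?;
    /// println!("{:?}", t.shape());
    /// # Ok(())
    /// # }
    /// ```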
pub fn read_npy<T: AsRef<Path>>(path: T) -> Result<Self> { let mut reader = File::open(path.as_ref())?; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } Self::from_reader(header.shape(), header.descr, &mut reader) } /// Reads a npz file and returns the stored multi-dimensional arrays together with their names. pub fn read_npz<T: AsRef<Path>>(path: T) -> Result<Vec<(String, Self)>> { let zip_reader = BufReader::new(File::open(path.as_ref())?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut result = vec![]; for i in 0..zip.len() { let mut reader = zip.by_index(i)?; let name = { let name = reader.name(); name.strip_suffix(NPY_SUFFIX).unwrap_or(name).to_owned() }; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } let s = Self::from_reader(header.shape(), header.descr, &mut reader)?; result.push((name, s)) } Ok(result) } /// Reads a npz file and returns the stored multi-dimensional arrays for some specified names. pub fn read_npz_by_name<T: AsRef<Path>>(path: T, names: &[&str]) -> Result<Vec<Self>> { let zip_reader = BufReader::new(File::open(path.as_ref())?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut result = vec![]; for name in names.iter() { let mut reader = match zip.by_name(&format!("{name}{NPY_SUFFIX}")) { Ok(reader) => reader, Err(_) => Err(Error::Npy(format!( "no array for {name} in {:?}", path.as_ref() )))?, }; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } let s = Self::from_reader(header.shape(), header.descr, &mut reader)?; result.push(s) } Ok(result) } fn write<T: Write>(&self, f: &mut T) -> Result<()> { f.write_all(NPY_MAGIC_STRING)?; f.write_all(&[1u8, 0u8])?; let header = Header { descr: self.dtype(), fortran_order: false, shape: self.dims().to_vec(), }; let mut header = header.to_string()?; let pad = 16 - (NPY_MAGIC_STRING.len() + 5 + header.len()) % 16; for _ in 0..pad % 16 { header.push(' ') } header.push('\n'); f.write_all(&[(header.len() % 256) as u8, (header.len() / 256) as u8])?; f.write_all(header.as_bytes())?; self.write_bytes(f) } /// Writes a multi-dimensional array in the npy format. pub fn write_npy<T: AsRef<Path>>(&self, path: T) -> Result<()> { let mut f = File::create(path.as_ref())?; self.write(&mut f) } /// Writes multiple multi-dimensional arrays using the npz format. pub fn write_npz<S: AsRef<str>, T: AsRef<Tensor>, P: AsRef<Path>>( ts: &[(S, T)], path: P, ) -> Result<()> { let mut zip = zip::ZipWriter::new(File::create(path.as_ref())?); let options = zip::write::FileOptions::default().compression_method(zip::CompressionMethod::Stored); for (name, tensor) in ts.iter() { zip.start_file(format!("{}.npy", name.as_ref()), options)?; tensor.as_ref().write(&mut zip)? } Ok(()) } } /// Lazy tensor loader. pub struct NpzTensors { index_per_name: HashMap<String, usize>, path: std::path::PathBuf, // We do not store a zip reader as it needs mutable access to extract data. Instead we // re-create a zip reader for each tensor. 
} impl NpzTensors { pub fn new<T: AsRef<Path>>(path: T) -> Result<Self> { let path = path.as_ref().to_owned(); let zip_reader = BufReader::new(File::open(&path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut index_per_name = HashMap::new(); for i in 0..zip.len() { let file = zip.by_index(i)?; let name = { let name = file.name(); name.strip_suffix(NPY_SUFFIX).unwrap_or(name).to_owned() }; index_per_name.insert(name, i); } Ok(Self { index_per_name, path, }) } pub fn names(&self) -> Vec<&String> { self.index_per_name.keys().collect() } /// This only returns the shape and dtype for a named tensor. Compared to `get`, this avoids /// reading the whole tensor data. pub fn get_shape_and_dtype(&self, name: &str) -> Result<(Shape, DType)> { let index = match self.index_per_name.get(name) { None => crate::bail!("cannot find tensor {name}"), Some(index) => *index, }; let zip_reader = BufReader::new(File::open(&self.path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut reader = zip.by_index(index)?; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; Ok((header.shape(), header.descr)) } pub fn get(&self, name: &str) -> Result<Option<Tensor>> { let index = match self.index_per_name.get(name) { None => return Ok(None), Some(index) => *index, }; // We hope that the file has not changed since first reading it. let zip_reader = BufReader::new(File::open(&self.path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut reader = zip.by_index(index)?; let header = read_header(&mut reader)?; let header = Header::parse(&header)?; if header.fortran_order { return Err(Error::Npy("fortran order not supported".to_string())); } let tensor = Tensor::from_reader(header.shape(), header.descr, &mut reader)?; Ok(Some(tensor)) } } #[cfg(test)] mod tests { use super::Header; #[test] fn parse() { let h = "{'descr': '<f8', 'fortran_order': False, 'shape': (128,), }"; assert_eq!( Header::parse(h).unwrap(), Header { descr: crate::DType::F64, fortran_order: false, shape: vec![128] } ); let h = "{'descr': '<f4', 'fortran_order': True, 'shape': (256,1,128), }"; let h = Header::parse(h).unwrap(); assert_eq!( h, Header { descr: crate::DType::F32, fortran_order: true, shape: vec![256, 1, 128] } ); assert_eq!( h.to_string().unwrap(), "{'descr': '<f4', 'fortran_order': True, 'shape': (256,1,128,), }" ); let h = Header { descr: crate::DType::U32, fortran_order: false, shape: vec![], }; assert_eq!( h.to_string().unwrap(), "{'descr': '<u4', 'fortran_order': False, 'shape': (), }" ); } }
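#[cfg(test)]
mod roundtrip {
    use crate::{DType, Device, Tensor};

    // A minimal write/read round-trip sketch for the npy format (the file name
    // is illustrative): write a tensor with `write_npy`, read it back with
    // `read_npy`, and check that shape and dtype survive.
    #[test]
    fn npy_roundtrip() {
        let t = Tensor::zeros((2, 3), DType::F32, &Device::Cpu).unwrap();
        t.write_npy("roundtrip.npy").unwrap();
        let u = Tensor::read_npy("roundtrip.npy").unwrap();
        assert_eq!(u.dims(), &[2, 3]);
        assert_eq!(u.dtype(), DType::F32);
        std::fs::remove_file("roundtrip.npy").unwrap();
    }
}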
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/safetensors.rs
use crate::{DType, Device, Error, Result, Tensor, WithDType}; use safetensors::tensor as st; use safetensors::tensor::SafeTensors; use std::borrow::Cow; use std::collections::HashMap; use std::path::Path; impl From<DType> for st::Dtype { fn from(value: DType) -> Self { match value { DType::U8 => st::Dtype::U8, DType::U32 => st::Dtype::U32, DType::I64 => st::Dtype::I64, DType::BF16 => st::Dtype::BF16, DType::F16 => st::Dtype::F16, DType::F32 => st::Dtype::F32, DType::F64 => st::Dtype::F64, } } } impl TryFrom<st::Dtype> for DType { type Error = Error; fn try_from(value: st::Dtype) -> Result<Self> { match value { st::Dtype::U8 => Ok(DType::U8), st::Dtype::U32 => Ok(DType::U32), st::Dtype::I64 => Ok(DType::I64), st::Dtype::BF16 => Ok(DType::BF16), st::Dtype::F16 => Ok(DType::F16), st::Dtype::F32 => Ok(DType::F32), st::Dtype::F64 => Ok(DType::F64), dtype => Err(Error::UnsupportedSafeTensorDtype(dtype)), } } } impl st::View for Tensor { fn dtype(&self) -> st::Dtype { self.dtype().into() } fn shape(&self) -> &[usize] { self.shape().dims() } fn data(&self) -> Cow<[u8]> { // This copies data from GPU to CPU. // TODO: Avoid the unwrap here. Cow::Owned(convert_back(self).unwrap()) } fn data_len(&self) -> usize { let n: usize = self.shape().elem_count(); let bytes_per_element = self.dtype().size_in_bytes(); n * bytes_per_element } } impl st::View for &Tensor { fn dtype(&self) -> st::Dtype { (*self).dtype().into() } fn shape(&self) -> &[usize] { self.dims() } fn data(&self) -> Cow<[u8]> { // This copies data from GPU to CPU. // TODO: Avoid the unwrap here. Cow::Owned(convert_back(self).unwrap()) } fn data_len(&self) -> usize { let n: usize = self.dims().iter().product(); let bytes_per_element = (*self).dtype().size_in_bytes(); n * bytes_per_element } } impl Tensor { pub fn save_safetensors<P: AsRef<Path>>(&self, name: &str, filename: P) -> Result<()> { let data = [(name, self.clone())]; Ok(st::serialize_to_file(data, &None, filename.as_ref())?) } } fn convert_slice<T: WithDType>(data: &[u8], shape: &[usize], device: &Device) -> Result<Tensor> { let size_in_bytes = T::DTYPE.size_in_bytes(); let elem_count = data.len() / size_in_bytes; if (data.as_ptr() as usize) % size_in_bytes == 0 { // SAFETY This is safe because we just checked that this // was correctly aligned. let data: &[T] = unsafe { std::slice::from_raw_parts(data.as_ptr() as *const T, elem_count) }; Tensor::from_slice(data, shape, device) } else { // XXX: We need to specify `T` here, otherwise the compiler will infer u8 because of the following cast // Making this vector too small to fit a full f16/f32/f64 weights, resulting in out-of-bounds access let mut c: Vec<T> = Vec::with_capacity(elem_count); // SAFETY: We just created c, so the allocated memory is necessarily // contiguous and non overlapping with the view's data. // We're downgrading the `c` pointer from T to u8, which removes alignment // constraints. unsafe { std::ptr::copy_nonoverlapping(data.as_ptr(), c.as_mut_ptr() as *mut u8, data.len()); c.set_len(elem_count) } Tensor::from_slice(&c, shape, device) } } fn convert_slice_with_cast<T: Sized + Copy, U: WithDType, F: Fn(T) -> Result<U>>( data: &[u8], shape: &[usize], device: &Device, conv: F, ) -> Result<Tensor> { let size_in_bytes = std::mem::size_of::<T>(); let elem_count = data.len() / size_in_bytes; if (data.as_ptr() as usize) % size_in_bytes == 0 { // SAFETY This is safe because we just checked that this // was correctly aligned. 
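        // (Alignment of `data.as_ptr()` to the element size is what
        // `std::slice::from_raw_parts` requires here, and `elem_count` is
        // computed from `data.len()`, so the reinterpreted slice stays in
        // bounds.)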
let data: &[T] = unsafe { std::slice::from_raw_parts(data.as_ptr() as *const T, elem_count) }; let data = data.iter().map(|t| conv(*t)).collect::<Result<Vec<_>>>()?; Tensor::from_vec(data, shape, device) } else { // XXX: We need to specify `T` here, otherwise the compiler will infer u8 because of the following cast // Making this vector too small to fit a full f16/f32/f64 weights, resulting in out-of-bounds access let mut c: Vec<T> = Vec::with_capacity(elem_count); // SAFETY: We just created c, so the allocated memory is necessarily // contiguous and non overlapping with the view's data. // We're downgrading the `c` pointer from T to u8, which removes alignment // constraints. unsafe { std::ptr::copy_nonoverlapping(data.as_ptr(), c.as_mut_ptr() as *mut u8, data.len()); c.set_len(elem_count) } let c = c.into_iter().map(conv).collect::<Result<Vec<_>>>()?; Tensor::from_vec(c, shape, device) } } fn convert_with_cast_<T: Sized + Copy, U: WithDType, F: Fn(T) -> Result<U>>( view: &st::TensorView<'_>, device: &Device, conv: F, ) -> Result<Tensor> { convert_slice_with_cast::<T, U, F>(view.data(), view.shape(), device, conv) } fn convert_<T: WithDType>(view: &st::TensorView<'_>, device: &Device) -> Result<Tensor> { convert_slice::<T>(view.data(), view.shape(), device) } fn convert_back_<T: WithDType>(mut vs: Vec<T>) -> Vec<u8> { let size_in_bytes = T::DTYPE.size_in_bytes(); let length = vs.len() * size_in_bytes; let capacity = vs.capacity() * size_in_bytes; let ptr = vs.as_mut_ptr() as *mut u8; // Don't run the destructor for Vec<T> std::mem::forget(vs); // SAFETY: // // Every T is larger than u8, so there is no issue regarding alignment. // This re-interpret the Vec<T> as a Vec<u8>. unsafe { Vec::from_raw_parts(ptr, length, capacity) } } pub trait Load { fn load(&self, device: &Device) -> Result<Tensor>; } impl<'a> Load for st::TensorView<'a> { fn load(&self, device: &Device) -> Result<Tensor> { convert(self, device) } } impl Tensor { pub fn from_raw_buffer( data: &[u8], dtype: DType, shape: &[usize], device: &Device, ) -> Result<Self> { match dtype { DType::U8 => convert_slice::<u8>(data, shape, device), DType::U32 => convert_slice::<u32>(data, shape, device), DType::I64 => convert_slice::<i64>(data, shape, device), DType::BF16 => convert_slice::<half::bf16>(data, shape, device), DType::F16 => convert_slice::<half::f16>(data, shape, device), DType::F32 => convert_slice::<f32>(data, shape, device), DType::F64 => convert_slice::<f64>(data, shape, device), } } } fn convert(view: &st::TensorView<'_>, device: &Device) -> Result<Tensor> { match view.dtype() { st::Dtype::U8 => convert_::<u8>(view, device), st::Dtype::U16 => { let conv = |x| Ok(u32::from(x)); convert_with_cast_::<u16, u32, _>(view, device, conv) } st::Dtype::U32 => convert_::<u32>(view, device), st::Dtype::I32 => { let conv = |x| Ok(i64::from(x)); convert_with_cast_::<i32, i64, _>(view, device, conv) } st::Dtype::I64 => convert_::<i64>(view, device), st::Dtype::BF16 => convert_::<half::bf16>(view, device), st::Dtype::F16 => convert_::<half::f16>(view, device), st::Dtype::F32 => convert_::<f32>(view, device), st::Dtype::F64 => convert_::<f64>(view, device), dtype => Err(Error::UnsupportedSafeTensorDtype(dtype)), } } fn convert_back(tensor: &Tensor) -> Result<Vec<u8>> { // TODO: This makes an unnecessary copy when the tensor is on the cpu. 
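    // `flatten_all` gives a contiguous, row-major 1d tensor, so the per-dtype
    // `to_vec1` + `convert_back_` calls below can reinterpret the elements as
    // raw bytes.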
let tensor = tensor.flatten_all()?; match tensor.dtype() { DType::U8 => Ok(convert_back_::<u8>(tensor.to_vec1()?)), DType::U32 => Ok(convert_back_::<u32>(tensor.to_vec1()?)), DType::I64 => Ok(convert_back_::<i64>(tensor.to_vec1()?)), DType::F16 => Ok(convert_back_::<half::f16>(tensor.to_vec1()?)), DType::BF16 => Ok(convert_back_::<half::bf16>(tensor.to_vec1()?)), DType::F32 => Ok(convert_back_::<f32>(tensor.to_vec1()?)), DType::F64 => Ok(convert_back_::<f64>(tensor.to_vec1()?)), } } pub fn load<P: AsRef<Path>>(filename: P, device: &Device) -> Result<HashMap<String, Tensor>> { let data = std::fs::read(filename.as_ref())?; load_buffer(&data[..], device) } pub fn load_buffer(data: &[u8], device: &Device) -> Result<HashMap<String, Tensor>> { let st = safetensors::SafeTensors::deserialize(data)?; st.tensors() .into_iter() .map(|(name, view)| Ok((name, view.load(device)?))) .collect() } pub fn save<K: AsRef<str> + Ord + std::fmt::Display, P: AsRef<Path>>( tensors: &HashMap<K, Tensor>, filename: P, ) -> Result<()> { Ok(st::serialize_to_file(tensors, &None, filename.as_ref())?) } #[derive(yoke::Yokeable)] struct SafeTensors_<'a>(SafeTensors<'a>); pub struct MmapedSafetensors { safetensors: Vec<yoke::Yoke<SafeTensors_<'static>, memmap2::Mmap>>, routing: Option<HashMap<String, usize>>, } impl MmapedSafetensors { /// Creates a wrapper around a memory mapped file and deserialize the safetensors header. /// /// # Safety /// /// The unsafe is inherited from [`memmap2::MmapOptions`]. pub unsafe fn new<P: AsRef<Path>>(p: P) -> Result<Self> { let p = p.as_ref(); let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?; let file = memmap2::MmapOptions::new() .map(&file) .map_err(|e| Error::from(e).with_path(p))?; let safetensors = yoke::Yoke::<SafeTensors_<'static>, memmap2::Mmap>::try_attach_to_cart( file, |data: &[u8]| { let st = safetensors::SafeTensors::deserialize(data) .map_err(|e| Error::from(e).with_path(p))?; Ok::<_, Error>(SafeTensors_(st)) }, )?; Ok(Self { safetensors: vec![safetensors], routing: None, }) } /// Creates a wrapper around multiple memory mapped file and deserialize the safetensors headers. /// /// If a tensor name appears in multiple files, the last entry is returned. /// /// # Safety /// /// The unsafe is inherited from [`memmap2::MmapOptions`]. 
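    ///
    /// A usage sketch (shard and tensor names are illustrative):
    /// ```no_run
    /// # fn main() -> candle_core::Result<()> {
    /// let st = unsafe {
    ///     candle_core::safetensors::MmapedSafetensors::multi(&[
    ///         "model-00001-of-00002.safetensors",
    ///         "model-00002-of-00002.safetensors",
    ///     ])?
    /// };
    /// let t = st.load("some.tensor", &candle_core::Device::Cpu)?;
    /// # Ok(())
    /// # }
    /// ```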
pub unsafe fn multi<P: AsRef<Path>>(paths: &[P]) -> Result<Self> { let mut routing = HashMap::new(); let mut safetensors = vec![]; for (index, p) in paths.iter().enumerate() { let p = p.as_ref(); let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?; let file = memmap2::MmapOptions::new() .map(&file) .map_err(|e| Error::from(e).with_path(p))?; let data = yoke::Yoke::<SafeTensors_<'static>, memmap2::Mmap>::try_attach_to_cart( file, |data: &[u8]| { let st = safetensors::SafeTensors::deserialize(data) .map_err(|e| Error::from(e).with_path(p))?; Ok::<_, Error>(SafeTensors_(st)) }, )?; for k in data.get().0.names() { routing.insert(k.to_string(), index); } safetensors.push(data) } Ok(Self { safetensors, routing: Some(routing), }) } pub fn load(&self, name: &str, dev: &Device) -> Result<Tensor> { self.get(name)?.load(dev) } pub fn tensors(&self) -> Vec<(String, st::TensorView<'_>)> { let mut tensors = vec![]; for safetensors in self.safetensors.iter() { tensors.push(safetensors.get().0.tensors()) } tensors.into_iter().flatten().collect() } pub fn get(&self, name: &str) -> Result<st::TensorView<'_>> { let index = match &self.routing { None => 0, Some(routing) => { let index = routing.get(name).ok_or_else(|| { Error::CannotFindTensor { path: name.to_string(), } .bt() })?; *index } }; Ok(self.safetensors[index].get().0.tensor(name)?) } } pub struct BufferedSafetensors { safetensors: yoke::Yoke<SafeTensors_<'static>, Vec<u8>>, } impl BufferedSafetensors { /// Creates a wrapper around a binary buffer and deserialize the safetensors header. pub fn new(buffer: Vec<u8>) -> Result<Self> { let safetensors = yoke::Yoke::<SafeTensors_<'static>, Vec<u8>>::try_attach_to_cart( buffer, |data: &[u8]| { let st = safetensors::SafeTensors::deserialize(data)?; Ok::<_, Error>(SafeTensors_(st)) }, )?; Ok(Self { safetensors }) } pub fn load(&self, name: &str, dev: &Device) -> Result<Tensor> { self.get(name)?.load(dev) } pub fn tensors(&self) -> Vec<(String, st::TensorView<'_>)> { self.safetensors.get().0.tensors() } pub fn get(&self, name: &str) -> Result<st::TensorView<'_>> { Ok(self.safetensors.get().0.tensor(name)?) } } pub struct MmapedFile { path: std::path::PathBuf, inner: memmap2::Mmap, } impl MmapedFile { /// Creates a wrapper around a memory mapped file from which you can retrieve /// tensors using [`MmapedFile::deserialize`] /// /// # Safety /// /// The unsafe is inherited from [`memmap2::MmapOptions`]. 
pub unsafe fn new<P: AsRef<Path>>(p: P) -> Result<Self> { let p = p.as_ref(); let file = std::fs::File::open(p).map_err(|e| Error::from(e).with_path(p))?; let inner = memmap2::MmapOptions::new() .map(&file) .map_err(|e| Error::from(e).with_path(p))?; Ok(Self { inner, path: p.to_path_buf(), }) } pub fn deserialize(&self) -> Result<SafeTensors<'_>> { let st = safetensors::SafeTensors::deserialize(&self.inner) .map_err(|e| Error::from(e).with_path(&self.path))?; Ok(st) } } #[cfg(test)] mod tests { use super::*; use std::collections::HashMap; #[test] fn save_single_tensor() { let t = Tensor::zeros((2, 2), DType::F32, &Device::Cpu).unwrap(); t.save_safetensors("t", "t.safetensors").unwrap(); let bytes = std::fs::read("t.safetensors").unwrap(); assert_eq!(bytes, b"@\0\0\0\0\0\0\0{\"t\":{\"dtype\":\"F32\",\"shape\":[2,2],\"data_offsets\":[0,16]}} \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); std::fs::remove_file("t.safetensors").unwrap(); } #[test] fn save_load_multiple_tensors() { let t = Tensor::zeros((2, 2), DType::F32, &Device::Cpu).unwrap(); let u = Tensor::zeros((1, 2), DType::F32, &Device::Cpu).unwrap(); let map: HashMap<_, _> = [("t", t), ("u", u)].into_iter().collect(); save(&map, "multi.safetensors").unwrap(); let weights = load("multi.safetensors", &Device::Cpu).unwrap(); assert_eq!(weights.get("t").unwrap().dims(), &[2, 2]); assert_eq!(weights.get("u").unwrap().dims(), &[1, 2]); let bytes = std::fs::read("multi.safetensors").unwrap(); assert_eq!(bytes, b"x\0\0\0\0\0\0\0{\"t\":{\"dtype\":\"F32\",\"shape\":[2,2],\"data_offsets\":[0,16]},\"u\":{\"dtype\":\"F32\",\"shape\":[1,2],\"data_offsets\":[16,24]}} \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0"); std::fs::remove_file("multi.safetensors").unwrap(); } }
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/conv.rs
use crate::{op::BackpropOp, op::Op, Error, Result, Tensor}; #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConv1D { pub(crate) b_size: usize, // Maybe we should have a version without l_in as this bit depends on the input and not only on // the weights. pub(crate) l_in: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) k_size: usize, pub(crate) padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, } impl ParamsConv1D { pub(crate) fn l_out(&self) -> usize { (self.l_in + 2 * self.padding - self.dilation * (self.k_size - 1) - 1) / self.stride + 1 } pub(crate) fn out_dims(&self) -> Vec<usize> { let l_out = self.l_out(); vec![self.b_size, self.c_out, l_out] } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConvTranspose1D { pub(crate) b_size: usize, pub(crate) l_in: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) k_size: usize, pub(crate) padding: usize, pub(crate) output_padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, } impl ParamsConvTranspose1D { pub(crate) fn l_out(&self) -> usize { (self.l_in - 1) * self.stride - 2 * self.padding + self.dilation * (self.k_size - 1) + self.output_padding + 1 } pub(crate) fn out_dims(&self) -> Vec<usize> { let l_out = self.l_out(); vec![self.b_size, self.c_out, l_out] } } #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub enum CudnnFwdAlgo { ImplicitGemm, ImplicitPrecompGemm, Gemm, Direct, Fft, FftTiling, Winograd, WinogradNonFused, Count, } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConv2D { pub(crate) b_size: usize, pub(crate) i_h: usize, pub(crate) i_w: usize, pub(crate) k_h: usize, pub(crate) k_w: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, pub cudnn_fwd_algo: Option<CudnnFwdAlgo>, } impl ParamsConv2D { pub(crate) fn out_h(&self) -> usize { (self.i_h + 2 * self.padding - self.dilation * (self.k_h - 1) - 1) / self.stride + 1 } pub(crate) fn out_w(&self) -> usize { (self.i_w + 2 * self.padding - self.dilation * (self.k_w - 1) - 1) / self.stride + 1 } pub(crate) fn out_dims(&self) -> Vec<usize> { vec![self.b_size, self.c_out, self.out_h(), self.out_w()] } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct ParamsConvTranspose2D { pub(crate) b_size: usize, pub(crate) i_h: usize, pub(crate) i_w: usize, pub(crate) k_h: usize, pub(crate) k_w: usize, pub(crate) c_out: usize, pub(crate) c_in: usize, pub(crate) padding: usize, pub(crate) output_padding: usize, pub(crate) stride: usize, pub(crate) dilation: usize, } impl ParamsConvTranspose2D { pub(crate) fn out_h(&self) -> usize { (self.i_h - 1) * self.stride + self.dilation * (self.k_h - 1) + self.output_padding + 1 - 2 * self.padding } pub(crate) fn out_w(&self) -> usize { (self.i_w - 1) * self.stride + self.dilation * (self.k_w - 1) + self.output_padding + 1 - 2 * self.padding } pub(crate) fn out_dims(&self) -> Vec<usize> { vec![self.b_size, self.c_out, self.out_h(), self.out_w()] } } impl Tensor { fn conv1d_single_group(&self, kernel: &Self, params: &ParamsConv1D) -> Result<Self> { let storage = self.storage() .conv1d(self.layout(), &kernel.storage(), kernel.layout(), params)?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::Conv1D { arg, kernel, padding: params.padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } /// Applies a 1D convolution over the input tensor. 
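    ///
    /// The input is expected to have shape `(batch, c_in, l_in)` and the kernel
    /// `(c_out, c_in / groups, k_size)`. The output length follows
    /// `ParamsConv1D::l_out` above:
    /// `l_out = (l_in + 2 * padding - dilation * (k_size - 1) - 1) / stride + 1`.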
pub fn conv1d( &self, kernel: &Self, padding: usize, stride: usize, dilation: usize, groups: usize, ) -> Result<Self> { let (c_out, c_in_k, k_size) = kernel.dims3()?; let (b_size, c_in, l_in) = self.dims3()?; if c_in != c_in_k * groups { Err(Error::Conv1dInvalidArgs { inp_shape: self.shape().clone(), k_shape: kernel.shape().clone(), padding, stride, msg: "the number of in-channels on the input doesn't match the kernel size", } .bt())? } let params = ParamsConv1D { b_size, l_in, c_out: c_out / groups, c_in: c_in / groups, k_size, padding, stride, dilation, }; if groups == 1 { self.conv1d_single_group(kernel, &params) } else { let blocks = self.chunk(groups, 1)?; let kernel = kernel.chunk(groups, 0)?; let blocks = blocks .iter() .zip(&kernel) .map(|(block, kernel)| block.conv1d_single_group(kernel, &params)) .collect::<Result<Vec<_>>>()?; Tensor::cat(&blocks, 1) } } /// Applies a 1D transposed convolution over the input tensor. pub fn conv_transpose1d( &self, kernel: &Self, padding: usize, output_padding: usize, stride: usize, dilation: usize, ) -> Result<Self> { let (b_size, c_in, l_in) = self.dims3()?; let (c_in_k, c_out, k_size) = kernel.dims3()?; if c_in != c_in_k { crate::bail!("in_channel mismatch between input ({c_in}) and kernel ({c_in_k})") } let params = ParamsConvTranspose1D { b_size, l_in, k_size, c_out, c_in, padding, output_padding, stride, dilation, }; let storage = self.storage().conv_transpose1d( self.layout(), &kernel.storage(), kernel.layout(), &params, )?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::ConvTranspose1D { arg, kernel, padding: params.padding, output_padding: params.output_padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } fn conv2d_single_group(&self, kernel: &Self, params: &ParamsConv2D) -> Result<Self> { let storage = self.storage() .conv2d(self.layout(), &kernel.storage(), kernel.layout(), params)?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::Conv2D { arg, kernel, padding: params.padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } /// Applies a 2D convolution over the input tensor. pub fn conv2d( &self, kernel: &Self, padding: usize, stride: usize, dilation: usize, groups: usize, ) -> Result<Self> { let (b_size, c_in, i_h, i_w) = self.dims4()?; let (c_out, c_in_k, k_h, k_w) = kernel.dims4()?; if c_in != c_in_k * groups { crate::bail!( "in_channel mismatch between input ({c_in}, groups {groups}) and kernel ({c_in_k})" ) } let params = ParamsConv2D { b_size, i_h, i_w, k_h, k_w, c_out: c_out / groups, c_in: c_in / groups, padding, stride, dilation, cudnn_fwd_algo: None, }; if groups == 1 { self.conv2d_single_group(kernel, &params) } else { let blocks = self.chunk(groups, 1)?; let kernel = kernel.chunk(groups, 0)?; let blocks = blocks .iter() .zip(&kernel) .map(|(block, kernel)| block.conv2d_single_group(kernel, &params)) .collect::<Result<Vec<_>>>()?; Tensor::cat(&blocks, 1) } } /// Applies a 2D transposed convolution over the input tensor. 
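    ///
    /// The input is expected to have shape `(batch, c_in, h, w)` and the kernel
    /// `(c_in, c_out, k_h, k_w)`. Each spatial output dimension follows
    /// `ParamsConvTranspose2D::out_h`/`out_w`:
    /// `out = (in - 1) * stride - 2 * padding + dilation * (k - 1) + output_padding + 1`.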
pub fn conv_transpose2d( &self, kernel: &Self, padding: usize, output_padding: usize, stride: usize, dilation: usize, ) -> Result<Self> { let (b_size, c_in, i_h, i_w) = self.dims4()?; let (c_in_k, c_out, k_h, k_w) = kernel.dims4()?; if c_in != c_in_k { crate::bail!("in_channel mismatch between input ({c_in}) and kernel ({c_in_k})") } let params = ParamsConvTranspose2D { b_size, i_h, i_w, k_h, k_w, c_out, c_in, padding, output_padding, stride, dilation, }; let storage = self.storage().conv_transpose2d( self.layout(), &kernel.storage(), kernel.layout(), &params, )?; let op = BackpropOp::new2(self, kernel, |arg, kernel| Op::ConvTranspose2D { arg, kernel, padding: params.padding, output_padding: params.output_padding, stride: params.stride, dilation: params.dilation, }); let out_dims = params.out_dims(); Ok(crate::tensor::from_storage(storage, out_dims, op, false)) } }
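#[cfg(test)]
mod shape_sketch {
    use crate::{DType, Device, Tensor};

    // A minimal sketch checking the conv1d output-length formula on a concrete
    // case: l_in = 5, k_size = 3, padding = 1, stride = 1, dilation = 1 gives
    // l_out = (5 + 2 - 2 - 1) / 1 + 1 = 5.
    #[test]
    fn conv1d_out_len() -> crate::Result<()> {
        let x = Tensor::zeros((1, 2, 5), DType::F32, &Device::Cpu)?;
        let k = Tensor::zeros((4, 2, 3), DType::F32, &Device::Cpu)?;
        let y = x.conv1d(&k, /*padding=*/ 1, /*stride=*/ 1, /*dilation=*/ 1, /*groups=*/ 1)?;
        assert_eq!(y.dims(), &[1, 4, 5]);
        Ok(())
    }
}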
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/metal_backend.rs
use crate::backend::{BackendDevice, BackendStorage};
use crate::conv::{ParamsConv1D, ParamsConv2D, ParamsConvTranspose1D, ParamsConvTranspose2D};
use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT};
use crate::{CpuStorage, DType, Layout, Result, Shape};
use candle_metal_kernels;
use candle_metal_kernels::Kernels;
use core::mem;
use half::{bf16, f16};
use metal;
use metal::{Buffer, CommandQueue, MTLResourceOptions, NSUInteger};
use std::sync::Arc;

/// Metal related errors
#[derive(thiserror::Error, Debug)]
pub enum MetalError {
    #[error("{0}")]
    Message(String),
    #[error(transparent)]
    KernelError(#[from] candle_metal_kernels::MetalKernelError),
    #[error("matmul is only supported for contiguous tensors lstride: {lhs_stride:?} rstride: {rhs_stride:?} mnk: {mnk:?}")]
    MatMulNonContiguous {
        lhs_stride: Vec<usize>,
        rhs_stride: Vec<usize>,
        mnk: (usize, usize, usize),
    },
}

impl From<String> for MetalError {
    fn from(e: String) -> Self {
        MetalError::Message(e)
    }
}

#[derive(Clone)]
pub struct MetalDevice {
    device: metal::Device,
    command_queue: metal::CommandQueue,
    kernels: Arc<candle_metal_kernels::Kernels>,
}

impl std::fmt::Debug for MetalDevice {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "MetalDevice({:?})", self.device.registry_id())
    }
}

impl std::ops::Deref for MetalDevice {
    type Target = metal::DeviceRef;

    fn deref(&self) -> &Self::Target {
        &self.device
    }
}

impl MetalDevice {
    pub fn id(&self) -> NSUInteger {
        self.registry_id()
    }

    pub fn command_queue(&self) -> &CommandQueue {
        &self.command_queue
    }

    pub fn kernels(&self) -> &Kernels {
        &self.kernels
    }

    pub fn device(&self) -> &metal::Device {
        &self.device
    }

    pub fn new_buffer(&self, element_count: usize, dtype: DType) -> Buffer {
        let size = (element_count * dtype.size_in_bytes()) as NSUInteger;
        self.device
            .new_buffer(size, MTLResourceOptions::StorageModeManaged)
    }
}

#[derive(Debug, Clone)]
pub struct MetalStorage {
    buffer: metal::Buffer,
    device: MetalDevice,
    dtype: DType,
}

impl BackendStorage for MetalStorage {
    type Device = MetalDevice;

    fn try_clone(&self, _: &Layout) -> Result<Self> {
        Ok(self.clone())
    }

    fn dtype(&self) -> DType {
        self.dtype
    }

    fn device(&self) -> &Self::Device {
        &self.device
    }

    fn to_cpu_storage(&self) -> Result<CpuStorage> {
        let length = self.buffer.length() as usize;
        let size = self.dtype.size_in_bytes();
        if length % size != 0 {
            crate::bail!(
                "The Metal buffer length is not aligned with dtype {:?}",
                self.dtype
            );
        }
        match self.dtype {
            DType::U8 => Ok(CpuStorage::U8(self.buffer.read_to_vec(length / size))),
            DType::U32 => Ok(CpuStorage::U32(self.buffer.read_to_vec(length / size))),
            DType::I64 => Ok(CpuStorage::I64(self.buffer.read_to_vec(length / size))),
            DType::F16 => Ok(CpuStorage::F16(self.buffer.read_to_vec(length / size))),
            DType::BF16 => Ok(CpuStorage::BF16(self.buffer.read_to_vec(length / size))),
            DType::F32 => Ok(CpuStorage::F32(self.buffer.read_to_vec(length / size))),
            DType::F64 => Ok(CpuStorage::F64(self.buffer.read_to_vec(length / size))),
        }
    }

    fn affine(&self, layout: &Layout, mul: f64, add: f64) -> Result<Self> {
        let device = self.device().clone();
        let shape = layout.shape();
        let el = shape.elem_count();
        let dtype = self.dtype;

        // Only the contiguous, zero-offset, f32 case is implemented below, so
        // bail out when the input is *not* contiguous (or offset, or non-f32).
        if !layout.is_contiguous() || layout.start_offset() != 0 || dtype != DType::F32 {
            crate::bail!("Not contiguous, non-f32 affine is not implemented yet.");
        }

        let mut buffer = device.new_buffer(el, self.dtype);
        let command_buffer = self.device.command_queue.new_command_buffer();
        candle_metal_kernels::call_affine(
            &device.device,
            &command_buffer,
            &device.kernels,
            el,
&self.buffer, &mut buffer, mul as f32, add as f32, ) .map_err(MetalError::from)?; command_buffer.commit(); command_buffer.wait_until_completed(); return Ok(Self { buffer, device: device.clone(), dtype, }); } fn powf(&self, _: &Layout, _: f64) -> Result<Self> { crate::bail!("powf metal") } fn elu(&self, _: &Layout, _: f64) -> Result<Self> { crate::bail!("elu metal") } fn reduce_op(&self, op: ReduceOp, layout: &Layout, sum_dims: &[usize]) -> Result<Self> { if !(sum_dims.len() == 1 && sum_dims[0] == layout.shape().rank() - 1 && layout.is_contiguous() && layout.start_offset() == 0) { crate::bail!("Non contiguous reduce op not supported yet"); } let device = self.device.clone(); let src_stride = layout.stride(); let src_dims = layout.shape().dims(); let src_el: usize = src_dims.iter().product(); // Source dims and strides with the sum dims at the end. let mut dims = vec![]; let mut stride = vec![]; let mut dst_el: usize = 1; for (dim_idx, &d) in src_dims.iter().enumerate() { if !sum_dims.contains(&dim_idx) { dst_el *= d; dims.push(d); stride.push(src_stride[dim_idx]); } } for &dim_idx in sum_dims.iter() { dims.push(src_dims[dim_idx]); stride.push(src_stride[dim_idx]); } // The reduction loop requires the shared array to be properly initialized and for // this we want the number of threads to be a power of two. let (name, check_empty, return_index) = match (op, self.dtype) { (ReduceOp::Sum, DType::F32) => ("fast_sum_float", false, false), (ReduceOp::Min, DType::F32) => ("fast_min_float", true, false), (ReduceOp::Max, DType::F32) => ("fast_max_float", true, false), (ReduceOp::ArgMin, DType::F32) => ("fast_argmin_float", true, true), (ReduceOp::ArgMax, DType::F32) => ("fast_argmax_float", true, true), _ => crate::bail!("Reduce op for non float"), }; if check_empty && layout.shape().elem_count() == 0 { Err(crate::Error::EmptyTensor { op: "reduce" }.bt())? 
} let dtype = if return_index { DType::U32 } else { self.dtype }; let mut buffer = device.new_buffer(dst_el, dtype); let command_buffer = self.device.command_queue.new_command_buffer(); candle_metal_kernels::call_reduce_contiguous( &device.device, &command_buffer, &device.kernels, name, src_el, dst_el, &self.buffer, &mut buffer, ) .map_err(MetalError::from)?; command_buffer.commit(); command_buffer.wait_until_completed(); Ok(Self { buffer, device, dtype, }) } fn cmp(&self, _: CmpOp, _: &Self, _: &Layout, _: &Layout) -> Result<Self> { crate::bail!("cmp metal") } fn to_dtype(&self, layout: &Layout, dtype: DType) -> Result<Self> { let device = self.device(); let shape = layout.shape(); let el_count = shape.elem_count(); let mut buffer = device.new_buffer(el_count, dtype); let command_buffer = device.command_queue.new_command_buffer(); if layout.is_contiguous() { let kernel_name = match (self.dtype, dtype) { (DType::U32, DType::F32) => "cast_u32_f32", (left, right) => crate::bail!("to dtype {left:?} - {right:?}"), }; candle_metal_kernels::call_cast_contiguous( &device.device, &command_buffer, &device.kernels, kernel_name, el_count, &self.buffer, &mut buffer, ) .map_err(MetalError::from)?; } else { crate::bail!( "TODO Implement the kernel calling cast {:?}-{:?}", self.dtype, dtype ); } command_buffer.commit(); command_buffer.wait_until_completed(); Ok(Self { buffer, device: device.clone(), dtype, }) } fn unary_impl<B: UnaryOpT>(&self, layout: &Layout) -> Result<Self> { let device = self.device(); let dtype = self.dtype; let shape = layout.shape(); let el_count = shape.elem_count(); let mut buffer = device.new_buffer(el_count, dtype); let command_buffer = device.command_queue.new_command_buffer(); if layout.is_contiguous() && layout.start_offset() == 0 { use candle_metal_kernels::unary::contiguous; let kernel_name = match (B::KERNEL, dtype) { ("ucos", DType::F32) => contiguous::cos::FLOAT, ("usin", DType::F32) => contiguous::sin::FLOAT, ("usqr", DType::F32) => contiguous::sqr::FLOAT, ("usqrt", DType::F32) => contiguous::sqrt::FLOAT, ("uneg", DType::F32) => contiguous::neg::FLOAT, ("uexp", DType::F32) => contiguous::exp::FLOAT, ("ulog", DType::F32) => contiguous::log::FLOAT, (name, dtype) => crate::bail!("Match {name} - {dtype:?}"), }; candle_metal_kernels::call_unary_contiguous( &device.device, &command_buffer, &device.kernels, kernel_name, el_count, &self.buffer, &mut buffer, ) .map_err(MetalError::from)?; } else { crate::bail!("TODO Implement the kernel calling {}", B::KERNEL); } command_buffer.commit(); command_buffer.wait_until_completed(); Ok(Self { buffer, device: device.clone(), dtype, }) } fn binary_impl<B: BinaryOpT>( &self, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { let device = self.device(); let dtype = self.dtype; let shape = lhs_l.shape(); let el_count = shape.elem_count(); let mut buffer = device.new_buffer(el_count, dtype); let command_buffer = device.command_queue.new_command_buffer(); if (lhs_l.is_contiguous() && lhs_l.start_offset() == 0) && (rhs_l.is_contiguous() && rhs_l.start_offset() == 0) { use candle_metal_kernels::binary::contiguous; let kernel_name = match (B::KERNEL, dtype) { ("add", DType::F32) => contiguous::add::FLOAT, ("badd", DType::F32) => contiguous::add::FLOAT, ("sub", DType::F32) => contiguous::sub::FLOAT, ("bsub", DType::F32) => contiguous::sub::FLOAT, ("mul", DType::F32) => contiguous::mul::FLOAT, ("bmul", DType::F32) => contiguous::mul::FLOAT, ("div", DType::F32) => contiguous::div::FLOAT, ("bdiv", DType::F32) => 
contiguous::div::FLOAT, (name, dtype) => crate::bail!("Match {name} - {dtype:?}"), }; candle_metal_kernels::call_binary_contiguous( &device.device, &command_buffer, &device.kernels, kernel_name, el_count, &self.buffer, &rhs.buffer, &mut buffer, ) .map_err(MetalError::from)?; } else { use candle_metal_kernels::binary::strided; let kernel_name = match (B::KERNEL, dtype) { ("badd", DType::F32) => strided::add::FLOAT, ("bsub", DType::F32) => strided::sub::FLOAT, ("bmul", DType::F32) => strided::mul::FLOAT, ("bdiv", DType::F32) => strided::div::FLOAT, (name, dtype) => crate::bail!("Match {name} - {dtype:?}"), }; candle_metal_kernels::call_binary_strided( &device.device, &command_buffer, &device.kernels, kernel_name, lhs_l.dims(), &self.buffer, &lhs_l.stride(), lhs_l.start_offset() * self.dtype.size_in_bytes(), &rhs.buffer, &rhs_l.stride(), rhs_l.start_offset() * rhs.dtype.size_in_bytes(), &mut buffer, ) .map_err(MetalError::from)?; } command_buffer.commit(); command_buffer.wait_until_completed(); Ok(Self { buffer, device: device.clone(), dtype, }) } fn where_cond( &self, layout: &Layout, t: &Self, t_l: &Layout, f: &Self, f_l: &Layout, ) -> Result<Self> { let device = self.device.clone(); let shape = t_l.shape(); let dims = shape.dims(); let el = shape.elem_count(); let dtype = t.dtype; let mut buffer = self.device.new_buffer(el, dtype); let command_buffer = self.device.command_queue.new_command_buffer(); candle_metal_kernels::call_where_cond_strided( &device.device, &command_buffer, &device.kernels, "where_u8_f32", &dims, &self.buffer, ( layout.stride(), layout.start_offset() * self.dtype.size_in_bytes(), ), &t.buffer, (&t_l.stride(), t_l.start_offset() * t.dtype.size_in_bytes()), &f.buffer, (&f_l.stride(), f_l.start_offset() * f.dtype.size_in_bytes()), &mut buffer, ) .map_err(MetalError::from)?; command_buffer.commit(); command_buffer.wait_until_completed(); Ok(Self { buffer, device, dtype, }) } fn conv1d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &ParamsConv1D, ) -> Result<Self> { crate::bail!("conv1d metal") } fn conv_transpose1d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &ParamsConvTranspose1D, ) -> Result<Self> { crate::bail!("conv_transpose1d metal") } fn conv2d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &ParamsConv2D, ) -> Result<Self> { crate::bail!("conv2d metal") } fn conv_transpose2d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &ParamsConvTranspose2D, ) -> Result<Self> { crate::bail!("conv_tranpose2d metal") } fn avg_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self> { crate::bail!("avg_pool2d metal") } fn max_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self> { crate::bail!("max_pool2d metal") } fn upsample_nearest1d(&self, _: &Layout, _: usize) -> Result<Self> { crate::bail!("upsample_nearest1d metal") } fn upsample_nearest2d(&self, _: &Layout, _: usize, _: usize) -> Result<Self> { crate::bail!("upsample_nearest2d metal") } fn gather(&self, _: &Layout, _: &Self, _: &Layout, _: usize) -> Result<Self> { crate::bail!("gather metal") } fn scatter_add( &self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout, _: usize, ) -> Result<Self> { crate::bail!("scatter_add metal") } fn index_select(&self, ids: &Self, src_l: &Layout, ids_l: &Layout, dim: usize) -> Result<Self> { if !(src_l.is_contiguous() && src_l.start_offset() == 0 && ids_l.is_contiguous() && ids_l.start_offset() == 0) { crate::bail!("Non contiguous index select not 
implemented"); } let left_size: usize = src_l.dims()[..dim].iter().product(); let right_size: usize = src_l.dims()[dim + 1..].iter().product(); let ids_el = ids_l.shape().elem_count(); let dst_el = ids_el * left_size * right_size; let dtype = self.dtype; let device = self.device(); let mut buffer = device.new_buffer(dst_el, dtype); let name = match (ids.dtype, self.dtype) { (DType::U32, DType::F32) => "is_u32_f32", (left, right) => crate::bail!("index select metal {left:?} {right:?}"), }; let command_buffer = self.device.command_queue.new_command_buffer(); candle_metal_kernels::call_index_select( &device.device, &command_buffer, &self.device.kernels, name, src_l.dims(), ids_el, dim, &self.buffer, &ids.buffer, &mut buffer, ) .map_err(MetalError::from)?; command_buffer.commit(); command_buffer.wait_until_completed(); Ok(Self { buffer, device: device.clone(), dtype, }) } fn index_add( &self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout, _: usize, ) -> Result<Self> { crate::bail!("index_add metal") } fn matmul( &self, rhs: &Self, (b, m, n, k): (usize, usize, usize, usize), lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { // Create descriptors use metal::mps::matrix::*; let type_id = metal::mps::MPS_FLOATBIT_ENCODING | 32; let size = core::mem::size_of::<f32>() as NSUInteger; let elem_count = b * m * n; let lhs_stride = lhs_l.stride(); let rhs_stride = rhs_l.stride(); let rhs_m1 = rhs_stride[rhs_stride.len() - 1]; let rhs_m2 = rhs_stride[rhs_stride.len() - 2]; let lhs_m1 = lhs_stride[lhs_stride.len() - 1]; let lhs_m2 = lhs_stride[lhs_stride.len() - 2]; // The a tensor has dims batching, k, n (rhs) let transpose_left = if lhs_m1 == 1 && lhs_m2 == k { false } else if lhs_m1 == m && lhs_m2 == 1 { true } else { Err(MetalError::MatMulNonContiguous { lhs_stride: lhs_stride.to_vec(), rhs_stride: rhs_stride.to_vec(), mnk: (m, n, k), })? }; let transpose_right = if rhs_m1 == 1 && rhs_m2 == n { false } else if rhs_m1 == k && rhs_m2 == 1 { true } else { Err(MetalError::MatMulNonContiguous { lhs_stride: lhs_stride.to_vec(), rhs_stride: rhs_stride.to_vec(), mnk: (m, n, k), })? 
}; let b = b as NSUInteger; let m = m as NSUInteger; let n = n as NSUInteger; let k = k as NSUInteger; let left_descriptor = if transpose_left { MatrixDescriptor::init_single(k, m, m * size, type_id) } else { MatrixDescriptor::init_single(m, k, k * size, type_id) }; let right_descriptor = if transpose_right { MatrixDescriptor::init_single(n, k, k * size, type_id) } else { MatrixDescriptor::init_single(k, n, n * size, type_id) }; let result_descriptor = MatrixDescriptor::init_single(m, n, n * size, type_id); // Create matrix objects let left_matrix = Matrix::init_with_buffer_descriptor(&self.buffer, 0, &left_descriptor) .ok_or_else(|| { MetalError::from("Failed to create matrix multiplication kernel".to_string()) })?; let right_matrix = Matrix::init_with_buffer_descriptor(&rhs.buffer, 0, &right_descriptor) .ok_or_else(|| { MetalError::from("Failed to create matrix multiplication kernel".to_string()) })?; let out_buffer = self.device.new_buffer(elem_count, self.dtype); let result_matrix = Matrix::init_with_buffer_descriptor(&out_buffer, 0, &result_descriptor) .ok_or_else(|| { MetalError::from("Failed to create matrix multiplication kernel".to_string()) })?; let alpha = 1.0f64; let beta = 0.0f64; // Create kernel let matrix_multiplication = MatrixMultiplication::init( &self.device, transpose_left, transpose_right, m, n, k, alpha, beta, ) .ok_or_else(|| { MetalError::from("Failed to create matrix multiplication kernel".to_string()) })?; matrix_multiplication.set_batch_size(b); // Encode kernel to command buffer let command_buffer = self.device.command_queue.new_command_buffer(); matrix_multiplication.encode_to_command_buffer( command_buffer, &left_matrix, &right_matrix, &result_matrix, ); command_buffer.commit(); command_buffer.wait_until_completed(); Ok(Self { buffer: out_buffer, device: self.device.clone(), dtype: self.dtype(), }) } fn copy_strided_src(&self, dst: &mut Self, dst_offset: usize, src_l: &Layout) -> Result<()> { let src_shape = src_l.shape(); let el_count = src_shape.elem_count(); if el_count == 0 { return Ok(()); } let command_buffer = self.device.command_queue.new_command_buffer(); let kernel_name = match self.dtype { DType::F32 => candle_metal_kernels::unary::strided::copy::FLOAT, DType::F16 => candle_metal_kernels::unary::strided::copy::HALF, DType::BF16 => candle_metal_kernels::unary::strided::copy::BFLOAT, dtype => crate::bail!("copy_strided not implemented for {dtype:?}"), }; candle_metal_kernels::call_unary_strided( &self.device.device, &command_buffer, &self.device.kernels, kernel_name, src_l.dims(), &self.buffer, &src_l.stride(), src_l.start_offset() * self.dtype.size_in_bytes(), &mut dst.buffer, dst_offset, ) .map_err(MetalError::from)?; command_buffer.commit(); command_buffer.wait_until_completed(); Ok(()) } } impl MetalStorage { pub fn new(buffer: Buffer, device: MetalDevice, dtype: DType) -> Self { Self { buffer, device, dtype, } } pub fn buffer(&self) -> &Buffer { &self.buffer } } impl BackendDevice for MetalDevice { type Storage = MetalStorage; fn new(ordinal: usize) -> Result<Self> { let device = metal::Device::all().swap_remove(ordinal); let command_queue = device.new_command_queue(); let kernels = Arc::new(Kernels::new()); Ok(Self { device, command_queue, kernels, }) } fn set_seed(&self, _seed: u64) -> Result<()> { crate::bail!("set_seed") } fn location(&self) -> crate::DeviceLocation { crate::DeviceLocation::Metal { gpu_id: self.registry_id() as usize, } } fn same_device(&self, rhs: &Self) -> bool { self.device.registry_id() == rhs.device.registry_id() } fn 
zeros_impl(&self, shape: &Shape, dtype: DType) -> Result<MetalStorage> { // TODO Is there a faster way ? let cpu_storage = crate::cpu_backend::CpuDevice.zeros_impl(shape, dtype)?; self.storage_from_cpu_storage(&cpu_storage) } fn ones_impl(&self, shape: &Shape, dtype: DType) -> Result<Self::Storage> { // TODO Is there a faster way ? let cpu_storage = crate::cpu_backend::CpuDevice.ones_impl(shape, dtype)?; self.storage_from_cpu_storage(&cpu_storage) } fn storage_from_cpu_storage(&self, storage: &CpuStorage) -> Result<Self::Storage> { let option = metal::MTLResourceOptions::StorageModeManaged; let buffer = match storage { CpuStorage::U8(storage) => self.device.new_buffer_with_data( storage.as_ptr() as *const core::ffi::c_void, (storage.len() * mem::size_of::<u8>()) as NSUInteger, option, ), CpuStorage::U32(storage) => self.device.new_buffer_with_data( storage.as_ptr() as *const core::ffi::c_void, (storage.len() * mem::size_of::<u32>()) as NSUInteger, option, ), CpuStorage::I64(storage) => self.device.new_buffer_with_data( storage.as_ptr() as *const core::ffi::c_void, (storage.len() * mem::size_of::<i64>()) as NSUInteger, option, ), CpuStorage::BF16(storage) => self.device.new_buffer_with_data( storage.as_ptr() as *const core::ffi::c_void, (storage.len() * mem::size_of::<bf16>()) as NSUInteger, option, ), CpuStorage::F16(storage) => self.device.new_buffer_with_data( storage.as_ptr() as *const core::ffi::c_void, (storage.len() * mem::size_of::<f16>()) as NSUInteger, option, ), CpuStorage::F32(storage) => self.device.new_buffer_with_data( storage.as_ptr() as *const core::ffi::c_void, (storage.len() * mem::size_of::<f32>()) as NSUInteger, option, ), CpuStorage::F64(storage) => self.device.new_buffer_with_data( storage.as_ptr() as *const core::ffi::c_void, (storage.len() * mem::size_of::<f64>()) as NSUInteger, option, ), }; Ok(Self::Storage { buffer, device: self.clone(), dtype: storage.dtype(), }) } fn rand_uniform( &self, shape: &Shape, dtype: DType, mean: f64, stddev: f64, ) -> Result<Self::Storage> { // TODO is there a better way ? let cpu_storage = crate::cpu_backend::CpuDevice.rand_uniform(shape, dtype, mean, stddev)?; self.storage_from_cpu_storage(&cpu_storage) } fn rand_normal( &self, shape: &Shape, dtype: DType, mean: f64, stddev: f64, ) -> Result<Self::Storage> { // TODO is there a better way ? let cpu_storage = crate::cpu_backend::CpuDevice.rand_normal(shape, dtype, mean, stddev)?; self.storage_from_cpu_storage(&cpu_storage) } }
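// Example: a minimal sketch of driving this backend directly through the
// `BackendDevice`/`BackendStorage` traits. It assumes `MetalDevice` and the
// `backend` module are exported from the crate root (as for the other
// backends) and that a Metal-capable GPU is present; normal code should go
// through the higher-level `Device`/`Tensor` API instead.
fn metal_storage_roundtrip() -> candle_core::Result<()> {
    use candle_core::backend::{BackendDevice, BackendStorage};
    use candle_core::{CpuStorage, DType, MetalDevice, Shape};

    // Ordinal 0 picks the first device returned by `metal::Device::all()`.
    let dev = MetalDevice::new(0)?;
    // `storage_from_cpu_storage` copies host data into a managed MTLBuffer.
    let cpu = CpuStorage::F32(vec![1.0, 2.0, 3.0, 4.0]);
    let storage = dev.storage_from_cpu_storage(&cpu)?;
    assert_eq!(storage.dtype(), DType::F32);
    // Zeros are currently built on the CPU and uploaded, per the TODO above.
    let _zeros = dev.zeros_impl(&Shape::from((2, 2)), DType::F32)?;
    Ok(())
}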
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/storage.rs
use crate::backend::BackendStorage; use crate::op::{self, CmpOp, CustomOp1, CustomOp2, CustomOp3, ReduceOp}; use crate::{CpuStorage, CudaStorage, DType, Device, Error, Layout, MetalStorage, Result, Shape}; // We do not want to implement Clone on Storage as cloning may fail because of // out of memory. Instead try_clone should be used. #[derive(Debug)] pub enum Storage { Cpu(CpuStorage), Cuda(CudaStorage), Metal(MetalStorage), } impl Storage { pub fn try_clone(&self, layout: &Layout) -> Result<Self> { match self { Self::Cpu(storage) => Ok(Self::Cpu(storage.clone())), Self::Cuda(storage) => { let storage = storage.try_clone(layout)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.try_clone(layout)?; Ok(Self::Metal(storage)) } } } pub fn device(&self) -> Device { match self { Self::Cpu(_) => Device::Cpu, Self::Cuda(storage) => Device::Cuda(storage.device().clone()), Self::Metal(storage) => Device::Metal(storage.device().clone()), } } pub fn dtype(&self) -> DType { match self { Self::Cpu(storage) => storage.dtype(), Self::Cuda(storage) => storage.dtype(), Self::Metal(storage) => storage.dtype(), } } pub(crate) fn same_device(&self, rhs: &Self, op: &'static str) -> Result<()> { let lhs = self.device().location(); let rhs = rhs.device().location(); if lhs != rhs { Err(Error::DeviceMismatchBinaryOp { lhs, rhs, op }.bt()) } else { Ok(()) } } pub(crate) fn same_dtype(&self, rhs: &Self, op: &'static str) -> Result<()> { let lhs = self.dtype(); let rhs = rhs.dtype(); if lhs != rhs { Err(Error::DTypeMismatchBinaryOp { lhs, rhs, op }.bt()) } else { Ok(()) } } pub(crate) fn affine(&self, layout: &Layout, mul: f64, add: f64) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.affine(layout, mul, add)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.affine(layout, mul, add)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.affine(layout, mul, add)?; Ok(Self::Metal(storage)) } } } pub(crate) fn powf(&self, layout: &Layout, alpha: f64) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.powf(layout, alpha)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.powf(layout, alpha)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.powf(layout, alpha)?; Ok(Self::Metal(storage)) } } } pub(crate) fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.elu(layout, alpha)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.elu(layout, alpha)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.elu(layout, alpha)?; Ok(Self::Metal(storage)) } } } pub(crate) fn cmp( &self, op: CmpOp, rhs: &Self, lhs_layout: &Layout, rhs_layout: &Layout, ) -> Result<Self> { self.same_device(rhs, "cmp")?; self.same_dtype(rhs, "cmp")?; match (self, rhs) { (Storage::Cpu(lhs), Storage::Cpu(rhs)) => { let storage = lhs.cmp(op, rhs, lhs_layout, rhs_layout)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.cmp(op, rhs, lhs_layout, rhs_layout)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.cmp(op, rhs, lhs_layout, rhs_layout)?; Ok(Self::Metal(storage)) } (lhs, rhs) => { // Should not happen because of the same device check above but we're defensive // anyway. 
Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "cmp", } .bt()) } } } pub(crate) fn reduce_op(&self, op: ReduceOp, layout: &Layout, s: &[usize]) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.reduce_op(op, layout, s)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.reduce_op(op, layout, s)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.reduce_op(op, layout, s)?; Ok(Self::Metal(storage)) } } } pub(crate) fn to_dtype(&self, layout: &Layout, dtype: DType) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.to_dtype(layout, dtype)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.to_dtype(layout, dtype)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.to_dtype(layout, dtype)?; Ok(Self::Metal(storage)) } } } pub(crate) fn apply_op1(&self, l: &Layout, c: &dyn CustomOp1) -> Result<(Self, Shape)> { match self { Self::Cpu(storage) => { let (storage, shape) = c.cpu_fwd(storage, l)?; Ok((Self::Cpu(storage), shape)) } Self::Cuda(storage) => { let (storage, shape) = c.cuda_fwd(storage, l)?; Ok((Self::Cuda(storage), shape)) } Self::Metal(storage) => { let (storage, shape) = c.metal_fwd(storage, l)?; Ok((Self::Metal(storage), shape)) } } } pub(crate) fn apply_op2( &self, l1: &Layout, t2: &Self, l2: &Layout, c: &dyn CustomOp2, ) -> Result<(Self, Shape)> { self.same_device(t2, c.name())?; match (self, t2) { (Self::Cpu(s1), Self::Cpu(s2)) => { let (s, shape) = c.cpu_fwd(s1, l1, s2, l2)?; Ok((Self::Cpu(s), shape)) } (Self::Cuda(s1), Self::Cuda(s2)) => { let (s, shape) = c.cuda_fwd(s1, l1, s2, l2)?; Ok((Self::Cuda(s), shape)) } (Self::Metal(s1), Self::Metal(s2)) => { let (s, shape) = c.metal_fwd(s1, l1, s2, l2)?; Ok((Self::Metal(s), shape)) } _ => unreachable!(), } } pub(crate) fn apply_op3( &self, l1: &Layout, t2: &Self, l2: &Layout, t3: &Self, l3: &Layout, c: &dyn CustomOp3, ) -> Result<(Self, Shape)> { self.same_device(t2, c.name())?; self.same_device(t3, c.name())?; match (self, t2, t3) { (Self::Cpu(s1), Self::Cpu(s2), Self::Cpu(s3)) => { let (s, shape) = c.cpu_fwd(s1, l1, s2, l2, s3, l3)?; Ok((Self::Cpu(s), shape)) } (Self::Cuda(s1), Self::Cuda(s2), Self::Cuda(s3)) => { let (s, shape) = c.cuda_fwd(s1, l1, s2, l2, s3, l3)?; Ok((Self::Cuda(s), shape)) } (Self::Metal(s1), Self::Metal(s2), Self::Metal(s3)) => { let (s, shape) = c.metal_fwd(s1, l1, s2, l2, s3, l3)?; Ok((Self::Metal(s), shape)) } _ => unreachable!(), } } pub(crate) fn unary_impl<B: op::UnaryOpT>(&self, layout: &Layout) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.unary_impl::<B>(layout)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.unary_impl::<B>(layout)?; Ok(Self::Cuda(storage)) } Self::Metal(storage) => { let storage = storage.unary_impl::<B>(layout)?; Ok(Self::Metal(storage)) } } } pub(crate) fn binary_impl<B: op::BinaryOpT>( &self, rhs: &Self, lhs_layout: &Layout, rhs_layout: &Layout, ) -> Result<Self> { self.same_device(rhs, B::NAME)?; self.same_dtype(rhs, B::NAME)?; match (self, rhs) { (Storage::Cpu(lhs), Storage::Cpu(rhs)) => { let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.binary_impl::<B>(rhs, lhs_layout, rhs_layout)?; 
Ok(Self::Metal(storage)) } (lhs, rhs) => { // Should not happen because of the same device check above but we're defensive // anyway. Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: B::NAME, } .bt()) } } } pub(crate) fn conv1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv1D, ) -> Result<Self> { self.same_device(kernel, "conv1d")?; self.same_dtype(kernel, "conv1d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv1d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv1d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (Storage::Metal(inp), Storage::Metal(kernel)) => { let s = inp.conv1d(l, kernel, kernel_l, params)?; Ok(Self::Metal(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv1d", } .bt()), } } pub(crate) fn conv_transpose1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self> { self.same_device(kernel, "conv-transpose1d")?; self.same_dtype(kernel, "conv-transpose1d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv_transpose1d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv_transpose1d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv-transpose1d", } .bt()), } } pub(crate) fn conv2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv2D, ) -> Result<Self> { self.same_device(kernel, "conv2d")?; self.same_dtype(kernel, "conv2d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv2d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv2d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (Storage::Metal(inp), Storage::Metal(kernel)) => { let s = inp.conv2d(l, kernel, kernel_l, params)?; Ok(Self::Metal(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv2d", } .bt()), } } pub(crate) fn conv_transpose2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self> { self.same_device(kernel, "conv_transpose2d")?; self.same_dtype(kernel, "conv_transpose2d")?; match (self, &kernel) { (Storage::Cpu(inp), Storage::Cpu(kernel)) => { let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?; Ok(Self::Cpu(s)) } (Storage::Cuda(inp), Storage::Cuda(kernel)) => { let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?; Ok(Self::Cuda(s)) } (Storage::Metal(inp), Storage::Metal(kernel)) => { let s = inp.conv_transpose2d(l, kernel, kernel_l, params)?; Ok(Self::Metal(s)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "conv_transpose2d", } .bt()), } } pub(crate) fn avg_pool2d( &self, layout: &Layout, kernel_size: (usize, usize), stride: (usize, usize), ) -> Result<Self> { match self { Storage::Cpu(storage) => { let storage = storage.avg_pool2d(layout, kernel_size, stride)?; Ok(Self::Cpu(storage)) } Self::Cuda(storage) => { let storage = storage.avg_pool2d(layout, kernel_size, stride)?; Ok(Self::Cuda(storage)) } 
            Self::Metal(storage) => {
                let storage = storage.avg_pool2d(layout, kernel_size, stride)?;
                Ok(Self::Metal(storage))
            }
        }
    }

    pub(crate) fn max_pool2d(
        &self,
        layout: &Layout,
        kernel_size: (usize, usize),
        stride: (usize, usize),
    ) -> Result<Self> {
        match self {
            Storage::Cpu(storage) => {
                let storage = storage.max_pool2d(layout, kernel_size, stride)?;
                Ok(Self::Cpu(storage))
            }
            Self::Cuda(storage) => {
                let storage = storage.max_pool2d(layout, kernel_size, stride)?;
                Ok(Self::Cuda(storage))
            }
            Self::Metal(storage) => {
                let storage = storage.max_pool2d(layout, kernel_size, stride)?;
                Ok(Self::Metal(storage))
            }
        }
    }

    pub(crate) fn upsample_nearest1d(&self, layout: &Layout, sz: usize) -> Result<Self> {
        match self {
            Storage::Cpu(storage) => {
                let storage = storage.upsample_nearest1d(layout, sz)?;
                Ok(Self::Cpu(storage))
            }
            Self::Cuda(storage) => {
                let storage = storage.upsample_nearest1d(layout, sz)?;
                Ok(Self::Cuda(storage))
            }
            Self::Metal(storage) => {
                let storage = storage.upsample_nearest1d(layout, sz)?;
                Ok(Self::Metal(storage))
            }
        }
    }

    pub(crate) fn upsample_nearest2d(&self, layout: &Layout, h: usize, w: usize) -> Result<Self> {
        match self {
            Storage::Cpu(storage) => {
                let storage = storage.upsample_nearest2d(layout, h, w)?;
                Ok(Self::Cpu(storage))
            }
            Self::Cuda(storage) => {
                let storage = storage.upsample_nearest2d(layout, h, w)?;
                Ok(Self::Cuda(storage))
            }
            Self::Metal(storage) => {
                let storage = storage.upsample_nearest2d(layout, h, w)?;
                Ok(Self::Metal(storage))
            }
        }
    }

    pub(crate) fn where_cond(
        &self,
        layout: &Layout,
        t: &Self,
        layout_t: &Layout,
        f: &Self,
        layout_f: &Layout,
    ) -> Result<Self> {
        self.same_device(t, "where")?;
        self.same_device(f, "where")?;
        t.same_dtype(f, "where")?;
        match (self, t, f) {
            (Storage::Cpu(cond), Storage::Cpu(t), Storage::Cpu(f)) => {
                let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?;
                Ok(Self::Cpu(storage))
            }
            (Self::Cuda(cond), Self::Cuda(t), Self::Cuda(f)) => {
                let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?;
                Ok(Self::Cuda(storage))
            }
            (Self::Metal(cond), Self::Metal(t), Self::Metal(f)) => {
                let storage = cond.where_cond(layout, t, layout_t, f, layout_f)?;
                Ok(Self::Metal(storage))
            }
            (_, lhs, rhs) => Err(Error::DeviceMismatchBinaryOp {
                lhs: lhs.device().location(),
                rhs: rhs.device().location(),
                op: "where",
            }
            .bt()),
        }
    }

    pub(crate) fn gather(
        &self,
        l: &Layout,
        indexes: &Self,
        indexes_l: &Layout,
        d: usize,
    ) -> Result<Self> {
        self.same_device(indexes, "gather")?;
        match (self, indexes) {
            (Self::Cpu(s), Self::Cpu(indexes)) => {
                let storage = s.gather(l, indexes, indexes_l, d)?;
                Ok(Self::Cpu(storage))
            }
            (Self::Cuda(s), Self::Cuda(indexes)) => {
                let storage = s.gather(l, indexes, indexes_l, d)?;
                Ok(Self::Cuda(storage))
            }
            (Self::Metal(s), Self::Metal(indexes)) => {
                let storage = s.gather(l, indexes, indexes_l, d)?;
                Ok(Self::Metal(storage))
            }
            _ => unreachable!(),
        }
    }

    pub(crate) fn scatter_add(
        &self,
        l: &Layout,
        indexes: &Self,
        indexes_l: &Layout,
        source: &Self,
        source_l: &Layout,
        d: usize,
    ) -> Result<Self> {
        self.same_device(indexes, "scatter-add")?;
        self.same_device(source, "scatter-add")?;
        match (self, indexes, source) {
            (Self::Cpu(s), Self::Cpu(indexes), Self::Cpu(source)) => {
                let storage = s.scatter_add(l, indexes, indexes_l, source, source_l, d)?;
                Ok(Self::Cpu(storage))
            }
            (Self::Cuda(s), Self::Cuda(indexes), Self::Cuda(source)) => {
                let storage = s.scatter_add(l, indexes, indexes_l, source, source_l, d)?;
                Ok(Self::Cuda(storage))
            }
            (Self::Metal(s), Self::Metal(indexes), Self::Metal(source)) => {
                let storage = s.scatter_add(l, indexes, indexes_l,
source, source_l, d)?; Ok(Self::Metal(storage)) } _ => unreachable!(), } } pub(crate) fn index_add( &self, l: &Layout, indexes: &Self, indexes_l: &Layout, source: &Self, source_l: &Layout, d: usize, ) -> Result<Self> { self.same_device(indexes, "index-add")?; self.same_device(source, "index-add")?; match (self, indexes, source) { (Self::Cpu(s), Self::Cpu(indexes), Self::Cpu(source)) => { let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Cpu(storage)) } (Self::Cuda(s), Self::Cuda(indexes), Self::Cuda(source)) => { let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Cuda(storage)) } (Self::Metal(s), Self::Metal(indexes), Self::Metal(source)) => { let storage = s.index_add(l, indexes, indexes_l, source, source_l, d)?; Ok(Self::Metal(storage)) } _ => unreachable!(), } } pub(crate) fn index_select( &self, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout, d: usize, ) -> Result<Self> { self.same_device(rhs, "index-select")?; match (self, rhs) { (Self::Cpu(lhs), Self::Cpu(rhs)) => { let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.index_select(rhs, lhs_l, rhs_l, d)?; Ok(Self::Metal(storage)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "index-select", } .bt()), } } pub(crate) fn matmul( &self, rhs: &Self, bmnk: (usize, usize, usize, usize), lhs_layout: &Layout, rhs_layout: &Layout, ) -> Result<Self> { self.same_device(rhs, "matmul")?; self.same_dtype(rhs, "matmul")?; match (self, rhs) { (Self::Cpu(lhs), Self::Cpu(rhs)) => { let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?; Ok(Self::Cpu(storage)) } (Self::Cuda(lhs), Self::Cuda(rhs)) => { let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?; Ok(Self::Cuda(storage)) } (Self::Metal(lhs), Self::Metal(rhs)) => { let storage = lhs.matmul(rhs, bmnk, lhs_layout, rhs_layout)?; Ok(Self::Metal(storage)) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "matmul", } .bt()), } } // self, the source can be strided whereas dst is contiguous. pub(crate) fn copy_strided_src( &self, dst: &mut Self, dst_offset: usize, src_l: &Layout, ) -> Result<()> { match (self, dst) { (Self::Cpu(src), Self::Cpu(dst)) => src.copy_strided_src(dst, dst_offset, src_l), (Self::Cuda(src), Self::Cuda(dst)) => Ok(src.copy_strided_src(dst, dst_offset, src_l)?), (Self::Metal(src), Self::Metal(dst)) => { Ok(src.copy_strided_src(dst, dst_offset, src_l)?) } (lhs, rhs) => Err(Error::DeviceMismatchBinaryOp { lhs: lhs.device().location(), rhs: rhs.device().location(), op: "copy", } .bt()), } } }
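// Example: every method above follows the same mechanical pattern; check
// device/dtype agreement where relevant, unwrap the backend variant, forward
// to the backend-specific implementation, and re-wrap the result. A
// hypothetical new unary op `foo` (illustrative only, not part of the crate)
// would be wired up the same way:
//
//     impl Storage {
//         pub(crate) fn foo(&self, layout: &Layout) -> Result<Self> {
//             match self {
//                 Self::Cpu(storage) => Ok(Self::Cpu(storage.foo(layout)?)),
//                 Self::Cuda(storage) => Ok(Self::Cuda(storage.foo(layout)?)),
//                 Self::Metal(storage) => Ok(Self::Metal(storage.foo(layout)?)),
//             }
//         }
//     }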
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/indexer.rs
use crate::{Error, Tensor};
use std::ops::{
    Bound, Range, RangeBounds, RangeFrom, RangeFull, RangeInclusive, RangeTo, RangeToInclusive,
};

impl Tensor {
    /// Intended to be used by the trait `.i()`
    ///
    /// ```
    /// # use candle_core::{Tensor, DType, Device, IndexOp};
    /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
    ///
    /// let c = a.i(0..1)?;
    /// assert_eq!(c.shape().dims(), &[1, 3]);
    ///
    /// let c = a.i(0)?;
    /// assert_eq!(c.shape().dims(), &[3]);
    ///
    /// let c = a.i((.., ..2))?;
    /// assert_eq!(c.shape().dims(), &[2, 2]);
    ///
    /// let c = a.i((.., ..=2))?;
    /// assert_eq!(c.shape().dims(), &[2, 3]);
    ///
    /// # Ok::<(), candle_core::Error>(())
    /// ```
    fn index(&self, indexers: &[TensorIndexer]) -> Result<Self, Error> {
        let mut x = self.clone();
        let dims = self.shape().dims();
        let mut current_dim = 0;
        for (i, indexer) in indexers.iter().enumerate() {
            x = match indexer {
                TensorIndexer::Select(n) => x.narrow(current_dim, *n, 1)?.squeeze(current_dim)?,
                TensorIndexer::Narrow(left_bound, right_bound) => {
                    let start = match left_bound {
                        Bound::Included(n) => *n,
                        Bound::Excluded(n) => *n + 1,
                        Bound::Unbounded => 0,
                    };
                    let stop = match right_bound {
                        Bound::Included(n) => *n + 1,
                        Bound::Excluded(n) => *n,
                        Bound::Unbounded => dims[i],
                    };
                    let out = x.narrow(current_dim, start, stop.saturating_sub(start))?;
                    current_dim += 1;
                    out
                }
                TensorIndexer::IndexSelect(indexes) => {
                    if indexes.rank() != 1 {
                        crate::bail!("multi-dimensional tensor indexing is not supported")
                    }
                    let out = x.index_select(&indexes.to_device(x.device())?, current_dim)?;
                    current_dim += 1;
                    out
                }
                TensorIndexer::Err(e) => crate::bail!("indexing error {e:?}"),
            };
        }
        Ok(x)
    }
}

#[derive(Debug)]
/// Generic structure used to index a slice of the tensor
pub enum TensorIndexer {
    /// This selects the elements for which an index has some specific value.
    Select(usize),
    /// This is a regular slice, purely indexing a chunk of the tensor
    Narrow(Bound<usize>, Bound<usize>),
    /// Indexing via a 1d tensor
    IndexSelect(Tensor),
    Err(Error),
}

impl From<usize> for TensorIndexer {
    fn from(index: usize) -> Self {
        TensorIndexer::Select(index)
    }
}

impl From<&[u32]> for TensorIndexer {
    fn from(index: &[u32]) -> Self {
        match Tensor::new(index, &crate::Device::Cpu) {
            Ok(tensor) => TensorIndexer::IndexSelect(tensor),
            Err(e) => TensorIndexer::Err(e),
        }
    }
}

impl From<Vec<u32>> for TensorIndexer {
    fn from(index: Vec<u32>) -> Self {
        let len = index.len();
        match Tensor::from_vec(index, len, &crate::Device::Cpu) {
            Ok(tensor) => TensorIndexer::IndexSelect(tensor),
            Err(e) => TensorIndexer::Err(e),
        }
    }
}

impl From<&Tensor> for TensorIndexer {
    fn from(tensor: &Tensor) -> Self {
        TensorIndexer::IndexSelect(tensor.clone())
    }
}

trait RB: RangeBounds<usize> {}
impl RB for Range<usize> {}
impl RB for RangeFrom<usize> {}
impl RB for RangeFull {}
impl RB for RangeInclusive<usize> {}
impl RB for RangeTo<usize> {}
impl RB for RangeToInclusive<usize> {}

impl<T: RB> From<T> for TensorIndexer {
    fn from(range: T) -> Self {
        use std::ops::Bound::*;
        let start = match range.start_bound() {
            Included(idx) => Included(*idx),
            Excluded(idx) => Excluded(*idx),
            Unbounded => Unbounded,
        };
        let end = match range.end_bound() {
            Included(idx) => Included(*idx),
            Excluded(idx) => Excluded(*idx),
            Unbounded => Unbounded,
        };
        TensorIndexer::Narrow(start, end)
    }
}

/// Trait used to implement multiple signatures for ease of use of the slicing
/// of a tensor
pub trait IndexOp<T> {
    /// Returns a slicing iterator which are the chunks of data necessary to
    /// reconstruct the desired tensor.
fn i(&self, index: T) -> Result<Tensor, Error>; } impl<T> IndexOp<T> for Tensor where T: Into<TensorIndexer>, { fn i(&self, index: T) -> Result<Tensor, Error> { self.index(&[index.into()]) } } macro_rules! index_op_tuple { ($($t:ident),+) => { #[allow(non_snake_case)] impl<$($t),*> IndexOp<($($t,)*)> for Tensor where $($t: Into<TensorIndexer>,)* { fn i(&self, ($($t,)*): ($($t,)*)) -> Result<Tensor, Error> { self.index(&[$($t.into(),)*]) } } }; } index_op_tuple!(A); index_op_tuple!(A, B); index_op_tuple!(A, B, C); index_op_tuple!(A, B, C, D); index_op_tuple!(A, B, C, D, E); index_op_tuple!(A, B, C, D, E, F); index_op_tuple!(A, B, C, D, E, F, G);
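// Example: usage sketch for the indexing API above; the expected shapes
// follow from the `narrow`/`squeeze`/`index_select` calls in `Tensor::index`.
fn indexing_demo() -> Result<(), candle_core::Error> {
    use candle_core::{DType, Device, IndexOp, Tensor};

    let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
    // A `usize` selects and squeezes the dimension: [2, 3] -> [3].
    assert_eq!(a.i(0)?.shape().dims(), &[3]);
    // Ranges narrow without squeezing: [2, 3] -> [2, 2].
    assert_eq!(a.i((.., 1..3))?.shape().dims(), &[2, 2]);
    // A 1d u32 tensor (or `&[u32]`/`Vec<u32>`) gathers along the dimension.
    let ids = Tensor::new(&[0u32, 2], &Device::Cpu)?;
    assert_eq!(a.i((.., &ids))?.shape().dims(), &[2, 2]);
    Ok(())
}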
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/shape.rs
//! The shape of a tensor is a tuple with the size of each of its dimensions. #![allow(clippy::redundant_closure_call)] use crate::{Error, Result}; #[derive(Clone, PartialEq, Eq)] pub struct Shape(Vec<usize>); pub const SCALAR: Shape = Shape(vec![]); impl std::fmt::Debug for Shape { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{:?}", &self.dims()) } } impl<const C: usize> From<&[usize; C]> for Shape { fn from(dims: &[usize; C]) -> Self { Self(dims.to_vec()) } } impl From<&[usize]> for Shape { fn from(dims: &[usize]) -> Self { Self(dims.to_vec()) } } impl From<&Shape> for Shape { fn from(shape: &Shape) -> Self { Self(shape.0.to_vec()) } } impl From<()> for Shape { fn from(_: ()) -> Self { Self(vec![]) } } impl From<usize> for Shape { fn from(d1: usize) -> Self { Self(vec![d1]) } } impl From<(usize,)> for Shape { fn from(d1: (usize,)) -> Self { Self(vec![d1.0]) } } impl From<(usize, usize)> for Shape { fn from(d12: (usize, usize)) -> Self { Self(vec![d12.0, d12.1]) } } impl From<(usize, usize, usize)> for Shape { fn from(d123: (usize, usize, usize)) -> Self { Self(vec![d123.0, d123.1, d123.2]) } } impl From<(usize, usize, usize, usize)> for Shape { fn from(d1234: (usize, usize, usize, usize)) -> Self { Self(vec![d1234.0, d1234.1, d1234.2, d1234.3]) } } impl From<(usize, usize, usize, usize, usize)> for Shape { fn from(d12345: (usize, usize, usize, usize, usize)) -> Self { Self(vec![d12345.0, d12345.1, d12345.2, d12345.3, d12345.4]) } } impl From<(usize, usize, usize, usize, usize, usize)> for Shape { fn from(d123456: (usize, usize, usize, usize, usize, usize)) -> Self { Self(vec![ d123456.0, d123456.1, d123456.2, d123456.3, d123456.4, d123456.5, ]) } } impl From<Vec<usize>> for Shape { fn from(dims: Vec<usize>) -> Self { Self(dims) } } macro_rules! extract_dims { ($fn_name:ident, $cnt:tt, $dims:expr, $out_type:ty) => { pub fn $fn_name(dims: &[usize]) -> Result<$out_type> { if dims.len() != $cnt { Err(Error::UnexpectedNumberOfDims { expected: $cnt, got: dims.len(), shape: Shape::from(dims), } .bt()) } else { Ok($dims(dims)) } } impl Shape { pub fn $fn_name(&self) -> Result<$out_type> { $fn_name(self.0.as_slice()) } } impl crate::Tensor { pub fn $fn_name(&self) -> Result<$out_type> { self.shape().$fn_name() } } impl std::convert::TryInto<$out_type> for Shape { type Error = crate::Error; fn try_into(self) -> std::result::Result<$out_type, Self::Error> { self.$fn_name() } } }; } impl Shape { pub fn from_dims(dims: &[usize]) -> Self { Self(dims.to_vec()) } /// The rank is the number of dimensions, 0 for a scalar value, 1 for a vector, etc. pub fn rank(&self) -> usize { self.0.len() } pub fn into_dims(self) -> Vec<usize> { self.0 } /// The dimensions as a slice of `usize`. pub fn dims(&self) -> &[usize] { &self.0 } /// The total number of elements, this is the product of all dimension sizes. pub fn elem_count(&self) -> usize { self.0.iter().product() } /// The strides given in number of elements for a contiguous n-dimensional /// arrays using this shape. pub(crate) fn stride_contiguous(&self) -> Vec<usize> { let mut stride: Vec<_> = self .0 .iter() .rev() .scan(1, |prod, u| { let prod_pre_mult = *prod; *prod *= u; Some(prod_pre_mult) }) .collect(); stride.reverse(); stride } /// Returns true if the strides are C contiguous (aka row major). 
    pub fn is_contiguous(&self, stride: &[usize]) -> bool {
        if self.0.len() != stride.len() {
            return false;
        }
        let mut acc = 1;
        for (&stride, &dim) in stride.iter().zip(self.0.iter()).rev() {
            if stride != acc {
                return false;
            }
            acc *= dim;
        }
        true
    }

    /// Returns true if the strides are Fortran contiguous (aka column major).
    pub fn is_fortran_contiguous(&self, stride: &[usize]) -> bool {
        if self.0.len() != stride.len() {
            return false;
        }
        let mut acc = 1;
        for (&stride, &dim) in stride.iter().zip(self.0.iter()) {
            if stride != acc {
                return false;
            }
            acc *= dim;
        }
        true
    }

    /// Modifies the shape by adding a list of additional dimensions at the end of the existing
    /// dimensions.
    pub fn extend(mut self, additional_dims: &[usize]) -> Self {
        self.0.extend(additional_dims);
        self
    }

    /// Check whether the two shapes are compatible for broadcast, and if it is the case return the
    /// broadcasted shape. This is to be used for binary pointwise ops.
    pub fn broadcast_shape_binary_op(&self, rhs: &Self, op: &'static str) -> Result<Shape> {
        let lhs = self;
        let lhs_dims = lhs.dims();
        let rhs_dims = rhs.dims();
        let lhs_ndims = lhs_dims.len();
        let rhs_ndims = rhs_dims.len();
        let bcast_ndims = usize::max(lhs_ndims, rhs_ndims);
        let mut bcast_dims = vec![0; bcast_ndims];
        for (idx, bcast_value) in bcast_dims.iter_mut().enumerate() {
            let rev_idx = bcast_ndims - idx;
            let l_value = if lhs_ndims < rev_idx {
                1
            } else {
                lhs_dims[lhs_ndims - rev_idx]
            };
            let r_value = if rhs_ndims < rev_idx {
                1
            } else {
                rhs_dims[rhs_ndims - rev_idx]
            };
            *bcast_value = if l_value == r_value {
                l_value
            } else if l_value == 1 {
                r_value
            } else if r_value == 1 {
                l_value
            } else {
                Err(Error::ShapeMismatchBinaryOp {
                    lhs: lhs.clone(),
                    rhs: rhs.clone(),
                    op,
                }
                .bt())?
            }
        }
        Ok(Shape::from(bcast_dims))
    }

    pub(crate) fn broadcast_shape_matmul(&self, rhs: &Self) -> Result<(Shape, Shape)> {
        let lhs = self;
        let lhs_dims = lhs.dims();
        let rhs_dims = rhs.dims();
        if lhs_dims.len() < 2 || rhs_dims.len() < 2 {
            crate::bail!("only 2d matrices are supported {lhs:?} {rhs:?}")
        }
        let (m, lhs_k) = (lhs_dims[lhs_dims.len() - 2], lhs_dims[lhs_dims.len() - 1]);
        let (rhs_k, n) = (rhs_dims[rhs_dims.len() - 2], rhs_dims[rhs_dims.len() - 1]);
        if lhs_k != rhs_k {
            crate::bail!("different inner dimensions in broadcast matmul {lhs:?} {rhs:?}")
        }

        let lhs_b = Self::from(&lhs_dims[..lhs_dims.len() - 2]);
        let rhs_b = Self::from(&rhs_dims[..rhs_dims.len() - 2]);
        let bcast = lhs_b.broadcast_shape_binary_op(&rhs_b, "broadcast_matmul")?;
        let bcast_dims = bcast.dims();

        let bcast_lhs = [bcast_dims, &[m, lhs_k]].concat();
        let bcast_rhs = [bcast_dims, &[rhs_k, n]].concat();
        Ok((Shape::from(bcast_lhs), Shape::from(bcast_rhs)))
    }
}

pub trait Dim {
    fn to_index(&self, shape: &Shape, op: &'static str) -> Result<usize>;
    fn to_index_plus_one(&self, shape: &Shape, op: &'static str) -> Result<usize>;
}

impl Dim for usize {
    fn to_index(&self, shape: &Shape, op: &'static str) -> Result<usize> {
        let dim = *self;
        if dim >= shape.dims().len() {
            Err(Error::DimOutOfRange {
                shape: shape.clone(),
                dim: dim as i32,
                op,
            }
            .bt())?
        } else {
            Ok(dim)
        }
    }

    fn to_index_plus_one(&self, shape: &Shape, op: &'static str) -> Result<usize> {
        let dim = *self;
        if dim > shape.dims().len() {
            Err(Error::DimOutOfRange {
                shape: shape.clone(),
                dim: dim as i32,
                op,
            }
            .bt())?
} else { Ok(dim) } } } #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum D { Minus1, Minus2, } impl D { fn out_of_range(&self, shape: &Shape, op: &'static str) -> Error { let dim = match self { Self::Minus1 => -1, Self::Minus2 => -2, }; Error::DimOutOfRange { shape: shape.clone(), dim, op, } .bt() } } impl Dim for D { fn to_index(&self, shape: &Shape, op: &'static str) -> Result<usize> { let rank = shape.rank(); match self { Self::Minus1 if rank >= 1 => Ok(rank - 1), Self::Minus2 if rank >= 2 => Ok(rank - 2), _ => Err(self.out_of_range(shape, op)), } } fn to_index_plus_one(&self, shape: &Shape, op: &'static str) -> Result<usize> { let rank = shape.rank(); match self { Self::Minus1 => Ok(rank), Self::Minus2 if rank >= 1 => Ok(rank - 1), _ => Err(self.out_of_range(shape, op)), } } } pub trait Dims: Sized { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>>; fn to_indexes(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let dims = self.to_indexes_internal(shape, op)?; for (i, &dim) in dims.iter().enumerate() { if dims[..i].contains(&dim) { Err(Error::DuplicateDimIndex { shape: shape.clone(), dims: dims.clone(), op, } .bt())? } if dim >= shape.rank() { Err(Error::DimOutOfRange { shape: shape.clone(), dim: dim as i32, op, } .bt())? } } Ok(dims) } } impl Dims for Vec<usize> { fn to_indexes_internal(self, _: &Shape, _: &'static str) -> Result<Vec<usize>> { Ok(self) } } impl<const N: usize> Dims for [usize; N] { fn to_indexes_internal(self, _: &Shape, _: &'static str) -> Result<Vec<usize>> { Ok(self.to_vec()) } } impl Dims for &[usize] { fn to_indexes_internal(self, _: &Shape, _: &'static str) -> Result<Vec<usize>> { Ok(self.to_vec()) } } impl Dims for () { fn to_indexes_internal(self, _: &Shape, _: &'static str) -> Result<Vec<usize>> { Ok(vec![]) } } impl<D: Dim + Sized> Dims for D { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let dim = self.to_index(shape, op)?; Ok(vec![dim]) } } impl<D: Dim> Dims for (D,) { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let dim = self.0.to_index(shape, op)?; Ok(vec![dim]) } } impl<D1: Dim, D2: Dim> Dims for (D1, D2) { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let d0 = self.0.to_index(shape, op)?; let d1 = self.1.to_index(shape, op)?; Ok(vec![d0, d1]) } } impl<D1: Dim, D2: Dim, D3: Dim> Dims for (D1, D2, D3) { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let d0 = self.0.to_index(shape, op)?; let d1 = self.1.to_index(shape, op)?; let d2 = self.2.to_index(shape, op)?; Ok(vec![d0, d1, d2]) } } impl<D1: Dim, D2: Dim, D3: Dim, D4: Dim> Dims for (D1, D2, D3, D4) { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let d0 = self.0.to_index(shape, op)?; let d1 = self.1.to_index(shape, op)?; let d2 = self.2.to_index(shape, op)?; let d3 = self.3.to_index(shape, op)?; Ok(vec![d0, d1, d2, d3]) } } impl<D1: Dim, D2: Dim, D3: Dim, D4: Dim, D5: Dim> Dims for (D1, D2, D3, D4, D5) { fn to_indexes_internal(self, shape: &Shape, op: &'static str) -> Result<Vec<usize>> { let d0 = self.0.to_index(shape, op)?; let d1 = self.1.to_index(shape, op)?; let d2 = self.2.to_index(shape, op)?; let d3 = self.3.to_index(shape, op)?; let d4 = self.4.to_index(shape, op)?; Ok(vec![d0, d1, d2, d3, d4]) } } impl<D1: Dim, D2: Dim, D3: Dim, D4: Dim, D5: Dim, D6: Dim> Dims for (D1, D2, D3, D4, D5, D6) { fn to_indexes_internal(self, shape: &Shape, op: 
&'static str) -> Result<Vec<usize>> { let d0 = self.0.to_index(shape, op)?; let d1 = self.1.to_index(shape, op)?; let d2 = self.2.to_index(shape, op)?; let d3 = self.3.to_index(shape, op)?; let d4 = self.4.to_index(shape, op)?; let d5 = self.5.to_index(shape, op)?; Ok(vec![d0, d1, d2, d3, d4, d5]) } } extract_dims!(dims0, 0, |_: &[usize]| (), ()); extract_dims!(dims1, 1, |d: &[usize]| d[0], usize); extract_dims!(dims2, 2, |d: &[usize]| (d[0], d[1]), (usize, usize)); extract_dims!( dims3, 3, |d: &[usize]| (d[0], d[1], d[2]), (usize, usize, usize) ); extract_dims!( dims4, 4, |d: &[usize]| (d[0], d[1], d[2], d[3]), (usize, usize, usize, usize) ); extract_dims!( dims5, 5, |d: &[usize]| (d[0], d[1], d[2], d[3], d[4]), (usize, usize, usize, usize, usize) ); #[cfg(test)] mod tests { use super::*; #[test] fn stride() { let shape = Shape::from(()); assert_eq!(shape.stride_contiguous(), Vec::<usize>::new()); let shape = Shape::from(42); assert_eq!(shape.stride_contiguous(), [1]); let shape = Shape::from((42, 1337)); assert_eq!(shape.stride_contiguous(), [1337, 1]); let shape = Shape::from((299, 792, 458)); assert_eq!(shape.stride_contiguous(), [458 * 792, 458, 1]); } } pub trait ShapeWithOneHole { fn into_shape(self, el_count: usize) -> Result<Shape>; } impl<S: Into<Shape>> ShapeWithOneHole for S { fn into_shape(self, _el_count: usize) -> Result<Shape> { Ok(self.into()) } } impl ShapeWithOneHole for ((),) { fn into_shape(self, el_count: usize) -> Result<Shape> { Ok(el_count.into()) } } fn hole_size(el_count: usize, prod_d: usize, s: &dyn std::fmt::Debug) -> Result<usize> { if prod_d == 0 { crate::bail!("cannot reshape tensor of {el_count} elements to {s:?}") } if el_count % prod_d != 0 { crate::bail!("cannot reshape tensor with {el_count} elements to {s:?}") } Ok(el_count / prod_d) } impl ShapeWithOneHole for ((), usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let ((), d1) = self; Ok((hole_size(el_count, d1, &self)?, d1).into()) } } impl ShapeWithOneHole for (usize, ()) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, ()) = self; Ok((d1, hole_size(el_count, d1, &self)?).into()) } } impl ShapeWithOneHole for ((), usize, usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let ((), d1, d2) = self; Ok((hole_size(el_count, d1 * d2, &self)?, d1, d2).into()) } } impl ShapeWithOneHole for (usize, (), usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, (), d2) = self; Ok((d1, hole_size(el_count, d1 * d2, &self)?, d2).into()) } } impl ShapeWithOneHole for (usize, usize, ()) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, d2, ()) = self; Ok((d1, d2, hole_size(el_count, d1 * d2, &self)?).into()) } } impl ShapeWithOneHole for ((), usize, usize, usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let ((), d1, d2, d3) = self; let d = hole_size(el_count, d1 * d2 * d3, &self)?; Ok((d, d1, d2, d3).into()) } } impl ShapeWithOneHole for (usize, (), usize, usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, (), d2, d3) = self; let d = hole_size(el_count, d1 * d2 * d3, &self)?; Ok((d1, d, d2, d3).into()) } } impl ShapeWithOneHole for (usize, usize, (), usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, d2, (), d3) = self; let d = hole_size(el_count, d1 * d2 * d3, &self)?; Ok((d1, d2, d, d3).into()) } } impl ShapeWithOneHole for (usize, usize, usize, ()) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, d2, d3, ()) = self; let d = hole_size(el_count, d1 * d2 * 
d3, &self)?; Ok((d1, d2, d3, d).into()) } } impl ShapeWithOneHole for ((), usize, usize, usize, usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let ((), d1, d2, d3, d4) = self; let d = hole_size(el_count, d1 * d2 * d3 * d4, &self)?; Ok((d, d1, d2, d3, d4).into()) } } impl ShapeWithOneHole for (usize, (), usize, usize, usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, (), d2, d3, d4) = self; let d = hole_size(el_count, d1 * d2 * d3 * d4, &self)?; Ok((d1, d, d2, d3, d4).into()) } } impl ShapeWithOneHole for (usize, usize, (), usize, usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, d2, (), d3, d4) = self; let d = hole_size(el_count, d1 * d2 * d3 * d4, &self)?; Ok((d1, d2, d, d3, d4).into()) } } impl ShapeWithOneHole for (usize, usize, usize, (), usize) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, d2, d3, (), d4) = self; let d = hole_size(el_count, d1 * d2 * d3 * d4, &self)?; Ok((d1, d2, d3, d, d4).into()) } } impl ShapeWithOneHole for (usize, usize, usize, usize, ()) { fn into_shape(self, el_count: usize) -> Result<Shape> { let (d1, d2, d3, d4, ()) = self; let d = hole_size(el_count, d1 * d2 * d3 * d4, &self)?; Ok((d1, d2, d3, d4, d).into()) } }
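// Example: usage sketch for the helpers above. `broadcast_shape_binary_op`
// aligns dimensions right-to-left and lets size-1 dimensions broadcast, and
// the `ShapeWithOneHole` impls let `()` stand for the single inferred
// dimension (this assumes the `shape` module is public, as at the crate root).
fn shape_demo() -> candle_core::Result<()> {
    use candle_core::shape::{Shape, ShapeWithOneHole};

    let lhs = Shape::from((3, 1, 5));
    let rhs = Shape::from((4, 5));
    // (3, 1, 5) vs (4, 5): the 1 broadcasts against 4, the leading 3 is kept.
    let out = lhs.broadcast_shape_binary_op(&rhs, "add")?;
    assert_eq!(out.dims(), &[3, 4, 5]);

    // 12 elements reshaped to ((), 4): the hole is inferred as 12 / 4 = 3.
    let inferred = ((), 4usize).into_shape(12)?;
    assert_eq!(inferred.dims(), &[3, 4]);
    Ok(())
}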
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/pickle.rs
// Just enough pickle support to be able to read PyTorch checkpoints.
// This hardcodes objects that are required for tensor reading; we may want to make this a bit more
// composable/tensor agnostic at some point.
use crate::{DType, Error as E, Layout, Result, Tensor};
use byteorder::{LittleEndian, ReadBytesExt};
use std::collections::HashMap;
use std::io::BufRead;

const VERBOSE: bool = false;

// https://docs.juliahub.com/Pickle/LAUNc/0.1.0/opcode/
#[repr(u8)]
#[derive(Debug, Eq, PartialEq, Clone)]
pub enum OpCode {
    // https://github.com/python/cpython/blob/ed25f097160b5cbb0c9a1f9a746d2f1bbc96515a/Lib/pickletools.py#L2123
    Proto = 0x80,
    Global = b'c',
    BinPut = b'q',
    LongBinPut = b'r',
    EmptyTuple = b')',
    Reduce = b'R',
    Mark = b'(',
    BinUnicode = b'X',
    BinInt = b'J',
    Tuple = b't',
    BinPersId = b'Q',
    BinInt1 = b'K',
    BinInt2 = b'M',
    Tuple1 = 0x85,
    Tuple2 = 0x86,
    Tuple3 = 0x87,
    NewTrue = 0x88,
    NewFalse = 0x89,
    None = b'N',
    BinGet = b'h',
    LongBinGet = b'j',
    SetItem = b's',
    SetItems = b'u',
    EmptyDict = b'}',
    Dict = b'd',
    Build = b'b',
    Stop = b'.',
    NewObj = 0x81,
    EmptyList = b']',
    BinFloat = b'G', // BINFLOAT; the pickle protocol uses an uppercase 'G'.
    Append = b'a',
    Appends = b'e',
}

// Avoid using FromPrimitive so as not to drag another dependency.
impl TryFrom<u8> for OpCode {
    type Error = u8;
    fn try_from(value: u8) -> std::result::Result<Self, Self::Error> {
        match value {
            0x80 => Ok(Self::Proto),
            b'c' => Ok(Self::Global),
            b'q' => Ok(Self::BinPut),
            b'r' => Ok(Self::LongBinPut),
            b')' => Ok(Self::EmptyTuple),
            b'R' => Ok(Self::Reduce),
            b'(' => Ok(Self::Mark),
            b'X' => Ok(Self::BinUnicode),
            b'J' => Ok(Self::BinInt),
            b't' => Ok(Self::Tuple),
            b'Q' => Ok(Self::BinPersId),
            b'K' => Ok(Self::BinInt1),
            b'M' => Ok(Self::BinInt2),
            b'N' => Ok(Self::None),
            0x85 => Ok(Self::Tuple1),
            0x86 => Ok(Self::Tuple2),
            0x87 => Ok(Self::Tuple3),
            0x88 => Ok(Self::NewTrue),
            0x89 => Ok(Self::NewFalse),
            b'h' => Ok(Self::BinGet),
            b'j' => Ok(Self::LongBinGet),
            b's' => Ok(Self::SetItem),
            b'u' => Ok(Self::SetItems),
            b'}' => Ok(Self::EmptyDict),
            // `b'd'` is the (non-empty) DICT opcode; mapping it to `EmptyDict`
            // would drop the key/value pairs accumulated since the marker.
            b'd' => Ok(Self::Dict),
            b'b' => Ok(Self::Build),
            b'.'
=> Ok(Self::Stop), 0x81 => Ok(Self::NewObj), b']' => Ok(Self::EmptyList), b'G' => Ok(Self::BinFloat), b'a' => Ok(Self::Append), b'e' => Ok(Self::Appends), value => Err(value), } } } fn read_to_newline<R: BufRead>(r: &mut R) -> Result<Vec<u8>> { let mut data: Vec<u8> = Vec::with_capacity(32); r.read_until(b'\n', &mut data)?; data.pop(); if data.last() == Some(&b'\r') { data.pop(); } Ok(data) } #[derive(Debug, Clone, PartialEq)] pub enum Object { Class { module_name: String, class_name: String, }, Int(i32), Float(f64), Unicode(String), Bool(bool), None, Tuple(Vec<Object>), List(Vec<Object>), Mark, Dict(Vec<(Object, Object)>), Reduce { callable: Box<Object>, args: Box<Object>, }, Build { callable: Box<Object>, args: Box<Object>, }, PersistentLoad(Box<Object>), } type OResult<T> = std::result::Result<T, Object>; impl Object { pub fn unicode(self) -> OResult<String> { match self { Self::Unicode(t) => Ok(t), _ => Err(self), } } pub fn reduce(self) -> OResult<(Self, Self)> { match self { Self::Reduce { callable, args } => Ok((*callable, *args)), _ => Err(self), } } pub fn none(self) -> OResult<()> { match self { Self::None => Ok(()), _ => Err(self), } } pub fn persistent_load(self) -> OResult<Self> { match self { Self::PersistentLoad(t) => Ok(*t), _ => Err(self), } } pub fn bool(self) -> OResult<bool> { match self { Self::Bool(t) => Ok(t), _ => Err(self), } } pub fn int(self) -> OResult<i32> { match self { Self::Int(t) => Ok(t), _ => Err(self), } } pub fn tuple(self) -> OResult<Vec<Self>> { match self { Self::Tuple(t) => Ok(t), _ => Err(self), } } pub fn dict(self) -> OResult<Vec<(Self, Self)>> { match self { Self::Dict(t) => Ok(t), _ => Err(self), } } pub fn class(self) -> OResult<(String, String)> { match self { Self::Class { module_name, class_name, } => Ok((module_name, class_name)), _ => Err(self), } } pub fn into_tensor_info( self, name: Self, dir_name: &std::path::Path, ) -> Result<Option<TensorInfo>> { let name = match name.unicode() { Ok(name) => name, Err(_) => return Ok(None), }; let (callable, args) = match self.reduce() { Ok(callable_args) => callable_args, _ => return Ok(None), }; let (callable, args) = match callable { Object::Class { module_name, class_name, } if module_name == "torch._tensor" && class_name == "_rebuild_from_type_v2" => { let mut args = args.tuple()?; let callable = args.remove(0); let args = args.remove(1); (callable, args) } _ => (callable, args), }; match callable { Object::Class { module_name, class_name, } if module_name == "torch._utils" && class_name == "_rebuild_tensor_v2" => {} _ => return Ok(None), }; let (layout, dtype, file_path, storage_size) = rebuild_args(args)?; let mut path = dir_name.to_path_buf(); path.push(file_path); Ok(Some(TensorInfo { name, dtype, layout, path: path.to_string_lossy().into_owned(), storage_size, })) } } impl TryFrom<Object> for String { type Error = Object; fn try_from(value: Object) -> std::result::Result<Self, Self::Error> { match value { Object::Unicode(s) => Ok(s), other => Err(other), } } } impl TryFrom<Object> for usize { type Error = Object; fn try_from(value: Object) -> std::result::Result<Self, Self::Error> { match value { Object::Int(s) if s >= 0 => Ok(s as usize), other => Err(other), } } } impl<T: TryFrom<Object, Error = Object>> TryFrom<Object> for Vec<T> { type Error = Object; fn try_from(value: Object) -> std::result::Result<Self, Self::Error> { match value { Object::Tuple(values) => { // This does not return the appropriate value in the error case but instead return // the object related to the first error. 
values .into_iter() .map(|v| T::try_from(v)) .collect::<std::result::Result<Vec<T>, Self::Error>>() } other => Err(other), } } } #[derive(Debug)] pub struct Stack { stack: Vec<Object>, memo: HashMap<u32, Object>, } impl Stack { pub fn empty() -> Self { Self { stack: Vec::with_capacity(512), memo: HashMap::new(), } } pub fn stack(&self) -> &[Object] { self.stack.as_slice() } pub fn read_loop<R: BufRead>(&mut self, r: &mut R) -> Result<()> { loop { if self.read(r)? { break; } } Ok(()) } pub fn finalize(mut self) -> Result<Object> { self.pop() } fn push(&mut self, obj: Object) { self.stack.push(obj) } fn pop(&mut self) -> Result<Object> { match self.stack.pop() { None => crate::bail!("unexpected empty stack"), Some(obj) => Ok(obj), } } // https://docs.juliahub.com/Pickle/LAUNc/0.1.0/opcode/#Pickle.OpCodes.BUILD fn build(&mut self) -> Result<()> { let args = self.pop()?; let obj = self.pop()?; let obj = match (obj, args) { (Object::Dict(mut obj), Object::Dict(mut args)) => { obj.append(&mut args); Object::Dict(obj) } (obj, args) => Object::Build { callable: Box::new(obj), args: Box::new(args), }, }; self.push(obj); Ok(()) } fn reduce(&mut self) -> Result<()> { let args = self.pop()?; let callable = self.pop()?; #[allow(clippy::single_match)] let reduced = match &callable { Object::Class { module_name, class_name, } => { if module_name == "collections" && class_name == "OrderedDict" { // TODO: have a separate ordered dict. Some(Object::Dict(vec![])) } else { None } } _ => None, }; let reduced = reduced.unwrap_or_else(|| Object::Reduce { callable: Box::new(callable), args: Box::new(args), }); self.push(reduced); Ok(()) } fn last(&mut self) -> Result<&mut Object> { match self.stack.last_mut() { None => crate::bail!("unexpected empty stack"), Some(obj) => Ok(obj), } } fn memo_get(&self, id: u32) -> Result<Object> { match self.memo.get(&id) { None => crate::bail!("missing object in memo {id}"), Some(obj) => { // Maybe we should use refcounting rather than doing potential large clones here. Ok(obj.clone()) } } } fn memo_put(&mut self, id: u32) -> Result<()> { let obj = self.last()?.clone(); self.memo.insert(id, obj); Ok(()) } fn persistent_load(&self, id: Object) -> Result<Object> { Ok(Object::PersistentLoad(Box::new(id))) } fn new_obj(&self, class: Object, args: Object) -> Result<Object> { Ok(Object::Reduce { callable: Box::new(class), args: Box::new(args), }) } fn pop_to_marker(&mut self) -> Result<Vec<Object>> { let mut mark_idx = None; for (idx, obj) in self.stack.iter().enumerate().rev() { if obj == &Object::Mark { mark_idx = Some(idx); break; } } match mark_idx { Some(mark_idx) => { let objs = self.stack.split_off(mark_idx + 1); self.stack.pop(); Ok(objs) } None => { crate::bail!("marker object not found") } } } pub fn read<R: BufRead>(&mut self, r: &mut R) -> Result<bool> { let op_code = match OpCode::try_from(r.read_u8()?) 
{ Ok(op_code) => op_code, Err(op_code) => { crate::bail!("unknown op-code {op_code}") } }; // println!("op: {op_code:?}"); // println!("{:?}", self.stack); match op_code { OpCode::Proto => { let version = r.read_u8()?; if VERBOSE { println!("proto {version}"); } } OpCode::Global => { let module_name = read_to_newline(r)?; let class_name = read_to_newline(r)?; let module_name = String::from_utf8_lossy(&module_name).to_string(); let class_name = String::from_utf8_lossy(&class_name).to_string(); self.push(Object::Class { module_name, class_name, }) } OpCode::BinInt1 => { let arg = r.read_u8()?; self.push(Object::Int(arg as i32)) } OpCode::BinInt2 => { let arg = r.read_u16::<LittleEndian>()?; self.push(Object::Int(arg as i32)) } OpCode::BinInt => { let arg = r.read_i32::<LittleEndian>()?; self.push(Object::Int(arg)) } OpCode::BinFloat => { let arg = r.read_f64::<LittleEndian>()?; self.push(Object::Float(arg)) } OpCode::BinUnicode => { let len = r.read_u32::<LittleEndian>()?; let mut data = vec![0u8; len as usize]; r.read_exact(&mut data)?; let data = String::from_utf8(data).map_err(E::wrap)?; self.push(Object::Unicode(data)) } OpCode::BinPersId => { let id = self.pop()?; let obj = self.persistent_load(id)?; self.push(obj) } OpCode::Tuple => { let objs = self.pop_to_marker()?; self.push(Object::Tuple(objs)) } OpCode::Tuple1 => { let obj = self.pop()?; self.push(Object::Tuple(vec![obj])) } OpCode::Tuple2 => { let obj2 = self.pop()?; let obj1 = self.pop()?; self.push(Object::Tuple(vec![obj1, obj2])) } OpCode::Tuple3 => { let obj3 = self.pop()?; let obj2 = self.pop()?; let obj1 = self.pop()?; self.push(Object::Tuple(vec![obj1, obj2, obj3])) } OpCode::NewTrue => self.push(Object::Bool(true)), OpCode::NewFalse => self.push(Object::Bool(false)), OpCode::Append => { let value = self.pop()?; let pylist = self.last()?; if let Object::List(d) = pylist { d.push(value) } else { crate::bail!("expected a list, got {pylist:?}") } } OpCode::Appends => { let objs = self.pop_to_marker()?; let pylist = self.last()?; if let Object::List(d) = pylist { d.extend(objs) } else { crate::bail!("expected a list, got {pylist:?}") } } OpCode::SetItem => { let value = self.pop()?; let key = self.pop()?; let pydict = self.last()?; if let Object::Dict(d) = pydict { d.push((key, value)) } else { crate::bail!("expected a dict, got {pydict:?}") } } OpCode::SetItems => { let mut objs = self.pop_to_marker()?; let pydict = self.last()?; if let Object::Dict(d) = pydict { if objs.len() % 2 != 0 { crate::bail!("setitems: not an even number of objects") } while let Some(value) = objs.pop() { let key = objs.pop().unwrap(); d.push((key, value)) } } else { crate::bail!("expected a dict, got {pydict:?}") } } OpCode::None => self.push(Object::None), OpCode::Stop => { return Ok(true); } OpCode::Build => self.build()?, OpCode::EmptyDict => self.push(Object::Dict(vec![])), OpCode::Dict => { let mut objs = self.pop_to_marker()?; let mut pydict = vec![]; if objs.len() % 2 != 0 { crate::bail!("setitems: not an even number of objects") } while let Some(value) = objs.pop() { let key = objs.pop().unwrap(); pydict.push((key, value)) } self.push(Object::Dict(pydict)) } OpCode::Mark => self.push(Object::Mark), OpCode::Reduce => self.reduce()?, OpCode::EmptyTuple => self.push(Object::Tuple(vec![])), OpCode::EmptyList => self.push(Object::List(vec![])), OpCode::BinGet => { let arg = r.read_u8()?; let obj = self.memo_get(arg as u32)?; self.push(obj) } OpCode::LongBinGet => { let arg = r.read_u32::<LittleEndian>()?; let obj = self.memo_get(arg)?; 
self.push(obj) } OpCode::BinPut => { let arg = r.read_u8()?; self.memo_put(arg as u32)? } OpCode::LongBinPut => { let arg = r.read_u32::<LittleEndian>()?; self.memo_put(arg)? } OpCode::NewObj => { let args = self.pop()?; let class = self.pop()?; let obj = self.new_obj(class, args)?; self.push(obj) } } Ok(false) } } impl From<Object> for E { fn from(value: Object) -> Self { E::Msg(format!("conversion error on {value:?}")) } } // https://github.com/pytorch/pytorch/blob/4eac43d046ded0f0a5a5fa8db03eb40f45bf656e/torch/_utils.py#L198 // Arguments: storage, storage_offset, size, stride, requires_grad, backward_hooks fn rebuild_args(args: Object) -> Result<(Layout, DType, String, usize)> { let mut args = args.tuple()?; let stride = Vec::<usize>::try_from(args.remove(3))?; let size = Vec::<usize>::try_from(args.remove(2))?; let offset = args.remove(1).int()? as usize; let storage = args.remove(0).persistent_load()?; let mut storage = storage.tuple()?; let storage_size = storage.remove(4).int()? as usize; let path = storage.remove(2).unicode()?; let (_module_name, class_name) = storage.remove(1).class()?; let dtype = match class_name.as_str() { "FloatStorage" => DType::F32, "DoubleStorage" => DType::F64, "HalfStorage" => DType::F16, "BFloat16Storage" => DType::BF16, "ByteStorage" => DType::U8, "LongStorage" => DType::I64, other => { crate::bail!("unsupported storage type {other}") } }; let layout = Layout::new(crate::Shape::from(size), stride, offset); Ok((layout, dtype, path, storage_size)) } #[derive(Debug, Clone)] pub struct TensorInfo { pub name: String, pub dtype: DType, pub layout: Layout, pub path: String, pub storage_size: usize, } pub fn read_pth_tensor_info<P: AsRef<std::path::Path>>( file: P, verbose: bool, ) -> Result<Vec<TensorInfo>> { let file = std::fs::File::open(file)?; let zip_reader = std::io::BufReader::new(file); let mut zip = zip::ZipArchive::new(zip_reader)?; let zip_file_names = zip .file_names() .map(|f| f.to_string()) .collect::<Vec<String>>(); let mut tensor_infos = vec![]; for file_name in zip_file_names.iter() { if !file_name.ends_with("data.pkl") { continue; } let dir_name = std::path::PathBuf::from(file_name.strip_suffix(".pkl").unwrap()); let reader = zip.by_name(file_name)?; let mut reader = std::io::BufReader::new(reader); let mut stack = Stack::empty(); stack.read_loop(&mut reader)?; let obj = stack.finalize()?; if VERBOSE || verbose { println!("{obj:?}"); } let obj = match obj { Object::Build { callable, args } => match *callable { Object::Reduce { callable, args: _ } => match *callable { Object::Class { module_name, class_name, } if module_name == "__torch__" && class_name == "Module" => *args, _ => continue, }, _ => continue, }, obj => obj, }; if let Object::Dict(key_values) = obj { for (name, value) in key_values.into_iter() { match value.into_tensor_info(name, &dir_name) { Ok(Some(tensor_info)) => tensor_infos.push(tensor_info), Ok(None) => {} Err(err) => eprintln!("skipping: {err:?}"), } } } } Ok(tensor_infos) } /// Lazy tensor loader. pub struct PthTensors { tensor_infos: HashMap<String, TensorInfo>, path: std::path::PathBuf, // We do not store a zip reader as it needs mutable access to extract data. Instead we // re-create a zip reader for each tensor. 
} impl PthTensors { pub fn new<P: AsRef<std::path::Path>>(path: P) -> Result<Self> { let tensor_infos = read_pth_tensor_info(path.as_ref(), false)?; let tensor_infos = tensor_infos .into_iter() .map(|ti| (ti.name.to_string(), ti)) .collect(); let path = path.as_ref().to_owned(); Ok(Self { tensor_infos, path }) } pub fn tensor_infos(&self) -> &HashMap<String, TensorInfo> { &self.tensor_infos } pub fn get(&self, name: &str) -> Result<Option<Tensor>> { let tensor_info = match self.tensor_infos.get(name) { None => return Ok(None), Some(tensor_info) => tensor_info, }; // We hope that the file has not changed since first reading it. let zip_reader = std::io::BufReader::new(std::fs::File::open(&self.path)?); let mut zip = zip::ZipArchive::new(zip_reader)?; let mut reader = zip.by_name(&tensor_info.path)?; // Reading the data is a bit tricky as it can be strided, use an offset, etc. // For now only support the basic case. if tensor_info.layout.start_offset() != 0 || !tensor_info.layout.is_contiguous() { crate::bail!( "cannot retrieve non-contiguous tensors {:?}", tensor_info.layout ) } let tensor = Tensor::from_reader( tensor_info.layout.shape().clone(), tensor_info.dtype, &mut reader, )?; Ok(Some(tensor)) } } /// Read all the tensors from a PyTorch pth file. pub fn read_all<P: AsRef<std::path::Path>>(path: P) -> Result<Vec<(String, Tensor)>> { let pth = PthTensors::new(path)?; let tensor_names = pth.tensor_infos.keys(); let mut tensors = Vec::with_capacity(tensor_names.len()); for name in tensor_names { if let Some(tensor) = pth.get(name)? { tensors.push((name.to_string(), tensor)) } } Ok(tensors) }
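// A minimal usage sketch (added for illustration; `model.pth` is a hypothetical
// path, not one referenced elsewhere in this crate). `PthTensors` only reads the
// metadata up front and re-opens the zip archive on each `get`, while `read_all`
// eagerly materializes every tensor.
#[allow(dead_code)]
fn example_usage() -> Result<()> {
    // Lazy: list the tensors without reading any data.
    let pth = PthTensors::new("model.pth")?;
    for (name, info) in pth.tensor_infos().iter() {
        println!("{name}: {:?} {:?}", info.dtype, info.layout.shape());
    }
    // Eager: load everything into memory.
    for (name, tensor) in read_all("model.pth")? {
        println!("{name}: {:?}", tensor.shape());
    }
    Ok(())
}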
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/cpu_backend.rs
use crate::backend::{BackendDevice, BackendStorage}; use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT}; use crate::{DType, Error, IntDType, Layout, Result, Shape, WithDType}; use half::{bf16, f16}; use rayon::prelude::*; const USE_IM2COL_CONV1D: bool = true; const USE_IM2COL_CONV2D: bool = true; // TODO: Maybe we should not implement [Clone] here and instead have an explicit allocator + // intercept the oom errors to avoid panicking and provide a proper error. #[derive(Debug, Clone)] pub enum CpuStorage { U8(Vec<u8>), U32(Vec<u32>), I64(Vec<i64>), BF16(Vec<bf16>), F16(Vec<f16>), F32(Vec<f32>), F64(Vec<f64>), } #[derive(Debug, Clone)] pub struct CpuDevice; pub trait Map1 { fn f<T: WithDType>(&self, vs: &[T], layout: &Layout) -> Result<Vec<T>>; fn map(&self, vs: &CpuStorage, layout: &Layout) -> Result<CpuStorage> { match vs { CpuStorage::U8(vs) => Ok(CpuStorage::U8(self.f(vs, layout)?)), CpuStorage::U32(vs) => Ok(CpuStorage::U32(self.f(vs, layout)?)), CpuStorage::I64(vs) => Ok(CpuStorage::I64(self.f(vs, layout)?)), CpuStorage::BF16(vs) => Ok(CpuStorage::BF16(self.f(vs, layout)?)), CpuStorage::F16(vs) => Ok(CpuStorage::F16(self.f(vs, layout)?)), CpuStorage::F32(vs) => Ok(CpuStorage::F32(self.f(vs, layout)?)), CpuStorage::F64(vs) => Ok(CpuStorage::F64(self.f(vs, layout)?)), } } } pub trait Map1Any { fn f<T: WithDType, W: Fn(Vec<T>) -> CpuStorage>( &self, vs: &[T], layout: &Layout, wrap: W, ) -> Result<CpuStorage>; fn map(&self, vs: &CpuStorage, layout: &Layout) -> Result<CpuStorage> { match vs { CpuStorage::U8(vs) => Ok(self.f(vs, layout, CpuStorage::U8)?), CpuStorage::U32(vs) => Ok(self.f(vs, layout, CpuStorage::U32)?), CpuStorage::I64(vs) => Ok(self.f(vs, layout, CpuStorage::I64)?), CpuStorage::BF16(vs) => Ok(self.f(vs, layout, CpuStorage::BF16)?), CpuStorage::F16(vs) => Ok(self.f(vs, layout, CpuStorage::F16)?), CpuStorage::F32(vs) => Ok(self.f(vs, layout, CpuStorage::F32)?), CpuStorage::F64(vs) => Ok(self.f(vs, layout, CpuStorage::F64)?), } } } type C = CpuStorage; pub trait Map2 { const OP: &'static str; fn f<T: WithDType>(&self, v1: &[T], l1: &Layout, v2: &[T], l2: &Layout) -> Result<Vec<T>>; fn map( &self, v1: &CpuStorage, l1: &Layout, v2: &CpuStorage, l2: &Layout, ) -> Result<CpuStorage> { match (v1, v2) { (C::U8(v1), C::U8(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::U32(v1), C::U32(v2)) => Ok(C::U32(self.f(v1, l1, v2, l2)?)), (C::I64(v1), C::I64(v2)) => Ok(C::I64(self.f(v1, l1, v2, l2)?)), (C::BF16(v1), C::BF16(v2)) => Ok(C::BF16(self.f(v1, l1, v2, l2)?)), (C::F16(v1), C::F16(v2)) => Ok(C::F16(self.f(v1, l1, v2, l2)?)), (C::F32(v1), C::F32(v2)) => Ok(C::F32(self.f(v1, l1, v2, l2)?)), (C::F64(v1), C::F64(v2)) => Ok(C::F64(self.f(v1, l1, v2, l2)?)), _ => Err(Error::DTypeMismatchBinaryOp { lhs: v1.dtype(), rhs: v2.dtype(), op: Self::OP, } .bt()), } } } pub trait Map2U8 { const OP: &'static str; fn f<T: WithDType>(&self, v1: &[T], l1: &Layout, v2: &[T], l2: &Layout) -> Result<Vec<u8>>; fn map( &self, v1: &CpuStorage, l1: &Layout, v2: &CpuStorage, l2: &Layout, ) -> Result<CpuStorage> { match (v1, v2) { (C::U8(v1), C::U8(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::U32(v1), C::U32(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::I64(v1), C::I64(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::BF16(v1), C::BF16(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::F16(v1), C::F16(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::F32(v1), C::F32(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), (C::F64(v1), C::F64(v2)) => Ok(C::U8(self.f(v1, l1, v2, l2)?)), _ => Err(Error::DTypeMismatchBinaryOp { 
lhs: v1.dtype(), rhs: v2.dtype(), op: Self::OP, } .bt()), } } } struct Cmp(CmpOp); impl Map2U8 for Cmp { const OP: &'static str = "cmp"; #[inline(always)] fn f<T: WithDType>( &self, lhs: &[T], lhs_l: &Layout, rhs: &[T], rhs_l: &Layout, ) -> Result<Vec<u8>> { let dst = match self.0 { CmpOp::Eq => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x == y)), CmpOp::Ne => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x != y)), CmpOp::Lt => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x < y)), CmpOp::Le => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x <= y)), CmpOp::Gt => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x > y)), CmpOp::Ge => binary_map(lhs_l, rhs_l, lhs, rhs, |x, y| u8::from(x >= y)), }; Ok(dst) } } struct WCond<'a, T: IntDType>(&'a [T], &'a Layout); impl<'a, I: IntDType> Map2 for WCond<'a, I> { const OP: &'static str = "where"; #[inline(always)] fn f<T: WithDType>(&self, t: &[T], t_l: &Layout, f: &[T], f_l: &Layout) -> Result<Vec<T>> { let vs = match ( self.1.contiguous_offsets(), t_l.contiguous_offsets(), f_l.contiguous_offsets(), ) { (Some((o1, o2)), Some((o_t1, o_t2)), Some((o_f1, o_f2))) => { let pred = &self.0[o1..o2]; let t = &t[o_t1..o_t2]; let f = &f[o_f1..o_f2]; pred.iter() .zip(t.iter().zip(f.iter())) .map(|(p, (&t, &f))| if p.is_true() { t } else { f }) .collect::<Vec<_>>() } _ => self .1 .strided_index() .zip(t_l.strided_index().zip(f_l.strided_index())) .map(|(i_p, (i_t, i_f))| { if self.0[i_p].is_true() { t[i_t] } else { f[i_f] } }) .collect::<Vec<_>>(), }; Ok(vs) } } struct ReduceIndex { reduce_dim_index: usize, use_min: bool, return_index: bool, } impl ReduceIndex { // The value gets replaced if f(s[current_acc], s[i]) returns true. #[inline(always)] fn fold_impl<T, U, F, G>(&self, src: &[T], src_l: &Layout, f: F, g: G) -> Result<Vec<U>> where T: Clone + Copy, U: Clone + Copy, F: Fn(T, T) -> bool, G: Fn(T, usize) -> U, { let reduce_dim_size = src_l.dims()[self.reduce_dim_index]; let reduce_dim_stride = src_l.stride()[self.reduce_dim_index]; let dst_len = src_l.shape().elem_count() / reduce_dim_size; let mut dst: Vec<U> = Vec::with_capacity(dst_len); let dst_to_set = dst.spare_capacity_mut(); let dst_to_set = unsafe { std::mem::transmute::<_, &mut [U]>(dst_to_set) }; match src_l.contiguous_offsets() { Some((o1, o2)) => { let src = &src[o1..o2]; if reduce_dim_stride == 1 { for (start_src_i, dst_v) in dst_to_set.iter_mut().enumerate() { let start_src_i = start_src_i * reduce_dim_size; let src = &src[start_src_i..start_src_i + reduce_dim_size]; let mut acc = 0; let mut val = src[0]; for (src_i, &s) in src.iter().enumerate() { if f(val, s) { acc = src_i; val = s } } *dst_v = g(val, acc) } } else { for (start_src_i, dst_v) in dst_to_set.iter_mut().enumerate() { let (p, q) = ( start_src_i / reduce_dim_stride, start_src_i % reduce_dim_stride, ); // start_src_i = p * reduce_dim_stride + q let start_src_i = p * reduce_dim_stride * reduce_dim_size + q; let src = &src[start_src_i..]; let mut acc = 0; let mut val = src[0]; for src_i in 0..reduce_dim_size { let s = src[src_i * reduce_dim_stride]; if f(val, s) { acc = src_i; val = s } } *dst_v = g(val, acc) } } } None => { let l = src_l.narrow(self.reduce_dim_index, 0, 1)?; for (unstr_index, src_index) in l.strided_index().enumerate() { let src = &src[src_index..]; let mut acc = 0; let mut val = src[0]; for src_i in 0..reduce_dim_size { let s = src[src_i * reduce_dim_stride]; if f(val, s) { acc = src_i; val = s } } dst_to_set[unstr_index] = g(val, acc) } } } unsafe { dst.set_len(dst_len) }; Ok(dst) } } 
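// A minimal sketch of the fold contract above (added for illustration, not an
// upstream test): argmax over the last dim of a contiguous [2, 3] tensor. The
// accumulator is only replaced when `f` returns true, so ties keep the first index.
#[cfg(test)]
mod reduce_index_sketch {
    use super::*;

    #[test]
    fn argmax_rows() -> Result<()> {
        let src = [1f32, 5., 3., 9., 2., 9.];
        let layout = Layout::contiguous((2, 3));
        // `use_min` and `return_index` are only consulted by `Map1Any::f`, not by
        // `fold_impl` itself, which takes `f` and `g` directly.
        let r = ReduceIndex { reduce_dim_index: 1, use_min: false, return_index: true };
        let idxs: Vec<u32> = r.fold_impl(&src, &layout, |x, y| x < y, |_v, i| i as u32)?;
        assert_eq!(idxs, vec![1, 0]);
        Ok(())
    }
}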
impl Map1Any for ReduceIndex { #[inline(always)] fn f<T: WithDType, W: Fn(Vec<T>) -> CpuStorage>( &self, src: &[T], src_l: &Layout, wrap: W, ) -> Result<CpuStorage> { if src_l.shape().elem_count() == 0 { Err(Error::EmptyTensor { op: "reduce" }.bt())? } let dst = match (self.return_index, self.use_min) { (false, true) => wrap(self.fold_impl(src, src_l, |x, y| x > y, |v, _i| v)?), (false, false) => wrap(self.fold_impl(src, src_l, |x, y| x < y, |v, _i| v)?), (true, true) => { CpuStorage::U32(self.fold_impl(src, src_l, |x, y| x > y, |_v, i| i as u32)?) } (true, false) => { CpuStorage::U32(self.fold_impl(src, src_l, |x, y| x < y, |_v, i| i as u32)?) } }; Ok(dst) } } struct ReduceSum<'a> { dst_shape: &'a Shape, reduce_dims: &'a [usize], reduce_dims_and_stride: Vec<(usize, usize)>, } impl<'a> ReduceSum<'a> { #[inline(always)] fn fold_impl<T>(&self, src: &[T], src_l: &Layout, start_elt: T) -> Result<Vec<T>> where T: WithDType, { let mut dst = vec![start_elt; self.dst_shape.elem_count()]; match src_l.contiguous_offsets() { Some((o1, o2)) => { let src = &src[o1..o2]; // Handle the case where we reduce over the last dimensions separately as it is // fairly common and easy to optimize. This rely on the layout being contiguous! // reduce_dims is sorted, check if it is ranging from a to n-1. let reduce_over_last_dims = self .reduce_dims .iter() .rev() .enumerate() .all(|(i, &v)| v == src_l.shape().rank() - 1 - i); if reduce_over_last_dims { let reduce_sz = self .reduce_dims_and_stride .iter() .map(|(u, _)| u) .product::<usize>(); for (dst_i, dst_v) in dst.iter_mut().enumerate() { let src_i = dst_i * reduce_sz; unsafe { T::vec_reduce_sum( src[src_i..src_i + reduce_sz].as_ptr(), dst_v, reduce_sz, ) }; } return Ok(dst); }; for (unstr_index, &src) in src.iter().enumerate() { let mut dst_index = unstr_index; // Set the reduce_dims indexes to 0. for &(dim, stride) in self.reduce_dims_and_stride.iter() { // The compiler is able to optimize the following in a single divmod op. let (pre, post) = (dst_index / stride, dst_index % stride); dst_index = (pre / dim) * stride + post; } dst[dst_index] += src; } } None => { for (unstr_index, src_index) in src_l.strided_index().enumerate() { let mut dst_index = unstr_index; // Set the reduce_dims indexes to 0. for &(dim, stride) in self.reduce_dims_and_stride.iter() { // The compiler is able to optimize the following in a single divmod op. let (pre, post) = (dst_index / stride, dst_index % stride); dst_index = (pre / dim) * stride + post; } dst[dst_index] += src[src_index]; } } } Ok(dst) } } impl<'a> Map1 for ReduceSum<'a> { #[inline(always)] fn f<T: WithDType>(&self, src: &[T], src_l: &Layout) -> Result<Vec<T>> { self.fold_impl(src, src_l, T::zero()) } } pub fn unary_map<T: Copy, U: Copy, F: FnMut(T) -> U>( vs: &[T], layout: &Layout, mut f: F, ) -> Vec<U> { match layout.strided_blocks() { crate::StridedBlocks::SingleBlock { start_offset, len } => vs [start_offset..start_offset + len] .iter() .map(|&v| f(v)) .collect(), crate::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { let mut result = Vec::with_capacity(layout.shape().elem_count()); // Specialize the case where block_len is one to avoid the second loop. 
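            // Added example: a hypothetical layout of shape [2, 3] with stride [6, 2]
            // (no unit innermost stride) is reported as six blocks of length 1 starting
            // at offsets 0, 2, 4, 6, 8, 10, so it takes the first branch below.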
if block_len == 1 { for index in block_start_index { let v = unsafe { vs.get_unchecked(index) }; result.push(f(*v)) } } else { for index in block_start_index { for offset in 0..block_len { let v = unsafe { vs.get_unchecked(index + offset) }; result.push(f(*v)) } } } result } } } pub fn unary_map_vec<T: Copy, U: Copy, F: FnMut(T) -> U, FV: FnMut(&[T], &mut [U])>( vs: &[T], layout: &Layout, mut f: F, mut f_vec: FV, ) -> Vec<U> { match layout.strided_blocks() { crate::StridedBlocks::SingleBlock { start_offset, len } => { let mut ys: Vec<U> = Vec::with_capacity(len); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<_, &mut [U]>(ys_to_set) }; f_vec(&vs[start_offset..start_offset + len], ys_to_set); // SAFETY: values are all set by f_vec. unsafe { ys.set_len(len) }; ys } crate::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { let el_count = layout.shape().elem_count(); // Specialize the case where block_len is one to avoid the second loop. if block_len == 1 { let mut result = Vec::with_capacity(el_count); for index in block_start_index { let v = unsafe { vs.get_unchecked(index) }; result.push(f(*v)) } result } else { let mut ys: Vec<U> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<_, &mut [U]>(ys_to_set) }; let mut dst_index = 0; for src_index in block_start_index { let vs = &vs[src_index..src_index + block_len]; let ys = &mut ys_to_set[dst_index..dst_index + block_len]; f_vec(vs, ys); dst_index += block_len; } // SAFETY: values are all set by f_vec. unsafe { ys.set_len(el_count) }; ys } } } } // This function maps over two strided index sequences. pub fn binary_map<T: Copy, U: Copy, F: FnMut(T, T) -> U>( lhs_l: &Layout, rhs_l: &Layout, lhs: &[T], rhs: &[T], mut f: F, ) -> Vec<U> { match (lhs_l.contiguous_offsets(), rhs_l.contiguous_offsets()) { (Some((o_l1, o_l2)), Some((o_r1, o_r2))) => lhs[o_l1..o_l2] .iter() .zip(rhs[o_r1..o_r2].iter()) .map(|(&l, &r)| f(l, r)) .collect(), (Some((o_l1, o_l2)), None) => { // TODO: Maybe we want to avoid going through the layout twice. match rhs_l.offsets_b() { Some(ob) => { let mut i_in_block = 0; let mut i_right_broadcast = 0; lhs[o_l1..o_l2] .iter() .map(|&l| { let r = unsafe { rhs.get_unchecked(i_in_block + ob.start) }; i_right_broadcast += 1; if i_right_broadcast >= ob.right_broadcast { i_in_block += 1; i_right_broadcast = 0; } if i_in_block >= ob.len { i_in_block = 0 } f(l, *r) }) .collect() } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } (None, Some((o_r1, o_r2))) => { // TODO: Maybe we want to avoid going through the layout twice. match lhs_l.offsets_b() { Some(ob) => { let mut i_in_block = 0; let mut i_right_broadcast = 0; rhs[o_r1..o_r2] .iter() .map(|&r| { let l = unsafe { lhs.get_unchecked(i_in_block + ob.start) }; i_right_broadcast += 1; if i_right_broadcast >= ob.right_broadcast { i_in_block += 1; i_right_broadcast = 0; } if i_in_block >= ob.len { i_in_block = 0 } f(*l, r) }) .collect() } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } _ => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } // Similar to binary_map but with vectorized variants. 
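// Added note: the broadcast fast paths below rely on `offsets_b`, which describes the
// non-contiguous side as `left_broadcast` repetitions of a block of `len` contiguous
// elements, each element repeated `right_broadcast` times. When `right_broadcast == 1`
// the blocks line up with contiguous slices of the other operand and `f_vec` can run on
// whole slices; otherwise the scalar `f` is applied with explicit broadcast bookkeeping.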
pub fn binary_map_vec<T: Copy, F: FnMut(T, T) -> T, FV: FnMut(&[T], &[T], &mut [T])>( lhs_l: &Layout, rhs_l: &Layout, lhs: &[T], rhs: &[T], mut f: F, mut f_vec: FV, ) -> Vec<T> { let el_count = lhs_l.shape().elem_count(); match (lhs_l.contiguous_offsets(), rhs_l.contiguous_offsets()) { (Some((o_l1, o_l2)), Some((o_r1, o_r2))) => { let mut ys: Vec<T> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<_, &mut [T]>(ys_to_set) }; f_vec(&lhs[o_l1..o_l2], &rhs[o_r1..o_r2], ys_to_set); // SAFETY: values are all set by f_vec. unsafe { ys.set_len(el_count) }; ys } (Some((o_l1, o_l2)), None) => match rhs_l.offsets_b() { Some(ob) if ob.right_broadcast == 1 => { let rhs = &rhs[ob.start..ob.start + ob.len]; let mut ys: Vec<T> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<_, &mut [T]>(ys_to_set) }; let mut dst_i = 0; for src_i in (o_l1..o_l2).step_by(ob.len) { f_vec( &lhs[src_i..src_i + ob.len], rhs, &mut ys_to_set[dst_i..dst_i + ob.len], ); dst_i += ob.len; } // SAFETY: values are all set by f_vec. unsafe { ys.set_len(el_count) }; ys } Some(ob) => { let rhs = &rhs[ob.start..ob.start + ob.len]; let mut ys = lhs[o_l1..o_l2].to_vec(); for idx_l in 0..ob.left_broadcast { let start = idx_l * ob.len * ob.right_broadcast; for (i, &r) in rhs.iter().enumerate() { let start = start + i * ob.right_broadcast; for v in ys[start..start + ob.right_broadcast].iter_mut() { *v = f(*v, r) } } } ys } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), }, (None, Some((o_r1, o_r2))) => match lhs_l.offsets_b() { Some(ob) if ob.right_broadcast == 1 => { let lhs = &lhs[ob.start..ob.start + ob.len]; let mut ys: Vec<T> = Vec::with_capacity(el_count); let ys_to_set = ys.spare_capacity_mut(); let ys_to_set = unsafe { std::mem::transmute::<_, &mut [T]>(ys_to_set) }; let mut dst_i = 0; for src_i in (o_r1..o_r2).step_by(ob.len) { f_vec( lhs, &rhs[src_i..src_i + ob.len], &mut ys_to_set[dst_i..dst_i + ob.len], ); dst_i += ob.len; } // SAFETY: values are all set by f_vec. 
unsafe { ys.set_len(el_count) }; ys } Some(ob) => { let lhs = &lhs[ob.start..ob.start + ob.len]; let mut ys = rhs[o_r1..o_r2].to_vec(); for idx_l in 0..ob.left_broadcast { let start = idx_l * ob.len * ob.right_broadcast; for (i, &l) in lhs.iter().enumerate() { let start = start + i * ob.right_broadcast; for v in ys[start..start + ob.right_broadcast].iter_mut() { *v = f(l, *v) } } } ys } None => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), }, _ => lhs_l .strided_index() .zip(rhs_l.strided_index()) .map(|(lhs_i, rhs_i)| f(lhs[lhs_i], rhs[rhs_i])) .collect(), } } struct Affine(f64, f64); impl Map1 for Affine { fn f<T: WithDType>(&self, vs: &[T], layout: &Layout) -> Result<Vec<T>> { let mul = T::from_f64(self.0); let add = T::from_f64(self.1); Ok(unary_map(vs, layout, |v| v * mul + add)) } } struct AvgPool2D((usize, usize), (usize, usize)); impl Map1 for AvgPool2D { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { // https://pytorch.org/docs/stable/generated/torch.nn.AvgPool2d.html let (k_h, k_w) = self.0; let (s_h, s_w) = self.1; let (b_sz, c, h, w) = layout.shape().dims4()?; let stride = layout.stride(); let (stride_h, stride_w) = (stride[2], stride[3]); let h_out = (h - k_h) / s_h + 1; let w_out = (w - k_w) / s_w + 1; let src_index = layout.start_offset(); let mut dst = vec![T::zero(); b_sz * c * h_out * w_out]; let scale = 1f64 / (k_h * k_w) as f64; let scale = T::from_f64(scale); for b_idx in 0..b_sz { let dst = &mut dst[b_idx * c * h_out * w_out..]; let src_index = src_index + b_idx * stride[0]; for c_idx in 0..c { let dst = &mut dst[c_idx * h_out * w_out..]; let src_index = src_index + c_idx * stride[1]; for h_idx in 0..h_out { for w_idx in 0..w_out { let mut sum = T::zero(); for m in 0..k_h { for n in 0..k_w { let m = s_h * h_idx + m; let n = s_w * w_idx + n; sum += src[src_index + m * stride_h + n * stride_w] } } dst[h_idx * w_out + w_idx] = sum * scale; } } } } Ok(dst) } } struct MaxPool2D((usize, usize), (usize, usize)); impl Map1 for MaxPool2D { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { // https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html let (k_h, k_w) = self.0; let (s_h, s_w) = self.1; let (b_sz, c, h, w) = layout.shape().dims4()?; let stride = layout.stride(); let (stride_h, stride_w) = (stride[2], stride[3]); let h_out = (h - k_h) / s_h + 1; let w_out = (w - k_w) / s_w + 1; let src_index = layout.start_offset(); let mut dst = vec![T::zero(); b_sz * c * h_out * w_out]; for b_idx in 0..b_sz { let dst = &mut dst[b_idx * c * h_out * w_out..]; let src_index = src_index + b_idx * stride[0]; for c_idx in 0..c { let dst = &mut dst[c_idx * h_out * w_out..]; let src_index = src_index + c_idx * stride[1]; for h_idx in 0..h_out { for w_idx in 0..w_out { let mut largest = src[src_index + s_h * h_idx * stride_h + s_w * w_idx * stride_w]; for m in 0..k_h { for n in 0..k_w { let m = s_h * h_idx + m; let n = s_w * w_idx + n; if largest < src[src_index + m * stride_h + n * stride_w] { largest = src[src_index + m * stride_h + n * stride_w] } } } dst[h_idx * w_out + w_idx] = largest; } } } } Ok(dst) } } struct UpsampleNearest1D(usize); impl Map1 for UpsampleNearest1D { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { // TODO: Specialized implementation for the case 2*sz? 
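        // Worked example (added): with src_sz = 4 and dst_sz = 8, scale_sz = 0.5 and
        // dst indices 0..8 read from src indices [0, 0, 1, 1, 2, 2, 3, 3], i.e.
        // floor(idx * scale_sz) clamped to src_sz - 1.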
let dst_sz = self.0; let (b_sz, c, src_sz) = layout.shape().dims3()?; let stride = layout.stride(); let stride_sz = stride[2]; let src_index = layout.start_offset(); let scale_sz = src_sz as f64 / dst_sz as f64; let mut dst = vec![T::zero(); b_sz * c * dst_sz]; let src_idxs = (0..dst_sz) .map(|idx| usize::min(src_sz - 1, (idx as f64 * scale_sz) as usize)) .collect::<Vec<_>>(); for b_idx in 0..b_sz { let dst = &mut dst[b_idx * c * dst_sz..]; let src_index = src_index + b_idx * stride[0]; for c_idx in 0..c { let dst = &mut dst[c_idx * dst_sz..]; let src_index = src_index + c_idx * stride[1]; for (idx, src_idx) in src_idxs.iter().enumerate() { dst[idx] = src[src_index + src_idx * stride_sz] } } } Ok(dst) } } struct UpsampleNearest2D(usize, usize); impl Map1 for UpsampleNearest2D { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { // TODO: Specialized implementation for the case 2*h, 2*w? let (dst_h, dst_w) = (self.0, self.1); let (b_sz, c, src_h, src_w) = layout.shape().dims4()?; let stride = layout.stride(); let (stride_h, stride_w) = (stride[2], stride[3]); let src_index = layout.start_offset(); let scale_h = src_h as f64 / dst_h as f64; let scale_w = src_w as f64 / dst_w as f64; let mut dst = vec![T::zero(); b_sz * c * dst_h * dst_w]; let src_h_idxs = (0..dst_h) .map(|h_idx| usize::min(src_h - 1, (h_idx as f64 * scale_h) as usize)) .collect::<Vec<_>>(); let src_w_idxs = (0..dst_w) .map(|w_idx| usize::min(src_w - 1, (w_idx as f64 * scale_w) as usize)) .collect::<Vec<_>>(); for b_idx in 0..b_sz { let dst = &mut dst[b_idx * c * dst_h * dst_w..]; let src_index = src_index + b_idx * stride[0]; for c_idx in 0..c { let dst = &mut dst[c_idx * dst_h * dst_w..]; let src_index = src_index + c_idx * stride[1]; for (h_idx, src_h_idx) in src_h_idxs.iter().enumerate() { for (w_idx, src_w_idx) in src_w_idxs.iter().enumerate() { let src_index = src_index + src_h_idx * stride_h + src_w_idx * stride_w; dst[h_idx * dst_w + w_idx] = src[src_index] } } } } Ok(dst) } } struct Gather<'a, I: IntDType> { ids: &'a [I], ids_l: &'a Layout, dim: usize, } impl<'a, I: IntDType> Map1 for Gather<'a, I> { fn f<T: WithDType>(&self, src: &[T], src_l: &Layout) -> Result<Vec<T>> { let ids = match self.ids_l.contiguous_offsets() { Some((a, b)) => &self.ids[a..b], None => Err(Error::RequiresContiguous { op: "gather" }.bt())?, }; let src = match src_l.contiguous_offsets() { Some((a, b)) => &src[a..b], None => Err(Error::RequiresContiguous { op: "gather" }.bt())?, }; let dim = self.dim; let ids_dims = self.ids_l.dims(); let src_dims = src_l.dims(); let dst_len: usize = ids_dims.iter().product(); let dst_left_len: usize = ids_dims[..dim].iter().product(); let dst_dim_len = ids_dims[dim]; let dst_right_len: usize = ids_dims[dim + 1..].iter().product(); let src_dim_len = src_dims[dim]; let src_right_len: usize = src_dims[dim + 1..].iter().product(); let mut dst = vec![T::zero(); dst_len]; for left_i in 0..dst_left_len { let start_src_idx = left_i * src_right_len * src_dim_len; let start_dst_idx = left_i * dst_right_len * dst_dim_len; for i in 0..dst_dim_len { let start_dst_idx = start_dst_idx + i * dst_right_len; for right_i in 0..dst_right_len { let dst_idx = start_dst_idx + right_i; let index = ids[dst_idx].as_usize(); if index >= src_dim_len { Err(Error::InvalidIndex { index, size: src_dim_len, op: "gather", } .bt())? 
} let src_idx = start_src_idx + index * src_right_len + right_i; dst[dst_idx] = src[src_idx] } } } Ok(dst) } } struct IndexSelect<'a, T: IntDType> { ids: &'a [T], ids_l: &'a Layout, dim: usize, } impl<'a, I: IntDType> Map1 for IndexSelect<'a, I> { fn f<T: WithDType>(&self, src: &[T], layout: &Layout) -> Result<Vec<T>> { let src = match layout.contiguous_offsets() { Some((a, b)) => &src[a..b], None => Err(Error::RequiresContiguous { op: "index-select" }.bt())?, }; let dim = self.dim; let n_ids = match self.ids_l.dims() { [n_ids] => *n_ids, d => Err(Error::UnexpectedNumberOfDims { expected: 1, got: d.len(), shape: self.ids_l.shape().clone(), } .bt())?, }; let stride_ids = self.ids_l.stride()[0]; let mut dst_dims = layout.dims().to_vec(); let src_dim = dst_dims[dim]; dst_dims[dim] = n_ids; let dst_len: usize = dst_dims.iter().product(); let left_len: usize = dst_dims[..dim].iter().product(); let right_len: usize = dst_dims[dim + 1..].iter().product(); let mut dst = vec![T::zero(); dst_len]; for left_i in 0..left_len { let start_src_idx = left_i * right_len * src_dim; let start_dst_idx = left_i * right_len * n_ids; for i in 0..n_ids { let index = self.ids[self.ids_l.start_offset() + stride_ids * i].as_usize(); if index >= src_dim { Err(Error::InvalidIndex { index, size: src_dim, op: "index-select", } .bt())? } let start_src_idx = start_src_idx + index * right_len; let start_dst_idx = start_dst_idx + i * right_len; dst[start_dst_idx..start_dst_idx + right_len] .copy_from_slice(&src[start_src_idx..start_src_idx + right_len]) } } Ok(dst) } } struct ScatterAdd<'a, I: IntDType> { ids: &'a [I], ids_l: &'a Layout, dim: usize, } impl<'a, I: IntDType> Map2 for ScatterAdd<'a, I> { const OP: &'static str = "scatter-add"; fn f<T: WithDType>(&self, v1: &[T], l1: &Layout, src: &[T], src_l: &Layout) -> Result<Vec<T>> { let dst_len = l1.shape().elem_count(); let mut dst = vec![T::zero(); dst_len]; copy_strided_src_(v1, &mut dst, 0, l1); let src = match src_l.contiguous_offsets() { None => Err(Error::RequiresContiguous { op: "scatter-add" }.bt())?, Some((o1, o2)) => &src[o1..o2], }; let dim = self.dim; let ids_dims = self.ids_l.dims(); let dst_dims = l1.dims(); let dst_dim_len = dst_dims[dim]; let dst_right_len: usize = dst_dims[dim + 1..].iter().product(); let ids_left_len: usize = ids_dims[..dim].iter().product(); let ids_dim_len = ids_dims[dim]; let ids_right_len: usize = ids_dims[dim + 1..].iter().product(); let ids = match self.ids_l.contiguous_offsets() { Some((a, b)) => &self.ids[a..b], None => Err(Error::RequiresContiguous { op: "gather" }.bt())?, }; for left_i in 0..ids_left_len { let start_ids_idx = left_i * ids_right_len * ids_dim_len; let start_dst_idx = left_i * dst_right_len * dst_dim_len; for i in 0..ids_dim_len { let start_ids_idx = start_ids_idx + i * ids_right_len; for right_i in 0..dst_right_len { let ids_idx = start_ids_idx + right_i; let index = ids[ids_idx].as_usize(); if index >= dst_dim_len { Err(Error::InvalidIndex { index, size: dst_dim_len, op: "gather", } .bt())? 
} let dst_idx = start_dst_idx + index * dst_right_len + right_i; dst[dst_idx] += src[ids_idx] } } } Ok(dst) } } struct IndexAdd<'a, I: IntDType> { ids: &'a [I], dim: usize, } impl<'a, I: IntDType> Map2 for IndexAdd<'a, I> { const OP: &'static str = "index-add"; // https://pytorch.org/docs/stable/generated/torch.Tensor.index_add_.html#torch.Tensor.index_add_ // v1, l1 -> self fn f<T: WithDType>(&self, v1: &[T], l1: &Layout, src: &[T], src_l: &Layout) -> Result<Vec<T>> { let dst_len = l1.shape().elem_count(); let mut dst = vec![T::zero(); dst_len]; copy_strided_src_(v1, &mut dst, 0, l1); let src = match src_l.contiguous_offsets() { None => Err(Error::RequiresContiguous { op: "index-add" }.bt())?, Some((o1, o2)) => &src[o1..o2], }; let dim = self.dim; let max_idx = l1.dims()[dim]; let pre_dim = src_l.dims()[..dim].iter().product::<usize>(); let src_dim_sz = src_l.dims()[dim]; let post_dim = src_l.dims()[dim + 1..].iter().product::<usize>(); if dim == 0 { for (src_idx, dst_idx) in self.ids.iter().enumerate() { let dst_idx = dst_idx.as_usize(); if dst_idx >= max_idx { Err(Error::InvalidIndex { index: dst_idx, op: "index-add", size: max_idx, })? } let src_idx = src_idx * post_dim; let dst_idx = dst_idx * post_dim; let src = &src[src_idx..src_idx + post_dim]; let dst = &mut dst[dst_idx..dst_idx + post_dim]; for (d, &s) in dst.iter_mut().zip(src.iter()) { *d += s } } } else { for (src_idx, dst_idx) in self.ids.iter().enumerate() { let dst_idx = dst_idx.as_usize(); if dst_idx >= max_idx { Err(Error::InvalidIndex { index: dst_idx, op: "index-add", size: max_idx, })? } for pre_i in 0..pre_dim { let pre_src_i = (pre_i * src_dim_sz + src_idx) * post_dim; let pre_dst_i = (pre_i * max_idx + dst_idx) * post_dim; let src = &src[pre_src_i..pre_src_i + post_dim]; let dst = &mut dst[pre_dst_i..pre_dst_i + post_dim]; for (d, &s) in dst.iter_mut().zip(src.iter()) { *d += s } } } } Ok(dst) } } fn copy_strided_src_<T: Copy>(src: &[T], dst: &mut [T], dst_offset: usize, src_l: &Layout) { match src_l.strided_blocks() { crate::StridedBlocks::SingleBlock { start_offset, len } => { let to_copy = (dst.len() - dst_offset).min(len); dst[dst_offset..dst_offset + to_copy] .copy_from_slice(&src[start_offset..start_offset + to_copy]) } crate::StridedBlocks::MultipleBlocks { block_start_index, block_len: 1, } => { for (dst_index, src_index) in block_start_index.enumerate() { let dst_index = dst_index + dst_offset; if dst_index >= dst.len() { break; } dst[dst_index] = src[src_index] } } crate::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { let mut dst_index = dst_offset; for src_index in block_start_index { let next_dst_index = dst_index + block_len; if dst_index >= dst.len() { break; } let to_copy = usize::min(block_len, dst.len() - dst_index); dst[dst_index..dst_index + to_copy] .copy_from_slice(&src[src_index..src_index + to_copy]); dst_index = next_dst_index } } } } struct Conv1D<'a>(&'a crate::conv::ParamsConv1D); impl<'a> Map2 for Conv1D<'a> { const OP: &'static str = "conv1d"; fn f<T: WithDType>(&self, inp: &[T], inp_l: &Layout, k: &[T], k_l: &Layout) -> Result<Vec<T>> { let p = self.0; let inp = &inp[inp_l.start_offset()..]; let k = &k[k_l.start_offset()..]; let (inp_s0, inp_s1, inp_s2) = crate::shape::dims3(inp_l.stride())?; let (k_s0, k_s1, k_s2) = crate::shape::dims3(k_l.stride())?; let l_out = p.l_out(); let dst_elems = p.c_out * l_out * p.b_size; // The output shape is [b_size, c_out, l_out] let dst = vec![T::zero(); dst_elems]; // TODO: Avoid making this copy if `inp` already has the 
appropriate layout. let mut inp_cont = vec![T::zero(); p.b_size * p.c_in * p.l_in]; for b_idx in 0..p.b_size { for src_l in 0..p.l_in { for src_c_idx in 0..p.c_in { let inp_idx = b_idx * inp_s0 + src_c_idx * inp_s1 + src_l * inp_s2; inp_cont[b_idx * p.l_in * p.c_in + src_l * p.c_in + src_c_idx] = inp[inp_idx] } } } for offset in 0..p.k_size { (0..p.c_out).into_par_iter().for_each(|dst_c_idx| { let dst_idx = dst_c_idx * l_out; let k_cont = (0..p.c_in) .map(|c_in_idx| k[dst_c_idx * k_s0 + c_in_idx * k_s1 + offset * k_s2]) .collect::<Vec<_>>(); for b_idx in 0..p.b_size { let dst_idx = dst_idx + b_idx * p.c_out * l_out; for dst_l in 0..l_out { let dst_idx = dst_idx + dst_l; let src_l = p.stride * dst_l + offset * p.dilation; if src_l < p.padding || src_l >= p.padding + p.l_in { continue; } let src_l = src_l - p.padding; let inp_cont = &inp_cont[b_idx * p.l_in * p.c_in + src_l * p.c_in..]; assert!(inp_cont.len() >= p.c_in); assert!(k_cont.len() >= p.c_in); let mut d = T::zero(); unsafe { T::vec_dot(inp_cont.as_ptr(), k_cont.as_ptr(), &mut d, p.c_in) } let dst_p = dst.as_ptr(); // Safety: dst_idx are uniques per dst_c_idx which is used to parallelise // the different tasks so no two threads can try to write at the same // location. unsafe { let ptr = dst_p.add(dst_idx) as *mut T; *ptr += d } } } }) } Ok(dst) } } struct Im2Col1D { l_k: usize, stride: usize, dilation: usize, padding: usize, } impl Im2Col1D { fn l_out(&self, l: usize) -> usize { (l + 2 * self.padding - self.dilation * (self.l_k - 1) - 1) / self.stride + 1 } } impl Map1 for Im2Col1D { fn f<T: WithDType>(&self, vs: &[T], layout: &Layout) -> Result<Vec<T>> { let &Self { l_k, stride, dilation, padding, } = self; let (b, c, l) = layout.shape().dims3()?; let l_out = self.l_out(l); let src = &vs[layout.start_offset()..]; let mut dst = vec![T::zero(); b * l_out * c * l_k]; let (src_s0, src_s1, src_s2) = { let s = layout.stride(); (s[0], s[1], s[2]) }; // TODO: provide specialized kernels for the common use cases. // - l_k = 1 // - padding = 0 // - stride = 1 // - dilation = 1 for b_idx in 0..b { let src_idx = b_idx * src_s0; let dst_idx = b_idx * l_out * c * l_k; for l_idx in 0..l_out { let dst_idx = dst_idx + l_idx * c * l_k; for c_idx in 0..c { let dst_idx = dst_idx + c_idx * l_k; let src_idx = c_idx * src_s1 + src_idx; for l_k_idx in 0..l_k { let src_l = l_idx * stride + l_k_idx * dilation; if padding != 0 && (src_l < padding || src_l >= l + padding) { continue; } let src_l = src_l - padding; let src_idx = src_idx + src_l * src_s2; let dst_idx = dst_idx + l_k_idx; dst[dst_idx] = src[src_idx] } } } } Ok(dst) } } struct Im2Col { h_k: usize, w_k: usize, stride: usize, dilation: usize, padding: usize, } impl Im2Col { fn hw_out(&self, h: usize, w: usize) -> (usize, usize) { let h_out = (h + 2 * self.padding - self.dilation * (self.h_k - 1) - 1) / self.stride + 1; let w_out = (w + 2 * self.padding - self.dilation * (self.w_k - 1) - 1) / self.stride + 1; (h_out, w_out) } } impl Map1 for Im2Col { fn f<T: WithDType>(&self, vs: &[T], layout: &Layout) -> Result<Vec<T>> { let &Self { h_k, w_k, stride, dilation, padding, } = self; let (b, c, h, w) = layout.shape().dims4()?; let (h_out, w_out) = self.hw_out(h, w); let src = &vs[layout.start_offset()..]; let mut dst = vec![T::zero(); b * h_out * w_out * c * h_k * w_k]; let (src_s0, src_s1, src_s2, src_s3) = { let s = layout.stride(); (s[0], s[1], s[2], s[3]) }; // TODO: provide specialized kernels for the common use cases. 
// - h_k = w_k = 1 // - padding = 0 // - stride = 1 // - dilation = 1 for b_idx in 0..b { let src_idx = b_idx * src_s0; let dst_idx = b_idx * h_out * w_out * c * h_k * w_k; for h_idx in 0..h_out { let dst_idx = dst_idx + h_idx * w_out * c * h_k * w_k; for w_idx in 0..w_out { let dst_idx = dst_idx + w_idx * c * h_k * w_k; for c_idx in 0..c { let dst_idx = dst_idx + c_idx * h_k * w_k; let src_idx = c_idx * src_s1 + src_idx; for h_k_idx in 0..h_k { let src_h = h_idx * stride + h_k_idx * dilation; if padding != 0 && (src_h < padding || src_h >= h + padding) { continue; } let src_h = src_h - padding; let src_idx = src_idx + src_h * src_s2; let dst_idx = dst_idx + h_k_idx * w_k; for w_k_idx in 0..w_k { let src_w = w_idx * stride + w_k_idx * dilation; if padding != 0 && (src_w < padding || src_w >= w + padding) { continue; } let src_w = src_w - padding; let src_idx = src_idx + src_w * src_s3; let dst_idx = dst_idx + w_k_idx; dst[dst_idx] = src[src_idx] } } } } } } Ok(dst) } } struct ConvTranspose1D<'a>(&'a crate::conv::ParamsConvTranspose1D); impl<'a> Map2 for ConvTranspose1D<'a> { const OP: &'static str = "conv_transpose1d"; fn f<T: WithDType>(&self, inp: &[T], inp_l: &Layout, k: &[T], k_l: &Layout) -> Result<Vec<T>> { let p = self.0; let inp = &inp[inp_l.start_offset()..]; let (inp_s0, inp_s1, inp_s2) = crate::shape::dims3(inp_l.stride())?; let (k_s0, k_s1, k_s2) = crate::shape::dims3(k_l.stride())?; let l_out = p.l_out(); // Output shape: [b_size, c_out, l_out]. let dst_elems = p.c_out * l_out * p.b_size; let dst = vec![T::zero(); dst_elems]; let dst_s0 = p.c_out * l_out; let dst_s1 = l_out; let dst_s2 = 1; // TODO: Avoid making this copy if `inp` already has the appropriate layout. let mut inp_cont = vec![T::zero(); p.b_size * p.c_in * p.l_in]; let cont_s0 = p.l_in * p.c_in; let cont_s1 = p.c_in; for b_idx in 0..p.b_size { for l_idx in 0..p.l_in { for c_idx in 0..p.c_in { let src_idx = b_idx * inp_s0 + c_idx * inp_s1 + l_idx * inp_s2; let dst_idx = b_idx * cont_s0 + l_idx * cont_s1 + c_idx; inp_cont[dst_idx] = inp[src_idx] } } } for k_idx in 0..p.k_size { (0..p.c_out).into_par_iter().for_each(|dst_c_idx| { let k_cont = (0..p.c_in) .map(|c_in_idx| k[c_in_idx * k_s0 + dst_c_idx * k_s1 + k_idx * k_s2]) .collect::<Vec<_>>(); for b_idx in 0..p.b_size { for l_idx in 0..p.l_in { let out_idx = l_idx * p.stride + k_idx * p.dilation; if out_idx < p.padding { continue; } let out_idx = out_idx - p.padding; if out_idx < l_out { let inp_cont = &inp_cont[b_idx * cont_s0 + l_idx * cont_s1..]; let dst_idx = b_idx * dst_s0 + out_idx * dst_s2 + dst_c_idx * dst_s1; let mut d = T::zero(); unsafe { T::vec_dot(inp_cont.as_ptr(), k_cont.as_ptr(), &mut d, p.c_in) } let dst_p = dst.as_ptr(); // Safety: dst_idx are uniques per dst_c_idx which is used to // parallelise the different tasks so no two threads can try to // write at the same location. unsafe { let ptr = dst_p.add(dst_idx) as *mut T; *ptr += d } } } } }) } Ok(dst) } } struct Conv2D<'a>(&'a crate::conv::ParamsConv2D); impl<'a> Map2 for Conv2D<'a> { const OP: &'static str = "conv2d"; fn f<T: WithDType>(&self, inp: &[T], inp_l: &Layout, k: &[T], k_l: &Layout) -> Result<Vec<T>> { let p = self.0; let inp = &inp[inp_l.start_offset()..]; let (inp_s0, inp_s1, inp_s2, inp_s3) = crate::shape::dims4(inp_l.stride())?; let k = &k[k_l.start_offset()..]; let (k_s0, k_s1, k_s2, k_s3) = crate::shape::dims4(k_l.stride())?; let (out_h, out_w) = (p.out_h(), p.out_w()); // Output shape: [b_size, c_out, out_h, out_w]. 
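        // Added note on the approach below: the input is first repacked channels-last
        // so that c_in is contiguous, then for each kernel offset (offset_h, offset_w)
        // every output position accumulates a single `vec_dot` over the input channels,
        // parallelised across output channels with rayon.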
let dst = vec![T::zero(); p.b_size * p.c_out * out_h * out_w]; // TODO: Avoid making this copy if `inp` already has the appropriate layout. let mut inp_cont = vec![T::zero(); p.b_size * p.c_in * p.i_h * p.i_w]; let cont_s0 = p.i_h * p.i_w * p.c_in; let cont_s1 = p.i_w * p.c_in; let cont_s2 = p.c_in; for b_idx in 0..p.b_size { for h_idx in 0..p.i_h { for w_idx in 0..p.i_w { for c_idx in 0..p.c_in { let src_idx = b_idx * inp_s0 + c_idx * inp_s1 + h_idx * inp_s2 + w_idx * inp_s3; let dst_idx = b_idx * cont_s0 + h_idx * cont_s1 + w_idx * cont_s2 + c_idx; inp_cont[dst_idx] = inp[src_idx] } } } } for offset_h in 0..p.k_h { for offset_w in 0..p.k_w { (0..p.c_out).into_par_iter().for_each(|dst_c_idx| { let dst_idx = dst_c_idx * out_w * out_h; let k_cont = (0..p.c_in) .map(|c_in_idx| { k[dst_c_idx * k_s0 + c_in_idx * k_s1 + offset_h * k_s2 + offset_w * k_s3] }) .collect::<Vec<_>>(); for b_idx in 0..p.b_size { let dst_idx = dst_idx + b_idx * p.c_out * out_h * out_w; for dst_h in 0..out_h { let dst_idx = dst_idx + dst_h * out_w; let src_h = p.stride * dst_h + offset_h * p.dilation; if src_h < p.padding || src_h >= p.i_h + p.padding { continue; } let src_h = src_h - p.padding; for dst_w in 0..out_w { let dst_idx = dst_idx + dst_w; let src_w = p.stride * dst_w + offset_w * p.dilation; if src_w < p.padding || src_w >= p.i_w + p.padding { continue; } let src_w = src_w - p.padding; let inp_cont = &inp_cont [b_idx * cont_s0 + src_h * cont_s1 + src_w * cont_s2..]; assert!(inp_cont.len() >= p.c_in); assert!(k_cont.len() >= p.c_in); let mut d = T::zero(); unsafe { T::vec_dot(inp_cont.as_ptr(), k_cont.as_ptr(), &mut d, p.c_in) } let dst_p = dst.as_ptr(); // Safety: dst_idx are uniques per dst_c_idx which is used to parallelise // the different tasks so no two threads can try to write at the same // location. unsafe { let ptr = dst_p.add(dst_idx) as *mut T; *ptr += d } } } } }); } } Ok(dst) } } struct ConvTranspose2D<'a>(&'a crate::conv::ParamsConvTranspose2D); impl<'a> Map2 for ConvTranspose2D<'a> { const OP: &'static str = "conv_transpose2d"; fn f<T: WithDType>(&self, inp: &[T], inp_l: &Layout, k: &[T], k_l: &Layout) -> Result<Vec<T>> { let p = self.0; let inp = &inp[inp_l.start_offset()..]; let (inp_s0, inp_s1, inp_s2, inp_s3) = crate::shape::dims4(inp_l.stride())?; let k = &k[k_l.start_offset()..]; let (k_s0, k_s1, k_s2, k_s3) = crate::shape::dims4(k_l.stride())?; let (out_h, out_w) = (p.out_h(), p.out_w()); // Output shape: [b_size, c_out, out_h, out_w]. let dst = vec![T::zero(); p.b_size * p.c_out * out_h * out_w]; let dst_s0 = p.c_out * out_h * out_w; let dst_s1 = out_h * out_w; let dst_s2 = out_w; let dst_s3 = 1; // TODO: Avoid making this copy if `inp` already has the appropriate layout. 
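        // Added note: the transposed convolution below is scatter-style. Each input
        // position (inp_y, inp_x) contributes to the output position
        // (inp_y * stride + k_y * dilation - padding, inp_x * stride + k_x * dilation - padding)
        // whenever that lands inside the output bounds, mirroring the gather in Conv2D.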
let mut inp_cont = vec![T::zero(); p.b_size * p.c_in * p.i_h * p.i_w]; let cont_s0 = p.i_h * p.i_w * p.c_in; let cont_s1 = p.i_w * p.c_in; let cont_s2 = p.c_in; for b_idx in 0..p.b_size { for h_idx in 0..p.i_h { for w_idx in 0..p.i_w { for c_idx in 0..p.c_in { let src_idx = b_idx * inp_s0 + c_idx * inp_s1 + h_idx * inp_s2 + w_idx * inp_s3; let dst_idx = b_idx * cont_s0 + h_idx * cont_s1 + w_idx * cont_s2 + c_idx; inp_cont[dst_idx] = inp[src_idx] } } } } for k_y in 0..p.k_h { for k_x in 0..p.k_w { (0..p.c_out).into_par_iter().for_each(|dst_c_idx| { let k_cont = (0..p.c_in) .map(|c_in_idx| { k[c_in_idx * k_s0 + dst_c_idx * k_s1 + k_y * k_s2 + k_x * k_s3] }) .collect::<Vec<_>>(); for b_idx in 0..p.b_size { for inp_y in 0..p.i_h { for inp_x in 0..p.i_w { let out_x = inp_x * p.stride + k_x * p.dilation; let out_y = inp_y * p.stride + k_y * p.dilation; if out_x < p.padding || out_y < p.padding { continue; } let out_x = out_x - p.padding; let out_y = out_y - p.padding; if out_x < out_w && out_y < out_h { let inp_cont = &inp_cont [b_idx * cont_s0 + inp_y * cont_s1 + inp_x * cont_s2..]; let dst_idx = b_idx * dst_s0 + out_y * dst_s2 + out_x * dst_s3 + dst_c_idx * dst_s1; let mut d = T::zero(); unsafe { T::vec_dot( inp_cont.as_ptr(), k_cont.as_ptr(), &mut d, p.c_in, ) } let dst_p = dst.as_ptr(); // Safety: dst_idx are uniques per dst_c_idx which is used to // parallelise the different tasks so no two threads can try to // write at the same location. unsafe { let ptr = dst_p.add(dst_idx) as *mut T; *ptr += d } } } } } }) } } Ok(dst) } } struct MatMul((usize, usize, usize, usize)); impl MatMul { fn striding_error(&self, lhs_l: &Layout, rhs_l: &Layout, msg: &'static str) -> Error { Error::MatMulUnexpectedStriding(Box::new(crate::error::MatMulUnexpectedStriding { lhs_l: lhs_l.clone(), rhs_l: rhs_l.clone(), bmnk: self.0, msg, })) .bt() } } impl Map2 for MatMul { const OP: &'static str = "mat_mul"; #[cfg(all(not(feature = "mkl"), not(feature = "accelerate")))] fn f<T: 'static + WithDType + num_traits::Num + Copy>( &self, lhs: &[T], lhs_l: &Layout, rhs: &[T], rhs_l: &Layout, ) -> Result<Vec<T>> { use gemm::{gemm, Parallelism}; match T::DTYPE { DType::F16 | DType::F32 | DType::F64 => {} _ => Err(Error::UnsupportedDTypeForOp(T::DTYPE, "matmul").bt())?, } let (b, m, n, k) = self.0; let lhs = &lhs[lhs_l.start_offset()..]; let rhs = &rhs[rhs_l.start_offset()..]; let lhs_stride = lhs_l.stride(); let rhs_stride = rhs_l.stride(); let rank = lhs_stride.len(); let lhs_cs = lhs_stride[rank - 1]; let lhs_rs = lhs_stride[rank - 2]; let rhs_cs = rhs_stride[rank - 1]; let rhs_rs = rhs_stride[rank - 2]; let a_skip: usize = match lhs_stride[..rank - 2] { [s1, stride] if s1 == stride * lhs_l.dims()[1] => stride, [stride] => stride, [] => m * k, _ => Err(self.striding_error(lhs_l, rhs_l, "non-contiguous lhs"))?, }; let b_skip: usize = match rhs_stride[..rank - 2] { [s1, stride] if s1 == stride * rhs_l.dims()[1] => stride, [stride] => stride, [] => n * k, _ => Err(self.striding_error(lhs_l, rhs_l, "non-contiguous rhs"))?, }; let c_skip: usize = m * n; let dst_shape: Shape = (m, n).into(); let dst_strides = dst_shape.stride_contiguous(); let dst_rs = dst_strides[0]; let dst_cs = dst_strides[1]; let mut dst = vec![T::zero(); b * m * n]; let num_threads = crate::utils::get_num_threads(); let parallelism = if num_threads > 1 { Parallelism::Rayon(num_threads) } else { Parallelism::None }; for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { gemm( /* 
m: usize = */ m, /* n: usize = */ n, /* k: usize = */ k, /* dst: *mut T = */ dst_p.as_mut_ptr(), /* dst_cs: isize = */ dst_cs as isize, /* dst_rs: isize = */ dst_rs as isize, /* read_dst: bool = */ false, /* lhs: *const T = */ lhs_p.as_ptr(), /* lhs_cs: isize = */ lhs_cs as isize, /* lhs_rs: isize = */ lhs_rs as isize, /* rhs: *const T = */ rhs_p.as_ptr(), /* rhs_cs: isize = */ rhs_cs as isize, /* rhs_rs: isize = */ rhs_rs as isize, /* alpha: T = */ T::zero(), /* beta: T = */ T::one(), /* conj_dst: bool = */ false, /* conj_lhs: bool = */ false, /* conj_rhs: bool = */ false, parallelism, ) } } Ok(dst) } #[cfg(feature = "accelerate")] fn f<T: 'static + WithDType + num_traits::Num + Copy>( &self, lhs: &[T], lhs_l: &Layout, rhs: &[T], rhs_l: &Layout, ) -> Result<Vec<T>> { let (b, m, n, k) = self.0; let lhs = &lhs[lhs_l.start_offset()..]; let rhs = &rhs[rhs_l.start_offset()..]; let lhs_stride = lhs_l.stride(); let rhs_stride = rhs_l.stride(); let rank = lhs_stride.len(); let a_skip: usize = match lhs_stride[..rank - 2] { [s1, stride] if s1 == stride * lhs_l.dims()[1] => stride, [stride] => stride, [] => m * k, _ => Err(self.striding_error(lhs_l, rhs_l, "non-contiguous lhs"))?, }; let b_skip: usize = match rhs_stride[..rank - 2] { [s1, stride] if s1 == stride * rhs_l.dims()[1] => stride, [stride] => stride, [] => n * k, _ => Err(self.striding_error(lhs_l, rhs_l, "non-contiguous rhs"))?, }; let c_skip: usize = m * n; let rhs_m1 = rhs_stride[rhs_stride.len() - 1]; let rhs_m2 = rhs_stride[rhs_stride.len() - 2]; let lhs_m1 = lhs_stride[lhs_stride.len() - 1]; let lhs_m2 = lhs_stride[lhs_stride.len() - 2]; let (lda, transa) = if rhs_m1 == 1 && rhs_m2 == n { (n as i32, b'N') } else if rhs_m1 == k && rhs_m2 == 1 { (k as i32, b'T') } else { Err(self.striding_error(lhs_l, rhs_l, "non-contiguous rhs"))? }; // The b tensor has dims batching, m, k (lhs) let (ldb, transb) = if lhs_m1 == 1 && lhs_m2 == k { (k as i32, b'N') } else if lhs_m1 == m && lhs_m2 == 1 { (m as i32, b'T') } else { Err(self.striding_error(lhs_l, rhs_l, "non-contiguous lhs"))? 
}; let mut dst = vec![T::zero(); b * m * n]; match T::DTYPE { DType::F16 => { crate::bail!("the accelerate backend does not support f16 matmul") } DType::F32 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f32; let b = lhs_p.as_ptr() as *const f32; let c = dst_p.as_mut_ptr() as *mut f32; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::accelerate::sgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ 1., /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ 0., /* c= */ c, /* ldc= */ n as i32, ) } } } DType::F64 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f64; let b = lhs_p.as_ptr() as *const f64; let c = dst_p.as_mut_ptr() as *mut f64; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::accelerate::dgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ 1., /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ 0., /* c= */ c, /* ldc= */ n as i32, ) } } } dtype => Err(Error::UnsupportedDTypeForOp(dtype, "matmul").bt())?, } Ok(dst) } #[cfg(feature = "mkl")] fn f<T: 'static + WithDType + num_traits::Num + Copy>( &self, lhs: &[T], lhs_l: &Layout, rhs: &[T], rhs_l: &Layout, ) -> Result<Vec<T>> { let (b, m, n, k) = self.0; let lhs = &lhs[lhs_l.start_offset()..]; let rhs = &rhs[rhs_l.start_offset()..]; let lhs_stride = lhs_l.stride(); let rhs_stride = rhs_l.stride(); let rank = lhs_stride.len(); let a_skip: usize = match lhs_stride[..rank - 2] { [s1, stride] if s1 == stride * lhs_l.dims()[1] => stride, [stride] => stride, [] => m * k, _ => Err(self.striding_error(lhs_l, rhs_l, "non-contiguous lhs"))?, }; let b_skip: usize = match rhs_stride[..rank - 2] { [s1, stride] if s1 == stride * rhs_l.dims()[1] => stride, [stride] => stride, [] => n * k, _ => Err(self.striding_error(lhs_l, rhs_l, "non-contiguous rhs"))?, }; let c_skip: usize = m * n; let rhs_m1 = rhs_stride[rhs_stride.len() - 1]; let rhs_m2 = rhs_stride[rhs_stride.len() - 2]; let lhs_m1 = lhs_stride[lhs_stride.len() - 1]; let lhs_m2 = lhs_stride[lhs_stride.len() - 2]; let (lda, transa) = if rhs_m1 == 1 && rhs_m2 == n { (n as i32, b'N') } else if rhs_m1 == k && rhs_m2 == 1 { (k as i32, b'T') } else { Err(self.striding_error(lhs_l, rhs_l, "non-contiguous rhs"))? }; // The b tensor has dims batching, m, k (lhs) let (ldb, transb) = if lhs_m1 == 1 && lhs_m2 == k { (k as i32, b'N') } else if lhs_m1 == m && lhs_m2 == 1 { (m as i32, b'T') } else { Err(self.striding_error(lhs_l, rhs_l, "non-contiguous lhs"))? 
}; let mut dst = vec![T::zero(); b * m * n]; match T::DTYPE { DType::F16 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f16; let b = lhs_p.as_ptr() as *const f16; let c = dst_p.as_mut_ptr() as *mut f16; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::mkl::hgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ f16::ONE, /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ f16::ZERO, /* c= */ c, /* ldc= */ n as i32, ) } } } DType::F32 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f32; let b = lhs_p.as_ptr() as *const f32; let c = dst_p.as_mut_ptr() as *mut f32; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::mkl::sgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ 1., /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ 0., /* c= */ c, /* ldc= */ n as i32, ) } } } DType::F64 => { for step in 0..b { let lhs_p = &lhs[step * a_skip..]; let rhs_p = &rhs[step * b_skip..]; let dst_p = &mut dst[step * c_skip..]; unsafe { let a = rhs_p.as_ptr() as *const f64; let b = lhs_p.as_ptr() as *const f64; let c = dst_p.as_mut_ptr() as *mut f64; let a = std::slice::from_raw_parts(a, a_skip); let b = std::slice::from_raw_parts(b, b_skip); let c = std::slice::from_raw_parts_mut(c, c_skip); crate::mkl::dgemm( transa, transb, /* m= */ n as i32, /* n= */ m as i32, /* k= */ k as i32, /* alpha= */ 1., /* a= */ a, /* lda= */ lda, /* b= */ b, /* ldb= */ ldb, /* beta= */ 0., /* c= */ c, /* ldc= */ n as i32, ) } } } dtype => Err(Error::UnsupportedDTypeForOp(dtype, "matmul").bt())?, } Ok(dst) } } fn elu<T: num_traits::Float>(v: T, alpha: T) -> T { if v.is_sign_positive() { v } else { (v.exp() - T::one()) * alpha } } impl CpuStorage { pub fn as_slice<D: WithDType>(&self) -> Result<&[D]> { D::cpu_storage_as_slice(self) } pub fn concat(storages: &[CpuStorage]) -> Result<CpuStorage> { let storage0 = &storages[0]; let s = match storage0 { Self::U8(_) => { let storages = storages .iter() .map(|s| match s { Self::U8(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::U8(storages) } Self::U32(_) => { let storages = storages .iter() .map(|s| match s { Self::U32(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::U32(storages) } Self::I64(_) => { let storages = storages .iter() .map(|s| match s { Self::I64(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::I64(storages) } Self::BF16(_) => { let storages = storages .iter() .map(|s| match s { Self::BF16(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::BF16(storages) } Self::F16(_) => { let storages = storages .iter() .map(|s| match s { Self::F16(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? 
.concat(); Self::F16(storages) } Self::F32(_) => { let storages = storages .iter() .map(|s| match s { Self::F32(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::F32(storages) } Self::F64(_) => { let storages = storages .iter() .map(|s| match s { Self::F64(s) => Ok(s.as_slice()), _ => crate::bail!("dtype mismatch"), }) .collect::<Result<Vec<_>>>()? .concat(); Self::F64(storages) } }; Ok(s) } } impl BackendStorage for CpuStorage { type Device = CpuDevice; fn dtype(&self) -> DType { match self { Self::U8(_) => DType::U8, Self::U32(_) => DType::U32, Self::I64(_) => DType::I64, Self::BF16(_) => DType::BF16, Self::F16(_) => DType::F16, Self::F32(_) => DType::F32, Self::F64(_) => DType::F64, } } fn to_dtype(&self, layout: &Layout, dtype: DType) -> Result<Self> { // TODO: find a way around the quadratic number of cases below. match (self, dtype) { (Self::U8(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| bf16::from_f32(v as f32)); Ok(Self::BF16(data)) } (Self::U32(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| bf16::from_f32(v as f32)); Ok(Self::BF16(data)) } (Self::I64(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| bf16::from_f32(v as f32)); Ok(Self::BF16(data)) } (Self::BF16(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| v); Ok(Self::BF16(data)) } (Self::F16(storage), DType::BF16) => { let data = unary_map(storage, layout, |v| bf16::from_f32(v.to_f32())); Ok(Self::BF16(data)) } (Self::F32(storage), DType::BF16) => { let data = unary_map(storage, layout, bf16::from_f32); Ok(Self::BF16(data)) } (Self::F64(storage), DType::BF16) => { let data = unary_map(storage, layout, bf16::from_f64); Ok(Self::BF16(data)) } (Self::U8(storage), DType::F16) => { let data = unary_map(storage, layout, |v| f16::from_f32(v as f32)); Ok(Self::F16(data)) } (Self::U32(storage), DType::F16) => { let data = unary_map(storage, layout, |v| f16::from_f32(v as f32)); Ok(Self::F16(data)) } (Self::I64(storage), DType::F16) => { let data = unary_map(storage, layout, |v| f16::from_f32(v as f32)); Ok(Self::F16(data)) } (Self::BF16(storage), DType::F16) => { let data = unary_map(storage, layout, |v| f16::from_f32(v.to_f32())); Ok(Self::F16(data)) } (Self::F16(storage), DType::F16) => { let data = unary_map(storage, layout, |v| v); Ok(Self::F16(data)) } (Self::F32(storage), DType::F16) => { let data = unary_map(storage, layout, f16::from_f32); Ok(Self::F16(data)) } (Self::F64(storage), DType::F16) => { let data = unary_map(storage, layout, f16::from_f64); Ok(Self::F16(data)) } (Self::U8(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v as f32); Ok(Self::F32(data)) } (Self::U32(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v as f32); Ok(Self::F32(data)) } (Self::I64(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v as f32); Ok(Self::F32(data)) } (Self::BF16(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v.to_f32()); Ok(Self::F32(data)) } (Self::F16(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v.to_f32()); Ok(Self::F32(data)) } (Self::F32(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v); Ok(Self::F32(data)) } (Self::F64(storage), DType::F32) => { let data = unary_map(storage, layout, |v| v as f32); Ok(Self::F32(data)) } (Self::U8(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v); Ok(Self::U8(data)) } (Self::BF16(storage), DType::U8) 
=> { let data = unary_map(storage, layout, |v| v.to_f32() as u8); Ok(Self::U8(data)) } (Self::F16(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v.to_f32() as u8); Ok(Self::U8(data)) } (Self::F32(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v as u8); Ok(Self::U8(data)) } (Self::F64(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v as u8); Ok(Self::U8(data)) } (Self::U32(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v as u8); Ok(Self::U8(data)) } (Self::I64(storage), DType::U8) => { let data = unary_map(storage, layout, |v| v as u8); Ok(Self::U8(data)) } (Self::U8(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v as u32); Ok(Self::U32(data)) } (Self::U32(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v); Ok(Self::U32(data)) } (Self::I64(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v as u32); Ok(Self::U32(data)) } (Self::BF16(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v.to_f32() as u32); Ok(Self::U32(data)) } (Self::F16(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v.to_f32() as u32); Ok(Self::U32(data)) } (Self::F32(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v as u32); Ok(Self::U32(data)) } (Self::F64(storage), DType::U32) => { let data = unary_map(storage, layout, |v| v as u32); Ok(Self::U32(data)) } (Self::U8(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v as i64); Ok(Self::I64(data)) } (Self::U32(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v as i64); Ok(Self::I64(data)) } (Self::I64(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v); Ok(Self::I64(data)) } (Self::BF16(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v.to_f32() as i64); Ok(Self::I64(data)) } (Self::F16(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v.to_f32() as i64); Ok(Self::I64(data)) } (Self::F32(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v as i64); Ok(Self::I64(data)) } (Self::F64(storage), DType::I64) => { let data = unary_map(storage, layout, |v| v as i64); Ok(Self::I64(data)) } (Self::U8(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v as f64); Ok(Self::F64(data)) } (Self::U32(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v as f64); Ok(Self::F64(data)) } (Self::I64(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v as f64); Ok(Self::F64(data)) } (Self::BF16(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v.to_f64()); Ok(Self::F64(data)) } (Self::F16(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v.to_f64()); Ok(Self::F64(data)) } (Self::F32(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v as f64); Ok(Self::F64(data)) } (Self::F64(storage), DType::F64) => { let data = unary_map(storage, layout, |v| v); Ok(Self::F64(data)) } } } fn reduce_op(&self, op: ReduceOp, layout: &Layout, reduce_dims: &[usize]) -> Result<Self> { match op { ReduceOp::Sum => { let src_dims = layout.dims(); let mut dst_dims = src_dims.to_vec(); for &dim in reduce_dims.iter() { dst_dims[dim] = 1; } let dst_shape = Shape::from(dst_dims); let mut reduce_dims = reduce_dims.to_vec(); // Sort the reduce_dims as they have to be processed from left to right when converting the // indexes. 
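                // Worked example (added): with src_dims = [2, 3, 4] and reduce_dims = [1],
                // reduce_dims_and_stride below is [(3, 4)]: dimension size 3 with a
                // contiguous stride of 4, which ReduceSum uses to zero out the reduced
                // coordinate of each source index.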
reduce_dims.sort(); let reduce_dims_and_stride: Vec<_> = reduce_dims .iter() .map(|&d| (src_dims[d], src_dims[d + 1..].iter().product::<usize>())) .collect(); ReduceSum { dst_shape: &dst_shape, reduce_dims: &reduce_dims, reduce_dims_and_stride, } .map(self, layout) } ReduceOp::Min | ReduceOp::ArgMin | ReduceOp::Max | ReduceOp::ArgMax => { let reduce_dim_index = match reduce_dims { [reduce_dim_index] => *reduce_dim_index, _ => { let op = match op { ReduceOp::Min => "min", ReduceOp::ArgMin => "argmin", ReduceOp::Max => "max", ReduceOp::ArgMax => "argmax", _ => unreachable!(), }; let dims = reduce_dims.to_vec(); Err(Error::OnlySingleDimension { op, dims })? } }; let (use_min, return_index) = match op { ReduceOp::Min => (true, false), ReduceOp::ArgMin => (true, true), ReduceOp::Max => (false, false), ReduceOp::ArgMax => (false, true), _ => unreachable!(), }; ReduceIndex { reduce_dim_index, use_min, return_index, } .map(self, layout) } } } fn cmp(&self, op: CmpOp, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout) -> Result<Self> { Cmp(op).map(self, lhs_l, rhs, rhs_l) } fn affine(&self, layout: &Layout, mul: f64, add: f64) -> Result<Self> { Affine(mul, add).map(self, layout) } fn avg_pool2d( &self, layout: &Layout, kernel_size: (usize, usize), stride: (usize, usize), ) -> Result<Self> { AvgPool2D(kernel_size, stride).map(self, layout) } fn max_pool2d( &self, layout: &Layout, kernel_size: (usize, usize), stride: (usize, usize), ) -> Result<Self> { MaxPool2D(kernel_size, stride).map(self, layout) } fn upsample_nearest1d(&self, layout: &Layout, sz: usize) -> Result<Self> { UpsampleNearest1D(sz).map(self, layout) } fn upsample_nearest2d(&self, layout: &Layout, h: usize, w: usize) -> Result<Self> { UpsampleNearest2D(h, w).map(self, layout) } fn powf(&self, layout: &Layout, e: f64) -> Result<Self> { use num_traits::Float; // TODO: Have some generic map for functions that apply on num_traits::Float elements. match self { Self::BF16(storage) => { let data = unary_map(storage, layout, |v| v.powf(bf16::from_f64(e))); Ok(Self::BF16(data)) } Self::F16(storage) => { let data = unary_map(storage, layout, |v| v.powf(f16::from_f64(e))); Ok(Self::F16(data)) } Self::F32(storage) => { let data = unary_map(storage, layout, |v| v.powf(e as f32)); Ok(Self::F32(data)) } Self::F64(storage) => { let data = unary_map(storage, layout, |v| v.powf(e)); Ok(Self::F64(data)) } Self::U8(_) => Err(Error::UnsupportedDTypeForOp(DType::U8, "powf").bt()), Self::U32(_) => Err(Error::UnsupportedDTypeForOp(DType::U32, "powf").bt()), Self::I64(_) => Err(Error::UnsupportedDTypeForOp(DType::I64, "powf").bt()), } } fn elu(&self, layout: &Layout, alpha: f64) -> Result<Self> { // TODO: Have some generic map for functions that apply on num_traits::Float elements. 
match self { Self::BF16(storage) => { let data = unary_map(storage, layout, |v| elu(v, bf16::from_f64(alpha))); Ok(Self::BF16(data)) } Self::F16(storage) => { let data = unary_map(storage, layout, |v| elu(v, f16::from_f64(alpha))); Ok(Self::F16(data)) } Self::F32(storage) => { let data = unary_map(storage, layout, |v| elu(v, f32::from_f64(alpha))); Ok(Self::F32(data)) } Self::F64(storage) => { let data = unary_map(storage, layout, |v| elu(v, alpha)); Ok(Self::F64(data)) } Self::U8(_) => Err(Error::UnsupportedDTypeForOp(DType::U8, "elu").bt()), Self::U32(_) => Err(Error::UnsupportedDTypeForOp(DType::U32, "elu").bt()), Self::I64(_) => Err(Error::UnsupportedDTypeForOp(DType::I64, "elu").bt()), } } fn unary_impl<B: UnaryOpT>(&self, layout: &Layout) -> Result<Self> { match self { Self::BF16(storage) => { if B::BF16_VEC { let data = unary_map_vec(storage, layout, B::bf16, B::bf16_vec); Ok(Self::BF16(data)) } else { let data = unary_map(storage, layout, B::bf16); Ok(Self::BF16(data)) } } Self::F16(storage) => { if B::F16_VEC { let data = unary_map_vec(storage, layout, B::f16, B::f16_vec); Ok(Self::F16(data)) } else { let data = unary_map(storage, layout, B::f16); Ok(Self::F16(data)) } } Self::F32(storage) => { if B::F32_VEC { let data = unary_map_vec(storage, layout, B::f32, B::f32_vec); Ok(Self::F32(data)) } else { let data = unary_map(storage, layout, B::f32); Ok(Self::F32(data)) } } Self::F64(storage) => { if B::F64_VEC { let data = unary_map_vec(storage, layout, B::f64, B::f64_vec); Ok(Self::F64(data)) } else { let data = unary_map(storage, layout, B::f64); Ok(Self::F64(data)) } } Self::U8(storage) => { let data = unary_map(storage, layout, B::u8); Ok(Self::U8(data)) } Self::U32(storage) => { let data = unary_map(storage, layout, B::u32); Ok(Self::U32(data)) } Self::I64(storage) => { let data = unary_map(storage, layout, B::i64); Ok(Self::I64(data)) } } } fn binary_impl<B: BinaryOpT>( &self, rhs: &Self, lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { match (self, rhs) { (Self::BF16(lhs), Self::BF16(rhs)) => { let data = if B::BF16_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::bf16, B::bf16_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::bf16) }; Ok(Self::BF16(data)) } (Self::F16(lhs), Self::F16(rhs)) => { let data = if B::F16_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::f16, B::f16_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::f16) }; Ok(Self::F16(data)) } (Self::F32(lhs), Self::F32(rhs)) => { let data = if B::F32_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::f32, B::f32_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::f32) }; Ok(Self::F32(data)) } (Self::F64(lhs), Self::F64(rhs)) => { let data = if B::F64_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::f64, B::f64_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::f64) }; Ok(Self::F64(data)) } (Self::U32(lhs), Self::U32(rhs)) => { let data = if B::U32_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::u32, B::u32_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::u32) }; Ok(Self::U32(data)) } (Self::I64(lhs), Self::I64(rhs)) => { let data = if B::I64_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::i64, B::i64_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::i64) }; Ok(Self::I64(data)) } (Self::U8(lhs), Self::U8(rhs)) => { let data = if B::U8_VEC { binary_map_vec(lhs_l, rhs_l, lhs, rhs, B::u8, B::u8_vec) } else { binary_map(lhs_l, rhs_l, lhs, rhs, B::u8) }; Ok(Self::U8(data)) } _ => { // This should be covered by the dtype check above. 
Err(Error::DTypeMismatchBinaryOp { lhs: self.dtype(), rhs: rhs.dtype(), op: B::NAME, } .bt()) } } } fn copy_strided_src(&self, dst: &mut Self, dst_offset: usize, src_l: &Layout) -> Result<()> { match (self, dst) { (Self::U8(src), Self::U8(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::U32(src), Self::U32(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::I64(src), Self::I64(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::BF16(src), Self::BF16(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::F16(src), Self::F16(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::F32(src), Self::F32(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (Self::F64(src), Self::F64(dst)) => copy_strided_src_(src, dst, dst_offset, src_l), (_, dst) => { // This should be covered by the dtype check above. return Err(Error::DTypeMismatchBinaryOp { lhs: self.dtype(), rhs: dst.dtype(), op: "copy_strided", } .bt()); } } Ok(()) } fn where_cond( &self, layout: &Layout, t: &Self, t_l: &Layout, f: &Self, f_l: &Layout, ) -> Result<Self> { match self { Self::U8(pred) => WCond(pred, layout).map(t, t_l, f, f_l), Self::U32(pred) => WCond(pred, layout).map(t, t_l, f, f_l), Self::I64(pred) => WCond(pred, layout).map(t, t_l, f, f_l), _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "where-cond")), } } fn conv1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv1D, ) -> Result<Self> { if !USE_IM2COL_CONV1D { return Conv1D(params).map(self, l, kernel, kernel_l); } let op = Im2Col1D { l_k: params.k_size, padding: params.padding, stride: params.stride, dilation: params.dilation, }; let col = op.map(self, l)?; let b = params.b_size; let n = params.c_out; let l_out = params.l_out(); let k = op.l_k * params.c_in; let m = l_out; let col_l = Layout::contiguous((b, m, k)); let res = if kernel_l.is_contiguous() { let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? } else { // Make the kernel contiguous if not already the case. let mut kernel_c = self.device().zeros_impl(kernel_l.shape(), kernel.dtype())?; kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?; let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? 
}; let res_l = Layout::contiguous((b, l_out, params.c_out)).transpose(1, 2)?; let mut res_t = self.device().zeros_impl(res_l.shape(), res.dtype())?; res.copy_strided_src(&mut res_t, 0, &res_l)?; Ok(res_t) } fn conv_transpose1d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self> { ConvTranspose1D(params).map(self, l, kernel, kernel_l) } fn conv2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConv2D, ) -> Result<Self> { if !USE_IM2COL_CONV2D { return Conv2D(params).map(self, l, kernel, kernel_l); } let op = Im2Col { h_k: params.k_h, w_k: params.k_w, padding: params.padding, stride: params.stride, dilation: params.dilation, }; let col = op.map(self, l)?; let b = params.b_size; let n = params.c_out; let (h_out, w_out) = (params.out_h(), params.out_w()); let k = op.h_k * op.w_k * params.c_in; let m = h_out * w_out; let col_l = Layout::contiguous((b, m, k)); let res = if kernel_l.is_contiguous() { let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? } else { // Make the kernel contiguous if not already the case. let mut kernel_c = self.device().zeros_impl(kernel_l.shape(), kernel.dtype())?; kernel.copy_strided_src(&mut kernel_c, 0, kernel_l)?; let kernel_l = Layout::contiguous_with_offset((1, n, k), kernel_l.start_offset()) .transpose(1, 2)? .broadcast_as((b, k, n))?; col.matmul(kernel, (b, m, n, k), &col_l, &kernel_l)? }; let res_l = Layout::contiguous((b, h_out, w_out, params.c_out)) .transpose(1, 2)? .transpose(1, 3)?; let mut res_t = self.device().zeros_impl(res_l.shape(), res.dtype())?; res.copy_strided_src(&mut res_t, 0, &res_l)?; Ok(res_t) } fn conv_transpose2d( &self, l: &Layout, kernel: &Self, kernel_l: &Layout, params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self> { ConvTranspose2D(params).map(self, l, kernel, kernel_l) } fn index_select(&self, ids: &Self, l: &Layout, ids_l: &Layout, dim: usize) -> Result<Self> { match ids { Self::U8(ids) => IndexSelect { ids, ids_l, dim }.map(self, l), Self::U32(ids) => IndexSelect { ids, ids_l, dim }.map(self, l), Self::I64(ids) => IndexSelect { ids, ids_l, dim }.map(self, l), _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "index-select")), } } fn gather(&self, l: &Layout, ids: &Self, ids_l: &Layout, dim: usize) -> Result<Self> { match ids { Self::U8(ids) => Gather { ids, ids_l, dim }.map(self, l), Self::U32(ids) => Gather { ids, ids_l, dim }.map(self, l), Self::I64(ids) => Gather { ids, ids_l, dim }.map(self, l), _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "gather")), } } fn scatter_add( &self, l: &Layout, ids: &Self, ids_l: &Layout, src: &Self, src_l: &Layout, dim: usize, ) -> Result<Self> { match ids { Self::U8(ids) => ScatterAdd { ids, ids_l, dim }.map(self, l, src, src_l), Self::U32(ids) => ScatterAdd { ids, ids_l, dim }.map(self, l, src, src_l), Self::I64(ids) => ScatterAdd { ids, ids_l, dim }.map(self, l, src, src_l), _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "scatter-add")), } } fn index_add( &self, l: &Layout, ids: &Self, ids_l: &Layout, src: &Self, src_l: &Layout, dim: usize, ) -> Result<Self> { match ids { Self::U8(ids) => { let ids = match ids_l.contiguous_offsets() { Some((a, b)) => &ids[a..b], None => Err(Error::RequiresContiguous { op: "index-add" }.bt())?, }; IndexAdd { ids, dim }.map(self, l, src, src_l) } Self::U32(ids) => { let ids = match ids_l.contiguous_offsets() { Some((a, 
b)) => &ids[a..b], None => Err(Error::RequiresContiguous { op: "index-add" }.bt())?, }; IndexAdd { ids, dim }.map(self, l, src, src_l) } Self::I64(ids) => { let ids = match ids_l.contiguous_offsets() { Some((a, b)) => &ids[a..b], None => Err(Error::RequiresContiguous { op: "index-add" }.bt())?, }; IndexAdd { ids, dim }.map(self, l, src, src_l) } _ => Err(Error::UnsupportedDTypeForOp(self.dtype(), "index-add").bt()), } } fn matmul( &self, rhs: &Self, bmnk: (usize, usize, usize, usize), lhs_l: &Layout, rhs_l: &Layout, ) -> Result<Self> { MatMul(bmnk).map(self, lhs_l, rhs, rhs_l) } fn device(&self) -> &Self::Device { &CpuDevice } fn try_clone(&self, _: &Layout) -> Result<Self> { Ok(self.clone()) } fn to_cpu_storage(&self) -> Result<CpuStorage> { Ok(self.clone()) } } impl BackendDevice for CpuDevice { type Storage = CpuStorage; fn location(&self) -> crate::DeviceLocation { crate::DeviceLocation::Cpu } fn same_device(&self, _: &Self) -> bool { true } fn storage_from_cpu_storage(&self, s: &CpuStorage) -> Result<Self::Storage> { Ok(s.clone()) } fn new(_: usize) -> Result<Self> { Ok(Self) } fn set_seed(&self, _seed: u64) -> Result<()> { crate::bail!("cannot seed the CPU rng with set_seed") } fn rand_uniform(&self, shape: &Shape, dtype: DType, min: f64, max: f64) -> Result<CpuStorage> { use rand::prelude::*; let elem_count = shape.elem_count(); let mut rng = rand::thread_rng(); match dtype { DType::U8 | DType::U32 | DType::I64 => { Err(Error::UnsupportedDTypeForOp(dtype, "rand_uniform").bt()) } DType::BF16 => { let mut data = Vec::with_capacity(elem_count); let uniform = rand::distributions::Uniform::new(bf16::from_f64(min), bf16::from_f64(max)); for _i in 0..elem_count { data.push(rng.sample::<bf16, _>(uniform)) } Ok(CpuStorage::BF16(data)) } DType::F16 => { let mut data = Vec::with_capacity(elem_count); let uniform = rand::distributions::Uniform::new(f16::from_f64(min), f16::from_f64(max)); for _i in 0..elem_count { data.push(rng.sample::<f16, _>(uniform)) } Ok(CpuStorage::F16(data)) } DType::F32 => { let mut data = Vec::with_capacity(elem_count); let uniform = rand::distributions::Uniform::new(min as f32, max as f32); for _i in 0..elem_count { data.push(rng.sample::<f32, _>(uniform)) } Ok(CpuStorage::F32(data)) } DType::F64 => { let mut data = Vec::with_capacity(elem_count); let uniform = rand::distributions::Uniform::new(min, max); for _i in 0..elem_count { data.push(rng.sample::<f64, _>(uniform)) } Ok(CpuStorage::F64(data)) } } } fn rand_normal(&self, shape: &Shape, dtype: DType, mean: f64, std: f64) -> Result<CpuStorage> { use rand::prelude::*; let elem_count = shape.elem_count(); let mut rng = rand::thread_rng(); match dtype { DType::U8 | DType::U32 | DType::I64 => { Err(Error::UnsupportedDTypeForOp(dtype, "rand_normal").bt()) } DType::BF16 => { let mut data = Vec::with_capacity(elem_count); let normal = rand_distr::Normal::new(bf16::from_f64(mean), bf16::from_f64(std)) .map_err(Error::wrap)?; for _i in 0..elem_count { data.push(normal.sample(&mut rng)) } Ok(CpuStorage::BF16(data)) } DType::F16 => { let mut data = Vec::with_capacity(elem_count); let normal = rand_distr::Normal::new(f16::from_f64(mean), f16::from_f64(std)) .map_err(Error::wrap)?; for _i in 0..elem_count { data.push(normal.sample(&mut rng)) } Ok(CpuStorage::F16(data)) } DType::F32 => { let mut data = Vec::with_capacity(elem_count); let normal = rand_distr::Normal::new(mean as f32, std as f32).map_err(Error::wrap)?; for _i in 0..elem_count { data.push(normal.sample(&mut rng)) } Ok(CpuStorage::F32(data)) } DType::F64 => { let 
mut data = Vec::with_capacity(elem_count); let normal = rand_distr::Normal::new(mean, std).map_err(Error::wrap)?; for _i in 0..elem_count { data.push(normal.sample(&mut rng)) } Ok(CpuStorage::F64(data)) } } } fn ones_impl(&self, shape: &Shape, dtype: DType) -> Result<CpuStorage> { let elem_count = shape.elem_count(); let storage = match dtype { DType::U8 => CpuStorage::U8(vec![1u8; elem_count]), DType::U32 => CpuStorage::U32(vec![1u32; elem_count]), DType::I64 => CpuStorage::I64(vec![1i64; elem_count]), DType::BF16 => CpuStorage::BF16(vec![bf16::ONE; elem_count]), DType::F16 => CpuStorage::F16(vec![f16::ONE; elem_count]), DType::F32 => CpuStorage::F32(vec![1f32; elem_count]), DType::F64 => CpuStorage::F64(vec![1f64; elem_count]), }; Ok(storage) } fn zeros_impl(&self, shape: &Shape, dtype: DType) -> Result<CpuStorage> { let elem_count = shape.elem_count(); let storage = match dtype { DType::U8 => CpuStorage::U8(vec![0u8; elem_count]), DType::U32 => CpuStorage::U32(vec![0u32; elem_count]), DType::I64 => CpuStorage::I64(vec![0i64; elem_count]), DType::BF16 => CpuStorage::BF16(vec![bf16::ZERO; elem_count]), DType::F16 => CpuStorage::F16(vec![f16::ZERO; elem_count]), DType::F32 => CpuStorage::F32(vec![0f32; elem_count]), DType::F64 => CpuStorage::F64(vec![0f64; elem_count]), }; Ok(storage) } } #[macro_export] macro_rules! map_dtype { ($name:expr, $storage:ident, $fn:expr, ($($dtypes:ident),+)) => { match $storage { $(CpuStorage::$dtypes(__e) => CpuStorage::$dtypes($fn(__e)),)* s => Err(Error::UnsupportedDTypeForOp(s.dtype(), $name).bt())?, } }; }
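// --- Expository sketch (not part of the upstream file) --- A hedged example of
// the `map_dtype!` macro defined above: apply a function to the inner `Vec` for
// an allow-listed set of dtypes and error out on the rest. The names
// `map_dtype_sketch`, `double_floats` and `double` are hypothetical.
#[cfg(test)]
mod map_dtype_sketch {
    use crate::backend::BackendStorage;
    use crate::{CpuStorage, Error, Result};

    fn double_floats(s: &CpuStorage) -> Result<CpuStorage> {
        fn double<T: Copy + std::ops::Add<Output = T>>(vs: &Vec<T>) -> Vec<T> {
            vs.iter().map(|&v| v + v).collect()
        }
        // Only F32 and F64 are accepted; any other dtype hits the
        // `UnsupportedDTypeForOp` arm generated by the macro.
        Ok(crate::map_dtype!("double", s, double, (F32, F64)))
    }

    #[test]
    fn doubles_f32() -> Result<()> {
        let s = CpuStorage::F32(vec![1., 2.]);
        match double_floats(&s)? {
            CpuStorage::F32(vs) => assert_eq!(vs, vec![2., 4.]),
            _ => unreachable!(),
        }
        Ok(())
    }
}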
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/variable.rs
// Variables are wrappers around tensors that can be modified, they are typically used for holding // weights and being modified by gradient descent. // We do not expose a public way to create variables as this would break the invariant that the // tensor within a variable is actually with `is_variable` set to `true`. use crate::{DType, Device, Error, Result, Shape, Tensor}; /// A variable is a wrapper around a tensor, however variables can have their content modified /// whereas tensors are immutable. #[derive(Clone, Debug)] pub struct Var(Tensor); impl std::fmt::Display for Var { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Display::fmt(&self.0, f) } } impl std::ops::Deref for Var { type Target = Tensor; fn deref(&self) -> &Self::Target { self.0.as_ref() } } impl Var { pub fn zeros<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> { let inner = Tensor::zeros_impl(shape, dtype, device, true)?; Ok(Self(inner)) } pub fn ones<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> { let inner = Tensor::ones_impl(shape, dtype, device, true)?; Ok(Self(inner)) } pub fn from_tensor(t: &Tensor) -> Result<Self> { let inner = t.make_var()?; Ok(Self(inner)) } pub fn rand_f64<S: Into<Shape>>( lo: f64, up: f64, s: S, dtype: DType, device: &Device, ) -> Result<Self> { let inner = Tensor::rand_f64_impl(lo, up, s, dtype, device, true)?; Ok(Self(inner)) } pub fn randn_f64<S: Into<Shape>>( mean: f64, std: f64, s: S, dtype: DType, device: &Device, ) -> Result<Self> { let inner = Tensor::randn_f64_impl(mean, std, s, dtype, device, true)?; Ok(Self(inner)) } pub fn rand<S: Into<Shape>, T: crate::FloatDType>( lo: T, up: T, s: S, device: &Device, ) -> Result<Self> { let inner = Tensor::rand_impl(lo, up, s, device, true)?; Ok(Self(inner)) } pub fn randn<S: Into<Shape>, T: crate::FloatDType>( mean: T, std: T, s: S, device: &Device, ) -> Result<Self> { let inner = Tensor::randn_impl(mean, std, s, device, true)?; Ok(Self(inner)) } /// Creates a new tensor on the specified device using the content and shape of the input. /// This is similar to `new` but the resulting tensor is a variable. pub fn new<A: crate::device::NdArray>(array: A, device: &Device) -> Result<Self> { let shape = array.shape()?; let inner = Tensor::new_impl(array, shape, device, true)?; Ok(Self(inner)) } pub fn from_vec<S: Into<Shape>, D: crate::WithDType>( data: Vec<D>, shape: S, device: &Device, ) -> Result<Self> { let inner = Tensor::from_vec_impl(data, shape, device, true)?; Ok(Self(inner)) } pub fn from_slice<S: Into<Shape>, D: crate::WithDType>( array: &[D], shape: S, device: &Device, ) -> Result<Self> { let inner = Tensor::new_impl(array, shape.into(), device, true)?; Ok(Self(inner)) } pub fn as_tensor(&self) -> &Tensor { &self.0 } /// Consumes this `Var` and return the underlying tensor. pub fn into_inner(self) -> Tensor { self.0 } /// Sets the content of the inner tensor, this does not require a mutable reference as inner /// mutability is used. pub fn set(&self, src: &Tensor) -> Result<()> { if self.same_storage(src) { let msg = "cannot set a variable to a tensor that is derived from its value"; Err(Error::CannotSetVar { msg }.bt())? } let (mut dst, layout) = self.storage_mut_and_layout(); if !layout.is_contiguous() { let msg = "cannot set a non-contiguous variable"; Err(Error::CannotSetVar { msg }.bt())? 
} let (src, src_l) = src.storage_and_layout(); if layout.shape() != src_l.shape() { Err(Error::ShapeMismatchBinaryOp { lhs: layout.shape().clone(), rhs: src_l.shape().clone(), op: "set", } .bt())? } src.copy_strided_src(&mut dst, layout.start_offset(), src_l)?; Ok(()) } }
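// --- Expository sketch (not part of the upstream file) --- A hedged example of
// the `Var` workflow described above: create a variable, backprop a loss, then
// update the variable in place through `set`. Only `Var`, `Tensor::backward`
// and `Var::set` come from the crate itself; the helper name `sgd_step` and
// the learning rate are made up for illustration.
#[cfg(test)]
mod var_usage_sketch {
    use crate::{DType, Device, Result, Tensor, Var};

    fn sgd_step(var: &Var, grad: &Tensor, lr: f64) -> Result<()> {
        // `sub` allocates fresh storage, so the updated tensor does not share
        // storage with `var` and the `same_storage` check in `set` passes.
        let updated = var.as_tensor().sub(&(grad * lr)?)?;
        var.set(&updated)
    }

    #[test]
    fn manual_gradient_descent() -> Result<()> {
        let w = Var::ones((2, 2), DType::F32, &Device::Cpu)?;
        let loss = w.as_tensor().sqr()?.sum_all()?;
        let grads = loss.backward()?;
        if let Some(grad) = grads.get(&w) {
            sgd_step(&w, grad, 0.1)?;
        }
        Ok(())
    }
}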
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/op.rs
#![allow(clippy::redundant_closure_call)] use crate::{CpuStorage, CudaStorage, Layout, MetalStorage, Result, Shape, Tensor}; use half::{bf16, f16}; use num_traits::float::Float; #[derive(Clone, Copy, PartialEq, Eq)] pub enum CmpOp { Eq, Ne, Le, Ge, Lt, Gt, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum ReduceOp { Sum, Min, Max, ArgMin, ArgMax, } impl ReduceOp { pub(crate) fn name(&self) -> &'static str { match self { Self::ArgMax => "argmax", Self::ArgMin => "argmin", Self::Min => "min", Self::Max => "max", Self::Sum => "sum", } } } // These ops return the same type as their input type. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum BinaryOp { Add, Mul, Sub, Div, Maximum, Minimum, } // Unary ops with no argument #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum UnaryOp { Exp, Log, Sin, Cos, Abs, Neg, Recip, Sqr, Sqrt, Gelu, GeluErf, Erf, Relu, Tanh, Floor, Ceil, Round, } #[derive(Clone)] pub enum Op { Binary(Tensor, Tensor, BinaryOp), Unary(Tensor, UnaryOp), Cmp(Tensor, CmpOp), // The third argument is the reduced shape with `keepdim=true`. Reduce(Tensor, ReduceOp, Vec<usize>), Matmul(Tensor, Tensor), Gather(Tensor, Tensor, usize), ScatterAdd(Tensor, Tensor, Tensor, usize), IndexSelect(Tensor, Tensor, usize), IndexAdd(Tensor, Tensor, Tensor, usize), WhereCond(Tensor, Tensor, Tensor), #[allow(dead_code)] Conv1D { arg: Tensor, kernel: Tensor, padding: usize, stride: usize, dilation: usize, }, #[allow(dead_code)] ConvTranspose1D { arg: Tensor, kernel: Tensor, padding: usize, output_padding: usize, stride: usize, dilation: usize, }, #[allow(dead_code)] Conv2D { arg: Tensor, kernel: Tensor, padding: usize, stride: usize, dilation: usize, }, #[allow(dead_code)] ConvTranspose2D { arg: Tensor, kernel: Tensor, padding: usize, output_padding: usize, stride: usize, dilation: usize, }, AvgPool2D { arg: Tensor, kernel_size: (usize, usize), stride: (usize, usize), }, MaxPool2D { arg: Tensor, kernel_size: (usize, usize), stride: (usize, usize), }, UpsampleNearest1D(Tensor), UpsampleNearest2D(Tensor), Cat(Vec<Tensor>, usize), #[allow(dead_code)] // add is currently unused. Affine { arg: Tensor, mul: f64, add: f64, }, ToDType(Tensor), Copy(Tensor), Broadcast(Tensor), Narrow(Tensor, usize, usize, usize), SliceScatter0(Tensor, Tensor, usize), Reshape(Tensor), ToDevice(Tensor), Transpose(Tensor, usize, usize), Permute(Tensor, Vec<usize>), Elu(Tensor, f64), Powf(Tensor, f64), CustomOp1(Tensor, std::sync::Arc<Box<dyn CustomOp1 + Send + Sync>>), CustomOp2( Tensor, Tensor, std::sync::Arc<Box<dyn CustomOp2 + Send + Sync>>, ), CustomOp3( Tensor, Tensor, Tensor, std::sync::Arc<Box<dyn CustomOp3 + Send + Sync>>, ), } /// Unary ops that can be defined in user-land. pub trait CustomOp1 { // Box<dyn> does not support const yet, so use a function to get the name. fn name(&self) -> &'static str; /// The forward pass, as run on a cpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)>; /// The forward pass, as run on a gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cuda_fwd(&self, _storage: &CudaStorage, _layout: &Layout) -> Result<(CudaStorage, Shape)> { Err(crate::Error::Cuda( format!("no cuda implementation for {}", self.name()).into(), )) } /// The forward pass, as run on a metal gpu device. 
Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn metal_fwd( &self, _storage: &MetalStorage, _layout: &Layout, ) -> Result<(MetalStorage, Shape)> { Err(crate::Error::Metal( format!("no metal implementation for {}", self.name()).into(), )) } /// This function takes as argument the argument `arg` used in the forward pass, the result /// produced by the forward operation `res` and the gradient of the result `grad_res`. /// The function should return the gradient of the argument. fn bwd(&self, _arg: &Tensor, _res: &Tensor, _grad_res: &Tensor) -> Result<Option<Tensor>> { Err(crate::Error::BackwardNotSupported { op: self.name() }) } } pub trait CustomOp2 { fn name(&self) -> &'static str; /// The forward pass, as run on a cpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, ) -> Result<(CpuStorage, Shape)>; /// The forward pass, as run on a gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cuda_fwd( &self, _: &CudaStorage, _: &Layout, _: &CudaStorage, _: &Layout, ) -> Result<(CudaStorage, Shape)> { Err(crate::Error::Cuda( format!("no cuda implementation for {}", self.name()).into(), )) } /// The forward pass, as run on a metal gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn metal_fwd( &self, _: &MetalStorage, _: &Layout, _: &MetalStorage, _: &Layout, ) -> Result<(MetalStorage, Shape)> { Err(crate::Error::Metal( format!("no metal implementation for {}", self.name()).into(), )) } fn bwd( &self, _arg1: &Tensor, _arg2: &Tensor, _res: &Tensor, _grad_res: &Tensor, ) -> Result<(Option<Tensor>, Option<Tensor>)> { Err(crate::Error::BackwardNotSupported { op: self.name() }) } } pub trait CustomOp3 { fn name(&self) -> &'static str; /// The forward pass, as run on a cpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cpu_fwd( &self, s1: &CpuStorage, l1: &Layout, s2: &CpuStorage, l2: &Layout, s3: &CpuStorage, l3: &Layout, ) -> Result<(CpuStorage, Shape)>; /// The forward pass, as run on a gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. fn cuda_fwd( &self, _: &CudaStorage, _: &Layout, _: &CudaStorage, _: &Layout, _: &CudaStorage, _: &Layout, ) -> Result<(CudaStorage, Shape)> { Err(crate::Error::Cuda( format!("no cuda implementation for {}", self.name()).into(), )) } /// The forward pass, as run on a metal gpu device. Note that the storage can use arbitrary strides, /// offsets etc so the associated layout should be used to access it. 
fn metal_fwd( &self, _: &MetalStorage, _: &Layout, _: &MetalStorage, _: &Layout, _: &MetalStorage, _: &Layout, ) -> Result<(MetalStorage, Shape)> { Err(crate::Error::Metal( format!("no metal implementation for {}", self.name()).into(), )) } fn bwd( &self, _arg1: &Tensor, _arg2: &Tensor, _arg3: &Tensor, _res: &Tensor, _grad_res: &Tensor, ) -> Result<(Option<Tensor>, Option<Tensor>, Option<Tensor>)> { Err(crate::Error::BackwardNotSupported { op: self.name() }) } } pub trait UnaryOpT { const NAME: &'static str; const KERNEL: &'static str; const V: Self; fn bf16(v1: bf16) -> bf16; fn f16(v1: f16) -> f16; fn f32(v1: f32) -> f32; fn f64(v1: f64) -> f64; fn u8(v1: u8) -> u8; fn u32(v1: u32) -> u32; fn i64(v1: i64) -> i64; // There is no very good way to represent optional function in traits so we go for an explicit // boolean flag to mark the function as existing. const BF16_VEC: bool = false; fn bf16_vec(_xs: &[bf16], _ys: &mut [bf16]) {} const F16_VEC: bool = false; fn f16_vec(_xs: &[f16], _ys: &mut [f16]) {} const F32_VEC: bool = false; fn f32_vec(_xs: &[f32], _ys: &mut [f32]) {} const F64_VEC: bool = false; fn f64_vec(_xs: &[f64], _ys: &mut [f64]) {} } pub trait BinaryOpT { const NAME: &'static str; const KERNEL: &'static str; const V: Self; fn bf16(v1: bf16, v2: bf16) -> bf16; fn f16(v1: f16, v2: f16) -> f16; fn f32(v1: f32, v2: f32) -> f32; fn f64(v1: f64, v2: f64) -> f64; fn u8(v1: u8, v2: u8) -> u8; fn u32(v1: u32, v2: u32) -> u32; fn i64(v1: i64, v2: i64) -> i64; const BF16_VEC: bool = false; fn bf16_vec(_xs1: &[bf16], _xs2: &[bf16], _ys: &mut [bf16]) {} const F16_VEC: bool = false; fn f16_vec(_xs1: &[f16], _xs2: &[f16], _ys: &mut [f16]) {} const F32_VEC: bool = false; fn f32_vec(_xs1: &[f32], _xs2: &[f32], _ys: &mut [f32]) {} const F64_VEC: bool = false; fn f64_vec(_xs1: &[f64], _xs2: &[f64], _ys: &mut [f64]) {} const U8_VEC: bool = false; fn u8_vec(_xs1: &[u8], _xs2: &[u8], _ys: &mut [u8]) {} const U32_VEC: bool = false; fn u32_vec(_xs1: &[u32], _xs2: &[u32], _ys: &mut [u32]) {} const I64_VEC: bool = false; fn i64_vec(_xs1: &[i64], _xs2: &[i64], _ys: &mut [i64]) {} } pub(crate) struct Add; pub(crate) struct Div; pub(crate) struct Mul; pub(crate) struct Sub; pub(crate) struct Maximum; pub(crate) struct Minimum; pub(crate) struct Exp; pub(crate) struct Log; pub(crate) struct Sin; pub(crate) struct Cos; pub(crate) struct Abs; pub(crate) struct Neg; pub(crate) struct Recip; pub(crate) struct Sqr; pub(crate) struct Sqrt; pub(crate) struct Gelu; pub(crate) struct GeluErf; pub(crate) struct Erf; pub(crate) struct Relu; pub(crate) struct Tanh; pub(crate) struct Floor; pub(crate) struct Ceil; pub(crate) struct Round; macro_rules! 
bin_op { ($op:ident, $name: literal, $e: expr, $f32_vec: ident, $f64_vec: ident) => { impl BinaryOpT for $op { const NAME: &'static str = $name; const KERNEL: &'static str = concat!("b", $name); const V: Self = $op; #[inline(always)] fn bf16(v1: bf16, v2: bf16) -> bf16 { $e(v1, v2) } #[inline(always)] fn f16(v1: f16, v2: f16) -> f16 { $e(v1, v2) } #[inline(always)] fn f32(v1: f32, v2: f32) -> f32 { $e(v1, v2) } #[inline(always)] fn f64(v1: f64, v2: f64) -> f64 { $e(v1, v2) } #[inline(always)] fn u8(v1: u8, v2: u8) -> u8 { $e(v1, v2) } #[inline(always)] fn u32(v1: u32, v2: u32) -> u32 { $e(v1, v2) } #[inline(always)] fn i64(v1: i64, v2: i64) -> i64 { $e(v1, v2) } #[cfg(feature = "mkl")] const F32_VEC: bool = true; #[cfg(feature = "mkl")] const F64_VEC: bool = true; #[cfg(feature = "mkl")] #[inline(always)] fn f32_vec(xs1: &[f32], xs2: &[f32], ys: &mut [f32]) { crate::mkl::$f32_vec(xs1, xs2, ys) } #[cfg(feature = "mkl")] #[inline(always)] fn f64_vec(xs1: &[f64], xs2: &[f64], ys: &mut [f64]) { crate::mkl::$f64_vec(xs1, xs2, ys) } #[cfg(feature = "accelerate")] const F32_VEC: bool = true; #[cfg(feature = "accelerate")] const F64_VEC: bool = true; #[cfg(feature = "accelerate")] #[inline(always)] fn f32_vec(xs1: &[f32], xs2: &[f32], ys: &mut [f32]) { crate::accelerate::$f32_vec(xs1, xs2, ys) } #[cfg(feature = "accelerate")] #[inline(always)] fn f64_vec(xs1: &[f64], xs2: &[f64], ys: &mut [f64]) { crate::accelerate::$f64_vec(xs1, xs2, ys) } } }; } bin_op!(Add, "add", |v1, v2| v1 + v2, vs_add, vd_add); bin_op!(Sub, "sub", |v1, v2| v1 - v2, vs_sub, vd_sub); bin_op!(Mul, "mul", |v1, v2| v1 * v2, vs_mul, vd_mul); bin_op!(Div, "div", |v1, v2| v1 / v2, vs_div, vd_div); bin_op!( Minimum, "minimum", |v1, v2| if v1 > v2 { v2 } else { v1 }, vs_min, vd_min ); bin_op!( Maximum, "maximum", |v1, v2| if v1 < v2 { v2 } else { v1 }, vs_max, vd_max ); #[allow(clippy::redundant_closure_call)] macro_rules! 
unary_op { ($op: ident, $name: literal, $a: ident, $e: expr) => { impl UnaryOpT for $op { const NAME: &'static str = $name; const KERNEL: &'static str = concat!("u", $name); const V: Self = $op; #[inline(always)] fn bf16($a: bf16) -> bf16 { $e } #[inline(always)] fn f16($a: f16) -> f16 { $e } #[inline(always)] fn f32($a: f32) -> f32 { $e } #[inline(always)] fn f64($a: f64) -> f64 { $e } #[inline(always)] fn u8(_: u8) -> u8 { todo!("no unary function for u8") } #[inline(always)] fn u32(_: u32) -> u32 { todo!("no unary function for u32") } #[inline(always)] fn i64(_: i64) -> i64 { todo!("no unary function for i64") } } }; ($op: ident, $name: literal, $a: ident, $e: expr, $f32_vec:ident, $f64_vec:ident) => { impl UnaryOpT for $op { const NAME: &'static str = $name; const KERNEL: &'static str = concat!("u", $name); const V: Self = $op; #[inline(always)] fn bf16($a: bf16) -> bf16 { $e } #[inline(always)] fn f16($a: f16) -> f16 { $e } #[inline(always)] fn f32($a: f32) -> f32 { $e } #[inline(always)] fn f64($a: f64) -> f64 { $e } #[inline(always)] fn u8(_: u8) -> u8 { todo!("no unary function for u8") } #[inline(always)] fn u32(_: u32) -> u32 { todo!("no unary function for u32") } #[inline(always)] fn i64(_: i64) -> i64 { todo!("no unary function for i64") } #[cfg(feature = "mkl")] const F32_VEC: bool = true; #[cfg(feature = "mkl")] const F64_VEC: bool = true; #[cfg(feature = "mkl")] #[inline(always)] fn f32_vec(xs: &[f32], ys: &mut [f32]) { crate::mkl::$f32_vec(xs, ys) } #[cfg(feature = "mkl")] #[inline(always)] fn f64_vec(xs: &[f64], ys: &mut [f64]) { crate::mkl::$f64_vec(xs, ys) } #[cfg(feature = "accelerate")] const F32_VEC: bool = true; #[cfg(feature = "accelerate")] const F64_VEC: bool = true; #[cfg(feature = "accelerate")] #[inline(always)] fn f32_vec(xs: &[f32], ys: &mut [f32]) { crate::accelerate::$f32_vec(xs, ys) } #[cfg(feature = "accelerate")] #[inline(always)] fn f64_vec(xs: &[f64], ys: &mut [f64]) { crate::accelerate::$f64_vec(xs, ys) } } }; } unary_op!(Exp, "exp", v, v.exp(), vs_exp, vd_exp); unary_op!(Log, "log", v, v.ln(), vs_ln, vd_ln); unary_op!(Sin, "sin", v, v.sin(), vs_sin, vd_sin); unary_op!(Cos, "cos", v, v.cos(), vs_cos, vd_cos); unary_op!(Tanh, "tanh", v, v.tanh(), vs_tanh, vd_tanh); unary_op!(Neg, "neg", v, -v); unary_op!(Recip, "recip", v, v.recip()); unary_op!(Sqr, "sqr", v, v * v, vs_sqr, vd_sqr); unary_op!(Sqrt, "sqrt", v, v.sqrt(), vs_sqrt, vd_sqrt); /// Tanh based approximation of the `gelu` operation /// GeluErf is the more precise one. 
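/// The tanh-based approximation implemented below (matching the `f32` and `f64`
/// arms) is `0.5 * x * (1 + tanh(sqrt(2 / PI) * x * (1 + 0.044715 * x * x)))`.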
/// <https://en.wikipedia.org/wiki/Activation_function#Comparison_of_activation_functions> impl UnaryOpT for Gelu { const NAME: &'static str = "gelu"; const V: Self = Gelu; #[inline(always)] fn bf16(v: bf16) -> bf16 { bf16::from_f32_const(0.5) * v * (bf16::ONE + bf16::tanh( (bf16::from_f32_const(2.0) / bf16::PI).sqrt() * v * (bf16::ONE + bf16::from_f32_const(0.044715) * v * v), )) } #[inline(always)] fn f16(v: f16) -> f16 { f16::from_f32_const(0.5) * v * (f16::ONE + f16::tanh( (f16::from_f32_const(2.0) / f16::PI).sqrt() * v * (f16::ONE + f16::from_f32_const(0.044715) * v * v), )) } #[inline(always)] fn f32(v: f32) -> f32 { 0.5 * v * (1.0 + f32::tanh((2.0f32 / std::f32::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v))) } #[inline(always)] fn f64(v: f64) -> f64 { 0.5 * v * (1.0 + f64::tanh((2.0f64 / std::f64::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v))) } #[inline(always)] fn u8(_: u8) -> u8 { 0 } #[inline(always)] fn u32(_: u32) -> u32 { 0 } #[inline(always)] fn i64(_: i64) -> i64 { 0 } const KERNEL: &'static str = "ugelu"; #[cfg(feature = "mkl")] const F32_VEC: bool = true; #[cfg(feature = "mkl")] #[inline(always)] fn f32_vec(xs: &[f32], ys: &mut [f32]) { crate::mkl::vs_gelu(xs, ys) } #[cfg(feature = "mkl")] const F64_VEC: bool = true; #[cfg(feature = "mkl")] #[inline(always)] fn f64_vec(xs: &[f64], ys: &mut [f64]) { crate::mkl::vd_gelu(xs, ys) } #[cfg(feature = "accelerate")] const F32_VEC: bool = true; #[cfg(feature = "accelerate")] #[inline(always)] fn f32_vec(xs: &[f32], ys: &mut [f32]) { crate::accelerate::vs_gelu(xs, ys) } #[cfg(feature = "accelerate")] const F64_VEC: bool = true; #[cfg(feature = "accelerate")] #[inline(always)] fn f64_vec(xs: &[f64], ys: &mut [f64]) { crate::accelerate::vd_gelu(xs, ys) } } /// `erf` operation /// <https://en.wikipedia.org/wiki/Error_function> impl UnaryOpT for Erf { const NAME: &'static str = "erf"; const KERNEL: &'static str = "uerf"; const V: Self = Erf; #[inline(always)] fn bf16(v: bf16) -> bf16 { bf16::from_f64(Self::f64(v.to_f64())) } #[inline(always)] fn f16(v: f16) -> f16 { f16::from_f64(Self::f64(v.to_f64())) } #[inline(always)] fn f32(v: f32) -> f32 { Self::f64(v as f64) as f32 } #[inline(always)] fn f64(v: f64) -> f64 { crate::cpu::erf::erf(v) } #[inline(always)] fn u8(_: u8) -> u8 { 0 } #[inline(always)] fn u32(_: u32) -> u32 { 0 } #[inline(always)] fn i64(_: i64) -> i64 { 0 } } impl UnaryOpT for Abs { const NAME: &'static str = "abs"; const KERNEL: &'static str = "uabs"; const V: Self = Abs; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.abs() } #[inline(always)] fn f16(v: f16) -> f16 { v.abs() } #[inline(always)] fn f32(v: f32) -> f32 { v.abs() } #[inline(always)] fn f64(v: f64) -> f64 { v.abs() } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v.abs() } } impl UnaryOpT for Ceil { const NAME: &'static str = "ceil"; const KERNEL: &'static str = "uceil"; const V: Self = Ceil; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.ceil() } #[inline(always)] fn f16(v: f16) -> f16 { v.ceil() } #[inline(always)] fn f32(v: f32) -> f32 { v.ceil() } #[inline(always)] fn f64(v: f64) -> f64 { v.ceil() } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v } } impl UnaryOpT for Floor { const NAME: &'static str = "floor"; const KERNEL: &'static str = "ufloor"; const V: Self = Floor; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.floor() } #[inline(always)] fn f16(v: f16) -> f16 { 
v.floor() } #[inline(always)] fn f32(v: f32) -> f32 { v.floor() } #[inline(always)] fn f64(v: f64) -> f64 { v.floor() } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v } } impl UnaryOpT for Round { const NAME: &'static str = "round"; const KERNEL: &'static str = "uround"; const V: Self = Round; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.round() } #[inline(always)] fn f16(v: f16) -> f16 { v.round() } #[inline(always)] fn f32(v: f32) -> f32 { v.round() } #[inline(always)] fn f64(v: f64) -> f64 { v.round() } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v } } impl UnaryOpT for GeluErf { const NAME: &'static str = "gelu_erf"; const KERNEL: &'static str = "ugelu_erf"; const V: Self = GeluErf; #[inline(always)] fn bf16(v: bf16) -> bf16 { bf16::from_f64(Self::f64(v.to_f64())) } #[inline(always)] fn f16(v: f16) -> f16 { f16::from_f64(Self::f64(v.to_f64())) } #[inline(always)] fn f32(v: f32) -> f32 { Self::f64(v as f64) as f32 } #[inline(always)] fn f64(v: f64) -> f64 { (crate::cpu::erf::erf(v / 2f64.sqrt()) + 1.) * 0.5 * v } #[inline(always)] fn u8(_: u8) -> u8 { 0 } #[inline(always)] fn u32(_: u32) -> u32 { 0 } #[inline(always)] fn i64(_: i64) -> i64 { 0 } } impl UnaryOpT for Relu { const NAME: &'static str = "relu"; const KERNEL: &'static str = "urelu"; const V: Self = Relu; #[inline(always)] fn bf16(v: bf16) -> bf16 { v.max(bf16::ZERO) } #[inline(always)] fn f16(v: f16) -> f16 { v.max(f16::ZERO) } #[inline(always)] fn f32(v: f32) -> f32 { v.max(0f32) } #[inline(always)] fn f64(v: f64) -> f64 { v.max(0f64) } #[inline(always)] fn u8(v: u8) -> u8 { v } #[inline(always)] fn u32(v: u32) -> u32 { v } #[inline(always)] fn i64(v: i64) -> i64 { v } } /// `BackpropOp` is a wrapper around `Option<Op>`. The main goal is to ensure that dependencies are /// properly checked when creating a new value #[derive(Clone)] pub struct BackpropOp(Option<Op>); impl BackpropOp { pub(crate) fn none() -> Self { BackpropOp(None) } pub(crate) fn new1(arg: &Tensor, f: impl Fn(Tensor) -> Op) -> Self { let op = if arg.track_op() { Some(f(arg.clone())) } else { None }; Self(op) } pub(crate) fn new2(arg1: &Tensor, arg2: &Tensor, f: impl Fn(Tensor, Tensor) -> Op) -> Self { let op = if arg1.track_op() || arg2.track_op() { Some(f(arg1.clone(), arg2.clone())) } else { None }; Self(op) } pub(crate) fn new3( arg1: &Tensor, arg2: &Tensor, arg3: &Tensor, f: impl Fn(Tensor, Tensor, Tensor) -> Op, ) -> Self { let op = if arg1.track_op() || arg2.track_op() || arg3.track_op() { Some(f(arg1.clone(), arg2.clone(), arg3.clone())) } else { None }; Self(op) } pub(crate) fn new<A: AsRef<Tensor>>(args: &[A], f: impl Fn(Vec<Tensor>) -> Op) -> Self { let op = if args.iter().any(|arg| arg.as_ref().track_op()) { let args: Vec<Tensor> = args.iter().map(|arg| arg.as_ref().clone()).collect(); Some(f(args)) } else { None }; Self(op) } pub(crate) fn is_none(&self) -> bool { self.0.is_none() } } impl std::ops::Deref for BackpropOp { type Target = Option<Op>; fn deref(&self) -> &Self::Target { &self.0 } }
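// --- Expository sketch (not part of the upstream file) --- A hedged example of
// the `CustomOp1` trait defined above: an element-wise leaky ReLU with a CPU
// forward pass only, so the default (erroring) CUDA and Metal implementations
// are kept. `LeakyRelu` and the `0.01` slope are made up for the example; a
// real op would handle every dtype it supports and could also override `bwd`.
#[cfg(test)]
mod custom_op_sketch {
    use super::CustomOp1;
    use crate::{CpuStorage, Layout, Result, Shape};

    struct LeakyRelu;

    impl CustomOp1 for LeakyRelu {
        fn name(&self) -> &'static str {
            "leaky-relu"
        }

        fn cpu_fwd(&self, storage: &CpuStorage, layout: &Layout) -> Result<(CpuStorage, Shape)> {
            // This sketch only handles contiguous f32 inputs.
            let slice = storage.as_slice::<f32>()?;
            let src = match layout.contiguous_offsets() {
                Some((a, b)) => &slice[a..b],
                None => crate::bail!("this sketch requires a contiguous input"),
            };
            let dst: Vec<f32> = src
                .iter()
                .map(|&v| if v >= 0. { v } else { 0.01 * v })
                .collect();
            Ok((CpuStorage::F32(dst), layout.shape().clone()))
        }
    }
    // Usage would go through `Tensor::apply_op1`, e.g. `xs.apply_op1(LeakyRelu)?`.
}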
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/tensor.rs
//! Tensors are N-dimenional matrixes of elements using a single data type. #![allow(clippy::redundant_closure_call)] use crate::backend::{BackendDevice, BackendStorage}; use crate::op::{ BackpropOp, BinaryOp, CmpOp, CustomOp1, CustomOp2, CustomOp3, Op, ReduceOp, UnaryOp, }; use crate::scalar::TensorOrScalar; use crate::shape::{Dim, Dims}; use crate::{bail, storage::Storage, DType, Device, Error, Layout, Result, Shape}; use std::sync::{Arc, RwLock}; /// Unique identifier for tensors. #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub struct TensorId(usize); impl TensorId { fn new() -> Self { // https://users.rust-lang.org/t/idiomatic-rust-way-to-generate-unique-id/33805 use std::sync::atomic; static COUNTER: atomic::AtomicUsize = atomic::AtomicUsize::new(1); Self(COUNTER.fetch_add(1, atomic::Ordering::Relaxed)) } } pub struct Tensor_ { id: TensorId, // As we provide inner mutability on the tensor content, the alternatives are: // - Using a mutex, this would have the highest cost when retrieving the storage but would // prevent errors when concurrent access takes place. Mutex would also be subject to // deadlocks for example using the current code if the same tensor is used twice by a single // binary op. // - Using a refcell unsafe cell would have some intermediary cost, borrow checking would be // verified dynamically, but the resulting tensors would not be send or sync. // - Using an unsafe cell would have the lowest cost but undefined behavior on concurrent // accesses. // Ideally, we would use Arc<Storage> for tensors on which we don't plan on modifying the data // and Arc<Mutex<Storage>> for tensors where the data could be modified, e.g. variables but // that's tricky to encode in the current setup. storage: Arc<RwLock<Storage>>, layout: Layout, op: BackpropOp, is_variable: bool, dtype: DType, device: Device, } impl AsRef<Tensor> for Tensor { fn as_ref(&self) -> &Tensor { self } } // Tensors are refcounted so that cloning is cheap when building the op graph. // Storages are also refcounted independently so that its possible to avoid // copying the storage for operations that only modify the shape or stride. #[derive(Clone)] /// The core struct for manipulating tensors. /// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// /// let a = Tensor::arange(0f32, 6f32, &Device::Cpu)?.reshape((2, 3))?; /// let b = Tensor::arange(0f32, 12f32, &Device::Cpu)?.reshape((3, 4))?; /// /// let c = a.matmul(&b)?; /// # Ok::<(), candle_core::Error>(()) /// ``` /// /// Tensors are reference counted with [`Arc`] so cloning them is cheap. pub struct Tensor(Arc<Tensor_>); impl std::ops::Deref for Tensor { type Target = Tensor_; fn deref(&self) -> &Self::Target { self.0.as_ref() } } macro_rules! unary_op { ($fn_name:ident, $op_name:ident) => { pub fn $fn_name(&self) -> Result<Self> { let shape = self.shape(); let storage = self .storage() .unary_impl::<crate::op::$op_name>(self.layout())?; let op = BackpropOp::new1(self, |s| Op::Unary(s, UnaryOp::$op_name)); Ok(from_storage(storage, shape.clone(), op, false)) } }; } macro_rules! binary_op { ($fn_name:ident, $op_name:ident) => { pub fn $fn_name(&self, rhs: &Self) -> Result<Self> { let shape = self.same_shape_binary_op(rhs, stringify!($fn_name))?; let storage = self.storage().binary_impl::<crate::op::$op_name>( &*rhs.storage(), self.layout(), rhs.layout(), )?; let op = BackpropOp::new2(self, rhs, |t1, t2| Op::Binary(t1, t2, BinaryOp::$op_name)); Ok(from_storage(storage, shape.clone(), op, false)) } }; } macro_rules! 
binary_op_scalar { ($fn_name:ident, $op_name:ident) => { pub fn $fn_name<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> { let rhs = match rhs.to_tensor_scalar()? { crate::scalar::TensorScalar::Tensor(rhs) => rhs, crate::scalar::TensorScalar::Scalar(rhs) => rhs .to_dtype(self.dtype())? .to_device(self.device())? .broadcast_as(self.shape())?, }; let shape = self.same_shape_binary_op(&rhs, stringify!($fn_name))?; let storage = self.storage().binary_impl::<crate::op::$op_name>( &*rhs.storage(), self.layout(), rhs.layout(), )?; let op = BackpropOp::new2(self, &rhs, |t1, t2| Op::Binary(t1, t2, BinaryOp::$op_name)); Ok(from_storage(storage, shape.clone(), op, false)) } }; } macro_rules! broadcast_binary_op { ($fn_name:ident, $inner_fn_name:ident) => { pub fn $fn_name(&self, rhs: &Self) -> Result<Self> { let lhs = self; let shape = lhs .shape() .broadcast_shape_binary_op(rhs.shape(), stringify!($fn_name))?; let l_broadcast = shape != *lhs.shape(); let r_broadcast = shape != *rhs.shape(); match (l_broadcast, r_broadcast) { (true, true) => lhs .broadcast_as(&shape)? .$inner_fn_name(&rhs.broadcast_as(&shape)?), (false, true) => lhs.$inner_fn_name(&rhs.broadcast_as(&shape)?), (true, false) => lhs.broadcast_as(&shape)?.$inner_fn_name(rhs), (false, false) => lhs.$inner_fn_name(rhs), } } }; } /// Creates a fresh tensor structure based on a storage and a shape, this uses contiguous strides. pub(crate) fn from_storage<S: Into<Shape>>( storage: Storage, shape: S, op: BackpropOp, is_variable: bool, ) -> Tensor { let dtype = storage.dtype(); let device = storage.device(); let tensor_ = Tensor_ { id: TensorId::new(), storage: Arc::new(RwLock::new(storage)), layout: Layout::contiguous(shape), op, is_variable, dtype, device, }; Tensor(Arc::new(tensor_)) } impl Tensor { pub(crate) fn ones_impl<S: Into<Shape>>( shape: S, dtype: DType, device: &Device, is_variable: bool, ) -> Result<Self> { let none = BackpropOp::none(); let shape = shape.into(); let storage = device.ones(&shape, dtype)?; Ok(from_storage(storage, shape, none, is_variable)) } /// Creates a new tensor filled with ones. /// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// let a = Tensor::ones((2, 3), DType::F32, &Device::Cpu)?; /// let b = Tensor::from_slice(&[1.0f32, 1.0, 1.0, 1.0, 1.0, 1.0], (2, 3), &Device::Cpu)?; /// // a == b /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn ones<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> { Self::ones_impl(shape, dtype, device, false) } /// Creates a new tensor filled with ones with same shape, dtype, and device as the other tensor. /// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// let b = a.ones_like()?; /// // b == a + 1 /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn ones_like(&self) -> Result<Self> { Tensor::ones(self.shape(), self.dtype(), self.device()) } // Do not expose outside of the crate, the `is_variable=true` case should only be accessed from // the variable module. pub(crate) fn zeros_impl<S: Into<Shape>>( shape: S, dtype: DType, device: &Device, is_variable: bool, ) -> Result<Self> { let none = BackpropOp::none(); let shape = shape.into(); let storage = device.zeros(&shape, dtype)?; Ok(from_storage(storage, shape, none, is_variable)) } /// Creates a new tensor filled with zeros. 
/// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// let b = Tensor::from_slice(&[0.0f32, 0.0, 0.0, 0.0, 0.0, 0.0], (2, 3), &Device::Cpu)?; /// // a == b /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn zeros<S: Into<Shape>>(shape: S, dtype: DType, device: &Device) -> Result<Self> { Self::zeros_impl(shape, dtype, device, false) } /// Creates a new tensor filled with zeros with the same shape, dtype, and device as the other /// tensor. /// /// ```rust /// use candle_core::{Tensor, DType, Device}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// let b = a.zeros_like()?; /// // b is on CPU f32. /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn zeros_like(&self) -> Result<Self> { Tensor::zeros(self.shape(), self.dtype(), self.device()) } pub(crate) fn rand_impl<S: Into<Shape>, T: crate::FloatDType>( lo: T, up: T, s: S, device: &Device, is_variable: bool, ) -> Result<Self> { let s = s.into(); let storage = device.rand_uniform(lo, up, &s)?; let none = BackpropOp::none(); Ok(from_storage(storage, s, none, is_variable)) } pub(crate) fn rand_f64_impl<S: Into<Shape>>( lo: f64, up: f64, s: S, dtype: DType, device: &Device, is_variable: bool, ) -> Result<Self> { let s = s.into(); let storage = device.rand_uniform_f64(lo, up, &s, dtype)?; let none = BackpropOp::none(); Ok(from_storage(storage, s, none, is_variable)) } /// Creates a new tensor initialized with values sampled uniformly between `lo` and `up`. pub fn rand<S: Into<Shape>, T: crate::FloatDType>( lo: T, up: T, s: S, device: &Device, ) -> Result<Self> { Self::rand_impl(lo, up, s, device, false) } pub fn rand_like(&self, lo: f64, up: f64) -> Result<Self> { Tensor::rand_f64_impl(lo, up, self.shape(), self.dtype(), self.device(), false) } pub(crate) fn randn_impl<S: Into<Shape>, T: crate::FloatDType>( mean: T, std: T, s: S, device: &Device, is_variable: bool, ) -> Result<Self> { let s = s.into(); let storage = device.rand_normal(mean, std, &s)?; let none = BackpropOp::none(); Ok(from_storage(storage, s, none, is_variable)) } pub(crate) fn randn_f64_impl<S: Into<Shape>>( mean: f64, std: f64, s: S, dtype: DType, device: &Device, is_variable: bool, ) -> Result<Self> { let s = s.into(); let storage = device.rand_normal_f64(mean, std, &s, dtype)?; let none = BackpropOp::none(); Ok(from_storage(storage, s, none, is_variable)) } pub fn randn_like(&self, mean: f64, stdev: f64) -> Result<Self> { Tensor::randn_f64_impl( mean, stdev, self.shape(), self.dtype(), self.device(), false, ) } /// Creates a new tensor initialized with values sampled from a normal distribution with the /// specified `mean` and standard deviation `std`. pub fn randn<S: Into<Shape>, T: crate::FloatDType>( mean: T, std: T, s: S, device: &Device, ) -> Result<Self> { Self::randn_impl(mean, std, s, device, false) } pub(crate) fn new_impl<A: crate::device::NdArray>( array: A, shape: Shape, device: &Device, is_variable: bool, ) -> Result<Self> { let n: usize = shape.elem_count(); let buffer_size: usize = array.shape()?.elem_count(); if buffer_size != n { return Err(Error::ShapeMismatch { buffer_size, shape }.bt()); } let storage = device.storage(array)?; let none = BackpropOp::none(); Ok(from_storage(storage, shape, none, is_variable)) } /// Creates a new tensor on the specified device using the content and shape of the input. 
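///
/// ```rust
/// use candle_core::{Tensor, Device};
/// // A sketch of typical usage (expository addition): nested arrays fix both
/// // the dtype and the shape of the resulting tensor.
/// let a = Tensor::new(&[[1f32, 2.], [3., 4.]], &Device::Cpu)?;
/// assert_eq!(a.dims(), &[2, 2]);
/// # Ok::<(), candle_core::Error>(())
/// ```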
pub fn new<A: crate::device::NdArray>(array: A, device: &Device) -> Result<Self> { let shape = array.shape()?; Self::new_impl(array, shape, device, false) } /// Creates a new 1D tensor from an iterator. pub fn from_iter<D: crate::WithDType>( iter: impl IntoIterator<Item = D>, device: &Device, ) -> Result<Self> { let data = iter.into_iter().collect::<Vec<_>>(); let len = data.len(); Self::from_vec_impl(data, len, device, false) } /// Creates a new 1D tensor with values from the interval `[start, end)` taken with a common /// difference `1` from `start`. pub fn arange<D: crate::WithDType>(start: D, end: D, device: &Device) -> Result<Self> { Self::arange_step(start, end, D::one(), device) } /// Creates a new 1D tensor with values from the interval `[start, end)` taken with a common /// difference `step` from `start`. pub fn arange_step<D: crate::WithDType>( start: D, end: D, step: D, device: &Device, ) -> Result<Self> { if D::is_zero(&step) { crate::bail!("step cannot be zero") } let mut data = vec![]; let mut current = start; if step >= D::zero() { while current < end { data.push(current); current += step; } } else { while current > end { data.push(current); current += step; } } let len = data.len(); Self::from_vec_impl(data, len, device, false) } pub(crate) fn from_vec_impl<S: Into<Shape>, D: crate::WithDType>( data: Vec<D>, shape: S, device: &Device, is_variable: bool, ) -> Result<Self> { let shape = shape.into(); let buffer_size = data.len(); if buffer_size != shape.elem_count() { return Err(Error::ShapeMismatch { buffer_size, shape }.bt()); } let storage = device.storage_owned(data)?; let none = BackpropOp::none(); Ok(from_storage(storage, shape, none, is_variable)) } /// Creates a new tensor initialized with values from the input vector. The number of elements /// in this vector must be the same as the number of elements defined by the shape. /// If the device is cpu, no data copy is made. pub fn from_vec<S: Into<Shape>, D: crate::WithDType>( data: Vec<D>, shape: S, device: &Device, ) -> Result<Self> { Self::from_vec_impl(data, shape, device, false) } /// Creates a new tensor initialized with values from the input slice. The number of elements /// in this vector must be the same as the number of elements defined by the shape. pub fn from_slice<S: Into<Shape>, D: crate::WithDType>( array: &[D], shape: S, device: &Device, ) -> Result<Self> { Self::new_impl(array, shape.into(), device, false) } pub(crate) fn same_shape_binary_op(&self, rhs: &Self, op: &'static str) -> Result<&Shape> { let lhs = self.shape(); let rhs = rhs.shape(); if lhs != rhs { Err(Error::ShapeMismatchBinaryOp { lhs: lhs.clone(), rhs: rhs.clone(), op, } .bt()) } else { Ok(lhs) } } /// Returns true if the computation graph should track this op, that is if it is /// a variable or if it has some variable as dependencies. pub fn track_op(&self) -> bool { self.is_variable || self.op.is_some() } // TODO: Also make an inplace version or a pre-allocated? This could be tricky // if this can create cycles in the compute graph. 
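// A hedged usage sketch (expository addition) of the methods generated by the
// macro invocations below:
//     let c = a.add(&b)?;            // same-shape elementwise add
//     let d = a.broadcast_add(&b)?;  // broadcasting variant
//     let m = a.maximum(&b)?;        // also accepts scalars via TensorOrScalar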
binary_op!(add, Add); binary_op!(mul, Mul); binary_op!(sub, Sub); binary_op!(div, Div); binary_op_scalar!(maximum, Maximum); binary_op_scalar!(minimum, Minimum); broadcast_binary_op!(broadcast_add, add); broadcast_binary_op!(broadcast_mul, mul); broadcast_binary_op!(broadcast_sub, sub); broadcast_binary_op!(broadcast_div, div); broadcast_binary_op!(broadcast_maximum, maximum); broadcast_binary_op!(broadcast_minimum, minimum); broadcast_binary_op!(broadcast_eq, eq); broadcast_binary_op!(broadcast_ne, ne); broadcast_binary_op!(broadcast_lt, lt); broadcast_binary_op!(broadcast_le, le); broadcast_binary_op!(broadcast_gt, gt); broadcast_binary_op!(broadcast_ge, ge); unary_op!(recip, Recip); unary_op!(neg, Neg); unary_op!(exp, Exp); unary_op!(log, Log); unary_op!(sin, Sin); unary_op!(cos, Cos); unary_op!(tanh, Tanh); unary_op!(abs, Abs); unary_op!(sqr, Sqr); unary_op!(sqrt, Sqrt); unary_op!(gelu, Gelu); unary_op!(gelu_erf, GeluErf); unary_op!(erf, Erf); unary_op!(relu, Relu); unary_op!(ceil, Ceil); unary_op!(floor, Floor); unary_op!(round, Round); /// Rounds each element of the input tensor to the nearest integer. /// /// If the number of decimals is negative, it specifies the number of positions to the left of /// the decimal point. pub fn round_to(&self, decimals: i32) -> Result<Self> { let mult = 10f64.powi(decimals); (self * mult)?.round()? * (1f64 / mult) } /// Retrieves the single scalar value held in the tensor. If the tensor contains multiple /// dimensions, an error is returned instead. pub fn to_scalar<S: crate::WithDType>(&self) -> Result<S> { if self.rank() != 0 { Err(Error::UnexpectedNumberOfDims { expected: 0, got: self.rank(), shape: self.shape().clone(), } .bt())? } let from_cpu_storage = |cpu_storage: &crate::CpuStorage| { let data = S::cpu_storage_as_slice(cpu_storage)?; Ok::<_, Error>(data[self.layout().start_offset()]) }; match &*self.storage() { Storage::Cpu(cpu_storage) => from_cpu_storage(cpu_storage), Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?), Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?), } } /// An alias for `to_scalar`. pub fn to_vec0<S: crate::WithDType>(&self) -> Result<S> { self.to_scalar::<S>() } /// Repeat this tensor along the specified dimensions. pub fn repeat<S: Into<Shape>>(&self, shape: S) -> Result<Tensor> { // Similar to PyTorch, we extend the number of dimensions of self if needed. let repeats = shape.into(); let repeats = repeats.dims(); let mut inp = if self.rank() < repeats.len() { let shape = [vec![1; repeats.len() - self.rank()], self.dims().to_vec()].concat(); self.reshape(shape)? } else { self.clone() }; for (idx, &repeat) in repeats.iter().enumerate() { if repeat > 1 { inp = Tensor::cat(&vec![&inp; repeat], idx)? } } Ok(inp) } /// Creates grids of coordinates specified by the 1D inputs. /// /// # Arguments /// /// * `args` - A slice of 1D tensors. /// * `xy_indexing` - Whether to use xy indexing or ij indexing. If xy is selected, the /// first dimension corresponds to the cardinality of the second input and the second /// dimension corresponds to the cardinality of the first input. If ij is selected, the /// dimensions are in the same order as the cardinality of the inputs. 
    ///
    /// # Examples
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device, Shape};
    /// let x = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
    /// let y = Tensor::new(&[4f32, 5., 6.], &Device::Cpu)?;
    ///
    /// let grids_xy = Tensor::meshgrid(&[&x, &y], true)?;
    ///
    /// assert_eq!(grids_xy.len(), 2);
    /// assert_eq!(grids_xy[0].dims(), &[3, 3]);
    ///
    /// assert_eq!(grids_xy[0].to_vec2::<f32>()?, &[[1., 2., 3.], [1., 2., 3.], [1., 2., 3.]]);
    /// assert_eq!(grids_xy[1].to_vec2::<f32>()?, &[[4., 4., 4.], [5., 5., 5.], [6., 6., 6.]]);
    ///
    /// let grids_ij = Tensor::meshgrid(&[&x, &y], false)?;
    ///
    /// assert_eq!(grids_ij[0].to_vec2::<f32>()?, &[[1., 1., 1.], [2., 2., 2.], [3., 3., 3.]]);
    /// assert_eq!(grids_ij[1].to_vec2::<f32>()?, &[[4., 5., 6.], [4., 5., 6.], [4., 5., 6.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```
    ///
    /// # Errors
    ///
    /// * Will return `Err` if `args` contains fewer than 2 tensors.
    ///
    pub fn meshgrid<A: AsRef<Tensor>>(args: &[A], xy_indexing: bool) -> Result<Vec<Self>> {
        if args.len() <= 1 {
            Err(Error::OpRequiresAtLeastTwoTensors { op: "meshgrid" }.bt())?
        }
        let args: Vec<_> = if xy_indexing {
            args.iter().rev().collect()
        } else {
            args.iter().collect()
        };
        let mut shape = Vec::with_capacity(args.len());
        for arg in args.iter() {
            shape.push(arg.as_ref().dims1()?)
        }
        let mut grids = Vec::with_capacity(args.len());
        for idx in 0..args.len() {
            let mut ones = vec![1usize; args.len()];
            ones[idx] = shape[idx];
            let arg = args[idx].as_ref().reshape(ones)?;
            let mut repeats = shape.clone();
            repeats[idx] = 1;
            let repeated_tensor = arg.repeat(repeats)?;
            grids.push(repeated_tensor);
        }
        if xy_indexing {
            grids.reverse();
        }
        Ok(grids)
    }

    /// This operation multiplies the input tensor by `mul` then adds `add` and returns the result.
    /// The input values `mul` and `add` are cast to the appropriate type so some rounding might
    /// be performed.
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?;
    /// let a = a.affine(4., -2.)?;
    /// assert_eq!(a.to_vec2::<f32>()?, &[[-2.0, 2.0], [6.0, 10.0]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```
    pub fn affine(&self, mul: f64, add: f64) -> Result<Self> {
        let storage = self.storage().affine(self.layout(), mul, add)?;
        let op = BackpropOp::new1(self, |arg| Op::Affine { arg, mul, add });
        Ok(from_storage(storage, self.shape(), op, false))
    }

    /// Applies the Exponential Linear Unit (ELU) function on each element of the input tensor.
    pub fn elu(&self, alpha: f64) -> Result<Self> {
        let storage = self.storage().elu(self.layout(), alpha)?;
        let op = BackpropOp::new1(self, |t| Op::Elu(t, alpha));
        Ok(from_storage(storage, self.shape(), op, false))
    }

    /// Raise the tensor to some float exponent `e`.
    pub fn powf(&self, e: f64) -> Result<Self> {
        let storage = self.storage().powf(self.layout(), e)?;
        let op = BackpropOp::new1(self, |t| Op::Powf(t, e));
        Ok(from_storage(storage, self.shape(), op, false))
    }

    fn check_dim(&self, dim: usize, op: &'static str) -> Result<()> {
        if dim >= self.dims().len() {
            Err(Error::DimOutOfRange {
                shape: self.shape().clone(),
                dim: dim as i32,
                op,
            }
            .bt())?
        } else {
            Ok(())
        }
    }

    /// Split a tensor into the specified number of chunks, this may return fewer chunks than
    /// specified.
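    ///
    /// A small example of the even-split case (when the sizes do not divide evenly, the leading
    /// chunks each receive one extra element):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let t = Tensor::arange(0u32, 6u32, &Device::Cpu)?;
    /// let chunks = t.chunk(2, 0)?;
    /// assert_eq!(chunks.len(), 2);
    /// assert_eq!(chunks[0].to_vec1::<u32>()?, &[0, 1, 2]);
    /// assert_eq!(chunks[1].to_vec1::<u32>()?, &[3, 4, 5]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```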
    pub fn chunk<D: Dim>(&self, chunks: usize, dim: D) -> Result<Vec<Self>> {
        let dim = dim.to_index(self.shape(), "chunk")?;
        let size = self.dim(dim)?;
        if size < chunks {
            (0..size).map(|i| self.narrow(dim, i, 1)).collect()
        } else {
            let chunk_size = size / chunks;
            let cnt_additional = size % chunks;
            let mut tensors = vec![];
            let mut sum_chunk_size = 0;
            for i in 0..chunks {
                let chunk_size = if i < cnt_additional {
                    chunk_size + 1
                } else {
                    chunk_size
                };
                let tensor = self.narrow(dim, sum_chunk_size, chunk_size)?;
                tensors.push(tensor);
                sum_chunk_size += chunk_size
            }
            Ok(tensors)
        }
    }

    /// Returns a new tensor that is a narrowed version of the input where the dimension `dim`
    /// ranges from `start` to `start + len`.
    pub fn narrow<D: Dim>(&self, dim: D, start: usize, len: usize) -> Result<Self> {
        let dims = self.dims();
        let dim = dim.to_index(self.shape(), "narrow")?;
        let err = |msg| {
            Err::<(), _>(
                Error::NarrowInvalidArgs {
                    shape: self.shape().clone(),
                    dim,
                    start,
                    len,
                    msg,
                }
                .bt(),
            )
        };
        if start > dims[dim] {
            err("start > dim_len")?
        }
        if start.saturating_add(len) > dims[dim] {
            err("start + len > dim_len")?
        }
        if start == 0 && dims[dim] == len {
            Ok(self.clone())
        } else {
            let op = BackpropOp::new1(self, |t| Op::Narrow(t, dim, start, len));
            let layout = self.layout().narrow(dim, start, len)?;
            let tensor_ = Tensor_ {
                id: TensorId::new(),
                storage: self.storage.clone(),
                layout,
                op,
                is_variable: false,
                dtype: self.dtype,
                device: self.device.clone(),
            };
            Ok(Tensor(Arc::new(tensor_)))
        }
    }

    fn squeeze_dims(self, dims: &[usize]) -> Result<Self> {
        match dims {
            [] => Ok(self),
            [i] => self.squeeze(*i),
            dims => {
                let dims = self
                    .dims()
                    .iter()
                    .enumerate()
                    .filter_map(|(dim_idx, &v)| {
                        if dims.contains(&dim_idx) {
                            None
                        } else {
                            Some(v)
                        }
                    })
                    .collect::<Vec<_>>();
                self.reshape(dims)
            }
        }
    }

    fn reduce_impl<D: Dim>(&self, dim: D, keepdim: bool, op: ReduceOp) -> Result<Self> {
        let dim = dim.to_index(self.shape(), op.name())?;
        let storage = self.storage().reduce_op(op, self.layout(), &[dim])?;
        let mut dims = self.dims().to_vec();
        dims[dim] = 1;
        let op = match op {
            ReduceOp::Sum | ReduceOp::Min | ReduceOp::Max => {
                BackpropOp::new1(self, |arg| Op::Reduce(arg, op, dims.to_vec()))
            }
            ReduceOp::ArgMin | ReduceOp::ArgMax => BackpropOp::none(),
        };
        let res = from_storage(storage, dims, op, false);
        if keepdim {
            Ok(res)
        } else {
            res.squeeze_dims(&[dim])
        }
    }

    fn sum_impl<D: Dims>(&self, sum_dims: D, keepdim: bool) -> Result<Self> {
        let sum_dims = sum_dims.to_indexes(self.shape(), "sum")?;
        let storage = self
            .storage()
            .reduce_op(ReduceOp::Sum, self.layout(), &sum_dims)?;
        let mut dims = self.dims().to_vec();
        for &sum_dim in sum_dims.iter() {
            dims[sum_dim] = 1
        }
        let op = BackpropOp::new1(self, |a| Op::Reduce(a, ReduceOp::Sum, dims.to_vec()));
        let sum = from_storage(storage, dims, op, false);
        if keepdim {
            Ok(sum)
        } else {
            sum.squeeze_dims(&sum_dims)
        }
    }

    /// Returns the sum of elements of the input tensor over the dimensions given in `sum_dims`.
    ///
    /// The resulting tensor has a shape that is similar to the shape of the input tensor, except
    /// that the number of elements for each dimension index in `sum_dims` is 1.
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?;
    /// let s = a.sum_keepdim(0)?;
    /// assert_eq!(s.to_vec2::<f32>()?, &[[2., 4.]]);
    /// let s = a.sum_keepdim(1)?;
    /// assert_eq!(s.to_vec2::<f32>()?, &[[1.], [5.]]);
    /// let s = a.sum_keepdim((0, 1))?;
    /// assert_eq!(s.to_vec2::<f32>()?, &[[6.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```
    pub fn sum_keepdim<D: Dims>(&self, sum_dims: D) -> Result<Self> {
        self.sum_impl(sum_dims, true)
    }

    /// Returns the sum of elements of the input tensor over the given dimensions; compared to
    /// `sum_keepdim` these dimensions are squeezed rather than kept.
    pub fn sum<D: Dims>(&self, sum_dims: D) -> Result<Self> {
        self.sum_impl(sum_dims, false)
    }

    /// Returns the mean of elements of the input tensor over the dimensions given in `mean_dims`.
    ///
    /// The resulting tensor has a shape that is similar to the shape of the input tensor, except
    /// that the number of elements for each dimension index in `mean_dims` is 1.
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[[0f32, 1.], [2., 3.]], &Device::Cpu)?;
    /// let s = a.mean_keepdim(0)?;
    /// assert_eq!(s.to_vec2::<f32>()?, &[[1., 2.]]);
    /// let s = a.mean_keepdim(1)?;
    /// assert_eq!(s.to_vec2::<f32>()?, &[[0.5], [2.5]]);
    /// let s = a.mean_keepdim((0, 1))?;
    /// assert_eq!(s.to_vec2::<f32>()?, &[[1.5]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```
    pub fn mean_keepdim<D: Dims>(&self, mean_dims: D) -> Result<Self> {
        let mean_dims = mean_dims.to_indexes(self.shape(), "mean-keepdim")?;
        let reduced_dim: usize = mean_dims.iter().map(|i| self.dims()[*i]).product();
        let scale = 1f64 / (reduced_dim as f64);
        self.sum_impl(mean_dims, true)? * scale
    }

    /// Returns the mean of elements of the input tensor over the given dimensions; compared to
    /// `mean_keepdim` these dimensions are squeezed rather than kept.
    pub fn mean<D: Dims>(&self, mean_dims: D) -> Result<Self> {
        let mean_dims = mean_dims.to_indexes(self.shape(), "mean")?;
        let reduced_dim: usize = mean_dims.iter().map(|i| self.dims()[*i]).product();
        let scale = 1f64 / (reduced_dim as f64);
        self.sum_impl(mean_dims, false)? * scale
    }

    /// Returns the unbiased variance over the selected dimension.
    pub fn var_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
        let dim = dim.to_index(self.shape(), "var")?;
        let mean = self.mean_keepdim(dim)?;
        let squares = self.broadcast_sub(&mean)?.sqr()?;
        squares.sum_impl(dim, true)? / (self.dim(dim)? - 1) as f64
    }

    /// Returns the unbiased variance over the selected dimension.
    pub fn var<D: Dim>(&self, dim: D) -> Result<Self> {
        let dim = dim.to_index(self.shape(), "var")?;
        self.var_keepdim(dim)?.squeeze(dim)
    }

    /// Gathers the maximum value across the selected dimension. The resulting shape has the same
    /// number of dimensions as the original tensor and the selected dimension has a single
    /// element.
    pub fn max_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
        self.reduce_impl(dim, true, ReduceOp::Max)
    }

    /// Similar to `max_keepdim` but the target dimension is squeezed.
    pub fn max<D: Dim>(&self, dim: D) -> Result<Self> {
        self.reduce_impl(dim, false, ReduceOp::Max)
    }

    /// Gathers the minimum value across the selected dimension. The resulting shape has the same
    /// number of dimensions as the original tensor and the selected dimension has a single
    /// element.
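    ///
    /// For example, taking the minimum over the first dimension:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let a = Tensor::new(&[[3f32, 1.], [2., 4.]], &Device::Cpu)?;
    /// let m = a.min_keepdim(0)?;
    /// assert_eq!(m.to_vec2::<f32>()?, &[[2., 1.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```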
    pub fn min_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
        self.reduce_impl(dim, true, ReduceOp::Min)
    }

    /// Similar to `min_keepdim` but the target dimension is squeezed.
    pub fn min<D: Dim>(&self, dim: D) -> Result<Self> {
        self.reduce_impl(dim, false, ReduceOp::Min)
    }

    /// Gathers the indexes of the maximum value across the selected dimension. The resulting
    /// shape has the same number of dimensions as the original tensor and the selected dimension
    /// has a single element.
    pub fn argmax_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
        self.reduce_impl(dim, true, ReduceOp::ArgMax)
    }

    /// Similar to `argmax_keepdim` but the target dimension is squeezed.
    pub fn argmax<D: Dim>(&self, dim: D) -> Result<Self> {
        self.reduce_impl(dim, false, ReduceOp::ArgMax)
    }

    /// Gathers the indexes of the minimum value across the selected dimension. The resulting
    /// shape has the same number of dimensions as the original tensor and the selected dimension
    /// has a single element.
    pub fn argmin_keepdim<D: Dim>(&self, dim: D) -> Result<Self> {
        self.reduce_impl(dim, true, ReduceOp::ArgMin)
    }

    /// Similar to `argmin_keepdim` but the target dimension is squeezed.
    pub fn argmin<D: Dim>(&self, dim: D) -> Result<Self> {
        self.reduce_impl(dim, false, ReduceOp::ArgMin)
    }

    /// Element-wise comparison between two tensors, e.g. equality, greater than, ... The actual
    /// comparison operation is specified by the `op` argument.
    ///
    /// The returned tensor has the same shape as the original tensors and uses `u8` elements.
    pub fn cmp<T: TensorOrScalar>(&self, rhs: T, op: CmpOp) -> Result<Self> {
        let rhs = match rhs.to_tensor_scalar()? {
            crate::scalar::TensorScalar::Tensor(rhs) => rhs,
            crate::scalar::TensorScalar::Scalar(rhs) => rhs
                .to_dtype(self.dtype())?
                .to_device(self.device())?
                .broadcast_as(self.shape())?,
        };
        let shape = self.same_shape_binary_op(&rhs, "cmp")?;
        let storage = self
            .storage()
            .cmp(op, &rhs.storage(), self.layout(), rhs.layout())?;
        let op = BackpropOp::new1(self, |a| Op::Cmp(a, op));
        Ok(from_storage(storage, shape.dims(), op, false))
    }

    /// Element-wise equality.
    pub fn eq<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
        self.cmp(rhs, CmpOp::Eq)
    }

    /// Element-wise non-equality.
    pub fn ne<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
        self.cmp(rhs, CmpOp::Ne)
    }

    /// Element-wise comparison with lower-than, the returned tensor uses value 1 where `self <
    /// rhs` and 0 otherwise.
    pub fn lt<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
        self.cmp(rhs, CmpOp::Lt)
    }

    /// Element-wise comparison with greater-than, the returned tensor uses value 1 where `self >
    /// rhs` and 0 otherwise.
    pub fn gt<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
        self.cmp(rhs, CmpOp::Gt)
    }

    /// Element-wise comparison with greater-equal, the returned tensor uses value 1 where `self >=
    /// rhs` and 0 otherwise.
    pub fn ge<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
        self.cmp(rhs, CmpOp::Ge)
    }

    /// Element-wise comparison with lower-equal, the returned tensor uses value 1 where `self <=
    /// rhs` and 0 otherwise.
    pub fn le<T: TensorOrScalar>(&self, rhs: T) -> Result<Self> {
        self.cmp(rhs, CmpOp::Le)
    }

    /// Clamp the tensor values to be between `min` and `max`.
    pub fn clamp<T1: TensorOrScalar, T2: TensorOrScalar>(&self, min: T1, max: T2) -> Result<Self> {
        self.maximum(min)?.minimum(max)
    }

    /// Interpolate the input tensor to the `target_size` size, taking the value of the nearest element.
    ///
    /// The input tensor should have three dimensions, `(batch, channels, l)`, the returned
    /// tensor also has three dimensions, `(batch, channels, target_size)`.
    pub fn interpolate1d(&self, target_size: usize) -> Result<Self> {
        let (n, c, _l) = self.dims3()?;
        let op = BackpropOp::new1(self, Op::UpsampleNearest1D);
        let storage = self
            .storage()
            .upsample_nearest1d(self.layout(), target_size)?;
        Ok(from_storage(storage, (n, c, target_size), op, false))
    }

    /// Alias for `interpolate1d`.
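    ///
    /// A shape-level sketch (the exact output values depend on the nearest-neighbor sampling
    /// grid):
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[[[1f32, 2.]]], &Device::Cpu)?;
    /// assert_eq!(t.upsample_nearest1d(4)?.dims(), &[1, 1, 4]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```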
pub fn upsample_nearest1d(&self, target_size: usize) -> Result<Self> { self.interpolate1d(target_size) } /// Interpolate the input tensor to the `(target_h, target_w)` size, taking the value of the /// nearest element. /// /// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned /// tensor also has four dimensions, `(batch, channels, target_h, target_w)`. pub fn interpolate2d(&self, target_h: usize, target_w: usize) -> Result<Self> { let (n, c, _h, _w) = self.dims4()?; let op = BackpropOp::new1(self, Op::UpsampleNearest2D); let storage = self .storage() .upsample_nearest2d(self.layout(), target_h, target_w)?; Ok(from_storage(storage, (n, c, target_h, target_w), op, false)) } /// Alias for `interpolate2d`. pub fn upsample_nearest2d(&self, target_h: usize, target_w: usize) -> Result<Self> { self.interpolate2d(target_h, target_w) } /// 2D average pooling over an input tensor with multiple channels. /// /// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned /// tensor also has four dimensions, `(batch, channels, h', w')`. The pooling is performed on /// the two last dimensions using a kernel of size `sz`. The returned element is the average /// value over the kernel window. pub fn avg_pool2d<T: crate::ToUsize2>(&self, sz: T) -> Result<Self> { let sz = sz.to_usize2(); self.avg_pool2d_with_stride(sz, sz) } /// Same as `avg_pool2d` but with a `stride` that can be set to a value different from the /// kernel size. pub fn avg_pool2d_with_stride<T: crate::ToUsize2>( &self, kernel_size: T, stride: T, ) -> Result<Self> { let kernel_size = kernel_size.to_usize2(); let stride = stride.to_usize2(); let (n, c, h, w) = self.dims4()?; // https://pytorch.org/docs/stable/generated/torch.nn.AvgPool2d.html#torch.nn.AvgPool2d let h_out = (h - kernel_size.0) / stride.0 + 1; let w_out = (w - kernel_size.1) / stride.1 + 1; let op = BackpropOp::new1(self, |arg| Op::AvgPool2D { arg, kernel_size, stride, }); let storage = self .storage() .avg_pool2d(self.layout(), kernel_size, stride)?; Ok(from_storage(storage, (n, c, h_out, w_out), op, false)) } /// 2D max pooling over an input tensor with multiple channels. /// /// The input tensor should have four dimensions, `(batch, channels, h, w)`, the returned /// tensor also has four dimensions, `(batch, channels, h', w')`. The pooling is performed on /// the two last dimensions using a kernel of size `sz`, the returned element is the maximum /// value over the kernel window. pub fn max_pool2d<T: crate::ToUsize2>(&self, sz: T) -> Result<Self> { let sz = sz.to_usize2(); self.max_pool2d_with_stride(sz, sz) } /// Same as `max_pool2d` but with a `stride` that can be set to a value different from the /// kernel size. pub fn max_pool2d_with_stride<T: crate::ToUsize2>( &self, kernel_size: T, stride: T, ) -> Result<Self> { let kernel_size = kernel_size.to_usize2(); let stride = stride.to_usize2(); let (n, c, h, w) = self.dims4()?; // https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html#torch.nn.MaxPool2d let h_out = (h - kernel_size.0) / stride.0 + 1; let w_out = (w - kernel_size.1) / stride.1 + 1; let op = BackpropOp::new1(self, |arg| Op::MaxPool2D { arg, kernel_size, stride, }); let storage = self .storage() .max_pool2d(self.layout(), kernel_size, stride)?; Ok(from_storage(storage, (n, c, h_out, w_out), op, false)) } /// Returns the matrix-multiplication of the input tensor with the other provided tensor. /// /// # Arguments /// /// * `self` - A tensor with dimensions `b1, b2, ..., bi, m, k`. 
    /// * `rhs` - A tensor with dimensions `b1, b2, ..., bi, k, n`.
    ///
    /// The resulting tensor has dimensions `b1, b2, ..., bi, m, n`.
    pub fn matmul(&self, rhs: &Self) -> Result<Self> {
        let a_dims = self.shape().dims();
        let b_dims = rhs.shape().dims();
        let dim = a_dims.len();
        if dim < 2 || b_dims.len() != dim {
            Err(Error::ShapeMismatchBinaryOp {
                lhs: self.shape().clone(),
                rhs: rhs.shape().clone(),
                op: "matmul",
            }
            .bt())?
        }
        let m = a_dims[dim - 2];
        let k = a_dims[dim - 1];
        let k2 = b_dims[dim - 2];
        let n = b_dims[dim - 1];
        let c_shape = Shape::from(&a_dims[..dim - 2]).extend(&[m, n]);
        let batching: usize = a_dims[..dim - 2].iter().product();
        let batching_b: usize = b_dims[..dim - 2].iter().product();
        if k != k2 || batching != batching_b {
            Err(Error::ShapeMismatchBinaryOp {
                lhs: self.shape().clone(),
                rhs: rhs.shape().clone(),
                op: "matmul",
            }
            .bt())?
        }
        let storage = self.storage().matmul(
            &rhs.storage(),
            (batching, m, n, k),
            self.layout(),
            rhs.layout(),
        )?;
        let op = BackpropOp::new2(self, rhs, Op::Matmul);
        Ok(from_storage(storage, c_shape, op, false))
    }

    /// Matrix-multiplication with broadcasting support.
    ///
    /// Compared to `matmul` the two matrices are allowed to have different dimensions as long as
    /// they are compatible for broadcast. E.g. if `self` has shape `(j, 1, n, k)` and `rhs` has
    /// shape `(l, k, m)`, the output will have shape `(j, l, n, m)`.
    pub fn broadcast_matmul(&self, rhs: &Self) -> Result<Self> {
        let lhs = self;
        let (l_shape, r_shape) = lhs.shape().broadcast_shape_matmul(rhs.shape())?;
        let l_broadcast = l_shape != *lhs.shape();
        let r_broadcast = r_shape != *rhs.shape();
        // TODO: Avoid concretising the broadcasted matrices via contiguous.
        match (l_broadcast, r_broadcast) {
            (true, true) => lhs
                .broadcast_as(&l_shape)?
                .contiguous()?
                .matmul(&rhs.broadcast_as(&r_shape)?.contiguous()?),
            (false, true) => lhs.matmul(&rhs.broadcast_as(&r_shape)?.contiguous()?),
            (true, false) => lhs.broadcast_as(&l_shape)?.contiguous()?.matmul(rhs),
            (false, false) => lhs.matmul(rhs),
        }
    }

    /// Returns a tensor with the same shape as the input tensor, the values are taken from
    /// `on_true` if the input tensor value is not zero, and `on_false` at the positions where the
    /// input tensor is equal to zero.
    pub fn where_cond(&self, on_true: &Self, on_false: &Self) -> Result<Self> {
        let _shape = self.same_shape_binary_op(on_true, "where_cond")?;
        let shape = self.same_shape_binary_op(on_false, "where_cond")?;
        let storage = self.storage().where_cond(
            self.layout(),
            &on_true.storage(),
            on_true.layout(),
            &on_false.storage(),
            on_false.layout(),
        )?;
        let op = BackpropOp::new3(self, on_true, on_false, Op::WhereCond);
        Ok(from_storage(storage, shape, op, false))
    }

    /// Returns a tensor with the values from the `self` tensor at the index corresponding to the
    /// values held in the `ids` tensor.
    ///
    /// # Arguments
    ///
    /// * `self` - A tensor with dimensions `v, h`.
    /// * `ids` - A tensor with dimensions `s` and with integer values between 0 and v (exclusive).
    ///
    /// The resulting tensor has dimensions `s, h`. `s` is called the sequence length, `v` the
    /// vocabulary size, and `h` the hidden size.
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let values = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
    /// let ids = Tensor::new(&[2u32, 1u32, 2u32], &Device::Cpu)?;
    /// let emb = values.embedding(&ids)?;
    /// assert_eq!(emb.to_vec2::<f32>()?, &[[4., 5.], [2., 3.], [4., 5.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```
    pub fn embedding(&self, ids: &Self) -> Result<Self> {
        if self.rank() != 2 || ids.rank() != 1 {
            Err(Error::ShapeMismatchBinaryOp {
                lhs: self.shape().clone(),
                rhs: ids.shape().clone(),
                op: "embedding",
            }
            .bt())?
        }
        self.index_select(ids, 0)
    }

    /// Returns a new tensor obtained by adding the values from `source` into `self` at the
    /// positions given by `indexes` along the dimension `dim`; `indexes` must have the same
    /// shape as `source`.
    pub fn scatter_add<D: Dim>(&self, indexes: &Self, source: &Self, dim: D) -> Result<Self> {
        let dim = dim.to_index(self.shape(), "scatter-add")?;
        let source_dims = source.dims();
        let self_dims = self.dims();
        let mismatch = if source_dims.len() != self_dims.len() {
            true
        } else {
            let mut mismatch = false;
            for (i, (&d1, &d2)) in self_dims.iter().zip(source_dims.iter()).enumerate() {
                if i != dim && d1 != d2 {
                    mismatch = true;
                    break;
                }
            }
            mismatch
        };
        if mismatch {
            Err(Error::ShapeMismatchBinaryOp {
                op: "scatter-add (self, src)",
                lhs: self.shape().clone(),
                rhs: source.shape().clone(),
            }
            .bt())?
        }
        if indexes.dims() != source.dims() {
            Err(Error::ShapeMismatchBinaryOp {
                op: "scatter-add (indexes, src)",
                lhs: indexes.shape().clone(),
                rhs: source.shape().clone(),
            }
            .bt())?
        }
        let storage = self.storage().scatter_add(
            self.layout(),
            &indexes.storage(),
            indexes.layout(),
            &source.storage(),
            source.layout(),
            dim,
        )?;
        let op = BackpropOp::new3(self, indexes, source, |t1, t2, t3| {
            Op::ScatterAdd(t1, t2, t3, dim)
        });
        Ok(from_storage(storage, self.shape(), op, false))
    }

    /// Embeds the values of the `src` tensor into the `self` tensor on the specified dimension.
    pub fn slice_scatter<D: Dim>(&self, src: &Self, dim: D, start: usize) -> Result<Self> {
        let dim = dim.to_index(self.shape(), "slice-scatter")?;
        if dim == 0 {
            self.slice_scatter0(src, start)
        } else {
            // TODO: Maybe we want to add a more efficient implementation at some point.
            self.transpose(0, dim)?
                .slice_scatter0(&src.transpose(0, dim)?, start)?
                .transpose(0, dim)
        }
    }

    /// Embeds the values of the `src` tensor into the `self` tensor on the first dimension.
    pub fn slice_scatter0(&self, src: &Self, start: usize) -> Result<Self> {
        if self.dtype() != src.dtype() {
            Err(Error::DTypeMismatchBinaryOp {
                lhs: self.dtype(),
                rhs: src.dtype(),
                op: "slice-scatter",
            }
            .bt())?
        }
        if self.device().location() != src.device().location() {
            Err(Error::DeviceMismatchBinaryOp {
                lhs: self.device().location(),
                rhs: src.device().location(),
                op: "slice-scatter",
            }
            .bt())?
        }
        if self.rank() != src.rank() {
            Err(Error::UnexpectedNumberOfDims {
                expected: self.rank(),
                got: src.rank(),
                shape: src.shape().clone(),
            }
            .bt())?
        }
        let shape_ok =
            self.dims()
                .iter()
                .zip(src.dims().iter())
                .enumerate()
                .all(|(dim_idx, (&d1, &d2))| {
                    if 0 == dim_idx {
                        d2 + start <= d1
                    } else {
                        d1 == d2
                    }
                });
        if !shape_ok {
            Err(Error::ShapeMismatchBinaryOp {
                op: "slice-scatter (self, src)",
                lhs: self.shape().clone(),
                rhs: src.shape().clone(),
            }
            .bt())?
        }
        let mut storage = self.device().zeros(self.shape(), self.dtype())?;
        self.storage()
            .copy_strided_src(&mut storage, 0, self.layout())?;
        let offset = start * src.dims()[1..].iter().product::<usize>();
        src.storage()
            .copy_strided_src(&mut storage, offset, src.layout())?;
        let op = BackpropOp::new2(self, src, |t1, t2| Op::SliceScatter0(t1, t2, start));
        Ok(from_storage(storage, self.shape(), op, false))
    }

    /// Accumulate elements from `source` at the indexes given by `indexes` and add them to
    /// `self`.
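    ///
    /// For example, with 1D tensors, `self[indexes[i]] += source[i]`:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[0f32, 0., 0.], &Device::Cpu)?;
    /// let source = Tensor::new(&[1f32, 2.], &Device::Cpu)?;
    /// let indexes = Tensor::new(&[0u32, 2], &Device::Cpu)?;
    /// let t = t.index_add(&indexes, &source, 0)?;
    /// assert_eq!(t.to_vec1::<f32>()?, &[1., 0., 2.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```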
    pub fn index_add<D: Dim>(&self, indexes: &Self, source: &Self, dim: D) -> Result<Self> {
        let dim = dim.to_index(self.shape(), "index-add")?;
        let source_dims = source.dims();
        let self_dims = self.dims();
        let mismatch = if source_dims.len() != self_dims.len() {
            true
        } else {
            let mut mismatch = false;
            for (i, (&d1, &d2)) in self_dims.iter().zip(source_dims.iter()).enumerate() {
                if i != dim && d1 != d2 {
                    mismatch = true;
                    break;
                }
            }
            mismatch
        };
        if mismatch {
            Err(Error::ShapeMismatchBinaryOp {
                op: "index-add (self, source)",
                lhs: self.shape().clone(),
                rhs: source.shape().clone(),
            }
            .bt())?
        }
        // The number of elements in indexes must match the dimension on which the add is
        // performed on the source tensor (and the index values from `indexes` are taken from
        // the target tensor self)
        let indexes_len = indexes.dims1()?;
        if source_dims[dim] != indexes_len {
            Err(Error::ShapeMismatchBinaryOp {
                op: "index-add (indexes, source)",
                lhs: indexes.shape().clone(),
                rhs: source.shape().clone(),
            }
            .bt())?
        }
        let storage = self.storage().index_add(
            self.layout(),
            &indexes.storage(),
            indexes.layout(),
            &source.storage(),
            source.layout(),
            dim,
        )?;
        let op = BackpropOp::new3(self, indexes, source, |t1, t2, t3| {
            Op::IndexAdd(t1, t2, t3, dim)
        });
        Ok(from_storage(storage, self.shape(), op, false))
    }

    /// Gather values across the target dimension.
    ///
    /// # Arguments
    ///
    /// * `self` - The input tensor.
    /// * `indexes` - The indices of elements to gather, this should have the same shape as `self`
    /// but can have a different number of elements on the target dimension.
    /// * `dim` - the target dimension.
    ///
    /// The resulting tensor has the same shape as `indexes` and uses values from `self` indexed on
    /// dimension `dim` by the values in `indexes`.
    pub fn gather<D: Dim>(&self, indexes: &Self, dim: D) -> Result<Self> {
        let dim = dim.to_index(self.shape(), "gather")?;
        let self_dims = self.dims();
        let indexes_dims = indexes.dims();
        let mismatch = if indexes_dims.len() != self_dims.len() {
            true
        } else {
            let mut mismatch = false;
            for (i, (&d1, &d2)) in self_dims.iter().zip(indexes_dims.iter()).enumerate() {
                if i != dim && d1 != d2 {
                    mismatch = true;
                    break;
                }
            }
            mismatch
        };
        if mismatch {
            Err(Error::ShapeMismatchBinaryOp {
                op: "gather",
                lhs: self.shape().clone(),
                rhs: indexes.shape().clone(),
            }
            .bt())?
        }
        let storage =
            self.storage()
                .gather(self.layout(), &indexes.storage(), indexes.layout(), dim)?;
        let op = BackpropOp::new2(self, indexes, |t1, t2| Op::Gather(t1, t2, dim));
        Ok(from_storage(storage, indexes.shape(), op, false))
    }

    /// Select values for the input tensor at the target indexes across the specified dimension.
    ///
    /// The `indexes` argument is an int tensor with a single dimension.
    /// The output has the same number of dimensions as the `self` input. The target dimension of
    /// the output has the same length as `indexes` and the values are taken from `self` using
    /// the index from `indexes`. Other dimensions have the same number of elements as the input
    /// tensor.
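    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
    /// let ids = Tensor::new(&[2u32, 0], &Device::Cpu)?;
    /// let t = t.index_select(&ids, 0)?;
    /// assert_eq!(t.to_vec2::<f32>()?, &[[4., 5.], [0., 1.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```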
pub fn index_select<D: Dim>(&self, indexes: &Self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "index-select")?; let indexes_len = match indexes.dims() { [l] => *l, _ => Err(Error::ShapeMismatchBinaryOp { lhs: self.shape().clone(), rhs: indexes.shape().clone(), op: "index-select", } .bt())?, }; let storage = self.storage().index_select( &indexes.storage(), self.layout(), indexes.layout(), dim, )?; let mut dims = self.dims().to_vec(); dims[dim] = indexes_len; let op = BackpropOp::new2(self, indexes, |t1, t2| Op::IndexSelect(t1, t2, dim)); Ok(from_storage(storage, dims, op, false)) } /// Returns an iterator over position of the elements in the storage when ranging over the /// index tuples in lexicographic order. pub fn strided_index(&self) -> crate::StridedIndex { self.layout.strided_index() } /// Similar to `strided_index` but returns the position of the start of each contiguous block /// as well as the length of the contiguous blocks. For a contiguous tensor, the index iterator /// will only return the start offset and the size would be the number of elements in the /// tensor. pub fn strided_blocks(&self) -> crate::StridedBlocks { self.layout.strided_blocks() } /// Returns the data contained in a 1D tensor as a vector of scalar values. pub fn to_vec1<S: crate::WithDType>(&self) -> Result<Vec<S>> { if self.rank() != 1 { Err(Error::UnexpectedNumberOfDims { expected: 1, got: self.rank(), shape: self.shape().clone(), } .bt())? } let from_cpu_storage = |cpu_storage: &crate::CpuStorage| { let data = S::cpu_storage_as_slice(cpu_storage)?; let data = match self.layout.contiguous_offsets() { Some((o1, o2)) => data[o1..o2].to_vec(), None => self.strided_index().map(|i| data[i]).collect(), }; Ok::<Vec<_>, Error>(data) }; match &*self.storage() { Storage::Cpu(storage) => from_cpu_storage(storage), Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?), Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?), } } /// Returns the data contained in a 2D tensor as a vector of vector of scalar values. pub fn to_vec2<S: crate::WithDType>(&self) -> Result<Vec<Vec<S>>> { let (dim1, dim2) = self.dims2()?; let from_cpu_storage = |cpu_storage: &crate::CpuStorage| { let data = S::cpu_storage_as_slice(cpu_storage)?; let mut rows = vec![]; match self.layout.contiguous_offsets() { Some((o1, o2)) => { let data = &data[o1..o2]; for idx_row in 0..dim1 { rows.push(data[idx_row * dim2..(idx_row + 1) * dim2].to_vec()) } } None => { let mut src_index = self.strided_index(); for _idx_row in 0..dim1 { let row = (0..dim2).map(|_| data[src_index.next().unwrap()]).collect(); rows.push(row) } assert!(src_index.next().is_none()); } } Ok(rows) }; match &*self.storage() { Storage::Cpu(storage) => from_cpu_storage(storage), Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?), Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?), } } /// Returns the data contained in a 3D tensor. 
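    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let t = Tensor::arange(0u32, 8u32, &Device::Cpu)?.reshape((2, 2, 2))?;
    /// assert_eq!(t.to_vec3::<u32>()?, &[[[0, 1], [2, 3]], [[4, 5], [6, 7]]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```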
pub fn to_vec3<S: crate::WithDType>(&self) -> Result<Vec<Vec<Vec<S>>>> { let (dim1, dim2, dim3) = self.dims3()?; let from_cpu_storage = |cpu_storage: &crate::CpuStorage| { let data = S::cpu_storage_as_slice(cpu_storage)?; let mut top_rows = vec![]; match self.layout.contiguous_offsets() { Some((o1, o2)) => { let data = &data[o1..o2]; let dim23 = dim2 * dim3; for idx1 in 0..dim1 { let data = &data[idx1 * dim23..(idx1 + 1) * dim23]; let mut rows = vec![]; for idx2 in 0..dim2 { rows.push(data[idx2 * dim3..(idx2 + 1) * dim3].to_vec()) } top_rows.push(rows); } } None => { let mut src_index = self.strided_index(); for _idx in 0..dim1 { let mut rows = vec![]; for _jdx in 0..dim2 { let row = (0..dim3).map(|_| data[src_index.next().unwrap()]).collect(); rows.push(row) } top_rows.push(rows); } assert!(src_index.next().is_none()); } } Ok(top_rows) }; match &*self.storage() { Storage::Cpu(storage) => from_cpu_storage(storage), Storage::Cuda(storage) => from_cpu_storage(&storage.to_cpu_storage()?), Storage::Metal(storage) => from_cpu_storage(&storage.to_cpu_storage()?), } } /// The dtype for the elements stored in the input tensor. pub fn dtype(&self) -> DType { self.dtype } /// The device on which the input tensor is located. pub fn device(&self) -> &Device { &self.device } /// The tensor shape, i.e. dimension sizes on each axis. pub fn shape(&self) -> &Shape { self.layout().shape() } /// The dimension size for this tensor on each axis. pub fn dims(&self) -> &[usize] { self.shape().dims() } /// The dimension size for a specified dimension index. pub fn dim<D: Dim>(&self, dim: D) -> Result<usize> { let dim = dim.to_index(self.shape(), "dim")?; Ok(self.dims()[dim]) } /// The layout of the input tensor, this stores both the shape of the tensor as well as the /// strides and the start offset to apply to the underlying storage. pub fn layout(&self) -> &Layout { &self.layout } pub fn stride(&self) -> &[usize] { self.layout.stride() } /// The number of dimensions for this tensor, 0 for a scalar tensor, 1 for a 1D tensor, etc. pub fn rank(&self) -> usize { self.shape().rank() } /// The number of elements stored in this tensor. pub fn elem_count(&self) -> usize { self.shape().elem_count() } /// The unique identifier for this tensor. pub fn id(&self) -> TensorId { self.id } /// Whether this tensor is a variable or not. A variable is a tensor for which gradient is /// tracked and on which backpropagation can be performed. pub fn is_variable(&self) -> bool { self.is_variable } pub(crate) fn op(&self) -> &Option<Op> { &self.op } /// Computes the sum of all the elements in this tensor and returns a tensor holding this /// scalar with zero dimensions. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let tensor = tensor.sum_all()?; /// assert_eq!(tensor.to_scalar::<f32>()?, 15.); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn sum_all(&self) -> Result<Tensor> { let dims: Vec<_> = (0..self.rank()).collect(); self.sum(dims) } pub fn mean_all(&self) -> Result<Tensor> { self.sum_all()? 
/ self.elem_count() as f64 } fn flatten_<D1: Dim, D2: Dim>( &self, start_dim: Option<D1>, end_dim: Option<D2>, ) -> Result<Tensor> { if self.rank() == 0 { self.reshape(1) } else { let start_dim = match start_dim { None => 0, Some(dim) => dim.to_index(self.shape(), "flatten")?, }; let end_dim = match end_dim { None => self.rank() - 1, Some(dim) => dim.to_index(self.shape(), "flatten")?, }; if start_dim < end_dim { let dims = self.dims(); let mut dst_dims = dims[..start_dim].to_vec(); dst_dims.push(dims[start_dim..end_dim + 1].iter().product::<usize>()); if end_dim + 1 < dims.len() { dst_dims.extend(&dims[end_dim + 1..]); } self.reshape(dst_dims) } else { Ok(self.clone()) } } } /// Flattens the input tensor on the dimension indexes from `start_dim` to `end_dim` (both /// inclusive). pub fn flatten<D1: Dim, D2: Dim>(&self, start_dim: D1, end_dim: D2) -> Result<Tensor> { self.flatten_(Some(start_dim), Some(end_dim)) } /// Flattens the input tensor on the dimension indexes from `0` to `end_dim` (inclusive). pub fn flatten_to<D: Dim>(&self, end_dim: D) -> Result<Tensor> { self.flatten_(None::<usize>, Some(end_dim)) } /// Flattens the input tensor on the dimension indexes from `start_dim` (inclusive) to the last /// dimension. pub fn flatten_from<D: Dim>(&self, start_dim: D) -> Result<Tensor> { self.flatten_(Some(start_dim), None::<usize>) } /// Flattens the input tensor by reshaping it into a one dimension tensor. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let tensor = tensor.flatten_all()?; /// assert_eq!(tensor.to_vec1::<f32>()?, &[0., 1., 2., 3., 4., 5.]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn flatten_all(&self) -> Result<Tensor> { self.flatten_(None::<usize>, None::<usize>) } /// Returns the sub-tensor fixing the index at `i` on the first dimension. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let t = tensor.get(0)?; /// assert_eq!(t.to_vec1::<f32>()?, &[0., 1.]); /// let t = tensor.get(1)?; /// assert_eq!(t.to_vec1::<f32>()?, &[2., 3.]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn get(&self, i: usize) -> Result<Tensor> { let dims = self.dims(); if dims.is_empty() { Ok(self.clone()) } else { self.narrow(0, i, 1)?.reshape(&dims[1..]) } } /// Returns the sub-tensor fixing the index at `index` on the dimension `dim`. /// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?; /// let t = tensor.get_on_dim(1, 0)?; /// assert_eq!(t.to_vec1::<f32>()?, &[0., 2., 4.]); /// let t = tensor.get_on_dim(1, 1)?; /// assert_eq!(t.to_vec1::<f32>()?, &[1., 3., 5.]); /// let t = tensor.get_on_dim(0, 1)?; /// assert_eq!(t.to_vec1::<f32>()?, &[2., 3.]); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn get_on_dim<D: Dim>(&self, dim: D, index: usize) -> Result<Tensor> { let dim = dim.to_index(self.shape(), "get_on_dim")?; self.narrow(dim, index, 1)?.squeeze(dim) } /// Returns a tensor that is a transposed version of the input, the two last dimensions of the /// input are swapped. 
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let tensor = Tensor::new(&[[0f32, 1.], [2., 3.], [4., 5.]], &Device::Cpu)?;
    /// let tensor = tensor.t()?;
    /// assert_eq!(tensor.to_vec2::<f32>()?, &[[0.0, 2.0, 4.0], [1.0, 3.0, 5.0]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```
    pub fn t(&self) -> Result<Tensor> {
        let rank = self.rank();
        if rank < 2 {
            Err(Error::UnexpectedNumberOfDims {
                expected: 2,
                got: rank,
                shape: self.shape().clone(),
            }
            .bt())?
        }
        self.transpose(rank - 2, rank - 1)
    }

    /// Returns a tensor that is a transposed version of the input, the given dimensions are
    /// swapped.
    pub fn transpose<D1: Dim, D2: Dim>(&self, dim1: D1, dim2: D2) -> Result<Tensor> {
        let dim1 = dim1.to_index(self.shape(), "transpose")?;
        let dim2 = dim2.to_index(self.shape(), "transpose")?;
        if dim1 == dim2 {
            return Ok(self.clone());
        }
        let op = BackpropOp::new1(self, |t| Op::Transpose(t, dim1, dim2));
        let tensor_ = Tensor_ {
            id: TensorId::new(),
            storage: self.storage.clone(),
            layout: self.layout.transpose(dim1, dim2)?,
            op,
            is_variable: false,
            dtype: self.dtype,
            device: self.device.clone(),
        };
        Ok(Tensor(Arc::new(tensor_)))
    }

    /// Returns a tensor with the same data as the input where the dimensions have been permuted.
    /// dims must be a permutation, i.e. include each dimension index exactly once.
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let tensor = Tensor::arange(0u32, 120u32, &Device::Cpu)?.reshape((2, 3, 4, 5))?;
    /// assert_eq!(tensor.dims(), &[2, 3, 4, 5]);
    /// let tensor = tensor.permute((2, 3, 1, 0))?;
    /// assert_eq!(tensor.dims(), &[4, 5, 3, 2]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```
    pub fn permute<D: Dims>(&self, dims: D) -> Result<Tensor> {
        let dims = dims.to_indexes(self.shape(), "permute")?;
        // O(n^2) permutation check but these arrays are small.
        let is_permutation =
            dims.len() == self.rank() && (0..dims.len()).all(|i| dims.contains(&i));
        if !is_permutation {
            crate::bail!(
                "dimension mismatch in permute, tensor {:?}, dims: {:?}",
                self.dims(),
                dims
            )
        }
        let op = BackpropOp::new1(self, |t| Op::Permute(t, dims.clone()));
        let tensor_ = Tensor_ {
            id: TensorId::new(),
            storage: self.storage.clone(),
            layout: self.layout.permute(&dims)?,
            op,
            is_variable: false,
            dtype: self.dtype,
            device: self.device.clone(),
        };
        Ok(Tensor(Arc::new(tensor_)))
    }

    /// Returns true if the data is stored in a C contiguous (aka row major) way.
    pub fn is_contiguous(&self) -> bool {
        self.layout.is_contiguous()
    }

    /// Returns true if the data is stored in a Fortran contiguous (aka column major) way.
    pub fn is_fortran_contiguous(&self) -> bool {
        self.layout.is_fortran_contiguous()
    }

    /// Compared to clone, this copies the actual storage but may fail because of running out of
    /// memory.
    pub fn copy(&self) -> Result<Tensor> {
        let op = BackpropOp::new1(self, Op::Copy);
        let tensor_ = Tensor_ {
            id: TensorId::new(),
            storage: Arc::new(RwLock::new(self.storage().try_clone(self.layout())?)),
            layout: self.layout.clone(),
            op,
            is_variable: false,
            dtype: self.dtype,
            device: self.device.clone(),
        };
        Ok(Tensor(Arc::new(tensor_)))
    }

    /// Returns a new tensor detached from the current graph, gradients are not propagated through
    /// this new node. The storage of this tensor is shared with the initial tensor.
    ///
    /// If the tensor is already detached from the computation graph, the same tensor is returned.
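    ///
    /// The returned tensor shares its storage with `self`, so the values are unchanged:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
    /// let d = t.detach()?;
    /// assert_eq!(d.to_vec1::<f32>()?, &[1., 2., 3.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```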
    pub fn detach(&self) -> Result<Tensor> {
        if self.op.is_none() && !self.is_variable {
            Ok(self.clone())
        } else {
            let tensor_ = Tensor_ {
                id: TensorId::new(),
                storage: self.storage.clone(),
                layout: self.layout.clone(),
                op: BackpropOp::none(),
                is_variable: false,
                dtype: self.dtype,
                device: self.device.clone(),
            };
            Ok(Tensor(Arc::new(tensor_)))
        }
    }

    /// If the target device is the same as the tensor device, only a shallow copy is performed.
    pub fn to_device(&self, device: &Device) -> Result<Tensor> {
        if self.device().same_device(device) {
            Ok(self.clone())
        } else {
            let storage = match (&*self.storage(), device) {
                (Storage::Cpu(storage), Device::Cuda(cuda)) => {
                    Storage::Cuda(cuda.storage_from_cpu_storage(storage)?)
                }
                (Storage::Cpu(storage), Device::Metal(metal)) => {
                    Storage::Metal(metal.storage_from_cpu_storage(storage)?)
                }
                (Storage::Cuda(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?),
                (Storage::Metal(storage), Device::Cpu) => Storage::Cpu(storage.to_cpu_storage()?),
                (Storage::Cuda(storage), Device::Cuda(cuda)) => {
                    // TODO: Avoid passing through the cpu storage here, especially if the gpu ids
                    // are the same.
                    let cpu_storage = storage.to_cpu_storage()?;
                    Storage::Cuda(cuda.storage_from_cpu_storage(&cpu_storage)?)
                }
                (Storage::Cpu(storage), Device::Cpu) => Storage::Cpu(storage.clone()),
                _ => {
                    bail!("not implemented yet")
                }
            };
            let op = BackpropOp::new1(self, Op::ToDevice);
            let tensor_ = Tensor_ {
                id: TensorId::new(),
                storage: Arc::new(RwLock::new(storage)),
                layout: self.layout.clone(),
                op,
                is_variable: false,
                dtype: self.dtype,
                device: device.clone(),
            };
            Ok(Tensor(Arc::new(tensor_)))
        }
    }

    /// Returns a new tensor duplicating data from the original tensor. New dimensions are inserted
    /// on the left.
    pub fn broadcast_left<S: Into<Shape>>(&self, left_shape: S) -> Result<Self> {
        let left_shape = left_shape.into();
        let mut dims = left_shape.into_dims();
        dims.extend(self.dims());
        self.broadcast_as(dims)
    }

    /// Broadcast the input tensor to the target shape. This returns an error if the input shape is
    /// not compatible with the target shape.
    ///
    /// If the input shape is `i_1, i_2, ... i_k`, the target shape has to have `k` dimensions or
    /// more and shape `j_1, ..., j_l, t_1, t_2, ..., t_k`. The dimensions `j_1` to `j_l` can have
    /// any value, the dimension `t_a` must be equal to `i_a` if `i_a` is different from 1. If
    /// `i_a` is equal to 1, any value can be used.
    pub fn broadcast_as<S: Into<Shape>>(&self, shape: S) -> Result<Self> {
        let tensor_ = Tensor_ {
            id: TensorId::new(),
            storage: self.storage.clone(),
            layout: self.layout.broadcast_as(shape)?,
            op: BackpropOp::new1(self, Op::Broadcast),
            is_variable: false,
            dtype: self.dtype,
            device: self.device.clone(),
        };
        Ok(Tensor(Arc::new(tensor_)))
    }

    /// An alias for broadcast_as.
    pub fn expand<S: Into<Shape>>(&self, shape: S) -> Result<Self> {
        self.broadcast_as(shape)
    }

    /// Casts the input tensor to the target `dtype`.
/// /// ```rust /// use candle_core::{Tensor, Device}; /// let tensor = Tensor::new(3.14159265358979f64, &Device::Cpu)?; /// assert_eq!(tensor.to_scalar::<f64>()?, 3.14159265358979); /// let tensor = tensor.to_dtype(candle_core::DType::F32)?; /// assert_eq!(tensor.to_scalar::<f32>()?, 3.1415927); /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn to_dtype(&self, dtype: DType) -> Result<Self> { if self.dtype() == dtype { Ok(self.clone()) } else { let shape = self.shape(); let storage = self.storage().to_dtype(self.layout(), dtype)?; let op = BackpropOp::new1(self, Op::ToDType); Ok(from_storage(storage, shape.clone(), op, false)) } } /// Returns a tensor that is in row major order. This is the same as the original tensor if it /// was already contiguous, otherwise a copy is triggered. pub fn contiguous(&self) -> Result<Tensor> { if self.is_contiguous() { Ok(self.clone()) } else { let shape = self.shape(); let mut storage = self.device().zeros(shape, self.dtype())?; self.storage() .copy_strided_src(&mut storage, 0, self.layout())?; let op = BackpropOp::new1(self, Op::Copy); Ok(from_storage(storage, shape.clone(), op, false)) } } /// Create a variable based on the values currently stored in a tensor. The storage is always /// copied. pub(crate) fn make_var(&self) -> Result<Tensor> { let shape = self.shape().clone(); let mut storage = self.device().zeros(&shape, self.dtype())?; self.storage() .copy_strided_src(&mut storage, 0, self.layout())?; Ok(from_storage(storage, shape, BackpropOp::none(), true)) } /// Reshape returns a tensor with the target shape provided that the number of elements of the /// original tensor is the same. /// If the input tensor is contiguous, this is a view on the original data. Otherwise this uses /// a new storage and copies the data over, the returned tensor is always contiguous. /// /// The shape can be specified using a tuple of `usize` and at most one `()` in which case /// the behavior is the same as when using `-1` in PyTorch: this dimension size is adjusted so /// as to match the number of elements in the tensor. /// /// ```rust /// # use candle_core::{Tensor, DType, Device, D}; /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?; /// /// let c = a.reshape((1, 6))?; /// assert_eq!(c.shape().dims(), &[1, 6]); /// /// let c = a.reshape((3, 2))?; /// assert_eq!(c.shape().dims(), &[3, 2]); /// /// let c = a.reshape((2, (), 1))?; /// assert_eq!(c.shape().dims(), &[2, 3, 1]); /// /// # Ok::<(), candle_core::Error>(()) /// ``` pub fn reshape<S: crate::shape::ShapeWithOneHole>(&self, s: S) -> Result<Tensor> { let shape = s.into_shape(self.elem_count())?; if shape.elem_count() != self.elem_count() { return Err(Error::ShapeMismatchBinaryOp { lhs: self.shape().clone(), rhs: shape, op: "reshape", } .bt()); } let op = BackpropOp::new1(self, Op::Reshape); if self.is_contiguous() { let tensor_ = Tensor_ { id: TensorId::new(), storage: self.storage.clone(), layout: Layout::contiguous_with_offset(shape, self.layout.start_offset()), op, is_variable: false, dtype: self.dtype, device: self.device.clone(), }; Ok(Tensor(Arc::new(tensor_))) } else { let mut storage = self.device().zeros(&shape, self.dtype())?; self.storage() .copy_strided_src(&mut storage, 0, self.layout())?; Ok(from_storage(storage, shape, op, false)) } } /// Creates a new tensor with the specified dimension removed if its size was one. 
    ///
    /// ```rust
    /// # use candle_core::{Tensor, DType, Device, D};
    /// let a = Tensor::zeros((2, 3, 1), DType::F32, &Device::Cpu)?;
    ///
    /// let c = a.squeeze(2)?;
    /// assert_eq!(c.shape().dims(), &[2, 3]);
    ///
    /// let c = a.squeeze(D::Minus1)?;
    /// assert_eq!(c.shape().dims(), &[2, 3]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```
    pub fn squeeze<D: Dim>(&self, dim: D) -> Result<Self> {
        // The PyTorch semantics are to return the same tensor if the target dimension
        // does not have a size of 1.
        let dims = self.dims();
        let dim = dim.to_index(self.shape(), "squeeze")?;
        if dims[dim] == 1 {
            let mut dims = dims.to_vec();
            dims.remove(dim);
            self.reshape(dims)
        } else {
            Ok(self.clone())
        }
    }

    /// Creates a new tensor with a dimension of size one inserted at the specified position.
    ///
    /// ```rust
    /// # use candle_core::{Tensor, DType, Device, D};
    /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
    ///
    /// let c = a.unsqueeze(0)?;
    /// assert_eq!(c.shape().dims(), &[1, 2, 3]);
    ///
    /// let c = a.unsqueeze(D::Minus1)?;
    /// assert_eq!(c.shape().dims(), &[2, 3, 1]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```
    pub fn unsqueeze<D: Dim>(&self, dim: D) -> Result<Self> {
        let mut dims = self.dims().to_vec();
        let dim = dim.to_index_plus_one(self.shape(), "unsqueeze")?;
        // Cannot panic because to_index_plus_one already checks dimensions
        dims.insert(dim, 1);
        self.reshape(dims)
    }

    /// Stacks two or more tensors along a particular dimension.
    ///
    /// All tensors must have the same rank, and the output has one additional dimension.
    ///
    /// ```rust
    /// # use candle_core::{Tensor, DType, Device};
    /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
    /// let b = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
    ///
    /// let c = Tensor::stack(&[&a, &b], 0)?;
    /// assert_eq!(c.shape().dims(), &[2, 2, 3]);
    ///
    /// let c = Tensor::stack(&[&a, &b], 2)?;
    /// assert_eq!(c.shape().dims(), &[2, 3, 2]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```
    pub fn stack<A: AsRef<Tensor>, D: Dim>(args: &[A], dim: D) -> Result<Self> {
        if args.is_empty() {
            Err(Error::OpRequiresAtLeastOneTensor { op: "stack" }.bt())?
        }
        let dim = dim.to_index_plus_one(args[0].as_ref().shape(), "stack")?;
        let args = args
            .iter()
            .map(|t| t.as_ref().unsqueeze(dim))
            .collect::<Result<Vec<_>>>()?;
        Self::cat(&args, dim)
    }

    /// Concatenates two or more tensors along a particular dimension.
    ///
    /// All tensors must have the same rank, and the output has the same rank.
    ///
    /// ```rust
    /// # use candle_core::{Tensor, DType, Device};
    /// let a = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
    /// let b = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
    ///
    /// let c = Tensor::cat(&[&a, &b], 0)?;
    /// assert_eq!(c.shape().dims(), &[4, 3]);
    ///
    /// let c = Tensor::cat(&[&a, &b], 1)?;
    /// assert_eq!(c.shape().dims(), &[2, 6]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```
    pub fn cat<A: AsRef<Tensor>, D: Dim>(args: &[A], dim: D) -> Result<Self> {
        if args.is_empty() {
            Err(Error::OpRequiresAtLeastOneTensor { op: "cat" }.bt())?
        }
        let arg0 = args[0].as_ref();
        if args.len() == 1 {
            return Ok(arg0.clone());
        }
        let dim = dim.to_index(arg0.shape(), "cat")?;
        for arg in args {
            arg.as_ref().check_dim(dim, "cat")?;
        }
        for (arg_idx, arg) in args.iter().enumerate() {
            let arg = arg.as_ref();
            if arg0.rank() != arg.rank() {
                Err(Error::UnexpectedNumberOfDims {
                    expected: arg0.rank(),
                    got: arg.rank(),
                    shape: arg.shape().clone(),
                }
                .bt())?
} for (dim_idx, (v1, v2)) in arg0 .shape() .dims() .iter() .zip(arg.shape().dims().iter()) .enumerate() { if dim_idx != dim && v1 != v2 { Err(Error::ShapeMismatchCat { dim: dim_idx, first_shape: arg0.shape().clone(), n: arg_idx + 1, nth_shape: arg.shape().clone(), } .bt())? } } } if dim == 0 { Self::cat0(args) } else { // TODO: Avoid these transpositions and have an implementation that works // for dim != 0... let args: Vec<Tensor> = args .iter() .map(|a| a.as_ref().transpose(0, dim)) .collect::<Result<Vec<_>>>()?; let cat = Self::cat0(&args)?; cat.transpose(0, dim) } } fn cat0<A: AsRef<Tensor>>(args: &[A]) -> Result<Self> { if args.is_empty() { Err(Error::OpRequiresAtLeastOneTensor { op: "cat" }.bt())? } let arg0 = args[0].as_ref(); if args.len() == 1 { return Ok(arg0.clone()); } let rank = arg0.rank(); let device = arg0.device(); let dtype = arg0.dtype(); let first_dims = arg0.shape().dims(); let mut cat_dims = first_dims.to_vec(); cat_dims[0] = 0; let mut offsets = vec![0usize]; for (arg_idx, arg) in args.iter().enumerate() { let arg = arg.as_ref(); if arg.dtype() != dtype { Err(Error::DTypeMismatchBinaryOp { lhs: dtype, rhs: arg.dtype(), op: "cat", } .bt())? } if arg.device().location() != device.location() { Err(Error::DeviceMismatchBinaryOp { lhs: device.location(), rhs: arg.device().location(), op: "cat", } .bt())? } if rank != arg.rank() { Err(Error::UnexpectedNumberOfDims { expected: rank, got: arg.rank(), shape: arg.shape().clone(), } .bt())? } for (dim_idx, (v1, v2)) in arg0 .shape() .dims() .iter() .zip(arg.shape().dims().iter()) .enumerate() { if dim_idx == 0 { cat_dims[0] += v2; } if dim_idx != 0 && v1 != v2 { Err(Error::ShapeMismatchCat { dim: dim_idx, first_shape: arg0.shape().clone(), n: arg_idx + 1, nth_shape: arg.shape().clone(), } .bt())? } } let next_offset = offsets.last().unwrap() + arg.elem_count(); offsets.push(next_offset); } let shape = Shape::from(cat_dims); let op = BackpropOp::new(args, |args| Op::Cat(args, 0)); let mut storage = device.zeros(&shape, dtype)?; for (arg, &offset) in args.iter().zip(offsets.iter()) { let arg = arg.as_ref(); arg.storage() .copy_strided_src(&mut storage, offset, arg.layout())?; } Ok(from_storage(storage, shape, op, false)) } /// Pad the input tensor using 0s along dimension `dim`. This adds `left` elements before the /// input tensor values and `right` elements after. pub fn pad_with_zeros<D: Dim>(&self, dim: D, left: usize, right: usize) -> Result<Self> { if left == 0 && right == 0 { Ok(self.clone()) } else if left == 0 { let dim = dim.to_index(self.shape(), "pad_with_zeros")?; let mut dims = self.dims().to_vec(); dims[dim] = right; let right = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?; Tensor::cat(&[self, &right], dim) } else if right == 0 { let dim = dim.to_index(self.shape(), "pad_with_zeros")?; let mut dims = self.dims().to_vec(); dims[dim] = left; let left = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?; Tensor::cat(&[&left, self], dim) } else { let dim = dim.to_index(self.shape(), "pad_with_zeros")?; let mut dims = self.dims().to_vec(); dims[dim] = left; let left = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?; dims[dim] = right; let right = Tensor::zeros(dims.as_slice(), self.dtype, self.device())?; Tensor::cat(&[&left, self, &right], dim) } } /// Pad the input tensor using same values along dimension `dim`. This adds `left` elements before the /// input tensor values and `right` elements after. 
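    ///
    /// For example, replicating the edge values of a 1D tensor:
    ///
    /// ```rust
    /// use candle_core::{Tensor, Device};
    /// let t = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
    /// let t = t.pad_with_same(0, 1, 2)?;
    /// assert_eq!(t.to_vec1::<f32>()?, &[1., 1., 2., 3., 3., 3.]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```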
pub fn pad_with_same<D: Dim>(&self, dim: D, left: usize, right: usize) -> Result<Self> { if left == 0 && right == 0 { Ok(self.clone()) } else if self.elem_count() == 0 { crate::bail!("cannot use pad_with_same on an empty tensor") } else if left == 0 { let dim = dim.to_index(self.shape(), "pad_with_same")?; let r = self.narrow(dim, self.dim(dim)? - 1, 1)?; let mut v = vec![self]; for _ in 0..right { v.push(&r) } Tensor::cat(&v, dim) } else if right == 0 { let dim = dim.to_index(self.shape(), "pad_with_same")?; let l = self.narrow(dim, 0, 1)?; let mut v = vec![]; for _ in 0..left { v.push(&l) } v.push(self); Tensor::cat(&v, dim) } else { let dim = dim.to_index(self.shape(), "pad_with_same")?; let l = self.narrow(dim, 0, 1)?; let r = self.narrow(dim, self.dim(dim)? - 1, 1)?; let mut v = vec![]; for _ in 0..left { v.push(&l) } v.push(self); for _ in 0..right { v.push(&r) } Tensor::cat(&v, dim) } } /// Run the `forward` method of `m` on `self`. pub fn apply<M: crate::Module>(&self, m: &M) -> Result<Self> { m.forward(self) } /// Run the `forward` method of `m` on `self`. pub fn apply_t<M: crate::ModuleT>(&self, m: &M, train: bool) -> Result<Self> { m.forward_t(self, train) } pub(crate) fn storage(&self) -> std::sync::RwLockReadGuard<'_, Storage> { self.storage.read().unwrap() } // If we extend the visibility of this function to be usable outside of this crate, we should // make it unsafe. pub(crate) fn storage_mut_and_layout( &self, ) -> (std::sync::RwLockWriteGuard<'_, Storage>, &Layout) { let storage = self.storage.write().unwrap(); (storage, &self.layout) } /// The storage used by this tensor, together with the layout to use to access it safely. pub fn storage_and_layout(&self) -> (std::sync::RwLockReadGuard<'_, Storage>, &Layout) { let storage = self.storage.read().unwrap(); (storage, &self.layout) } pub(crate) fn same_storage(&self, rhs: &Self) -> bool { let lhs: &RwLock<Storage> = self.storage.as_ref(); let rhs: &RwLock<Storage> = rhs.storage.as_ref(); std::ptr::eq(lhs, rhs) } /// Applies a unary custom op without backward support pub fn apply_op1_no_bwd<C: CustomOp1>(&self, c: &C) -> Result<Self> { let (storage, shape) = self.storage().apply_op1(self.layout(), c)?; Ok(from_storage(storage, shape, BackpropOp::none(), false)) } /// Applies a binary custom op without backward support pub fn apply_op2_no_bwd<C: CustomOp2>(&self, rhs: &Self, c: &C) -> Result<Self> { let (storage, shape) = self.storage() .apply_op2(self.layout(), &rhs.storage(), rhs.layout(), c)?; Ok(from_storage(storage, shape, BackpropOp::none(), false)) } /// Applies a ternary custom op without backward support pub fn apply_op3_no_bwd<C: CustomOp3>(&self, t2: &Self, t3: &Self, c: &C) -> Result<Self> { let (storage, shape) = self.storage().apply_op3( self.layout(), &t2.storage(), t2.layout(), &t3.storage(), t3.layout(), c, )?; Ok(from_storage(storage, shape, BackpropOp::none(), false)) } /// Applies a unary custom op. pub fn apply_op1_arc(&self, c: Arc<Box<dyn CustomOp1 + Send + Sync>>) -> Result<Self> { let (storage, shape) = self .storage() .apply_op1(self.layout(), c.as_ref().as_ref())?; let op = BackpropOp::new1(self, |s| Op::CustomOp1(s, c.clone())); Ok(from_storage(storage, shape, op, false)) } pub fn apply_op1<C: 'static + CustomOp1 + Send + Sync>(&self, c: C) -> Result<Self> { self.apply_op1_arc(Arc::new(Box::new(c))) } /// Applies a binary custom op. 
pub fn apply_op2_arc( &self, rhs: &Self, c: Arc<Box<dyn CustomOp2 + Send + Sync>>, ) -> Result<Self> { let (storage, shape) = self.storage().apply_op2( self.layout(), &rhs.storage(), rhs.layout(), c.as_ref().as_ref(), )?; let op = BackpropOp::new2(self, rhs, |t1, t2| Op::CustomOp2(t1, t2, c.clone())); Ok(from_storage(storage, shape, op, false)) } pub fn apply_op2<C: 'static + CustomOp2 + Send + Sync>(&self, r: &Self, c: C) -> Result<Self> { self.apply_op2_arc(r, Arc::new(Box::new(c))) } /// Applies a ternary custom op. pub fn apply_op3_arc( &self, t2: &Self, t3: &Self, c: Arc<Box<dyn CustomOp3 + Send + Sync>>, ) -> Result<Self> { let (storage, shape) = self.storage().apply_op3( self.layout(), &t2.storage(), t2.layout(), &t3.storage(), t3.layout(), c.as_ref().as_ref(), )?; let op = BackpropOp::new3(self, t2, t3, |t1, t2, t3| { Op::CustomOp3(t1, t2, t3, c.clone()) }); Ok(from_storage(storage, shape, op, false)) } pub fn apply_op3<C: 'static + CustomOp3 + Send + Sync>( &self, t2: &Self, t3: &Self, c: C, ) -> Result<Self> { self.apply_op3_arc(t2, t3, Arc::new(Box::new(c))) } /// Normalize a 'relative' axis value: positive values are kept, negative /// values means counting the dimensions from the back. pub fn normalize_axis(&self, axis: i64) -> Result<usize> { let rank = self.rank() as i64; if rank <= axis { crate::bail!("axis {axis} is too large, tensor rank {rank}") } else if 0 <= axis { Ok(axis as usize) } else { let naxis = rank + axis; if naxis < 0 { crate::bail!("axis {axis} is too small, tensor rank {rank}") } Ok(naxis as usize) } } /// Returns a lower triangular matrix of ones of size n by n. pub fn tril2(n: usize, dtype: DType, device: &Device) -> Result<Self> { let t = Tensor::arange(0u32, n as u32, device)?; let t1 = t.reshape((1, n))?.broadcast_as((n, n))?; let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?; t1.le(&t2)?.to_dtype(dtype) } /// Returns an upper triangular matrix of ones of size n by n. pub fn triu2(n: usize, dtype: DType, device: &Device) -> Result<Self> { let t = Tensor::arange(0u32, n as u32, device)?; let t1 = t.reshape((1, n))?.broadcast_as((n, n))?; let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?; t1.ge(&t2)?.to_dtype(dtype) } /// Returns a matrix with a diagonal of ones of size n by n. pub fn eye(n: usize, dtype: DType, device: &Device) -> Result<Self> { let t = Tensor::arange(0u32, n as u32, device)?; let t1 = t.reshape((1, n))?.broadcast_as((n, n))?; let t2 = t.reshape((n, 1))?.broadcast_as((n, n))?; t1.eq(&t2)?.to_dtype(dtype) } /// Returns the cumulative sum of elements of the input tensor summed over the specified /// dimension. /// /// This operation is most efficient when dim is the last dimension of the tensor. pub fn cumsum<D: Dim>(&self, dim: D) -> Result<Self> { let dim = dim.to_index(self.shape(), "cumsum")?; let rank = self.rank(); if rank == 0 { return Ok(self.clone()); } let n_axis = self.dim(dim)?; let triu = Tensor::triu2(n_axis, self.dtype(), self.device())?; if rank == 1 { self.unsqueeze(0)?.matmul(&triu)?.squeeze(0) } else { let last = rank - 1; let t = self.transpose(dim, last)?; let t = t.broadcast_matmul(&triu)?; t.transpose(dim, last) } } /// Returns a copy of `self` where the values within `ranges` have been replaced with the /// content of `src`. 
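    ///
    /// A small sketch: the range lengths must match the `src` dimensions.
    ///
    /// ```rust
    /// use candle_core::{Tensor, DType, Device};
    /// let t = Tensor::zeros((2, 3), DType::F32, &Device::Cpu)?;
    /// let src = Tensor::ones((1, 2), DType::F32, &Device::Cpu)?;
    /// let t = t.slice_assign(&[0..1, 1..3], &src)?;
    /// assert_eq!(t.to_vec2::<f32>()?, &[[0., 1., 1.], [0., 0., 0.]]);
    /// # Ok::<(), candle_core::Error>(())
    /// ```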
pub fn slice_assign<D: std::ops::RangeBounds<usize>>( &self, ranges: &[D], src: &Tensor, ) -> Result<Self> { let src_dims = src.dims(); let self_dims = self.dims(); if self_dims.len() != src_dims.len() { crate::bail!( "slice-assign requires input with the same rank {} <> {}", self_dims.len(), src_dims.len() ) } if self_dims.len() != ranges.len() { crate::bail!( "slice-assign requires input with the same rank as there are ranges {} <> {}", self_dims.len(), ranges.len() ) } let mut src = src.clone(); let mut mask = Self::ones(src.shape(), DType::U8, src.device())?; for (i, range) in ranges.iter().enumerate() { let start_included = match range.start_bound() { std::ops::Bound::Unbounded => 0, std::ops::Bound::Included(v) => *v, std::ops::Bound::Excluded(v) => *v + 1, }; let end_excluded = match range.end_bound() { std::ops::Bound::Unbounded => self_dims[i], std::ops::Bound::Included(v) => *v + 1, std::ops::Bound::Excluded(v) => *v, }; if end_excluded <= start_included { crate::bail!( "slice-assign: empty range for dim {i}, {start_included} {end_excluded}" ) } if self_dims[i] < end_excluded { crate::bail!( "slice-assign: upper bound is out of range for dim {i}, {end_excluded} {}", self_dims[i] ) } if end_excluded - start_included != src_dims[i] { crate::bail!( "slice-assign: the range for dim {i} ({start_included}..{end_excluded}) does not match the size of src {}", src_dims[i] ) } src = src.pad_with_zeros(i, start_included, self_dims[i] - end_excluded)?; mask = mask.pad_with_zeros(i, start_included, self_dims[i] - end_excluded)? } mask.where_cond(/* on_true= */ &src, /* on_false= */ self) } } macro_rules! bin_trait { ($trait:ident, $fn1:ident, $mul:expr, $add:expr) => { impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<B> for Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: B) -> Self::Output { Tensor::$fn1(&self, rhs.borrow()) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<B> for &Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: B) -> Self::Output { Tensor::$fn1(&self, rhs.borrow()) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Tensor> for Result<B> { type Output = Result<Tensor>; fn $fn1(self, rhs: Tensor) -> Self::Output { Tensor::$fn1(self?.borrow(), &rhs) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<&Tensor> for Result<B> { type Output = Result<Tensor>; fn $fn1(self, rhs: &Tensor) -> Self::Output { Tensor::$fn1(self?.borrow(), rhs) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Result<B>> for Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: Result<B>) -> Self::Output { Tensor::$fn1(&self, rhs?.borrow()) } } impl<B: std::borrow::Borrow<Tensor>> std::ops::$trait<Result<B>> for &Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: Result<B>) -> Self::Output { Tensor::$fn1(&self, rhs?.borrow()) } } impl std::ops::$trait<f64> for Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: f64) -> Self::Output { self.affine($mul(rhs), $add(rhs)) } } impl std::ops::$trait<f64> for &Tensor { type Output = Result<Tensor>; fn $fn1(self, rhs: f64) -> Self::Output { self.affine($mul(rhs), $add(rhs)) } } }; } bin_trait!(Add, add, |_| 1., |v| v); bin_trait!(Sub, sub, |_| 1., |v: f64| -v); bin_trait!(Mul, mul, |v| v, |_| 0.); bin_trait!(Div, div, |v| 1. 
/ v, |_| 0.); impl std::ops::Add<Tensor> for f64 { type Output = Result<Tensor>; fn add(self, rhs: Tensor) -> Self::Output { rhs + self } } impl std::ops::Add<&Tensor> for f64 { type Output = Result<Tensor>; fn add(self, rhs: &Tensor) -> Self::Output { rhs + self } } impl std::ops::Mul<Tensor> for f64 { type Output = Result<Tensor>; fn mul(self, rhs: Tensor) -> Self::Output { rhs * self } } impl std::ops::Mul<&Tensor> for f64 { type Output = Result<Tensor>; fn mul(self, rhs: &Tensor) -> Self::Output { rhs * self } } impl std::ops::Sub<Tensor> for f64 { type Output = Result<Tensor>; fn sub(self, rhs: Tensor) -> Self::Output { rhs.affine(-1., self) } } impl std::ops::Sub<&Tensor> for f64 { type Output = Result<Tensor>; fn sub(self, rhs: &Tensor) -> Self::Output { rhs.affine(-1., self) } } impl std::ops::Div<Tensor> for f64 { type Output = Result<Tensor>; #[allow(clippy::suspicious_arithmetic_impl)] fn div(self, rhs: Tensor) -> Self::Output { rhs.recip()? * self } } impl std::ops::Div<&Tensor> for f64 { type Output = Result<Tensor>; #[allow(clippy::suspicious_arithmetic_impl)] fn div(self, rhs: &Tensor) -> Self::Output { rhs.recip()? * self } }
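A note on the `cumsum` implementation above: it reduces a prefix sum to a matrix product with the triangular matrix built by `triu2`, since multiplying a row vector by an upper-triangular matrix of ones accumulates all elements up to each column. A minimal sketch of that equivalence, assuming `candle_core` is the crate name on the caller's side:

use candle_core::{DType, Device, Result, Tensor};

fn main() -> Result<()> {
    let dev = Device::Cpu;
    // triu2(4) has ones on and above the diagonal, so column j of `v * triu`
    // sums v[0..=j]: exactly a prefix sum.
    let v = Tensor::new(&[1f32, 2., 3., 4.], &dev)?;
    let triu = Tensor::triu2(4, DType::F32, &dev)?;
    let via_matmul = v.unsqueeze(0)?.matmul(&triu)?.squeeze(0)?;
    let via_cumsum = v.cumsum(0)?;
    println!("{:?}", via_matmul.to_vec1::<f32>()?); // [1.0, 3.0, 6.0, 10.0]
    println!("{:?}", via_cumsum.to_vec1::<f32>()?); // [1.0, 3.0, 6.0, 10.0]
    Ok(())
}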
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/accelerate.rs
#![allow(dead_code)] use libc::{c_char, c_double, c_float, c_int, c_long, c_ulong}; mod ffi { use super::*; extern "C" { // It would be nice to be able to switch to the NEWLAPACK version of the function but this // seems to trigger some link error. Available function names can be seen here: // /Library/Developer/CommandLineTools/SDKs/MacOSX13.3.sdk/System/Library/Frameworks/Accelerate.framework/Versions/A/Accelerate.tbd #[link_name = "sgemm_"] pub fn sgemm_ffi( transa: *const c_char, transb: *const c_char, m: *const c_int, n: *const c_int, k: *const c_int, alpha: *const c_float, a: *const c_float, lda: *const c_int, b: *const c_float, ldb: *const c_int, beta: *const c_float, c: *mut c_float, ldc: *const c_int, ); #[link_name = "dgemm_"] pub fn dgemm_ffi( transa: *const c_char, transb: *const c_char, m: *const c_int, n: *const c_int, k: *const c_int, alpha: *const c_double, a: *const c_double, lda: *const c_int, b: *const c_double, ldb: *const c_int, beta: *const c_double, c: *mut c_double, ldc: *const c_int, ); pub fn vvexpf(dst: *mut c_float, src: *const c_float, len: *const c_int); pub fn vvexp(dst: *mut c_double, src: *const c_double, len: *const c_int); pub fn vvsqrtf(dst: *mut c_float, src: *const c_float, len: *const c_int); pub fn vvsqrt(dst: *mut c_double, src: *const c_double, len: *const c_int); pub fn vvsinf(dst: *mut c_float, src: *const c_float, len: *const c_int); pub fn vvsin(dst: *mut c_double, src: *const c_double, len: *const c_int); pub fn vvcosf(dst: *mut c_float, src: *const c_float, len: *const c_int); pub fn vvcos(dst: *mut c_double, src: *const c_double, len: *const c_int); pub fn vvlogf(dst: *mut c_float, src: *const c_float, len: *const c_int); pub fn vvlog(dst: *mut c_double, src: *const c_double, len: *const c_int); pub fn vvtanhf(dst: *mut c_float, src: *const c_float, len: *const c_int); pub fn vvtanh(dst: *mut c_double, src: *const c_double, len: *const c_int); pub fn vDSP_vaddD( _: *const c_double, _: c_long, _: *const c_double, _: c_long, _: *mut c_double, _: c_long, _: c_ulong, ); pub fn vDSP_vadd( _: *const c_float, _: c_long, _: *const c_float, _: c_long, _: *mut c_float, _: c_long, _: c_ulong, ); pub fn vDSP_vsubD( _: *const c_double, _: c_long, _: *const c_double, _: c_long, _: *mut c_double, _: c_long, _: c_ulong, ); pub fn vDSP_vsub( _: *const c_float, _: c_long, _: *const c_float, _: c_long, _: *mut c_float, _: c_long, _: c_ulong, ); pub fn vDSP_vmulD( _: *const c_double, _: c_long, _: *const c_double, _: c_long, _: *mut c_double, _: c_long, _: c_ulong, ); pub fn vDSP_vmul( _: *const c_float, _: c_long, _: *const c_float, _: c_long, _: *mut c_float, _: c_long, _: c_ulong, ); pub fn vDSP_vdivD( _: *const c_double, _: c_long, _: *const c_double, _: c_long, _: *mut c_double, _: c_long, _: c_ulong, ); pub fn vDSP_vdiv( _: *const c_float, _: c_long, _: *const c_float, _: c_long, _: *mut c_float, _: c_long, _: c_ulong, ); pub fn vDSP_vminD( _: *const c_double, _: c_long, _: *const c_double, _: c_long, _: *mut c_double, _: c_long, _: c_ulong, ); pub fn vDSP_vmin( _: *const c_float, _: c_long, _: *const c_float, _: c_long, _: *mut c_float, _: c_long, _: c_ulong, ); pub fn vDSP_vmaxD( _: *const c_double, _: c_long, _: *const c_double, _: c_long, _: *mut c_double, _: c_long, _: c_ulong, ); pub fn vDSP_vmax( _: *const c_float, _: c_long, _: *const c_float, _: c_long, _: *mut c_float, _: c_long, _: c_ulong, ); } } #[allow(clippy::too_many_arguments)] #[inline] pub unsafe fn sgemm( transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: f32, a: &[f32], lda: i32, 
b: &[f32], ldb: i32, beta: f32, c: &mut [f32], ldc: i32, ) { ffi::sgemm_ffi( &(transa as c_char), &(transb as c_char), &m, &n, &k, &alpha, a.as_ptr(), &lda, b.as_ptr(), &ldb, &beta, c.as_mut_ptr(), &ldc, ) } #[allow(clippy::too_many_arguments)] #[inline] pub unsafe fn dgemm( transa: u8, transb: u8, m: i32, n: i32, k: i32, alpha: f64, a: &[f64], lda: i32, b: &[f64], ldb: i32, beta: f64, c: &mut [f64], ldc: i32, ) { ffi::dgemm_ffi( &(transa as c_char), &(transb as c_char), &m, &n, &k, &alpha, a.as_ptr(), &lda, b.as_ptr(), &ldb, &beta, c.as_mut_ptr(), &ldc, ) } #[inline] pub fn vs_exp(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvexpf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vd_exp(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvexp(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vs_sqrt(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvsqrtf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vd_sqrt(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvsqrt(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vs_sin(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvsinf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vd_sin(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvsin(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vs_cos(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvcosf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vd_cos(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvcos(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vs_tanh(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvtanhf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vd_tanh(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvtanh(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vs_ln(a: &[f32], y: &mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvlogf(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vd_ln(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } unsafe { ffi::vvlog(y.as_mut_ptr(), a.as_ptr(), &(a_len as i32)) } } #[inline] pub fn vs_sqr(a: &[f32], y: 
&mut [f32]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } y.iter_mut().zip(a.iter()).for_each(|(y, a)| *y = *a * *a) } #[inline] pub fn vd_sqr(a: &[f64], y: &mut [f64]) { let a_len = a.len(); let y_len = y.len(); if a_len != y_len { panic!("a and y have different lengths {a_len} <> {y_len}") } y.iter_mut().zip(a.iter()).for_each(|(y, a)| *y = *a * *a) } #[inline] pub fn vs_tanh_inplace(y: &mut [f32]) { unsafe { ffi::vvtanhf(y.as_mut_ptr(), y.as_ptr(), &(y.len() as i32)) } } #[inline] pub fn vd_tanh_inplace(y: &mut [f64]) { unsafe { ffi::vvtanh(y.as_mut_ptr(), y.as_ptr(), &(y.len() as i32)) } } #[inline] pub fn vs_gelu(vs: &[f32], ys: &mut [f32]) { for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = (2.0f32 / std::f32::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v) } vs_tanh_inplace(ys); for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = 0.5 * v * (1.0 + *y) } } #[inline] pub fn vd_gelu(vs: &[f64], ys: &mut [f64]) { for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = (2.0f64 / std::f64::consts::PI).sqrt() * v * (1.0 + 0.044715 * v * v) } vd_tanh_inplace(ys); for (&v, y) in vs.iter().zip(ys.iter_mut()) { *y = 0.5 * v * (1.0 + *y) } } macro_rules! binary_op { ($fn_name:ident, $ty:ty, $accelerate_name:ident) => { #[inline] pub fn $fn_name(a: &[$ty], b: &[$ty], y: &mut [$ty]) { let a_len = a.len(); let b_len = b.len(); let y_len = y.len(); if a_len != y_len || b_len != y_len { panic!( "{} a,b,y len mismatch {a_len} {b_len} {y_len}", stringify!($fn_name) ); } unsafe { // Weird quirk of accelerate, the rhs comes before the lhs. ffi::$accelerate_name( b.as_ptr(), 1, a.as_ptr(), 1, y.as_mut_ptr(), 1, a_len as u64, ) } } }; } binary_op!(vs_add, f32, vDSP_vadd); binary_op!(vd_add, f64, vDSP_vaddD); binary_op!(vs_sub, f32, vDSP_vsub); binary_op!(vd_sub, f64, vDSP_vsubD); binary_op!(vs_mul, f32, vDSP_vmul); binary_op!(vd_mul, f64, vDSP_vmulD); binary_op!(vs_div, f32, vDSP_vdiv); binary_op!(vd_div, f64, vDSP_vdivD); binary_op!(vs_max, f32, vDSP_vmax); binary_op!(vd_max, f64, vDSP_vmaxD); binary_op!(vs_min, f32, vDSP_vmin); binary_op!(vd_min, f64, vDSP_vminD);
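All of the `vs_*`/`vd_*` wrappers above share one shape: validate that the slices have equal length, then hand raw pointers to a vectorized routine. A self-contained sketch of the same pattern around a plain-Rust stand-in kernel (the real wrappers call Accelerate's `vvexpf`, which only links on Apple platforms):

// Stand-in for the vectorized routine; the real code dispatches to Accelerate.
fn exp_kernel(dst: &mut [f32], src: &[f32]) {
    for (d, s) in dst.iter_mut().zip(src.iter()) {
        *d = s.exp();
    }
}

pub fn vs_exp_checked(a: &[f32], y: &mut [f32]) {
    let (a_len, y_len) = (a.len(), y.len());
    if a_len != y_len {
        panic!("a and y have different lengths {a_len} <> {y_len}")
    }
    exp_kernel(y, a);
}

fn main() {
    let a = vec![0.0f32, 1.0, 2.0];
    let mut y = vec![0.0f32; 3];
    vs_exp_checked(&a, &mut y);
    assert!((y[1] - std::f32::consts::E).abs() < 1e-6);
    println!("{y:?}");
}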
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/utils.rs
use std::str::FromStr; pub fn get_num_threads() -> usize { // Respond to the same environment variable as rayon. match std::env::var("RAYON_NUM_THREADS") .ok() .and_then(|s| usize::from_str(&s).ok()) { Some(x) if x > 0 => x, Some(_) | None => num_cpus::get(), } } pub fn has_accelerate() -> bool { cfg!(feature = "accelerate") } pub fn has_mkl() -> bool { cfg!(feature = "mkl") } pub fn cuda_is_available() -> bool { cfg!(feature = "cuda") } pub fn metal_is_available() -> bool { cfg!(feature = "metal") } pub fn with_avx() -> bool { cfg!(target_feature = "avx") } pub fn with_neon() -> bool { cfg!(target_feature = "neon") } pub fn with_simd128() -> bool { cfg!(target_feature = "simd128") } pub fn with_f16c() -> bool { cfg!(target_feature = "f16c") }
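`get_num_threads` illustrates a small but common pattern: an environment variable overrides a detected default, and unparsable or non-positive values fall through to the fallback. The same shape applied to a hypothetical knob (the variable name here is made up for illustration):

use std::str::FromStr;

fn get_chunk_size(default: usize) -> usize {
    match std::env::var("MY_CHUNK_SIZE") // hypothetical variable name
        .ok()
        .and_then(|s| usize::from_str(&s).ok())
    {
        Some(x) if x > 0 => x,
        Some(_) | None => default,
    }
}

fn main() {
    println!("chunk size: {}", get_chunk_size(64));
}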
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/strided_index.rs
use crate::Layout; /// An iterator over the offset positions of the items of an N-dimensional array stored in a /// flat buffer, following the (potentially non-contiguous) strides of its layout. #[derive(Debug)] pub struct StridedIndex<'a> { next_storage_index: Option<usize>, multi_index: Vec<usize>, dims: &'a [usize], stride: &'a [usize], } impl<'a> StridedIndex<'a> { pub(crate) fn new(dims: &'a [usize], stride: &'a [usize], start_offset: usize) -> Self { let elem_count: usize = dims.iter().product(); let next_storage_index = if elem_count == 0 { None } else { // This also covers the scalar case (no dims, a single element). Some(start_offset) }; StridedIndex { next_storage_index, multi_index: vec![0; dims.len()], dims, stride, } } pub(crate) fn from_layout(l: &'a Layout) -> Self { Self::new(l.dims(), l.stride(), l.start_offset()) } } impl<'a> Iterator for StridedIndex<'a> { type Item = usize; fn next(&mut self) -> Option<Self::Item> { let storage_index = match self.next_storage_index { None => return None, Some(storage_index) => storage_index, }; let mut updated = false; let mut next_storage_index = storage_index; for ((multi_i, max_i), stride_i) in self .multi_index .iter_mut() .zip(self.dims.iter()) .zip(self.stride.iter()) .rev() { let next_i = *multi_i + 1; if next_i < *max_i { *multi_i = next_i; updated = true; next_storage_index += stride_i; break; } else { next_storage_index -= *multi_i * stride_i; *multi_i = 0 } } self.next_storage_index = if updated { Some(next_storage_index) } else { None }; Some(storage_index) } } #[derive(Debug)] pub enum StridedBlocks<'a> { SingleBlock { start_offset: usize, len: usize, }, MultipleBlocks { block_start_index: StridedIndex<'a>, block_len: usize, }, }
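A worked example of the iteration above: `dims = [2, 3]` with `stride = [1, 2]` describes the transpose of a contiguous 3x2 buffer; walking the multi-index in row-major order and dotting it with the strides visits storage offsets 0, 2, 4, 1, 3, 5. A standalone check of the same arithmetic (a re-derivation for illustration, not the crate's API, since `StridedIndex::new` is `pub(crate)`):

fn offsets(dims: &[usize], stride: &[usize], start: usize) -> Vec<usize> {
    let elem_count: usize = dims.iter().product();
    let mut multi = vec![0usize; dims.len()];
    let mut out = Vec::with_capacity(elem_count);
    for _ in 0..elem_count {
        // Storage offset is the dot product of the multi-index with the strides.
        let off: usize = multi.iter().zip(stride).map(|(i, s)| i * s).sum();
        out.push(start + off);
        // Increment the multi-index, rightmost dimension first.
        for d in (0..dims.len()).rev() {
            multi[d] += 1;
            if multi[d] < dims[d] {
                break;
            }
            multi[d] = 0;
        }
    }
    out
}

fn main() {
    assert_eq!(offsets(&[2, 3], &[1, 2], 0), vec![0, 2, 4, 1, 3, 5]);
    println!("{:?}", offsets(&[2, 3], &[1, 2], 0));
}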
0
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/src/dummy_cuda_backend.rs
#![allow(dead_code)] use crate::op::{BinaryOpT, CmpOp, ReduceOp, UnaryOpT}; use crate::{CpuStorage, DType, Error, Layout, Result, Shape}; #[derive(Debug, Clone)] pub struct CudaDevice; #[derive(Debug)] pub struct CudaStorage; macro_rules! fail { () => { unimplemented!("cuda support has not been enabled, add `cuda` feature to enable.") }; } impl crate::backend::BackendStorage for CudaStorage { type Device = CudaDevice; fn try_clone(&self, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn dtype(&self) -> DType { fail!() } fn device(&self) -> &Self::Device { fail!() } fn to_cpu_storage(&self) -> Result<CpuStorage> { Err(Error::NotCompiledWithCudaSupport) } fn affine(&self, _: &Layout, _: f64, _: f64) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn powf(&self, _: &Layout, _: f64) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn elu(&self, _: &Layout, _: f64) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn reduce_op(&self, _: ReduceOp, _: &Layout, _: &[usize]) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn cmp(&self, _: CmpOp, _: &Self, _: &Layout, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn to_dtype(&self, _: &Layout, _: DType) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn unary_impl<B: UnaryOpT>(&self, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn binary_impl<B: BinaryOpT>(&self, _: &Self, _: &Layout, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn where_cond(&self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn conv1d( &self, _: &Layout, _: &Self, _: &Layout, _: &crate::conv::ParamsConv1D, ) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn conv_transpose1d( &self, _: &Layout, _: &Self, _: &Layout, _: &crate::conv::ParamsConvTranspose1D, ) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn conv2d( &self, _: &Layout, _: &Self, _: &Layout, _: &crate::conv::ParamsConv2D, ) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn conv_transpose2d( &self, _l: &Layout, _kernel: &Self, _kernel_l: &Layout, _params: &crate::conv::ParamsConvTranspose2D, ) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn index_select(&self, _: &Self, _: &Layout, _: &Layout, _: usize) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn gather(&self, _: &Layout, _: &Self, _: &Layout, _: usize) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn scatter_add( &self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout, _: usize, ) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn index_add( &self, _: &Layout, _: &Self, _: &Layout, _: &Self, _: &Layout, _: usize, ) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn matmul( &self, _: &Self, _: (usize, usize, usize, usize), _: &Layout, _: &Layout, ) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn copy_strided_src(&self, _: &mut Self, _: usize, _: &Layout) -> Result<()> { Err(Error::NotCompiledWithCudaSupport) } fn avg_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn max_pool2d(&self, _: &Layout, _: (usize, usize), _: (usize, usize)) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn upsample_nearest1d(&self, _: &Layout, _: usize) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn upsample_nearest2d(&self, _: &Layout, _: usize, _: 
usize) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } } impl crate::backend::BackendDevice for CudaDevice { type Storage = CudaStorage; fn new(_: usize) -> Result<Self> { Err(Error::NotCompiledWithCudaSupport) } fn set_seed(&self, _: u64) -> Result<()> { Err(Error::NotCompiledWithCudaSupport) } fn location(&self) -> crate::DeviceLocation { fail!() } fn same_device(&self, _: &Self) -> bool { fail!() } fn zeros_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage> { Err(Error::NotCompiledWithCudaSupport) } fn ones_impl(&self, _shape: &Shape, _dtype: DType) -> Result<Self::Storage> { Err(Error::NotCompiledWithCudaSupport) } fn storage_from_cpu_storage(&self, _: &CpuStorage) -> Result<Self::Storage> { Err(Error::NotCompiledWithCudaSupport) } fn rand_uniform(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage> { Err(Error::NotCompiledWithCudaSupport) } fn rand_normal(&self, _: &Shape, _: DType, _: f64, _: f64) -> Result<Self::Storage> { Err(Error::NotCompiledWithCudaSupport) } }
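This stub exists so the rest of the crate can name `CudaDevice` and `CudaStorage` unconditionally; every method simply returns `Error::NotCompiledWithCudaSupport` (or hits the `fail!` macro for the few methods that cannot return a `Result`). A self-contained sketch of the same feature-gated fallback pattern, assuming a hypothetical `gpu` cargo feature:

#[derive(Debug)]
enum Error {
    NotCompiledWithGpuSupport,
}
type Result<T> = std::result::Result<T, Error>;

#[cfg(feature = "gpu")]
mod backend {
    pub struct Device;
    impl Device {
        pub fn new(_ordinal: usize) -> super::Result<Self> {
            Ok(Device)
        }
    }
}

#[cfg(not(feature = "gpu"))]
mod backend {
    // Same surface as the real backend, but construction always errors.
    pub struct Device;
    impl Device {
        pub fn new(_ordinal: usize) -> super::Result<Self> {
            Err(super::Error::NotCompiledWithGpuSupport)
        }
    }
}

fn main() {
    // Without `--features gpu` this prints the fallback error.
    println!("{:?}", backend::Device::new(0).map(|_| "gpu device"));
}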
0
hf_public_repos/candle/candle-core/src
hf_public_repos/candle/candle-core/src/cpu/erf.rs
#![allow(clippy::excessive_precision)] // Code taken from https://github.com/statrs-dev/statrs //! Provides the [error](https://en.wikipedia.org/wiki/Error_function) and //! related functions mod evaluate { //! Provides functions that don't have a numerical solution and must //! be solved computationally (e.g. evaluation of a polynomial) /// evaluates a polynomial at `z` where `coeff` are the coeffecients /// to a polynomial of order `k` where `k` is the length of `coeff` and the /// coeffecient /// to the `k`th power is the `k`th element in coeff. E.g. [3,-1,2] equates to /// `2z^2 - z + 3` /// /// # Remarks /// /// Returns 0 for a 0 length coefficient slice pub fn polynomial(z: f64, coeff: &[f64]) -> f64 { let n = coeff.len(); if n == 0 { return 0.0; } let mut sum = *coeff.last().unwrap(); for c in coeff[0..n - 1].iter().rev() { sum = *c + z * sum; } sum } } use std::f64; /// `erf` calculates the error function at `x`. pub fn erf(x: f64) -> f64 { if x.is_nan() { f64::NAN } else if x >= 0.0 && x.is_infinite() { 1.0 } else if x <= 0.0 && x.is_infinite() { -1.0 } else if x == 0. { 0.0 } else { erf_impl(x, false) } } /// `erf_inv` calculates the inverse error function /// at `x`. pub fn erf_inv(x: f64) -> f64 { if x == 0.0 { 0.0 } else if x >= 1.0 { f64::INFINITY } else if x <= -1.0 { f64::NEG_INFINITY } else if x < 0.0 { erf_inv_impl(-x, 1.0 + x, -1.0) } else { erf_inv_impl(x, 1.0 - x, 1.0) } } /// `erfc` calculates the complementary error function /// at `x`. pub fn erfc(x: f64) -> f64 { if x.is_nan() { f64::NAN } else if x == f64::INFINITY { 0.0 } else if x == f64::NEG_INFINITY { 2.0 } else { erf_impl(x, true) } } /// `erfc_inv` calculates the complementary inverse /// error function at `x`. pub fn erfc_inv(x: f64) -> f64 { if x <= 0.0 { f64::INFINITY } else if x >= 2.0 { f64::NEG_INFINITY } else if x > 1.0 { erf_inv_impl(-1.0 + x, 2.0 - x, -1.0) } else { erf_inv_impl(1.0 - x, x, 1.0) } } // ********************************************************** // ********** Coefficients for erf_impl polynomial ********** // ********************************************************** /// Polynomial coefficients for a numerator of `erf_impl` /// in the interval [1e-10, 0.5]. const ERF_IMPL_AN: &[f64] = &[ 0.00337916709551257388990745, -0.00073695653048167948530905, -0.374732337392919607868241, 0.0817442448733587196071743, -0.0421089319936548595203468, 0.0070165709512095756344528, -0.00495091255982435110337458, 0.000871646599037922480317225, ]; /// Polynomial coefficients for a denominator of `erf_impl` /// in the interval [1e-10, 0.5] const ERF_IMPL_AD: &[f64] = &[ 1.0, -0.218088218087924645390535, 0.412542972725442099083918, -0.0841891147873106755410271, 0.0655338856400241519690695, -0.0120019604454941768171266, 0.00408165558926174048329689, -0.000615900721557769691924509, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [0.5, 0.75]. const ERF_IMPL_BN: &[f64] = &[ -0.0361790390718262471360258, 0.292251883444882683221149, 0.281447041797604512774415, 0.125610208862766947294894, 0.0274135028268930549240776, 0.00250839672168065762786937, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [0.5, 0.75]. const ERF_IMPL_BD: &[f64] = &[ 1.0, 1.8545005897903486499845, 1.43575803037831418074962, 0.582827658753036572454135, 0.124810476932949746447682, 0.0113724176546353285778481, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [0.75, 1.25]. 
const ERF_IMPL_CN: &[f64] = &[ -0.0397876892611136856954425, 0.153165212467878293257683, 0.191260295600936245503129, 0.10276327061989304213645, 0.029637090615738836726027, 0.0046093486780275489468812, 0.000307607820348680180548455, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [0.75, 1.25]. const ERF_IMPL_CD: &[f64] = &[ 1.0, 1.95520072987627704987886, 1.64762317199384860109595, 0.768238607022126250082483, 0.209793185936509782784315, 0.0319569316899913392596356, 0.00213363160895785378615014, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [1.25, 2.25]. const ERF_IMPL_DN: &[f64] = &[ -0.0300838560557949717328341, 0.0538578829844454508530552, 0.0726211541651914182692959, 0.0367628469888049348429018, 0.00964629015572527529605267, 0.00133453480075291076745275, 0.778087599782504251917881e-4, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [1.25, 2.25]. const ERF_IMPL_DD: &[f64] = &[ 1.0, 1.75967098147167528287343, 1.32883571437961120556307, 0.552528596508757581287907, 0.133793056941332861912279, 0.0179509645176280768640766, 0.00104712440019937356634038, -0.106640381820357337177643e-7, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [2.25, 3.5]. const ERF_IMPL_EN: &[f64] = &[ -0.0117907570137227847827732, 0.014262132090538809896674, 0.0202234435902960820020765, 0.00930668299990432009042239, 0.00213357802422065994322516, 0.00025022987386460102395382, 0.120534912219588189822126e-4, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [2.25, 3.5]. const ERF_IMPL_ED: &[f64] = &[ 1.0, 1.50376225203620482047419, 0.965397786204462896346934, 0.339265230476796681555511, 0.0689740649541569716897427, 0.00771060262491768307365526, 0.000371421101531069302990367, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [3.5, 5.25]. const ERF_IMPL_FN: &[f64] = &[ -0.00546954795538729307482955, 0.00404190278731707110245394, 0.0054963369553161170521356, 0.00212616472603945399437862, 0.000394984014495083900689956, 0.365565477064442377259271e-4, 0.135485897109932323253786e-5, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [3.5, 5.25]. const ERF_IMPL_FD: &[f64] = &[ 1.0, 1.21019697773630784832251, 0.620914668221143886601045, 0.173038430661142762569515, 0.0276550813773432047594539, 0.00240625974424309709745382, 0.891811817251336577241006e-4, -0.465528836283382684461025e-11, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [5.25, 8]. const ERF_IMPL_GN: &[f64] = &[ -0.00270722535905778347999196, 0.0013187563425029400461378, 0.00119925933261002333923989, 0.00027849619811344664248235, 0.267822988218331849989363e-4, 0.923043672315028197865066e-6, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [5.25, 8]. const ERF_IMPL_GD: &[f64] = &[ 1.0, 0.814632808543141591118279, 0.268901665856299542168425, 0.0449877216103041118694989, 0.00381759663320248459168994, 0.000131571897888596914350697, 0.404815359675764138445257e-11, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [8, 11.5]. const ERF_IMPL_HN: &[f64] = &[ -0.00109946720691742196814323, 0.000406425442750422675169153, 0.000274499489416900707787024, 0.465293770646659383436343e-4, 0.320955425395767463401993e-5, 0.778286018145020892261936e-7, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [8, 11.5]. 
const ERF_IMPL_HD: &[f64] = &[ 1.0, 0.588173710611846046373373, 0.139363331289409746077541, 0.0166329340417083678763028, 0.00100023921310234908642639, 0.24254837521587225125068e-4, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [11.5, 17]. const ERF_IMPL_IN: &[f64] = &[ -0.00056907993601094962855594, 0.000169498540373762264416984, 0.518472354581100890120501e-4, 0.382819312231928859704678e-5, 0.824989931281894431781794e-7, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [11.5, 17]. const ERF_IMPL_ID: &[f64] = &[ 1.0, 0.339637250051139347430323, 0.043472647870310663055044, 0.00248549335224637114641629, 0.535633305337152900549536e-4, -0.117490944405459578783846e-12, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [17, 24]. const ERF_IMPL_JN: &[f64] = &[ -0.000241313599483991337479091, 0.574224975202501512365975e-4, 0.115998962927383778460557e-4, 0.581762134402593739370875e-6, 0.853971555085673614607418e-8, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [17, 24]. const ERF_IMPL_JD: &[f64] = &[ 1.0, 0.233044138299687841018015, 0.0204186940546440312625597, 0.000797185647564398289151125, 0.117019281670172327758019e-4, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [24, 38]. const ERF_IMPL_KN: &[f64] = &[ -0.000146674699277760365803642, 0.162666552112280519955647e-4, 0.269116248509165239294897e-5, 0.979584479468091935086972e-7, 0.101994647625723465722285e-8, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [24, 38]. const ERF_IMPL_KD: &[f64] = &[ 1.0, 0.165907812944847226546036, 0.0103361716191505884359634, 0.000286593026373868366935721, 0.298401570840900340874568e-5, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [38, 60]. const ERF_IMPL_LN: &[f64] = &[ -0.583905797629771786720406e-4, 0.412510325105496173512992e-5, 0.431790922420250949096906e-6, 0.993365155590013193345569e-8, 0.653480510020104699270084e-10, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [38, 60]. const ERF_IMPL_LD: &[f64] = &[ 1.0, 0.105077086072039915406159, 0.00414278428675475620830226, 0.726338754644523769144108e-4, 0.477818471047398785369849e-6, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [60, 85]. const ERF_IMPL_MN: &[f64] = &[ -0.196457797609229579459841e-4, 0.157243887666800692441195e-5, 0.543902511192700878690335e-7, 0.317472492369117710852685e-9, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [60, 85]. const ERF_IMPL_MD: &[f64] = &[ 1.0, 0.052803989240957632204885, 0.000926876069151753290378112, 0.541011723226630257077328e-5, 0.535093845803642394908747e-15, ]; /// Polynomial coefficients for a numerator in `erf_impl` /// in the interval [85, 110]. const ERF_IMPL_NN: &[f64] = &[ -0.789224703978722689089794e-5, 0.622088451660986955124162e-6, 0.145728445676882396797184e-7, 0.603715505542715364529243e-10, ]; /// Polynomial coefficients for a denominator in `erf_impl` /// in the interval [85, 110]. 
const ERF_IMPL_ND: &[f64] = &[ 1.0, 0.0375328846356293715248719, 0.000467919535974625308126054, 0.193847039275845656900547e-5, ]; // ********************************************************** // ********** Coefficients for erf_inv_impl polynomial ****** // ********************************************************** /// Polynomial coefficients for a numerator of `erf_inv_impl` /// in the interval [0, 0.5]. const ERF_INV_IMPL_AN: &[f64] = &[ -0.000508781949658280665617, -0.00836874819741736770379, 0.0334806625409744615033, -0.0126926147662974029034, -0.0365637971411762664006, 0.0219878681111168899165, 0.00822687874676915743155, -0.00538772965071242932965, ]; /// Polynomial coefficients for a denominator of `erf_inv_impl` /// in the interval [0, 0.5]. const ERF_INV_IMPL_AD: &[f64] = &[ 1.0, -0.970005043303290640362, -1.56574558234175846809, 1.56221558398423026363, 0.662328840472002992063, -0.71228902341542847553, -0.0527396382340099713954, 0.0795283687341571680018, -0.00233393759374190016776, 0.000886216390456424707504, ]; /// Polynomial coefficients for a numerator of `erf_inv_impl` /// in the interval [0.5, 0.75]. const ERF_INV_IMPL_BN: &[f64] = &[ -0.202433508355938759655, 0.105264680699391713268, 8.37050328343119927838, 17.6447298408374015486, -18.8510648058714251895, -44.6382324441786960818, 17.445385985570866523, 21.1294655448340526258, -3.67192254707729348546, ]; /// Polynomial coefficients for a denominator of `erf_inv_impl` /// in the interval [0.5, 0.75]. const ERF_INV_IMPL_BD: &[f64] = &[ 1.0, 6.24264124854247537712, 3.9713437953343869095, -28.6608180499800029974, -20.1432634680485188801, 48.5609213108739935468, 10.8268667355460159008, -22.6436933413139721736, 1.72114765761200282724, ]; /// Polynomial coefficients for a numerator of `erf_inv_impl` /// in the interval [0.75, 1] with x less than 3. const ERF_INV_IMPL_CN: &[f64] = &[ -0.131102781679951906451, -0.163794047193317060787, 0.117030156341995252019, 0.387079738972604337464, 0.337785538912035898924, 0.142869534408157156766, 0.0290157910005329060432, 0.00214558995388805277169, -0.679465575181126350155e-6, 0.285225331782217055858e-7, -0.681149956853776992068e-9, ]; /// Polynomial coefficients for a denominator of `erf_inv_impl` /// in the interval [0.75, 1] with x less than 3. const ERF_INV_IMPL_CD: &[f64] = &[ 1.0, 3.46625407242567245975, 5.38168345707006855425, 4.77846592945843778382, 2.59301921623620271374, 0.848854343457902036425, 0.152264338295331783612, 0.01105924229346489121, ]; /// Polynomial coefficients for a numerator of `erf_inv_impl` /// in the interval [0.75, 1] with x between 3 and 6. const ERF_INV_IMPL_DN: &[f64] = &[ -0.0350353787183177984712, -0.00222426529213447927281, 0.0185573306514231072324, 0.00950804701325919603619, 0.00187123492819559223345, 0.000157544617424960554631, 0.460469890584317994083e-5, -0.230404776911882601748e-9, 0.266339227425782031962e-11, ]; /// Polynomial coefficients for a denominator of `erf_inv_impl` /// in the interval [0.75, 1] with x between 3 and 6. const ERF_INV_IMPL_DD: &[f64] = &[ 1.0, 1.3653349817554063097, 0.762059164553623404043, 0.220091105764131249824, 0.0341589143670947727934, 0.00263861676657015992959, 0.764675292302794483503e-4, ]; /// Polynomial coefficients for a numerator of `erf_inv_impl` /// in the interval [0.75, 1] with x between 6 and 18. 
const ERF_INV_IMPL_EN: &[f64] = &[ -0.0167431005076633737133, -0.00112951438745580278863, 0.00105628862152492910091, 0.000209386317487588078668, 0.149624783758342370182e-4, 0.449696789927706453732e-6, 0.462596163522878599135e-8, -0.281128735628831791805e-13, 0.99055709973310326855e-16, ]; /// Polynomial coefficients for a denominator of `erf_inv_impl` /// in the interval [0.75, 1] with x between 6 and 18. const ERF_INV_IMPL_ED: &[f64] = &[ 1.0, 0.591429344886417493481, 0.138151865749083321638, 0.0160746087093676504695, 0.000964011807005165528527, 0.275335474764726041141e-4, 0.282243172016108031869e-6, ]; /// Polynomial coefficients for a numerator of `erf_inv_impl` /// in the interval [0.75, 1] with x between 18 and 44. const ERF_INV_IMPL_FN: &[f64] = &[ -0.0024978212791898131227, -0.779190719229053954292e-5, 0.254723037413027451751e-4, 0.162397777342510920873e-5, 0.396341011304801168516e-7, 0.411632831190944208473e-9, 0.145596286718675035587e-11, -0.116765012397184275695e-17, ]; /// Polynomial coefficients for a denominator of `erf_inv_impl` /// in the interval [0.75, 1] with x between 18 and 44. const ERF_INV_IMPL_FD: &[f64] = &[ 1.0, 0.207123112214422517181, 0.0169410838120975906478, 0.000690538265622684595676, 0.145007359818232637924e-4, 0.144437756628144157666e-6, 0.509761276599778486139e-9, ]; /// Polynomial coefficients for a numerator of `erf_inv_impl` /// in the interval [0.75, 1] with x greater than 44. const ERF_INV_IMPL_GN: &[f64] = &[ -0.000539042911019078575891, -0.28398759004727721098e-6, 0.899465114892291446442e-6, 0.229345859265920864296e-7, 0.225561444863500149219e-9, 0.947846627503022684216e-12, 0.135880130108924861008e-14, -0.348890393399948882918e-21, ]; /// Polynomial coefficients for a denominator of `erf_inv_impl` /// in the interval [0.75, 1] with x greater than 44. const ERF_INV_IMPL_GD: &[f64] = &[ 1.0, 0.0845746234001899436914, 0.00282092984726264681981, 0.468292921940894236786e-4, 0.399968812193862100054e-6, 0.161809290887904476097e-8, 0.231558608310259605225e-11, ]; /// `erf_impl` computes the error function at `z`. 
/// If `inv` is true, `1 - erf` is calculated as opposed to `erf` fn erf_impl(z: f64, inv: bool) -> f64 { if z < 0.0 { if !inv { return -erf_impl(-z, false); } if z < -0.5 { return 2.0 - erf_impl(-z, true); } return 1.0 + erf_impl(-z, false); } let result = if z < 0.5 { if z < 1e-10 { z * 1.125 + z * 0.003379167095512573896158903121545171688 } else { z * 1.125 + z * evaluate::polynomial(z, ERF_IMPL_AN) / evaluate::polynomial(z, ERF_IMPL_AD) } } else if z < 110.0 { let (r, b) = if z < 0.75 { ( evaluate::polynomial(z - 0.5, ERF_IMPL_BN) / evaluate::polynomial(z - 0.5, ERF_IMPL_BD), 0.3440242112, ) } else if z < 1.25 { ( evaluate::polynomial(z - 0.75, ERF_IMPL_CN) / evaluate::polynomial(z - 0.75, ERF_IMPL_CD), 0.419990927, ) } else if z < 2.25 { ( evaluate::polynomial(z - 1.25, ERF_IMPL_DN) / evaluate::polynomial(z - 1.25, ERF_IMPL_DD), 0.4898625016, ) } else if z < 3.5 { ( evaluate::polynomial(z - 2.25, ERF_IMPL_EN) / evaluate::polynomial(z - 2.25, ERF_IMPL_ED), 0.5317370892, ) } else if z < 5.25 { ( evaluate::polynomial(z - 3.5, ERF_IMPL_FN) / evaluate::polynomial(z - 3.5, ERF_IMPL_FD), 0.5489973426, ) } else if z < 8.0 { ( evaluate::polynomial(z - 5.25, ERF_IMPL_GN) / evaluate::polynomial(z - 5.25, ERF_IMPL_GD), 0.5571740866, ) } else if z < 11.5 { ( evaluate::polynomial(z - 8.0, ERF_IMPL_HN) / evaluate::polynomial(z - 8.0, ERF_IMPL_HD), 0.5609807968, ) } else if z < 17.0 { ( evaluate::polynomial(z - 11.5, ERF_IMPL_IN) / evaluate::polynomial(z - 11.5, ERF_IMPL_ID), 0.5626493692, ) } else if z < 24.0 { ( evaluate::polynomial(z - 17.0, ERF_IMPL_JN) / evaluate::polynomial(z - 17.0, ERF_IMPL_JD), 0.5634598136, ) } else if z < 38.0 { ( evaluate::polynomial(z - 24.0, ERF_IMPL_KN) / evaluate::polynomial(z - 24.0, ERF_IMPL_KD), 0.5638477802, ) } else if z < 60.0 { ( evaluate::polynomial(z - 38.0, ERF_IMPL_LN) / evaluate::polynomial(z - 38.0, ERF_IMPL_LD), 0.5640528202, ) } else if z < 85.0 { ( evaluate::polynomial(z - 60.0, ERF_IMPL_MN) / evaluate::polynomial(z - 60.0, ERF_IMPL_MD), 0.5641309023, ) } else { ( evaluate::polynomial(z - 85.0, ERF_IMPL_NN) / evaluate::polynomial(z - 85.0, ERF_IMPL_ND), 0.5641584396, ) }; let g = (-z * z).exp() / z; g * b + g * r } else { 0.0 }; if inv && z >= 0.5 { result } else if z >= 0.5 || inv { 1.0 - result } else { result } } // `erf_inv_impl` computes the inverse error function where // `p`,`q`, and `s` are the first, second, and third intermediate // parameters respectively fn erf_inv_impl(p: f64, q: f64, s: f64) -> f64 { let result = if p <= 0.5 { let y = 0.0891314744949340820313; let g = p * (p + 10.0); let r = evaluate::polynomial(p, ERF_INV_IMPL_AN) / evaluate::polynomial(p, ERF_INV_IMPL_AD); g * y + g * r } else if q >= 0.25 { let y = 2.249481201171875; let g = (-2.0 * q.ln()).sqrt(); let xs = q - 0.25; let r = evaluate::polynomial(xs, ERF_INV_IMPL_BN) / evaluate::polynomial(xs, ERF_INV_IMPL_BD); g / (y + r) } else { let x = (-q.ln()).sqrt(); if x < 3.0 { let y = 0.807220458984375; let xs = x - 1.125; let r = evaluate::polynomial(xs, ERF_INV_IMPL_CN) / evaluate::polynomial(xs, ERF_INV_IMPL_CD); y * x + r * x } else if x < 6.0 { let y = 0.93995571136474609375; let xs = x - 3.0; let r = evaluate::polynomial(xs, ERF_INV_IMPL_DN) / evaluate::polynomial(xs, ERF_INV_IMPL_DD); y * x + r * x } else if x < 18.0 { let y = 0.98362827301025390625; let xs = x - 6.0; let r = evaluate::polynomial(xs, ERF_INV_IMPL_EN) / evaluate::polynomial(xs, ERF_INV_IMPL_ED); y * x + r * x } else if x < 44.0 { let y = 0.99714565277099609375; let xs = x - 18.0; let r = 
evaluate::polynomial(xs, ERF_INV_IMPL_FN) / evaluate::polynomial(xs, ERF_INV_IMPL_FD); y * x + r * x } else { let y = 0.99941349029541015625; let xs = x - 44.0; let r = evaluate::polynomial(xs, ERF_INV_IMPL_GN) / evaluate::polynomial(xs, ERF_INV_IMPL_GD); y * x + r * x } }; s * result }
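As the `polynomial` doc notes, coefficients are stored lowest power first, so `[3., -1., 2.]` evaluates `2z^2 - z + 3` by Horner's rule. A direct consumer of `erf` is the standard normal CDF, `Phi(x) = (1 + erf(x / sqrt(2))) / 2`; a sketch, assuming the functions are reachable from the caller (the module path below is illustrative):

use candle_core::cpu::erf::{erf, erf_inv};

fn normal_cdf(x: f64) -> f64 {
    0.5 * (1.0 + erf(x / std::f64::consts::SQRT_2))
}

fn main() {
    println!("Phi(0) = {}", normal_cdf(0.0)); // 0.5
    println!("Phi(1.96) = {:.4}", normal_cdf(1.96)); // ~0.9750
    // erf_inv inverts erf on (-1, 1):
    let x = 0.3;
    println!("erf_inv(erf(0.3)) = {}", erf_inv(erf(x))); // ~0.3
}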
0
hf_public_repos/candle/candle-core/src
hf_public_repos/candle/candle-core/src/cpu/neon.rs
use super::Cpu; #[cfg(target_arch = "arm")] use core::arch::arm::*; #[cfg(target_arch = "aarch64")] use core::arch::aarch64::*; pub struct CurrentCpu {} const STEP: usize = 16; const EPR: usize = 4; const ARR: usize = STEP / EPR; impl CurrentCpu { #[cfg(target_arch = "aarch64")] unsafe fn reduce_one(x: float32x4_t) -> f32 { vaddvq_f32(x) } #[cfg(target_arch = "arm")] unsafe fn reduce_one(x: float32x4_t) -> f32 { vgetq_lane_f32(x, 0) + vgetq_lane_f32(x, 1) + vgetq_lane_f32(x, 2) + vgetq_lane_f32(x, 3) } } impl Cpu<ARR> for CurrentCpu { type Unit = float32x4_t; type Array = [float32x4_t; ARR]; const STEP: usize = STEP; const EPR: usize = EPR; fn n() -> usize { ARR } unsafe fn zero() -> Self::Unit { vdupq_n_f32(0.0) } unsafe fn from_f32(x: f32) -> Self::Unit { vdupq_n_f32(x) } unsafe fn zero_array() -> Self::Array { [Self::zero(); ARR] } unsafe fn load(mem_addr: *const f32) -> Self::Unit { vld1q_f32(mem_addr) } unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit { vaddq_f32(a, b) } unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit { vfmaq_f32(a, b, c) } unsafe fn vec_store(mem_addr: *mut f32, a: Self::Unit) { vst1q_f32(mem_addr, a); } unsafe fn vec_reduce(mut x: Self::Array, y: *mut f32) { for i in 0..ARR / 2 { x[2 * i] = vaddq_f32(x[2 * i], x[2 * i + 1]); } for i in 0..ARR / 4 { x[4 * i] = vaddq_f32(x[4 * i], x[4 * i + 2]); } *y = Self::reduce_one(x[0]); } }
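`vec_reduce` collapses the `ARR` accumulator registers with a pairwise tree, halving the number of live registers per pass, before `reduce_one` performs the final horizontal add within a register. The same shape on plain `f32` values, as a scalar reference:

fn tree_reduce(mut x: [f32; 4]) -> f32 {
    const ARR: usize = 4;
    // Pass 1: pairs (0,1) and (2,3).
    for i in 0..ARR / 2 {
        x[2 * i] += x[2 * i + 1];
    }
    // Pass 2: pair (0,2).
    for i in 0..ARR / 4 {
        x[4 * i] += x[4 * i + 2];
    }
    x[0]
}

fn main() {
    assert_eq!(tree_reduce([1.0, 2.0, 3.0, 4.0]), 10.0);
    println!("{}", tree_reduce([1.0, 2.0, 3.0, 4.0]));
}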
0
hf_public_repos/candle/candle-core/src
hf_public_repos/candle/candle-core/src/cpu/simd128.rs
use super::Cpu; use core::arch::wasm32::*; pub struct CurrentCpu {} const STEP: usize = 16; const EPR: usize = 4; const ARR: usize = STEP / EPR; impl Cpu<ARR> for CurrentCpu { type Unit = v128; type Array = [v128; ARR]; const STEP: usize = STEP; const EPR: usize = EPR; fn n() -> usize { ARR } unsafe fn zero() -> Self::Unit { f32x4_splat(0.0) } unsafe fn zero_array() -> Self::Array { [Self::zero(); ARR] } unsafe fn from_f32(v: f32) -> Self::Unit { f32x4_splat(v) } unsafe fn load(mem_addr: *const f32) -> Self::Unit { v128_load(mem_addr as *mut v128) } unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit { f32x4_add(a, b) } unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit { f32x4_add(f32x4_mul(b, c), a) } unsafe fn vec_store(mem_addr: *mut f32, a: Self::Unit) { v128_store(mem_addr as *mut v128, a); } unsafe fn vec_reduce(mut x: Self::Array, y: *mut f32) { for i in 0..ARR / 2 { x[2 * i] = f32x4_add(x[2 * i], x[2 * i + 1]); } for i in 0..ARR / 4 { x[4 * i] = f32x4_add(x[4 * i], x[4 * i + 2]); } for i in 0..ARR / 8 { x[8 * i] = f32x4_add(x[8 * i], x[8 * i + 4]); } *y = f32x4_extract_lane::<0>(x[0]) + f32x4_extract_lane::<1>(x[0]) + f32x4_extract_lane::<2>(x[0]) + f32x4_extract_lane::<3>(x[0]); } }
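Each SIMD backend (NEON, simd128, AVX) implements the same `Cpu` trait, so kernels like the dot product are written once against `STEP`/`EPR`/`load`/`vec_fma`/`vec_reduce`. A minimal sketch of that trait-as-portability-layer idea, with a scalar stand-in implementation (illustrative only; the crate's trait has a richer surface):

trait SimdF32 {
    type Unit: Copy;
    const EPR: usize; // elements per register
    fn splat(v: f32) -> Self::Unit;
    fn load(xs: &[f32]) -> Self::Unit;
    fn fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit;
    fn reduce(a: Self::Unit) -> f32;
}

struct Scalar;
impl SimdF32 for Scalar {
    type Unit = f32;
    const EPR: usize = 1;
    fn splat(v: f32) -> f32 { v }
    fn load(xs: &[f32]) -> f32 { xs[0] }
    fn fma(a: f32, b: f32, c: f32) -> f32 { a + b * c }
    fn reduce(a: f32) -> f32 { a }
}

// Assumes a.len() == b.len() and the length is a multiple of EPR.
fn dot<C: SimdF32>(a: &[f32], b: &[f32]) -> f32 {
    let mut acc = C::splat(0.0);
    for i in (0..a.len()).step_by(C::EPR) {
        acc = C::fma(acc, C::load(&a[i..]), C::load(&b[i..]));
    }
    C::reduce(acc)
}

fn main() {
    let (a, b) = ([1.0, 2.0, 3.0], [4.0, 5.0, 6.0]);
    println!("{}", dot::<Scalar>(&a, &b)); // 32
}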
0
hf_public_repos/candle/candle-core/src
hf_public_repos/candle/candle-core/src/cpu/avx.rs
use super::{Cpu, CpuF16}; #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; use half::f16; pub struct CurrentCpu {} const STEP: usize = 32; const EPR: usize = 8; const ARR: usize = STEP / EPR; impl Cpu<ARR> for CurrentCpu { type Unit = __m256; type Array = [__m256; ARR]; const STEP: usize = STEP; const EPR: usize = EPR; fn n() -> usize { ARR } unsafe fn zero() -> Self::Unit { _mm256_setzero_ps() } unsafe fn zero_array() -> Self::Array { [Self::zero(); ARR] } unsafe fn from_f32(v: f32) -> Self::Unit { _mm256_set1_ps(v) } unsafe fn load(mem_addr: *const f32) -> Self::Unit { _mm256_loadu_ps(mem_addr) } unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit { _mm256_add_ps(a, b) } unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit { _mm256_add_ps(_mm256_mul_ps(b, c), a) } unsafe fn vec_store(mem_addr: *mut f32, a: Self::Unit) { _mm256_storeu_ps(mem_addr, a); } unsafe fn vec_reduce(mut x: Self::Array, y: *mut f32) { for i in 0..ARR / 2 { x[2 * i] = _mm256_add_ps(x[2 * i], x[2 * i + 1]); } for i in 0..ARR / 4 { x[4 * i] = _mm256_add_ps(x[4 * i], x[4 * i + 2]); } #[allow(clippy::reversed_empty_ranges)] for i in 0..ARR / 8 { x[8 * i] = _mm256_add_ps(x[8 * i], x[8 * i + 4]); } let t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), _mm256_extractf128_ps(x[0], 1)); let t1 = _mm_hadd_ps(t0, t0); *y = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); } } pub struct CurrentCpuF16 {} impl CpuF16<ARR> for CurrentCpuF16 { type Unit = __m256; type Array = [__m256; ARR]; const STEP: usize = STEP; const EPR: usize = EPR; fn n() -> usize { ARR } unsafe fn zero() -> Self::Unit { _mm256_setzero_ps() } unsafe fn zero_array() -> Self::Array { [Self::zero(); ARR] } unsafe fn from_f32(v: f32) -> Self::Unit { _mm256_set1_ps(v) } #[cfg(target_feature = "f16c")] unsafe fn load(mem_addr: *const f16) -> Self::Unit { _mm256_cvtph_ps(_mm_loadu_si128(mem_addr as *const __m128i)) } #[cfg(not(target_feature = "f16c"))] unsafe fn load(mem_addr: *const f16) -> Self::Unit { let mut tmp = [0.0f32; 8]; for i in 0..8 { tmp[i] = (*mem_addr.add(i)).to_f32(); } _mm256_loadu_ps(tmp.as_ptr()) } unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit { _mm256_add_ps(a, b) } unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit { _mm256_add_ps(_mm256_mul_ps(b, c), a) } #[cfg(target_feature = "f16c")] unsafe fn vec_store(mem_addr: *mut f16, a: Self::Unit) { _mm_storeu_si128(mem_addr as *mut __m128i, _mm256_cvtps_ph(a, 0)) } #[cfg(not(target_feature = "f16c"))] unsafe fn vec_store(mem_addr: *mut f16, a: Self::Unit) { let mut tmp = [0.0f32; 8]; _mm256_storeu_ps(tmp.as_mut_ptr(), a); for i in 0..8 { *mem_addr.add(i) = f16::from_f32(tmp[i]); } } unsafe fn vec_reduce(mut x: Self::Array, y: *mut f32) { let mut offset = ARR >> 1; for i in 0..offset { x[i] = _mm256_add_ps(x[i], x[offset + i]); } offset >>= 1; for i in 0..offset { x[i] = _mm256_add_ps(x[i], x[offset + i]); } offset >>= 1; for i in 0..offset { x[i] = _mm256_add_ps(x[i], x[offset + i]); } let t0 = _mm_add_ps(_mm256_castps256_ps128(x[0]), _mm256_extractf128_ps(x[0], 1)); let t1 = _mm_hadd_ps(t0, t0); *y = _mm_cvtss_f32(_mm_hadd_ps(t1, t1)); } }
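When `f16c` is unavailable, the `load`/`vec_store` fallbacks above round-trip through `f32` in software using the `half` crate instead of the `_mm256_cvtph_ps`/`_mm256_cvtps_ph` instructions. The scalar core of that fallback, as a standalone sketch (assumes `half` as a dependency):

use half::f16;

// Widen 8 half-precision values to f32, one element at a time.
fn load_f16_as_f32(src: &[f16; 8]) -> [f32; 8] {
    let mut out = [0.0f32; 8];
    for (o, s) in out.iter_mut().zip(src.iter()) {
        *o = s.to_f32();
    }
    out
}

fn main() {
    let xs = [f16::from_f32(1.5); 8];
    println!("{:?}", load_f16_as_f32(&xs));
}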
0
hf_public_repos/candle/candle-core/src
hf_public_repos/candle/candle-core/src/cpu/mod.rs
pub mod erf; pub mod kernels; trait Cpu<const ARR: usize> { type Unit; type Array; const STEP: usize; const EPR: usize; fn n() -> usize; unsafe fn zero() -> Self::Unit; unsafe fn zero_array() -> Self::Array; unsafe fn load(mem_addr: *const f32) -> Self::Unit; unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit; unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit; unsafe fn vec_reduce(x: Self::Array, y: *mut f32); unsafe fn from_f32(v: f32) -> Self::Unit; unsafe fn vec_store(mem_addr: *mut f32, a: Self::Unit); } trait CpuF16<const ARR: usize> { type Unit; type Array; const STEP: usize; const EPR: usize; fn n() -> usize; unsafe fn zero() -> Self::Unit; unsafe fn zero_array() -> Self::Array; unsafe fn load(mem_addr: *const f16) -> Self::Unit; unsafe fn vec_add(a: Self::Unit, b: Self::Unit) -> Self::Unit; unsafe fn vec_fma(a: Self::Unit, b: Self::Unit, c: Self::Unit) -> Self::Unit; unsafe fn vec_reduce(x: Self::Array, y: *mut f32); unsafe fn from_f32(v: f32) -> Self::Unit; unsafe fn vec_store(mem_addr: *mut f16, a: Self::Unit); } use half::f16; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] #[cfg(target_feature = "avx")] pub mod avx; #[cfg(any(target_arch = "x86", target_arch = "x86_64"))] #[cfg(target_feature = "avx")] pub use avx::{CurrentCpu, CurrentCpuF16}; #[cfg(target_arch = "wasm32")] #[cfg(target_feature = "simd128")] pub mod simd128; #[cfg(target_arch = "wasm32")] #[cfg(target_feature = "simd128")] pub use simd128::CurrentCpu; #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] #[cfg(target_feature = "neon")] pub mod neon; #[cfg(any(target_arch = "arm", target_arch = "aarch64"))] #[cfg(target_feature = "neon")] pub use neon::CurrentCpu; #[cfg(any( target_feature = "neon", target_feature = "avx", target_feature = "simd128" ))] #[inline(always)] pub(crate) unsafe fn vec_dot_f32(a_row: *const f32, b_row: *const f32, c: *mut f32, k: usize) { let np = k & !(CurrentCpu::STEP - 1); let mut sum = CurrentCpu::zero_array(); let mut ax = CurrentCpu::zero_array(); let mut ay = CurrentCpu::zero_array(); for i in (0..np).step_by(CurrentCpu::STEP) { for j in 0..CurrentCpu::n() { ax[j] = CurrentCpu::load(a_row.add(i + j * CurrentCpu::EPR)); ay[j] = CurrentCpu::load(b_row.add(i + j * CurrentCpu::EPR)); sum[j] = CurrentCpu::vec_fma(sum[j], ax[j], ay[j]); } } CurrentCpu::vec_reduce(sum, c); // leftovers for i in np..k { *c += *a_row.add(i) * (*b_row.add(i)); } } #[cfg(not(any( target_feature = "neon", target_feature = "avx", target_feature = "simd128" )))] #[inline(always)] pub(crate) unsafe fn vec_dot_f32(a_row: *const f32, b_row: *const f32, c: *mut f32, k: usize) { // leftovers for i in 0..k { *c += *a_row.add(i) * (*b_row.add(i)); } } #[cfg(any( target_feature = "neon", target_feature = "avx", target_feature = "simd128" ))] #[inline(always)] pub(crate) unsafe fn vec_sum(row: *const f32, b: *mut f32, k: usize) { let np = k & !(CurrentCpu::STEP - 1); let mut sum = CurrentCpu::zero_array(); let mut x = CurrentCpu::zero_array(); for i in (0..np).step_by(CurrentCpu::STEP) { for j in 0..CurrentCpu::n() { x[j] = CurrentCpu::load(row.add(i + j * CurrentCpu::EPR)); sum[j] = CurrentCpu::vec_add(sum[j], x[j]); } } CurrentCpu::vec_reduce(sum, b); // leftovers for i in np..k { *b += *row.add(i) } } #[cfg(not(any( target_feature = "neon", target_feature = "avx", target_feature = "simd128" )))] #[inline(always)] pub(crate) unsafe fn vec_sum(row: *const f32, b: *mut f32, k: usize) { *b = 0f32; for i in 0..k { *b += *row.add(i) } } #[cfg(target_feature = "avx")] 
#[inline(always)] pub(crate) unsafe fn vec_dot_f16(a_row: *const f16, b_row: *const f16, c: *mut f32, k: usize) { let mut sumf = 0.0f32; let np = k & !(CurrentCpuF16::STEP - 1); let mut sum = CurrentCpuF16::zero_array(); let mut ax = CurrentCpuF16::zero_array(); let mut ay = CurrentCpuF16::zero_array(); for i in (0..np).step_by(CurrentCpuF16::STEP) { for j in 0..CurrentCpuF16::n() { ax[j] = CurrentCpuF16::load(a_row.add(i + j * CurrentCpuF16::EPR)); ay[j] = CurrentCpuF16::load(b_row.add(i + j * CurrentCpuF16::EPR)); sum[j] = CurrentCpuF16::vec_fma(sum[j], ax[j], ay[j]); } } CurrentCpuF16::vec_reduce(sum, &mut sumf); // leftovers for i in np..k { sumf += (*a_row.add(i)).to_f32() * (*b_row.add(i)).to_f32(); } *c = sumf; } #[cfg(not(target_feature = "avx"))] #[inline(always)] pub(crate) unsafe fn vec_dot_f16(a_row: *const f16, b_row: *const f16, c: *mut f32, k: usize) { // leftovers let mut sum = 0.0; for i in 0..k { sum += (*a_row.add(i)).to_f32() * (*b_row.add(i)).to_f32(); } *c = sum; }
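In `vec_dot_f32`, `np = k & !(STEP - 1)` rounds `k` down to a multiple of `STEP` (valid because `STEP` is a power of two); the main loop handles those `np` elements in SIMD and a scalar tail handles the rest. Note that the non-SIMD fallback accumulates into `*c` rather than overwriting it, so the destination must be zero-initialized, as in this sketch of an in-crate safe wrapper (the function is `pub(crate)`, so this only works from inside the crate):

fn dot(a: &[f32], b: &[f32]) -> f32 {
    assert_eq!(a.len(), b.len());
    let mut out = 0f32; // must start at zero: the scalar path does `*c += ...`
    // Safety: both pointers are valid for `a.len()` reads; `out` is valid for writes.
    unsafe { crate::cpu::vec_dot_f32(a.as_ptr(), b.as_ptr(), &mut out, a.len()) };
    out
}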
0
hf_public_repos/candle/candle-core/src
hf_public_repos/candle/candle-core/src/cpu/kernels.rs
pub trait VecOps: num_traits::NumAssign + Copy { fn min(self, rhs: Self) -> Self; fn max(self, rhs: Self) -> Self; /// Dot-product of two vectors. /// /// # Safety /// /// The length of `lhs` and `rhs` have to be at least `len`. `res` has to point to a valid /// element. #[inline(always)] unsafe fn vec_dot(lhs: *const Self, rhs: *const Self, res: *mut Self, len: usize) { *res = Self::zero(); for i in 0..len { *res += *lhs.add(i) * *rhs.add(i) } } /// Sum of all elements in a vector. /// /// # Safety /// /// The length of `xs` must be at least `len`. `res` has to point to a valid /// element. #[inline(always)] unsafe fn vec_reduce_sum(xs: *const Self, res: *mut Self, len: usize) { *res = Self::zero(); for i in 0..len { *res += *xs.add(i) } } /// Maximum element in a non-empty vector. /// /// # Safety /// /// The length of `xs` must be at least `len` and positive. `res` has to point to a valid /// element. #[inline(always)] unsafe fn vec_reduce_max(xs: *const Self, res: *mut Self, len: usize) { *res = *xs; for i in 1..len { *res = (*res).max(*xs.add(i)) } } /// Minimum element in a non-empty vector. /// /// # Safety /// /// The length of `xs` must be at least `len` and positive. `res` has to point to a valid /// element. #[inline(always)] unsafe fn vec_reduce_min(xs: *const Self, res: *mut Self, len: usize) { *res = *xs; for i in 1..len { *res = (*res).min(*xs.add(i)) } } } impl VecOps for f32 { #[inline(always)] fn min(self, other: Self) -> Self { Self::min(self, other) } #[inline(always)] fn max(self, other: Self) -> Self { Self::max(self, other) } #[inline(always)] unsafe fn vec_dot(lhs: *const Self, rhs: *const Self, res: *mut Self, len: usize) { super::vec_dot_f32(lhs, rhs, res, len) } #[inline(always)] unsafe fn vec_reduce_sum(xs: *const Self, res: *mut Self, len: usize) { super::vec_sum(xs, res, len) } } impl VecOps for half::f16 { #[inline(always)] fn min(self, other: Self) -> Self { Self::min(self, other) } #[inline(always)] fn max(self, other: Self) -> Self { Self::max(self, other) } #[inline(always)] unsafe fn vec_dot(lhs: *const Self, rhs: *const Self, res: *mut Self, len: usize) { let mut res_f32 = 0f32; super::vec_dot_f16(lhs, rhs, &mut res_f32, len); *res = half::f16::from_f32(res_f32); } } impl VecOps for f64 { #[inline(always)] fn min(self, other: Self) -> Self { Self::min(self, other) } #[inline(always)] fn max(self, other: Self) -> Self { Self::max(self, other) } } impl VecOps for half::bf16 { #[inline(always)] fn min(self, other: Self) -> Self { Self::min(self, other) } #[inline(always)] fn max(self, other: Self) -> Self { Self::max(self, other) } } impl VecOps for u8 { #[inline(always)] fn min(self, other: Self) -> Self { <Self as Ord>::min(self, other) } #[inline(always)] fn max(self, other: Self) -> Self { <Self as Ord>::max(self, other) } } impl VecOps for u32 { #[inline(always)] fn min(self, other: Self) -> Self { <Self as Ord>::min(self, other) } #[inline(always)] fn max(self, other: Self) -> Self { <Self as Ord>::max(self, other) } } impl VecOps for i64 { #[inline(always)] fn min(self, other: Self) -> Self { <Self as Ord>::min(self, other) } #[inline(always)] fn max(self, other: Self) -> Self { <Self as Ord>::max(self, other) } } #[inline(always)] pub fn par_for_each(n_threads: usize, func: impl Fn(usize) + Send + Sync) { if n_threads == 1 { func(0) } else { rayon::scope(|s| { for thread_idx in 0..n_threads { let func = &func; s.spawn(move |_| func(thread_idx)); } }) } } #[inline(always)] pub fn par_range(lo: usize, up: usize, n_threads: usize, func: impl Fn(usize) 
+ Send + Sync) { if n_threads == 1 { for i in lo..up { func(i) } } else { rayon::scope(|s| { for thread_idx in 0..n_threads { let func = &func; s.spawn(move |_| { for i in (thread_idx..up).step_by(n_threads) { func(i) } }); } }) } }
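`par_range` splits the index range across `n_threads` with a stride of `n_threads` (thread `t` handles `t`, `t + n_threads`, ...), which keeps the load balanced without chunk bookkeeping, and skips the rayon scope entirely for the single-threaded case. A usage sketch, assuming the module is publicly reachable at this path:

use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    // Each index is visited exactly once, on some worker thread.
    let out: Vec<AtomicUsize> = (0..8).map(|_| AtomicUsize::new(0)).collect();
    candle_core::cpu::kernels::par_range(0, out.len(), 4, |i| {
        out[i].store(i * i, Ordering::Relaxed);
    });
    let vals: Vec<usize> = out.iter().map(|a| a.load(Ordering::Relaxed)).collect();
    println!("{vals:?}"); // [0, 1, 4, 9, 16, 25, 36, 49]
}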
0
hf_public_repos/candle/candle-core/src
hf_public_repos/candle/candle-core/src/quantized/neon.rs
use super::k_quants::{ BlockQ2K, BlockQ3K, BlockQ4K, BlockQ4_0, BlockQ5K, BlockQ6K, BlockQ8K, BlockQ8_0, QK8_0, QK_K, }; use crate::Result; use byteorder::{ByteOrder, LittleEndian}; #[allow(unused_imports)] #[cfg(target_arch = "arm")] use core::arch::arm::*; #[allow(unused_imports)] #[cfg(target_arch = "aarch64")] use core::arch::aarch64::*; #[inline(always)] pub(crate) fn vec_dot_q4_0_q8_0(n: usize, xs: &[BlockQ4_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; let nb = n / qk; if n % QK8_0 != 0 { crate::bail!("vec_dot_q4_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut sumv0 = vdupq_n_f32(0.0f32); for i in 0..nb { let x0 = &xs[i]; let y0 = &ys[i]; let m4b = vdupq_n_u8(0x0F); let s8b = vdupq_n_s8(0x8); let v0_0 = vld1q_u8(x0.qs.as_ptr()); // 4-bit -> 8-bit let v0_0l = vreinterpretq_s8_u8(vandq_u8(v0_0, m4b)); let v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4)); // sub 8 let v0_0ls = vsubq_s8(v0_0l, s8b); let v0_0hs = vsubq_s8(v0_0h, s8b); // load y let v1_0l = vld1q_s8(y0.qs.as_ptr()); let v1_0h = vld1q_s8(y0.qs.as_ptr().add(16)); // TODO: Support dotprod when it's available outside of nightly. let pl0l = vmull_s8(vget_low_s8(v0_0ls), vget_low_s8(v1_0l)); let pl0h = vmull_s8(vget_high_s8(v0_0ls), vget_high_s8(v1_0l)); let ph0l = vmull_s8(vget_low_s8(v0_0hs), vget_low_s8(v1_0h)); let ph0h = vmull_s8(vget_high_s8(v0_0hs), vget_high_s8(v1_0h)); let pl0 = vaddq_s32(vpaddlq_s16(pl0l), vpaddlq_s16(pl0h)); let ph0 = vaddq_s32(vpaddlq_s16(ph0l), vpaddlq_s16(ph0h)); sumv0 = vmlaq_n_f32( sumv0, vcvtq_f32_s32(vaddq_s32(pl0, ph0)), x0.d.to_f32() * y0.d.to_f32(), ); } Ok(vaddvq_f32(sumv0)) } } #[inline(always)] pub(crate) fn vec_dot_q8_0_q8_0(n: usize, xs: &[BlockQ8_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q8_0_q8_0: {n} is not divisible by {qk}") } let nb = n / QK8_0; unsafe { let mut sumv0 = vdupq_n_f32(0.0f32); for i in 0..nb { let x0 = &xs[i]; let y0 = &ys[i]; let x0_0 = vld1q_s8(x0.qs.as_ptr()); let x0_1 = vld1q_s8(x0.qs.as_ptr().add(16)); // load y let y0_0 = vld1q_s8(y0.qs.as_ptr()); let y0_1 = vld1q_s8(y0.qs.as_ptr().add(16)); // TODO: Use dotprod once the intrinsics are available outside of nightly. 
let p0_0 = vmull_s8(vget_low_s8(x0_0), vget_low_s8(y0_0)); let p0_1 = vmull_s8(vget_high_s8(x0_0), vget_high_s8(y0_0)); let p0_2 = vmull_s8(vget_low_s8(x0_1), vget_low_s8(y0_1)); let p0_3 = vmull_s8(vget_high_s8(x0_1), vget_high_s8(y0_1)); let p0 = vaddq_s32(vpaddlq_s16(p0_0), vpaddlq_s16(p0_1)); let p1 = vaddq_s32(vpaddlq_s16(p0_2), vpaddlq_s16(p0_3)); sumv0 = vmlaq_n_f32( sumv0, vcvtq_f32_s32(vaddq_s32(p0, p1)), x0.d.to_f32() * y0.d.to_f32(), ); } Ok(vaddvq_f32(sumv0)) } } #[inline(always)] pub(crate) fn vec_dot_q8k_q8k(n: usize, xs: &[BlockQ8K], ys: &[BlockQ8K]) -> Result<f32> { let qk = QK_K; if n % QK_K != 0 { crate::bail!("vec_dot_q8k_q8k: {n} is not divisible by {qk}") } let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { unsafe { let mut sum_i = vdupq_n_s32(0); let scale = xs.d * ys.d; let xs = xs.qs.as_ptr(); let ys = ys.qs.as_ptr(); for i in (0..QK_K).step_by(16) { let xs = vld1q_s8(xs.add(i)); let ys = vld1q_s8(ys.add(i)); let xy_lo = vmull_s8(vget_low_s8(xs), vget_low_s8(ys)); let xy_up = vmull_s8(vget_high_s8(xs), vget_high_s8(ys)); let xy = vaddq_s32(vpaddlq_s16(xy_lo), vpaddlq_s16(xy_up)); sum_i = vaddq_s32(sum_i, xy) } sumf += vaddvq_s32(sum_i) as f32 * scale } } Ok(sumf) } #[inline(always)] pub(crate) fn vec_dot_q6k_q8k(n: usize, xs: &[BlockQ6K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q6k_q8k: {n} is not divisible by {QK_K}") } let mut sum = 0f32; unsafe { let m4b = vdupq_n_u8(0xF); let mone = vdupq_n_u8(3); for (x, y) in xs.iter().zip(ys.iter()) { let d_all = x.d.to_f32(); let mut q6 = x.ql.as_ptr(); let mut qh = x.qh.as_ptr(); let mut q8 = y.qs.as_ptr(); let mut scale = x.scales.as_ptr(); let q8sums = vld1q_s16_x2(y.bsums.as_ptr()); let scales = vld1q_s8(scale); let q6scales = int16x8x2_t( vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales)), ); let prod = vaddq_s32( vaddq_s32( vmull_s16(vget_low_s16(q8sums.0), vget_low_s16(q6scales.0)), vmull_s16(vget_high_s16(q8sums.0), vget_high_s16(q6scales.0)), ), vaddq_s32( vmull_s16(vget_low_s16(q8sums.1), vget_low_s16(q6scales.1)), vmull_s16(vget_high_s16(q8sums.1), vget_high_s16(q6scales.1)), ), ); let isum_mins = vaddvq_s32(prod); let mut isum = 0i32; for _j in 0..QK_K / 128 { let qhbits = vld1q_u8_x2(qh); qh = qh.add(32); let q6bits = vld1q_u8_x4(q6); q6 = q6.add(64); let q8bytes = vld1q_s8_x4(q8); q8 = q8.add(64); let q6h_0 = vshlq_n_u8(vandq_u8(mone, qhbits.0), 4); let q6h_1 = vshlq_n_u8(vandq_u8(mone, qhbits.1), 4); let shifted = vshrq_n_u8(qhbits.0, 2); let q6h_2 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let shifted = vshrq_n_u8(qhbits.1, 2); let q6h_3 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let q6bytes_0 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.0, m4b), q6h_0)); let q6bytes_1 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.1, m4b), q6h_1)); let q6bytes_2 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.2, m4b), q6h_2)); let q6bytes_3 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.3, m4b), q6h_3)); // TODO: dotprod let p0 = vaddq_s16( vmull_s8(vget_low_s8(q6bytes_0), vget_low_s8(q8bytes.0)), vmull_s8(vget_high_s8(q6bytes_0), vget_high_s8(q8bytes.0)), ); let p1 = vaddq_s16( vmull_s8(vget_low_s8(q6bytes_1), vget_low_s8(q8bytes.1)), vmull_s8(vget_high_s8(q6bytes_1), vget_high_s8(q8bytes.1)), ); let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32); isum += vaddvq_s16(p0) as i32 * scale0 + vaddvq_s16(p1) as i32 * scale1; scale = scale.add(2); let p2 = vaddq_s16( vmull_s8(vget_low_s8(q6bytes_2), vget_low_s8(q8bytes.2)), vmull_s8(vget_high_s8(q6bytes_2), 
vget_high_s8(q8bytes.2)), ); let p3 = vaddq_s16( vmull_s8(vget_low_s8(q6bytes_3), vget_low_s8(q8bytes.3)), vmull_s8(vget_high_s8(q6bytes_3), vget_high_s8(q8bytes.3)), ); let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32); isum += vaddvq_s16(p2) as i32 * scale0 + vaddvq_s16(p3) as i32 * scale1; scale = scale.add(2); let q8bytes = vld1q_s8_x4(q8); q8 = q8.add(64); let shifted = vshrq_n_u8(qhbits.0, 4); let q6h_0 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let shifted = vshrq_n_u8(qhbits.1, 4); let q6h_1 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let shifted = vshrq_n_u8(qhbits.0, 6); let q6h_2 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let shifted = vshrq_n_u8(qhbits.1, 6); let q6h_3 = vshlq_n_u8(vandq_u8(mone, shifted), 4); let q6bytes_0 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.0, 4), q6h_0)); let q6bytes_1 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.1, 4), q6h_1)); let q6bytes_2 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.2, 4), q6h_2)); let q6bytes_3 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.3, 4), q6h_3)); // TODO: dotprod case. let p0 = vaddq_s16( vmull_s8(vget_low_s8(q6bytes_0), vget_low_s8(q8bytes.0)), vmull_s8(vget_high_s8(q6bytes_0), vget_high_s8(q8bytes.0)), ); let p1 = vaddq_s16( vmull_s8(vget_low_s8(q6bytes_1), vget_low_s8(q8bytes.1)), vmull_s8(vget_high_s8(q6bytes_1), vget_high_s8(q8bytes.1)), ); let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32); isum += vaddvq_s16(p0) as i32 * scale0 + vaddvq_s16(p1) as i32 * scale1; scale = scale.add(2); let p2 = vaddq_s16( vmull_s8(vget_low_s8(q6bytes_2), vget_low_s8(q8bytes.2)), vmull_s8(vget_high_s8(q6bytes_2), vget_high_s8(q8bytes.2)), ); let p3 = vaddq_s16( vmull_s8(vget_low_s8(q6bytes_3), vget_low_s8(q8bytes.3)), vmull_s8(vget_high_s8(q6bytes_3), vget_high_s8(q8bytes.3)), ); let (scale0, scale1) = (*scale as i32, *scale.add(1) as i32); isum += vaddvq_s16(p2) as i32 * scale0 + vaddvq_s16(p3) as i32 * scale1; scale = scale.add(2); } sum += d_all * y.d * ((isum - 32 * isum_mins) as f32); } } Ok(sum) } #[inline(always)] pub(crate) fn vec_dot_q5k_q8k(n: usize, xs: &[BlockQ5K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q5k_q8k: {n} is not divisible by {QK_K}") } let mut sumf = 0f32; let mut utmp = [0u32; 4]; const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; unsafe { let m4b = vdupq_n_u8(0xF); let mone = vdupq_n_u8(1); let mtwo = vdupq_n_u8(2); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = y.d * x.dmin.to_f32(); let q8sums = vpaddq_s16( vld1q_s16(y.bsums.as_ptr()), vld1q_s16(y.bsums.as_ptr().add(8)), ); LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; let mins8 = vld1_u8((utmp.as_ptr() as *const u8).add(8)); let mins = vreinterpretq_s16_u16(vmovl_u8(mins8)); let prod = vaddq_s32( vmull_s16(vget_low_s16(q8sums), vget_low_s16(mins)), vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)), ); let sumi_mins = vaddvq_s32(prod); let mut scales = utmp.as_ptr() as *const u8; let mut q5 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let mut qhbits = vld1q_u8_x2(x.qh.as_ptr()); let mut sumi = 0i32; for _j in 0..QK_K / 64 { let q5bits = vld1q_u8_x2(q5); q5 = q5.add(32); let q8bytes = vld1q_s8_x4(q8); q8 = q8.add(64); let q5h_0 = vshlq_n_u8(vandq_u8(mone, qhbits.0), 4); let q5h_1 = 
vshlq_n_u8(vandq_u8(mone, qhbits.1), 4); let q5h_2 = vshlq_n_u8(vandq_u8(mtwo, qhbits.0), 3); let q5h_3 = vshlq_n_u8(vandq_u8(mtwo, qhbits.1), 3); qhbits.0 = vshrq_n_u8(qhbits.0, 2); qhbits.1 = vshrq_n_u8(qhbits.1, 2); let q5bytes_0 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.0, m4b), q5h_0)); let q5bytes_1 = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.1, m4b), q5h_1)); let q5bytes_2 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.0, 4), q5h_2)); let q5bytes_3 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.1, 4), q5h_3)); // TODO: dotprod let p0 = vaddq_s16( vmull_s8(vget_low_s8(q5bytes_0), vget_low_s8(q8bytes.0)), vmull_s8(vget_high_s8(q5bytes_0), vget_high_s8(q8bytes.0)), ); let p1 = vaddq_s16( vmull_s8(vget_low_s8(q5bytes_1), vget_low_s8(q8bytes.1)), vmull_s8(vget_high_s8(q5bytes_1), vget_high_s8(q8bytes.1)), ); sumi += vaddvq_s16(vaddq_s16(p0, p1)) as i32 * *scales as i32; scales = scales.add(1); let p2 = vaddq_s16( vmull_s8(vget_low_s8(q5bytes_2), vget_low_s8(q8bytes.2)), vmull_s8(vget_high_s8(q5bytes_2), vget_high_s8(q8bytes.2)), ); let p3 = vaddq_s16( vmull_s8(vget_low_s8(q5bytes_3), vget_low_s8(q8bytes.3)), vmull_s8(vget_high_s8(q5bytes_3), vget_high_s8(q8bytes.3)), ); sumi += vaddvq_s16(vaddq_s16(p2, p3)) as i32 * *scales as i32; scales = scales.add(1); } sumf += d * sumi as f32 - dmin * sumi_mins as f32; } } Ok(sumf) } #[inline(always)] pub(crate) fn vec_dot_q4k_q8k(n: usize, xs: &[BlockQ4K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q4k_q8k: {n} is not divisible by {QK_K}") } let mut sumf = 0f32; let mut utmp = [0u32; 4]; let mut scales = [0u8; 16]; const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; unsafe { let m4b = vdupq_n_u8(0xF); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = y.d * x.dmin.to_f32(); let q8sums = vpaddq_s16( vld1q_s16(y.bsums.as_ptr()), vld1q_s16(y.bsums.as_ptr().add(8)), ); LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); let mins8 = vld1_u32( [ utmp[1] & KMASK1, ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4), ] .as_ptr(), ); utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[0] &= KMASK1; let mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8))); let prod = vaddq_s32( vmull_s16(vget_low_s16(q8sums), vget_low_s16(mins)), vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)), ); sumf -= dmin * vaddvq_s32(prod) as f32; LittleEndian::write_u32_into(&utmp, &mut scales); let mut q4 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let mut sumi1 = 0i32; let mut sumi2 = 0i32; for j in 0..QK_K / 64 { let q4bits = vld1q_u8_x2(q4); q4 = q4.add(32); // TODO: dotprod let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); let q4bytes = int8x16x2_t( vreinterpretq_s8_u8(vandq_u8(q4bits.0, m4b)), vreinterpretq_s8_u8(vandq_u8(q4bits.1, m4b)), ); let p0 = vaddq_s16( vmull_s8(vget_low_s8(q4bytes.0), vget_low_s8(q8bytes.0)), vmull_s8(vget_high_s8(q4bytes.0), vget_high_s8(q8bytes.0)), ); let p1 = vaddq_s16( vmull_s8(vget_low_s8(q4bytes.1), vget_low_s8(q8bytes.1)), vmull_s8(vget_high_s8(q4bytes.1), vget_high_s8(q8bytes.1)), ); sumi1 += vaddvq_s16(vaddq_s16(p0, p1)) as i32 * scales[2 * j] as i32; let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); let q4bytes = int8x16x2_t( vreinterpretq_s8_u8(vshrq_n_u8(q4bits.0, 4)), vreinterpretq_s8_u8(vshrq_n_u8(q4bits.1, 4)), ); let p2 = vaddq_s16( vmull_s8(vget_low_s8(q4bytes.0), vget_low_s8(q8bytes.0)), vmull_s8(vget_high_s8(q4bytes.0), vget_high_s8(q8bytes.0)), ); let p3 = vaddq_s16( 
vmull_s8(vget_low_s8(q4bytes.1), vget_low_s8(q8bytes.1)), vmull_s8(vget_high_s8(q4bytes.1), vget_high_s8(q8bytes.1)), ); sumi2 += vaddvq_s16(vaddq_s16(p2, p3)) as i32 * scales[2 * j + 1] as i32; } sumf += d * (sumi1 + sumi2) as f32; } } Ok(sumf) } #[inline(always)] pub(crate) fn vec_dot_q3k_q8k(n: usize, xs: &[BlockQ3K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q3k_q8k: {n} is not divisible by {QK_K}") } let mut sumf = 0f32; let mut utmp = [0u32; 4]; let mut aux = [0u32; 3]; const KMASK1: u32 = 0x03030303; const KMASK2: u32 = 0x0f0f0f0f; unsafe { let m3b = vdupq_n_u8(0x3); let m0 = vdupq_n_u8(1); let m1 = vshlq_n_u8(m0, 1); let m2 = vshlq_n_u8(m0, 2); let m3 = vshlq_n_u8(m0, 3); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let mut q3 = x.qs.as_ptr(); let qh = x.hmask.as_ptr(); let mut q8 = y.qs.as_ptr(); let mut qhbits = vld1q_u8_x2(qh); let mut isum = 0i32; // Set up scales LittleEndian::read_u32_into(&x.scales, &mut aux); utmp[3] = ((aux[1] >> 4) & KMASK2) | (((aux[2] >> 6) & KMASK1) << 4); utmp[2] = ((aux[0] >> 4) & KMASK2) | (((aux[2] >> 4) & KMASK1) << 4); utmp[1] = (aux[1] & KMASK2) | (((aux[2] >> 2) & KMASK1) << 4); utmp[0] = (aux[0] & KMASK2) | ((aux[2] & KMASK1) << 4); let mut scale = utmp.as_mut_ptr() as *mut i8; for j in 0..16 { *scale.add(j) -= 32i8 } for j in 0..QK_K / 128 { let q3bits = vld1q_u8_x2(q3); q3 = q3.add(32); let q8bytes_1 = vld1q_s8_x4(q8); q8 = q8.add(64); let q8bytes_2 = vld1q_s8_x4(q8); q8 = q8.add(64); let q3h_0 = vshlq_n_u8(vbicq_u8(m0, qhbits.0), 2); let q3h_1 = vshlq_n_u8(vbicq_u8(m0, qhbits.1), 2); let q3h_2 = vshlq_n_u8(vbicq_u8(m1, qhbits.0), 1); let q3h_3 = vshlq_n_u8(vbicq_u8(m1, qhbits.1), 1); let q3bytes_0 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(q3bits.0, m3b)), vreinterpretq_s8_u8(q3h_0), ); let q3bytes_1 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(q3bits.1, m3b)), vreinterpretq_s8_u8(q3h_1), ); let q3bytes_2 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.0, 2), m3b)), vreinterpretq_s8_u8(q3h_2), ); let q3bytes_3 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.1, 2), m3b)), vreinterpretq_s8_u8(q3h_3), ); // TODO: dotprod let p0 = vaddq_s16( vmull_s8(vget_low_s8(q3bytes_0), vget_low_s8(q8bytes_1.0)), vmull_s8(vget_high_s8(q3bytes_0), vget_high_s8(q8bytes_1.0)), ); let p1 = vaddq_s16( vmull_s8(vget_low_s8(q3bytes_1), vget_low_s8(q8bytes_1.1)), vmull_s8(vget_high_s8(q3bytes_1), vget_high_s8(q8bytes_1.1)), ); let p2 = vaddq_s16( vmull_s8(vget_low_s8(q3bytes_2), vget_low_s8(q8bytes_1.2)), vmull_s8(vget_high_s8(q3bytes_2), vget_high_s8(q8bytes_1.2)), ); let p3 = vaddq_s16( vmull_s8(vget_low_s8(q3bytes_3), vget_low_s8(q8bytes_1.3)), vmull_s8(vget_high_s8(q3bytes_3), vget_high_s8(q8bytes_1.3)), ); isum += vaddvq_s16(p0) as i32 * *scale as i32 + vaddvq_s16(p1) as i32 * *scale.add(1) as i32 + vaddvq_s16(p2) as i32 * *scale.add(2) as i32 + vaddvq_s16(p3) as i32 * *scale.add(3) as i32; scale = scale.add(4); let q3h_0 = vbicq_u8(m2, qhbits.0); let q3h_1 = vbicq_u8(m2, qhbits.1); let q3h_2 = vshrq_n_u8(vbicq_u8(m3, qhbits.0), 1); let q3h_3 = vshrq_n_u8(vbicq_u8(m3, qhbits.1), 1); let q3bytes_0 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.0, 4), m3b)), vreinterpretq_s8_u8(q3h_0), ); let q3bytes_1 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.1, 4), m3b)), vreinterpretq_s8_u8(q3h_1), ); let q3bytes_2 = vsubq_s8( vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.0, 6), m3b)), vreinterpretq_s8_u8(q3h_2), ); let q3bytes_3 = vsubq_s8( 
vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.1, 6), m3b)), vreinterpretq_s8_u8(q3h_3), ); // TODO: dotprod let p0 = vaddq_s16( vmull_s8(vget_low_s8(q3bytes_0), vget_low_s8(q8bytes_2.0)), vmull_s8(vget_high_s8(q3bytes_0), vget_high_s8(q8bytes_2.0)), ); let p1 = vaddq_s16( vmull_s8(vget_low_s8(q3bytes_1), vget_low_s8(q8bytes_2.1)), vmull_s8(vget_high_s8(q3bytes_1), vget_high_s8(q8bytes_2.1)), ); let p2 = vaddq_s16( vmull_s8(vget_low_s8(q3bytes_2), vget_low_s8(q8bytes_2.2)), vmull_s8(vget_high_s8(q3bytes_2), vget_high_s8(q8bytes_2.2)), ); let p3 = vaddq_s16( vmull_s8(vget_low_s8(q3bytes_3), vget_low_s8(q8bytes_2.3)), vmull_s8(vget_high_s8(q3bytes_3), vget_high_s8(q8bytes_2.3)), ); isum += vaddvq_s16(p0) as i32 * *scale as i32 + vaddvq_s16(p1) as i32 * *scale.add(1) as i32 + vaddvq_s16(p2) as i32 * *scale.add(2) as i32 + vaddvq_s16(p3) as i32 * *scale.add(3) as i32; scale = scale.add(4); if j == 0 { qhbits.0 = vshrq_n_u8(qhbits.0, 4); qhbits.1 = vshrq_n_u8(qhbits.1, 4); } } sumf += d * isum as f32; } } Ok(sumf) } #[inline(always)] pub(crate) fn vec_dot_q2k_q8k(n: usize, xs: &[BlockQ2K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q2k_q8k: {n} is not divisible by {QK_K}") } let mut sumf = 0f32; let mut aux = [0u8; 16]; unsafe { let m3 = vdupq_n_u8(0x3); let m4 = vdupq_n_u8(0xF); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = -y.d * x.dmin.to_f32(); let mut q2 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let sc = x.scales.as_ptr(); let mins_and_scales = vld1q_u8(sc); let scales = vandq_u8(mins_and_scales, m4); vst1q_u8(aux.as_mut_ptr(), scales); let mins = vshrq_n_u8(mins_and_scales, 4); let q8sums = vld1q_s16_x2(y.bsums.as_ptr()); let mins16 = int16x8x2_t( vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins))), ); let s0 = vaddq_s32( vmull_s16(vget_low_s16(mins16.0), vget_low_s16(q8sums.0)), vmull_s16(vget_high_s16(mins16.0), vget_high_s16(q8sums.0)), ); let s1 = vaddq_s32( vmull_s16(vget_low_s16(mins16.1), vget_low_s16(q8sums.1)), vmull_s16(vget_high_s16(mins16.1), vget_high_s16(q8sums.1)), ); sumf += dmin * vaddvq_s32(vaddq_s32(s0, s1)) as f32; let mut isum = 0i32; let mut is = 0usize; // TODO: dotprod for _j in 0..QK_K / 128 { let q2bits = vld1q_u8_x2(q2); q2 = q2.add(32); let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); let mut q2bytes = int8x16x2_t( vreinterpretq_s8_u8(vandq_u8(q2bits.0, m3)), vreinterpretq_s8_u8(vandq_u8(q2bits.1, m3)), ); isum += multiply_accum_with_scale(&aux, is, 0, q2bytes, q8bytes); let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); q2bytes.0 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.0, 2), m3)); q2bytes.1 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.1, 2), m3)); isum += multiply_accum_with_scale(&aux, is, 2, q2bytes, q8bytes); let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); q2bytes.0 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.0, 4), m3)); q2bytes.1 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.1, 4), m3)); isum += multiply_accum_with_scale(&aux, is, 4, q2bytes, q8bytes); let q8bytes = vld1q_s8_x2(q8); q8 = q8.add(32); q2bytes.0 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.0, 6), m3)); q2bytes.1 = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.1, 6), m3)); isum += multiply_accum_with_scale(&aux, is, 6, q2bytes, q8bytes); is += 8; } sumf += d * isum as f32; } } Ok(sumf) } #[inline(always)] unsafe fn multiply_accum_with_scale( aux: &[u8; 16], is: usize, index: usize, q2bytes: int8x16x2_t, q8bytes: int8x16x2_t, ) -> i32 { let p1 = 
vaddq_s16( vmull_s8(vget_low_s8(q2bytes.0), vget_low_s8(q8bytes.0)), vmull_s8(vget_high_s8(q2bytes.0), vget_high_s8(q8bytes.0)), ); let p2 = vaddq_s16( vmull_s8(vget_low_s8(q2bytes.1), vget_low_s8(q8bytes.1)), vmull_s8(vget_high_s8(q2bytes.1), vget_high_s8(q8bytes.1)), ); vaddvq_s16(p1) as i32 * aux[is + index] as i32 + vaddvq_s16(p2) as i32 * aux[is + 1 + index] as i32 }
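// Scalar reference sketch added for illustration (not part of the original file):
// it spells out what the NEON `vec_dot_q4_0_q8_0` above computes per block, which
// is useful as an oracle when checking the intrinsics. It assumes the
// `BlockQ4_0`/`BlockQ8_0` layouts from `k_quants`: 16 packed bytes holding 32
// 4-bit values stored with a +8 offset, plus the `d` scales.
#[allow(dead_code)]
fn vec_dot_q4_0_q8_0_scalar(xs: &[BlockQ4_0], ys: &[BlockQ8_0]) -> f32 {
    let mut sum = 0f32;
    for (x, y) in xs.iter().zip(ys.iter()) {
        let mut isum = 0i32;
        for (i, &b) in x.qs.iter().enumerate() {
            // Low nibble -> element i, high nibble -> element i + 16; both carry
            // a +8 offset that has to be subtracted back.
            let lo = (b & 0x0F) as i32 - 8;
            let hi = (b >> 4) as i32 - 8;
            isum += lo * y.qs[i] as i32 + hi * y.qs[i + 16] as i32;
        }
        sum += isum as f32 * x.d.to_f32() * y.d.to_f32();
    }
    sum
}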
0
hf_public_repos/candle/candle-core/src
hf_public_repos/candle/candle-core/src/quantized/simd128.rs
use super::k_quants::{BlockQ2K, BlockQ4K, BlockQ4_0, BlockQ6K, BlockQ8K, BlockQ8_0, QK8_0, QK_K}; use crate::Result; use byteorder::{ByteOrder, LittleEndian}; use half::f16; use core::arch::wasm32::*; #[inline(always)] pub(crate) fn vec_dot_q4_0_q8_0(n: usize, xs: &[BlockQ4_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q4_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut acc = f32x4_splat(0.0f32); for (x, y) in xs.iter().zip(ys.iter()) { let x1234 = v128_load(x.qs.as_ptr() as *const v128); let x12 = v128_and(x1234, u8x16_splat(0x0F)); let x12 = i8x16_sub(x12, i8x16_splat(8)); let x34 = u8x16_shr(x1234, 4); let x34 = i8x16_sub(x34, i8x16_splat(8)); let x1 = i16x8_extend_low_i8x16(x12); let y1 = i16x8_load_extend_i8x8(y.qs.as_ptr()); let sum_xy = i32x4_dot_i16x8(x1, y1); let x2 = i16x8_extend_high_i8x16(x12); let y2 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(8)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x2, y2)); let x3 = i16x8_extend_low_i8x16(x34); let y3 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(16)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x3, y3)); let x4 = i16x8_extend_high_i8x16(x34); let y4 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(24)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x4, y4)); let sum_xy = f32x4_convert_i32x4(sum_xy); // f32x4_relaxed_madd is nightly only. let d = f32x4_splat(f16::to_f32(x.d) * f16::to_f32(y.d)); let scaled = f32x4_mul(sum_xy, d); acc = f32x4_add(acc, scaled) } let res = f32x4_extract_lane::<0>(acc) + f32x4_extract_lane::<1>(acc) + f32x4_extract_lane::<2>(acc) + f32x4_extract_lane::<3>(acc); Ok(res) } } #[inline(always)] pub(crate) fn vec_dot_q8_0_q8_0(n: usize, xs: &[BlockQ8_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q8_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut acc = f32x4_splat(0.0f32); for (x, y) in xs.iter().zip(ys.iter()) { let x1 = i16x8_load_extend_i8x8(x.qs.as_ptr()); let y1 = i16x8_load_extend_i8x8(y.qs.as_ptr()); let sum_xy = i32x4_dot_i16x8(x1, y1); let x2 = i16x8_load_extend_i8x8(x.qs.as_ptr().add(8)); let y2 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(8)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x2, y2)); let x3 = i16x8_load_extend_i8x8(x.qs.as_ptr().add(16)); let y3 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(16)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x3, y3)); let x4 = i16x8_load_extend_i8x8(x.qs.as_ptr().add(24)); let y4 = i16x8_load_extend_i8x8(y.qs.as_ptr().add(24)); let sum_xy = i32x4_add(sum_xy, i32x4_dot_i16x8(x4, y4)); let sum_xy = f32x4_convert_i32x4(sum_xy); // f32x4_relaxed_madd is nightly only. 
let d = f32x4_splat(f16::to_f32(x.d) * f16::to_f32(y.d)); let scaled = f32x4_mul(sum_xy, d); acc = f32x4_add(acc, scaled) } let res = f32x4_extract_lane::<0>(acc) + f32x4_extract_lane::<1>(acc) + f32x4_extract_lane::<2>(acc) + f32x4_extract_lane::<3>(acc); Ok(res) } } #[inline(always)] pub(crate) fn vec_dot_q2k_q8k(n: usize, xs: &[BlockQ2K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q2k_q8k: {n} is not divisible by {QK_K}") } unsafe { let mut sumf = f32x4_splat(0f32); for (x, y) in xs.iter().zip(ys.iter()) { let mut q2: &[_] = &x.qs; let mut q8: &[_] = &y.qs; let sc = &x.scales; let mut summs = i32x4_splat(0); for i in (0..(QK_K / 16)).step_by(4) { let bsums = i32x4_load_extend_i16x4(y.bsums.as_ptr().add(i)); let scales = i32x4_shr( i32x4( sc[i] as i32, sc[i + 1] as i32, sc[i + 2] as i32, sc[i + 3] as i32, ), 4, ); summs = i32x4_add(summs, i32x4_mul(bsums, scales)) } let summs = f32x4_convert_i32x4(summs); let dall = y.d * x.d.to_f32(); let dmin = y.d * x.dmin.to_f32(); let mut isum = i32x4_splat(0); let mut is = 0; for _ in 0..(QK_K / 128) { let mut shift = 0; for _ in 0..4 { let d = (sc[is] & 0xF) as i32; is += 1; let mut isuml = i16x8_splat(0); for l in (0..16).step_by(8) { let q8 = i16x8_load_extend_i8x8(q8.as_ptr().add(l)); let q2 = i16x8_load_extend_u8x8(q2.as_ptr().add(l)); let q2 = v128_and(i16x8_shr(q2, shift), i16x8_splat(3)); isuml = i16x8_add(isuml, i16x8_mul(q2, q8)) } let dd = i32x4_splat(d); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_low_i16x8(isuml), dd)); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_high_i16x8(isuml), dd)); let d = (sc[is] & 0xF) as i32; is += 1; let mut isuml = i16x8_splat(0); for l in (16..32).step_by(8) { let q8 = i16x8_load_extend_i8x8(q8.as_ptr().add(l)); let q2 = i16x8_load_extend_u8x8(q2.as_ptr().add(l)); let q2 = v128_and(i16x8_shr(q2, shift), i16x8_splat(3)); isuml = i16x8_add(isuml, i16x8_mul(q2, q8)) } let dd = i32x4_splat(d); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_low_i16x8(isuml), dd)); isum = i32x4_add(isum, i32x4_mul(i32x4_extend_high_i16x8(isuml), dd)); shift += 2; // adjust the indexing q8 = &q8[32..]; } // adjust the indexing q2 = &q2[32..]; } let isum = f32x4_convert_i32x4(isum); sumf = f32x4_add( sumf, f32x4_sub( f32x4_mul(isum, f32x4_splat(dall)), f32x4_mul(summs, f32x4_splat(dmin)), ), ); } let sumf = f32x4_extract_lane::<0>(sumf) + f32x4_extract_lane::<1>(sumf) + f32x4_extract_lane::<2>(sumf) + f32x4_extract_lane::<3>(sumf); Ok(sumf) } } #[inline(always)] pub(crate) fn vec_dot_q4k_q8k(n: usize, xs: &[BlockQ4K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q4k_q8k: {n} is not divisible by {QK_K}") } const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; let mut utmp: [u32; 4] = [0; 4]; let mut scales: [u8; 8] = [0; 8]; let mut mins: [u8; 8] = [0; 8]; let mut aux8: [u8; QK_K] = [0; QK_K]; let mut sums = f32x4_splat(0f32); unsafe { for (y, x) in ys.iter().zip(xs.iter()) { let q4 = &x.qs; let q8 = &y.qs; for j in 0..QK_K / 64 { let q4_1 = v128_load(q4.as_ptr().add(32 * j) as *const v128); let q4_2 = v128_load(q4.as_ptr().add(32 * j + 16) as *const v128); v128_store( aux8.as_mut_ptr().add(64 * j) as *mut v128, v128_and(q4_1, u8x16_splat(0x0F)), ); v128_store( aux8.as_mut_ptr().add(64 * j + 16) as *mut v128, v128_and(q4_2, u8x16_splat(0x0F)), ); v128_store( aux8.as_mut_ptr().add(64 * j + 32) as *mut v128, u8x16_shr(q4_1, 4), ); v128_store( aux8.as_mut_ptr().add(64 * j + 48) as *mut v128, u8x16_shr(q4_2, 4), ); } 
LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; //extract scales and mins LittleEndian::write_u32_into(&utmp[0..2], &mut scales); LittleEndian::write_u32_into(&utmp[2..4], &mut mins); let mut sumi = i32x4_splat(0); for j in (0..QK_K / 16).step_by(4) { let bsums = i32x4_load_extend_i16x4(y.bsums.as_ptr().add(j)); let (m1, m2) = (mins[j / 2] as i32, mins[j / 2 + 1] as i32); let mins = i32x4(m1, m1, m2, m2); sumi = i32x4_add(sumi, i32x4_mul(bsums, mins)); } let mut aux32 = i32x4_splat(0i32); for (scale_i, scale) in scales.iter().enumerate() { let scale = i32x4_splat(*scale as i32); for j in 0..4 { let i = 32 * scale_i + 8 * j; let q8 = i16x8_load_extend_i8x8(q8.as_ptr().add(i)); let aux8 = i16x8_load_extend_u8x8(aux8.as_ptr().add(i)); let aux16 = i16x8_mul(q8, aux8); aux32 = i32x4_add(aux32, i32x4_mul(scale, i32x4_extend_low_i16x8(aux16))); aux32 = i32x4_add(aux32, i32x4_mul(scale, i32x4_extend_high_i16x8(aux16))); } } let aux32 = f32x4_convert_i32x4(aux32); let d = f32x4_splat(x.d.to_f32() * y.d); sums = f32x4_add(sums, f32x4_mul(aux32, d)); let dmin = x.dmin.to_f32() * y.d; let dmin = f32x4_splat(dmin); let sumi = f32x4_convert_i32x4(sumi); sums = f32x4_sub(sums, f32x4_mul(sumi, dmin)); } let sums = f32x4_extract_lane::<0>(sums) + f32x4_extract_lane::<1>(sums) + f32x4_extract_lane::<2>(sums) + f32x4_extract_lane::<3>(sums); Ok(sums) } } #[inline(always)] pub(crate) fn vec_dot_q6k_q8k(n: usize, xs: &[BlockQ6K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q6k_q8k: {n} is not divisible by {QK_K}") } let mut aux8 = [0i8; QK_K]; unsafe { let mut sums = f32x4_splat(0f32); for (x, y) in xs.iter().zip(ys.iter()) { let q4 = &x.ql; let qh = &x.qh; let q8 = &y.qs; let mut aux32 = f32x4_splat(0f32); for j in (0..QK_K).step_by(128) { let aux8 = aux8.as_mut_ptr().add(j); let q4 = &q4.as_ptr().add(j / 2); let qh = &qh.as_ptr().add(j / 4); for l in (0..32).step_by(16) { // aux8[l] = (((q4[l] & 0xF) | ((qh[l] & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( v128_and(v128_load(q4.add(l) as *const v128), u8x16_splat(0xF)), u8x16_shl( v128_and(v128_load(qh.add(l) as *const v128), u8x16_splat(3)), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), ); // aux8[l + 32] = // (((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( v128_and(v128_load(q4.add(l + 32) as *const v128), u8x16_splat(0xF)), u8x16_shl( v128_and( u8x16_shr(v128_load(qh.add(l) as *const v128), 2), u8x16_splat(3), ), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l + 32) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), ); // aux8[l + 64] = (((q4[l] >> 4) | (((qh[l] >> 4) & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( u8x16_shr(v128_load(q4.add(l) as *const v128), 4), u8x16_shl( v128_and( u8x16_shr(v128_load(qh.add(l) as *const v128), 4), u8x16_splat(3), ), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l + 64) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), 
); // aux8[l + 96] = // (((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) as i32 - 32) as i8; let a8 = v128_or( u8x16_shr(v128_load(q4.add(l + 32) as *const v128), 4), u8x16_shl( v128_and( u8x16_shr(v128_load(qh.add(l) as *const v128), 6), u8x16_splat(3), ), 4, ), ); let a8_low = i16x8_sub(i16x8_extend_low_u8x16(a8), i16x8_splat(32)); let a8_high = i16x8_sub(i16x8_extend_high_u8x16(a8), i16x8_splat(32)); v128_store( aux8.add(l + 96) as *mut v128, i8x16_narrow_i16x8(a8_low, a8_high), ); } } for (j, &scale) in x.scales.iter().enumerate() { let scale = f32x4_splat(scale as f32); for offset in [0, 8] { let aux16 = i16x8_mul( i16x8_load_extend_i8x8(q8.as_ptr().add(16 * j + offset)), i16x8_load_extend_i8x8(aux8.as_ptr().add(16 * j + offset)), ); aux32 = f32x4_add( aux32, f32x4_mul(f32x4_convert_i32x4(i32x4_extend_low_i16x8(aux16)), scale), ); aux32 = f32x4_add( aux32, f32x4_mul(f32x4_convert_i32x4(i32x4_extend_high_i16x8(aux16)), scale), ); } } let d = f32x4_splat(x.d.to_f32() * y.d); sums = f32x4_add(sums, f32x4_mul(aux32, d)); } let sums = f32x4_extract_lane::<0>(sums) + f32x4_extract_lane::<1>(sums) + f32x4_extract_lane::<2>(sums) + f32x4_extract_lane::<3>(sums); Ok(sums) } } #[inline(always)] pub(crate) fn vec_dot_q8k_q8k(n: usize, xs: &[BlockQ8K], ys: &[BlockQ8K]) -> Result<f32> { let qk = QK_K; if n % QK_K != 0 { crate::bail!("vec_dot_q8k_q8k: {n} is not divisible by {qk}") } unsafe { let mut acc = f32x4_splat(0.0f32); for (xs, ys) in xs.iter().zip(ys.iter()) { let x_qs = xs.qs.as_ptr(); let y_qs = ys.qs.as_ptr(); let mut sumi = i32x4_splat(0); for j in (0..QK_K).step_by(8) { let xs = i16x8_load_extend_i8x8(x_qs.add(j)); let ys = i16x8_load_extend_i8x8(y_qs.add(j)); let sum_xy = i32x4_dot_i16x8(xs, ys); sumi = i32x4_add(sumi, sum_xy) } let d = f32x4_splat(xs.d * ys.d); acc = f32x4_add(acc, f32x4_mul(f32x4_convert_i32x4(sumi), d)) } let res = f32x4_extract_lane::<0>(acc) + f32x4_extract_lane::<1>(acc) + f32x4_extract_lane::<2>(acc) + f32x4_extract_lane::<3>(acc); Ok(res) } }
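// Hypothetical helper sketch added for illustration (not part of the original
// file): the four-lane horizontal sum above is spelled out inline at the end of
// every kernel; factoring it like this would deduplicate that pattern without
// changing behavior.
#[allow(dead_code)]
#[inline(always)]
fn hsum_f32x4(v: v128) -> f32 {
    f32x4_extract_lane::<0>(v)
        + f32x4_extract_lane::<1>(v)
        + f32x4_extract_lane::<2>(v)
        + f32x4_extract_lane::<3>(v)
}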
0
hf_public_repos/candle/candle-core/src
hf_public_repos/candle/candle-core/src/quantized/avx.rs
use super::k_quants::{ BlockQ2K, BlockQ3K, BlockQ4K, BlockQ4_0, BlockQ5K, BlockQ6K, BlockQ8K, BlockQ8_0, QK8_0, QK_K, }; use crate::Result; use byteorder::{ByteOrder, LittleEndian}; use half::f16; #[cfg(target_arch = "x86")] use core::arch::x86::*; #[cfg(target_arch = "x86_64")] use core::arch::x86_64::*; #[inline(always)] pub(crate) unsafe fn sum_i16_pairs_float(x: __m256i) -> __m256 { let ones = _mm256_set1_epi16(1); let summed_pairs = _mm256_madd_epi16(ones, x); _mm256_cvtepi32_ps(summed_pairs) } #[inline(always)] pub(crate) unsafe fn mul_sum_us8_pairs_float(ax: __m256i, sy: __m256i) -> __m256 { let dot = _mm256_maddubs_epi16(ax, sy); sum_i16_pairs_float(dot) } #[inline(always)] pub(crate) unsafe fn hsum_float_8(x: __m256) -> f32 { let res = _mm256_extractf128_ps(x, 1); let res = _mm_add_ps(res, _mm256_castps256_ps128(x)); let res = _mm_add_ps(res, _mm_movehl_ps(res, res)); let res = _mm_add_ss(res, _mm_movehdup_ps(res)); _mm_cvtss_f32(res) } #[inline(always)] pub(crate) unsafe fn bytes_from_nibbles_32(rsi: *const u8) -> __m256i { let tmp = _mm_loadu_si128(rsi as *const __m128i); let bytes = _mm256_insertf128_si256::<1>(_mm256_castsi128_si256(tmp), _mm_srli_epi16(tmp, 4)); let low_mask = _mm256_set1_epi8(0xF); _mm256_and_si256(low_mask, bytes) } #[inline(always)] pub(crate) unsafe fn mul_sum_i8_pairs_float(x: __m256i, y: __m256i) -> __m256 { let ax = _mm256_sign_epi8(x, x); let sy = _mm256_sign_epi8(y, x); mul_sum_us8_pairs_float(ax, sy) } #[inline(always)] pub(crate) fn vec_dot_q4_0_q8_0(n: usize, xs: &[BlockQ4_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q4_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut acc = _mm256_setzero_ps(); for (x, y) in xs.iter().zip(ys.iter()) { let d = _mm256_set1_ps(f16::to_f32(x.d) * f16::to_f32(y.d)); let bx = bytes_from_nibbles_32(x.qs.as_ptr()); let off = _mm256_set1_epi8(8); let bx = _mm256_sub_epi8(bx, off); let by = _mm256_loadu_si256(y.qs.as_ptr() as *const __m256i); let q = mul_sum_i8_pairs_float(bx, by); acc = _mm256_fmadd_ps(d, q, acc); } Ok(hsum_float_8(acc)) } } #[inline(always)] pub(crate) fn vec_dot_q8_0_q8_0(n: usize, xs: &[BlockQ8_0], ys: &[BlockQ8_0]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q8_0_q8_0: {n} is not divisible by {qk}") } unsafe { let mut acc = _mm256_setzero_ps(); for (x, y) in xs.iter().zip(ys.iter()) { let d = _mm256_set1_ps(f16::to_f32(x.d) * f16::to_f32(y.d)); let bx = _mm256_loadu_si256(x.qs.as_ptr() as *const __m256i); let by = _mm256_loadu_si256(y.qs.as_ptr() as *const __m256i); let q = mul_sum_i8_pairs_float(bx, by); acc = _mm256_fmadd_ps(d, q, acc); } Ok(hsum_float_8(acc)) } } #[inline(always)] unsafe fn get_scale_shuffle(i: usize) -> __m128i { const K_SHUFFLE: [u8; 128] = [ 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 11, 11, 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 15, 15, 15, ]; _mm_loadu_si128((K_SHUFFLE.as_ptr() as *const __m128i).add(i)) } #[inline(always)] unsafe fn get_scale_shuffle_k4(i: usize) -> __m256i { const K_SHUFFLE: [u8; 256] = [ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 
3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, ]; _mm256_loadu_si256((K_SHUFFLE.as_ptr() as *const __m256i).add(i)) } #[inline(always)] unsafe fn get_scale_shuffle_q3k(i: usize) -> __m256i { const K_SHUFFLE: [u8; 128] = [ 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 2, 3, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 4, 5, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 6, 7, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 8, 9, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 10, 11, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 12, 13, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, 14, 15, ]; _mm256_loadu_si256((K_SHUFFLE.as_ptr() as *const __m256i).add(i)) } #[inline(always)] pub(crate) fn vec_dot_q6k_q8k(n: usize, xs: &[BlockQ6K], ys: &[BlockQ8K]) -> Result<f32> { let qk = QK_K; if n % qk != 0 { crate::bail!("vec_dot_q6k_8k: {n} is not divisible by {qk}") } unsafe { let m4 = _mm256_set1_epi8(0xF); let m2 = _mm256_set1_epi8(3); let m32s = _mm256_set1_epi8(32); let mut acc = _mm256_setzero_ps(); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let mut q4 = x.ql.as_ptr(); let mut qh = x.qh.as_ptr(); let mut q8 = y.qs.as_ptr(); let scales = _mm_loadu_si128(x.scales.as_ptr() as *const __m128i); let mut sumi = _mm256_setzero_si256(); for j in 0..QK_K / 128 { let is = j * 4; let scale_0 = _mm_shuffle_epi8(scales, get_scale_shuffle(is)); let scale_1 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 1)); let scale_2 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 2)); let scale_3 = _mm_shuffle_epi8(scales, get_scale_shuffle(is + 3)); let q4bits1 = _mm256_loadu_si256(q4 as *const __m256i); q4 = q4.add(32); let q4bits2 = _mm256_loadu_si256(q4 as *const __m256i); q4 = q4.add(32); let q4bits_h = _mm256_loadu_si256(qh as *const __m256i); qh = qh.add(32); let q4h_0 = _mm256_slli_epi16(_mm256_and_si256(q4bits_h, m2), 4); let q4h_1 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bits_h, 2), m2), 4); let q4h_2 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bits_h, 4), m2), 4); let q4h_3 = _mm256_slli_epi16(_mm256_and_si256(_mm256_srli_epi16(q4bits_h, 6), m2), 4); let q4_0 = _mm256_or_si256(_mm256_and_si256(q4bits1, m4), q4h_0); let q4_1 = _mm256_or_si256(_mm256_and_si256(q4bits2, m4), q4h_1); let q4_2 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits1, 4), m4), q4h_2); let q4_3 = _mm256_or_si256(_mm256_and_si256(_mm256_srli_epi16(q4bits2, 4), m4), q4h_3); let q8_0 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_1 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_2 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_3 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8s_0 = _mm256_maddubs_epi16(m32s, q8_0); let q8s_1 = _mm256_maddubs_epi16(m32s, q8_1); let q8s_2 = 
_mm256_maddubs_epi16(m32s, q8_2); let q8s_3 = _mm256_maddubs_epi16(m32s, q8_3); let p16_0 = _mm256_maddubs_epi16(q4_0, q8_0); let p16_1 = _mm256_maddubs_epi16(q4_1, q8_1); let p16_2 = _mm256_maddubs_epi16(q4_2, q8_2); let p16_3 = _mm256_maddubs_epi16(q4_3, q8_3); let p16_0 = _mm256_sub_epi16(p16_0, q8s_0); let p16_1 = _mm256_sub_epi16(p16_1, q8s_1); let p16_2 = _mm256_sub_epi16(p16_2, q8s_2); let p16_3 = _mm256_sub_epi16(p16_3, q8s_3); let p16_0 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_0), p16_0); let p16_1 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_1), p16_1); let p16_2 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_2), p16_2); let p16_3 = _mm256_madd_epi16(_mm256_cvtepi8_epi16(scale_3), p16_3); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_2, p16_3)); } acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); } Ok(hsum_float_8(acc)) } } #[inline(always)] unsafe fn mm256_set_m128i(a: __m128i, b: __m128i) -> __m256i { _mm256_insertf128_si256(_mm256_castsi128_si256(b), a, 1) } #[inline(always)] pub(crate) fn vec_dot_q2k_q8k(n: usize, xs: &[BlockQ2K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q2k_q8k: {n} is not divisible by {QK_K}") } unsafe { let m3 = _mm256_set1_epi8(3); let m4 = _mm_set1_epi8(0xF); let mut acc = _mm256_setzero_ps(); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = -y.d * x.dmin.to_f32(); let mut q2 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let mins_and_scales = _mm_loadu_si128(x.scales.as_ptr() as *const __m128i); let scales8 = _mm_and_si128(mins_and_scales, m4); let mins8 = _mm_and_si128(_mm_srli_epi16(mins_and_scales, 4), m4); let mins = _mm256_cvtepi8_epi16(mins8); let prod = _mm256_madd_epi16(mins, _mm256_loadu_si256(y.bsums.as_ptr() as *const __m256i)); acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&dmin), _mm256_cvtepi32_ps(prod), acc); let all_scales = _mm256_cvtepi8_epi16(scales8); let l_scales = _mm256_extracti128_si256(all_scales, 0); let h_scales = _mm256_extracti128_si256(all_scales, 1); let scales = [ mm256_set_m128i(l_scales, l_scales), mm256_set_m128i(h_scales, h_scales), ]; let mut sumi = _mm256_setzero_si256(); for scale in scales { let q2bits = _mm256_loadu_si256(q2 as *const __m256i); q2 = q2.add(32); let q8_0 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_1 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_2 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_3 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q2_0 = _mm256_and_si256(q2bits, m3); let q2_1 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 2), m3); let q2_2 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 4), m3); let q2_3 = _mm256_and_si256(_mm256_srli_epi16(q2bits, 6), m3); let p0 = _mm256_maddubs_epi16(q2_0, q8_0); let p1 = _mm256_maddubs_epi16(q2_1, q8_1); let p2 = _mm256_maddubs_epi16(q2_2, q8_2); let p3 = _mm256_maddubs_epi16(q2_3, q8_3); let p0 = _mm256_madd_epi16(_mm256_shuffle_epi8(scale, get_scale_shuffle_q3k(0)), p0); let p1 = _mm256_madd_epi16(_mm256_shuffle_epi8(scale, get_scale_shuffle_q3k(1)), p1); let p2 = _mm256_madd_epi16(_mm256_shuffle_epi8(scale, get_scale_shuffle_q3k(2)), p2); let p3 = _mm256_madd_epi16(_mm256_shuffle_epi8(scale, get_scale_shuffle_q3k(3)), p3); let p0 = _mm256_add_epi32(p0, p1); let p2 = _mm256_add_epi32(p2, p3); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p0, p2)); } acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), 
_mm256_cvtepi32_ps(sumi), acc); } Ok(hsum_float_8(acc)) } } #[inline(always)] pub(crate) fn vec_dot_q3k_q8k(n: usize, xs: &[BlockQ3K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q3k_q8k: {n} is not divisible by {QK_K}") } const KMASK1: u32 = 0x03030303; const KMASK2: u32 = 0x0f0f0f0f; let mut aux = [0u32; 3]; unsafe { let m3 = _mm256_set1_epi8(3); let mone = _mm256_set1_epi8(1); let m32 = _mm_set1_epi8(32); let mut acc = _mm256_setzero_ps(); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let mut q3 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); LittleEndian::read_u32_into(&x.scales, &mut aux); let scales128 = _mm_set_epi32( (((aux[1] >> 4) & KMASK2) | (((aux[2] >> 6) & KMASK1) << 4)) as i32, (((aux[0] >> 4) & KMASK2) | (((aux[2] >> 4) & KMASK1) << 4)) as i32, ((aux[1] & KMASK2) | (((aux[2] >> 2) & KMASK1) << 4)) as i32, ((aux[0] & KMASK2) | (((aux[2]) & KMASK1) << 4)) as i32, ); let scales128 = _mm_sub_epi8(scales128, m32); let all_scales = _mm256_cvtepi8_epi16(scales128); let l_scales = _mm256_extracti128_si256(all_scales, 0); let h_scales = _mm256_extracti128_si256(all_scales, 1); let scales = [ mm256_set_m128i(l_scales, l_scales), mm256_set_m128i(h_scales, h_scales), ]; // high bit let hbits = _mm256_loadu_si256(x.hmask.as_ptr() as *const __m256i); let mut sumi = _mm256_setzero_si256(); for (j, scale) in scales.iter().enumerate() { // load low 2 bits let q3bits = _mm256_loadu_si256(q3 as *const __m256i); q3 = q3.add(32); // Prepare low and high bits // We hardcode the shifts here to avoid loading them into a seperate register let q3l_0 = _mm256_and_si256(q3bits, m3); let q3h_0 = if j == 0 { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 0)), 0) } else { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 4)), 4) }; let q3h_0 = _mm256_slli_epi16(q3h_0, 2); let q3l_1 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 2), m3); let q3h_1 = if j == 0 { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 1)), 1) } else { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 5)), 5) }; let q3h_1 = _mm256_slli_epi16(q3h_1, 2); let q3l_2 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 4), m3); let q3h_2 = if j == 0 { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 2)), 2) } else { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 6)), 6) }; let q3h_2 = _mm256_slli_epi16(q3h_2, 2); let q3l_3 = _mm256_and_si256(_mm256_srli_epi16(q3bits, 6), m3); let q3h_3 = if j == 0 { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 3)), 3) } else { _mm256_srli_epi16(_mm256_andnot_si256(hbits, _mm256_slli_epi16(mone, 7)), 7) }; let q3h_3 = _mm256_slli_epi16(q3h_3, 2); // load Q8 quants let q8_0 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_1 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_2 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_3 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); // Dot product: we multiply the 2 low bits and 1 high bit part separately, so we // can use _mm256_maddubs_epi16, and then subtract. 
The high bit part has the 2 // already subtracted (and so, it is zero if the high bit was not set, and 2 if the // high bit was set) let q8s_0 = _mm256_maddubs_epi16(q3h_0, q8_0); let q8s_1 = _mm256_maddubs_epi16(q3h_1, q8_1); let q8s_2 = _mm256_maddubs_epi16(q3h_2, q8_2); let q8s_3 = _mm256_maddubs_epi16(q3h_3, q8_3); let p16_0 = _mm256_maddubs_epi16(q3l_0, q8_0); let p16_1 = _mm256_maddubs_epi16(q3l_1, q8_1); let p16_2 = _mm256_maddubs_epi16(q3l_2, q8_2); let p16_3 = _mm256_maddubs_epi16(q3l_3, q8_3); let p16_0 = _mm256_sub_epi16(p16_0, q8s_0); let p16_1 = _mm256_sub_epi16(p16_1, q8s_1); let p16_2 = _mm256_sub_epi16(p16_2, q8s_2); let p16_3 = _mm256_sub_epi16(p16_3, q8s_3); // multiply with scales let p16_0 = _mm256_madd_epi16(_mm256_shuffle_epi8(*scale, get_scale_shuffle_q3k(0)), p16_0); let p16_1 = _mm256_madd_epi16(_mm256_shuffle_epi8(*scale, get_scale_shuffle_q3k(1)), p16_1); let p16_2 = _mm256_madd_epi16(_mm256_shuffle_epi8(*scale, get_scale_shuffle_q3k(2)), p16_2); let p16_3 = _mm256_madd_epi16(_mm256_shuffle_epi8(*scale, get_scale_shuffle_q3k(3)), p16_3); // accumulate let p16_0 = _mm256_add_epi32(p16_0, p16_1); let p16_2 = _mm256_add_epi32(p16_2, p16_3); sumi = _mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_2)); } // multiply with block scale and accumulate acc = _mm256_fmadd_ps(_mm256_broadcast_ss(&d), _mm256_cvtepi32_ps(sumi), acc); } Ok(hsum_float_8(acc)) } } #[inline(always)] pub(crate) fn vec_dot_q4k_q8k(n: usize, xs: &[BlockQ4K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q4k_q8k: {n} is not divisible by {QK_K}") } let mut utmp = [0u32; 4]; const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; unsafe { let m4 = _mm256_set1_epi8(0xF); let mut acc = _mm256_setzero_ps(); let mut acc_m = _mm_setzero_ps(); for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = -y.d * x.dmin.to_f32(); LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; let mut q4 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32( utmp[3] as i32, utmp[2] as i32, utmp[1] as i32, utmp[0] as i32, )); let q8sums = _mm256_loadu_si256(y.bsums.as_ptr() as *const __m256i); let q8s = _mm_hadd_epi16( _mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1), ); let prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); acc_m = _mm_fmadd_ps(_mm_set1_ps(dmin), _mm_cvtepi32_ps(prod), acc_m); let sc128 = _mm256_extracti128_si256(mins_and_scales, 0); let scales = mm256_set_m128i(sc128, sc128); let mut sumi = _mm256_setzero_si256(); for j in 0..QK_K / 64 { let scale_l = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2 * j)); let scale_h = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2 * j + 1)); let q4bits = _mm256_loadu_si256(q4 as *const __m256i); q4 = q4.add(32); let q4l = _mm256_and_si256(q4bits, m4); let q4h = _mm256_and_si256(_mm256_srli_epi16(q4bits, 4), m4); let q8l = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let p16l = _mm256_maddubs_epi16(q4l, q8l); let p16l = _mm256_madd_epi16(scale_l, p16l); sumi = _mm256_add_epi32(sumi, p16l); let q8h = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let p16h = _mm256_maddubs_epi16(q4h, q8h); let p16h = _mm256_madd_epi16(scale_h, p16h); sumi = 
_mm256_add_epi32(sumi, p16h); } let vd = _mm256_set1_ps(d); acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); } let acc_m = _mm_add_ps(acc_m, _mm_movehl_ps(acc_m, acc_m)); let acc_m = _mm_add_ss(acc_m, _mm_movehdup_ps(acc_m)); Ok(hsum_float_8(acc) + _mm_cvtss_f32(acc_m)) } } #[inline(always)] pub(crate) fn vec_dot_q5k_q8k(n: usize, xs: &[BlockQ5K], ys: &[BlockQ8K]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q5k_q8k: {n} is not divisible by {QK_K}") } let mut utmp = [0u32; 4]; const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; unsafe { let m4 = _mm256_set1_epi8(0xF); let mzero = _mm_setzero_si128(); let mone = _mm256_set1_epi8(1); let mut acc = _mm256_setzero_ps(); let mut summs = 0.0; for (x, y) in xs.iter().zip(ys.iter()) { let d = y.d * x.d.to_f32(); let dmin = -y.d * x.dmin.to_f32(); LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; let mut q5 = x.qs.as_ptr(); let mut q8 = y.qs.as_ptr(); let mins_and_scales = _mm256_cvtepu8_epi16(_mm_set_epi32( utmp[3] as i32, utmp[2] as i32, utmp[1] as i32, utmp[0] as i32, )); let q8sums = _mm256_loadu_si256(y.bsums.as_ptr() as *const __m256i); let q8s = _mm_hadd_epi16( _mm256_extracti128_si256(q8sums, 0), _mm256_extracti128_si256(q8sums, 1), ); let prod = _mm_madd_epi16(_mm256_extracti128_si256(mins_and_scales, 1), q8s); let hsum = _mm_hadd_epi32(_mm_hadd_epi32(prod, mzero), mzero); summs += dmin * _mm_extract_epi32(hsum, 0) as f32; let sc128 = _mm256_extracti128_si256(mins_and_scales, 0); let scales = mm256_set_m128i(sc128, sc128); let hbits = _mm256_loadu_si256(x.qh.as_ptr() as *const __m256i); let mut hmask = mone; let mut sumi = _mm256_setzero_si256(); for j in 0..QK_K / 64 { let scale_0 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2 * j)); let scale_1 = _mm256_shuffle_epi8(scales, get_scale_shuffle_k4(2 * j + 1)); let q5bits = _mm256_loadu_si256(q5 as *const __m256i); q5 = q5.add(32); //Similar to q3k we hardcode the shifts here to avoid loading them into a seperate register let q5l_0 = _mm256_and_si256(q5bits, m4); let q5l_0_shift_input = _mm256_and_si256(hbits, hmask); let q5l_0_right_shift = match j { 0 => _mm256_srli_epi16(q5l_0_shift_input, 0), 1 => _mm256_srli_epi16(q5l_0_shift_input, 2), 2 => _mm256_srli_epi16(q5l_0_shift_input, 4), 3 => _mm256_srli_epi16(q5l_0_shift_input, 6), _ => unreachable!(), }; let q5h_0 = _mm256_slli_epi16(q5l_0_right_shift, 4); let q5_0 = _mm256_add_epi8(q5l_0, q5h_0); hmask = _mm256_slli_epi16(hmask, 1); let q5l_1 = _mm256_and_si256(_mm256_srli_epi16(q5bits, 4), m4); let q5l_1_shift_input = _mm256_and_si256(hbits, hmask); let q5l_1_right_shift = match j { 0 => _mm256_srli_epi16(q5l_1_shift_input, 1), 1 => _mm256_srli_epi16(q5l_1_shift_input, 3), 2 => _mm256_srli_epi16(q5l_1_shift_input, 5), 3 => _mm256_srli_epi16(q5l_1_shift_input, 7), _ => unreachable!(), }; let q5h_1 = _mm256_slli_epi16(q5l_1_right_shift, 4); let q5_1 = _mm256_add_epi8(q5l_1, q5h_1); hmask = _mm256_slli_epi16(hmask, 1); let q8_0 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let q8_1 = _mm256_loadu_si256(q8 as *const __m256i); q8 = q8.add(32); let p16_0 = _mm256_maddubs_epi16(q5_0, q8_0); let p16_1 = _mm256_maddubs_epi16(q5_1, q8_1); let p16_0 = _mm256_madd_epi16(scale_0, p16_0); let p16_1 = _mm256_madd_epi16(scale_1, p16_1); sumi = 
_mm256_add_epi32(sumi, _mm256_add_epi32(p16_0, p16_1)); } let vd = _mm256_set1_ps(d); acc = _mm256_fmadd_ps(vd, _mm256_cvtepi32_ps(sumi), acc); } Ok(hsum_float_8(acc) + summs) } } #[inline(always)] pub(crate) fn vec_dot_q8k_q8k(n: usize, xs: &[BlockQ8K], ys: &[BlockQ8K]) -> Result<f32> { let qk = QK_K; if n % qk != 0 { crate::bail!("vec_dot_q8k_8k: {n} is not divisible by {qk}") } unsafe { let mut acc = _mm256_setzero_ps(); for (xs, ys) in xs.iter().zip(ys.iter()) { let mut sumi = _mm256_setzero_si256(); let x_qs = xs.qs.as_ptr(); let y_qs = ys.qs.as_ptr(); for j in (0..QK_K).step_by(32) { let xs = _mm256_loadu_si256(x_qs.add(j) as *const __m256i); let ys = _mm256_loadu_si256(y_qs.add(j) as *const __m256i); let xs0 = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(xs, 0)); let ys0 = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(ys, 0)); sumi = _mm256_add_epi32(sumi, _mm256_madd_epi16(xs0, ys0)); let xs1 = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(xs, 1)); let ys1 = _mm256_cvtepi8_epi16(_mm256_extracti128_si256(ys, 1)); sumi = _mm256_add_epi32(sumi, _mm256_madd_epi16(xs1, ys1)); } let d = _mm256_set1_ps(xs.d * ys.d); acc = _mm256_fmadd_ps(d, _mm256_cvtepi32_ps(sumi), acc); } Ok(hsum_float_8(acc)) } }
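// Scalar reference sketch added for illustration (not part of the original file):
// the AVX2 `vec_dot_q8k_q8k` above reduces to this per-block computation. It
// assumes the `BlockQ8K` layout from `k_quants` (an `f32` scale `d` and `QK_K`
// signed bytes in `qs`) and is handy as an oracle when validating the intrinsics.
#[allow(dead_code)]
fn vec_dot_q8k_q8k_scalar(xs: &[BlockQ8K], ys: &[BlockQ8K]) -> f32 {
    xs.iter()
        .zip(ys.iter())
        .map(|(x, y)| {
            let isum: i32 = x
                .qs
                .iter()
                .zip(y.qs.iter())
                .map(|(&a, &b)| a as i32 * b as i32)
                .sum();
            isum as f32 * x.d * y.d
        })
        .sum()
}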
0
hf_public_repos/candle/candle-core/src
hf_public_repos/candle/candle-core/src/quantized/mod.rs
use crate::{Device, Result, Shape, Tensor}; #[cfg(target_feature = "avx")] pub mod avx; pub mod ggml_file; pub mod gguf_file; pub mod k_quants; #[cfg(target_feature = "neon")] pub mod neon; #[cfg(target_feature = "simd128")] pub mod simd128; pub mod utils; pub use k_quants::GgmlType; pub struct QTensor { data: Box<dyn QuantizedType>, shape: Shape, } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum GgmlDType { F32, F16, Q4_0, Q4_1, Q5_0, Q5_1, Q8_0, Q8_1, Q2K, Q3K, Q4K, Q5K, Q6K, Q8K, } impl GgmlDType { pub(crate) fn from_u32(u: u32) -> Result<Self> { let dtype = match u { 0 => Self::F32, 1 => Self::F16, 2 => Self::Q4_0, 3 => Self::Q4_1, 6 => Self::Q5_0, 7 => Self::Q5_1, 8 => Self::Q8_0, 9 => Self::Q8_1, 10 => Self::Q2K, 11 => Self::Q3K, 12 => Self::Q4K, 13 => Self::Q5K, 14 => Self::Q6K, 15 => Self::Q8K, _ => crate::bail!("unknown dtype for tensor {u}"), }; Ok(dtype) } pub(crate) fn to_u32(self) -> u32 { match self { Self::F32 => 0, Self::F16 => 1, Self::Q4_0 => 2, Self::Q4_1 => 3, Self::Q5_0 => 6, Self::Q5_1 => 7, Self::Q8_0 => 8, Self::Q8_1 => 9, Self::Q2K => 10, Self::Q3K => 11, Self::Q4K => 12, Self::Q5K => 13, Self::Q6K => 14, Self::Q8K => 15, } } /// The type size for blocks in bytes. pub fn type_size(&self) -> usize { use k_quants::*; match self { Self::F32 => 4, Self::F16 => 2, Self::Q4_0 => std::mem::size_of::<BlockQ4_0>(), Self::Q4_1 => std::mem::size_of::<BlockQ4_1>(), Self::Q5_0 => std::mem::size_of::<BlockQ5_0>(), Self::Q5_1 => std::mem::size_of::<BlockQ5_1>(), // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L932 Self::Q8_0 => std::mem::size_of::<BlockQ8_0>(), Self::Q8_1 => std::mem::size_of::<BlockQ8_1>(), Self::Q2K => std::mem::size_of::<BlockQ2K>(), Self::Q3K => std::mem::size_of::<BlockQ3K>(), Self::Q4K => std::mem::size_of::<BlockQ4K>(), Self::Q5K => std::mem::size_of::<BlockQ5K>(), Self::Q6K => std::mem::size_of::<BlockQ6K>(), Self::Q8K => std::mem::size_of::<BlockQ8K>(), } } /// The block size, i.e. the number of elements stored in each block. pub fn blck_size(&self) -> usize { match self { Self::F32 => 1, Self::F16 => 1, Self::Q4_0 => k_quants::QK4_0, Self::Q4_1 => k_quants::QK4_1, Self::Q5_0 => k_quants::QK5_0, Self::Q5_1 => k_quants::QK5_1, Self::Q8_0 => k_quants::QK8_0, Self::Q8_1 => k_quants::QK8_1, Self::Q2K | Self::Q3K | Self::Q4K | Self::Q5K | Self::Q6K | Self::Q8K => k_quants::QK_K, } } } // A version of GgmlType without `vec_dot` so that it can be dyn boxed. 
pub trait QuantizedType: Send + Sync { fn dtype(&self) -> GgmlDType; fn matmul_t(&self, mkn: (usize, usize, usize), lhs: &[f32], dst: &mut [f32]) -> Result<()>; fn to_float(&self, ys: &mut [f32]) -> Result<()>; fn storage_size_in_bytes(&self) -> usize; fn as_ptr(&self) -> *const u8; } impl<T: k_quants::GgmlType + Send + Sync> QuantizedType for Vec<T> { fn matmul_t(&self, mkn: (usize, usize, usize), lhs: &[f32], dst: &mut [f32]) -> Result<()> { k_quants::matmul(mkn, lhs, self.as_slice(), dst) } fn dtype(&self) -> GgmlDType { T::DTYPE } fn to_float(&self, ys: &mut [f32]) -> Result<()> { T::to_float(self.as_slice(), ys) } fn storage_size_in_bytes(&self) -> usize { self.len() * std::mem::size_of::<T>() } fn as_ptr(&self) -> *const u8 { self.as_ptr() as *const u8 } } impl std::fmt::Debug for QTensor { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { write!(f, "QTensor[{:?}; {:?}]", self.shape, self.dtype()) } } fn check_shape<T: k_quants::GgmlType>(shape: &Shape) -> Result<()> { let dims = shape.dims(); if dims.is_empty() { crate::bail!("scalar tensor cannot be quantized {shape:?}") } if dims[dims.len() - 1] % T::BLCK_SIZE != 0 { crate::bail!( "quantized tensor must have their last dim divisible by block size {shape:?} {}", T::BLCK_SIZE ) } Ok(()) } impl QTensor { pub fn new<S: Into<Shape>, T: k_quants::GgmlType + Send + Sync + 'static>( data: Vec<T>, shape: S, ) -> Result<Self> { let shape = shape.into(); check_shape::<T>(&shape)?; Ok(Self { data: Box::new(data), shape, }) } pub fn quantize<T: k_quants::GgmlType + Send + Sync + 'static>(src: &Tensor) -> Result<Self> { let shape = src.shape(); check_shape::<T>(shape)?; let src = src .to_dtype(crate::DType::F32)? .flatten_all()? .to_vec1::<f32>()?; if src.len() % T::BLCK_SIZE != 0 { crate::bail!( "tensor size ({shape:?}) is not divisible by block size {}", T::BLCK_SIZE ) } let mut data = vec![T::zeros(); src.len() / T::BLCK_SIZE]; T::from_float(&src, &mut data)?; Ok(Self { data: Box::new(data), shape: shape.clone(), }) } pub fn dtype(&self) -> GgmlDType { self.data.dtype() } pub fn rank(&self) -> usize { self.shape.rank() } pub fn shape(&self) -> &Shape { &self.shape } pub fn dequantize(&self, device: &Device) -> Result<Tensor> { let mut f32_data = vec![0f32; self.shape.elem_count()]; self.data.to_float(&mut f32_data)?; Tensor::from_vec(f32_data, &self.shape, device) } pub fn matmul_t(&self, mkn: (usize, usize, usize), lhs: &[f32], dst: &mut [f32]) -> Result<()> { self.data.matmul_t(mkn, lhs, dst) } pub fn storage_size_in_bytes(&self) -> usize { self.data.storage_size_in_bytes() } pub fn as_ptr(&self) -> *const u8 { self.data.as_ptr() } } #[derive(Clone, Debug)] pub enum QMatMul { QTensor(std::sync::Arc<QTensor>), Tensor(Tensor), } thread_local! 
{ static DEQUANTIZE_ALL: bool = { match std::env::var("CANDLE_DEQUANTIZE_ALL") { Ok(s) => { !s.is_empty() && s != "0" }, Err(_) => false, } } } impl QMatMul { pub fn from_arc(qtensor: std::sync::Arc<QTensor>) -> Result<Self> { let dequantize = match qtensor.dtype() { GgmlDType::F32 | GgmlDType::F16 => true, _ => DEQUANTIZE_ALL.with(|b| *b), }; let t = if dequantize { let tensor = qtensor.dequantize(&Device::Cpu)?; Self::Tensor(tensor) } else { Self::QTensor(qtensor) }; Ok(t) } pub fn from_qtensor(qtensor: QTensor) -> Result<Self> { Self::from_arc(std::sync::Arc::new(qtensor)) } } impl crate::CustomOp1 for QTensor { fn name(&self) -> &'static str { "qmatmul" } fn cpu_fwd( &self, storage: &crate::CpuStorage, layout: &crate::Layout, ) -> Result<(crate::CpuStorage, Shape)> { if !layout.is_contiguous() { crate::bail!("input tensor is not contiguous {layout:?}") } let src_shape = layout.shape(); // self is transposed so n is first then k. let (n, k) = self.shape.dims2()?; if src_shape.rank() < 2 { crate::bail!("input tensor has only one dimension {layout:?}") } let mut dst_shape = src_shape.dims().to_vec(); let last_k = dst_shape.pop().unwrap(); if last_k != k { crate::bail!("input tensor {layout:?} incompatible with {:?}", self.shape) } dst_shape.push(n); let dst_shape = Shape::from(dst_shape); let storage = storage.as_slice::<f32>()?; let storage = &storage[layout.start_offset()..layout.start_offset() + src_shape.elem_count()]; let mut dst_storage = vec![0f32; dst_shape.elem_count()]; self.matmul_t( (dst_shape.elem_count() / n, k, n), storage, &mut dst_storage, )?; Ok((crate::CpuStorage::F32(dst_storage), dst_shape)) } } impl crate::Module for QMatMul { fn forward(&self, xs: &Tensor) -> Result<Tensor> { match self { Self::QTensor(t) => xs.apply_op1_no_bwd(t.as_ref()), Self::Tensor(w) => { let w = match *xs.dims() { [b1, b2, _, _] => w.broadcast_left((b1, b2))?.t()?, [bsize, _, _] => w.broadcast_left(bsize)?.t()?, _ => w.t()?, }; xs.matmul(&w) } } } }
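// A minimal usage sketch for the types above: quantize an f32 tensor to Q8_0,
// wrap it in a `QMatMul`, and run a forward pass. This is illustrative only;
// it assumes nothing beyond the in-crate API defined in this module
// (`QTensor::quantize`, `QMatMul::from_qtensor`, `crate::Module`), and the
// shapes below are arbitrary.
#[cfg(test)]
mod sketch_tests {
    use super::*;
    use crate::Module;

    #[test]
    fn qmatmul_roundtrip_sketch() -> Result<()> {
        let cpu = &Device::Cpu;
        // The weight matrix is stored transposed: (n, k), with k divisible by
        // the block size (64 is a multiple of QK8_0 = 32).
        let ws = Tensor::randn(0f32, 1f32, (8, 64), cpu)?;
        let qws = QTensor::quantize::<k_quants::BlockQ8_0>(&ws)?;
        let mm = QMatMul::from_qtensor(qws)?;
        // An input of shape (batch, k) produces an output of shape (batch, n).
        let xs = Tensor::randn(0f32, 1f32, (3, 64), cpu)?;
        let ys = mm.forward(&xs)?;
        assert_eq!(ys.dims(), &[3, 8]);
        Ok(())
    }
}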
0
hf_public_repos/candle/candle-core/src
hf_public_repos/candle/candle-core/src/quantized/gguf_file.rs
//! Support for the GGUF file format. //! //! Spec: https://github.com/philpax/ggml/blob/gguf-spec/docs/gguf.md use super::{GgmlDType, QTensor}; use crate::Result; use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt}; use std::collections::HashMap; pub const DEFAULT_ALIGNMENT: u64 = 32; #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum Magic { Gguf, } impl TryFrom<u32> for Magic { type Error = crate::Error; fn try_from(value: u32) -> Result<Self> { let magic = match value { 0x46554747 | 0x47475546 => Self::Gguf, _ => crate::bail!("unknown magic 0x{value:08x}"), }; Ok(magic) } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum VersionedMagic { GgufV1, GgufV2, GgufV3, } impl VersionedMagic { fn read<R: std::io::Read>(reader: &mut R) -> Result<Self> { let magic = reader.read_u32::<LittleEndian>()?; let magic = Magic::try_from(magic)?; let version = reader.read_u32::<LittleEndian>()?; let versioned_magic = match (magic, version) { (Magic::Gguf, 1) => Self::GgufV1, (Magic::Gguf, 2) => Self::GgufV2, (Magic::Gguf, 3) => Self::GgufV3, _ => crate::bail!("ggml: unsupported magic/version {magic:?}/{version}"), }; Ok(versioned_magic) } } #[derive(Debug)] pub struct TensorInfo { pub ggml_dtype: GgmlDType, pub shape: crate::Shape, pub offset: u64, } impl TensorInfo { pub fn read<R: std::io::Seek + std::io::Read>( &self, reader: &mut R, tensor_data_offset: u64, ) -> Result<QTensor> { let tensor_elems = self.shape.elem_count(); let blck_size = self.ggml_dtype.blck_size(); if tensor_elems % blck_size != 0 { crate::bail!( "the number of elements {tensor_elems} is not divisible by the block size {blck_size}" ) } let size_in_bytes = tensor_elems / blck_size * self.ggml_dtype.type_size(); let mut raw_data = vec![0u8; size_in_bytes]; reader.seek(std::io::SeekFrom::Start(tensor_data_offset + self.offset))?; reader.read_exact(&mut raw_data)?; super::ggml_file::qtensor_from_ggml(self.ggml_dtype, &raw_data, self.shape.dims().to_vec()) } } #[derive(Debug)] pub struct Content { pub magic: VersionedMagic, pub metadata: HashMap<String, Value>, pub tensor_infos: HashMap<String, TensorInfo>, pub tensor_data_offset: u64, } fn read_string<R: std::io::Read>(reader: &mut R, magic: &VersionedMagic) -> Result<String> { let len = match magic { VersionedMagic::GgufV1 => reader.read_u32::<LittleEndian>()? as usize, VersionedMagic::GgufV2 | VersionedMagic::GgufV3 => { reader.read_u64::<LittleEndian>()? as usize } }; let mut v = vec![0u8; len]; reader.read_exact(&mut v)?; // GGUF strings are supposed to be non-null-terminated, but in practice trailing null bytes do show up, so strip them. while let Some(0) = v.last() { v.pop(); } // GGUF strings are utf8 encoded, but some files contain invalid utf8, so decode lossily. Ok(String::from_utf8_lossy(&v).into_owned()) } #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum ValueType { // The value is an 8-bit unsigned integer. U8, // The value is an 8-bit signed integer. I8, // The value is a 16-bit unsigned little-endian integer. U16, // The value is a 16-bit signed little-endian integer. I16, // The value is a 32-bit unsigned little-endian integer. U32, // The value is a 32-bit signed little-endian integer. I32, // The value is a 64-bit unsigned little-endian integer. U64, // The value is a 64-bit signed little-endian integer. I64, // The value is a 32-bit IEEE754 floating point number. F32, // The value is a 64-bit IEEE754 floating point number. F64, // The value is a boolean. // 1-byte value where 0 is false and 1 is true.
// Anything else is invalid, and should be treated as either the model being invalid or the reader being buggy. Bool, // The value is a UTF-8 non-null-terminated string, with length prepended. String, // The value is an array of other values, with the length and type prepended. // Arrays can be nested, and the length of the array is the number of elements in the array, not the number of bytes. Array, } #[derive(Debug, Clone)] pub enum Value { U8(u8), I8(i8), U16(u16), I16(i16), U32(u32), I32(i32), U64(u64), I64(i64), F32(f32), F64(f64), Bool(bool), String(String), Array(Vec<Value>), } impl Value { pub fn value_type(&self) -> ValueType { match self { Self::U8(_) => ValueType::U8, Self::I8(_) => ValueType::I8, Self::U16(_) => ValueType::U16, Self::I16(_) => ValueType::I16, Self::U32(_) => ValueType::U32, Self::I32(_) => ValueType::I32, Self::U64(_) => ValueType::U64, Self::I64(_) => ValueType::I64, Self::F32(_) => ValueType::F32, Self::F64(_) => ValueType::F64, Self::Bool(_) => ValueType::Bool, Self::String(_) => ValueType::String, Self::Array(_) => ValueType::Array, } } pub fn to_u8(&self) -> Result<u8> { match self { Self::U8(v) => Ok(*v), v => crate::bail!("not a u8 {v:?}"), } } pub fn to_i8(&self) -> Result<i8> { match self { Self::I8(v) => Ok(*v), v => crate::bail!("not a i8 {v:?}"), } } pub fn to_u16(&self) -> Result<u16> { match self { Self::U16(v) => Ok(*v), v => crate::bail!("not a u16 {v:?}"), } } pub fn to_i16(&self) -> Result<i16> { match self { Self::I16(v) => Ok(*v), v => crate::bail!("not a i16 {v:?}"), } } pub fn to_u32(&self) -> Result<u32> { match self { Self::U32(v) => Ok(*v), v => crate::bail!("not a u32 {v:?}"), } } pub fn to_i32(&self) -> Result<i32> { match self { Self::I32(v) => Ok(*v), v => crate::bail!("not a i32 {v:?}"), } } pub fn to_u64(&self) -> Result<u64> { match self { Self::U64(v) => Ok(*v), v => crate::bail!("not a u64 {v:?}"), } } pub fn to_i64(&self) -> Result<i64> { match self { Self::I64(v) => Ok(*v), v => crate::bail!("not a i64 {v:?}"), } } pub fn to_f32(&self) -> Result<f32> { match self { Self::F32(v) => Ok(*v), v => crate::bail!("not a f32 {v:?}"), } } pub fn to_f64(&self) -> Result<f64> { match self { Self::F64(v) => Ok(*v), v => crate::bail!("not a f64 {v:?}"), } } pub fn to_bool(&self) -> Result<bool> { match self { Self::Bool(v) => Ok(*v), v => crate::bail!("not a bool {v:?}"), } } pub fn to_vec(&self) -> Result<&Vec<Value>> { match self { Self::Array(v) => Ok(v), v => crate::bail!("not a vec {v:?}"), } } pub fn to_string(&self) -> Result<&String> { match self { Self::String(v) => Ok(v), v => crate::bail!("not a string {v:?}"), } } fn read<R: std::io::Read>( reader: &mut R, value_type: ValueType, magic: &VersionedMagic, ) -> Result<Self> { let v = match value_type { ValueType::U8 => Self::U8(reader.read_u8()?), ValueType::I8 => Self::I8(reader.read_i8()?), ValueType::U16 => Self::U16(reader.read_u16::<LittleEndian>()?), ValueType::I16 => Self::I16(reader.read_i16::<LittleEndian>()?), ValueType::U32 => Self::U32(reader.read_u32::<LittleEndian>()?), ValueType::I32 => Self::I32(reader.read_i32::<LittleEndian>()?), ValueType::U64 => Self::U64(reader.read_u64::<LittleEndian>()?), ValueType::I64 => Self::I64(reader.read_i64::<LittleEndian>()?), ValueType::F32 => Self::F32(reader.read_f32::<LittleEndian>()?), ValueType::F64 => Self::F64(reader.read_f64::<LittleEndian>()?), ValueType::Bool => match reader.read_u8()?
{ 0 => Self::Bool(false), 1 => Self::Bool(true), b => crate::bail!("unexpected bool value {b}"), }, ValueType::String => Self::String(read_string(reader, magic)?), ValueType::Array => { let value_type = reader.read_u32::<LittleEndian>()?; let value_type = ValueType::from_u32(value_type)?; let len = match magic { VersionedMagic::GgufV1 => reader.read_u32::<LittleEndian>()? as usize, VersionedMagic::GgufV2 | VersionedMagic::GgufV3 => { reader.read_u64::<LittleEndian>()? as usize } }; let mut vs = Vec::with_capacity(len); for _ in 0..len { vs.push(Value::read(reader, value_type, magic)?) } Self::Array(vs) } }; Ok(v) } fn write<W: std::io::Write>(&self, w: &mut W) -> Result<()> { match self { &Self::U8(v) => w.write_u8(v)?, &Self::I8(v) => w.write_i8(v)?, &Self::U16(v) => w.write_u16::<LittleEndian>(v)?, &Self::I16(v) => w.write_i16::<LittleEndian>(v)?, &Self::U32(v) => w.write_u32::<LittleEndian>(v)?, &Self::I32(v) => w.write_i32::<LittleEndian>(v)?, &Self::U64(v) => w.write_u64::<LittleEndian>(v)?, &Self::I64(v) => w.write_i64::<LittleEndian>(v)?, &Self::F32(v) => w.write_f32::<LittleEndian>(v)?, &Self::F64(v) => w.write_f64::<LittleEndian>(v)?, &Self::Bool(v) => w.write_u8(u8::from(v))?, Self::String(v) => write_string(w, v.as_str())?, Self::Array(v) => { // The `Value` type does not enforce that all the values in an Array have the same // type. let value_type = if v.is_empty() { // Doesn't matter, the array is empty. ValueType::U32 } else { let value_type: std::collections::HashSet<_> = v.iter().map(|elem| elem.value_type()).collect(); if value_type.len() != 1 { crate::bail!("multiple value-types in the same array {value_type:?}") } value_type.into_iter().next().unwrap() }; w.write_u32::<LittleEndian>(value_type.to_u32())?; w.write_u64::<LittleEndian>(v.len() as u64)?; for elem in v.iter() { elem.write(w)? } } } Ok(()) } } impl ValueType { fn from_u32(v: u32) -> Result<Self> { let v = match v { 0 => Self::U8, 1 => Self::I8, 2 => Self::U16, 3 => Self::I16, 4 => Self::U32, 5 => Self::I32, 6 => Self::F32, 7 => Self::Bool, 8 => Self::String, 9 => Self::Array, 10 => Self::U64, 11 => Self::I64, 12 => Self::F64, v => crate::bail!("unrecognized value-type {v:#08x}"), }; Ok(v) } fn to_u32(self) -> u32 { match self { Self::U8 => 0, Self::I8 => 1, Self::U16 => 2, Self::I16 => 3, Self::U32 => 4, Self::I32 => 5, Self::F32 => 6, Self::Bool => 7, Self::String => 8, Self::Array => 9, Self::U64 => 10, Self::I64 => 11, Self::F64 => 12, } } } impl Content { pub fn read<R: std::io::Seek + std::io::Read>(reader: &mut R) -> Result<Self> { let magic = VersionedMagic::read(reader)?; let tensor_count = match magic { VersionedMagic::GgufV1 => reader.read_u32::<LittleEndian>()? as usize, VersionedMagic::GgufV2 | VersionedMagic::GgufV3 => { reader.read_u64::<LittleEndian>()? as usize } }; let metadata_kv_count = match magic { VersionedMagic::GgufV1 => reader.read_u32::<LittleEndian>()? as usize, VersionedMagic::GgufV2 | VersionedMagic::GgufV3 => { reader.read_u64::<LittleEndian>()? 
as usize } }; let mut metadata = HashMap::new(); for _idx in 0..metadata_kv_count { let key = read_string(reader, &magic)?; let value_type = reader.read_u32::<LittleEndian>()?; let value_type = ValueType::from_u32(value_type)?; let value = Value::read(reader, value_type, &magic)?; metadata.insert(key, value); } let mut tensor_infos = HashMap::new(); for _idx in 0..tensor_count { let tensor_name = read_string(reader, &magic)?; let n_dimensions = reader.read_u32::<LittleEndian>()?; let mut dimensions: Vec<usize> = match magic { VersionedMagic::GgufV1 => { let mut dimensions = vec![0; n_dimensions as usize]; reader.read_u32_into::<LittleEndian>(&mut dimensions)?; dimensions.into_iter().map(|c| c as usize).collect() } VersionedMagic::GgufV2 | VersionedMagic::GgufV3 => { let mut dimensions = vec![0; n_dimensions as usize]; reader.read_u64_into::<LittleEndian>(&mut dimensions)?; dimensions.into_iter().map(|c| c as usize).collect() } }; dimensions.reverse(); let ggml_dtype = reader.read_u32::<LittleEndian>()?; let ggml_dtype = GgmlDType::from_u32(ggml_dtype)?; let offset = reader.read_u64::<LittleEndian>()?; tensor_infos.insert( tensor_name, TensorInfo { shape: crate::Shape::from(dimensions), offset, ggml_dtype, }, ); } let position = reader.stream_position()?; let alignment = match metadata.get("general.alignment") { Some(Value::U8(v)) => *v as u64, Some(Value::U16(v)) => *v as u64, Some(Value::U32(v)) => *v as u64, Some(Value::I8(v)) if *v >= 0 => *v as u64, Some(Value::I16(v)) if *v >= 0 => *v as u64, Some(Value::I32(v)) if *v >= 0 => *v as u64, _ => DEFAULT_ALIGNMENT, }; let tensor_data_offset = (position + alignment - 1) / alignment * alignment; Ok(Self { magic, metadata, tensor_infos, tensor_data_offset, }) } pub fn tensor<R: std::io::Seek + std::io::Read>( &self, reader: &mut R, name: &str, ) -> Result<QTensor> { let tensor_info = match self.tensor_infos.get(name) { Some(tensor_info) => tensor_info, None => crate::bail!("cannot find tensor info for {name}"), }; tensor_info.read(reader, self.tensor_data_offset) } } fn write_string<W: std::io::Write>(w: &mut W, str: &str) -> Result<()> { let bytes = str.as_bytes(); w.write_u64::<LittleEndian>(bytes.len() as u64)?; w.write_all(bytes)?; Ok(()) } pub fn write<W: std::io::Seek + std::io::Write>( w: &mut W, metadata: &[(&str, &Value)], tensors: &[(&str, &QTensor)], ) -> Result<()> { w.write_u32::<LittleEndian>(0x46554747)?; w.write_u32::<LittleEndian>(2)?; // version 2. w.write_u64::<LittleEndian>(tensors.len() as u64)?; w.write_u64::<LittleEndian>(metadata.len() as u64)?; for (name, value) in metadata.iter() { write_string(w, name)?; w.write_u32::<LittleEndian>(value.value_type().to_u32())?; value.write(w)?; } let mut offset = 0usize; let mut offsets = Vec::with_capacity(tensors.len()); for (name, tensor) in tensors.iter() { write_string(w, name)?; let dims = tensor.shape().dims(); w.write_u32::<LittleEndian>(dims.len() as u32)?; for &dim in dims.iter().rev() { w.write_u64::<LittleEndian>(dim as u64)?; } w.write_u32::<LittleEndian>(tensor.dtype().to_u32())?; w.write_u64::<LittleEndian>(offset as u64)?; offsets.push(offset); let size_in_bytes = tensor.storage_size_in_bytes(); let padding = 31 - (31 + size_in_bytes) % 32; offset += size_in_bytes + padding; } let pos = w.stream_position()? as usize; let padding = 31 - (31 + pos) % 32; w.write_all(&vec![0u8; padding])?; let tensor_start_pos = w.stream_position()? as usize; for (offset, (_name, tensor)) in offsets.iter().zip(tensors.iter()) { let pos = w.stream_position()?
as usize; if tensor_start_pos + offset != pos { crate::bail!( "internal error, unexpected current position {tensor_start_pos} {offset} {pos}" ) } let data_ptr = tensor.as_ptr(); let size_in_bytes = tensor.storage_size_in_bytes(); let data = unsafe { std::slice::from_raw_parts(data_ptr, size_in_bytes) }; w.write_all(data)?; let padding = 31 - (31 + size_in_bytes) % 32; w.write_all(&vec![0u8; padding])?; } Ok(()) }
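// A minimal read-side sketch for this module: open a GGUF file, parse the
// header with `Content::read`, then load one tensor by name. The file path
// and tensor name below are hypothetical placeholders, not real artifacts.
#[cfg(test)]
mod sketch_tests {
    use super::*;

    #[test]
    #[ignore] // Needs a real GGUF file on disk.
    fn read_gguf_sketch() -> crate::Result<()> {
        let mut file = std::fs::File::open("model.gguf")?;
        let content = Content::read(&mut file)?;
        // Dump the metadata keys and the per-tensor shape/dtype info.
        for (key, value) in content.metadata.iter() {
            println!("{key}: {:?}", value.value_type());
        }
        for (name, info) in content.tensor_infos.iter() {
            println!("{name}: {:?} {:?}", info.shape, info.ggml_dtype);
        }
        // Load a single tensor; the name is a placeholder.
        let _qtensor = content.tensor(&mut file, "token_embd.weight")?;
        Ok(())
    }
}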
0
hf_public_repos/candle/candle-core/src
hf_public_repos/candle/candle-core/src/quantized/ggml_file.rs
//! Support for the GGML file format. use super::{k_quants, GgmlDType}; use crate::Result; use byteorder::{LittleEndian, ReadBytesExt}; use std::collections::HashMap; // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/llama.h#L37 #[derive(Debug, Clone, Copy, PartialEq, Eq)] enum Magic { Ggjt, Ggla, Ggmf, Ggml, Ggsn, } impl TryFrom<u32> for Magic { type Error = crate::Error; fn try_from(value: u32) -> Result<Self> { let magic = match value { 0x67676a74 => Self::Ggjt, 0x67676c61 => Self::Ggla, 0x67676d66 => Self::Ggmf, 0x67676d6c => Self::Ggml, 0x6767736e => Self::Ggsn, _ => crate::bail!("unknown magic {value:08x}"), }; Ok(magic) } } #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum VersionedMagic { GgmlUnversioned, GgmfV1, GgjtV1, GgjtV2, GgjtV3, } impl VersionedMagic { fn read<R: std::io::Read>(reader: &mut R) -> Result<Self> { let magic = reader.read_u32::<LittleEndian>()?; let magic = Magic::try_from(magic)?; if magic == Magic::Ggml { return Ok(Self::GgmlUnversioned); } let version = reader.read_u32::<LittleEndian>()?; let versioned_magic = match (magic, version) { (Magic::Ggmf, 1) => Self::GgmfV1, (Magic::Ggjt, 1) => Self::GgjtV1, (Magic::Ggjt, 2) => Self::GgjtV2, (Magic::Ggjt, 3) => Self::GgjtV3, _ => crate::bail!("ggml: unsupported magic/version {magic:?}/{version}"), }; Ok(versioned_magic) } fn align32(&self) -> bool { match self { Self::GgmlUnversioned | Self::GgmfV1 => false, Self::GgjtV1 | Self::GgjtV2 | Self::GgjtV3 => true, } } } #[derive(Debug, Clone, PartialEq, Eq)] pub struct HParams { pub n_vocab: u32, pub n_embd: u32, pub n_mult: u32, pub n_head: u32, pub n_layer: u32, pub n_rot: u32, pub ftype: u32, } impl HParams { fn read<R: std::io::Read>(reader: &mut R) -> Result<Self> { let n_vocab = reader.read_u32::<LittleEndian>()?; let n_embd = reader.read_u32::<LittleEndian>()?; let n_mult = reader.read_u32::<LittleEndian>()?; let n_head = reader.read_u32::<LittleEndian>()?; let n_layer = reader.read_u32::<LittleEndian>()?; let n_rot = reader.read_u32::<LittleEndian>()?; let ftype = reader.read_u32::<LittleEndian>()?; Ok(Self { n_vocab, n_embd, n_mult, n_head, n_layer, n_rot, ftype, }) } } #[derive(Debug, Clone, PartialEq)] pub struct Vocab { pub token_score_pairs: Vec<(Vec<u8>, f32)>, } impl Vocab { fn read<R: std::io::Read>(reader: &mut R, n_vocab: usize) -> Result<Self> { // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/llama.cpp#L556 let mut token_score_pairs = Vec::with_capacity(n_vocab); for _index in 0..n_vocab { let len = reader.read_u32::<LittleEndian>()? as usize; let mut word = vec![0u8; len]; reader.read_exact(&mut word)?; let score = reader.read_f32::<LittleEndian>()?; token_score_pairs.push((word, score)) } Ok(Self { token_score_pairs }) } } fn from_raw_data<T: super::GgmlType + Send + Sync + 'static>( raw_data: &[u8], size_in_bytes: usize, dims: Vec<usize>, ) -> Result<super::QTensor> { let raw_data_ptr = raw_data.as_ptr(); let n_blocks = size_in_bytes / std::mem::size_of::<T>(); let data = unsafe { std::slice::from_raw_parts(raw_data_ptr as *const T, n_blocks) }; super::QTensor::new(data.to_vec(), dims) } /// Creates a [`super::QTensor`] from a raw GGML tensor.
pub fn qtensor_from_ggml( ggml_dtype: GgmlDType, raw_data: &[u8], dims: Vec<usize>, ) -> Result<super::QTensor> { let tensor_elems = dims.iter().product::<usize>(); let blck_size = ggml_dtype.blck_size(); if tensor_elems % blck_size != 0 { crate::bail!( "the number of elements {tensor_elems} is not divisible by the block size {blck_size}" ) } let size_in_bytes = tensor_elems / blck_size * ggml_dtype.type_size(); match ggml_dtype { GgmlDType::F32 => from_raw_data::<f32>(raw_data, size_in_bytes, dims), GgmlDType::F16 => from_raw_data::<half::f16>(raw_data, size_in_bytes, dims), GgmlDType::Q4_0 => from_raw_data::<k_quants::BlockQ4_0>(raw_data, size_in_bytes, dims), GgmlDType::Q4_1 => from_raw_data::<k_quants::BlockQ4_1>(raw_data, size_in_bytes, dims), GgmlDType::Q5_0 => from_raw_data::<k_quants::BlockQ5_0>(raw_data, size_in_bytes, dims), GgmlDType::Q5_1 => from_raw_data::<k_quants::BlockQ5_1>(raw_data, size_in_bytes, dims), GgmlDType::Q8_0 => from_raw_data::<k_quants::BlockQ8_0>(raw_data, size_in_bytes, dims), GgmlDType::Q2K => from_raw_data::<k_quants::BlockQ2K>(raw_data, size_in_bytes, dims), GgmlDType::Q3K => from_raw_data::<k_quants::BlockQ3K>(raw_data, size_in_bytes, dims), GgmlDType::Q4K => from_raw_data::<k_quants::BlockQ4K>(raw_data, size_in_bytes, dims), GgmlDType::Q5K => from_raw_data::<k_quants::BlockQ5K>(raw_data, size_in_bytes, dims), GgmlDType::Q6K => from_raw_data::<k_quants::BlockQ6K>(raw_data, size_in_bytes, dims), _ => crate::bail!("quantized type {ggml_dtype:?} is not supported yet"), } } fn read_one_tensor<R: std::io::Seek + std::io::Read>( reader: &mut R, magic: VersionedMagic, ) -> Result<(String, super::QTensor)> { let n_dims = reader.read_u32::<LittleEndian>()?; let name_len = reader.read_u32::<LittleEndian>()?; let ggml_dtype = reader.read_u32::<LittleEndian>()?; let ggml_dtype = GgmlDType::from_u32(ggml_dtype)?; let mut dims = vec![0u32; n_dims as usize]; reader.read_u32_into::<LittleEndian>(&mut dims)?; // The dimensions are stored in reverse order, see for example: // https://github.com/ggerganov/llama.cpp/blob/b5ffb2849d23afe73647f68eec7b68187af09be6/convert.py#L969 dims.reverse(); let mut name = vec![0u8; name_len as usize]; reader.read_exact(&mut name)?; let name = String::from_utf8_lossy(&name).into_owned(); if magic.align32() { let pos = reader.stream_position()?; reader.seek(std::io::SeekFrom::Current(((32 - pos % 32) % 32) as i64))?; } let dims = dims.iter().map(|&u| u as usize).collect::<Vec<_>>(); let tensor_elems = dims.iter().product::<usize>(); let size_in_bytes = tensor_elems * ggml_dtype.type_size() / ggml_dtype.blck_size(); // TODO: Mmap version to avoid copying the data around? let mut raw_data = vec![0u8; size_in_bytes]; reader.read_exact(&mut raw_data)?; match qtensor_from_ggml(ggml_dtype, &raw_data, dims) { Ok(tensor) => Ok((name, tensor)), Err(e) => crate::bail!("Error creating tensor {name}: {e}"), } } pub struct Content { pub magic: VersionedMagic, pub hparams: HParams, pub vocab: Vocab, pub tensors: HashMap<String, super::QTensor>, } impl Content { pub fn read<R: std::io::Seek + std::io::Read>(reader: &mut R) -> Result<Content> { // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/llama.cpp#L505 let last_position = reader.seek(std::io::SeekFrom::End(0))?; reader.seek(std::io::SeekFrom::Start(0))?; let magic = VersionedMagic::read(reader)?; let hparams = HParams::read(reader)?; let vocab = Vocab::read(reader, hparams.n_vocab as usize)?; let mut tensors = HashMap::new(); while reader.stream_position()? 
!= last_position { let (name, tensor) = read_one_tensor(reader, magic)?; tensors.insert(name, tensor); } Ok(Self { magic, hparams, vocab, tensors, }) } pub fn remove(&mut self, name: &str) -> Result<super::QTensor> { match self.tensors.remove(name) { None => crate::bail!("cannot find tensor with name '{name}'"), Some(tensor) => Ok(tensor), } } }
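// A minimal read-side sketch for this module, mirroring the GGUF one: parse a
// legacy GGML file, inspect the hyper-parameters and vocab, and pull a tensor
// out by name. The file path and tensor name are hypothetical placeholders.
#[cfg(test)]
mod sketch_tests {
    use super::*;

    #[test]
    #[ignore] // Needs a real GGML file on disk.
    fn read_ggml_sketch() -> crate::Result<()> {
        let mut file = std::fs::File::open("model.bin")?;
        let mut content = Content::read(&mut file)?;
        println!(
            "n_vocab: {}, n_layer: {}, tokens: {}",
            content.hparams.n_vocab,
            content.hparams.n_layer,
            content.vocab.token_score_pairs.len()
        );
        // `remove` moves the tensor out of the map, avoiding a copy.
        let _tok_embeddings = content.remove("tok_embeddings.weight")?;
        Ok(())
    }
}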
0
hf_public_repos/candle/candle-core/src
hf_public_repos/candle/candle-core/src/quantized/utils.rs
use crate::Result; pub(super) fn nearest_int(v: f32) -> i32 { v.round() as i32 } /// Validates that the input and output are the right size and returns a vector pairing each /// `T::BLCK_SIZE`-long input region of `xs` with its corresponding output block in `ys`. pub(super) fn group_for_quantization<'a, 'b, T: super::k_quants::GgmlType>( xs: &'b [f32], ys: &'a mut [T], ) -> Result<Vec<(&'a mut T, &'b [f32])>> { let block_size = T::BLCK_SIZE; let dtype = T::DTYPE; let expected_blocks = xs.len() / block_size; let actual_blocks = ys.len(); // Validate that the input is the right size if expected_blocks != actual_blocks { crate::bail!("quantize {dtype:?}: expected {expected_blocks} blocks but only {actual_blocks} were provided!") } Ok(ys.iter_mut().zip(xs.chunks_exact(block_size)).collect()) } /// Validates that the input and output are the right size and returns a vector pairing each /// input block of `xs` with its corresponding `T::BLCK_SIZE`-long output region in `ys`. pub(super) fn group_for_dequantization<'a, 'b, T: super::k_quants::GgmlType>( xs: &'a [T], ys: &'b mut [f32], ) -> Result<Vec<(&'a T, &'b mut [f32])>> { let block_size = T::BLCK_SIZE; let dtype = T::DTYPE; let actual_output_len = ys.len(); let expected_output_len = xs.len() * block_size; // Validate that the output is the right size if expected_output_len != actual_output_len { crate::bail!("dequantize {dtype:?}: ys (len = {actual_output_len}) does not match the expected length of {expected_output_len}!") } // Zip the blocks and outputs together Ok(xs.iter().zip(ys.chunks_exact_mut(block_size)).collect()) } pub(super) fn get_scale_min_k4(j: usize, q: &[u8]) -> (u8, u8) { if j < 4 { let d = q[j] & 63; let m = q[j + 4] & 63; (d, m) } else { let d = (q[j + 4] & 0xF) | ((q[j - 4] >> 6) << 4); let m = (q[j + 4] >> 4) | ((q[j] >> 6) << 4); (d, m) } } pub(super) unsafe fn make_qx_quants( n: usize, nmax: i32, x: *const f32, ls: *mut i8, rmse_type: i32, ) -> f32 { let mut max = 0f32; let mut amax = 0f32; for i in 0..n { let x = *x.add(i); let ax = x.abs(); if ax > amax { amax = ax; max = x; } } if amax == 0.
{ // all zero for i in 0..n { *ls.add(i) = 0; } return 0.; } let mut iscale = -(nmax as f32) / max; if rmse_type == 0 { for i in 0..n { let x = *x.add(i); let l = nearest_int(iscale * x); *ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8; } return 1.0 / iscale; } let weight_type = rmse_type % 2; let mut sumlx = 0f32; let mut suml2 = 0f32; for i in 0..n { let x = *x.add(i); let l = nearest_int(iscale * x); let l = l.clamp(-nmax, nmax - 1); *ls.add(i) = (l + nmax) as i8; let w = if weight_type == 1 { x * x } else { 1.0 }; let l = l as f32; sumlx += w * x * l; suml2 += w * l * l; } let mut scale = sumlx / suml2; let mut best = scale * sumlx; for _itry in 0..3 { let iscale = 1.0 / scale; let mut slx = 0f32; let mut sl2 = 0f32; let mut changed = false; for i in 0..n { let x = *x.add(i); let l = nearest_int(iscale * x); let l = l.clamp(-nmax, nmax - 1); if l + nmax != *ls.add(i) as i32 { changed = true; } let w = if weight_type == 1 { x * x } else { 1f32 }; let l = l as f32; slx += w * x * l; sl2 += w * l * l; } if !changed || sl2 == 0.0 || slx * slx <= best * sl2 { break; } for i in 0..n { let x = *x.add(i); let l = nearest_int(iscale * x); *ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8; } sumlx = slx; suml2 = sl2; scale = sumlx / suml2; best = scale * sumlx; } for _itry in 0..5 { let mut n_changed = 0; for i in 0..n { let x = *x.add(i); let w = if weight_type == 1 { x * x } else { 1. }; let l = *ls.add(i) as i32 - nmax; let mut slx = sumlx - w * x * l as f32; if slx > 0. { let mut sl2 = suml2 - w * l as f32 * l as f32; let new_l = nearest_int(x * sl2 / slx); let new_l = new_l.clamp(-nmax, nmax - 1); if new_l != l { slx += w * x * new_l as f32; sl2 += w * new_l as f32 * new_l as f32; if sl2 > 0. && slx * slx * suml2 > sumlx * sumlx * sl2 { *ls.add(i) = (nmax + new_l) as i8; sumlx = slx; suml2 = sl2; scale = sumlx / suml2; best = scale * sumlx; n_changed += 1; } } } } if n_changed == 0 { break; } } if rmse_type < 3 { return scale; } for is in -4..4 { if is == 0 { continue; } iscale = -(nmax as f32 + 0.1f32 * is as f32) / max; let mut sumlx = 0.; let mut suml2 = 0.; for i in 0..n { let x = *x.add(i); let l = nearest_int(iscale * x); let l = l.clamp(-nmax, nmax - 1); let w = if weight_type == 1 { x * x } else { 1. }; let l = l as f32; sumlx += w * x * l; suml2 += w * l * l; } if suml2 > 0. 
&& sumlx * sumlx > best * suml2 { for i in 0..n { let x = *x.add(i); let l = nearest_int(iscale * x); *ls.add(i) = (nmax + l.clamp(-nmax, nmax - 1)) as i8; } scale = sumlx / suml2; best = scale * sumlx; } } scale } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L224 pub(super) fn make_qkx1_quants(nmax: i32, ntry: usize, x: &[f32]) -> (f32, f32) { let n = x.len(); let mut l = vec![0; n]; // Get min/max let min = *x .iter() .take(n) .min_by(|a, b| a.total_cmp(b)) .unwrap_or(&x[0]); let max = *x.iter().max_by(|a, b| a.total_cmp(b)).unwrap_or(&x[0]); // If min == max, all values are the same => nothing to do here if max == min { return (0.0, 0.0); } // Ensure min <= 0.0 let mut min = min.min(0.); // Compute scale and inverse scale let mut iscale = nmax as f32 / (max - min); let mut scale = 1.0 / iscale; for _ in 0..ntry { let mut sumlx = 0.0; let mut suml2 = 0; let mut did_change = false; for (i, value) in x.iter().enumerate().take(n) { let li = nearest_int(iscale * (value - min)).clamp(0, nmax); let clamped_li = li as u8; if clamped_li != l[i] { l[i] = clamped_li; did_change = true; } sumlx += (value - min) * li as f32; suml2 += li * li; } scale = sumlx / suml2 as f32; let sum: f32 = x .iter() .take(n) .zip(l.iter().take(n)) .map(|(xi, &li)| xi - scale * li as f32) .sum(); min = sum / n as f32; if min > 0.0 { min = 0.0; } iscale = 1.0 / scale; if !did_change { break; } } (scale, -min) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L165 pub(super) fn make_q3_quants(x: &[f32], nmax: i32, do_rmse: bool) -> f32 { let n = x.len(); let mut l = vec![0i8; n]; let mut max = 0.0; let mut amax = 0.0; for &xi in x.iter().take(n) { let ax = xi.abs(); if ax > amax { amax = ax; max = xi; } } if amax == 0.0 { return 0.0; } let iscale = -(nmax as f32) / max; if do_rmse { let mut sumlx = 0.0; let mut suml2 = 0.0; for i in 0..n { let li = (iscale * x[i]).round() as i32; let li = li.clamp(-nmax, nmax - 1); l[i] = li as i8; let w = x[i] * x[i]; sumlx += w * x[i] * li as f32; suml2 += w * (li * li) as f32; } for _ in 0..5 { let mut n_changed = 0; for i in 0..n { let w = x[i] * x[i]; let mut slx = sumlx - w * x[i] * l[i] as f32; if slx > 0.0 { let mut sl2 = suml2 - w * (l[i] as i32 * l[i] as i32) as f32; let mut new_l = (x[i] * sl2 / slx).round() as i32; new_l = new_l.clamp(-nmax, nmax - 1); if new_l != l[i] as i32 { slx += w * x[i] * new_l as f32; sl2 += w * (new_l * new_l) as f32; if sl2 > 0.0 && slx * slx * suml2 > sumlx * sumlx * sl2 { l[i] = new_l as i8; sumlx = slx; suml2 = sl2; n_changed += 1; } } } } if n_changed == 0 { break; } } for li in l.iter_mut() { *li += nmax as i8; } return sumlx / suml2; } for i in 0..n { let li = (iscale * x[i]).round() as i32; l[i] = (li.clamp(-nmax, nmax - 1) + nmax) as i8; } 1.0 / iscale }
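// A small sketch of how the grouping helpers above are used by the `GgmlType`
// implementations: slice an f32 buffer into block-sized chunks paired with
// their output blocks. This is illustrative only and assumes `BlockQ8_0` from
// the sibling `k_quants` module.
#[cfg(test)]
mod sketch_tests {
    use super::super::k_quants::{BlockQ8_0, GgmlType};
    use super::group_for_quantization;

    #[test]
    fn grouping_sketch() -> crate::Result<()> {
        // Two full blocks of input values.
        let xs = vec![0f32; 2 * BlockQ8_0::BLCK_SIZE];
        let mut ys = vec![BlockQ8_0::zeros(); 2];
        let groups = group_for_quantization(&xs, &mut ys)?;
        assert_eq!(groups.len(), 2);
        for (_block, chunk) in groups.iter() {
            // Each input chunk is exactly one block long.
            assert_eq!(chunk.len(), BlockQ8_0::BLCK_SIZE);
        }
        Ok(())
    }
}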
0
hf_public_repos/candle/candle-core/src
hf_public_repos/candle/candle-core/src/quantized/k_quants.rs
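// Quantization roundtrip sketch for the block formats defined below: quantize
// a small f32 buffer to Q8_0, dequantize it back, and check that the error
// stays small. Illustrative only; it relies solely on the `GgmlType` trait and
// `BlockQ8_0` type defined later in this file, and the 0.1 tolerance is an
// assumption chosen loosely for this input range.
#[cfg(test)]
mod roundtrip_sketch {
    use super::{BlockQ8_0, GgmlType};

    #[test]
    fn q8_0_roundtrip() -> crate::Result<()> {
        // One block's worth of values in roughly [0, 4.4].
        let xs: Vec<f32> = (0..BlockQ8_0::BLCK_SIZE).map(|i| i as f32 / 7.0).collect();
        let mut blocks = vec![BlockQ8_0::zeros(); 1];
        BlockQ8_0::from_float(&xs, &mut blocks)?;
        let mut ys = vec![0f32; xs.len()];
        BlockQ8_0::to_float(&blocks, &mut ys)?;
        for (x, y) in xs.iter().zip(ys.iter()) {
            assert!((x - y).abs() < 0.1, "roundtrip error too large: {x} vs {y}");
        }
        Ok(())
    }
}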
use super::utils::{ get_scale_min_k4, group_for_dequantization, group_for_quantization, make_q3_quants, make_qkx1_quants, make_qx_quants, nearest_int, }; use super::GgmlDType; use crate::Result; use byteorder::{ByteOrder, LittleEndian}; use half::f16; use rayon::prelude::*; // Default to QK_K 256 rather than 64. pub const QK_K: usize = 256; pub const K_SCALE_SIZE: usize = 12; pub const QK4_0: usize = 32; pub const QK4_1: usize = 32; pub const QK5_0: usize = 32; pub const QK5_1: usize = 32; pub const QK8_0: usize = 32; pub const QK8_1: usize = 32; pub trait GgmlType: Sized + Clone + Send + Sync { const DTYPE: GgmlDType; const BLCK_SIZE: usize; type VecDotType: GgmlType; // This is only safe for types that include immediate values such as float/int/... fn zeros() -> Self { unsafe { std::mem::MaybeUninit::zeroed().assume_init() } } fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()>; fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()>; /// Dot product used as a building block for quantized mat-mul. /// n is the number of elements to be considered. fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32>; /// Generic implementation of the dot product without simd optimizations. fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32>; } #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ4_0 { pub(crate) d: f16, pub(crate) qs: [u8; QK4_0 / 2], } const _: () = assert!(std::mem::size_of::<BlockQ4_0>() == 18); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ4_1 { pub(crate) d: f16, pub(crate) m: f16, pub(crate) qs: [u8; QK4_1 / 2], } const _: () = assert!(std::mem::size_of::<BlockQ4_1>() == 20); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ5_0 { pub(crate) d: f16, pub(crate) qh: [u8; 4], pub(crate) qs: [u8; QK5_0 / 2], } const _: () = assert!(std::mem::size_of::<BlockQ5_0>() == 22); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ5_1 { pub(crate) d: f16, pub(crate) m: f16, pub(crate) qh: [u8; 4], pub(crate) qs: [u8; QK5_1 / 2], } const _: () = assert!(std::mem::size_of::<BlockQ5_1>() == 24); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ8_0 { pub(crate) d: f16, pub(crate) qs: [i8; QK8_0], } const _: () = assert!(std::mem::size_of::<BlockQ8_0>() == 34); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ8_1 { pub(crate) d: f16, pub(crate) s: f16, pub(crate) qs: [i8; QK8_1], } const _: () = assert!(std::mem::size_of::<BlockQ8_1>() == 36); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ2K { pub(crate) scales: [u8; QK_K / 16], pub(crate) qs: [u8; QK_K / 4], pub(crate) d: f16, pub(crate) dmin: f16, } const _: () = assert!(QK_K / 16 + QK_K / 4 + 2 * 2 == std::mem::size_of::<BlockQ2K>()); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ3K { pub(crate) hmask: [u8; QK_K / 8], pub(crate) qs: [u8; QK_K / 4], pub(crate) scales: [u8; 12], pub(crate) d: f16, } const _: () = assert!(QK_K / 8 + QK_K / 4 + 12 + 2 == std::mem::size_of::<BlockQ3K>()); #[derive(Debug, Clone, PartialEq)] // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/k_quants.h#L82 #[repr(C)] pub struct BlockQ4K { pub(crate) d: f16, pub(crate) dmin: f16, pub(crate) scales: [u8; K_SCALE_SIZE], pub(crate) qs: [u8; QK_K / 2], } const _: () = assert!(QK_K / 2 + K_SCALE_SIZE + 2 * 2 == std::mem::size_of::<BlockQ4K>()); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ5K { pub(crate) d: f16, pub(crate) dmin: f16, pub(crate) scales: [u8; 
K_SCALE_SIZE], pub(crate) qh: [u8; QK_K / 8], pub(crate) qs: [u8; QK_K / 2], } const _: () = assert!(QK_K / 8 + QK_K / 2 + 2 * 2 + K_SCALE_SIZE == std::mem::size_of::<BlockQ5K>()); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ6K { pub(crate) ql: [u8; QK_K / 2], pub(crate) qh: [u8; QK_K / 4], pub(crate) scales: [i8; QK_K / 16], pub(crate) d: f16, } const _: () = assert!(3 * QK_K / 4 + QK_K / 16 + 2 == std::mem::size_of::<BlockQ6K>()); #[derive(Debug, Clone, PartialEq)] #[repr(C)] pub struct BlockQ8K { pub(crate) d: f32, pub(crate) qs: [i8; QK_K], pub(crate) bsums: [i16; QK_K / 16], } const _: () = assert!(4 + QK_K + QK_K / 16 * 2 == std::mem::size_of::<BlockQ8K>()); impl GgmlType for BlockQ4_0 { const DTYPE: GgmlDType = GgmlDType::Q4_0; const BLCK_SIZE: usize = QK4_0; type VecDotType = BlockQ8_0; // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1525 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { let k = ys.len(); let qk = Self::BLCK_SIZE; if k % qk != 0 { crate::bail!("dequantize_row_q4_0: {k} is not divisible by {qk}") } let nb = k / qk; for i in 0..nb { let d = xs[i].d.to_f32(); for j in 0..(qk / 2) { let x0 = (xs[i].qs[j] & 0x0F) as i16 - 8; let x1 = (xs[i].qs[j] >> 4) as i16 - 8; ys[i * qk + j] = (x0 as f32) * d; ys[i * qk + j + qk / 2] = (x1 as f32) * d; } } Ok(()) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { // quantize_row_q4_0 let qk = Self::BLCK_SIZE; let k = xs.len(); if k % qk != 0 { crate::bail!("{k} is not divisible by {}", qk); }; let nb = k / qk; if ys.len() != nb { crate::bail!("size mismatch {} {} {}", xs.len(), ys.len(), qk,) } for (i, ys) in ys.iter_mut().enumerate() { let mut amax = 0f32; let mut max = 0f32; let xs = &xs[i * qk..(i + 1) * qk]; for &x in xs.iter() { if amax < x.abs() { amax = x.abs(); max = x; } } let d = max / -8.0; let id = if d != 0f32 { 1. / d } else { 0. }; ys.d = f16::from_f32(d); for (j, q) in ys.qs.iter_mut().enumerate() { let x0 = xs[j] * id; let x1 = xs[qk / 2 + j] * id; let xi0 = u8::min(15, (x0 + 8.5) as u8); let xi1 = u8::min(15, (x1 + 8.5) as u8); *q = xi0 | (xi1 << 4) } } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/b5ffb2849d23afe73647f68eec7b68187af09be6/ggml.c#L2361C10-L2361C122 #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q4_0_q8_0(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q4_0_q8_0(n, xs, ys); #[cfg(target_feature = "simd128")] return super::simd128::vec_dot_q4_0_q8_0(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q4_0_q8_0: {n} is not divisible by {qk}") } // Generic implementation. 
let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { let mut sum_i = 0; for j in 0..qk / 2 { let v0 = (xs.qs[j] & 0x0F) as i32 - 8; let v1 = (xs.qs[j] >> 4) as i32 - 8; sum_i += v0 * ys.qs[j] as i32 + v1 * ys.qs[j + qk / 2] as i32 } sumf += sum_i as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d) } Ok(sumf) } } impl GgmlType for BlockQ4_1 { const DTYPE: GgmlDType = GgmlDType::Q4_1; const BLCK_SIZE: usize = QK4_1; type VecDotType = BlockQ8_1; fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { // ggml_vec_dot_q4_1_q8_1 let qk = QK8_1; if n % qk != 0 { crate::bail!("vec_dot_q4_1_q8_1: {n} is not divisible by {qk}") } let nb = n / qk; if nb % 2 != 0 { crate::bail!("vec_dot_q4_1_q8_1: {n}, nb is not divisible by 2") } // Generic implementation. let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { let mut sumi = 0i32; for j in 0..qk / 2 { let v0 = xs.qs[j] as i32 & 0x0F; let v1 = xs.qs[j] as i32 >> 4; sumi += (v0 * ys.qs[j] as i32) + (v1 * ys.qs[j + qk / 2] as i32); } sumf += sumi as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d) + f16::to_f32(xs.m) * f16::to_f32(ys.s) } Ok(sumf) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { // quantize_row_q4_1 let qk = Self::BLCK_SIZE; if ys.len() * qk != xs.len() { crate::bail!("size mismatch {} {} {}", xs.len(), ys.len(), qk,) } for (i, ys) in ys.iter_mut().enumerate() { let xs = &xs[i * qk..(i + 1) * qk]; let mut min = f32::INFINITY; let mut max = f32::NEG_INFINITY; for &x in xs.iter() { min = f32::min(x, min); max = f32::max(x, max); } let d = (max - min) / ((1 << 4) - 1) as f32; let id = if d != 0f32 { 1. / d } else { 0. }; ys.d = f16::from_f32(d); ys.m = f16::from_f32(min); for (j, q) in ys.qs.iter_mut().take(qk / 2).enumerate() { let x0 = (xs[j] - min) * id; let x1 = (xs[qk / 2 + j] - min) * id; let xi0 = u8::min(15, (x0 + 0.5) as u8); let xi1 = u8::min(15, (x1 + 0.5) as u8); *q = xi0 | (xi1 << 4); } } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1545 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { let k = ys.len(); if k % QK4_1 != 0 { crate::bail!("dequantize_row_q4_1: {k} is not divisible by {QK4_1}"); } let nb = k / QK4_1; for i in 0..nb { let d = xs[i].d.to_f32(); let m = xs[i].m.to_f32(); for j in 0..(QK4_1 / 2) { let x0 = xs[i].qs[j] & 0x0F; let x1 = xs[i].qs[j] >> 4; ys[i * QK4_1 + j] = (x0 as f32) * d + m; ys[i * QK4_1 + j + QK4_1 / 2] = (x1 as f32) * d + m; } } Ok(()) } } impl GgmlType for BlockQ5_0 { const DTYPE: GgmlDType = GgmlDType::Q5_0; const BLCK_SIZE: usize = QK5_0; type VecDotType = BlockQ8_0; fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { let qk = Self::BLCK_SIZE; if n % Self::BLCK_SIZE != 0 { crate::bail!("vec_dot_q5_0_q8_0: {n} is not divisible by {qk}") } let nb = n / qk; if nb % 2 != 0 { crate::bail!("vec_dot_q5_0_q8_0: {n}, nb is not divisible by 2") } Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(_n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { // Generic implementation. 
let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { let qh = LittleEndian::read_u32(&xs.qh); let mut sumi = 0i32; for j in 0..Self::BLCK_SIZE / 2 { let xh_0 = (((qh & (1u32 << j)) >> j) << 4) as u8; let xh_1 = ((qh & (1u32 << (j + 16))) >> (j + 12)) as u8; let x0 = ((xs.qs[j] & 0x0F) as i32 | xh_0 as i32) - 16; let x1 = ((xs.qs[j] >> 4) as i32 | xh_1 as i32) - 16; sumi += (x0 * ys.qs[j] as i32) + (x1 * ys.qs[j + Self::BLCK_SIZE / 2] as i32); } sumf += sumi as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d) } Ok(sumf) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { // quantize_row_q5_0 let k = xs.len(); if ys.len() * Self::BLCK_SIZE != k { crate::bail!("size mismatch {k} {} {}", ys.len(), Self::BLCK_SIZE) } for (i, ys) in ys.iter_mut().enumerate() { let xs = &xs[i * Self::BLCK_SIZE..(i + 1) * Self::BLCK_SIZE]; let mut amax = 0f32; let mut max = 0f32; for &x in xs.iter() { if amax < x.abs() { amax = x.abs(); max = x; } } let d = max / -16.; let id = if d != 0f32 { 1. / d } else { 0. }; ys.d = f16::from_f32(d); let mut qh = 0u32; for j in 0..Self::BLCK_SIZE / 2 { let x0 = xs[j] * id; let x1 = xs[j + Self::BLCK_SIZE / 2] * id; let xi0 = ((x0 + 16.5) as i8).min(31) as u8; let xi1 = ((x1 + 16.5) as i8).min(31) as u8; ys.qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); qh |= ((xi0 as u32 & 0x10) >> 4) << j; qh |= ((xi1 as u32 & 0x10) >> 4) << (j + Self::BLCK_SIZE / 2); } LittleEndian::write_u32(&mut ys.qh, qh) } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1566 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { let k = ys.len(); if k % QK5_0 != 0 { crate::bail!("dequantize_row_q5_0: {k} is not divisible by {QK5_0}"); } let nb = k / QK5_0; for i in 0..nb { let d = xs[i].d.to_f32(); let qh: u32 = LittleEndian::read_u32(&xs[i].qh); for j in 0..(QK5_0 / 2) { let xh_0 = (((qh >> j) << 4) & 0x10) as u8; let xh_1 = ((qh >> (j + 12)) & 0x10) as u8; let x0 = ((xs[i].qs[j] & 0x0F) | xh_0) as i32 - 16; let x1 = ((xs[i].qs[j] >> 4) | xh_1) as i32 - 16; ys[i * QK5_0 + j] = (x0 as f32) * d; ys[i * QK5_0 + j + QK5_0 / 2] = (x1 as f32) * d; } } Ok(()) } } impl GgmlType for BlockQ5_1 { const DTYPE: GgmlDType = GgmlDType::Q5_1; const BLCK_SIZE: usize = QK5_1; type VecDotType = BlockQ8_1; fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { let qk = Self::BLCK_SIZE; if n % Self::BLCK_SIZE != 0 { crate::bail!("vec_dot_q5_1_q8_1: {n} is not divisible by {qk}") } let nb = n / qk; if nb % 2 != 0 { crate::bail!("vec_dot_q5_1_q8_1: {n}, nb is not divisible by 2") } // Generic implementation. 
let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { let qh = LittleEndian::read_u32(&xs.qh); let mut sumi = 0i32; for j in 0..Self::BLCK_SIZE / 2 { let xh_0 = ((qh >> j) << 4) & 0x10; let xh_1 = (qh >> (j + 12)) & 0x10; let x0 = (xs.qs[j] as i32 & 0xF) | xh_0 as i32; let x1 = (xs.qs[j] as i32 >> 4) | xh_1 as i32; sumi += (x0 * ys.qs[j] as i32) + (x1 * ys.qs[j + Self::BLCK_SIZE / 2] as i32); } sumf += sumi as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d) + f16::to_f32(xs.m) * f16::to_f32(ys.s) } Ok(sumf) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { // quantize_row_q5_1 let qk = Self::BLCK_SIZE; if ys.len() * qk != xs.len() { crate::bail!("size mismatch {} {} {}", xs.len(), ys.len(), qk,) } for (i, ys) in ys.iter_mut().enumerate() { let xs = &xs[i * qk..(i + 1) * qk]; let mut min = f32::INFINITY; let mut max = f32::NEG_INFINITY; for &x in xs.iter() { min = f32::min(x, min); max = f32::max(x, max); } let d = (max - min) / ((1 << 5) - 1) as f32; let id = if d != 0f32 { 1. / d } else { 0. }; ys.d = f16::from_f32(d); ys.m = f16::from_f32(min); let mut qh = 0u32; for (j, q) in ys.qs.iter_mut().take(qk / 2).enumerate() { let x0 = (xs[j] - min) * id; let x1 = (xs[qk / 2 + j] - min) * id; let xi0 = (x0 + 0.5) as u8; let xi1 = (x1 + 0.5) as u8; *q = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4); // get the 5-th bit and store it in qh at the right position qh |= ((xi0 as u32 & 0x10) >> 4) << j; qh |= ((xi1 as u32 & 0x10) >> 4) << (j + qk / 2); } LittleEndian::write_u32(&mut ys.qh, qh); } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1592 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { let k = ys.len(); if k % QK5_1 != 0 { crate::bail!("dequantize_row_q5_1: {k} is not divisible by {QK5_1}"); } let nb = k / QK5_1; for i in 0..nb { let d = xs[i].d.to_f32(); let m = xs[i].m.to_f32(); let qh: u32 = LittleEndian::read_u32(&xs[i].qh); for j in 0..(QK5_1 / 2) { let xh_0 = (((qh >> j) << 4) & 0x10) as u8; let xh_1 = ((qh >> (j + 12)) & 0x10) as u8; let x0 = (xs[i].qs[j] & 0x0F) | xh_0; let x1 = (xs[i].qs[j] >> 4) | xh_1; ys[i * QK5_1 + j] = (x0 as f32) * d + m; ys[i * QK5_1 + j + QK5_1 / 2] = (x1 as f32) * d + m; } } Ok(()) } } impl GgmlType for BlockQ8_0 { const DTYPE: GgmlDType = GgmlDType::Q8_0; const BLCK_SIZE: usize = QK8_0; type VecDotType = BlockQ8_0; // https://github.com/ggerganov/llama.cpp/blob/468ea24fb4633a0d681f7ac84089566c1c6190cb/ggml.c#L1619 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { let k = ys.len(); if k % QK8_0 != 0 { crate::bail!("dequantize_row_q8_0: {k} is not divisible by {QK8_0}"); } let nb = k / QK8_0; for i in 0..nb { let d = xs[i].d.to_f32(); for j in 0..QK8_0 { ys[i * QK8_0 + j] = xs[i].qs[j] as f32 * d; } } Ok(()) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { // quantize_row_q8_0 let k = xs.len(); if k % Self::BLCK_SIZE != 0 { crate::bail!("{k} is not divisible by {}", Self::BLCK_SIZE); }; let nb = k / Self::BLCK_SIZE; if ys.len() != nb { crate::bail!( "size mismatch {} {} {}", xs.len(), ys.len(), Self::BLCK_SIZE ) } for (i, ys) in ys.iter_mut().enumerate() { let mut amax = 0f32; let xs = &xs[i * Self::BLCK_SIZE..(i + 1) * Self::BLCK_SIZE]; for &x in xs.iter() { amax = amax.max(x.abs()) } let d = amax / ((1 << 7) - 1) as f32; let id = if d != 0f32 { 1. / d } else { 0. 
}; ys.d = f16::from_f32(d); for (y, &x) in ys.qs.iter_mut().zip(xs.iter()) { *y = f32::round(x * id) as i8 } } Ok(()) } #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q8_0_q8_0(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q8_0_q8_0(n, xs, ys); #[cfg(target_feature = "simd128")] return super::simd128::vec_dot_q8_0_q8_0(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { let qk = QK8_0; if n % QK8_0 != 0 { crate::bail!("vec_dot_q8_0_q8_0: {n} is not divisible by {qk}") } // Generic implementation. let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { let sum_i = xs .qs .iter() .zip(ys.qs.iter()) .map(|(&x, &y)| x as i32 * y as i32) .sum::<i32>(); sumf += sum_i as f32 * f16::to_f32(xs.d) * f16::to_f32(ys.d) } Ok(sumf) } } impl GgmlType for BlockQ8_1 { const DTYPE: GgmlDType = GgmlDType::Q8_1; const BLCK_SIZE: usize = QK8_1; type VecDotType = BlockQ8_1; fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(_n: usize, _xs: &[Self], _ys: &[Self::VecDotType]) -> Result<f32> { unimplemented!("no support for vec-dot on Q8_1") } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { // quantize_row_q8_1 let k = xs.len(); if ys.len() * Self::BLCK_SIZE != k { crate::bail!("size mismatch {k} {} {}", ys.len(), Self::BLCK_SIZE) } for (i, ys) in ys.iter_mut().enumerate() { let mut amax = 0f32; let xs = &xs[i * Self::BLCK_SIZE..(i + 1) * Self::BLCK_SIZE]; for &x in xs.iter() { amax = amax.max(x.abs()) } let d = amax / ((1 << 7) - 1) as f32; let id = if d != 0f32 { 1. / d } else { 0. 
}; ys.d = f16::from_f32(d); let mut sum = 0i32; for j in 0..Self::BLCK_SIZE / 2 { let v0 = xs[j] * id; let v1 = xs[j + Self::BLCK_SIZE / 2] * id; ys.qs[j] = f32::round(v0) as i8; ys.qs[j + Self::BLCK_SIZE / 2] = f32::round(v1) as i8; sum += ys.qs[j] as i32 + ys.qs[j + Self::BLCK_SIZE / 2] as i32; } ys.s = f16::from_f32(sum as f32) * ys.d; } Ok(()) } fn to_float(_xs: &[Self], _ys: &mut [f32]) -> Result<()> { unimplemented!("no support for vec-dot on Q8_1") } } impl GgmlType for BlockQ2K { const DTYPE: GgmlDType = GgmlDType::Q2K; const BLCK_SIZE: usize = QK_K; type VecDotType = BlockQ8K; #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q2k_q8k(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q2k_q8k(n, xs, ys); #[cfg(target_feature = "simd128")] return super::simd128::vec_dot_q2k_q8k(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q2k_q8k: {n} is not divisible by {QK_K}") } let mut sumf = 0.0; for (x, y) in xs.iter().zip(ys.iter()) { let mut q2: &[_] = &x.qs; let mut q8: &[_] = &y.qs; let sc = &x.scales; let mut summs = 0; for (bsum, scale) in y.bsums.iter().zip(sc) { summs += *bsum as i32 * ((scale >> 4) as i32); } let dall = y.d * x.d.to_f32(); let dmin = y.d * x.dmin.to_f32(); let mut isum = 0; let mut is = 0; for _ in 0..(QK_K / 128) { let mut shift = 0; for _ in 0..4 { let d = (sc[is] & 0xF) as i32; is += 1; let mut isuml = 0; for l in 0..16 { isuml += q8[l] as i32 * (((q2[l] >> shift) & 3) as i32); } isum += d * isuml; let d = (sc[is] & 0xF) as i32; is += 1; isuml = 0; for l in 16..32 { isuml += q8[l] as i32 * (((q2[l] >> shift) & 3) as i32); } isum += d * isuml; shift += 2; // adjust the indexing q8 = &q8[32..]; } // adjust the indexing q2 = &q2[32..]; } sumf += dall * isum as f32 - dmin * summs as f32; } Ok(sumf) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L279 fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { const Q4SCALE: f32 = 15.0; for (block, x) in group_for_quantization(xs, ys)? 
{ //calculate scales and mins let mut mins: [f32; QK_K / 16] = [0.0; QK_K / 16]; let mut scales: [f32; QK_K / 16] = [0.0; QK_K / 16]; for (j, x_scale_slice) in x.chunks(16).enumerate() { (scales[j], mins[j]) = make_qkx1_quants(3, 5, x_scale_slice); } // get max scale and max min and ensure they are >= 0.0 let max_scale = scales.iter().fold(0.0, |max, &val| val.max(max)); let max_min = mins.iter().fold(0.0, |max, &val| val.max(max)); if max_scale > 0.0 { let iscale = Q4SCALE / max_scale; for (j, scale) in scales.iter().enumerate().take(QK_K / 16) { block.scales[j] = nearest_int(iscale * scale) as u8; } block.d = f16::from_f32(max_scale / Q4SCALE); } else { for j in 0..QK_K / 16 { block.scales[j] = 0; } block.d = f16::from_f32(0.0); } if max_min > 0.0 { let iscale = Q4SCALE / max_min; for (j, scale) in block.scales.iter_mut().enumerate() { let l = nearest_int(iscale * mins[j]) as u8; *scale |= l << 4; } block.dmin = f16::from_f32(max_min / Q4SCALE); } else { block.dmin = f16::from_f32(0.0); } let mut big_l: [u8; QK_K] = [0; QK_K]; for j in 0..QK_K / 16 { let d = block.d.to_f32() * (block.scales[j] & 0xF) as f32; if d == 0.0 { continue; } let dm = block.dmin.to_f32() * (block.scales[j] >> 4) as f32; for ii in 0..16 { let ll = nearest_int((x[16 * j + ii] + dm) / d).clamp(0, 3); big_l[16 * j + ii] = ll as u8; } } for j in (0..QK_K).step_by(128) { for ll in 0..32 { block.qs[j / 4 + ll] = big_l[j + ll] | (big_l[j + ll + 32] << 2) | (big_l[j + ll + 64] << 4) | (big_l[j + ll + 96] << 6); } } } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L354 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { for (block, y) in group_for_dequantization(xs, ys)? { let d = block.d.to_f32(); let min = block.dmin.to_f32(); let mut is = 0; for (y_block, qs) in y.chunks_exact_mut(128).zip(block.qs.chunks_exact(32)) { // Step by 32 over q. let mut shift = 0; let mut y_block_index = 0; for _j in 0..4 { let sc = block.scales[is]; is += 1; let dl = d * (sc & 0xF) as f32; let ml = min * (sc >> 4) as f32; for q in &qs[..16] { let y = dl * ((q >> shift) & 3) as f32 - ml; y_block[y_block_index] = y; y_block_index += 1; } let sc = block.scales[is]; is += 1; let dl = d * (sc & 0xF) as f32; let ml = min * (sc >> 4) as f32; for q in &qs[16..] { let y = dl * ((q >> shift) & 3) as f32 - ml; y_block[y_block_index] = y; y_block_index += 1; } shift += 2; } } } Ok(()) } } impl GgmlType for BlockQ3K { const DTYPE: GgmlDType = GgmlDType::Q3K; const BLCK_SIZE: usize = QK_K; type VecDotType = BlockQ8K; #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q3k_q8k(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q3k_q8k(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q3k_q8k: {n} is not divisible by {QK_K}") } const KMASK1: u32 = 0x03030303; const KMASK2: u32 = 0x0f0f0f0f; let mut aux8: [i8; QK_K] = [0; QK_K]; let mut aux16: [i16; 8] = [0; 8]; let mut sums: [f32; 8] = [0.0; 8]; let mut aux32: [i32; 8] = [0; 8]; let mut auxs: [u32; 4] = [0; 4]; for (x, y) in xs.iter().zip(ys.iter()) { let mut q3: &[u8] = &x.qs; let hmask: &[u8] = &x.hmask; let mut q8: &[i8] = &y.qs; aux32.fill(0); let mut a = &mut aux8[..]; let mut m = 1; //Like the GGML original this is written this way to enable the compiler to vectorize it. 
for _ in 0..QK_K / 128 { a.iter_mut() .take(32) .zip(q3) .for_each(|(a_val, q3_val)| *a_val = (q3_val & 3) as i8); a.iter_mut() .take(32) .zip(hmask) .for_each(|(a_val, hmask_val)| { *a_val -= if hmask_val & m != 0 { 0 } else { 4 } }); a = &mut a[32..]; m <<= 1; a.iter_mut() .take(32) .zip(q3) .for_each(|(a_val, q3_val)| *a_val = ((q3_val >> 2) & 3) as i8); a.iter_mut() .take(32) .zip(hmask) .for_each(|(a_val, hmask_val)| { *a_val -= if hmask_val & m != 0 { 0 } else { 4 } }); a = &mut a[32..]; m <<= 1; a.iter_mut() .take(32) .zip(q3) .for_each(|(a_val, q3_val)| *a_val = ((q3_val >> 4) & 3) as i8); a.iter_mut() .take(32) .zip(hmask) .for_each(|(a_val, hmask_val)| { *a_val -= if hmask_val & m != 0 { 0 } else { 4 } }); a = &mut a[32..]; m <<= 1; a.iter_mut() .take(32) .zip(q3) .for_each(|(a_val, q3_val)| *a_val = ((q3_val >> 6) & 3) as i8); a.iter_mut() .take(32) .zip(hmask) .for_each(|(a_val, hmask_val)| { *a_val -= if hmask_val & m != 0 { 0 } else { 4 } }); a = &mut a[32..]; m <<= 1; q3 = &q3[32..]; } a = &mut aux8[..]; LittleEndian::read_u32_into(&x.scales, &mut auxs[0..3]); let tmp = auxs[2]; auxs[2] = ((auxs[0] >> 4) & KMASK2) | (((tmp >> 4) & KMASK1) << 4); auxs[3] = ((auxs[1] >> 4) & KMASK2) | (((tmp >> 6) & KMASK1) << 4); auxs[0] = (auxs[0] & KMASK2) | (((tmp) & KMASK1) << 4); auxs[1] = (auxs[1] & KMASK2) | (((tmp >> 2) & KMASK1) << 4); for aux in auxs { for scale in aux.to_le_bytes() { let scale = i8::from_be_bytes([scale]); for l in 0..8 { aux16[l] = q8[l] as i16 * a[l] as i16; } for l in 0..8 { aux32[l] += (scale as i32 - 32) * aux16[l] as i32; } q8 = &q8[8..]; a = &mut a[8..]; for l in 0..8 { aux16[l] = q8[l] as i16 * a[l] as i16; } for l in 0..8 { aux32[l] += (scale as i32 - 32) * aux16[l] as i32; } q8 = &q8[8..]; a = &mut a[8..]; } } let d = x.d.to_f32() * y.d; for l in 0..8 { sums[l] += d * aux32[l] as f32; } } Ok(sums.iter().sum()) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { for (block, x) in group_for_quantization(xs, ys)? { let mut scales: [f32; QK_K / 16] = [0.0; QK_K / 16]; for (j, x_scale_slice) in x.chunks_exact(16).enumerate() { scales[j] = make_q3_quants(x_scale_slice, 4, true); } // Get max scale by absolute value. 
let mut max_scale: f32 = 0.0; for &scale in scales.iter() { if scale.abs() > max_scale.abs() { max_scale = scale; } } block.scales.fill(0); if max_scale != 0.0 { let iscale = -32.0 / max_scale; for (j, scale) in scales.iter().enumerate() { let l_val = nearest_int(iscale * scale); let l_val = l_val.clamp(-32, 31) + 32; if j < 8 { block.scales[j] = (l_val & 0xF) as u8; } else { block.scales[j - 8] |= ((l_val & 0xF) << 4) as u8; } let l_val = l_val >> 4; block.scales[j % 4 + 8] |= (l_val << (2 * (j / 4))) as u8; } block.d = f16::from_f32(1.0 / iscale); } else { block.d = f16::from_f32(0.0); } let mut l: [i8; QK_K] = [0; QK_K]; for j in 0..QK_K / 16 { let sc = if j < 8 { block.scales[j] & 0xF } else { block.scales[j - 8] >> 4 }; let sc = (sc | (((block.scales[8 + j % 4] >> (2 * (j / 4))) & 3) << 4)) as i8 - 32; let d = block.d.to_f32() * sc as f32; if d != 0.0 { for ii in 0..16 { let l_val = nearest_int(x[16 * j + ii] / d); l[16 * j + ii] = (l_val.clamp(-4, 3) + 4) as i8; } } } block.hmask.fill(0); let mut m = 0; let mut hm = 1; for ll in l.iter_mut() { if *ll > 3 { block.hmask[m] |= hm; *ll -= 4; } m += 1; if m == QK_K / 8 { m = 0; hm <<= 1; } } for j in (0..QK_K).step_by(128) { for l_val in 0..32 { block.qs[j / 4 + l_val] = (l[j + l_val] | (l[j + l_val + 32] << 2) | (l[j + l_val + 64] << 4) | (l[j + l_val + 96] << 6)) as u8; } } } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L533 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { const KMASK1: u32 = 0x03030303; const KMASK2: u32 = 0x0f0f0f0f; for (block, y) in group_for_dequantization(xs, ys)? { //Reconstruct the scales let mut aux = [0; 4]; LittleEndian::read_u32_into(&block.scales, &mut aux[0..3]); let tmp = aux[2]; aux[2] = ((aux[0] >> 4) & KMASK2) | (((tmp >> 4) & KMASK1) << 4); aux[3] = ((aux[1] >> 4) & KMASK2) | (((tmp >> 6) & KMASK1) << 4); aux[0] = (aux[0] & KMASK2) | (((tmp) & KMASK1) << 4); aux[1] = (aux[1] & KMASK2) | (((tmp >> 2) & KMASK1) << 4); //Transfer the scales into an i8 array let scales: &mut [i8] = unsafe { std::slice::from_raw_parts_mut(aux.as_mut_ptr() as *mut i8, 16) }; let d_all = block.d.to_f32(); let mut m = 1; let mut is = 0; // Dequantize both 128 long blocks // 32 qs values per 128 long block // Each 16 elements get a scale for (y, qs) in y.chunks_exact_mut(128).zip(block.qs.chunks_exact(32)) { let mut shift = 0; for shift_scoped_y in y.chunks_exact_mut(32) { for (scale_index, scale_scoped_y) in shift_scoped_y.chunks_exact_mut(16).enumerate() { let dl = d_all * (scales[is] as f32 - 32.0); for (i, inner_y) in scale_scoped_y.iter_mut().enumerate() { let new_y = dl * (((qs[i + 16 * scale_index] >> shift) & 3) as i8 - if (block.hmask[i + 16 * scale_index] & m) == 0 { 4 } else { 0 }) as f32; *inner_y = new_y; } // 16 block finished => advance scale index is += 1; } // 32 block finished => increase shift and m shift += 2; m <<= 1; } } } Ok(()) } } impl GgmlType for BlockQ4K { const DTYPE: GgmlDType = GgmlDType::Q4K; const BLCK_SIZE: usize = QK_K; type VecDotType = BlockQ8K; #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q4k_q8k(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q4k_q8k(n, xs, ys); #[cfg(target_feature = "simd128")] return super::simd128::vec_dot_q4k_q8k(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { if n % QK_K != 0 { 
crate::bail!("vec_dot_q4k_q8k: {n} is not divisible by {QK_K}") } const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; let mut utmp: [u32; 4] = [0; 4]; let mut scales: [u8; 8] = [0; 8]; let mut mins: [u8; 8] = [0; 8]; let mut aux8: [i8; QK_K] = [0; QK_K]; let mut aux16: [i16; 8] = [0; 8]; let mut sums: [f32; 8] = [0.0; 8]; let mut aux32: [i32; 8] = [0; 8]; let mut sumf = 0.0; for (y, x) in ys.iter().zip(xs.iter()) { let q4 = &x.qs; let q8 = &y.qs; aux32.fill(0); let mut a = &mut aux8[..]; let mut q4 = &q4[..]; for _ in 0..QK_K / 64 { for l in 0..32 { a[l] = (q4[l] & 0xF) as i8; } a = &mut a[32..]; for l in 0..32 { a[l] = (q4[l] >> 4) as i8; } a = &mut a[32..]; q4 = &q4[32..]; } LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; //extract scales and mins LittleEndian::write_u32_into(&utmp[0..2], &mut scales); LittleEndian::write_u32_into(&utmp[2..4], &mut mins); let mut sumi = 0; for j in 0..QK_K / 16 { sumi += y.bsums[j] as i32 * mins[j / 2] as i32; } let mut a = &mut aux8[..]; let mut q8 = &q8[..]; for scale in scales { let scale = scale as i32; for _ in 0..4 { for l in 0..8 { aux16[l] = q8[l] as i16 * a[l] as i16; } for l in 0..8 { aux32[l] += scale * aux16[l] as i32; } q8 = &q8[8..]; a = &mut a[8..]; } } let d = x.d.to_f32() * y.d; for l in 0..8 { sums[l] += d * aux32[l] as f32; } let dmin = x.dmin.to_f32() * y.d; sumf -= dmin * sumi as f32; } Ok(sumf + sums.iter().sum::<f32>()) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { for (block, x) in group_for_quantization(xs, ys)? { let mut mins: [f32; QK_K / 32] = [0.0; QK_K / 32]; let mut scales: [f32; QK_K / 32] = [0.0; QK_K / 32]; for (j, x_scale_slice) in x.chunks_exact(32).enumerate() { (scales[j], mins[j]) = make_qkx1_quants(15, 5, x_scale_slice); } // get max scale and max min and ensure they are >= 0.0 let max_scale = scales.iter().fold(0.0, |max, &val| val.max(max)); let max_min = mins.iter().fold(0.0, |max, &val| val.max(max)); let inv_scale = if max_scale > 0.0 { 63.0 / max_scale } else { 0.0 }; let inv_min = if max_min > 0.0 { 63.0 / max_min } else { 0.0 }; for j in 0..QK_K / 32 { let ls = nearest_int(inv_scale * scales[j]).min(63) as u8; let lm = nearest_int(inv_min * mins[j]).min(63) as u8; if j < 4 { block.scales[j] = ls; block.scales[j + 4] = lm; } else { block.scales[j + 4] = (ls & 0xF) | ((lm & 0xF) << 4); block.scales[j - 4] |= (ls >> 4) << 6; block.scales[j] |= (lm >> 4) << 6; } } block.d = f16::from_f32(max_scale / 63.0); block.dmin = f16::from_f32(max_min / 63.0); let mut l: [u8; QK_K] = [0; QK_K]; for j in 0..QK_K / 32 { let (sc, m) = get_scale_min_k4(j, &block.scales); let d = block.d.to_f32() * sc as f32; if d != 0.0 { let dm = block.dmin.to_f32() * m as f32; for ii in 0..32 { let l_val = nearest_int((x[32 * j + ii] + dm) / d); l[32 * j + ii] = l_val.clamp(0, 15) as u8; } } } let q = &mut block.qs; for j in (0..QK_K).step_by(64) { for l_val in 0..32 { let offset_index = (j / 64) * 32 + l_val; q[offset_index] = l[j + l_val] | (l[j + l_val + 32] << 4); } } } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L735 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { for (block, y) in group_for_dequantization(xs, ys)? 
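// Q4K layout note: each `qs` byte holds two 4-bit quants; every 32-value half
// of a 64-value chunk uses its own (scale, min) pair decoded via
// `get_scale_min_k4`, so each output is y = d * sc * q - dmin * m.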
{ let d = block.d.to_f32(); let min = block.dmin.to_f32(); let q = &block.qs; let mut is = 0; let mut ys_index = 0; for j in (0..QK_K).step_by(64) { let q = &q[j / 2..j / 2 + 32]; let (sc, m) = get_scale_min_k4(is, &block.scales); let d1 = d * sc as f32; let m1 = min * m as f32; let (sc, m) = get_scale_min_k4(is + 1, &block.scales); let d2 = d * sc as f32; let m2 = min * m as f32; for q in q { y[ys_index] = d1 * (q & 0xF) as f32 - m1; ys_index += 1; } for q in q { y[ys_index] = d2 * (q >> 4) as f32 - m2; ys_index += 1; } is += 2; } } Ok(()) } } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L928 impl GgmlType for BlockQ5K { const DTYPE: GgmlDType = GgmlDType::Q5K; const BLCK_SIZE: usize = QK_K; type VecDotType = BlockQ8K; #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q5k_q8k(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q5k_q8k(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { if n % QK_K != 0 { crate::bail!("vec_dot_q5k_q8k: {n} is not divisible by {QK_K}") } const KMASK1: u32 = 0x3f3f3f3f; const KMASK2: u32 = 0x0f0f0f0f; const KMASK3: u32 = 0x03030303; let mut utmp: [u32; 4] = [0; 4]; let mut scales: [u8; 8] = [0; 8]; let mut mins: [u8; 8] = [0; 8]; let mut aux8: [i8; QK_K] = [0; QK_K]; let mut aux16: [i16; 8] = [0; 8]; let mut sums: [f32; 8] = [0.0; 8]; let mut aux32: [i32; 8] = [0; 8]; let mut sumf = 0.0; for (y, x) in ys.iter().zip(xs.iter()) { let q5 = &x.qs; let hm = &x.qh; let q8 = &y.qs; aux32.fill(0); let mut a = &mut aux8[..]; let mut q5 = &q5[..]; let mut m = 1u8; for _ in 0..QK_K / 64 { for l in 0..32 { a[l] = (q5[l] & 0xF) as i8; a[l] += if hm[l] & m != 0 { 16 } else { 0 }; } a = &mut a[32..]; m <<= 1; for l in 0..32 { a[l] = (q5[l] >> 4) as i8; a[l] += if hm[l] & m != 0 { 16 } else { 0 }; } a = &mut a[32..]; m <<= 1; q5 = &q5[32..]; } LittleEndian::read_u32_into(&x.scales, &mut utmp[0..3]); utmp[3] = ((utmp[2] >> 4) & KMASK2) | (((utmp[1] >> 6) & KMASK3) << 4); let uaux = utmp[1] & KMASK1; utmp[1] = (utmp[2] & KMASK2) | (((utmp[0] >> 6) & KMASK3) << 4); utmp[2] = uaux; utmp[0] &= KMASK1; //extract scales and mins LittleEndian::write_u32_into(&utmp[0..2], &mut scales); LittleEndian::write_u32_into(&utmp[2..4], &mut mins); let mut sumi = 0; for j in 0..QK_K / 16 { sumi += y.bsums[j] as i32 * mins[j / 2] as i32; } let mut a = &mut aux8[..]; let mut q8 = &q8[..]; for scale in scales { let scale = scale as i32; for _ in 0..4 { for l in 0..8 { aux16[l] = q8[l] as i16 * a[l] as i16; } for l in 0..8 { aux32[l] += scale * aux16[l] as i32; } q8 = &q8[8..]; a = &mut a[8..]; } } let d = x.d.to_f32() * y.d; for l in 0..8 { sums[l] += d * aux32[l] as f32; } let dmin = x.dmin.to_f32() * y.d; sumf -= dmin * sumi as f32; } Ok(sumf + sums.iter().sum::<f32>()) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L793 fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { for (block, x) in group_for_quantization(xs, ys)? 
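// Q5K quantization note: per-32-value (scale, min) pairs are reduced to 6
// bits, the low 4 bits of each quant go into `qs`, and the 5th bit is
// collected into the `qh` bitplanes via the m1/m2 masks below.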
{ let mut mins: [f32; QK_K / 32] = [0.0; QK_K / 32]; let mut scales: [f32; QK_K / 32] = [0.0; QK_K / 32]; for (j, x_scale_slice) in x.chunks_exact(32).enumerate() { (scales[j], mins[j]) = make_qkx1_quants(31, 5, x_scale_slice); } // get max scale and max min and ensure they are >= 0.0 let max_scale = scales.iter().fold(0.0, |max, &val| val.max(max)); let max_min = mins.iter().fold(0.0, |max, &val| val.max(max)); let inv_scale = if max_scale > 0.0 { 63.0 / max_scale } else { 0.0 }; let inv_min = if max_min > 0.0 { 63.0 / max_min } else { 0.0 }; for j in 0..QK_K / 32 { let ls = nearest_int(inv_scale * scales[j]).min(63) as u8; let lm = nearest_int(inv_min * mins[j]).min(63) as u8; if j < 4 { block.scales[j] = ls; block.scales[j + 4] = lm; } else { block.scales[j + 4] = (ls & 0xF) | ((lm & 0xF) << 4); block.scales[j - 4] |= (ls >> 4) << 6; block.scales[j] |= (lm >> 4) << 6; } } block.d = f16::from_f32(max_scale / 63.0); block.dmin = f16::from_f32(max_min / 63.0); let mut l: [u8; QK_K] = [0; QK_K]; for j in 0..QK_K / 32 { let (sc, m) = get_scale_min_k4(j, &block.scales); let d = block.d.to_f32() * sc as f32; if d == 0.0 { continue; } let dm = block.dmin.to_f32() * m as f32; for ii in 0..32 { let ll = nearest_int((x[32 * j + ii] + dm) / d); l[32 * j + ii] = ll.clamp(0, 31) as u8; } } let qh = &mut block.qh; let ql = &mut block.qs; qh.fill(0); let mut m1 = 1; let mut m2 = 2; for n in (0..QK_K).step_by(64) { let offset = (n / 64) * 32; for j in 0..32 { let mut l1 = l[n + j]; if l1 > 15 { l1 -= 16; qh[j] |= m1; } let mut l2 = l[n + j + 32]; if l2 > 15 { l2 -= 16; qh[j] |= m2; } ql[offset + j] = l1 | (l2 << 4); } m1 <<= 2; m2 <<= 2; } } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L928 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { for (block, y) in group_for_dequantization(xs, ys)? 
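// Q5K dequantization note: the 4-bit low quants in `qs` are recombined with
// the 5th bit from `qh` (worth +16) before applying the per-chunk scale and
// min, mirroring the packing done in `from_float` above.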
        {
            let d = block.d.to_f32();
            let min = block.dmin.to_f32();
            let ql = &block.qs;
            let qh = &block.qh;

            let mut is = 0;
            let mut u1 = 1;
            let mut u2 = 2;
            let mut ys_index = 0;

            for j in (0..QK_K).step_by(64) {
                let ql = &ql[j / 2..j / 2 + 32];
                let (sc, m) = get_scale_min_k4(is, &block.scales);
                let d1 = d * sc as f32;
                let m1 = min * m as f32;
                let (sc, m) = get_scale_min_k4(is + 1, &block.scales);
                let d2 = d * sc as f32;
                let m2 = min * m as f32;
                for (ql, qh) in ql.iter().zip(qh) {
                    // The high bit from `qh` contributes 16, undoing the `l1 -= 16`
                    // from quantization; when the bit is clear nothing is added.
                    let to_add = if qh & u1 != 0 { 16 } else { 0 };
                    y[ys_index] = d1 * ((ql & 0xF) + to_add) as f32 - m1;
                    ys_index += 1;
                }
                for (ql, qh) in ql.iter().zip(qh) {
                    let to_add = if qh & u2 != 0 { 16 } else { 0 };
                    y[ys_index] = d2 * ((ql >> 4) + to_add) as f32 - m2;
                    ys_index += 1;
                }
                is += 2;
                u1 <<= 2;
                u2 <<= 2;
            }
        }
        Ok(())
    }
}

impl GgmlType for BlockQ6K {
    const DTYPE: GgmlDType = GgmlDType::Q6K;
    const BLCK_SIZE: usize = QK_K;
    type VecDotType = BlockQ8K;

    #[allow(unreachable_code)]
    fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
        #[cfg(target_feature = "avx")]
        return super::avx::vec_dot_q6k_q8k(n, xs, ys);

        #[cfg(target_feature = "neon")]
        return super::neon::vec_dot_q6k_q8k(n, xs, ys);

        #[cfg(target_feature = "simd128")]
        return super::simd128::vec_dot_q6k_q8k(n, xs, ys);

        Self::vec_dot_unopt(n, xs, ys)
    }

    fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> {
        if n % QK_K != 0 {
            crate::bail!("vec_dot_q6k_q8k: {n} is not divisible by {QK_K}")
        }

        let mut aux8 = [0i8; QK_K];
        let mut aux16 = [0i16; 8];
        let mut sums = [0f32; 8];
        let mut aux32 = [0f32; 8];

        for (x, y) in xs.iter().zip(ys.iter()) {
            let q4 = &x.ql;
            let qh = &x.qh;
            let q8 = &y.qs;
            aux32.fill(0f32);

            for j in (0..QK_K).step_by(128) {
                let aux8 = &mut aux8[j..];
                let q4 = &q4[j / 2..];
                let qh = &qh[j / 4..];
                for l in 0..32 {
                    aux8[l] = (((q4[l] & 0xF) | ((qh[l] & 3) << 4)) as i32 - 32) as i8;
                    aux8[l + 32] =
                        (((q4[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) as i32 - 32) as i8;
                    aux8[l + 64] = (((q4[l] >> 4) | (((qh[l] >> 4) & 3) << 4)) as i32 - 32) as i8;
                    aux8[l + 96] =
                        (((q4[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) as i32 - 32) as i8;
                }
            }

            for (j, &scale) in x.scales.iter().enumerate() {
                let scale = scale as f32;
                let q8 = &q8[16 * j..];
                let aux8 = &aux8[16 * j..];
                for l in 0..8 {
                    aux16[l] = q8[l] as i16 * aux8[l] as i16;
                }
                for l in 0..8 {
                    aux32[l] += scale * aux16[l] as f32
                }
                let q8 = &q8[8..];
                let aux8 = &aux8[8..];
                for l in 0..8 {
                    aux16[l] = q8[l] as i16 * aux8[l] as i16;
                }
                for l in 0..8 {
                    aux32[l] += scale * aux16[l] as f32
                }
            }

            let d = x.d.to_f32() * y.d;
            for (sum, &a) in sums.iter_mut().zip(aux32.iter()) {
                *sum += a * d;
            }
        }
        Ok(sums.iter().sum())
    }

    fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> {
        if xs.len() != ys.len() * Self::BLCK_SIZE {
            crate::bail!(
                "quantize_row_q6k: size mismatch {} {} {}",
                xs.len(),
                ys.len(),
                Self::BLCK_SIZE
            )
        }
        let mut l = [0i8; QK_K];
        let mut scales = [0f32; QK_K / 16];
        let mut x = xs.as_ptr();
        let l = l.as_mut_ptr();
        unsafe {
            for y in ys.iter_mut() {
                let mut max_scale = 0f32;
                let mut max_abs_scale = 0f32;
                for (ib, scale_) in scales.iter_mut().enumerate() {
                    let scale = make_qx_quants(16, 32, x.add(16 * ib), l.add(16 * ib), 1);
                    *scale_ = scale;
                    let abs_scale = scale.abs();
                    if abs_scale > max_abs_scale {
                        max_abs_scale = abs_scale;
                        max_scale = scale
                    }
                }

                let iscale = -128f32 / max_scale;
                y.d = f16::from_f32(1.0 / iscale);

                for (y_scale, scale) in y.scales.iter_mut().zip(scales.iter()) {
                    *y_scale = nearest_int(iscale * scale).min(127) as i8
                }

                for (j, &y_scale) in y.scales.iter().enumerate() {
                    let d =
y.d.to_f32() * y_scale as f32; if d == 0. { continue; } for ii in 0..16 { let ll = nearest_int(*x.add(16 * j + ii) / d).clamp(-32, 31); *l.add(16 * j + ii) = (ll + 32) as i8 } } let mut ql = y.ql.as_mut_ptr(); let mut qh = y.qh.as_mut_ptr(); for j in (0..QK_K).step_by(128) { for l_idx in 0..32 { let q1 = *l.add(j + l_idx) & 0xF; let q2 = *l.add(j + l_idx + 32) & 0xF; let q3 = *l.add(j + l_idx + 64) & 0xF; let q4 = *l.add(j + l_idx + 96) & 0xF; *ql.add(l_idx) = (q1 | (q3 << 4)) as u8; *ql.add(l_idx + 32) = (q2 | (q4 << 4)) as u8; *qh.add(l_idx) = ((*l.add(j + l_idx) >> 4) | ((*l.add(j + l_idx + 32) >> 4) << 2) | ((*l.add(j + l_idx + 64) >> 4) << 4) | ((*l.add(j + l_idx + 96) >> 4) << 6)) as u8; } ql = ql.add(64); qh = qh.add(32); } x = x.add(QK_K) } } Ok(()) } // https://github.com/ggerganov/llama.cpp/blob/8183159cf3def112f6d1fe94815fce70e1bffa12/k_quants.c#L1067 fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { let k = ys.len(); if k % QK_K != 0 { crate::bail!("dequantize_row_q6k: {k} is not divisible by {QK_K}") } for (idx_x, x) in xs.iter().enumerate() { let d = x.d.to_f32(); let ql = &x.ql; let qh = &x.qh; let sc = &x.scales; for n in (0..QK_K).step_by(128) { let idx = n / 128; let ys = &mut ys[idx_x * QK_K + n..]; let sc = &sc[8 * idx..]; let ql = &ql[64 * idx..]; let qh = &qh[32 * idx..]; for l in 0..32 { let is = l / 16; let q1 = ((ql[l] & 0xF) | ((qh[l] & 3) << 4)) as i8 - 32; let q2 = ((ql[l + 32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) as i8 - 32; let q3 = ((ql[l] >> 4) | (((qh[l] >> 4) & 3) << 4)) as i8 - 32; let q4 = ((ql[l + 32] >> 4) | (((qh[l] >> 6) & 3) << 4)) as i8 - 32; ys[l] = d * sc[is] as f32 * q1 as f32; ys[l + 32] = d * sc[is + 2] as f32 * q2 as f32; ys[l + 64] = d * sc[is + 4] as f32 * q3 as f32; ys[l + 96] = d * sc[is + 6] as f32 * q4 as f32; } } } Ok(()) } } impl GgmlType for BlockQ8K { const DTYPE: GgmlDType = GgmlDType::Q8K; const BLCK_SIZE: usize = QK_K; type VecDotType = BlockQ8K; #[allow(unreachable_code)] fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { #[cfg(target_feature = "avx")] return super::avx::vec_dot_q8k_q8k(n, xs, ys); #[cfg(target_feature = "neon")] return super::neon::vec_dot_q8k_q8k(n, xs, ys); #[cfg(target_feature = "simd128")] return super::simd128::vec_dot_q8k_q8k(n, xs, ys); Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { let qk = QK_K; if n % QK_K != 0 { crate::bail!("vec_dot_q8k_q8k: {n} is not divisible by {qk}") } // Generic implementation. let mut sumf = 0f32; for (xs, ys) in xs.iter().zip(ys.iter()) { let sum_i = xs .qs .iter() .zip(ys.qs.iter()) .map(|(&x, &y)| x as i32 * y as i32) .sum::<i32>(); sumf += sum_i as f32 * xs.d * ys.d } Ok(sumf) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { let k = xs.len(); if k % QK_K != 0 { crate::bail!("quantize_row_q8k: {k} is not divisible by {QK_K}") } for (i, y) in ys.iter_mut().enumerate() { let mut max = 0f32; let mut amax = 0f32; let xs = &xs[i * QK_K..(i + 1) * QK_K]; for &x in xs.iter() { if amax < x.abs() { amax = x.abs(); max = x; } } if amax == 0f32 { y.d = 0f32; y.qs.fill(0) } else { let iscale = -128f32 / max; for (j, q) in y.qs.iter_mut().enumerate() { // ggml uses nearest_int with bit magic here, maybe we want the same // but we would have to test and benchmark it. let v = (iscale * xs[j]).round(); *q = v.min(127.) 
as i8 } for j in 0..QK_K / 16 { let mut sum = 0i32; for ii in 0..16 { sum += y.qs[j * 16 + ii] as i32 } y.bsums[j] = sum as i16 } y.d = 1.0 / iscale } } Ok(()) } fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { let k = ys.len(); if k % QK_K != 0 { crate::bail!("dequantize_row_q8k: {k} is not divisible by {QK_K}") } for (i, x) in xs.iter().enumerate() { for (j, &q) in x.qs.iter().enumerate() { ys[i * QK_K + j] = x.d * q as f32 } } Ok(()) } } // https://github.com/ggerganov/llama.cpp/blob/b5ffb2849d23afe73647f68eec7b68187af09be6/ggml.c#L10605 pub fn matmul<T: GgmlType>( mkn: (usize, usize, usize), lhs: &[f32], rhs_t: &[T], dst: &mut [f32], ) -> Result<()> { let (m, k, n) = mkn; if m * k != lhs.len() { crate::bail!("unexpected lhs length {} {mkn:?}", lhs.len()); } let k_in_lhs_blocks = (k + T::BLCK_SIZE - 1) / T::BLCK_SIZE; let k_in_rhs_blocks = (k + T::VecDotType::BLCK_SIZE - 1) / T::VecDotType::BLCK_SIZE; // TODO: Do not make this copy if the DotType is f32. // TODO: Pre-allocate this. let mut lhs_b = vec![T::VecDotType::zeros(); m * k_in_lhs_blocks]; for row_idx in 0..m { let lhs_b = &mut lhs_b[row_idx * k_in_lhs_blocks..(row_idx + 1) * k_in_lhs_blocks]; let lhs = &lhs[row_idx * k..(row_idx + 1) * k]; T::VecDotType::from_float(lhs, lhs_b)? } let lhs_b = lhs_b.as_slice(); for row_idx in 0..m { let lhs_row = &lhs_b[row_idx * k_in_lhs_blocks..(row_idx + 1) * k_in_lhs_blocks]; let dst_row = &mut dst[row_idx * n..(row_idx + 1) * n]; let result: Result<Vec<_>> = dst_row .into_par_iter() .enumerate() .with_min_len(128) .with_max_len(512) .map(|(col_idx, dst)| { let rhs_col = &rhs_t[col_idx * k_in_rhs_blocks..(col_idx + 1) * k_in_rhs_blocks]; T::vec_dot(k, rhs_col, lhs_row).map(|value| *dst = value) }) .collect(); result?; } Ok(()) } impl GgmlType for f32 { const DTYPE: GgmlDType = GgmlDType::F32; const BLCK_SIZE: usize = 1; type VecDotType = f32; fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { if xs.len() < n { crate::bail!("size mismatch {} < {n}", xs.len()) } if ys.len() < n { crate::bail!("size mismatch {} < {n}", ys.len()) } let mut res = 0f32; unsafe { crate::cpu::vec_dot_f32(xs.as_ptr(), ys.as_ptr(), &mut res, n) }; Ok(res) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { if xs.len() != ys.len() { crate::bail!("size mismatch {} {}", xs.len(), ys.len()); } ys.copy_from_slice(xs); Ok(()) } fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { if xs.len() != ys.len() { crate::bail!("size mismatch {} {}", xs.len(), ys.len()); } ys.copy_from_slice(xs); Ok(()) } } impl GgmlType for f16 { const DTYPE: GgmlDType = GgmlDType::F16; const BLCK_SIZE: usize = 1; type VecDotType = f16; fn vec_dot(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { Self::vec_dot_unopt(n, xs, ys) } fn vec_dot_unopt(n: usize, xs: &[Self], ys: &[Self::VecDotType]) -> Result<f32> { if xs.len() < n { crate::bail!("size mismatch {} < {n}", xs.len()) } if ys.len() < n { crate::bail!("size mismatch {} < {n}", ys.len()) } let mut res = 0f32; unsafe { crate::cpu::vec_dot_f16(xs.as_ptr(), ys.as_ptr(), &mut res, n) }; Ok(res) } fn from_float(xs: &[f32], ys: &mut [Self]) -> Result<()> { if xs.len() != ys.len() { crate::bail!("size mismatch {} {}", xs.len(), ys.len()); } // TODO: vectorize for (x, y) in xs.iter().zip(ys.iter_mut()) { *y = f16::from_f32(*x) } Ok(()) } fn to_float(xs: &[Self], ys: &mut [f32]) -> Result<()> { if xs.len() != ys.len() { 
crate::bail!("size mismatch {} {}", xs.len(), ys.len()); } // TODO: vectorize for (x, y) in xs.iter().zip(ys.iter_mut()) { *y = x.to_f32() } Ok(()) } }
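
// A minimal round-trip sketch through the `GgmlType` interface above: it
// quantizes one QK_K-sized block to Q8K and dequantizes it back, assuming only
// the types and trait defined in this module. The synthetic input and the
// tolerance value are arbitrary, illustrative choices.
#[cfg(test)]
mod q8k_roundtrip_sketch {
    use super::*;

    #[test]
    fn q8k_roundtrip_is_close() -> Result<()> {
        // One block of synthetic values spanning roughly [-4, 4).
        let xs: Vec<f32> = (0..QK_K).map(|i| (i as f32 - 128.0) / 32.0).collect();
        let mut blocks = vec![BlockQ8K::zeros()];
        BlockQ8K::from_float(&xs, &mut blocks)?;
        let mut ys = vec![0f32; QK_K];
        BlockQ8K::to_float(&blocks, &mut ys)?;
        // Q8K keeps one i8 per value, so the reconstruction error stays small.
        for (x, y) in xs.iter().zip(ys.iter()) {
            assert!((x - y).abs() < 0.1, "{x} vs {y}");
        }
        Ok(())
    }
}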
0
hf_public_repos/candle
hf_public_repos/candle/.vscode/settings.json
{ "[python]": { "editor.defaultFormatter": "ms-python.black-formatter" }, "python.formatting.provider": "none", "python.testing.pytestArgs": [ "candle-pyo3" ], "python.testing.unittestEnabled": false, "python.testing.pytestEnabled": true }
0
hf_public_repos/candle
hf_public_repos/candle/candle-flash-attn/Cargo.toml
[package] name = "candle-flash-attn" version = "0.3.1" edition = "2021" description = "Flash attention layer for the candle ML framework." repository = "https://github.com/huggingface/candle" keywords = ["blas", "tensor", "machine-learning"] categories = ["science"] license = "MIT OR Apache-2.0" readme = "README.md" [dependencies] candle = { path = "../candle-core", features = ["cuda"], version = "0.3.1", package = "candle-core" } half = { version = "2.3.1", features = ["num-traits"] } [build-dependencies] anyhow = { version = "1", features = ["backtrace"] } num_cpus = "1.15.0" rayon = "1.7.0" [dev-dependencies] anyhow = { version = "1", features = ["backtrace"] } candle-nn = { path = "../candle-nn", version = "0.3.1", features = ["cuda"] }
0
hf_public_repos/candle
hf_public_repos/candle/candle-flash-attn/build.rs
// Build script to run nvcc and generate the C glue code for launching the flash-attention kernel.
// The cuda build time is very long so one can set the CANDLE_FLASH_ATTN_BUILD_DIR environment
// variable in order to cache the compiled artifacts and avoid recompiling too often.
use anyhow::{Context, Result};
use rayon::prelude::*;
use std::path::PathBuf;
use std::str::FromStr;

const KERNEL_FILES: [&str; 17] = [
    "flash_api.cu",
    "flash_fwd_hdim128_fp16_sm80.cu",
    "flash_fwd_hdim160_fp16_sm80.cu",
    "flash_fwd_hdim192_fp16_sm80.cu",
    "flash_fwd_hdim224_fp16_sm80.cu",
    "flash_fwd_hdim256_fp16_sm80.cu",
    "flash_fwd_hdim32_fp16_sm80.cu",
    "flash_fwd_hdim64_fp16_sm80.cu",
    "flash_fwd_hdim96_fp16_sm80.cu",
    "flash_fwd_hdim128_bf16_sm80.cu",
    "flash_fwd_hdim160_bf16_sm80.cu",
    "flash_fwd_hdim192_bf16_sm80.cu",
    "flash_fwd_hdim224_bf16_sm80.cu",
    "flash_fwd_hdim256_bf16_sm80.cu",
    "flash_fwd_hdim32_bf16_sm80.cu",
    "flash_fwd_hdim64_bf16_sm80.cu",
    "flash_fwd_hdim96_bf16_sm80.cu",
];

fn main() -> Result<()> {
    let num_cpus = std::env::var("RAYON_NUM_THREADS").map_or_else(
        |_| num_cpus::get_physical(),
        |s| usize::from_str(&s).unwrap(),
    );

    rayon::ThreadPoolBuilder::new()
        .num_threads(num_cpus)
        .build_global()
        .unwrap();

    println!("cargo:rerun-if-changed=build.rs");
    for kernel_file in KERNEL_FILES.iter() {
        println!("cargo:rerun-if-changed=kernels/{kernel_file}");
    }
    println!("cargo:rerun-if-changed=kernels/flash_fwd_kernel.h");
    println!("cargo:rerun-if-changed=kernels/flash_fwd_launch_template.h");
    println!("cargo:rerun-if-changed=kernels/flash.h");
    println!("cargo:rerun-if-changed=kernels/philox.cuh");
    println!("cargo:rerun-if-changed=kernels/softmax.h");
    println!("cargo:rerun-if-changed=kernels/utils.h");
    println!("cargo:rerun-if-changed=kernels/kernel_traits.h");
    println!("cargo:rerun-if-changed=kernels/block_info.h");
    println!("cargo:rerun-if-changed=kernels/static_switch.h");
    let out_dir = PathBuf::from(std::env::var("OUT_DIR").context("OUT_DIR not set")?);
    let build_dir = match std::env::var("CANDLE_FLASH_ATTN_BUILD_DIR") {
        Err(_) =>
        {
            #[allow(clippy::redundant_clone)]
            out_dir.clone()
        }
        Ok(build_dir) => {
            let path = PathBuf::from(build_dir);
            path.canonicalize().expect(&format!(
                "Directory doesn't exist: {} (the current directory is {})",
                &path.display(),
                std::env::current_dir()?.display()
            ))
        }
    };
    set_cuda_include_dir()?;

    let ccbin_env = std::env::var("CANDLE_NVCC_CCBIN");
    println!("cargo:rerun-if-env-changed=CANDLE_NVCC_CCBIN");
    let compute_cap = compute_cap()?;

    let out_file = build_dir.join("libflashattention.a");

    let kernel_dir = PathBuf::from("kernels");
    let cu_files: Vec<_> = KERNEL_FILES
        .iter()
        .map(|f| {
            let mut obj_file = out_dir.join(f);
            obj_file.set_extension("o");
            (kernel_dir.join(f), obj_file)
        })
        .collect();
    let out_modified: Result<_, _> = out_file.metadata().and_then(|m| m.modified());
    let should_compile = if out_file.exists() {
        kernel_dir
            .read_dir()
            .expect("kernels folder should exist")
            .any(|entry| {
                if let (Ok(entry), Ok(out_modified)) = (entry, &out_modified) {
                    let in_modified = entry.metadata().unwrap().modified().unwrap();
                    in_modified.duration_since(*out_modified).is_ok()
                } else {
                    true
                }
            })
    } else {
        true
    };
    if should_compile {
        cu_files
            .par_iter()
            .map(|(cu_file, obj_file)| {
                let mut command = std::process::Command::new("nvcc");
                command
                    .arg("-std=c++17")
                    .arg("-O3")
                    .arg("-U__CUDA_NO_HALF_OPERATORS__")
                    .arg("-U__CUDA_NO_HALF_CONVERSIONS__")
                    .arg("-U__CUDA_NO_HALF2_OPERATORS__")
                    .arg("-U__CUDA_NO_BFLOAT16_CONVERSIONS__")
                    .arg(format!("--gpu-architecture=sm_{compute_cap}"))
                    .arg("-c")
                    .args(["-o", obj_file.to_str().unwrap()])
                    .args(["--default-stream", "per-thread"])
                    .arg("-Icutlass/include")
                    .arg("--expt-relaxed-constexpr")
                    .arg("--expt-extended-lambda")
                    .arg("--use_fast_math")
                    .arg("--verbose");
                if let Ok(ccbin_path) = &ccbin_env {
                    command
                        .arg("-allow-unsupported-compiler")
                        .args(["-ccbin", ccbin_path]);
                }
                command.arg(cu_file);
                let output = command
                    .spawn()
                    .context("failed spawning nvcc")?
                    .wait_with_output()?;
                if !output.status.success() {
                    anyhow::bail!(
                        "nvcc error while compiling: {:?}\n\n# stdout\n{:#}\n\n# stderr\n{:#}",
                        &command,
                        String::from_utf8_lossy(&output.stdout),
                        String::from_utf8_lossy(&output.stderr)
                    )
                }
                Ok(())
            })
            .collect::<Result<()>>()?;
        let obj_files = cu_files.iter().map(|c| c.1.clone()).collect::<Vec<_>>();
        let mut command = std::process::Command::new("nvcc");
        command
            .arg("--lib")
            .args(["-o", out_file.to_str().unwrap()])
            .args(obj_files);
        let output = command
            .spawn()
            .context("failed spawning nvcc")?
            .wait_with_output()?;
        if !output.status.success() {
            anyhow::bail!(
                "nvcc error while linking: {:?}\n\n# stdout\n{:#}\n\n# stderr\n{:#}",
                &command,
                String::from_utf8_lossy(&output.stdout),
                String::from_utf8_lossy(&output.stderr)
            )
        }
    }
    println!("cargo:rustc-link-search={}", build_dir.display());
    println!("cargo:rustc-link-lib=flashattention");
    println!("cargo:rustc-link-lib=dylib=cudart");
    println!("cargo:rustc-link-lib=dylib=stdc++");

    /* laurent: I tried using the cc cuda integration as below but this led to ptxas never
    finishing to run for some reason. Calling nvcc manually worked fine.
    cc::Build::new()
        .cuda(true)
        .include("cutlass/include")
        .flag("--expt-relaxed-constexpr")
        .flag("--default-stream")
        .flag("per-thread")
        .flag(&format!("--gpu-architecture=sm_{compute_cap}"))
        .file("kernels/flash_fwd_hdim32_fp16_sm80.cu")
        .compile("flashattn");
    */
    Ok(())
}

fn set_cuda_include_dir() -> Result<()> {
    // NOTE: copied from cudarc build.rs.
    let env_vars = [
        "CUDA_PATH",
        "CUDA_ROOT",
        "CUDA_TOOLKIT_ROOT_DIR",
        "CUDNN_LIB",
    ];
    let env_vars = env_vars
        .into_iter()
        .map(std::env::var)
        .filter_map(Result::ok)
        .map(Into::<PathBuf>::into);

    let roots = [
        "/usr",
        "/usr/local/cuda",
        "/opt/cuda",
        "/usr/lib/cuda",
        "C:/Program Files/NVIDIA GPU Computing Toolkit",
        "C:/CUDA",
    ];
    let roots = roots.into_iter().map(Into::<PathBuf>::into);
    let root = env_vars
        .chain(roots)
        .find(|path| path.join("include").join("cuda.h").is_file())
        .context("cannot find include/cuda.h")?;
    println!(
        "cargo:rustc-env=CUDA_INCLUDE_DIR={}",
        root.join("include").display()
    );
    Ok(())
}

#[allow(unused)]
fn compute_cap() -> Result<usize> {
    println!("cargo:rerun-if-env-changed=CUDA_COMPUTE_CAP");

    // Try to parse compute caps from env
    let mut compute_cap = if let Ok(compute_cap_str) = std::env::var("CUDA_COMPUTE_CAP") {
        println!("cargo:rustc-env=CUDA_COMPUTE_CAP={compute_cap_str}");
        compute_cap_str
            .parse::<usize>()
            .context("Could not parse compute cap")?
    } else {
        // Use nvidia-smi to get the current compute cap
        let out = std::process::Command::new("nvidia-smi")
                .arg("--query-gpu=compute_cap")
                .arg("--format=csv")
                .output()
                .context("`nvidia-smi` failed. Ensure that you have CUDA installed and that `nvidia-smi` is in your PATH.")?;
        let out = std::str::from_utf8(&out.stdout).context("stdout is not a utf8 string")?;
        let mut lines = out.lines();
        assert_eq!(
            lines.next().context("missing line in stdout")?,
            "compute_cap"
        );
        let cap = lines
            .next()
            .context("missing line in stdout")?
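            // nvidia-smi reports the compute cap as e.g. "8.6"; dropping the dot
            // yields the "86" form used for the sm_ architecture flags below.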
.replace('.', ""); let cap = cap .parse::<usize>() .with_context(|| format!("cannot parse as int {cap}"))?; println!("cargo:rustc-env=CUDA_COMPUTE_CAP={cap}"); cap }; // Grab available GPU codes from nvcc and select the highest one let (supported_nvcc_codes, max_nvcc_code) = { let out = std::process::Command::new("nvcc") .arg("--list-gpu-code") .output() .expect("`nvcc` failed. Ensure that you have CUDA installed and that `nvcc` is in your PATH."); let out = std::str::from_utf8(&out.stdout).unwrap(); let out = out.lines().collect::<Vec<&str>>(); let mut codes = Vec::with_capacity(out.len()); for code in out { let code = code.split('_').collect::<Vec<&str>>(); if !code.is_empty() && code.contains(&"sm") { if let Ok(num) = code[1].parse::<usize>() { codes.push(num); } } } codes.sort(); let max_nvcc_code = *codes.last().context("no gpu codes parsed from nvcc")?; (codes, max_nvcc_code) }; // Check that nvcc supports the asked compute caps if !supported_nvcc_codes.contains(&compute_cap) { anyhow::bail!( "nvcc cannot target gpu arch {compute_cap}. Available nvcc targets are {supported_nvcc_codes:?}." ); } if compute_cap > max_nvcc_code { anyhow::bail!( "CUDA compute cap {compute_cap} is higher than the highest gpu code from nvcc {max_nvcc_code}" ); } Ok(compute_cap) }
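
// Illustrative invocation sketch (comments only): the environment variables
// handled above can be set when building, e.g.
//
//   CUDA_COMPUTE_CAP=80 CANDLE_FLASH_ATTN_BUILD_DIR=$HOME/.cache/candle-flash \
//       cargo build --release
//
// `CUDA_COMPUTE_CAP` skips the `nvidia-smi` probe and `CANDLE_FLASH_ATTN_BUILD_DIR`
// caches `libflashattention.a` across builds, as described at the top of this file.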
0
hf_public_repos/candle
hf_public_repos/candle/candle-flash-attn/README.md
# candle-flash-attn
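
A minimal usage sketch, assuming the `flash_attn(q, k, v, softmax_scale, causal)`
function exported by this crate and an available CUDA device; the tensor shapes
below are illustrative only:

```rust
use candle::{DType, Device, Tensor};

fn main() -> anyhow::Result<()> {
    let device = Device::new_cuda(0)?;
    // (batch, seq_len, num_heads, head_dim); flash-attn expects f16 or bf16 inputs.
    let q = Tensor::randn(0f32, 1.0, (1, 128, 8, 64), &device)?.to_dtype(DType::F16)?;
    let k = q.clone();
    let v = q.clone();
    let softmax_scale = 1.0 / (64f32).sqrt();
    let out = candle_flash_attn::flash_attn(&q, &k, &v, softmax_scale, /* causal= */ true)?;
    println!("{:?}", out.shape());
    Ok(())
}
```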
0
hf_public_repos/candle/candle-flash-attn
hf_public_repos/candle/candle-flash-attn/kernels/softmax.h
/****************************************************************************** * Copyright (c) 2023, Tri Dao. ******************************************************************************/ #pragma once #include <cmath> #include <cute/tensor.hpp> #include <cutlass/cutlass.h> #include <cutlass/array.h> #include "philox.cuh" #include "utils.h" namespace flash { using namespace cute; //////////////////////////////////////////////////////////////////////////////////////////////////// template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator> __device__ inline void thread_reduce_(Tensor<Engine0, Layout0> const &tensor, Tensor<Engine1, Layout1> &summary, Operator &op) { static_assert(Layout0::rank == 2, "Only support 2D Tensor"); static_assert(Layout1::rank == 1, "Only support 1D Tensor"); CUTE_STATIC_ASSERT_V(size<0>(summary) == size<0>(tensor)); #pragma unroll for (int mi = 0; mi < size<0>(tensor); mi++) { summary(mi) = zero_init ? tensor(mi, 0) : op(summary(mi), tensor(mi, 0)); #pragma unroll for (int ni = 1; ni < size<1>(tensor); ni++) { summary(mi) = op(summary(mi), tensor(mi, ni)); } } } template<typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator> __device__ inline void quad_allreduce_(Tensor<Engine0, Layout0> &dst, Tensor<Engine1, Layout1> &src, Operator &op) { CUTE_STATIC_ASSERT_V(size(dst) == size(src)); #pragma unroll for (int i = 0; i < size(dst); i++){ dst(i) = Allreduce<4>::run(src(i), op); } } template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Operator> __device__ inline void reduce_(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &summary, Operator &op) { thread_reduce_<zero_init>(tensor, summary, op); quad_allreduce_(summary, summary, op); } template<bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1> __device__ inline void reduce_max(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &max){ MaxOp<float> max_op; reduce_<zero_init>(tensor, max, max_op); } template<typename Engine0, typename Layout0, typename Engine1, typename Layout1> __device__ inline void reduce_sum(Tensor<Engine0, Layout0> const& tensor, Tensor<Engine1, Layout1> &sum){ SumOp<float> sum_op; reduce_(tensor, sum, sum_op); } // Apply the exp to all the elements. template <bool Scale_max=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1> inline __device__ void scale_apply_exp2(Tensor<Engine0, Layout0> &tensor, Tensor<Engine1, Layout1> const &max, const float scale) { static_assert(Layout0::rank == 2, "Only support 2D Tensor"); static_assert(Layout1::rank == 1, "Only support 1D Tensor"); CUTE_STATIC_ASSERT_V(size<0>(max) == size<0>(tensor)); #pragma unroll for (int mi = 0; mi < size<0>(tensor); ++mi) { // If max is -inf, then all elements must have been -inf (possibly due to masking). // We don't want (-inf - (-inf)) since that would give NaN. // If we don't have float around M_LOG2E the multiplication is done in fp64. const float max_scaled = max(mi) == -INFINITY ? 0.f : max(mi) * (Scale_max ? scale : float(M_LOG2E)); #pragma unroll for (int ni = 0; ni < size<1>(tensor); ++ni) { // Instead of computing exp(x - max), we compute exp2(x * log_2(e) - // max * log_2(e)) This allows the compiler to use the ffma // instruction instead of fadd and fmul separately. tensor(mi, ni) = exp2f(tensor(mi, ni) * scale - max_scaled); } } } // Apply the exp to all the elements. 
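// (Unlike scale_apply_exp2 above, this variant also computes the running row
// max and the row sum in the same pass, rather than taking a precomputed max.)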
template <bool zero_init=true, typename Engine0, typename Layout0, typename Engine1, typename Layout1> inline __device__ void max_scale_exp2_sum(Tensor<Engine0, Layout0> &tensor, Tensor<Engine1, Layout1> &max, Tensor<Engine1, Layout1> &sum, const float scale) { static_assert(Layout0::rank == 2, "Only support 2D Tensor"); static_assert(Layout1::rank == 1, "Only support 1D Tensor"); CUTE_STATIC_ASSERT_V(size<0>(max) == size<0>(tensor)); #pragma unroll for (int mi = 0; mi < size<0>(tensor); ++mi) { MaxOp<float> max_op; max(mi) = zero_init ? tensor(mi, 0) : max_op(max(mi), tensor(mi, 0)); #pragma unroll for (int ni = 1; ni < size<1>(tensor); ni++) { max(mi) = max_op(max(mi), tensor(mi, ni)); } max(mi) = Allreduce<4>::run(max(mi), max_op); // If max is -inf, then all elements must have been -inf (possibly due to masking). // We don't want (-inf - (-inf)) since that would give NaN. const float max_scaled = max(mi) == -INFINITY ? 0.f : max(mi) * scale; sum(mi) = 0; #pragma unroll for (int ni = 0; ni < size<1>(tensor); ++ni) { // Instead of computing exp(x - max), we compute exp2(x * log_2(e) - // max * log_2(e)) This allows the compiler to use the ffma // instruction instead of fadd and fmul separately. tensor(mi, ni) = exp2f(tensor(mi, ni) * scale - max_scaled); sum(mi) += tensor(mi, ni); } SumOp<float> sum_op; sum(mi) = Allreduce<4>::run(sum(mi), sum_op); } } template <typename Engine, typename Layout> inline __device__ void apply_mask(Tensor<Engine, Layout> &tensor, const uint32_t max_seqlen_k) { // tensor has shape (ncol=(2, MMA_M), nrow=(2, MMA_N)) static_assert(Layout::rank == 2, "Only support 2D Tensor"); const uint32_t lane_id = threadIdx.x % 32; #pragma unroll for (int nj = 0; nj < size<1, 1>(tensor); ++nj) { #pragma unroll for (int j = 0; j < size<1, 0>(tensor); ++j) { const uint32_t col_idx = nj * 8 + j + (lane_id % 4) * 2; if (col_idx >= max_seqlen_k) { // Without the "make_coord" we get wrong results #pragma unroll for (int mi = 0; mi < size<0>(tensor); ++mi) { tensor(mi, make_coord(j, nj)) = -INFINITY; } } } } } template <typename Engine, typename Layout> inline __device__ void apply_mask_causal(Tensor<Engine, Layout> &tensor, const uint32_t col_idx_offset_, const uint32_t max_seqlen_k, const uint32_t row_idx_offset_, const uint32_t warp_row_stride) { // tensor has shape (ncol=(2, MMA_M), nrow=(2, MMA_N)) static_assert(Layout::rank == 2, "Only support 2D Tensor"); const uint32_t lane_id = threadIdx.x % 32; // const uint32_t row_idx_offset = row_idx_offset_ + lane_id / 4; const uint32_t row_idx_offset = row_idx_offset_; const uint32_t col_idx_offset = col_idx_offset_ + (lane_id % 4) * 2; #pragma unroll for (int mi = 0; mi < size<0, 1>(tensor); ++mi) { const uint32_t row_idx_base = row_idx_offset + mi * warp_row_stride; #pragma unroll for (int i = 0; i < size<0, 0>(tensor); ++i) { const uint32_t row_idx = row_idx_base + i * 8; const uint32_t col_idx_limit = std::min(max_seqlen_k, row_idx + 1); #pragma unroll for (int nj = 0; nj < size<1, 1>(tensor); ++nj) { const uint32_t col_idx_base = col_idx_offset + nj * 8; #pragma unroll for (int j = 0; j < size<1, 0>(tensor); ++j) { const uint32_t col_idx = col_idx_base + j; if (col_idx >= col_idx_limit) { tensor(make_coord(i, mi), make_coord(j, nj)) = -INFINITY; } } } // if (cute::thread0()) { // printf("mi = %d, i = %d, row_idx = %d, max_seqlen_k = %d\n", mi, i, row_idx, max_seqlen_k); // print(tensor(make_coord(i, mi), _)); // // print(tensor(_, j + nj * size<1, 0>(tensor))); // } } } } template <typename Engine0, typename Layout0, typename 
Engine1, typename Layout1> inline __device__ void apply_mask_causal_w_idx( Tensor<Engine0, Layout0> &tensor, Tensor<Engine1, Layout1> const &idx_rowcol, const uint32_t col_idx_offset_, const uint32_t max_seqlen_k, const uint32_t row_idx_offset_) { // tensor has shape (ncol=(2, MMA_M), nrow=(2, MMA_N)) static_assert(Layout0::rank == 2, "Only support 2D Tensor"); static_assert(Layout1::rank == 2, "Only support 2D Tensor"); CUTE_STATIC_ASSERT_V(size<0>(tensor) == size<0>(idx_rowcol)); CUTE_STATIC_ASSERT_V(size<1>(tensor) == size<1>(idx_rowcol)); #pragma unroll for (int mi = 0; mi < size<0>(tensor); ++mi) { const uint32_t col_idx_limit = std::min(max_seqlen_k, 1 + row_idx_offset_ + get<0>(idx_rowcol(mi, 0))); #pragma unroll for (int ni = 0; ni < size<1, 1>(tensor); ++ni) { if (col_idx_offset_ + get<1>(idx_rowcol(0, ni)) >= col_idx_limit) { tensor(mi, ni) = -INFINITY; } } // if (cute::thread0()) { // printf("ni = %d, j = %d, col_idx = %d, max_seqlen_k = %d\n", ni, j, col_idx, max_seqlen_k); // print(tensor(_, make_coord(j, ni))); // // print(tensor(_, j + ni * size<1, 0>(tensor))); // } } } template <bool encode_dropout_in_sign_bit=false, typename Engine, typename Layout> inline __device__ void apply_dropout(Tensor<Engine, Layout> &tensor, uint8_t p_dropout_in_uint8_t, unsigned long long seed, unsigned long long offset, uint32_t block_row_start, uint32_t block_col_start, uint32_t block_row_stride) { // tensor has shape (8, MMA_M, MMA_N / 2) using T = typename Engine::value_type; auto encode_dropout = [](bool keep, T val) { return keep ? val : (encode_dropout_in_sign_bit ? -val : T(0)); }; static_assert(decltype(size<2>(tensor))::value % 2 == 0); const uint16_t p_dropout_8bit_in_uint16_t = uint16_t(p_dropout_in_uint8_t); const uint32_t p_dropout_8bit_in_uint32_t = (uint32_t(p_dropout_8bit_in_uint16_t) << 16) | uint32_t(p_dropout_8bit_in_uint16_t); // if (cute::thread0()) { printf("threshold2 = 0x%x\n", p_dropout_8bit_in_uint32_t); } #pragma unroll for (int m = 0; m < size<1>(tensor); ++m, block_row_start += block_row_stride) { uint2 rowcol = make_uint2(block_row_start, block_col_start); #pragma unroll for (int n = 0; n < size<2>(tensor) / 2; ++n, ++rowcol.y) { // if (cute::thread(32, 0)) { printf("m = %d, n = %d, row = %d, col = %d\n", m, n, int(rowcol.x), int(rowcol.y));} uint4 random_uint4 = flash::philox(seed, reinterpret_cast<unsigned long long&>(rowcol), offset); // if (cute::thread0()) { printf("philox = %u, %d, %d, %d\n", random_uint4.x, random_uint4.y, random_uint4.z, random_uint4.w);} uint8_t (&rnd_8)[16] = reinterpret_cast<uint8_t (&)[16]>(random_uint4); // Special implementation for 16-bit types: we duplicate the threshold to the // low and high 16 bits of a 32-bit value, then use the f16x2 comparison instruction // to get a mask. The low 16 bits of the mask will be either 0xffff or 0x0000, // and the high 16 bits will be either 0xffff or 0x0000, depending on whether // the random value is less than the threshold. // We then do a bit-wise AND between the mask and the original value (in 32-bit). // We're exploiting the fact that floating point comparison is equivalent to integer // comparison, since we're comparing unsigned integers whose top 8-bits are zero. 
if (!encode_dropout_in_sign_bit && (std::is_same<T, cutlass::half_t>::value || std::is_same<T, cutlass::bfloat16_t>::value)) { uint16_t rnd_16[16]; #pragma unroll for (int i = 0; i < 16; i++) { rnd_16[i] = uint16_t(rnd_8[i]); } uint32_t (&rnd_32)[8] = reinterpret_cast<uint32_t (&)[8]>(rnd_16); #pragma unroll for (int j = 0; j < 2; j++) { Tensor tensor_uint32 = recast<uint32_t>(tensor(_, m, n * 2 + j)); // if (cute::thread0()) { printf("random = 0x%x, 0x%x, 0x%x, 0x%x\n", rnd_32[j * 4 + 0], rnd_32[j * 4 + 1], rnd_32[j * 4 + 2], rnd_32[j * 4 + 3]); } // if (cute::thread0()) { printf("tensor_uint32 = 0x%x, 0x%x, 0x%x, 0x%x\n", tensor_uint32(0), tensor_uint32(1), tensor_uint32(2), tensor_uint32(3)); } #pragma unroll for (int i = 0; i < 4; i++) { uint32_t mask; asm volatile("set.le.u32.f16x2 %0, %1, %2;\n" : "=r"(mask) : "r"(rnd_32[j * 4 + i]), "r"(p_dropout_8bit_in_uint32_t)); tensor_uint32(i) &= mask; } // if (cute::thread0()) { printf("tensor_uint32 = 0x%x, 0x%x, 0x%x, 0x%x\n", tensor_uint32(0), tensor_uint32(1), tensor_uint32(2), tensor_uint32(3)); } } } else { #pragma unroll for (int j = 0; j < 2; j++) { #pragma unroll for (int i = 0; i < 8; i++) { tensor(i, m, n * 2 + j) = encode_dropout(rnd_8[j * 8 + i] <= p_dropout_in_uint8_t, tensor(i, m, n * 2 + j)); } Tensor tensor_uint32 = recast<uint32_t>(tensor(_, m, n * 2 + j)); // if (cute::thread0()) { printf("tensor_uint32 = 0x%x, 0x%x, 0x%x, 0x%x\n", tensor_uint32(0), tensor_uint32(1), tensor_uint32(2), tensor_uint32(3)); } } } // // if ((threadIdx.x == 0) && (blockIdx.x == 0) && (blockIdx.y == 0)) { // // printf("n = %d, ph Philox: %u, %u, %u, %u\n", n, rnd_8.x, rnd_8.y, rnd_8.z, rnd_8.w); // // } } } } } // namespace flash
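
// Illustrative note on the exp2f rewrite used above: exp((x - max) * s) equals
// exp2((x - max) * s * log2(e)), so with scale = s * M_LOG2E folded in once,
// each element needs only exp2f(x * scale - max * scale), which the compiler
// can lower to a single ffma plus exp2f, as the inline comments describe.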
0
hf_public_repos/candle/candle-flash-attn
hf_public_repos/candle/candle-flash-attn/kernels/flash_fwd_kernel.h
/****************************************************************************** * Copyright (c) 2023, Tri Dao. ******************************************************************************/ #pragma once #include <cmath> #include <cute/algorithm/copy.hpp> #include <cute/algorithm/gemm.hpp> #include <cutlass/cutlass.h> #include <cutlass/array.h> #include <cutlass/numeric_types.h> #include <cutlass/numeric_conversion.h> #include "block_info.h" #include "kernel_traits.h" #include "utils.h" #include "softmax.h" #include "philox.cuh" namespace flash { using namespace cute; //////////////////////////////////////////////////////////////////////////////////////////////////// template <int MMA_M, class... Args, class TiledMMA> CUTE_HOST_DEVICE auto make_tiled_copy_A_warpcontiguousM(Copy_Atom<Args...> const& copy_atom, TiledMMA const& tiled_mma) { using TileShape_MNK = typename TiledMMA::TiledShape_MNK; using AtomShape_MNK = typename TiledMMA::AtomShape_MNK; constexpr int AtomShape_M = decltype(size<0>(AtomShape_MNK{}))::value; constexpr int kNWarps = decltype(size<0>(TileShape_MNK{}))::value / AtomShape_M; constexpr int MMAStride_M = MMA_M * AtomShape_M; auto t = make_tile(Layout<Shape<Int<AtomShape_M>, Int<kNWarps>>, Stride<_1, Int<MMAStride_M>> >{}, make_layout(size<2>(TileShape_MNK{}))); // if (cute::thread0()) {printf("make_tiled_copy_A_warpcontiguousM "); print(t); printf("\n"); } return make_tiled_copy_impl(copy_atom, tiled_mma.get_layoutA_TV(), t); } //////////////////////////////////////////////////////////////////////////////////////////////////// template <int MMA_M, class... Args, class TiledMMA> CUTE_HOST_DEVICE auto make_tiled_copy_C_warpcontiguousM(Copy_Atom<Args...> const& copy_atom, TiledMMA const& tiled_mma) { using TileShape_MNK = typename TiledMMA::TiledShape_MNK; using AtomShape_MNK = typename TiledMMA::AtomShape_MNK; constexpr int AtomShape_M = decltype(size<0>(AtomShape_MNK{}))::value; constexpr int kNWarps = decltype(size<0>(TileShape_MNK{}))::value / AtomShape_M; constexpr int MMAStride_M = MMA_M * AtomShape_M; auto t = make_tile(Layout<Shape<Int<AtomShape_M>, Int<kNWarps>>, Stride<_1, Int<MMAStride_M>> >{}, // TODO: Shouldn't this be size<1>? make_layout(size<2>(TileShape_MNK{}))); // if (cute::thread0()) {printf("make_tiled_copy_C_warpcontiguousM "); print(t); printf("\n"); } return make_tiled_copy_impl(copy_atom, tiled_mma.get_layoutC_TV(), t); } //////////////////////////////////////////////////////////////////////////////////////////////////// template<bool Is_first, bool Check_inf=false, typename Tensor0, typename Tensor1, typename Tensor2> inline __device__ void softmax_rescale_o(Tensor0 &scores, Tensor1 &scores_max, Tensor1 &scores_sum, Tensor2 &acc_o, float softmax_scale_log2) { if (Is_first) { flash::template reduce_max</*zero_init=*/true>(scores, scores_max); flash::scale_apply_exp2(scores, scores_max, softmax_scale_log2); flash::reduce_sum(scores, scores_sum); } else { Tensor scores_max_prev = make_fragment_like(scores_max); copy(scores_max, scores_max_prev); flash::template reduce_max</*zero_init=*/false>(scores, scores_max); // Reshape acc_o from (MMA=4, MMA_M, MMA_K) to (nrow=(2, MMA_M), ncol=(2, MMA_K)) Tensor acc_o_rowcol = make_tensor(acc_o.data(), flash::convert_layout_acc_rowcol(acc_o.layout())); #pragma unroll for (int mi = 0; mi < size(scores_max); ++mi) { float scores_max_cur = !Check_inf ? scores_max(mi) : (scores_max(mi) == -INFINITY ? 
0.0f : scores_max(mi)); float scores_scale = exp2f((scores_max_prev(mi) - scores_max_cur) * softmax_scale_log2); scores_sum(mi) *= scores_scale; #pragma unroll for (int ni = 0; ni < size<1>(acc_o_rowcol); ++ni) { acc_o_rowcol(mi, ni) *= scores_scale; } } flash::scale_apply_exp2(scores, scores_max, softmax_scale_log2); Tensor scores_sum_cur = make_fragment_like(scores_sum); flash::reduce_sum(scores, scores_sum_cur); #pragma unroll for (int mi = 0; mi < size(scores_sum); ++mi) { scores_sum(mi) += scores_sum_cur(mi); } } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename TiledCopy> inline __device__ void write_softmax_to_gmem( Tensor<Engine0, Layout0> const &tOrP, Tensor<Engine1, Layout1> &tPgP, TiledCopy gmem_thr_copy_P ) { // Reshape tOrP from (8, MMA_M, MMA_N) to (8, MMA_M * MMA_N) Layout l = tOrP.layout(); Tensor tPrP = make_tensor(tOrP.data(), make_layout(get<0>(l), make_layout(get<1>(l), get<2>(l)))); CUTE_STATIC_ASSERT_V(size<2>(tPgP) == _1{}); // TODO(laurent): reactivate the following // CUTE_STATIC_ASSERT_V(size<1>(tPrP) == size<1>(tPgP)); #pragma unroll for (int mi = 0; mi < size<1>(tPrP); ++mi) { copy(gmem_thr_copy_P, tPrP(_, mi), tPgP(_, mi, 0)); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename Kernel_traits, bool Is_dropout, bool Is_causal, bool Is_even_N, bool Is_even_K, bool Return_softmax, typename Params> inline __device__ void compute_attn_1rowblock(const Params &params, const int bidb, const int bidh, const int m_block) { using Element = typename Kernel_traits::Element; using ElementAccum = typename Kernel_traits::ElementAccum; using index_t = typename Kernel_traits::index_t; // Shared memory. extern __shared__ char smem_[]; // The thread index. const int tidx = threadIdx.x; constexpr int kBlockM = Kernel_traits::kBlockM; constexpr int kBlockN = Kernel_traits::kBlockN; constexpr int kHeadDim = Kernel_traits::kHeadDim; constexpr int kNWarps = Kernel_traits::kNWarps; constexpr int MMA_M = kBlockM / decltype(size<0>(typename Kernel_traits::TiledMma::TiledShape_MNK{}))::value; const BlockInfo</*Varlen=*/!Is_even_N> binfo(params, bidb); if (m_block * kBlockM >= binfo.actual_seqlen_q || binfo.actual_seqlen_k == 0) return; int n_block_max = cute::ceil_div(binfo.actual_seqlen_k, kBlockN); if (Is_causal) { n_block_max = std::min(n_block_max, cute::ceil_div((m_block + 1) * kBlockM, kBlockN)); // if (threadIdx.x == 0 && blockIdx.y == 0 && blockIdx.z == 0) { // printf("m_block = %d, n_block_max = %d\n", m_block, n_block_max); // } } // We iterate over the blocks in reverse order. This is because the last block is the only one // that needs masking when we read K and V from global memory. Moreover, iterating in reverse // might save us 1 register (we just need n_block instead of both n_block and n_block_max). const index_t row_offset_q = binfo.q_offset(params.q_batch_stride, params.q_row_stride, bidb) + m_block * kBlockM * params.q_row_stride + bidh * params.q_head_stride; // We move K and V to the last block. 
const index_t row_offset_k = binfo.k_offset(params.k_batch_stride, params.k_row_stride, bidb) + (n_block_max - 1) * kBlockN * params.k_row_stride + (bidh / params.h_h_k_ratio) * params.k_head_stride; const index_t row_offset_v = binfo.k_offset(params.v_batch_stride, params.v_row_stride, bidb) + (n_block_max - 1) * kBlockN * params.v_row_stride + (bidh / params.h_h_k_ratio) * params.v_head_stride; const index_t row_offset_p = ((bidb * params.h + bidh) * params.seqlen_q_rounded + m_block * kBlockM) * params.seqlen_k_rounded + (n_block_max - 1) * kBlockN; Tensor gQ = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.q_ptr) + row_offset_q), Shape<Int<kBlockM>, Int<kHeadDim>>{}, make_stride(params.q_row_stride, _1{})); Tensor gK = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.k_ptr) + row_offset_k), Shape<Int<kBlockN>, Int<kHeadDim>>{}, make_stride(params.k_row_stride, _1{})); Tensor gV = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.v_ptr) + row_offset_v), Shape<Int<kBlockN>, Int<kHeadDim>>{}, make_stride(params.v_row_stride, _1{})); Tensor gP = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.p_ptr) + row_offset_p), Shape<Int<kBlockM>, Int<kBlockN>>{}, make_stride(params.seqlen_k_rounded, _1{})); Tensor sQ = make_tensor(make_smem_ptr(reinterpret_cast<Element *>(smem_)), typename Kernel_traits::SmemLayoutQ{}); // Careful we're using the same smem for sQ and sK | sV if Share_Q_K_smem; Tensor sK = make_tensor(sQ.data() + (Kernel_traits::Share_Q_K_smem ? 0 : size(sQ)), typename Kernel_traits::SmemLayoutKV{}); Tensor sV = make_tensor(sK.data() + size(sK), typename Kernel_traits::SmemLayoutKV{}); Tensor sVt = make_tensor(sV.data(), typename Kernel_traits::SmemLayoutVtransposed{}); Tensor sVtNoSwizzle = make_tensor(sV.data(), typename Kernel_traits::SmemLayoutVtransposedNoSwizzle{}); auto gmem_thr_copy_QKV = typename Kernel_traits::GmemTiledCopyQKV{}.get_thread_slice(tidx); auto gmem_thr_copy_P = typename Kernel_traits::GmemTiledCopyP{}.get_thread_slice(tidx); Tensor tQgQ = gmem_thr_copy_QKV.partition_S(gQ); Tensor tQsQ = gmem_thr_copy_QKV.partition_D(sQ); Tensor tKgK = gmem_thr_copy_QKV.partition_S(gK); // (KCPY, KCPY_N, KCPY_K) Tensor tKsK = gmem_thr_copy_QKV.partition_D(sK); Tensor tVgV = gmem_thr_copy_QKV.partition_S(gV); // (VCPY, VCPY_N, VCPY_K) Tensor tVsV = gmem_thr_copy_QKV.partition_D(sV); Tensor tPgP = gmem_thr_copy_P.partition_D(gP); typename Kernel_traits::TiledMma tiled_mma; auto thr_mma = tiled_mma.get_thread_slice(tidx); Tensor tSrQ = thr_mma.partition_fragment_A(sQ); // (MMA,MMA_M,MMA_K) Tensor tSrK = thr_mma.partition_fragment_B(sK); // (MMA,MMA_N,MMA_K) Tensor tOrVt = thr_mma.partition_fragment_B(sVtNoSwizzle); // (MMA, MMA_K,MMA_N) Tensor acc_o = partition_fragment_C(tiled_mma, Shape<Int<kBlockM>, Int<kHeadDim>>{}); // MMA, MMA_M, MMA_K // // Copy Atom retiling // auto smem_thr_copy_Q = make_tiled_copy_A(typename Kernel_traits::SmemCopyAtom{}, tiled_mma).get_thread_slice(tidx); // auto smem_thr_copy_Q = make_tiled_copy_A_warpcontiguousM<MMA_M>(typename Kernel_traits::SmemCopyAtom{}, tiled_mma).get_thread_slice(tidx); // if (cute::thread0()) {smem_thr_copy_Q.print_all();} Tensor tSsQ = smem_thr_copy_Q.partition_S(sQ); // if (cute::thread0()) {print(tSsQ.layout()); printf("\n");} auto smem_thr_copy_K = make_tiled_copy_B(typename Kernel_traits::SmemCopyAtom{}, tiled_mma).get_thread_slice(tidx); Tensor tSsK = smem_thr_copy_K.partition_S(sK); auto smem_thr_copy_V = make_tiled_copy_B(typename Kernel_traits::SmemCopyAtomTransposed{}, 
tiled_mma).get_thread_slice(tidx); Tensor tOsVt = smem_thr_copy_V.partition_S(sVt); // TODO: this might need to change if we change the mma instruction in SM70 Tensor scores_max = make_tensor<ElementAccum>(Shape<Int<2 * size<1>(acc_o)>>{}); Tensor scores_sum = make_fragment_like(scores_max); // // PREDICATES // // // Allocate predicate tensors for m and n // Tensor tQpQ = make_tensor<bool>(make_shape(size<1>(tQsQ), size<2>(tQsQ)), Stride<_1,_0>{}); // Tensor tKVpKV = make_tensor<bool>(make_shape(size<1>(tKsK), size<2>(tKsK)), Stride<_1,_0>{}); // Construct identity layout for sQ and sK Tensor cQ = make_identity_tensor(make_shape(size<0>(sQ), size<1>(sQ))); // (BLK_M,BLK_K) -> (blk_m,blk_k) Tensor cKV = make_identity_tensor(make_shape(size<0>(sK), size<1>(sK))); // (BLK_N,BLK_K) -> (blk_n,blk_k) // Tensor tScQ = thr_mma.partition_A(cQ); // (MMA,MMA_M,MMA_K) // if (cute::thread0()) { // print(tScQ.layout()); printf("\n"); // for (int i = 0; i < size(tScQ); ++i) { // printf("%d ", get<0>(tScQ(i))); // } // printf("\n"); // for (int i = 0; i < size(tScQ); ++i) { // printf("%d ", get<1>(tScQ(i))); // } // printf("\n"); // } // Repeat the partitioning with identity layouts Tensor tQcQ = gmem_thr_copy_QKV.partition_S(cQ); // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k) Tensor tKVcKV = gmem_thr_copy_QKV.partition_S(cKV); // (BCPY,BCPY_N,BCPY_K) -> (blk_n,blk_k) // Allocate predicate tensors for k Tensor tQpQ = make_tensor<bool>(make_shape(size<2>(tQsQ))); Tensor tKVpKV = make_tensor<bool>(make_shape(size<2>(tKsK))); // Set predicates for k bounds if (!Is_even_K) { #pragma unroll for (int k = 0; k < size(tQpQ); ++k) { tQpQ(k) = get<1>(tQcQ(0, 0, k)) < params.d; } #pragma unroll for (int k = 0; k < size(tKVpKV); ++k) { tKVpKV(k) = get<1>(tKVcKV(0, 0, k)) < params.d; } } // Prologue Tensor tQrQ = make_fragment_like(tQgQ); // We don't need to clear the sQ smem tiles since we'll only write out the valid outputs flash::copy</*Is_even_MN=*/false, Is_even_K>(gmem_thr_copy_QKV, tQgQ, tQsQ, tQcQ, tQpQ, binfo.actual_seqlen_q - m_block * kBlockM); if (Kernel_traits::Is_Q_in_regs) { cute::cp_async_fence(); } // // Copy rmem to smem // // copy(tQrQ, tQsQ); // flash::cp_async_wait<0>(); // __syncthreads(); // // if (cute::thread(1, 0)) { print(tQsQ); } // // Tensor sQNoSwizzle = make_tensor(make_smem_ptr(reinterpret_cast<Element *>(smem_)), typename Kernel_traits::SmemLayoutQNoSwizzle{}); // // if (cute::thread0()) { print(sQNoSwizzle); } if (Kernel_traits::Share_Q_K_smem) { flash::cp_async_wait<0>(); __syncthreads(); Tensor tSrQ_copy_view = smem_thr_copy_Q.retile_D(tSrQ); CUTE_STATIC_ASSERT_V(size<1>(tSsQ) == size<1>(tSrQ_copy_view)); // M copy(smem_thr_copy_Q, tSsQ, tSrQ_copy_view); __syncthreads(); } int n_block = n_block_max - 1; // We don't need to clear the sK smem tiles since we'll mask out the scores anyway. 
flash::copy<Is_even_N, Is_even_K>(gmem_thr_copy_QKV, tKgK, tKsK, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN); cute::cp_async_fence(); // if (threadIdx.x == 0 && blockIdx.y == 0 && blockIdx.z < 2) { print(tKgK); } // __syncthreads(); if (Kernel_traits::Is_Q_in_regs && !Kernel_traits::Share_Q_K_smem) { flash::cp_async_wait<1>(); __syncthreads(); Tensor tSrQ_copy_view = smem_thr_copy_Q.retile_D(tSrQ); CUTE_STATIC_ASSERT_V(size<1>(tSsQ) == size<1>(tSrQ_copy_view)); // M copy(smem_thr_copy_Q, tSsQ, tSrQ_copy_view); } // auto seeds = at::cuda::philox::unpack(params.philox_args); // unsigned long long seed = std::get<0>(seeds); // unsigned long long offset = std::get<1>(seeds) + (bidb * params.h + bidh) * 32 + tidx % 32; unsigned long long seed = 0; unsigned long long offset = 0; clear(acc_o); // For performance reason, we separate out two kinds of iterations: // those that need masking on S, and those that don't. // We need masking on S for the very last block when K and V has length not multiple of kBlockN. // We also need masking on S if it's causal, for the last ceil_div(kBlockM, kBlockN) blocks. // We will have at least 1 "masking" iteration. constexpr int n_masking_steps = Is_causal ? cute::ceil_div(kBlockM, kBlockN) : 1; #pragma unroll for (int masking_step = 0; masking_step < n_masking_steps; ++masking_step, --n_block) { Tensor acc_s = partition_fragment_C(tiled_mma, Shape<Int<kBlockM>, Int<kBlockN>>{}); // (MMA=4, MMA_M, MMA_N) clear(acc_s); flash::cp_async_wait<0>(); __syncthreads(); // Advance gV if (masking_step > 0) { tVgV.data() = tVgV.data() + (-int(kBlockN * params.v_row_stride)); flash::copy</*Is_even_MN=*/true, Is_even_K>(gmem_thr_copy_QKV, tVgV, tVsV, tKVcKV, tKVpKV); } else { // Clear the smem tiles to account for predicated off loads flash::copy<Is_even_N, Is_even_K, /*Clear_OOB_MN=*/true>( gmem_thr_copy_QKV, tVgV, tVsV, tKVcKV, tKVpKV, binfo.actual_seqlen_k - n_block * kBlockN ); } cute::cp_async_fence(); flash::gemm</*A_in_regs=*/Kernel_traits::Is_Q_in_regs>( acc_s, tSrQ, tSrK, tSsQ, tSsK, tiled_mma, smem_thr_copy_Q, smem_thr_copy_K ); // if (cute::thread0()) { print(acc_s); } // Reshape acc_s from (MMA=4, MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, MMA_N)) Tensor scores = make_tensor(acc_s.data(), flash::convert_layout_acc_rowcol(acc_s.layout())); // if (cute::thread0()) { print(scores); } // We don't put the masking before the matmul S = Q K^T because we don't clear sK // for rows outside actual_seqlen_k. So those rows could have Inf / NaN, and the matmul // can produce Inf / NaN. if (!Is_causal) { if (!Is_even_N) { flash::apply_mask(scores, binfo.actual_seqlen_k - n_block * kBlockN); } } else { // Tensor caccS = make_identity_tensor(Shape<Int<kBlockM>, Int<kBlockN>>{}); // (BLK_M,BLK_N) -> (blk_m,blk_n) // Tensor taccScS = thr_mma.partition_C(caccS); // (MMA,MMA_M,MMA_N) // static_assert(decltype(size<0>(taccScS))::value == 4); // // Convert to ((2, 2), MMA_M, MMA_N) then take only the row indices. // Tensor idx_row = logical_divide(taccScS, Shape<_2>{})(make_coord(0, _), _, 0); // Tensor idx_rowcol = make_tensor(taccScS.data(), flash::convert_layout_acc_rowcol(taccScS.layout())); // flash::apply_mask_causal_w_idx(scores, idx_rowcol, n_block * kBlockN, binfo.actual_seqlen_k, // m_block * kBlockM); // Idk why it's get<1> and not get<0> of the stride. 
// if (cute::thread0()) { print(idx_row.layout()); print(stride<1>(idx_row)); printf("stride = %d \n", get<1>(stride<1>(idx_row))); } // I can't get the stride from idx_row flash::apply_mask_causal(scores, n_block * kBlockN, binfo.actual_seqlen_k, // m_block * kBlockM + get<0>(idx_row(0)), m_block * kBlockM + (tidx / 32) * 16 + (tidx % 32) / 4, kNWarps * 16); // m_block * kBlockM + (tidx / 32) * 16, kNWarps * 16); // m_block * kBlockM + (tidx / 32) * (kBlockM / kNWarps), 16); } flash::cp_async_wait<0>(); __syncthreads(); if (n_block > 0) { // Advance gK tKgK.data() = tKgK.data() + (-int(kBlockN * params.k_row_stride)); flash::copy</*Is_even_MN=*/true, Is_even_K>(gmem_thr_copy_QKV, tKgK, tKsK, tKVcKV, tKVpKV); // This cp_async_fence needs to be in the if block, otherwise the synchronization // isn't right and we get race conditions. cute::cp_async_fence(); } // TODO: when we have key_padding_mask we'll need to Check_inf masking_step == 0 ? softmax_rescale_o</*Is_first=*/true, /*Check_inf=*/Is_causal>(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2) : softmax_rescale_o</*Is_first=*/false, /*Check_inf=*/Is_causal>(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2); // Convert scores from fp32 to fp16/bf16 Tensor rP = flash::convert_type<Element>(scores); // Reshape rP from (nrow=(2, MMA_M), ncol=(2, MMA_N)) to ((2, 2, 2), MMA_M, MMA_N / 2) // if using m16n8k16 or ((2, 2, 1), MMA_M, MMA_N) if using m16n8k8. Tensor tOrP = make_tensor(rP.data(), flash::convert_layout_rowcol_Aregs<Kernel_traits::TiledMma>(rP.layout())); uint32_t block_row_idx = m_block * (kBlockM / 16) + tidx / 32; uint32_t block_col_idx = n_block * (kBlockN / 32); if (Return_softmax) { Tensor tOrP_copy = make_fragment_like(tOrP); copy(tOrP, tOrP_copy); flash::apply_dropout</*encode_dropout_in_sign_bit=*/true>( tOrP_copy, params.p_dropout_in_uint8_t, seed, offset, block_row_idx, block_col_idx, kNWarps ); flash::write_softmax_to_gmem(tOrP_copy, tPgP, gmem_thr_copy_P); tPgP.data() = tPgP.data() + (-kBlockN); } if (Is_dropout) { flash::apply_dropout(tOrP, params.p_dropout_in_uint8_t, seed, offset, block_row_idx, block_col_idx, kNWarps); } // if (cute::thread0()) { print(tOrP); } flash::gemm_A_in_regs(acc_o, tOrP, tOrVt, tOsVt, tiled_mma, smem_thr_copy_V); // if (cute::thread0()) { print(scores); } // This check is at the end of the loop since we always have at least 1 iteration if (n_masking_steps > 1 && n_block <= 0) { --n_block; break; } } // These are the iterations where we don't need masking on S for (; n_block >= 0; --n_block) { Tensor acc_s = partition_fragment_C(tiled_mma, Shape<Int<kBlockM>, Int<kBlockN>>{}); // (MMA=4, MMA_M, MMA_N) clear(acc_s); flash::cp_async_wait<0>(); __syncthreads(); // Advance gV tVgV.data() = tVgV.data() + (-int(kBlockN * params.v_row_stride)); flash::copy</*Is_even_MN=*/true, Is_even_K>(gmem_thr_copy_QKV, tVgV, tVsV, tKVcKV, tKVpKV); cute::cp_async_fence(); flash::gemm</*A_in_regs=*/Kernel_traits::Is_Q_in_regs>( acc_s, tSrQ, tSrK, tSsQ, tSsK, tiled_mma, smem_thr_copy_Q, smem_thr_copy_K ); flash::cp_async_wait<0>(); __syncthreads(); if (n_block > 0) { // Advance gK tKgK.data() = tKgK.data() + (-int(kBlockN * params.k_row_stride)); flash::copy</*Is_even_MN=*/true, Is_even_K>(gmem_thr_copy_QKV, tKgK, tKsK, tKVcKV, tKVpKV); // This cp_async_fence needs to be in the if block, otherwise the synchronization // isn't right and we get race conditions. 
cute::cp_async_fence(); } // Reshape acc_s from (MMA=4, MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, MMA_N)) Tensor scores = make_tensor(acc_s.data(), flash::convert_layout_acc_rowcol(acc_s.layout())); softmax_rescale_o</*Is_first=*/false>(scores, scores_max, scores_sum, acc_o, params.scale_softmax_log2); Tensor rP = flash::convert_type<Element>(scores); // Reshape rP from (nrow=(2, MMA_M), ncol=(2, MMA_N)) to ((2, 2, 2), MMA_M, MMA_N / 2) // if using m16n8k16 or ((2, 2, 1), MMA_M, MMA_N) if using m16n8k8. Tensor tOrP = make_tensor(rP.data(), flash::convert_layout_rowcol_Aregs<Kernel_traits::TiledMma>(rP.layout())); uint32_t block_row_idx = m_block * (kBlockM / 16) + tidx / 32; uint32_t block_col_idx = n_block * (kBlockN / 32); if (Return_softmax) { Tensor tOrP_copy = make_fragment_like(tOrP); copy(tOrP, tOrP_copy); flash::apply_dropout</*encode_dropout_in_sign_bit=*/true>( tOrP_copy, params.p_dropout_in_uint8_t, seed, offset, block_row_idx, block_col_idx, kNWarps ); flash::write_softmax_to_gmem(tOrP_copy, tPgP, gmem_thr_copy_P); tPgP.data() = tPgP.data() + (-kBlockN); } if (Is_dropout) { flash::apply_dropout(tOrP, params.p_dropout_in_uint8_t, seed, offset, block_row_idx, block_col_idx, kNWarps); } flash::gemm_A_in_regs(acc_o, tOrP, tOrVt, tOsVt, tiled_mma, smem_thr_copy_V); } // Epilogue // Reshape acc_o from (MMA=4, MMA_M, MMA_K) to (nrow=(2, MMA_M), ncol=(2, MMA_K)) Tensor acc_o_rowcol = make_tensor(acc_o.data(), flash::convert_layout_acc_rowcol(acc_o.layout())); Tensor lse = make_fragment_like(scores_sum); #pragma unroll for (int mi = 0; mi < size<0>(acc_o_rowcol); ++mi) { float sum = scores_sum(mi); float inv_sum = (sum == 0.f || sum != sum) ? 1.f : 1.f / sum; lse(mi) = (sum == 0.f || sum != sum) ? INFINITY : scores_max(mi) * params.scale_softmax + __logf(sum); float scale = !Is_dropout ? inv_sum : inv_sum * params.rp_dropout; #pragma unroll for (int ni = 0; ni < size<1>(acc_o_rowcol); ++ni) { acc_o_rowcol(mi, ni) *= scale; } } // if (cute::thread0()) { print(acc_o_rowcol); } // Convert acc_o from fp32 to fp16/bf16 Tensor rO = flash::convert_type<Element>(acc_o); Tensor sO = make_tensor(sQ.data(), typename Kernel_traits::SmemLayoutO{}); // (SMEM_M,SMEM_N) // Partition sO to match the accumulator partitioning auto smem_thr_copy_O = make_tiled_copy_C(typename Kernel_traits::SmemCopyAtomO{}, tiled_mma).get_thread_slice(tidx); // auto smem_thr_copy_O = make_tiled_copy_C_warpcontiguousM<MMA_M>(typename Kernel_traits::SmemCopyAtomO{}, tiled_mma).get_thread_slice(tidx); Tensor taccOrO = smem_thr_copy_O.retile_S(rO); // ((Atom,AtomNum), MMA_M, MMA_N) Tensor taccOsO = smem_thr_copy_O.partition_D(sO); // ((Atom,AtomNum),PIPE_M,PIPE_N) // sO has the same size as sQ, so we don't need to sync here. 
if (Kernel_traits::Share_Q_K_smem) { __syncthreads(); }

    copy(smem_thr_copy_O, taccOrO, taccOsO);

    const index_t row_offset_o = binfo.q_offset(params.o_batch_stride, params.o_row_stride, bidb)
        + m_block * kBlockM * params.o_row_stride + bidh * params.o_head_stride;
    const index_t row_offset_lse = (bidb * params.h + bidh) * params.seqlen_q + m_block * kBlockM;
    Tensor gO = make_tensor(make_gmem_ptr(reinterpret_cast<Element *>(params.o_ptr) + row_offset_o),
                            Shape<Int<kBlockM>, Int<kHeadDim>>{},
                            make_stride(params.o_row_stride, _1{}));
    Tensor gLSE = make_tensor(make_gmem_ptr(reinterpret_cast<ElementAccum *>(params.softmax_lse_ptr) + row_offset_lse),
                              Shape<Int<kBlockM>>{}, Stride<_1>{});

    auto gmem_thr_copy_O = typename Kernel_traits::GmemTiledCopyO{}.get_thread_slice(tidx);
    Tensor tOsO = gmem_thr_copy_O.partition_S(sO);        // ((Atom,AtomNum),ATOM_M,ATOM_N)
    Tensor tOgO = gmem_thr_copy_O.partition_D(gO);

    __syncthreads();

    Tensor tOrO = make_tensor<Element>(shape(tOgO));
    copy(gmem_thr_copy_O, tOsO, tOrO);

    Tensor caccO = make_identity_tensor(Shape<Int<kBlockM>, Int<kHeadDim>>{});    // (BLK_M,BLK_K) -> (blk_m,blk_k)
    Tensor taccOcO = thr_mma.partition_C(caccO);                           // (MMA,MMA_M,MMA_K)
    static_assert(decltype(size<0>(taccOcO))::value == 4);
    // Convert to ((2, 2), MMA_M, MMA_K) then take only the row indices.
    Tensor taccOcO_row = logical_divide(taccOcO, Shape<_2>{})(make_coord(0, _), _, 0);
    CUTE_STATIC_ASSERT_V(size(lse) == size(taccOcO_row));                     // MMA_M
    if (get<1>(taccOcO_row(0)) == 0) {
        #pragma unroll
        for (int mi = 0; mi < size(lse); ++mi) {
            const int row = get<0>(taccOcO_row(mi));
            if (row < binfo.actual_seqlen_q - m_block * kBlockM) { gLSE(row) = lse(mi); }
        }
    }

    // Construct identity layout for sO
    Tensor cO = make_identity_tensor(make_shape(size<0>(sO), size<1>(sO)));    // (BLK_M,BLK_K) -> (blk_m,blk_k)
    // Repeat the partitioning with identity layouts
    Tensor tOcO = gmem_thr_copy_O.partition_D(cO);                           // (ACPY,ACPY_M,ACPY_K) -> (blk_m,blk_k)
    Tensor tOpO = make_tensor<bool>(make_shape(size<2>(tOgO)));
    if (!Is_even_K) {
        #pragma unroll
        for (int k = 0; k < size(tOpO); ++k) { tOpO(k) = get<1>(tOcO(0, 0, k)) < params.d; }
    }
    // Clear_OOB_K must be false since we don't want to write zeros to gmem
    flash::copy</*Is_even_MN=*/false, Is_even_K, /*Clear_OOB_MN=*/false, /*Clear_OOB_K=*/false>(
        gmem_thr_copy_O, tOrO, tOgO, tOcO, tOpO, binfo.actual_seqlen_q - m_block * kBlockM
    );

}

////////////////////////////////////////////////////////////////////////////////////////////////////

template<typename Kernel_traits, bool Is_dropout, bool Is_causal, bool Is_even_N, bool Is_even_K, bool Return_softmax, typename Params>
inline __device__ void compute_attn(const Params &params) {
    const int m_block = blockIdx.x;   // The block index along the query rows.
    const int bidb = blockIdx.y;      // The block index for the batch.
    const int bidh = blockIdx.z;      // The block index for the head.

    // We want the fwd and bwd to generate the same dropout pattern (RNG), without restricting
    // them to have the same number of threads or to traverse the attention matrix
    // in the same order.
    // In the Philox RNG, we use the offset to store the batch, head, and the lane id
    // (within a warp). We use the subsequence to store the location of the 16 x 32 blocks within
    // the attention matrix. This way, as long as we have the batch, head, and the location of
    // the 16 x 32 block within the attention matrix, we can generate the exact same dropout pattern.
flash::compute_attn_1rowblock<Kernel_traits, Is_dropout, Is_causal, Is_even_N, Is_even_K, Return_softmax>(params, bidb, bidh, m_block); } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace flash
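Both main loops in compute_attn_1rowblock funnel their partial results through softmax_rescale_o and the LSE epilogue. As a reading aid, here is a minimal scalar sketch of the streaming-softmax recurrence those calls implement in vectorized, per-row form; streaming_softmax_dot is a hypothetical illustration-only helper, not part of this kernel.

// Illustration only: the scalar recurrence behind softmax_rescale_o and the
// epilogue's inv_sum scaling. Each new score can raise the running max, in
// which case the previously accumulated sum and output are rescaled by
// exp(old_max - new_max) before the new term is added.
#include <cmath>
#include <cstddef>
#include <limits>

float streaming_softmax_dot(const float *scores, const float *values, std::size_t n) {
    float running_max = -std::numeric_limits<float>::infinity();
    float running_sum = 0.f, acc = 0.f;
    for (std::size_t i = 0; i < n; ++i) {
        float new_max = std::fmax(running_max, scores[i]);
        float rescale = std::exp(running_max - new_max);  // shrink old partials
        float p = std::exp(scores[i] - new_max);
        running_sum = running_sum * rescale + p;
        acc = acc * rescale + p * values[i];
        running_max = new_max;
    }
    return acc / running_sum;  // mirrors the epilogue's multiply by inv_sum
}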
0
hf_public_repos/candle/candle-flash-attn
hf_public_repos/candle/candle-flash-attn/kernels/utils.h
/******************************************************************************
 * Copyright (c) 2023, Tri Dao.
 ******************************************************************************/

#pragma once

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

#include <cuda_fp16.h>

#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
#include <cuda_bf16.h>
#endif

#include <cute/algorithm/copy.hpp>
#include <cute/algorithm/gemm.hpp>

#include <cutlass/array.h>
#include <cutlass/cutlass.h>
#include <cutlass/numeric_conversion.h>
#include <cutlass/numeric_types.h>

////////////////////////////////////////////////////////////////////////////////////////////////////

namespace flash {

////////////////////////////////////////////////////////////////////////////////////////////////////

template<typename T>
inline __device__ uint32_t relu2(const uint32_t x);

template<>
inline __device__ uint32_t relu2<cutlass::half_t>(const uint32_t x) {
    uint32_t res;
    const uint32_t zero = 0u;
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
    asm volatile("max.f16x2 %0, %1, %2;\n" : "=r"(res) : "r"(x), "r"(zero));
#else
    asm volatile( \
        "{\n" \
        "\t .reg .f16x2 sela;\n" \
        "\t set.gtu.u32.f16x2 sela, %1, %2;\n" \
        "\t and.b32 %0, sela, %1;\n"
        "}\n" : "=r"(res) : "r"(x), "r"(zero));
#endif
    return res;
}

#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
template<>
inline __device__ uint32_t relu2<cutlass::bfloat16_t>(const uint32_t x) {
    uint32_t res;
    const uint32_t zero = 0u;
    asm volatile("max.bf16x2 %0, %1, %2;\n" : "=r"(res) : "r"(x), "r"(zero));
    return res;
}
#endif

////////////////////////////////////////////////////////////////////////////////////////////////////

#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
template<typename T>
inline __device__ uint32_t convert_relu2(const float2 x);

template<>
inline __device__ uint32_t convert_relu2<cutlass::half_t>(const float2 x) {
    uint32_t res;
    const uint32_t a = reinterpret_cast<const uint32_t&>(x.x);
    const uint32_t b = reinterpret_cast<const uint32_t&>(x.y);
    asm volatile("cvt.rn.relu.f16x2.f32 %0, %1, %2;\n" : "=r"(res) : "r"(b), "r"(a));
    return res;
}

template<>
inline __device__ uint32_t convert_relu2<cutlass::bfloat16_t>(const float2 x) {
    uint32_t res;
    const uint32_t a = reinterpret_cast<const uint32_t&>(x.x);
    const uint32_t b = reinterpret_cast<const uint32_t&>(x.y);
    asm volatile("cvt.rn.relu.bf16x2.f32 %0, %1, %2;\n" : "=r"(res) : "r"(b), "r"(a));
    return res;
}
#endif

////////////////////////////////////////////////////////////////////////////////////////////////////

template<typename T>
inline __device__ float2 half2_unpack(uint32_t a);

template <>
inline __device__ float2 half2_unpack<__half>(uint32_t a) {
    return __half22float2(reinterpret_cast<__half2 (&)>(a));
}

#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800
template <>
inline __device__ float2 half2_unpack<__nv_bfloat16>(uint32_t a) {
    return __bfloat1622float2(reinterpret_cast<__nv_bfloat162 (&)>(a));
}
#endif

////////////////////////////////////////////////////////////////////////////////////////////////////

// Convert two half2's or bf162's into floats, then take their dot product.
template <typename T>
inline __device__ float hfma2_to_float(const uint32_t a, const uint32_t b) {
    float2 af = flash::half2_unpack<T>(a);
    float2 bf = flash::half2_unpack<T>(b);
    return af.x * bf.x + af.y * bf.y;
}

////////////////////////////////////////////////////////////////////////////////////////////////////

// Convert two vectors of 8 halfs or bf16s into floats, then take their dot product.
template<typename T> inline __device__ float hmulsum8(const uint4 a, const uint4 b) { float sum; sum = flash::hfma2_to_float<T>(a.x, b.x); sum += flash::hfma2_to_float<T>(a.y, b.y); sum += flash::hfma2_to_float<T>(a.z, b.z); sum += flash::hfma2_to_float<T>(a.w, b.w); return sum; } //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> struct MaxOp { __device__ inline T operator()(T const & x, T const & y) { return x > y ? x : y; } }; template <> struct MaxOp<float> { // This is slightly faster __device__ inline float operator()(float const &x, float const &y) { return max(x, y); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename T> struct SumOp { __device__ inline T operator()(T const & x, T const & y) { return x + y; } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<int THREADS> struct Allreduce { static_assert(THREADS == 32 || THREADS == 16 || THREADS == 8 || THREADS == 4); template<typename T, typename Operator> static __device__ inline T run(T x, Operator &op) { constexpr int OFFSET = THREADS / 2; x = op(x, __shfl_xor_sync(uint32_t(-1), x, OFFSET)); return Allreduce<OFFSET>::run(x, op); } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<> struct Allreduce<2> { template<typename T, typename Operator> static __device__ inline T run(T x, Operator &op) { x = op(x, __shfl_xor_sync(uint32_t(-1), x, 1)); return x; } }; //////////////////////////////////////////////////////////////////////////////////////////////////// template<bool A_in_regs=false, bool B_in_regs=false, typename Tensor0, typename Tensor1, typename Tensor2, typename Tensor3, typename Tensor4, typename TiledMma, typename TiledCopy0, typename TiledCopy1> inline __device__ void gemm(Tensor0 &acc, Tensor1 &tCrA, Tensor2 &tCrB, Tensor3 const& tCsA, Tensor4 const& tCsB, TiledMma tiled_mma, TiledCopy0 smem_thr_copy_A, TiledCopy1 smem_thr_copy_B) { CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(acc)); // MMA_M CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(acc)); // MMA_N CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K Tensor tCrA_copy_view = smem_thr_copy_A.retile_D(tCrA); CUTE_STATIC_ASSERT_V(size<1>(tCsA) == size<1>(tCrA_copy_view)); // M Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB); CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // N if (!A_in_regs) { copy(smem_thr_copy_A, tCsA(_, _, _0{}), tCrA_copy_view(_, _, _0{})); } if (!B_in_regs) { copy(smem_thr_copy_B, tCsB(_, _, _0{}), tCrB_copy_view(_, _, _0{})); } #pragma unroll for (int i = 0; i < size<2>(tCrA); ++i) { if (i < size<2>(tCrA) - 1) { if (!A_in_regs) { copy(smem_thr_copy_A, tCsA(_, _, i + 1), tCrA_copy_view(_, _, i + 1)); } if (!B_in_regs) { copy(smem_thr_copy_B, tCsB(_, _, i + 1), tCrB_copy_view(_, _, i + 1)); } } cute::gemm(tiled_mma, tCrA(_, _, i), tCrB(_, _, i), acc); } } //////////////////////////////////////////////////////////////////////////////////////////////////// template<typename Tensor0, typename Tensor1, typename Tensor2, typename Tensor3, typename TiledMma, typename TiledCopy> inline __device__ void gemm_A_in_regs(Tensor0 &acc, Tensor1 &tCrA, Tensor2 &tCrB, Tensor3 const& tCsB, TiledMma tiled_mma, TiledCopy smem_thr_copy_B) { CUTE_STATIC_ASSERT_V(size<1>(tCrA) == size<1>(acc)); // MMA_M CUTE_STATIC_ASSERT_V(size<1>(tCrB) == size<2>(acc)); // MMA_N 
CUTE_STATIC_ASSERT_V(size<2>(tCrA) == size<2>(tCrB)); // MMA_K Tensor tCrB_copy_view = smem_thr_copy_B.retile_D(tCrB); CUTE_STATIC_ASSERT_V(size<1>(tCsB) == size<1>(tCrB_copy_view)); // N copy(smem_thr_copy_B, tCsB(_, _, _0{}), tCrB_copy_view(_, _, _0{})); #pragma unroll for (int i = 0; i < size<2>(tCrA); ++i) { if (i < size<2>(tCrA) - 1) { copy(smem_thr_copy_B, tCsB(_, _, i + 1), tCrB_copy_view(_, _, i + 1)); } cute::gemm(tiled_mma, tCrA(_, _, i), tCrB(_, _, i), acc); } } //////////////////////////////////////////////////////////////////////////////////////////////////// // Convert acc_layout from (MMA=4, MMA_M, MMA_N) to (nrow=(2, MMA_M), ncol=(2, MMA_N)) template<typename Layout> inline __device__ auto convert_layout_acc_rowcol(Layout acc_layout) { static_assert(decltype(size<0>(acc_layout))::value == 4); static_assert(decltype(rank(acc_layout))::value == 3); auto l = logical_divide(acc_layout, Shape<_2>{}); // ((2, 2), MMA_M, MMA_N) return make_layout(make_layout(get<0, 1>(l), get<1>(l)), make_layout(get<0, 0>(l), get<2>(l))); }; //////////////////////////////////////////////////////////////////////////////////////////////////// // Convert rowcol_layout from (nrow=(2, MMA_M), ncol=(2, MMA_N)) to ((2, 2, 2), MMA_M, MMA_N / 2) // if using m16n8k16, or to ((2, 2, 1), MMA_M, MMA_N) if using m16n8k8. template<typename MMA_traits, typename Layout> inline __device__ auto convert_layout_rowcol_Aregs(Layout rowcol_layout) { using X = Underscore; static_assert(decltype(size<0, 0>(rowcol_layout))::value == 2); static_assert(decltype(size<1, 0>(rowcol_layout))::value == 2); constexpr int mma_shape_K = get<2>(typename MMA_traits::Shape_MNK{}); static_assert(mma_shape_K == 8 || mma_shape_K == 16); constexpr int MMA_N_divisor = mma_shape_K == 8 ? 1 : 2; auto l = logical_divide(rowcol_layout, Shape<X, Shape<X, Int<MMA_N_divisor>>>{}); // ((2, MMA_M), (2, (2, MMA_N / 2))) return make_layout(make_layout(get<1, 0>(l), get<0, 0>(l), get<1, 1, 0>(l)), get<0, 1>(l), get<1, 1, 1>(l)); }; //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename To_type, typename Engine, typename Layout> inline __device__ auto convert_type(Tensor<Engine, Layout> const &tensor) { using From_type = typename Engine::value_type; constexpr int numel = decltype(size(tensor))::value; cutlass::NumericArrayConverter<To_type, From_type, numel> convert_op; // HACK: this requires tensor to be "contiguous" auto frag = convert_op(*reinterpret_cast<const cutlass::Array<From_type, numel> *>(tensor.data())); return make_tensor(make_rmem_ptr<To_type>(&frag), tensor.layout()); } //////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Engine, typename Layout> inline __device__ void relu_(Tensor<Engine, Layout> &tensor) { constexpr int numel = decltype(size(tensor))::value; static_assert(numel % 2 == 0); using value_t = typename Engine::value_type; // HACK: this requires tensor to be "contiguous" Tensor tensor_uint32 = recast<uint32_t>(tensor); #pragma unroll for (int i = 0; i < size(tensor_uint32); ++i) { tensor_uint32(i) = relu2<value_t>(tensor_uint32(i)); } } //////////////////////////////////////////////////////////////////////////////////////////////////// // On SM80 and above, we can fuse fp32 -> fp16/bf16 conversion and relu into 1 instruction template <typename To_type, typename Engine, typename Layout> inline __device__ auto convert_type_relu(Tensor<Engine, Layout> const &tensor) { using From_type = typename 
Engine::value_type; static_assert(std::is_same_v<To_type, cutlass::half_t> || std::is_same_v<To_type, cutlass::bfloat16_t>); static_assert(std::is_same_v<float, From_type>); constexpr int numel = decltype(size(tensor))::value; static_assert(numel % 2 == 0); #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 800 // HACK: this requires tensor to be "contiguous" Tensor tensor_float2 = recast<float2>(tensor); Tensor out_uint32 = make_tensor<uint32_t>(tensor_float2.layout()); #pragma unroll for (int i = 0; i < size(out_uint32); ++i) { out_uint32(i) = convert_relu2<To_type>(tensor_float2(i)); } Tensor out = make_tensor(make_rmem_ptr<To_type>(out_uint32.data()), tensor.layout()); #else Tensor out = flash::convert_type<To_type>(tensor); flash::relu_(out); #endif return out; } //////////////////////////////////////////////////////////////////////////////////////////////////// // Blocks until all but N previous cp.async.commit_group operations have committed. // This differs from cute::cp_async_wait in that when N = 0 we don't call cp.async.wait_all // (which is equivalent to commit_group then wait_group 0). // Instead we just call cp.async.wait_group 0, which is slightly faster. // https://github.com/NVIDIA/cutlass/blob/master/include/cute/arch/copy_sm80.hpp#L113 template <int N> CUTE_HOST_DEVICE void cp_async_wait() { #if defined(CUTE_ARCH_CP_ASYNC_SM80_ENABLED) asm volatile("cp.async.wait_group %0;\n" :: "n"(N)); #endif } //////////////////////////////////////////////////////////////////////////////////////////////////// template <bool Is_even_MN=true, bool Is_even_K=true, bool Clear_OOB_MN=false, bool Clear_OOB_K=true, typename TiledCopy, typename Engine0, typename Layout0, typename Engine1, typename Layout1, typename Engine2, typename Layout2, typename Engine3, typename Layout3> inline __device__ void copy(TiledCopy thr_copy, Tensor<Engine0, Layout0> const &S, Tensor<Engine1, Layout1> &D, Tensor<Engine2, Layout2> const &identity_MN, Tensor<Engine3, Layout3> const &predicate_K, int max_MN=0) { CUTE_STATIC_ASSERT_V(rank(S) == Int<3>{}); CUTE_STATIC_ASSERT_V(rank(D) == Int<3>{}); CUTE_STATIC_ASSERT_V(size<0>(S) == size<0>(D)); // MMA CUTE_STATIC_ASSERT_V(size<1>(S) == size<1>(D)); // MMA_M CUTE_STATIC_ASSERT_V(size<2>(S) == size<2>(D)); // MMA_K // There's no case where !Clear_OOB_K && Clear_OOB_MN static_assert(!(Clear_OOB_MN && !Clear_OOB_K)); #pragma unroll for (int m = 0; m < size<1>(S); ++m) { if (Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN) { #pragma unroll for (int k = 0; k < size<2>(S); ++k) { if (Is_even_K || predicate_K(k)) { copy(thr_copy, S(_, m, k), D(_, m, k)); } else if (Clear_OOB_K) { clear(D(_, m, k)); } } } else if (Clear_OOB_MN) { clear(D(_, m, _)); } } // TD [2023-04-13]: Strange that the code below can cause race condition. // I think it's because the copies are under an if statement. 
// if (Is_even_K) { // #pragma unroll // for (int m = 0; m < size<1>(S); ++m) { // if (Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN) { // copy(thr_copy, S(_, m, _), D(_, m, _)); // } else if (Clear_OOB_MN) { // clear(D(_, m, _)); // } // } // } else { // It's slightly faster in this case if iterate over K first // #pragma unroll // for (int k = 0; k < size<2>(S); ++k) { // if (predicate_K(k)) { // #pragma unroll // for (int m = 0; m < size<1>(S); ++m) { // if (Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN) { // copy(thr_copy, S(_, m, k), D(_, m, k)); // } else if (Clear_OOB_MN) { // clear(D(_, m, k)); // } // } // } else if (Clear_OOB_K) { // There's no case where !Clear_OOB_K && Clear_OOB_MN // if (Clear_OOB_MN || Is_even_MN) { // clear(D(_, _, k)); // } else { // #pragma unroll // for (int m = 0; m < size<1>(S); ++m) { // if (!(Is_even_MN || get<0>(identity_MN(0, m, 0)) < max_MN)) { // clear(D(_, m, k)); // } // } // } // } // } // } } //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace flash
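A hedged illustration of the Allreduce butterfly defined above: with THREADS = 32 the XOR offset halves each step (16, 8, 4, 2, 1), so after log2(32) = 5 shuffles every lane holds the full-warp result. The demo kernel below (warp_max_demo is an illustrative name, not part of this header) uses the helpers as-is and expects a single-warp launch, e.g. warp_max_demo<<<1, 32>>>(in, out).

// Demo only: reduce 32 floats to their warp-wide maximum using MaxOp and
// Allreduce from this header.
__global__ void warp_max_demo(const float *in, float *out) {
    float x = in[threadIdx.x];
    flash::MaxOp<float> op;
    x = flash::Allreduce<32>::run(x, op);  // butterfly over offsets 16, 8, 4, 2, 1
    if (threadIdx.x == 0) { *out = x; }    // every lane now holds the same max
}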
0
hf_public_repos/candle/candle-flash-attn
hf_public_repos/candle/candle-flash-attn/kernels/flash_fwd_hdim224_bf16_sm80.cu
// Copyright (c) 2023, Tri Dao. // Splitting the different head dimensions to different files to speed up compilation. #include "flash_fwd_launch_template.h" template<> void run_mha_fwd_<cutlass::bfloat16_t, 224>(Flash_fwd_params &params, cudaStream_t stream) { run_mha_fwd_hdim224<cutlass::bfloat16_t>(params, stream); }
0
hf_public_repos/candle/candle-flash-attn
hf_public_repos/candle/candle-flash-attn/kernels/block_info.h
/****************************************************************************** * Copyright (c) 2023, Tri Dao. ******************************************************************************/ #pragma once namespace flash { //////////////////////////////////////////////////////////////////////////////////////////////////// template<bool Varlen=true> struct BlockInfo { template<typename Params> __device__ BlockInfo(const Params &params, const int bidb) : sum_s_q(!Varlen || params.cu_seqlens_q == nullptr ? -1 : params.cu_seqlens_q[bidb]) , sum_s_k(!Varlen || params.cu_seqlens_k == nullptr ? -1 : params.cu_seqlens_k[bidb]) , actual_seqlen_q(!Varlen || params.cu_seqlens_q == nullptr ? params.seqlen_q : params.cu_seqlens_q[bidb + 1] - sum_s_q) , actual_seqlen_k(!Varlen || params.cu_seqlens_k == nullptr ? params.seqlen_k : params.cu_seqlens_k[bidb + 1] - sum_s_k) { } template <typename index_t> inline __device__ index_t q_offset(const index_t batch_stride, const index_t row_stride, const int bidb) const { return sum_s_q == -1 ? bidb * batch_stride : uint32_t(sum_s_q) * row_stride; } template <typename index_t> inline __device__ index_t k_offset(const index_t batch_stride, const index_t row_stride, const int bidb) const { return sum_s_k == -1 ? bidb * batch_stride : uint32_t(sum_s_k) * row_stride; } const int sum_s_q; const int sum_s_k; const uint32_t actual_seqlen_q; const uint32_t actual_seqlen_k; }; //////////////////////////////////////////////////////////////////////////////////////////////////// } // namespace flash
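A short worked example of the varlen bookkeeping above (the numbers are illustrative):

// With Varlen = true and cu_seqlens_q = {0, 17, 40}, batch index bidb = 1
// yields sum_s_q = 17 and actual_seqlen_q = 40 - 17 = 23; q_offset then
// returns 17 * row_stride, i.e. the sequences are packed back to back along
// the row dimension. With cu_seqlens_q == nullptr, sum_s_q stays -1 and
// q_offset falls back to the padded layout's bidb * batch_stride.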
0
hf_public_repos/candle/candle-flash-attn
hf_public_repos/candle/candle-flash-attn/kernels/flash_api.cu
#include "flash_fwd_launch_template.h" // void run_mha_fwd(Flash_fwd_params &params, cudaStream_t stream) { // FWD_HEADDIM_SWITCH(params.d, [&] { // run_mha_fwd_<cutlass::half_t, kHeadDim>(params, stream); // }); // } void run_mha_fwd(Flash_fwd_params &params, cudaStream_t stream) { FP16_SWITCH(!params.is_bf16, [&] { FWD_HEADDIM_SWITCH(params.d, [&] { run_mha_fwd_<elem_type, kHeadDim>(params, stream); }); }); } extern "C" void run_mha( void *q_ptr, void *k_ptr, void *v_ptr, void *o_ptr, void *softmax_lse_ptr, int32_t *cu_seqlens_q_ptr, int32_t *cu_seqlens_k_ptr, uint32_t q_batch_stride, uint32_t k_batch_stride, uint32_t v_batch_stride, uint32_t o_batch_stride, uint32_t q_row_stride, uint32_t k_row_stride, uint32_t v_row_stride, uint32_t o_row_stride, uint32_t q_head_stride, uint32_t k_head_stride, uint32_t v_head_stride, uint32_t o_head_stride, uint32_t b, uint32_t h, uint32_t h_k, uint32_t d, uint32_t d_rounded, float softmax_scale, uint32_t seqlen_q, uint32_t seqlen_k, uint32_t seqlen_q_rounded, uint32_t seqlen_k_rounded, int is_causal, int is_bf16 ) { Flash_fwd_params params; // Reset the parameters memset(&params, 0, sizeof(params)); // Set the pointers and strides. params.q_ptr = q_ptr; params.k_ptr = k_ptr; params.v_ptr = v_ptr; params.o_ptr = o_ptr; params.softmax_lse_ptr = softmax_lse_ptr; // All stride are in elements, not bytes. params.q_batch_stride = q_batch_stride; params.k_batch_stride = k_batch_stride; params.v_batch_stride = v_batch_stride; params.o_batch_stride = o_batch_stride; params.q_row_stride = q_row_stride; params.k_row_stride = k_row_stride; params.v_row_stride = v_row_stride; params.o_row_stride = o_row_stride; params.q_head_stride = q_head_stride; params.k_head_stride = k_head_stride; params.v_head_stride = v_head_stride; params.o_head_stride = o_head_stride; // Set the dimensions. params.b = b; params.h = h; params.h_k = h_k; params.h_h_k_ratio = h / h_k; params.seqlen_q = seqlen_q; params.seqlen_k = seqlen_k; params.seqlen_q_rounded = seqlen_q_rounded; params.seqlen_k_rounded = seqlen_k_rounded; params.d = d; params.d_rounded = d_rounded; params.is_causal = is_causal; // Set the different scale values. params.scale_softmax = softmax_scale; params.scale_softmax_log2 = softmax_scale * M_LOG2E; params.p_dropout = 1.; // probability to keep params.p_dropout_in_uint8_t = uint8_t(std::floor(params.p_dropout * 255.0)); params.rp_dropout = 1.f / params.p_dropout; params.scale_softmax_rp_dropout = params.rp_dropout * params.scale_softmax; params.is_bf16 = is_bf16; params.cu_seqlens_q = cu_seqlens_q_ptr; params.cu_seqlens_k = cu_seqlens_k_ptr; params.p_ptr = nullptr; // used for `return_softmax`. cudaStream_t stream = 0; // Use the default stream. run_mha_fwd(params, stream); }
0
hf_public_repos/candle/candle-flash-attn
hf_public_repos/candle/candle-flash-attn/kernels/flash_fwd_hdim128_bf16_sm80.cu
// Copyright (c) 2023, Tri Dao. // Splitting the different head dimensions to different files to speed up compilation. #include "flash_fwd_launch_template.h" // template<> // void run_mha_fwd_<cutlass::bfloat16_t, 128>(Flash_fwd_params &params, cudaStream_t stream) { // using elem_type = cutlass::bfloat16_t; // if (params.p_dropout == 1.f) { // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, false, false, elem_type>, false>(params, stream); // } else { // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, false, false, elem_type>, true>(params, stream); // } // } template<> void run_mha_fwd_<cutlass::bfloat16_t, 128>(Flash_fwd_params &params, cudaStream_t stream) { run_mha_fwd_hdim128<cutlass::bfloat16_t>(params, stream); }
0
hf_public_repos/candle/candle-flash-attn
hf_public_repos/candle/candle-flash-attn/kernels/flash_fwd_hdim32_fp16_sm80.cu
// Copyright (c) 2023, Tri Dao. // Splitting the different head dimensions to different files to speed up compilation. #include "flash_fwd_launch_template.h" // template<> // void run_mha_fwd_<cutlass::half_t, 32>(Flash_fwd_params &params, cudaStream_t stream) { // using elem_type = cutlass::half_t; // BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { // run_flash_fwd<Flash_fwd_kernel_traits<32, 128, 128, 4, false, false, elem_type>, Is_dropout>(params, stream); // // For dropout there might be a lot of register spilling? // // These two are very slow due to register spilling // // run_flash_fwd<Flash_fwd_kernel_traits<32, 256, 128, 4, false, elem_type>>(params, stream); // // run_flash_fwd<Flash_fwd_kernel_traits<32, 128, 256, 4, false, elem_type>>(params, stream); // // This one is slightly slower // // run_flash_fwd<Flash_fwd_kernel_traits<32, 256, 64, 4, false, elem_type>>(params, stream); // }); // } template<> void run_mha_fwd_<cutlass::half_t, 32>(Flash_fwd_params &params, cudaStream_t stream) { run_mha_fwd_hdim32<cutlass::half_t>(params, stream); }
0
hf_public_repos/candle/candle-flash-attn
hf_public_repos/candle/candle-flash-attn/kernels/flash_fwd_launch_template.h
/****************************************************************************** * Copyright (c) 2023, Tri Dao. ******************************************************************************/ #pragma once // #include <ATen/cuda/CUDAContext.h> #include "static_switch.h" #include "flash.h" #include "flash_fwd_kernel.h" template<typename Kernel_traits, bool Is_dropout, bool Is_causal, bool Is_even_N, bool Is_even_K, bool Return_softmax> __global__ void flash_fwd_kernel(Flash_fwd_params params) { flash::compute_attn<Kernel_traits, Is_dropout, Is_causal, Is_even_N, Is_even_K, Return_softmax>(params); } template<typename Kernel_traits, bool Is_dropout, bool Is_causal> void run_flash_fwd(Flash_fwd_params &params, cudaStream_t stream) { constexpr size_t smem_size = Kernel_traits::kSmemSize; // printf("smem_size = %d\n", smem_size); // Work-around for gcc 7. It doesn't like nested BOOL_SWITCH. // https://github.com/kokkos/kokkos-kernels/issues/349 // https://github.com/HazyResearch/flash-attention/issues/21 const int num_m_block = (params.seqlen_q + Kernel_traits::kBlockM - 1) / Kernel_traits::kBlockM; dim3 grid(num_m_block, params.b, params.h); // We also use is_even_N to set Unpadded in the BlockInfo constructor, so we need to check // for cu_seqlens_q as well. const bool is_even_N = params.cu_seqlens_q == nullptr && params.cu_seqlens_k == nullptr && params.seqlen_k % Kernel_traits::kBlockN == 0; const bool is_even_K = params.d == Kernel_traits::kHeadDim; const bool return_softmax = params.p_ptr != nullptr; BOOL_SWITCH(is_even_N, IsEvenNConst, [&] { BOOL_SWITCH(is_even_K, IsEvenKConst, [&] { BOOL_SWITCH(return_softmax, ReturnSoftmaxConst, [&] { // Will only return softmax if dropout, to reduce compilation time. auto kernel = &flash_fwd_kernel<Kernel_traits, Is_dropout, Is_causal, IsEvenNConst, IsEvenKConst, ReturnSoftmaxConst && Is_dropout>; // auto kernel = &flash_fwd_kernel<Kernel_traits, Is_dropout, Is_causal, IsEvenNConst, true, ReturnSoftmaxConst && Is_dropout>; // if (smem_size >= 48 * 1024) { // C10_CUDA_CHECK(cudaFuncSetAttribute( // kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size)); // } int ctas_per_sm; cudaError status_ = cudaOccupancyMaxActiveBlocksPerMultiprocessor( &ctas_per_sm, kernel, Kernel_traits::kNThreads, smem_size); // printf("smem_size = %d, CTAs per SM = %d\n", int(smem_size), ctas_per_sm); kernel<<<grid, Kernel_traits::kNThreads, smem_size, stream>>>(params); // C10_CUDA_KERNEL_LAUNCH_CHECK(); }); }); }); } template<typename T> void run_mha_fwd_hdim32(Flash_fwd_params &params, cudaStream_t stream) { constexpr int Headdim = 32; BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); }); }); } template<typename T> void run_mha_fwd_hdim64(Flash_fwd_params &params, cudaStream_t stream) { constexpr int Headdim = 64; BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { if constexpr(!Is_dropout) { // Using 8 warps is 18% slower for seqlen=2k, 2 warps is 5% slower // Using block size (64 x 256) is 27% slower for seqlen=2k // Using block size (256 x 64) is 85% slower for seqlen=2k, because of register spilling run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // 
run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } }); }); } template<typename T> void run_mha_fwd_hdim96(Flash_fwd_params &params, cudaStream_t stream) { constexpr int Headdim = 96; // auto dprops = at::cuda::getCurrentDeviceProperties(); bool is_sm8x = true; // dprops->major == 8 && dprops->minor > 0; BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { // For sm86 or sm89, 64 x 64 is the fastest for causal (because it's square), if (is_sm8x) { if constexpr(!Is_causal) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); // These two are always slower // run_flash_fwd<Flash_fwd_kernel_traits<96, 128, 128, 4, true, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<96, 64, 128, 4, true, T>>(params, stream); }); }); } template<typename T> void run_mha_fwd_hdim128(Flash_fwd_params &params, cudaStream_t stream) { constexpr int Headdim = 128; // auto dprops = at::cuda::getCurrentDeviceProperties(); bool is_sm8x = true; // dprops->major == 8 && dprops->minor > 0; BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { if constexpr(!Is_dropout) { // For sm86 or sm89, 64 x 64 is the fastest for causal (because it's square), // and 128 x 32 (48 KB smem) is the fastest for non-causal since we get 2 CTAs per SM. 
if (is_sm8x) { if constexpr(!Is_causal) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 128, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // Using 8 warps (128 x 128 and 256 x 64) is 28% slower for seqlen=2k // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); // 1st ones are good for H100, A100 // 2nd one is good for A6000 bc we get slightly better occupancy } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, true, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, true, true, T>, Is_dropout, Is_causal>(params, stream); } }); }); } template<typename T> void run_mha_fwd_hdim160(Flash_fwd_params &params, cudaStream_t stream) { constexpr int Headdim = 160; // auto dprops = at::cuda::getCurrentDeviceProperties(); bool is_sm8x = true; // dprops->major == 8 && dprops->minor > 0; BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { // For A100, H100, 128 x 32 is the fastest. // For sm86 or sm89, 64 x 64 is the fastest for causal (because it's square), // and 128 x 64 with 8 warps is the fastest for non-causal. 
if (is_sm8x) { if constexpr(!Is_causal) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, true, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 128, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 8, false, T>>(params, stream); }); }); } template<typename T> void run_mha_fwd_hdim192(Flash_fwd_params &params, cudaStream_t stream) { constexpr int Headdim = 192; BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { if constexpr(!Is_dropout) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 128, 4, false, T>>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 128, 8, false, T>>(params, stream); }); }); } template<typename T> void run_mha_fwd_hdim224(Flash_fwd_params &params, cudaStream_t stream) { constexpr int Headdim = 224; int device; cudaGetDevice(&device); int max_smem_per_block; cudaError status_ = cudaDeviceGetAttribute( &max_smem_per_block, cudaDevAttrMaxSharedMemoryPerBlockOptin, device); // printf("max_smem_per_block = %d\n", max_smem_per_block); BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { if (max_smem_per_block >= 2 * Headdim * (128 + 2 * 64)) { // 112 KB run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // We can't do 128 x 32 with 8 warps because with headdim 224, kBlockKSmem = 32. // If we have N = 32, there are only 1024 elements to load at once, where each load // is 8 elements. This means we can only use 128 threads and not 256 threads. 
// run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); }); }); } template<typename T> void run_mha_fwd_hdim256(Flash_fwd_params &params, cudaStream_t stream) { constexpr int Headdim = 256; int device; cudaGetDevice(&device); int max_smem_per_sm, max_smem_per_block; cudaError status_ = cudaDeviceGetAttribute( &max_smem_per_sm, cudaDevAttrMaxSharedMemoryPerMultiprocessor, device); status_ = cudaDeviceGetAttribute( &max_smem_per_block, cudaDevAttrMaxSharedMemoryPerBlockOptin, device); // printf("max_smem_per_sm = %d, max_smem_per_block = %d\n", max_smem_per_sm, max_smem_per_block); BOOL_SWITCH(params.p_dropout < 1.f, Is_dropout, [&] { BOOL_SWITCH(params.is_causal, Is_causal, [&] { // For A100, we want to run with 128 x 64 (128KB smem). // For H100 we want to run with 64 x 64 (96KB smem) since then we can get 2 CTAs per SM. if (max_smem_per_block >= 2 * Headdim * (128 + 2 * 64) && max_smem_per_sm < 4 * Headdim * (64 + 2 * 64)) { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 64, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); } else { run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 64, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); } // 64 KB // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 64, 32, 4, false, false, T>, Is_dropout, Is_causal>(params, stream); // 96 KB // run_flash_fwd<Flash_fwd_kernel_traits<Headdim, 128, 32, 8, false, false, T>, Is_dropout, Is_causal>(params, stream); }); }); }
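A worked check of the shared-memory budgets used in the dispatch conditions above (illustrative arithmetic only):

// The smem tiles are Q (kBlockM x Headdim) plus K and V (kBlockN x Headdim
// each), at two bytes per fp16/bf16 element. For Headdim = 224 with
// 128 x 64 tiles: 2 * 224 * (128 + 2 * 64) = 114688 bytes = 112 KB.
// For Headdim = 256: 2 * 256 * (128 + 2 * 64) = 131072 bytes = 128 KB,
// while the 64 x 64 fallback needs 2 * 256 * (64 + 2 * 64) = 98304 bytes
// = 96 KB, which is what allows two CTAs per SM on H100.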
0
hf_public_repos/candle/candle-flash-attn
hf_public_repos/candle/candle-flash-attn/kernels/flash_fwd_hdim32_bf16_sm80.cu
// Copyright (c) 2023, Tri Dao. // Splitting the different head dimensions to different files to speed up compilation. #include "flash_fwd_launch_template.h" template<> void run_mha_fwd_<cutlass::bfloat16_t, 32>(Flash_fwd_params &params, cudaStream_t stream) { run_mha_fwd_hdim32<cutlass::bfloat16_t>(params, stream); }
0
hf_public_repos/candle/candle-flash-attn
hf_public_repos/candle/candle-flash-attn/kernels/flash_fwd_hdim128_fp16_sm80.cu
// Copyright (c) 2023, Tri Dao. // Splitting the different head dimensions to different files to speed up compilation. #include "flash_fwd_launch_template.h" // template<> // void run_mha_fwd_<cutlass::half_t, 128>(Flash_fwd_params &params, cudaStream_t stream) { // using elem_type = cutlass::half_t; // if (params.p_dropout == 1.f) { // // Using 8 warps (128 x 128 and 256 x 64) is 28% slower for seqlen=2k // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, false, false, elem_type>, false>(params, stream); // // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, true, false, elem_type>, false>(params, stream); // // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, false, true, elem_type>, false>(params, stream); // // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 64, 4, true, true, elem_type>, false>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, false, false, elem_type>, false>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<128, 64, 64, 4, false, false, elem_type>, false>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<128, 64, 128, 4, false, false, elem_type>, false>(params, stream); // // 1st ones are good for H100, A100 // // 2nd one is good for A6000 bc we get slightly better occupancy // } else { // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, false, false, elem_type>, true>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, true, false, elem_type>, true>(params, stream); // run_flash_fwd<Flash_fwd_kernel_traits<128, 128, 32, 4, true, true, elem_type>, true>(params, stream); // // 1st one is good for H100, A100, A6000 // } // } template<> void run_mha_fwd_<cutlass::half_t, 128>(Flash_fwd_params &params, cudaStream_t stream) { run_mha_fwd_hdim128<cutlass::half_t>(params, stream); }
0
hf_public_repos/candle/candle-flash-attn
hf_public_repos/candle/candle-flash-attn/kernels/flash_fwd_hdim224_fp16_sm80.cu
// Copyright (c) 2023, Tri Dao. // Splitting the different head dimensions to different files to speed up compilation. #include "flash_fwd_launch_template.h" template<> void run_mha_fwd_<cutlass::half_t, 224>(Flash_fwd_params &params, cudaStream_t stream) { run_mha_fwd_hdim224<cutlass::half_t>(params, stream); }
0