Dataset schema (one row per file):
- repo_id: string, 15 to 89 chars
- file_path: string, 27 to 180 chars
- content: string, 1 to 2.23M chars
- __index_level_0__: int64, constant 0
repo_id: hf_public_repos/candle/candle-core
file_path: hf_public_repos/candle/candle-core/tests/pool_tests.rs
content:
use candle_core::{test_device, test_utils, Device, IndexOp, Result, Tensor};

// https://github.com/huggingface/candle/issues/364
fn avg_pool2d(dev: &Device) -> Result<()> {
    let data: Vec<f32> = vec![
        1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
    ];
    let t = Tensor::from_vec(data, (1, 1, 4, 4), dev)?;
    let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?;
    assert_eq!(pool.to_vec2::<f32>()?, [[0.5f32, 1.], [1., 1.]]);

    let data: Vec<f32> = vec![
        1., 2., 1., 3., 0., 0., 1., 1., 1., 1., 1., 1., 5., 1., 1., 1.,
    ];
    let t = Tensor::from_vec(data, (1, 1, 2, 8), dev)?;
    let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?;
    assert_eq!(pool.to_vec2::<f32>()?, [[5. / 4., 6. / 4., 6. / 4., 1.]]);
    Ok(())
}

fn max_pool2d(dev: &Device) -> Result<()> {
    let data: Vec<f32> = vec![
        1., 2., 1., 3., 0., 0., 1., 1., 1., 1., 1., 1., 5., 1., 1., 1.,
    ];
    let t = Tensor::from_vec(data, (1, 1, 4, 4), dev)?;
    let pool = t.max_pool2d(2)?.squeeze(0)?.squeeze(0)?;
    assert_eq!(pool.to_vec2::<f32>()?, [[2f32, 3.], [5., 1.]]);

    let t = t.reshape((1, 1, 2, 8))?;
    let pool = t.max_pool2d(2)?.squeeze(0)?.squeeze(0)?;
    assert_eq!(pool.to_vec2::<f32>()?, [[2.0, 3.0, 5.0, 1.0]]);
    Ok(())
}

/* This test corresponds to the following PyTorch script.
import torch
torch.manual_seed(4242)

t = torch.randn((1, 2, 4, 4))
print(t.flatten())
res = torch.nn.functional.avg_pool2d(t, 2)
print(res)
*/
fn avg_pool2d_pytorch(dev: &Device) -> Result<()> {
    let t = Tensor::new(
        &[
            0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997,
            3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843,
            0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013,
            -0.6836, 0.2477, 1.3127,
        ],
        dev,
    )?
    .reshape((1, 2, 4, 4))?;
    let pool = t.avg_pool2d(2)?.squeeze(0)?;
    assert_eq!(
        test_utils::to_vec3_round(&pool, 4)?,
        [
            [[-1.1926, -0.0395], [0.2688, 0.1871]],
            [[0.1835, -0.1606], [0.6249, 0.3217]]
        ]
    );
    let pool = t.avg_pool2d(3)?.squeeze(0)?;
    assert_eq!(
        test_utils::to_vec3_round(&pool, 4)?,
        [[[0.085]], [[0.0078]]]
    );
    let t = t.reshape((1, 1, 4, 8))?;
    let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?;
    assert_eq!(
        test_utils::to_vec2_round(&pool, 4)?,
        [
            [0.7745, 0.0276, -1.6983, 0.12],
            [0.3542, 0.1625, 0.4542, -0.0014]
        ]
    );
    Ok(())
}

fn upsample_nearest2d(dev: &Device) -> Result<()> {
    let t = Tensor::arange(0f32, 6f32, dev)?.reshape((1, 1, 2, 3))?;
    let upsampled = t.upsample_nearest2d(4, 6)?.i(0)?.i(0)?;
    assert_eq!(
        t.i(0)?.i(0)?.to_vec2::<f32>()?,
        [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]
    );
    assert_eq!(
        upsampled.to_vec2::<f32>()?,
        [
            [0.0, 0.0, 1.0, 1.0, 2.0, 2.0],
            [0.0, 0.0, 1.0, 1.0, 2.0, 2.0],
            [3.0, 3.0, 4.0, 4.0, 5.0, 5.0],
            [3.0, 3.0, 4.0, 4.0, 5.0, 5.0]
        ]
    );
    Ok(())
}

test_device!(avg_pool2d, avg_pool2d_cpu, avg_pool2d_gpu, avg_pool2d_metal);
test_device!(
    avg_pool2d_pytorch,
    avg_pool2d_pytorch_cpu,
    avg_pool2d_pytorch_gpu,
    avg_pool2d_pytorch_metal
);
test_device!(max_pool2d, max_pool2d_cpu, max_pool2d_gpu, max_pool2d_metal);
test_device!(
    upsample_nearest2d,
    upsample_nearest2d_cpu,
    upsample_nearest2d_gpu,
    upsample_nearest2d_metal
);
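As a cross-check of the expectations above, 2x2 average pooling with stride 2 can be reproduced in a few lines of dependency-free Rust. The helper below (`avg_pool2d_ref`, our own name for illustration, not a candle API) mirrors the first test case; candle's `avg_pool2d` operates on (batch, channel, h, w) tensors rather than flat slices.

/// Average-pool an `h x w` row-major matrix with a `k x k` window and stride `k`.
fn avg_pool2d_ref(data: &[f32], h: usize, w: usize, k: usize) -> Vec<Vec<f32>> {
    (0..h / k)
        .map(|i| {
            (0..w / k)
                .map(|j| {
                    // Sum the k*k window anchored at (i*k, j*k), then average.
                    let mut sum = 0f32;
                    for di in 0..k {
                        for dj in 0..k {
                            sum += data[(i * k + di) * w + (j * k + dj)];
                        }
                    }
                    sum / (k * k) as f32
                })
                .collect()
        })
        .collect()
}

fn main() {
    // First test case above: a 4x4 input pooled with a 2x2 window.
    let data = [1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.];
    assert_eq!(avg_pool2d_ref(&data, 4, 4, 2), [[0.5, 1.], [1., 1.]]);
}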
repo_id: hf_public_repos/candle/candle-core
file_path: hf_public_repos/candle/candle-core/tests/tensor_tests.rs
content:
use candle_core::{test_device, test_utils, DType, Device, IndexOp, Result, Tensor};

fn zeros(device: &Device) -> Result<()> {
    let tensor = Tensor::zeros((5, 2), DType::F32, device)?;
    let (dim1, dim2) = tensor.dims2()?;
    assert_eq!(dim1, 5);
    assert_eq!(dim2, 2);
    Ok(())
}

fn ones(device: &Device) -> Result<()> {
    assert_eq!(
        Tensor::ones((2, 3), DType::U8, device)?.to_vec2::<u8>()?,
        [[1, 1, 1], [1, 1, 1]],
    );
    assert_eq!(
        Tensor::ones((2, 3), DType::U32, device)?.to_vec2::<u32>()?,
        [[1, 1, 1], [1, 1, 1]],
    );
    assert_eq!(
        Tensor::ones((2, 3), DType::I64, device)?.to_vec2::<i64>()?,
        [[1, 1, 1], [1, 1, 1]],
    );
    assert_eq!(
        Tensor::ones((2, 3), DType::F32, device)?.to_vec2::<f32>()?,
        [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
    );
    assert_eq!(
        Tensor::ones((2, 3), DType::F64, device)?.to_vec2::<f64>()?,
        [[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
    );
    Ok(())
}

fn arange(device: &Device) -> Result<()> {
    assert_eq!(
        Tensor::arange(0u8, 5u8, device)?.to_vec1::<u8>()?,
        [0, 1, 2, 3, 4],
    );
    assert_eq!(
        Tensor::arange_step(0u8, 5u8, 2, device)?.to_vec1::<u8>()?,
        [0, 2, 4],
    );
    assert_eq!(
        Tensor::arange_step(0u8, 5u8, 3, device)?.to_vec1::<u8>()?,
        [0, 3],
    );
    assert_eq!(
        Tensor::arange_step(5i64, 0i64, -1, device)?.to_vec1::<i64>()?,
        [5, 4, 3, 2, 1],
    );
    Ok(())
}

fn add_mul(device: &Device) -> Result<()> {
    let tensor = Tensor::new(&[3f32, 1., 4.], device)?;
    let dim1 = tensor.dims1()?;
    assert_eq!(dim1, 3);
    let content: Vec<f32> = tensor.to_vec1()?;
    assert_eq!(content, [3., 1., 4.]);
    let tensor = Tensor::add(&tensor, &tensor)?;
    let content: Vec<f32> = tensor.to_vec1()?;
    assert_eq!(content, [6., 2., 8.]);
    let tensor = Tensor::mul(&tensor, &tensor)?;
    let content: Vec<f32> = tensor.to_vec1()?;
    assert_eq!(content, [36., 4., 64.]);
    Ok(())
}

fn tensor_2d(device: &Device) -> Result<()> {
    let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
    let tensor = Tensor::new(data, device)?;
    let dims = tensor.dims2()?;
    assert_eq!(dims, (2, 5));
    let content: Vec<Vec<f32>> = tensor.to_vec2()?;
    assert_eq!(content, data);
    Ok(())
}

fn clamp(device: &Device) -> Result<()> {
    let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
    let tensor = Tensor::new(data, device)?;
    let tensor = tensor.clamp(1.5, 6.2)?;
    assert_eq!(
        tensor.to_vec2::<f32>()?,
        [[3.0, 1.5, 4.0, 1.5, 5.0], [2.0, 1.5, 6.2, 6.2, 2.0]],
    );
    Ok(())
}

fn unary_op(device: &Device) -> Result<()> {
    let data = &[[-3f32, 1., 4., -0.1, 0.5], [2.7, -1.8, -0.28, 1.8, 2.8]];
    let tensor = Tensor::new(data, device)?;
    assert_eq!(
        test_utils::to_vec2_round(&tensor.gelu()?, 4)?,
        [
            [-0.0036, 0.8412, 3.9999, -0.046, 0.3457],
            [2.6911, -0.0647, -0.1091, 1.7353, 2.7933]
        ]
    );
    assert_eq!(
        test_utils::to_vec2_round(&tensor.gelu_erf()?, 4)?,
        [
            [-0.004, 0.8413, 3.9999, -0.046, 0.3457],
            [2.6906, -0.0647, -0.1091, 1.7353, 2.7928]
        ]
    );
    assert_eq!(
        test_utils::to_vec2_round(&tensor.erf()?, 4)?,
        [
            [-1.0, 0.8427, 1.0, -0.1125, 0.5205],
            [0.9999, -0.9891, -0.3079, 0.9891, 0.9999]
        ]
    );
    assert_eq!(
        test_utils::to_vec2_round(&tensor.ceil()?, 4)?,
        [[-3.0, 1.0, 4.0, -0.0, 1.0], [3.0, -1.0, -0.0, 2.0, 3.0]]
    );
    assert_eq!(
        test_utils::to_vec2_round(&tensor.floor()?, 4)?,
        [[-3.0, 1.0, 4.0, -1.0, 0.0], [2.0, -2.0, -1.0, 1.0, 2.0]]
    );
    assert_eq!(
        test_utils::to_vec2_round(&tensor.round()?, 4)?,
        [[-3.0, 1.0, 4.0, -0.0, 1.0], [3.0, -2.0, -0.0, 2.0, 3.0]]
    );
    let tensor = Tensor::new(&[2997.9246, 314.15926f32], device)?;
    assert_eq!(
        test_utils::to_vec1_round(&tensor.round_to(2)?, 4)?,
        [2997.92, 314.16]
    );
    assert_eq!(
        test_utils::to_vec1_round(&tensor.round_to(-2)?, 4)?,
        [3000.0, 300.]
    );
    Ok(())
}

fn binary_op(device: &Device) -> Result<()> {
    let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
    let tensor1 = Tensor::new(data, device)?;
    let data2 = &[[5f32, 5., 5., 5., 5.], [2., 1., 7., 8., 2.]];
    let tensor2 = Tensor::new(data2, device)?;
    let tensor = (&tensor1 + (&tensor1 * &tensor1)? / (&tensor1 + &tensor2))?;
    let dims = tensor.dims2()?;
    assert_eq!(dims, (2, 5));
    let content: Vec<Vec<f32>> = tensor.to_vec2()?;
    assert_eq!(content[0], [4.125, 1.1666666, 5.7777777, 1.1666666, 7.5]);
    assert_eq!(content[1], [3.0, 1.5, 10.5, 12.0, 3.0]);
    #[allow(clippy::eq_op)]
    let tensor = (&tensor - &tensor)?;
    let content: Vec<Vec<f32>> = tensor.to_vec2()?;
    assert_eq!(content[0], [0., 0., 0., 0., 0.]);

    let min = tensor1.minimum(&(&tensor2 * 0.5)?)?;
    let max = tensor1.maximum(&(&tensor2 * 0.5)?)?;
    assert_eq!(
        min.to_vec2::<f32>()?,
        [[2.5, 1.0, 2.5, 1.0, 2.5], [1.0, 0.5, 3.5, 4.0, 1.0]],
    );
    assert_eq!(
        max.to_vec2::<f32>()?,
        [[3.0, 2.5, 4.0, 2.5, 5.0], [2.0, 1.0, 7.0, 8.0, 2.0]]
    );
    Ok(())
}

fn transpose(device: &Device) -> Result<()> {
    let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
    let tensor = Tensor::new(data, device)?.t()?;
    let dims = tensor.dims2()?;
    assert_eq!(dims, (5, 2));
    assert_eq!(
        tensor.to_vec2::<f32>()?,
        &[[3f32, 2.], [1., 1.], [4., 7.], [1., 8.], [5., 2.]]
    );
    assert_eq!(tensor.t()?.to_vec2::<f32>()?, data);
    assert_eq!(tensor.contiguous()?.t()?.to_vec2::<f32>()?, data);
    assert_eq!(((tensor + 1.)?.t()? - 1.)?.to_vec2::<f32>()?, data);
    Ok(())
}

fn var(device: &Device) -> Result<()> {
    // Values taken from https://pytorch.org/docs/stable/generated/torch.var.html
    let data = &[
        [0.2035f32, 1.2959, 1.8101, -0.4644],
        [1.5027, -0.3270, 0.5905, 0.6538],
        [-1.5745, 1.3330, -0.5596, -0.6548],
        [0.1264, -0.5080, 1.6420, 0.1992],
    ];
    let tensor = Tensor::new(data, device)?;
    assert_eq!(
        test_utils::to_vec2_round(&tensor.var_keepdim(1)?, 4)?,
        &[[1.0631], [0.559], [1.4893], [0.8258]]
    );
    Ok(())
}

fn sum(device: &Device) -> Result<()> {
    let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
    let tensor = Tensor::new(data, device)?;
    assert_eq!(
        tensor.sum_keepdim(2)?.to_vec3::<u32>()?,
        &[[[8], [15]], [[10], [18]]]
    );
    assert_eq!(
        tensor.sum_keepdim(0)?.to_vec3::<u32>()?,
        &[[[5, 2, 11], [9, 7, 17]]],
    );
    assert_eq!(tensor.sum_keepdim((0, 2, 1))?.to_vec3::<u32>()?, &[[[51]]],);
    assert_eq!(
        tensor.t()?.sum_keepdim(1)?.t()?.to_vec3::<u32>()?,
        &[[[8], [15]], [[10], [18]]]
    );
    assert_eq!(
        tensor.sum_keepdim((2, 1))?.to_vec3::<u32>()?,
        &[[[8 + 15]], [[10 + 18]]]
    );
    let data: Vec<u32> = (0..4000u32).collect();
    let tensor = Tensor::new(data.as_slice(), device)?;
    assert_eq!(tensor.sum_keepdim(0)?.to_vec1::<u32>()?, &[7998000]);
    let tensor = tensor.reshape((2000, 2))?;
    assert_eq!(tensor.sum_keepdim((0, 1))?.to_vec2::<u32>()?, &[[7998000]]);
    assert_eq!(
        tensor.sum_keepdim(0)?.sum_keepdim(1)?.to_vec2::<u32>()?,
        &[[7998000]]
    );
    assert_eq!(
        tensor.sum_keepdim(1)?.sum_keepdim(0)?.to_vec2::<u32>()?,
        &[[7998000]]
    );
    assert_eq!(
        tensor.sum_keepdim(0)?.to_vec2::<u32>()?,
        &[[3998000, 4000000]]
    );

    // Make the tensor non contiguous.
    let tensor = tensor.t()?.contiguous()?.t()?;
    assert_eq!(tensor.sum_keepdim((0, 1))?.to_vec2::<u32>()?, &[[7998000]]);
    assert_eq!(
        tensor.sum_keepdim(0)?.sum_keepdim(1)?.to_vec2::<u32>()?,
        &[[7998000]]
    );
    assert_eq!(
        tensor.sum_keepdim(1)?.sum_keepdim(0)?.to_vec2::<u32>()?,
        &[[7998000]]
    );
    assert_eq!(
        tensor.sum_keepdim(0)?.to_vec2::<u32>()?,
        &[[3998000, 4000000]]
    );

    let t1 = tensor.reshape((200, 5, 4))?;
    let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
    for tensor in [t1, t2] {
        assert_eq!(
            tensor.sum_keepdim((0, 1, 2))?.to_vec3::<u32>()?,
            &[[[7998000]]]
        );
        assert_eq!(
            tensor
                .sum_keepdim(0)?
                .sum_keepdim(2)?
                .sum_keepdim(1)?
                .to_vec3::<u32>()?,
            &[[[7998000]]]
        );
        assert_eq!(
            tensor
                .sum_keepdim(0)?
                .sum_keepdim((1, 2))?
                .to_vec3::<u32>()?,
            &[[[7998000]]]
        );
        assert_eq!(
            tensor
                .sum_keepdim(1)?
                .sum_keepdim((0, 2))?
                .to_vec3::<u32>()?,
            &[[[7998000]]]
        );
        assert_eq!(
            tensor.sum_keepdim(0)?.to_vec3::<u32>()?,
            &[[
                [398000, 398200, 398400, 398600],
                [398800, 399000, 399200, 399400],
                [399600, 399800, 400000, 400200],
                [400400, 400600, 400800, 401000],
                [401200, 401400, 401600, 401800]
            ]]
        );
    }
    Ok(())
}

fn min(device: &Device) -> Result<()> {
    let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
    let tensor = Tensor::new(data, device)?;
    assert_eq!(
        tensor.min_keepdim(2)?.to_vec3::<u32>()?,
        &[[[1], [1]], [[1], [2]]]
    );
    assert_eq!(
        tensor.min_keepdim(0)?.to_vec3::<u32>()?,
        &[[[2, 1, 4], [1, 2, 8]]],
    );
    let data: Vec<u32> = (200..4000u32).collect();
    let tensor = Tensor::new(data.as_slice(), device)?;
    assert_eq!(tensor.min_keepdim(0)?.to_vec1::<u32>()?, &[200]);
    let tensor = tensor.reshape((1900, 2))?;
    assert_eq!(
        tensor.min_keepdim(0)?.min_keepdim(1)?.to_vec2::<u32>()?,
        &[[200]]
    );
    assert_eq!(
        tensor.min_keepdim(1)?.min_keepdim(0)?.to_vec2::<u32>()?,
        &[[200]]
    );
    assert_eq!(tensor.min_keepdim(0)?.to_vec2::<u32>()?, &[[200, 201]]);

    // Make the tensor non contiguous.
    let tensor = tensor.t()?.contiguous()?.t()?;
    assert_eq!(
        tensor.min_keepdim(0)?.min_keepdim(1)?.to_vec2::<u32>()?,
        &[[200]]
    );
    assert_eq!(
        tensor.min_keepdim(1)?.min_keepdim(0)?.to_vec2::<u32>()?,
        &[[200]]
    );
    assert_eq!(tensor.min_keepdim(0)?.to_vec2::<u32>()?, &[[200, 201]]);

    let t1 = tensor.reshape((190, 5, 4))?;
    let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
    for tensor in [t1, t2] {
        assert_eq!(
            tensor
                .min_keepdim(0)?
                .min_keepdim(2)?
                .min_keepdim(1)?
                .to_vec3::<u32>()?,
            &[[[200]]]
        );
        assert_eq!(
            tensor.min_keepdim(0)?.to_vec3::<u32>()?,
            &[[
                [200, 201, 202, 203],
                [204, 205, 206, 207],
                [208, 209, 210, 211],
                [212, 213, 214, 215],
                [216, 217, 218, 219]
            ]]
        );
    }
    Ok(())
}

fn max(device: &Device) -> Result<()> {
    let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
    let tensor = Tensor::new(data, device)?;
    assert_eq!(
        tensor.max_keepdim(2)?.to_vec3::<u32>()?,
        &[[[4], [9]], [[7], [8]]]
    );
    assert_eq!(
        tensor.max_keepdim(0)?.to_vec3::<u32>()?,
        &[[[3, 1, 7], [8, 5, 9]]],
    );
    let data: Vec<u32> = (200..4000u32).collect();
    let tensor = Tensor::new(data.as_slice(), device)?;
    assert_eq!(tensor.max_keepdim(0)?.to_vec1::<u32>()?, &[3999]);
    let tensor = tensor.reshape((1900, 2))?;
    assert_eq!(
        tensor.max_keepdim(0)?.max_keepdim(1)?.to_vec2::<u32>()?,
        &[[3999]]
    );
    assert_eq!(
        tensor.max_keepdim(1)?.max_keepdim(0)?.to_vec2::<u32>()?,
        &[[3999]]
    );
    assert_eq!(tensor.max_keepdim(0)?.to_vec2::<u32>()?, &[[3998, 3999]]);

    // Make the tensor non contiguous.
    let tensor = tensor.t()?.contiguous()?.t()?;
    assert_eq!(
        tensor.max_keepdim(0)?.max_keepdim(1)?.to_vec2::<u32>()?,
        &[[3999]]
    );
    assert_eq!(
        tensor.max_keepdim(1)?.max_keepdim(0)?.to_vec2::<u32>()?,
        &[[3999]]
    );
    assert_eq!(tensor.max_keepdim(0)?.to_vec2::<u32>()?, &[[3998, 3999]]);

    let t1 = tensor.reshape((190, 5, 4))?;
    let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
    for tensor in [t1, t2] {
        assert_eq!(
            tensor
                .max_keepdim(0)?
                .max_keepdim(2)?
                .max_keepdim(1)?
                .to_vec3::<u32>()?,
            &[[[3999]]]
        );
        assert_eq!(
            tensor.max_keepdim(0)?.to_vec3::<u32>()?,
            &[[
                [3980, 3981, 3982, 3983],
                [3984, 3985, 3986, 3987],
                [3988, 3989, 3990, 3991],
                [3992, 3993, 3994, 3995],
                [3996, 3997, 3998, 3999]
            ]]
        );
    }
    Ok(())
}

fn argmin(device: &Device) -> Result<()> {
    let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
    let tensor = Tensor::new(data, device)?;
    assert_eq!(
        tensor.argmin_keepdim(2)?.to_vec3::<u32>()?,
        &[[[1], [0]], [[1], [1]]]
    );
    assert_eq!(
        tensor.argmin_keepdim(0)?.to_vec3::<u32>()?,
        &[[[1, 0, 0], [0, 1, 1]]],
    );
    let data: Vec<u32> = (200..4000u32).collect();
    let tensor = Tensor::new(data.as_slice(), device)?;
    assert_eq!(tensor.argmin_keepdim(0)?.to_vec1::<u32>()?, &[0]);
    let tensor = tensor.reshape((1900, 2))?;
    assert_eq!(
        tensor
            .argmin_keepdim(0)?
            .argmin_keepdim(1)?
            .to_vec2::<u32>()?,
        &[[0]]
    );
    assert_eq!(
        tensor
            .argmin_keepdim(1)?
            .argmin_keepdim(0)?
            .to_vec2::<u32>()?,
        &[[0]]
    );
    assert_eq!(tensor.argmin_keepdim(0)?.to_vec2::<u32>()?, &[[0, 0]]);

    // Make the tensor non contiguous.
    let tensor = tensor.t()?.contiguous()?.t()?;
    assert_eq!(
        tensor
            .argmin_keepdim(0)?
            .argmin_keepdim(1)?
            .to_vec2::<u32>()?,
        &[[0]]
    );
    assert_eq!(
        tensor
            .argmin_keepdim(1)?
            .argmin_keepdim(0)?
            .to_vec2::<u32>()?,
        &[[0]]
    );
    assert_eq!(tensor.argmin_keepdim(0)?.to_vec2::<u32>()?, &[[0, 0]]);

    let t1 = tensor.reshape((190, 5, 4))?;
    let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
    for tensor in [t1, t2] {
        assert_eq!(
            tensor
                .argmin_keepdim(0)?
                .argmin_keepdim(2)?
                .argmin_keepdim(1)?
                .to_vec3::<u32>()?,
            &[[[0]]]
        );
        assert_eq!(
            tensor.argmin_keepdim(0)?.to_vec3::<u32>()?,
            &[[
                [0, 0, 0, 0],
                [0, 0, 0, 0],
                [0, 0, 0, 0],
                [0, 0, 0, 0],
                [0, 0, 0, 0],
            ]]
        );
    }
    Ok(())
}

fn argmax(device: &Device) -> Result<()> {
    let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
    let tensor = Tensor::new(data, device)?;
    assert_eq!(
        tensor.argmax_keepdim(2)?.to_vec3::<u32>()?,
        &[[[2], [2]], [[2], [0]]]
    );
    assert_eq!(
        tensor.argmax_keepdim(0)?.to_vec3::<u32>()?,
        &[[[0, 0, 1], [1, 0, 0]]],
    );
    let data: Vec<u32> = (200..4000u32).collect();
    let tensor = Tensor::new(data.as_slice(), device)?;
    assert_eq!(tensor.argmax_keepdim(0)?.to_vec1::<u32>()?, &[3799]);
    let tensor = tensor.reshape((1900, 2))?;
    assert_eq!(
        tensor
            .argmax_keepdim(0)?
            .argmax_keepdim(1)?
            .to_vec2::<u32>()?,
        &[[0]]
    );
    assert_eq!(
        tensor
            .argmax_keepdim(1)?
            .argmax_keepdim(0)?
            .to_vec2::<u32>()?,
        &[[0]]
    );
    assert_eq!(tensor.argmax_keepdim(0)?.to_vec2::<u32>()?, &[[1899, 1899]]);

    // Make the tensor non contiguous.
    let tensor = tensor.t()?.contiguous()?.t()?;
    assert_eq!(
        tensor
            .argmax_keepdim(0)?
            .argmax_keepdim(1)?
            .to_vec2::<u32>()?,
        &[[0]]
    );
    assert_eq!(
        tensor
            .argmax_keepdim(1)?
            .argmax_keepdim(0)?
            .to_vec2::<u32>()?,
        &[[0]]
    );
    assert_eq!(tensor.argmax_keepdim(0)?.to_vec2::<u32>()?, &[[1899, 1899]]);

    let t1 = tensor.reshape((190, 5, 4))?;
    let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
    for tensor in [t1, t2] {
        assert_eq!(
            tensor
                .argmax_keepdim(0)?
                .argmax_keepdim(2)?
                .argmax_keepdim(1)?
                .to_vec3::<u32>()?,
            &[[[0]]]
        );
        assert_eq!(
            tensor.argmax_keepdim(0)?.to_vec3::<u32>()?,
            &[[
                [189, 189, 189, 189],
                [189, 189, 189, 189],
                [189, 189, 189, 189],
                [189, 189, 189, 189],
                [189, 189, 189, 189],
            ]]
        );
    }
    Ok(())
}

fn narrow(device: &Device) -> Result<()> {
    let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]];
    let tensor = Tensor::new(data, device)?;
    assert_eq!(
        tensor.narrow(2, 1, 2)?.to_vec3::<f32>()?,
        &[[[1.0, 4.0], [5.0, 9.0]], [[1.0, 7.0], [2.0, 8.0]]],
    );
    assert_eq!(
        tensor.narrow(1, 1, 1)?.to_vec3::<f32>()?,
        &[[[1.0, 5.0, 9.0]], [[8.0, 2.0, 8.0]]],
    );
    assert_eq!(
        tensor.narrow(0, 0, 1)?.to_vec3::<f32>()?,
        &[[[3.0, 1.0, 4.0], [1.0, 5.0, 9.0]]],
    );
    assert_eq!(
        tensor.narrow(0, 1, 1)?.to_vec3::<f32>()?,
        &[[[2.0, 1.0, 7.0], [8.0, 2.0, 8.0]]],
    );
    // The following has been checked against PyTorch via:
    // import torch
    // t = torch.tensor([[[3., 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]])
    // t.transpose(-1, -2).narrow(1, 1, 2)
    assert_eq!(
        tensor.t()?.narrow(1, 1, 2)?.to_vec3::<f32>()?,
        &[[[1.0, 5.0], [4.0, 9.0]], [[1.0, 2.0], [7.0, 8.0]]],
    );
    Ok(())
}

fn broadcast(device: &Device) -> Result<()> {
    let data = &[3f32, 1., 4.];
    let tensor = Tensor::new(data, device)?;
    assert_eq!(
        tensor.broadcast_left((3, 1))?.to_vec3::<f32>()?,
        &[[[3.0, 1.0, 4.0]], [[3.0, 1.0, 4.0]], [[3.0, 1.0, 4.0]]]
    );
    Ok(())
}

fn cat(device: &Device) -> Result<()> {
    // 1D
    let t1 = Tensor::new(&[3f32, 1., 4.], device)?;
    let t2 = Tensor::new(&[1f32, 5., 9., 2.], device)?;
    let t3 = Tensor::new(&[6f32, 5., 3., 5., 8., 9.], device)?;
    assert_eq!(Tensor::cat(&[&t1], 0)?.to_vec1::<f32>()?, [3f32, 1., 4.],);
    assert_eq!(
        Tensor::cat(&[&t1, &t2], 0)?.to_vec1::<f32>()?,
        [3f32, 1., 4., 1., 5., 9., 2.],
    );
    assert_eq!(
        Tensor::cat(&[&t1, &t2, &t3], 0)?.to_vec1::<f32>()?,
        [3f32, 1., 4., 1., 5., 9., 2., 6., 5., 3., 5., 8., 9.],
    );

    // 2D
    let data = &[[3f32, 1., 4., 1., 5.], [2., 7., 1., 8., 2.]];
    let t1 = Tensor::new(data, device)?;
    let data2 = &[[5f32, 5., 5., 5., 5.], [2., 7., 1., 8., 2.]];
    let t2 = Tensor::new(data2, device)?;
    assert_eq!(
        Tensor::cat(&[&t1, &t2], 0)?.to_vec2::<f32>()?,
        [
            [3.0, 1.0, 4.0, 1.0, 5.0],
            [2.0, 7.0, 1.0, 8.0, 2.0],
            [5.0, 5.0, 5.0, 5.0, 5.0],
            [2.0, 7.0, 1.0, 8.0, 2.0]
        ]
    );
    // PyTorch equivalent:
    // import torch
    // t1 = torch.tensor([[3, 1, 4, 1, 5], [2, 7, 1, 8, 2]])
    // t2 = torch.tensor([[5]*5, [2, 7, 1, 8, 2]])
    // torch.cat([t1.t(), t2.t()], dim=1).t()
    assert_eq!(
        Tensor::cat(&[&t1.t()?, &t2.t()?], 1)?
            .t()?
            .to_vec2::<f32>()?,
        [
            [3.0, 1.0, 4.0, 1.0, 5.0],
            [2.0, 7.0, 1.0, 8.0, 2.0],
            [5.0, 5.0, 5.0, 5.0, 5.0],
            [2.0, 7.0, 1.0, 8.0, 2.0]
        ]
    );
    assert_eq!(
        Tensor::cat(&[&t1, &t2], 1)?.to_vec2::<f32>()?,
        [
            [3.0, 1.0, 4.0, 1.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0],
            [2.0, 7.0, 1.0, 8.0, 2.0, 2.0, 7.0, 1.0, 8.0, 2.0]
        ]
    );
    Ok(())
}

fn embeddings(device: &Device) -> Result<()> {
    let ids = Tensor::new(&[0u32, 2u32, 1u32], device)?;
    let t = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], device)?;
    let hs = t.embedding(&ids)?;
    assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]);
    let hs = t.index_select(&ids, 0)?;
    assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]);
    Ok(())
}

fn cmp(device: &Device) -> Result<()> {
    let t1 = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], device)?;
    let t2 = Tensor::new(&[[1f32, 0f32], [3f32, 3f32], [4f32, 7f32]], device)?;
    assert_eq!(t1.eq(&t2)?.to_vec2::<u8>()?, &[[0, 0], [0, 1], [1, 0]]);
    assert_eq!(t1.ne(&t2)?.to_vec2::<u8>()?, &[[1, 1], [1, 0], [0, 1]]);
    assert_eq!(t1.le(&t2)?.to_vec2::<u8>()?, &[[1, 0], [1, 1], [1, 1]]);
    assert_eq!(t1.lt(&t2)?.to_vec2::<u8>()?, &[[1, 0], [1, 0], [0, 1]]);
    assert_eq!(t1.gt(&t2)?.to_vec2::<u8>()?, &[[0, 1], [0, 0], [0, 0]]);
    assert_eq!(t1.ge(&t2)?.to_vec2::<u8>()?, &[[0, 1], [0, 1], [1, 0]]);
    Ok(())
}

fn index_select(device: &Device) -> Result<()> {
    let ids = Tensor::new(&[0u32, 2u32, 1u32], device)?;
    let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?;
    assert_eq!(
        t.to_vec2::<f32>()?,
        &[
            [0.0, 1.0, 2.0],
            [3.0, 4.0, 5.0],
            [6.0, 7.0, 8.0],
            [9.0, 10.0, 11.0]
        ]
    );
    let hs = t.index_select(&ids, 1)?;
    assert_eq!(
        hs.to_vec2::<f32>()?,
        &[
            [0.0, 2.0, 1.0],
            [3.0, 5.0, 4.0],
            [6.0, 8.0, 7.0],
            [9.0, 11.0, 10.0]
        ]
    );
    let hs = t.index_select(&ids, 0)?;
    assert_eq!(
        hs.to_vec2::<f32>()?,
        &[[0.0, 1.0, 2.0], [6.0, 7.0, 8.0], [3.0, 4.0, 5.0]]
    );
    // Prior to https://github.com/huggingface/candle/pull/1022
    // There would be a bug where the last values in the result tensor would be set to 0.
    let ids = Tensor::new(&[0u32, 2u32, 1u32, 0u32, 2u32, 1u32], device)?;
    let hs = t.index_select(&ids, 0)?;
    assert_eq!(
        hs.to_vec2::<f32>()?,
        &[
            [0.0, 1.0, 2.0],
            [6.0, 7.0, 8.0],
            [3.0, 4.0, 5.0],
            [0.0, 1.0, 2.0],
            [6.0, 7.0, 8.0],
            [3.0, 4.0, 5.0],
        ]
    );
    // Test when selecting dim > 0 with ids size different from elem count of
    // target dim in source/input.
    let ids = Tensor::new(&[1u32, 0u32, 1u32], device)?;
    let t = Tensor::arange(1f32, 5f32, device)?.reshape((2, 2))?;
    assert_eq!(t.to_vec2::<f32>()?, &[[1.0, 2.0], [3.0, 4.0]]);
    let hs = t.index_select(&ids, 1)?;
    assert_eq!(hs.to_vec2::<f32>()?, &[[2.0, 1.0, 2.0], [4.0, 3.0, 4.0]]);
    Ok(())
}

fn index_add(device: &Device) -> Result<()> {
    let ids = Tensor::new(&[0u32, 1u32, 1u32], device)?;
    let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?;
    assert_eq!(
        t.to_vec2::<f32>()?,
        &[
            [0.0, 1.0, 2.0],
            [3.0, 4.0, 5.0],
            [6.0, 7.0, 8.0],
            [9.0, 10.0, 11.0]
        ]
    );
    let init = Tensor::ones((4, 2), DType::F32, device)?;
    let hs = init.index_add(&ids, &t, 1)?;
    assert_eq!(
        hs.to_vec2::<f32>()?,
        &[[1.0, 4.0], [4.0, 10.0], [7.0, 16.0], [10.0, 22.0]],
    );
    let init = Tensor::zeros((4, 2), DType::F32, device)?;
    let ids = Tensor::new(&[1u32, 0u32, 0u32], device)?;
    let hs = init.index_add(&ids, &t, 1)?;
    assert_eq!(
        hs.to_vec2::<f32>()?,
        &[[3.0, 0.0], [9.0, 3.0], [15.0, 6.0], [21.0, 9.0]],
    );
    let init = Tensor::zeros((6, 3), DType::F32, device)?;
    let ids = Tensor::new(&[5u32, 0u32, 1u32, 0u32], device)?;
    let hs = init.index_add(&ids, &t, 0)?;
    assert_eq!(
        hs.to_vec2::<f32>()?,
        &[
            [12.0, 14.0, 16.0],
            [6.0, 7.0, 8.0],
            [0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0],
            [0.0, 0.0, 0.0],
            [0.0, 1.0, 2.0]
        ]
    );
    Ok(())
}

fn slice_scatter(device: &Device) -> Result<()> {
    let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?;
    assert_eq!(
        t.to_vec2::<f32>()?,
        &[
            [0.0, 1.0, 2.0],
            [3.0, 4.0, 5.0],
            [6.0, 7.0, 8.0],
            [9.0, 10.0, 11.0]
        ]
    );
    let src = Tensor::arange(100f32, 106f32, device)?.reshape((2, 3))?;
    assert_eq!(
        t.slice_scatter0(&src, 0)?.to_vec2::<f32>()?,
        &[
            [100.0, 101.0, 102.0],
            [103.0, 104.0, 105.0],
            [6.0, 7.0, 8.0],
            [9.0, 10.0, 11.0]
        ]
    );
    assert_eq!(
        t.slice_scatter0(&src, 1)?.to_vec2::<f32>()?,
        &[
            [0.0, 1.0, 2.0],
            [100.0, 101.0, 102.0],
            [103.0, 104.0, 105.0],
            [9.0, 10.0, 11.0]
        ]
    );
    assert_eq!(
        t.slice_scatter0(&src, 2)?.to_vec2::<f32>()?,
        &[
            [0.0, 1.0, 2.0],
            [3.0, 4.0, 5.0],
            [100.0, 101.0, 102.0],
            [103.0, 104.0, 105.0],
        ]
    );
    Ok(())
}

fn scatter_add(device: &Device) -> Result<()> {
    let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?;
    assert_eq!(
        t.to_vec2::<f32>()?,
        &[
            [0.0, 1.0, 2.0],
            [3.0, 4.0, 5.0],
            [6.0, 7.0, 8.0],
            [9.0, 10.0, 11.0]
        ]
    );
    let ids = Tensor::new(&[[0u32, 1, 2], [3, 4, 0], [3, 3, 1], [2, 0, 4]], device)?;
    let init = Tensor::ones((4, 5), DType::F32, device)?;
    let hs = init.scatter_add(&ids, &t, 1)?;
    assert_eq!(
        hs.to_vec2::<f32>()?,
        &[
            [1.0, 2.0, 3.0, 1.0, 1.0],
            [6.0, 1.0, 1.0, 4.0, 5.0],
            [1.0, 9.0, 1.0, 14.0, 1.0],
            [11.0, 1.0, 10.0, 1.0, 12.0]
        ]
    );
    let init = Tensor::ones((6, 3), DType::F32, device)?;
    let hs = init.scatter_add(&ids, &t, 0)?;
    assert_eq!(
        hs.to_vec2::<f32>()?,
        &[
            [1.0, 11.0, 6.0],
            [1.0, 2.0, 9.0],
            [10.0, 1.0, 3.0],
            [10.0, 8.0, 1.0],
            [1.0, 5.0, 12.0],
            [1.0, 1.0, 1.0]
        ]
    );
    Ok(())
}

fn gather(device: &Device) -> Result<()> {
    let ids = Tensor::new(&[[0u32], [2u32], [1u32], [0u32]], device)?;
    let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?;
    assert_eq!(
        t.to_vec2::<f32>()?,
        &[
            [0.0, 1.0, 2.0],
            [3.0, 4.0, 5.0],
            [6.0, 7.0, 8.0],
            [9.0, 10.0, 11.0]
        ]
    );
    let hs = t.gather(&ids, 1)?;
    assert_eq!(hs.to_vec2::<f32>()?, &[[0.0], [5.0], [7.0], [9.0]]);
    let ids = Tensor::new(
        &[[0u32, 0u32], [2u32, 0u32], [1u32, 1u32], [0u32, 2u32]],
        device,
    )?;
    let hs = t.gather(&ids, 1)?;
    assert_eq!(
        hs.to_vec2::<f32>()?,
        &[[0.0, 0.0], [5.0, 3.0], [7.0, 7.0], [9.0, 11.0]]
    );
    let ids = Tensor::new(&[[0u32, 2u32, 0u32]], device)?;
    let hs = t.gather(&ids, 0)?;
    assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 7.0, 2.0]]);
    let ids = Tensor::new(&[[0u32, 2u32, 0u32], [0u32, 1u32, 1u32]], device)?;
    let hs = t.gather(&ids, 0)?;
    assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 7.0, 2.0], [0.0, 4.0, 5.0]]);
    Ok(())
}

fn matmul(device: &Device) -> Result<()> {
    let data = vec![1.0f32, 2.0, 3.0, 4.0];
    let a = Tensor::from_slice(&data, (2, 2), device)?;
    let data = vec![1.0f32, 2.0, 3.0, 4.0];
    let b = Tensor::from_slice(&data, (2, 2), device)?;

    let c = a.matmul(&b)?;
    assert_eq!(c.to_vec2::<f32>()?, &[[7.0f32, 10.0], [15.0, 22.0]]);

    let data = vec![1.0f32, 2.0];
    let a = Tensor::from_slice(&data, (2, 1), device)?;
    let data = vec![3.0f32, 4.0];
    let b = Tensor::from_slice(&data, (1, 2), device)?;
    let c = a.matmul(&b)?;
    assert_eq!(c.to_vec2::<f32>()?, &[&[3.0, 4.0], &[6.0, 8.0]]);

    let data: Vec<_> = (0..6).map(|i| i as f32).collect();
    let a = Tensor::from_slice(&data, (2, 3), device)?;
    let data: Vec<_> = (0..6).map(|i| (i + 2) as f32).collect();
    let b = Tensor::from_slice(&data, (3, 2), device)?;
    let c = a.matmul(&b)?;
    assert_eq!(c.to_vec2::<f32>()?, &[&[16., 19.], &[52., 64.]]);

    let data: Vec<_> = (0..12).map(|i| i as f32).collect();
    let a = Tensor::from_slice(&data, (2, 2, 3), device)?;
    let data: Vec<_> = (0..12).map(|i| (i + 2) as f32).collect();
    let b = Tensor::from_slice(&data, (2, 3, 2), device)?;
    let expected = [[[16., 19.], [52., 64.]], [[214., 235.], [304., 334.]]];
    let c = a.matmul(&b)?;
    assert_eq!(c.to_vec3::<f32>()?, &expected);

    // Also perform the matmul on contiguous transposed versions.
    let a_tt = a.t()?.contiguous()?.t()?;
    assert!(!a_tt.is_contiguous());
    assert_eq!(a.dims(), a_tt.dims());
    assert_eq!(a_tt.stride(), &[6, 1, 2]);

    let b_tt = b.t()?.contiguous()?.t()?;
    assert!(!b_tt.is_contiguous());
    assert_eq!(b.dims(), b_tt.dims());
    assert_eq!(b_tt.stride(), &[6, 1, 3]);

    assert_eq!(a_tt.matmul(&b)?.to_vec3::<f32>()?, &expected);
    assert_eq!(a.matmul(&b_tt)?.to_vec3::<f32>()?, &expected);
    assert_eq!(a_tt.matmul(&b_tt)?.to_vec3::<f32>()?, &expected);
    Ok(())
}

fn broadcast_matmul(device: &Device) -> Result<()> {
    let lhs = Tensor::randn(0f32, 1f32, (3, 1, 4, 5), device)?;
    let rhs = Tensor::randn(0f32, 1f32, (6, 5, 2), device)?;
    let out = lhs.broadcast_matmul(&rhs)?;
    assert_eq!(out.dims(), &[3, 6, 4, 2]);
    for idx1 in 0..3 {
        for idx2 in 0..6 {
            let out = out.i((idx1, idx2))?;
            let lhs = lhs.i((idx1, 0))?;
            let rhs = rhs.i(idx2)?;
            let out2 = lhs.matmul(&rhs);
            let sum_diff2 = (out - out2)?.sqr()?.sum_all()?;
            // With cuda, we see errors of up to ~1e-12.
            assert!(sum_diff2.to_vec0::<f32>()? < 1e-6)
        }
    }
    Ok(())
}

fn broadcasting(device: &Device) -> Result<()> {
    let t1 = Tensor::arange(0f32, 24f32, device)?.reshape((4, 2, 3))?;
    let t2 = Tensor::new(&[100f32, 200f32], device)?;
    let s = t1.broadcast_add(&t2.reshape((2, 1))?)?;
    assert_eq!(
        s.to_vec3::<f32>()?,
        &[
            [[100.0, 101.0, 102.0], [203.0, 204.0, 205.0]],
            [[106.0, 107.0, 108.0], [209.0, 210.0, 211.0]],
            [[112.0, 113.0, 114.0], [215.0, 216.0, 217.0]],
            [[118.0, 119.0, 120.0], [221.0, 222.0, 223.0]]
        ]
    );
    let s = t1.t()?.broadcast_add(&t2)?;
    assert_eq!(
        s.to_vec3::<f32>()?,
        &[
            [[100.0, 203.0], [101.0, 204.0], [102.0, 205.0]],
            [[106.0, 209.0], [107.0, 210.0], [108.0, 211.0]],
            [[112.0, 215.0], [113.0, 216.0], [114.0, 217.0]],
            [[118.0, 221.0], [119.0, 222.0], [120.0, 223.0]]
        ]
    );
    let s = t1.broadcast_sub(&t2.reshape((2, 1))?)?;
    assert_eq!(
        s.to_vec3::<f32>()?,
        &[
            [[-100.0, -99.0, -98.0], [-197.0, -196.0, -195.0]],
            [[-94.0, -93.0, -92.0], [-191.0, -190.0, -189.0]],
            [[-88.0, -87.0, -86.0], [-185.0, -184.0, -183.0]],
            [[-82.0, -81.0, -80.0], [-179.0, -178.0, -177.0]]
        ]
    );
    let s = t1.t()?.broadcast_sub(&t2)?;
    assert_eq!(
        s.to_vec3::<f32>()?,
        &[
            [[-100.0, -197.0], [-99.0, -196.0], [-98.0, -195.0]],
            [[-94.0, -191.0], [-93.0, -190.0], [-92.0, -189.0]],
            [[-88.0, -185.0], [-87.0, -184.0], [-86.0, -183.0]],
            [[-82.0, -179.0], [-81.0, -178.0], [-80.0, -177.0]]
        ]
    );
    // Test a narrowed version as this uses a layout start_offset.
    let t1 = t1.i(2..)?;
    let s = t1.broadcast_add(&t2.reshape((2, 1))?)?;
    assert_eq!(
        s.to_vec3::<f32>()?,
        &[
            [[112.0, 113.0, 114.0], [215.0, 216.0, 217.0]],
            [[118.0, 119.0, 120.0], [221.0, 222.0, 223.0]]
        ]
    );
    let s = t1.t()?.broadcast_add(&t2)?;
    assert_eq!(
        s.to_vec3::<f32>()?,
        &[
            [[112.0, 215.0], [113.0, 216.0], [114.0, 217.0]],
            [[118.0, 221.0], [119.0, 222.0], [120.0, 223.0]]
        ]
    );
    let s = t1.broadcast_sub(&t2.reshape((2, 1))?)?;
    assert_eq!(
        s.to_vec3::<f32>()?,
        &[
            [[-88.0, -87.0, -86.0], [-185.0, -184.0, -183.0]],
            [[-82.0, -81.0, -80.0], [-179.0, -178.0, -177.0]]
        ]
    );
    let s = t1.t()?.broadcast_sub(&t2)?;
    assert_eq!(
        s.to_vec3::<f32>()?,
        &[
            [[-88.0, -185.0], [-87.0, -184.0], [-86.0, -183.0]],
            [[-82.0, -179.0], [-81.0, -178.0], [-80.0, -177.0]]
        ]
    );
    let t3 = Tensor::new(1f32, device)?.broadcast_div(&t2)?;
    let s = t1.broadcast_mul(&t2.reshape((2, 1))?)?;
    let s_div = t1.broadcast_div(&t3.reshape((2, 1))?)?;
    assert_eq!(
        s.to_vec3::<f32>()?,
        &[
            [[1200.0, 1300.0, 1400.0], [3000.0, 3200.0, 3400.0]],
            [[1800.0, 1900.0, 2000.0], [4200.0, 4400.0, 4600.0]]
        ]
    );
    assert_eq!(s.to_vec3::<f32>()?, s_div.to_vec3::<f32>()?,);
    let s = t1.t()?.broadcast_mul(&t2)?;
    let s_div = t1.t()?.broadcast_div(&t3)?;
    assert_eq!(
        s.to_vec3::<f32>()?,
        &[
            [[1200.0, 3000.0], [1300.0, 3200.0], [1400.0, 3400.0]],
            [[1800.0, 4200.0], [1900.0, 4400.0], [2000.0, 4600.0]]
        ]
    );
    assert_eq!(s.to_vec3::<f32>()?, s_div.to_vec3::<f32>()?,);
    Ok(())
}

fn randn(device: &Device) -> Result<()> {
    let tensor = Tensor::randn(0f32, 1f32, (5, 3), device)?;
    assert_eq!(tensor.dims(), [5, 3]);
    let tensor = Tensor::rand(0f32, 1f32, (5, 3), device)?;
    assert_eq!(tensor.dims(), [5, 3]);
    Ok(())
}

test_device!(zeros, zeros_cpu, zeros_gpu, zeros_metal);
test_device!(ones, ones_cpu, ones_gpu, ones_metal);
test_device!(arange, arange_cpu, arange_gpu, arange_metal);
test_device!(add_mul, add_mul_cpu, add_mul_gpu, add_mul_metal);
test_device!(tensor_2d, tensor_2d_cpu, tensor_2d_gpu, tensor_2d_metal);
test_device!(narrow, narrow_cpu, narrow_gpu, narrow_metal);
test_device!(broadcast, broadcast_cpu, broadcast_gpu, broadcast_metal);
test_device!(cat, cat_cpu, cat_gpu, cat_metal);
test_device!(sum, sum_cpu, sum_gpu, sum_metal);
test_device!(min, min_cpu, min_gpu, min_metal);
test_device!(max, max_cpu, max_gpu, max_metal);
test_device!(argmax, argmax_cpu, argmax_gpu, argmax_metal);
test_device!(argmin, argmin_cpu, argmin_gpu, argmin_metal);
test_device!(transpose, transpose_cpu, transpose_gpu, transpose_metal);
test_device!(unary_op, unary_op_cpu, unary_op_gpu, unary_op_metal);
test_device!(binary_op, binary_op_cpu, binary_op_gpu, binary_op_metal);
test_device!(embeddings, embeddings_cpu, embeddings_gpu, embeddings_metal);
test_device!(cmp, cmp_cpu, cmp_gpu, cmp_metal);
test_device!(matmul, matmul_cpu, matmul_gpu, matmul_metal);
test_device!(
    broadcast_matmul,
    broadcast_matmul_cpu,
    broadcast_matmul_gpu,
    broadcast_matmul_metal
);
test_device!(
    broadcasting,
    broadcasting_cpu,
    broadcasting_gpu,
    broadcasting_metal
);
test_device!(
    index_select,
    index_select_cpu,
    index_select_gpu,
    index_select_metal
);
test_device!(index_add, index_add_cpu, index_add_gpu, index_add_metal);
test_device!(gather, gather_cpu, gather_gpu, gather_metal);
test_device!(
    scatter_add,
    scatter_add_cpu,
    scatter_add_gpu,
    scatter_add_metal
);
test_device!(
    slice_scatter,
    slice_scatter_cpu,
    slice_scatter_gpu,
    slice_scatter_metal
);
test_device!(randn, randn_cpu, randn_gpu, randn_metal);
test_device!(clamp, clamp_cpu, clamp_gpu, clamp_metal);
test_device!(var, var_cpu, var_gpu, var_metal);

// There was originally a bug on the CPU implementation for randn
// https://github.com/huggingface/candle/issues/381
#[test]
fn randn_hasneg() -> Result<()> {
    let t = Tensor::randn(0f32, 1f32, 200, &Device::Cpu)?.to_vec1::<f32>()?;
    if t.iter().all(|&v| v >= 0.) {
        candle_core::bail!("all values in tensors are non-negative")
    }
    Ok(())
}

#[test]
fn pad_with_same() -> Result<()> {
    let t = Tensor::arange(1f32, 5f32, &Device::Cpu)?.reshape((2, 2))?;
    let t0 = t.pad_with_same(0, 1, 2)?;
    assert_eq!(
        t0.to_vec2::<f32>()?,
        [[1.0, 2.0], [1.0, 2.0], [3.0, 4.0], [3.0, 4.0], [3.0, 4.0]]
    );
    let t1 = t.pad_with_same(1, 1, 2)?;
    assert_eq!(
        t1.to_vec2::<f32>()?,
        [[1.0, 1.0, 2.0, 2.0, 2.0], [3.0, 3.0, 4.0, 4.0, 4.0]]
    );
    Ok(())
}

#[test]
fn i64_abs() -> Result<()> {
    let t = Tensor::new(&[-42i64, 1337], &Device::Cpu)?;
    let t = t.abs()?;
    assert_eq!(t.to_vec1::<i64>()?, [42, 1337]);
    Ok(())
}

#[test]
fn tril_triu_eye() -> Result<()> {
    let t = Tensor::tril2(4, DType::F32, &Device::Cpu)?;
    assert_eq!(
        t.to_vec2::<f32>()?,
        [
            [1.0, 0.0, 0.0, 0.0],
            [1.0, 1.0, 0.0, 0.0],
            [1.0, 1.0, 1.0, 0.0],
            [1.0, 1.0, 1.0, 1.0]
        ],
    );
    let t = Tensor::triu2(4, DType::F32, &Device::Cpu)?;
    assert_eq!(
        t.to_vec2::<f32>()?,
        [
            [1.0, 1.0, 1.0, 1.0],
            [0.0, 1.0, 1.0, 1.0],
            [0.0, 0.0, 1.0, 1.0],
            [0.0, 0.0, 0.0, 1.0]
        ]
    );
    let t = Tensor::eye(4, DType::F32, &Device::Cpu)?;
    assert_eq!(
        t.to_vec2::<f32>()?,
        [
            [1.0, 0.0, 0.0, 0.0],
            [0.0, 1.0, 0.0, 0.0],
            [0.0, 0.0, 1.0, 0.0],
            [0.0, 0.0, 0.0, 1.0]
        ]
    );
    Ok(())
}

#[test]
fn cumsum() -> Result<()> {
    let t = &[3f32, 1., 4., 1., 5.];
    let t = Tensor::new(t, &Device::Cpu)?;
    assert_eq!(t.cumsum(0)?.to_vec1::<f32>()?, [3., 4., 8., 9., 14.]);
    let t = t.unsqueeze(1)?;
    assert_eq!(
        t.cumsum(0)?.to_vec2::<f32>()?,
        [[3.0], [4.0], [8.0], [9.0], [14.0]]
    );
    assert_eq!(
        t.cumsum(1)?.to_vec2::<f32>()?,
        [[3.0], [1.0], [4.0], [1.0], [5.0]]
    );
    let t = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
    let t = Tensor::new(t, &Device::Cpu)?;
    assert_eq!(
        t.cumsum(1)?.to_vec2::<f32>()?,
        [[3.0, 4.0, 8.0, 9.0, 14.0], [2.0, 3.0, 10.0, 18.0, 20.0]],
    );
    assert_eq!(
        t.cumsum(0)?.to_vec2::<f32>()?,
        [[3.0, 1.0, 4.0, 1.0, 5.0], [5.0, 2.0, 11.0, 9.0, 7.0]]
    );
    Ok(())
}
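Several of the reduction tests above deliberately build a non-contiguous tensor via t.t()?.contiguous()?.t()? so the kernels get exercised on strided layouts. The following dependency-free sketch (the View struct is our own illustration, not candle's internal representation) shows why the results must not depend on strides: a transposed buffer with swapped strides indexes the same logical elements.

/// A toy strided 2D view over a flat buffer.
struct View<'a> {
    data: &'a [u32],
    shape: (usize, usize),
    stride: (usize, usize),
}

impl View<'_> {
    fn get(&self, i: usize, j: usize) -> u32 {
        // Element (i, j) lives at offset i*stride.0 + j*stride.1.
        self.data[i * self.stride.0 + j * self.stride.1]
    }
    fn sum(&self) -> u32 {
        let mut total = 0;
        for i in 0..self.shape.0 {
            for j in 0..self.shape.1 {
                total += self.get(i, j);
            }
        }
        total
    }
}

fn main() {
    let data: Vec<u32> = (0..4000).collect();
    // Contiguous (2000, 2) view: strides (2, 1).
    let contiguous = View { data: &data, shape: (2000, 2), stride: (2, 1) };
    // A column-major copy of the same data; the same logical (2000, 2) view
    // over it needs swapped strides (1, 2000).
    let transposed: Vec<u32> = (0..2)
        .flat_map(|j| (0..2000).map(move |i| (i * 2 + j) as u32))
        .collect();
    let non_contiguous = View { data: &transposed, shape: (2000, 2), stride: (1, 2000) };
    // Reductions must agree regardless of the memory layout, as the tests check.
    assert_eq!(contiguous.sum(), 7998000);
    assert_eq!(non_contiguous.sum(), 7998000);
}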
repo_id: hf_public_repos/candle/candle-core
file_path: hf_public_repos/candle/candle-core/tests/npy.py
content:
import numpy as np

x = np.arange(10)

# Write a npy file.
np.save("test.npy", x)

# Write multiple values to a npz file.
values = {"x": x, "x_plus_one": x + 1}
np.savez("test.npz", **values)
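The arrays this script writes are meant to be read back on the Rust side. A minimal sketch, assuming candle's Tensor::read_npy and Tensor::read_npz helpers (check the candle-core npy module for the exact signatures):

use candle_core::{Result, Tensor};

fn main() -> Result<()> {
    // Read back the single array written to test.npy.
    let x = Tensor::read_npy("test.npy")?;
    println!("x: {x}");
    // Read back the named arrays from the npz archive.
    for (name, tensor) in Tensor::read_npz("test.npz")? {
        println!("{name}: {tensor}");
    }
    Ok(())
}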
repo_id: hf_public_repos/candle/candle-core
file_path: hf_public_repos/candle/candle-core/tests/quantized_tests.rs
content:
use candle_core::{
    quantized::{self, GgmlDType},
    test_utils::to_vec2_round,
    Device, Module, Result, Tensor,
};
use quantized::{k_quants, GgmlType};
use rand::prelude::*;

const GGML_TEST_SIZE: usize = 32 * 128;

const GGML_MAX_QUANTIZATION_TOTAL_ERROR: f32 = 0.002;
const GGML_MAX_QUANTIZATION_TOTAL_ERROR_2BITS: f32 = 0.0075;
const GGML_MAX_QUANTIZATION_TOTAL_ERROR_3BITS: f32 = 0.0040;
const GGML_MAX_DOT_PRODUCT_ERROR: f32 = 0.02;

#[test]
fn quantized_matmul() -> Result<()> {
    let cpu = &Device::Cpu;
    let (m, k, n) = (3, 64, 4);
    let lhs = (0..(m * k)).map(|v| v as f32).collect::<Vec<_>>();
    let tensor_lhs = Tensor::from_slice(&lhs, (m, k), cpu)?;
    let mut dst = vec![42.; 3 * 4];
    let mut rhs_t = vec![k_quants::BlockQ4_0::zeros(); 8];
    let rhs = (0..(k * n)).map(|v| v as f32).collect::<Vec<_>>();
    let tensor_rhs = Tensor::from_slice(&rhs, (n, k), cpu)?.t()?;
    k_quants::BlockQ4_0::from_float(&rhs, &mut rhs_t)?;
    k_quants::matmul((m, k, n), &lhs, &rhs_t, &mut dst)?;
    assert_eq!(
        dst.iter().map(|x| x.round()).collect::<Vec<_>>(),
        &[
            85120.0, 214562.0, 345455.0, 474748.0, 213475.0, 604465.0, 1000686.0, 1388317.0,
            341876.0, 994283.0, 1655709.0, 2301518.0
        ]
    );
    let mm = tensor_lhs.matmul(&tensor_rhs)?;
    assert_eq!(
        mm.to_vec2::<f32>()?,
        &[
            [85344.0, 214368.0, 343392.0, 472416.0],
            [214368.0, 605536.0, 996704.0, 1387872.0],
            [343392.0, 996704.0, 1650016.0, 2303328.0]
        ]
    );

    let qtensor = quantized::QTensor::new(rhs_t, (4, 64))?;
    let matmul = quantized::QMatMul::from_qtensor(qtensor)?;
    let res = matmul.forward(&tensor_lhs)?;
    assert_eq!(
        to_vec2_round(&res, 0)?,
        &[
            [85120.0, 214562.0, 345455.0, 474748.0],
            [213475.0, 604465.0, 1000686.0, 1388317.0],
            [341876.0, 994283.0, 1655709.0, 2301518.0]
        ]
    );
    Ok(())
}

#[test]
fn quantized_matmul_neg() -> Result<()> {
    let cpu = &Device::Cpu;
    let (m, k, n) = (3, 64, 4);
    let lhs = (0..(m * k))
        .map(|v| v as f32 - (m * k) as f32 / 2.0)
        .collect::<Vec<_>>();
    let tensor_lhs = Tensor::from_slice(&lhs, (m, k), cpu)?;
    let mut dst = vec![42.; 3 * 4];
    let mut rhs_t = vec![k_quants::BlockQ4_0::zeros(); 8];
    let rhs = (0..k * n)
        .map(|v| v as f32 - (k * n) as f32 / 3.0)
        .collect::<Vec<_>>();
    let tensor_rhs = Tensor::from_slice(&rhs, (n, k), cpu)?.t()?;
    k_quants::BlockQ4_0::from_float(&rhs, &mut rhs_t)?;
    k_quants::matmul((m, k, n), &lhs, &rhs_t, &mut dst)?;
    assert_eq!(
        dst.iter().map(|x| x.round()).collect::<Vec<_>>(),
        &[
            243524.0, -19596.0, -285051.0, -549815.0, 23777.0, 21651.0, 19398.0, 18367.0,
            -196472.0, 63012.0, 324585.0, 587902.0
        ]
    );
    let mm = tensor_lhs.matmul(&tensor_rhs)?;
    assert_eq!(
        to_vec2_round(&mm, 0)?,
        &[
            [244064.0, -20128.0, -284320.0, -548512.0],
            [23563.0, 21515.0, 19467.0, 17419.0],
            [-196939.0, 63157.0, 323253.0, 583349.0]
        ]
    );

    let qtensor = quantized::QTensor::new(rhs_t, (4, 64))?;
    let matmul = quantized::QMatMul::from_qtensor(qtensor)?;
    let res = matmul.forward(&tensor_lhs)?;
    assert_eq!(
        to_vec2_round(&res, 0)?,
        &[
            [243524.0, -19596.0, -285051.0, -549815.0],
            [23777.0, 21651.0, 19398.0, 18367.0],
            [-196472.0, 63012.0, 324585.0, 587902.0]
        ]
    );
    Ok(())
}

#[test]
fn quantize_q4_0() -> Result<()> {
    use k_quants::BlockQ4_0;

    let src = (0..32 * 4).map(|v| v as f32).collect::<Vec<_>>();
    let mut dst = vec![0f32; 32 * 4];
    let mut quant = vec![BlockQ4_0::zeros(); 4];
    BlockQ4_0::from_float(&src, &mut quant)?;
    BlockQ4_0::to_float(&quant, dst.as_mut_slice())?;
    assert_eq!(
        dst,
        &[
            -0.0, -0.0, 3.875, 3.875, 3.875, 3.875, 7.75, 7.75,
            7.75, 7.75, 11.625, 11.625, 11.625, 11.625, 15.5, 15.5,
            15.5, 15.5, 19.375, 19.375, 19.375, 19.375, 23.25, 23.25,
            23.25, 23.25, 27.125, 27.125, 27.125, 27.125, 31.0, 31.0,
            31.5, 31.5, 31.5, 31.5, 39.375, 39.375, 39.375, 39.375,
            39.375, 39.375, 39.375, 39.375, 47.25, 47.25, 47.25, 47.25,
            47.25, 47.25, 47.25, 47.25, 55.125, 55.125, 55.125, 55.125,
            55.125, 55.125, 55.125, 55.125, 63.0, 63.0, 63.0, 63.0,
            59.375, 59.375, 71.25, 71.25, 71.25, 71.25, 71.25, 71.25,
            71.25, 71.25, 71.25, 71.25, 71.25, 71.25, 83.125, 83.125,
            83.125, 83.125, 83.125, 83.125, 83.125, 83.125, 83.125, 83.125,
            83.125, 83.125, 95.0, 95.0, 95.0, 95.0, 95.0, 95.0,
            95.25, 95.25, 95.25, 95.25, 95.25, 95.25, 95.25, 95.25,
            111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125,
            111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125, 111.125,
            127.0, 127.0, 127.0, 127.0, 127.0, 127.0, 127.0, 127.0
        ]
    );
    ggml_quantization_error_test::<BlockQ4_0>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?;
    Ok(())
}

#[test]
fn quantize_q4_1() -> Result<()> {
    use k_quants::BlockQ4_1;

    let src = (0..32 * 4).map(|v| v as f32).collect::<Vec<_>>();
    let mut dst = vec![0f32; 32 * 4];
    let mut quant = vec![BlockQ4_1::zeros(); 4];
    BlockQ4_1::from_float(&src, &mut quant)?;
    BlockQ4_1::to_float(&quant, dst.as_mut_slice())?;
    assert_eq!(
        round_vector(&dst),
        &[
            0.0, 0.0, 2.066, 2.066, 4.133, 4.133, 6.199, 6.199,
            8.266, 8.266, 10.332, 10.332, 12.398, 12.398, 14.465, 14.465,
            16.531, 16.531, 18.598, 18.598, 20.664, 20.664, 22.73, 22.73,
            24.797, 24.797, 26.863, 26.863, 28.93, 28.93, 30.996, 30.996,
            32.0, 32.0, 34.066, 34.066, 36.133, 36.133, 38.199, 38.199,
            40.266, 40.266, 42.332, 42.332, 44.398, 44.398, 46.465, 46.465,
            48.531, 48.531, 50.598, 50.598, 52.664, 52.664, 54.73, 54.73,
            56.797, 56.797, 58.863, 58.863, 60.93, 60.93, 62.996, 62.996,
            64.0, 64.0, 66.066, 66.066, 68.133, 68.133, 70.199, 70.199,
            72.266, 72.266, 74.332, 74.332, 76.398, 76.398, 78.465, 78.465,
            80.531, 80.531, 82.598, 82.598, 84.664, 84.664, 86.73, 86.73,
            88.797, 88.797, 90.863, 90.863, 92.93, 92.93, 94.996, 94.996,
            96.0, 96.0, 98.066, 98.066, 100.133, 100.133, 102.199, 102.199,
            104.266, 104.266, 106.332, 106.332, 108.398, 108.398, 110.465, 110.465,
            112.531, 112.531, 114.598, 114.598, 116.664, 116.664, 118.73, 118.73,
            120.797, 120.797, 122.863, 122.863, 124.93, 124.93, 126.996, 126.996
        ]
    );
    ggml_quantization_error_test::<BlockQ4_1>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?;
    Ok(())
}

#[test]
fn quantize_q5_0() -> Result<()> {
    use k_quants::BlockQ5_0;

    let src = (0..32 * 4).map(|v| v as f32).collect::<Vec<_>>();
    let mut dst = vec![0f32; 32 * 4];
    let mut quant = vec![BlockQ5_0::zeros(); 4];
    BlockQ5_0::from_float(&src, &mut quant)?;
    BlockQ5_0::to_float(&quant, dst.as_mut_slice())?;
    assert_eq!(
        round_vector(&dst),
        &[
            -0.0, 1.938, 1.938, 3.875, 3.875, 5.813, 5.813, 7.75,
            7.75, 9.688, 9.688, 11.625, 11.625, 13.563, 13.563, 15.5,
            15.5, 17.438, 17.438, 19.375, 19.375, 21.313, 21.313, 23.25,
            23.25, 25.188, 25.188, 27.125, 27.125, 29.063, 29.063, 31.0,
            31.5, 31.5, 35.438, 35.438, 35.438, 35.438, 39.375, 39.375,
            39.375, 39.375, 43.313, 43.313, 43.313, 43.313, 47.25, 47.25,
            47.25, 47.25, 51.188, 51.188, 51.188, 51.188, 55.125, 55.125,
            55.125, 55.125, 59.063, 59.063, 59.063, 59.063, 63.0, 63.0,
            65.313, 65.313, 65.313, 65.313, 65.313, 71.25, 71.25, 71.25,
            71.25, 71.25, 71.25, 77.188, 77.188, 77.188, 77.188, 77.188,
            77.188, 83.125, 83.125, 83.125, 83.125, 83.125, 83.125, 89.063,
            89.063, 89.063, 89.063, 89.063, 89.063, 95.0, 95.0, 95.0,
            95.25, 95.25, 95.25, 95.25, 103.188, 103.188, 103.188, 103.188,
            103.188, 103.188, 103.188, 103.188, 111.125, 111.125, 111.125, 111.125,
            111.125, 111.125, 111.125, 111.125, 119.063, 119.063, 119.063, 119.063,
            119.063, 119.063, 119.063, 119.063, 127.0, 127.0, 127.0, 127.0
        ]
    );
    ggml_quantization_error_test::<BlockQ5_0>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?;
    Ok(())
}

#[test]
fn quantize_q5_1() -> Result<()> {
    use k_quants::BlockQ5_1;

    let src = (0..32 * 4).map(|v| v as f32).collect::<Vec<_>>();
    let mut dst = vec![0f32; 32 * 4];
    let mut quant = vec![BlockQ5_1::zeros(); 4];
    BlockQ5_1::from_float(&src, &mut quant)?;
    BlockQ5_1::to_float(&quant, dst.as_mut_slice())?;
    assert_eq!(
        dst,
        &[
            0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0,
            16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0, 26.0, 27.0, 28.0, 29.0,
            30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0, 38.0, 39.0, 40.0, 41.0, 42.0, 43.0,
            44.0, 45.0, 46.0, 47.0, 48.0, 49.0, 50.0, 51.0, 52.0, 53.0, 54.0, 55.0, 56.0, 57.0,
            58.0, 59.0, 60.0, 61.0, 62.0, 63.0, 64.0, 65.0, 66.0, 67.0, 68.0, 69.0, 70.0, 71.0,
            72.0, 73.0, 74.0, 75.0, 76.0, 77.0, 78.0, 79.0, 80.0, 81.0, 82.0, 83.0, 84.0, 85.0,
            86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0, 94.0, 95.0, 96.0, 97.0, 98.0, 99.0,
            100.0, 101.0, 102.0, 103.0, 104.0, 105.0, 106.0, 107.0, 108.0, 109.0, 110.0, 111.0,
            112.0, 113.0, 114.0, 115.0, 116.0, 117.0, 118.0, 119.0, 120.0, 121.0, 122.0, 123.0,
            124.0, 125.0, 126.0, 127.0
        ]
    );
    ggml_quantization_error_test::<BlockQ5_1>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?;
    Ok(())
}

/// Generates a small test vector ranging from -`bound` to `bound` with `size` steps
fn get_test_vector(bound: f32, size: usize) -> (Vec<f32>, Vec<f32>) {
    assert!(
        size % candle_core::quantized::k_quants::QK_K == 0,
        "size must be a multiple of {}",
        candle_core::quantized::k_quants::QK_K
    );

    let src = (0..size)
        .map(|v| (v as f32 - size as f32 / 2.) * bound / (size as f32 / 2.))
        .collect::<Vec<_>>();
    let dst = vec![0f32; size];
    assert_eq!([src[0], src[size / 2]], [-bound, 0.0]);
    (src, dst)
}

/// Rounds each value of a vector to three decimal places.
fn round_vector(values: &[f32]) -> Vec<f32> {
    values
        .iter()
        .map(|x| (1000. * x).round() / 1000.)
        .collect::<Vec<_>>()
}

fn compare_with_error(values: &[f32], expected: &[f32], tolerance: f32) {
    for (i, (value, expected_value)) in values.iter().zip(expected.iter()).enumerate() {
        let difference = (value - expected_value).abs();
        assert!(
            difference < tolerance,
            "Error at index {}: value = {}, expected = {}. Difference = {} exceeds tolerance = {}.",
            i,
            value,
            expected_value,
            difference,
            tolerance
        );
    }
}

/// Creates a vector similar to the one used in the GGML unit tests:
/// https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L26-L30
fn create_ggml_like_vector(offset: f32) -> Vec<f32> {
    (0..GGML_TEST_SIZE)
        .map(|i| 0.1 + 2.0 * (i as f32 + offset).cos())
        .collect()
}

/// Calculates the root mean square error between two vectors, using the GGML
/// definition (sqrt of the summed squared error, divided by the length).
fn calculate_rmse(a: &[f32], b: &[f32]) -> f32 {
    assert_eq!(a.len(), b.len());
    let sum = a
        .iter()
        .zip(b)
        .map(|(a, b)| (a - b).powi(2))
        .sum::<f32>()
        .sqrt();
    sum / a.len() as f32
}

/// Mirrors the GGML quantization unit test:
/// https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L43-L50
fn ggml_quantization_error_test<T: GgmlType>(max_error: f32) -> Result<()> {
    let src = create_ggml_like_vector(0.0);
    let mut dst = vec![0.0; GGML_TEST_SIZE];
    let _quant = quantize_roundtrip::<T>(src.as_slice(), dst.as_mut_slice())?;
    let error = calculate_rmse(src.as_slice(), dst.as_slice());
    if error > max_error {
        candle_core::bail!(
            "Quantization error {} exceeds max error {}",
            error,
            max_error
        );
    }
    Ok(())
}

fn quantize_roundtrip<T: GgmlType>(src: &[f32], dst: &mut [f32]) -> Result<Vec<T>> {
    let mut quant = vec![T::zeros(); src.len() / T::BLCK_SIZE];
    T::from_float(src, &mut quant)?;
    T::to_float(&quant, dst)?;
    Ok(quant)
}

#[test]
fn quantize_q2k() -> Result<()> {
    use k_quants::BlockQ2K;

    let (src, mut dst) = get_test_vector(0.5, 1024);
    let _quant = quantize_roundtrip::<BlockQ2K>(src.as_slice(), dst.as_mut_slice())?;
    compare_with_error(dst.as_slice(), src.as_slice(), 0.1);

    // Test some specific values
    assert_eq!(
        [src[0], src[128], src[256], src[512], src[800], src[1023]],
        [-0.5, -0.375, -0.25, 0.0, 0.28125, 0.49902344]
    );
    let dst = round_vector(&dst);
    assert_eq!(
        [dst[0], dst[128], dst[256], dst[512], dst[800], dst[1023]],
        [-0.499, -0.366, -0.249, 0.0, 0.295, 0.492]
    );

    let (src_big, mut dst_big) = get_test_vector(128.0, 1024);
    let _quant_big = quantize_roundtrip::<BlockQ2K>(src_big.as_slice(), dst_big.as_mut_slice())?;
    compare_with_error(dst_big.as_slice(), src_big.as_slice(), 6.0);

    ggml_quantization_error_test::<BlockQ2K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR_2BITS)?;
    Ok(())
}

#[test]
fn quantize_q3k() -> Result<()> {
    use k_quants::BlockQ3K;

    let (src, mut dst) = get_test_vector(0.5, 1024);
    let _quant = quantize_roundtrip::<BlockQ3K>(src.as_slice(), dst.as_mut_slice())?;
    compare_with_error(dst.as_slice(), src.as_slice(), 0.03);

    // Test some specific values
    assert_eq!(
        [src[0], src[128], src[256], src[512], src[800], src[1023]],
        [-0.5, -0.375, -0.25, 0.0, 0.28125, 0.49902344]
    );
    let dst = round_vector(&dst);
    assert_eq!(
        [dst[0], dst[128], dst[256], dst[512], dst[800], dst[1023]],
        [-0.493, -0.37, -0.243, -0.0, 0.292, 0.492]
    );

    let (src_big, mut dst_big) = get_test_vector(128.0, 1024);
    let _quant_big = quantize_roundtrip::<BlockQ3K>(src_big.as_slice(), dst_big.as_mut_slice())?;
    compare_with_error(dst_big.as_slice(), src_big.as_slice(), 3.5);

    ggml_quantization_error_test::<BlockQ3K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR_3BITS)?;
    Ok(())
}

#[test]
fn quantize_q4k() -> Result<()> {
    use k_quants::BlockQ4K;

    let (src, mut dst) = get_test_vector(0.5, 1024);
    let _quant = quantize_roundtrip::<BlockQ4K>(src.as_slice(), dst.as_mut_slice())?;
    compare_with_error(dst.as_slice(), src.as_slice(), 0.017);

    // Test some specific values
    assert_eq!(
        [src[0], src[128], src[256], src[512], src[800], src[1023]],
        [-0.5, -0.375, -0.25, 0.0, 0.28125, 0.49902344]
    );
    let dst = round_vector(&dst);
    assert_eq!(
        [dst[0], dst[128], dst[256], dst[512], dst[800], dst[1023]],
        [-0.5, -0.373, -0.25, 0.0, 0.288, 0.498]
    );

    let (src_big, mut dst_big) = get_test_vector(128.0, 1024);
    let _quant_big = quantize_roundtrip::<BlockQ4K>(src_big.as_slice(), dst_big.as_mut_slice())?;
    compare_with_error(dst_big.as_slice(), src_big.as_slice(), 4.5);

    ggml_quantization_error_test::<BlockQ4K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?;
    Ok(())
}

#[test]
fn quantize_q5k() -> Result<()> {
    use k_quants::BlockQ5K;

    let (src, mut dst) = get_test_vector(0.5, 1024);
    let _quant = quantize_roundtrip::<BlockQ5K>(src.as_slice(), dst.as_mut_slice())?;
    compare_with_error(dst.as_slice(), src.as_slice(), 0.008);

    // Test some specific values
    assert_eq!(
        [src[0], src[128], src[256], src[512], src[800], src[1023]],
        [-0.5, -0.375, -0.25, 0.0, 0.28125, 0.49902344]
    );
    let dst = round_vector(&dst);
    assert_eq!(
        [dst[0], dst[128], dst[256], dst[512], dst[800], dst[1023]],
        [-0.499, -0.372, -0.249, 0.001, 0.279, 0.499]
    );

    let (src_big, mut dst_big) = get_test_vector(128.0, 1024);
    let _quant_big = quantize_roundtrip::<BlockQ5K>(src_big.as_slice(), dst_big.as_mut_slice())?;
    compare_with_error(dst_big.as_slice(), src_big.as_slice(), 2.5);

    ggml_quantization_error_test::<BlockQ5K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?;
    Ok(())
}

#[test]
fn quantize_q6k() -> Result<()> {
    use k_quants::BlockQ6K;

    let (src, mut dst) = get_test_vector(0.5, 1024);
    let _quant = quantize_roundtrip::<BlockQ6K>(src.as_slice(), dst.as_mut_slice())?;
    compare_with_error(dst.as_slice(), src.as_slice(), 0.008);

    // Test some specific values
    assert_eq!(
        [src[0], src[128], src[256], src[512], src[800], src[1023]],
        [-0.5, -0.375, -0.25, 0.0, 0.28125, 0.49902344]
    );
    let dst = round_vector(&dst);
    assert_eq!(
        [dst[0], dst[128], dst[256], dst[512], dst[800], dst[1023]],
        [-0.497, -0.372, -0.25, -0.0, 0.284, 0.5]
    );

    let (src_big, mut dst_big) = get_test_vector(128.0, 1024);
    let _quant_big = quantize_roundtrip::<BlockQ6K>(src_big.as_slice(), dst_big.as_mut_slice())?;
    compare_with_error(dst_big.as_slice(), src_big.as_slice(), 2.0);

    ggml_quantization_error_test::<BlockQ6K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?;
    Ok(())
}

#[test]
fn quantize_q8k() -> Result<()> {
    use k_quants::BlockQ8K;

    let (src, mut dst) = get_test_vector(0.5, 1024);
    let _quant = quantize_roundtrip::<BlockQ8K>(src.as_slice(), dst.as_mut_slice())?;
    compare_with_error(dst.as_slice(), src.as_slice(), 0.003);

    // Test some specific values
    assert_eq!(
        [src[0], src[128], src[256], src[512], src[800], src[1023]],
        [-0.5, -0.375, -0.25, 0.0, 0.28125, 0.49902344]
    );
    let dst = round_vector(&dst);
    assert_eq!(
        [dst[0], dst[128], dst[256], dst[512], dst[800], dst[1023]],
        [-0.5, -0.375, -0.25, -0.0, 0.281, 0.499]
    );

    let (src_big, mut dst_big) = get_test_vector(128.0, 1024);
    let _quant_big = quantize_roundtrip::<BlockQ8K>(src_big.as_slice(), dst_big.as_mut_slice())?;
    compare_with_error(dst_big.as_slice(), src_big.as_slice(), 0.6);

    ggml_quantization_error_test::<BlockQ8K>(GGML_MAX_QUANTIZATION_TOTAL_ERROR)?;
    Ok(())
}

/// Very simple dot product implementation
fn vec_dot_reference(a: &[f32], b: &[f32]) -> f32 {
    a.iter().zip(b).map(|(a, b)| a * b).sum()
}

/// Returns the error achieved by the GGML matmul unit test.
fn ggml_reference_matmul_error(dtype: GgmlDType) -> Result<f32> {
    let err = match dtype {
        GgmlDType::F16 => 0.000010,
        GgmlDType::Q2K => 0.004086,
        GgmlDType::Q3K => 0.016148,
        GgmlDType::Q4K => 0.002425,
        GgmlDType::Q5K => 0.000740,
        GgmlDType::Q6K => 0.000952,
        GgmlDType::Q4_0 => 0.001143,
        GgmlDType::Q4_1 => 0.007784,
        GgmlDType::Q5_0 => 0.001353,
        GgmlDType::Q5_1 => 0.001363,
        GgmlDType::Q8_0 => 0.000092,
        // Not from the ggml repo.
        GgmlDType::Q8K => 0.00065,
        _ => candle_core::bail!("No GGML results for quantization type {dtype:?}",),
    };
    Ok(err)
}

/// Mirrors the GGML matmul unit test:
/// https://github.com/ggerganov/llama.cpp/blob/master/tests/test-quantize-fns.cpp#L76-L91
fn ggml_matmul_error_test<T: GgmlType>() -> Result<()> {
    let a = create_ggml_like_vector(0.0);
    let b = create_ggml_like_vector(1.0);
    let length = a.len();

    let mut a_quant = vec![T::zeros(); length / T::BLCK_SIZE];
    let mut b_quant = vec![T::VecDotType::zeros(); length / T::VecDotType::BLCK_SIZE];
    T::from_float(&a, &mut a_quant)?;
    T::VecDotType::from_float(&b, &mut b_quant)?;

    let result = T::vec_dot(length, &a_quant, &b_quant)?;
    let result_unopt = T::vec_dot_unopt(length, &a_quant, &b_quant)?;
    let reference_result = vec_dot_reference(&a, &b);

    if (result - result_unopt).abs() / length as f32 > 1e-6 {
        candle_core::bail!(
            "the opt and unopt vec-dot returned different values, opt {result}, unopt {result_unopt}"
        )
    }

    let error = (result - reference_result).abs() / length as f32;

    let ggml_error = ggml_reference_matmul_error(T::DTYPE)?;

    if !error.is_finite() || error > GGML_MAX_DOT_PRODUCT_ERROR {
        candle_core::bail!(
            "Dot product error {error} exceeds max error {GGML_MAX_DOT_PRODUCT_ERROR}",
        );
    }

    // We diverge slightly due to different rounding behavior / f16 to f32 conversions in GGML
    // => we use a slightly higher error threshold
    const ERROR_LENIENCY: f32 = 0.00001;
    if error - ERROR_LENIENCY > ggml_error {
        candle_core::bail!(
            "Dot product error {} exceeds ggml reference error {}",
            error,
            ggml_error
        );
    }
    Ok(())
}

/// Generates random tensors of size `m x k` and `n x k` and calculates their
/// expected matrix multiplication result.
fn get_random_tensors(
    m: usize,
    k: usize,
    n: usize,
    device: &Device,
) -> Result<(Tensor, Tensor, Tensor)> {
    let mut rng = StdRng::seed_from_u64(314159265358979);

    let lhs = (0..m * k)
        .map(|_| rng.gen::<f32>() - 0.5)
        .collect::<Vec<_>>();
    let rhs = (0..n * k)
        .map(|_| rng.gen::<f32>() - 0.5)
        .collect::<Vec<_>>();

    let lhs = Tensor::from_vec(lhs, (m, k), device)?;
    let rhs = Tensor::from_vec(rhs, (n, k), device)?;

    let mm = lhs.matmul(&rhs.t()?)?;
    Ok((lhs, rhs, mm))
}

#[test]
fn quantized_matmul_q2k() -> Result<()> {
    use k_quants::BlockQ2K;

    let cpu = &Device::Cpu;
    let (m, k, n) = (11, 512, 21);
    let (lhs, rhs, mm) = get_random_tensors(m, k, n, cpu)?;
    assert_eq!(mm.dims(), [m, n]);
    let dst = mm.flatten_all()?.to_vec1::<f32>()?;
    let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]);
    assert_eq!(dst, [1.262, 1.513, -0.208, 1.702]);

    let rhs = quantized::QTensor::quantize::<BlockQ2K>(&rhs)?;
    let rhs = quantized::QMatMul::from_qtensor(rhs)?;
    let mm = rhs.forward(&lhs)?;

    assert_eq!(mm.dims(), [m, n]);
    let dst = mm.flatten_all()?.to_vec1::<f32>()?;
    let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]);
    assert_eq!(dst, [0.916, 0.422, 0.215, 1.668]);

    ggml_matmul_error_test::<BlockQ2K>()?;

    Ok(())
}

#[test]
fn quantized_matmul_q3k() -> Result<()> {
    use k_quants::BlockQ3K;

    let cpu = &Device::Cpu;
    let (m, k, n) = (11, 512, 21);
    let (lhs, rhs, mm) = get_random_tensors(m, k, n, cpu)?;
    assert_eq!(mm.dims(), [m, n]);
    let dst = mm.flatten_all()?.to_vec1::<f32>()?;
    let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]);
    assert_eq!(dst, [1.262, 1.513, -0.208, 1.702]);

    let rhs = quantized::QTensor::quantize::<BlockQ3K>(&rhs)?;
    let rhs = quantized::QMatMul::from_qtensor(rhs)?;
    let mm = rhs.forward(&lhs)?;

    assert_eq!(mm.dims(), [m, n]);
    let dst = mm.flatten_all()?.to_vec1::<f32>()?;
    let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]);
    assert_eq!(dst, [1.029, 1.418, -0.314, 1.495]);

    ggml_matmul_error_test::<BlockQ3K>()?;

    Ok(())
}

#[test]
fn quantized_matmul_q4k() -> Result<()> {
    use k_quants::BlockQ4K;

    let cpu = &Device::Cpu;
    let (m, k, n) = (11, 512, 21);
    let (lhs, rhs, mm) = get_random_tensors(m, k, n, cpu)?;
    assert_eq!(mm.dims(), [m, n]);
    let dst = mm.flatten_all()?.to_vec1::<f32>()?;
    let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]);
    assert_eq!(dst, [1.262, 1.513, -0.208, 1.702]);

    let rhs = quantized::QTensor::quantize::<BlockQ4K>(&rhs)?;
    let rhs = quantized::QMatMul::from_qtensor(rhs)?;
    let mm = rhs.forward(&lhs)?;

    assert_eq!(mm.dims(), [m, n]);
    let dst = mm.flatten_all()?.to_vec1::<f32>()?;
    let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]);
    assert_eq!(dst, [1.125, 1.435, -0.201, 1.589]);

    ggml_matmul_error_test::<BlockQ4K>()?;

    Ok(())
}

#[test]
fn quantized_matmul_q5k() -> Result<()> {
    use k_quants::BlockQ5K;

    let cpu = &Device::Cpu;
    let (m, k, n) = (11, 512, 21);
    let (lhs, rhs, mm) = get_random_tensors(m, k, n, cpu)?;
    assert_eq!(mm.dims(), [m, n]);
    let dst = mm.flatten_all()?.to_vec1::<f32>()?;
    let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]);
    assert_eq!(dst, [1.262, 1.513, -0.208, 1.702]);

    let rhs = quantized::QTensor::quantize::<BlockQ5K>(&rhs)?;
    let rhs = quantized::QMatMul::from_qtensor(rhs)?;
    let mm = rhs.forward(&lhs)?;

    assert_eq!(mm.dims(), [m, n]);
    let dst = mm.flatten_all()?.to_vec1::<f32>()?;
    let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]);
    assert_eq!(dst, [1.192, 1.491, -0.18, 1.743]);

    // Expected: 0.000740408897
    ggml_matmul_error_test::<BlockQ5K>()?;

    Ok(())
}

#[test]
fn quantized_matmul_q6k() -> Result<()> {
    use k_quants::BlockQ6K;

    let cpu = &Device::Cpu;
    let (m, k, n) = (11, 512, 21);
    let (lhs, rhs, mm) = get_random_tensors(m, k, n, cpu)?;
    assert_eq!(mm.dims(), [m, n]);
    let dst = mm.flatten_all()?.to_vec1::<f32>()?;
    let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]);
    assert_eq!(dst, [1.262, 1.513, -0.208, 1.702]);

    let rhs = quantized::QTensor::quantize::<BlockQ6K>(&rhs)?;
    let rhs = quantized::QMatMul::from_qtensor(rhs)?;
    let mm = rhs.forward(&lhs)?;

    assert_eq!(mm.dims(), [m, n]);
    let dst = mm.flatten_all()?.to_vec1::<f32>()?;
    let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]);
    assert_eq!(dst, [1.324, 1.49, -0.164, 1.741]);

    ggml_matmul_error_test::<BlockQ6K>()?;
    Ok(())
}

#[test]
fn quantized_matmul_q8k() -> Result<()> {
    use k_quants::BlockQ8K;

    let cpu = &Device::Cpu;
    let (m, k, n) = (11, 512, 21);
    let (lhs, rhs, mm) = get_random_tensors(m, k, n, cpu)?;
    assert_eq!(mm.dims(), [m, n]);
    let dst = mm.flatten_all()?.to_vec1::<f32>()?;
    let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]);
    assert_eq!(dst, [1.262, 1.513, -0.208, 1.702]);

    let rhs = quantized::QTensor::quantize::<BlockQ8K>(&rhs)?;
    let rhs = quantized::QMatMul::from_qtensor(rhs)?;
    let mm = rhs.forward(&lhs)?;

    assert_eq!(mm.dims(), [m, n]);
    let dst = mm.flatten_all()?.to_vec1::<f32>()?;
    let dst = round_vector(&[dst[0], dst[m * n / 3], dst[m * n * 2 / 3], dst[m * n - 1]]);
    assert_eq!(dst, [1.266, 1.504, -0.204, 1.7]);

    ggml_matmul_error_test::<BlockQ8K>()?;
    Ok(())
}
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/custom_op_tests.rs
use candle_core::backend::BackendStorage; use candle_core::cpu_backend; use candle_core::test_utils::to_vec1_round; use candle_core::{CpuStorage, CustomOp1, DType, Device, Error, Layout, Result, Shape, Tensor}; fn fwd<T: num_traits::Float>(v: T, alpha: f64) -> T { if v.is_sign_positive() { v } else { let alpha = T::from(alpha).unwrap_or(T::nan()); (v.exp() - T::one()) * alpha } } struct Elu { alpha: f64, } impl CustomOp1 for Elu { fn name(&self) -> &'static str { "elu" } fn cpu_fwd(&self, s: &CpuStorage, l: &Layout) -> Result<(CpuStorage, Shape)> { let storage = candle_core::map_dtype!( "elu", s, |s| cpu_backend::unary_map(s, l, |v| fwd(v, self.alpha)), (BF16, F16, F32, F64) ); Ok((storage, l.shape().clone())) } } #[test] fn custom_op1_no_backward() -> Result<()> { let cpu = &Device::Cpu; let t = Tensor::arange(0u32, 12u32, cpu)?.to_dtype(DType::F32)?; let t = (t - 5.)?; let elu_t = t.apply_op1_no_bwd(&Elu { alpha: 1. })?; assert_eq!( to_vec1_round(&elu_t, 4)?, &[-0.9933, -0.9817, -0.9502, -0.8647, -0.6321, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0] ); Ok(()) } // Define a similar struct as Elu but with backward support. fn bwd<T: num_traits::Float>(v: T, alpha: f64) -> T { if v.is_sign_positive() { T::one() } else { let alpha = T::from(alpha).unwrap_or(T::nan()); v.exp() * alpha } } struct EluBackward { alpha: f64, } impl CustomOp1 for EluBackward { fn name(&self) -> &'static str { "elu-bwd" } fn cpu_fwd(&self, s: &CpuStorage, l: &Layout) -> Result<(CpuStorage, Shape)> { let storage = candle_core::map_dtype!( "elu-bwd", s, |s| cpu_backend::unary_map(s, l, |v| bwd(v, self.alpha)), (BF16, F16, F32, F64) ); Ok((storage, l.shape().clone())) } } struct EluWithBackward(Elu); impl EluWithBackward { fn new(alpha: f64) -> Self { Self(Elu { alpha }) } } impl CustomOp1 for EluWithBackward { fn name(&self) -> &'static str { "elu" } fn cpu_fwd(&self, s: &CpuStorage, l: &Layout) -> Result<(CpuStorage, Shape)> { self.0.cpu_fwd(s, l) } fn bwd(&self, arg: &Tensor, _res: &Tensor, grad_res: &Tensor) -> Result<Option<Tensor>> { let alpha = self.0.alpha; let bwd = arg.apply_op1(EluBackward { alpha })?; Ok(Some(grad_res.mul(&bwd)?)) } } #[test] fn custom_op1_with_backward() -> Result<()> { let cpu = &Device::Cpu; let t = candle_core::Var::new(&[-2f32, 0f32, 2f32], cpu)?; let elu_t = t.apply_op1(EluWithBackward::new(2.))?; assert_eq!(to_vec1_round(&elu_t, 4)?, &[-1.7293, 0.0, 2.0]); let grads = elu_t.backward()?; let grad_x = grads.get(&t).unwrap(); assert_eq!(to_vec1_round(grad_x, 4)?, [0.2707, 1.0, 1.0]); Ok(()) }
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/conv_tests.rs
use anyhow::Result; use candle_core::{test_device, test_utils, Device, IndexOp, Tensor}; /* This test is based on the following script. import torch torch.manual_seed(4242) t = torch.randn((1, 4, 5)) w = torch.randn((2, 4, 3)) print(t.flatten()) print(w.flatten()) res = torch.nn.functional.conv1d(t, w) print(res.flatten()) res = torch.nn.functional.conv1d(t, w, padding=1) print(res.flatten()) w_t = w.transpose(0, 1) res = torch.nn.functional.conv_transpose1d(t, w_t) print(res.shape) print(res) */ fn conv1d(dev: &Device) -> Result<()> { let t = Tensor::new( &[ 0.4056f32, -0.8689, -0.0773, -1.5630, 1.2279, -0.9287, -1.7030, 0.1370, 0.1866, 0.4145, 1.8025, -0.1536, 2.2013, -0.6836, 0.2477, 1.3127, -0.6957, 0.3278, -1.0124, 0.5599, ], dev, )? .reshape((1, 4, 5))?; let w = Tensor::new( &[ -0.8404f32, -0.3490, 0.0130, 1.3123, 0.1763, -1.9249, 1.4270, 0.9421, 0.8670, -0.7181, -1.1111, 0.8869, -1.2429, 1.8357, 1.6052, -1.3844, 0.3951, -1.2036, 0.6686, 1.6261, -0.6451, -0.0840, -1.4247, 0.5512, ], dev, )? .reshape((2, 4, 3))?; let res = t.conv1d(&w, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 2, 3]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [2.6357, -1.3336, 4.1393, -1.1784, 3.5675, 0.5069] ); let res = t.conv1d(&w, /*padding*/ 1, 1, 1, 1)?; assert_eq!(res.dims(), [1, 2, 5]); // Same as pytorch default padding: use zeros. assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [2.4509, 2.6357, -1.3336, 4.1393, 0.5657, 1.8091, -1.1784, 3.5675, 0.5069, 3.3352] ); if dev.is_cpu() { let res = t.conv_transpose1d(&w.transpose(0, 1)?, 0, 0, 1, 1)?; assert_eq!(res.dims(), [1, 2, 7]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [ 0.0699, -1.2899, 8.3018, 5.5873, 2.4572, -2.6143, -0.0706, 1.8765, 4.8318, 1.1538, 4.7076, -5.9745, -0.8276, 1.621 ], ); } Ok(()) } fn conv1d_small(dev: &Device) -> Result<()> { let t = Tensor::new(&[0.4056f32, -0.8689, -0.0773, -1.5630], dev)?.reshape((1, 1, 4))?; let w = Tensor::new(&[1f32, 0., 0.], dev)?.reshape((1, 1, 3))?; let res = t.conv1d(&w, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 1, 2]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [0.4056, -0.8689] ); let res = t.conv1d(&w, /*padding*/ 1, 1, 1, 1)?; assert_eq!(res.dims(), [1, 1, 4]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [0.0, 0.4056, -0.8689, -0.0773], ); Ok(()) } /* This test is based on the following script. 
import torch torch.manual_seed(4242) t = torch.randn((1, 4, 5, 5)) w = torch.randn((2, 4, 3, 3)) print(t.flatten()) print(w.flatten()) res = torch.nn.functional.conv2d(t, w) print(res.flatten()) w_t = w.transpose(0, 1) res = torch.nn.functional.conv_transpose2d(t, w_t) print(res.shape) print(res) res = torch.nn.functional.conv2d(t, w, dilation=2) print(res.shape) print(res[0]) res = torch.nn.functional.conv_transpose2d(t, w_t, dilation=2) print(res.shape) print(res) */ fn conv2d(dev: &Device) -> Result<()> { let t = Tensor::new( &[ 0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997, 3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013, -0.6836, 0.2477, 1.3127, -0.2260, 0.2622, -1.2974, -0.8140, -0.8404, -0.3490, 0.0130, 1.3123, 1.7569, -0.3956, -1.8255, 0.1727, -0.3538, 2.6941, 1.0529, 0.4219, -0.2071, 1.1586, 0.4717, 0.3865, -0.5690, -0.5010, -0.1310, 0.7796, 0.6630, -0.2021, 2.6090, 0.2049, 0.6466, -0.5042, -0.0603, -1.6538, -1.2429, 1.8357, 1.6052, -1.3844, 0.3323, -1.3712, 0.9634, -0.4799, -0.6451, -0.0840, -1.4247, 0.5512, -0.1747, -0.5509, -0.3742, 0.3790, -0.4431, -0.4720, -0.7890, 0.2620, 0.7875, 0.5377, -0.6779, -0.8088, 1.9098, 1.2006, -0.8000, -0.4983, 1.5480, 0.8265, -0.1025, 0.5138, 0.5748, 0.3821, -0.4607, 0.0085, ], dev, )?; let w = Tensor::new( &[ -0.9325f32, 0.6451, -0.8537, 0.2378, 0.8764, -0.1832, 0.2987, -0.6488, -0.2273, -2.4184, -0.1192, -0.4821, -0.5079, -0.5766, -2.4729, 1.6734, 0.4558, 0.2851, 1.1514, -0.9013, 1.0662, -0.1817, -0.0259, 0.1709, 0.5367, 0.7513, 0.8086, -2.2586, -0.5027, 0.9141, -1.3086, -1.3343, -1.5669, -0.1657, 0.7958, 0.1432, 0.3896, -0.4501, 0.1667, 0.0714, -0.0952, 1.2970, -0.1674, -0.3178, 1.0677, 0.3060, 0.7080, 0.1914, 1.1679, -0.3602, 1.9265, -1.8626, -0.5112, -0.0982, 0.2621, 0.6565, 0.5908, 1.0089, -0.1646, 1.8032, -0.6286, 0.2016, -0.3370, 1.2555, 0.8009, -0.6488, -0.4652, -1.5685, 1.5860, 0.5583, 0.4623, 0.6026, ], dev, )?; let t = t.reshape((1, 4, 5, 5))?; let w = w.reshape((2, 4, 3, 3))?; let res = t.conv2d(&w, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 2, 3, 3]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [ -4.2812, 2.0923, 5.2187, 7.5184, 0.752, -14.9426, 10.0087, 4.391, 0.2918, 1.6715, 10.389, 3.6023, -4.2808, 0.2672, 5.3646, -5.2023, -2.1955, -9.4075 ] ); let res = t.conv_transpose2d(&w.transpose(0, 1)?, 0, 0, 1, 1)?; assert_eq!(res.dims(), [1, 2, 7, 7]); assert_eq!( test_utils::to_vec3_round(&res.i(0)?, 4)?, [ [ [-1.9918, 2.6797, -0.4599, -1.6037, 1.4131, -2.4012, 2.9277], [1.8016, -3.5361, 1.0757, 3.5395, -8.2168, -3.2023, 0.5375], [0.8243, 1.8675, 7.8929, -4.0746, -6.4415, 5.1139, 1.6889], [0.2722, 8.9679, 3.3477, 1.8514, -4.2896, -3.8228, -7.5632], [-8.5412, -5.8142, -7.1587, -1.6095, 0.4651, 0.2748, -2.0985], [2.0833, -0.6482, -12.1692, -4.1284, -2.9765, -0.0656, -4.5114], [5.307, 2.6957, 2.3087, 1.0478, 0.7808, -1.1519, -0.9579] ], [ [1.089, 0.1872, -0.6408, -0.9897, 0.8503, 1.1019, -0.9211], [-0.1741, -0.2915, 4.2472, 1.9417, 1.65, 0.6303, -4.7131], [1.6555, 2.4026, -2.9293, 2.9953, 0.5328, 3.5873, -0.9621], [-1.4289, -3.2787, 4.1747, -6.0341, -4.6341, -5.7945, 4.142], [7.5973, 6.4431, 5.9872, 2.1639, -8.6566, 3.3143, -3.4059], [-0.8775, -3.048, 11.6543, 0.6442, 2.3218, -0.4765, 1.1516], [-5.5423, -2.5188, 1.0754, -0.0563, -2.9386, -1.1504, 1.0171] ] ] ); // Dilations. 
let res = t.conv2d(&w, 0, 1, 2, 1)?; assert_eq!(res.dims(), [1, 2, 1, 1]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [2.45, -2.3504], ); // Transpose and dilations. let res = t.conv_transpose2d(&w.transpose(0, 1)?, 0, 0, 1, 2)?; assert_eq!(res.dims(), [1, 2, 9, 9]); assert_eq!( test_utils::to_vec3_round(&res.i(0)?, 4)?, [ [ [-1.9918, 3.1652, -0.6778, -4.3442, 4.4351, 0.6652, -3.0124, -0.6031, 2.9277], [2.7036, -1.7156, -0.3969, 1.0516, 1.6381, -2.8886, -0.205, 2.4682, -1.0499], [-0.9459, 3.1631, 3.707, -4.8369, -8.5166, -1.4496, -2.7559, -3.2698, 1.4376], [-0.2157, 3.7786, -2.0252, -4.2633, 3.6731, -1.5142, 5.9391, -0.2622, -0.141], [-6.8121, -3.1744, 1.5945, 3.0637, -9.6088, 1.4446, 2.9489, -3.0082, -7.3822], [0.2371, 3.3303, 0.3861, 2.2646, -4.6784, 4.1235, -0.0109, 0.3176, -0.03], [-2.5339, -2.9564, -3.4518, -4.4594, -9.1873, -1.9709, -0.4676, 0.51, -3.5024], [4.007, 0.3067, -2.2954, 1.1105, -0.1992, 1.6372, -2.9268, 0.2807, -1.2787], [5.307, 1.1317, 1.3518, 0.9049, 3.8116, -0.4075, -0.8874, -0.2241, -0.9579] ], [ [1.089, -0.6483, 0.0726, -0.4752, -1.3283, 1.7103, 1.0703, 0.1076, -0.9211], [-0.8629, 0.1376, 0.3202, 2.0955, 0.9696, 2.8988, -1.0012, 1.5049, -0.1278], [1.9286, -1.5255, -2.9563, 2.4589, 3.3611, -0.6951, 0.3525, -1.7724, -5.9861], [1.1226, 2.1561, 3.6417, 4.7546, -0.692, 4.4126, -5.1902, 6.0805, 2.3185], [1.0111, 0.3604, 0.6432, -3.6605, 7.9517, -9.2955, -5.2988, -3.7803, -2.0642], [3.3172, -1.7967, -3.6576, -2.0942, 1.3158, 0.112, -1.7405, 2.9167, 0.7957], [5.1001, 1.8995, -1.8639, 1.1262, 9.9629, 2.683, -3.6319, -1.1607, 0.5856], [-4.8445, -0.5642, 4.2317, 0.0856, 1.2267, -0.5712, 1.736, 1.0997, 0.6908], [-5.5423, -1.1831, -1.2176, 0.0843, 0.0446, -0.7545, -2.4798, -0.0827, 1.0171] ] ] ); Ok(()) } /* This test is based on the following script. 
import torch torch.manual_seed(4242) t = torch.randn((1, 2, 3, 3)) w = torch.randn((1, 2, 1, 1)) print(t.flatten()) print(w.flatten()) res = torch.nn.functional.conv2d(t, w) print(res.flatten()) w_t = w.transpose(0, 1) res = torch.nn.functional.conv_transpose2d(t, w_t) print(res.shape) print(res.flatten()) t_t = w.transpose(0, 1) res = torch.nn.functional.conv_transpose2d(t_t, w) print(res.shape) print(res.flatten()) */ fn conv2d_small(dev: &Device) -> Result<()> { let t = Tensor::new( &[ 0.4056f32, -0.8689, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.1866, 0.4145, -0.6266, 0.3529, 2.2013, -0.6836, 0.2477, 1.3127, -0.6957, 0.3278, ], dev, )?; let w = Tensor::new(&[-0.9259f32, 1.3017], dev)?; let t = t.reshape((1, 2, 3, 3))?; let w = w.reshape((1, 2, 1, 1))?; let res = t.conv2d(&w, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 1, 3, 3]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [0.164, -0.0111, -0.1742, 2.6437, -2.0268, 1.1823, 3.2855, -1.0324, 0.2539] ); let res = t.conv2d(&w, 2, 1, 1, 1)?; assert_eq!(res.dims(), [1, 1, 7, 7]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [ 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.1640, -0.0111, -0.1742, 0.0000, 0.0000, 0.0000, 0.0000, 2.6437, -2.0268, 1.1823, 0.0000, 0.0000, 0.0000, 0.0000, 3.2855, -1.0324, 0.2539, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000 ] ); let res = t.conv_transpose2d(&w.transpose(0, 1)?, 0, 0, 1, 1)?; assert_eq!(res.dims(), [1, 1, 3, 3]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [0.164, -0.0111, -0.1742, 2.6437, -2.0268, 1.1823, 3.2855, -1.0324, 0.2539], ); let res = t.transpose(0, 1)?.conv_transpose2d(&w, 0, 0, 1, 1)?; assert_eq!(res.dims(), [2, 2, 3, 3]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [ -0.3755, 0.8045, -0.6336, -0.2218, -1.1369, 0.8599, 1.5768, -0.1268, -0.1728, 0.528, -1.131, 0.8908, 0.3118, 1.5984, -1.2089, -2.2168, 0.1783, 0.2429, -0.3838, 0.5802, -0.3268, -2.0382, 0.6329, -0.2293, -1.2154, 0.6441, -0.3035, 0.5396, -0.8156, 0.4594, 2.8654, -0.8898, 0.3224, 1.7087, -0.9056, 0.4267 ] ); Ok(()) } fn conv2d_smaller(dev: &Device) -> Result<()> { let t = Tensor::new( &[ 0.4056f32, -0.8689, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.1866, ], dev, )?; let w = Tensor::new(&[1f32, 1., 1., 1., 1., 1., 1., 1., 1.], dev)?; let t = t.reshape((1, 1, 3, 3))?; let w = w.reshape((1, 1, 3, 3))?; let res = t.conv2d(&w, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 1, 1, 1]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [-0.6197] ); Ok(()) } /* This test is based on the following script. 
import torch torch.manual_seed(4242) t = torch.randn((1, 2, 4, 2)) w = torch.randn((1, 2, 1, 1)) print(t.flatten()) print(w.flatten()) res = torch.nn.functional.conv2d(t, w) print(res.flatten()) */ fn conv2d_non_square(dev: &Device) -> Result<()> { let t = Tensor::new( &[ 0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997, 3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, ], dev, )?; let w = Tensor::new(&[-1.1351f32, 1.3841], dev)?; let t = t.reshape((1, 2, 4, 2))?; let w = w.reshape((1, 2, 1, 1))?; let res = t.conv2d(&w, 0, 1, 1, 1)?; assert_eq!(res.dims(), [1, 1, 4, 2]); assert_eq!( test_utils::to_vec1_round(&res.flatten_all()?, 4)?, [0.2312, 5.2238, 2.3772, 1.9076, 2.0256, -0.5776, -1.6028, -1.467] ); Ok(()) } /* import torch torch.manual_seed(4242) t = torch.randn((1, 4, 5, 5), requires_grad=True) w = torch.randn((2, 4, 3, 3), requires_grad=True) print(t.flatten()) print(w.flatten()) res = torch.nn.functional.conv2d(t, w) print(res.flatten()) loss = (res ** 2).sum() print(loss) loss.backward() print(t.grad.shape) print(t.grad.flatten()) print(w.grad.shape) print(w.grad.flatten()) t.grad.zero_() w.grad.zero_() res = torch.nn.functional.conv2d(t, w, stride=2) print(res.flatten()) loss = (res ** 2).sum() print(loss) loss.backward() print(t.grad.shape) print(t.grad[0]) print(w.grad.shape) print(w.grad[0]) */ fn conv2d_grad(dev: &Device) -> Result<()> { use candle_core::Var; let t = Var::from_slice( &[ 0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997, 3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013, -0.6836, 0.2477, 1.3127, -0.2260, 0.2622, -1.2974, -0.8140, -0.8404, -0.3490, 0.0130, 1.3123, 1.7569, -0.3956, -1.8255, 0.1727, -0.3538, 2.6941, 1.0529, 0.4219, -0.2071, 1.1586, 0.4717, 0.3865, -0.5690, -0.5010, -0.1310, 0.7796, 0.6630, -0.2021, 2.6090, 0.2049, 0.6466, -0.5042, -0.0603, -1.6538, -1.2429, 1.8357, 1.6052, -1.3844, 0.3323, -1.3712, 0.9634, -0.4799, -0.6451, -0.0840, -1.4247, 0.5512, -0.1747, -0.5509, -0.3742, 0.3790, -0.4431, -0.4720, -0.7890, 0.2620, 0.7875, 0.5377, -0.6779, -0.8088, 1.9098, 1.2006, -0.8000, -0.4983, 1.5480, 0.8265, -0.1025, 0.5138, 0.5748, 0.3821, -0.4607, 0.0085, ], (1, 4, 5, 5), dev, )?; let w = Var::from_slice( &[ -0.9325f32, 0.6451, -0.8537, 0.2378, 0.8764, -0.1832, 0.2987, -0.6488, -0.2273, -2.4184, -0.1192, -0.4821, -0.5079, -0.5766, -2.4729, 1.6734, 0.4558, 0.2851, 1.1514, -0.9013, 1.0662, -0.1817, -0.0259, 0.1709, 0.5367, 0.7513, 0.8086, -2.2586, -0.5027, 0.9141, -1.3086, -1.3343, -1.5669, -0.1657, 0.7958, 0.1432, 0.3896, -0.4501, 0.1667, 0.0714, -0.0952, 1.2970, -0.1674, -0.3178, 1.0677, 0.3060, 0.7080, 0.1914, 1.1679, -0.3602, 1.9265, -1.8626, -0.5112, -0.0982, 0.2621, 0.6565, 0.5908, 1.0089, -0.1646, 1.8032, -0.6286, 0.2016, -0.3370, 1.2555, 0.8009, -0.6488, -0.4652, -1.5685, 1.5860, 0.5583, 0.4623, 0.6026, ], (2, 4, 3, 3), dev, )?; let res = t.conv2d(&w, 0, 1, 1, 1)?; let loss = res.sqr()?.sum_all()?; assert_eq!(test_utils::to_vec0_round(&loss, 2)?, 741.12f32); let grads = loss.backward()?; let grad_t = grads.get(&t).unwrap(); let grad_w = grads.get(&w).unwrap(); assert_eq!(grad_t.dims(), [1, 4, 5, 5]); assert_eq!(grad_w.dims(), [2, 4, 3, 3]); assert_eq!( test_utils::to_vec1_round(&grad_t.flatten_all()?, 2)?, [ 9.29, -2.84, -5.71, 3.38, -7.71, -19.15, 7.02, 29.1, 9.34, 34.73, -22.87, 24.35, -39.88, -14.01, 21.08, 9.94, 13.63, -34.68, 11.21, -6.26, 7.72, -6.32, -16.64, 
-1.08, -20.22, 21.73, -0.37, -4.06, 5.82, -3.65, -30.73, 14.55, 87.7, 31.6, 4.53, -89.78, -75.37, -57.43, -7.56, 92.96, 18.79, -4.63, -159.75, -42.47, -47.26, 52.88, 37.32, 49.0, 12.82, 2.01, -8.98, 20.18, 16.62, 12.06, 15.38, 20.0, 2.57, -15.22, 72.62, -10.75, 2.25, -31.2, 3.75, -0.2, 9.76, -0.68, 5.21, -40.44, -22.59, -61.61, 17.28, 20.41, 37.55, 5.23, 6.81, 23.54, 23.62, -9.99, -9.13, 4.87, -35.06, -26.1, 63.48, 25.81, -39.21, -70.68, -46.96, 2.33, 41.81, 82.42, -28.63, -11.78, -35.33, -10.28, -28.57, -9.13, 7.21, -9.05, -9.62, -11.25 ] ); assert_eq!( test_utils::to_vec1_round(&grad_w.flatten_all()?, 2)?, [ -28.92, -22.88, -141.23, 73.35, 61.07, 47.81, -20.0, -73.71, -41.82, -13.59, 21.5, 28.72, 28.57, -46.85, -90.19, 143.61, 16.68, 7.43, 18.88, -90.81, -20.29, 54.79, 82.63, 22.94, 77.81, -16.39, -13.2, 9.34, -40.39, -26.62, 5.33, -60.91, 9.09, -59.37, 7.08, 58.64, 5.55, 20.52, 2.5, -17.25, -6.8, 22.21, 30.15, -7.52, -37.46, 5.67, 22.58, 9.03, 47.05, 17.61, 37.31, -98.13, -14.61, -4.8, -6.36, 44.69, 23.34, 8.37, -13.52, 80.05, -34.24, -16.36, -12.31, 1.92, -33.62, -14.1, -49.23, -7.39, 11.5, -9.98, 9.66, 29.6 ] ); // Same as before but with stride. let res = t.conv2d(&w, 0, 2, 1, 1)?; let loss = res.sqr()?.sum_all()?; assert_eq!(test_utils::to_vec0_round(&loss, 2)?, 277.16f32); let grads = loss.backward()?; let grad_t = grads.get(&t).unwrap(); let grad_w = grads.get(&w).unwrap(); assert_eq!(grad_t.dims(), [1, 4, 5, 5]); assert_eq!(grad_w.dims(), [2, 4, 3, 3]); assert_eq!( test_utils::to_vec3_round(&grad_t.i(0)?, 2)?, [ [ [9.29, -7.03, 0.94, 3.49, -7.71], [-1.8, -7.82, 8.9, 8.46, 7.43], [-25.84, 22.09, -19.27, -0.22, 1.69], [4.02, 18.53, -18.37, 2.3, -24.51], [7.72, -9.68, -12.34, 5.6, -20.22] ], [ [21.73, 3.39, -18.27, 3.86, -3.65], [8.25, 3.73, 30.73, -8.61, -11.93], [-72.15, -15.36, -17.53, -12.32, -1.61], [-22.32, -7.79, -91.82, 6.44, -37.69], [52.88, 14.44, 42.75, 9.88, 2.01] ], [ [-8.98, 9.91, 6.75, -4.68, 15.38], [4.93, -0.33, 9.94, -1.46, 14.78], [13.62, -30.63, 3.96, -3.58, -4.48], [-14.13, 1.19, -34.43, 3.08, -33.83], [17.28, 12.94, 31.83, -3.35, 6.81] ], [ [23.54, 6.98, -24.52, 0.52, 4.87], [9.65, 6.18, 1.71, -25.23, -4.93], [-54.99, -23.66, 3.19, -3.73, 18.58], [-21.35, -10.39, -39.88, 28.73, -30.76], [-9.13, 11.12, -14.0, -8.23, -11.25] ] ] ); assert_eq!( test_utils::to_vec3_round(&grad_w.i(0)?, 2)?, [ [ [28.34, -7.91, -45.75], [21.03, 3.86, 29.86], [0.72, -36.58, -35.28] ], [ [-16.04, 11.53, -16.38], [29.62, -16.32, -48.35], [57.5, 28.29, 25.81] ], [ [2.93, -19.6, 1.57], [27.15, 53.88, -24.64], [12.74, -22.6, -26.2] ], [ [-0.18, -14.86, -6.82], [-19.55, -2.72, 45.9], [-2.54, 36.97, 27.11] ] ] ); // Replicate the issue from https://github.com/huggingface/candle/issues/1212 let res = t.i((.., .., 0..4, 0..4))?.conv2d(&w, 0, 2, 1, 1)?; let loss = res.sqr()?.sum_all()?; assert_eq!(test_utils::to_vec0_round(&loss, 2)?, 21.12f32); let grads = loss.backward()?; let grad_t = grads.get(&t).unwrap(); let grad_w = grads.get(&w).unwrap(); assert_eq!(grad_t.dims(), [1, 4, 5, 5]); assert_eq!(grad_w.dims(), [2, 4, 3, 3]); assert_eq!( test_utils::to_vec3_round(&grad_t.i(0)?, 2)?, [ [ [9.29, -7.03, 7.87, 0.0, 0.0], [-1.8, -7.82, 5.9, 0.0, 0.0], [-3.12, 4.49, 5.52, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0] ], [ [21.73, 3.39, 4.77, 0.0, 0.0], [8.25, 3.73, 27.61, 0.0, 0.0], [-20.55, -5.61, -2.77, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0] ], [ [-8.98, 9.91, -7.15, 0.0, 0.0], [4.93, -0.33, 4.56, 0.0, 0.0], [-6.7, -5.76, -8.05, 0.0, 0.0], [0.0, 0.0, 
0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0] ], [ [23.54, 6.98, -10.0, 0.0, 0.0], [9.65, 6.18, 18.72, 0.0, 0.0], [3.29, -5.27, 0.79, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0] ] ] ); assert_eq!( test_utils::to_vec3_round(&grad_w.i(0)?, 2)?, [ [ [-3.47, 7.44, 0.66], [12.89, -3.4, -9.29], [-14.16, -0.83, 7.14] ], [ [-3.23, 5.37, -3.02], [-2.12, -11.24, 1.94], [6.97, 7.2, 2.99] ], [ [-4.04, -3.31, 4.87], [-6.68, -5.68, 1.73], [-5.54, 4.32, 0.52] ], [[-4.72, 1.5, 4.72], [3.79, 4.04, 6.76], [-4.6, 5.8, 6.93]] ] ); Ok(()) } test_device!(conv1d, conv1d_cpu, conv1d_gpu, conv1d_metal); test_device!( conv1d_small, conv1d_small_cpu, conv1d_small_gpu, conv1d_small_metal ); test_device!(conv2d, conv2d_cpu, conv2d_gpu, conv2d_metal); test_device!( conv2d_non_square, conv2d_non_square_cpu, conv2d_non_square_gpu, conv2d_non_square_metal ); test_device!( conv2d_small, conv2d_small_cpu, conv2d_small_gpu, conv2d_small_metal ); test_device!( conv2d_smaller, conv2d_smaller_cpu, conv2d_smaller_gpu, conv2d_smaller_metal ); test_device!( conv2d_grad, conv2d_grad_cpu, conv2d_grad_gpu, conv2d_grad_metal );
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/indexing_tests.rs
use anyhow::Result; use candle_core::{Device, IndexOp, Tensor}; #[test] fn integer_index() -> Result<()> { let dev = Device::Cpu; let tensor = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((2, 3))?; let result = tensor.i(1)?; assert_eq!(result.dims(), &[3]); assert_eq!(result.to_vec1::<u32>()?, &[3, 4, 5]); let result = tensor.i((.., 2))?; assert_eq!(result.dims(), &[2]); assert_eq!(result.to_vec1::<u32>()?, &[2, 5]); Ok(()) } #[test] fn range_index() -> Result<()> { let dev = Device::Cpu; // RangeFull let tensor = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((2, 3))?; let result = tensor.i(..)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]); // Range let tensor = Tensor::arange(0u32, 4 * 3, &dev)?.reshape((4, 3))?; let result = tensor.i(1..3)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[3, 4, 5], [6, 7, 8]]); // RangeFrom let result = tensor.i(2..)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[6, 7, 8], [9, 10, 11]]); // RangeTo let result = tensor.i(..2)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]); // RangeInclusive let result = tensor.i(1..=2)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[3, 4, 5], [6, 7, 8]]); // RangeTo let result = tensor.i(..1)?; assert_eq!(result.dims(), &[1, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2]]); // RangeToInclusive let result = tensor.i(..=1)?; assert_eq!(result.dims(), &[2, 3]); assert_eq!(result.to_vec2::<u32>()?, &[[0, 1, 2], [3, 4, 5]]); // Empty range let result = tensor.i(1..1)?; assert_eq!(result.dims(), &[0, 3]); let empty: [[u32; 3]; 0] = []; assert_eq!(result.to_vec2::<u32>()?, &empty); // Similar to PyTorch, allow empty ranges when the computed length is negative. #[allow(clippy::reversed_empty_ranges)] let result = tensor.i(1..0)?; assert_eq!(result.dims(), &[0, 3]); let empty: [[u32; 3]; 0] = []; assert_eq!(result.to_vec2::<u32>()?, &empty); Ok(()) } #[test] fn index_3d() -> Result<()> { let tensor = Tensor::from_iter(0..24u32, &Device::Cpu)?.reshape((2, 3, 4))?; assert_eq!(tensor.i((0, 0, 0))?.to_scalar::<u32>()?, 0); assert_eq!(tensor.i((1, 0, 0))?.to_scalar::<u32>()?, 12); assert_eq!(tensor.i((0, 1, 0))?.to_scalar::<u32>()?, 4); assert_eq!(tensor.i((0, 1, 3))?.to_scalar::<u32>()?, 7); assert_eq!(tensor.i((0..2, 0, 0))?.to_vec1::<u32>()?, &[0, 12]); assert_eq!( tensor.i((0..2, .., 0))?.to_vec2::<u32>()?, &[[0, 4, 8], [12, 16, 20]] ); assert_eq!( tensor.i((..2, .., 3))?.to_vec2::<u32>()?, &[[3, 7, 11], [15, 19, 23]] ); assert_eq!(tensor.i((1, .., 3))?.to_vec1::<u32>()?, &[15, 19, 23]); Ok(()) } #[test] fn slice_assign() -> Result<()> { let dev = Device::Cpu; let tensor = Tensor::arange(0u32, 4 * 5, &dev)?.reshape((4, 5))?; let src = Tensor::arange(0u32, 2 * 3, &dev)?.reshape((3, 2))?; let out = tensor.slice_assign(&[1..4, 3..5], &src)?; assert_eq!( out.to_vec2::<u32>()?, &[ [0, 1, 2, 3, 4], [5, 6, 7, 0, 1], [10, 11, 12, 2, 3], [15, 16, 17, 4, 5] ] ); let out = tensor.slice_assign(&[0..3, 0..2], &src)?; assert_eq!( out.to_vec2::<u32>()?, &[ [0, 1, 2, 3, 4], [2, 3, 7, 8, 9], [4, 5, 12, 13, 14], [15, 16, 17, 18, 19] ] ); Ok(()) }
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/serialization_tests.rs
use candle_core::{DType, Result, Tensor}; #[test] fn npy() -> Result<()> { let npy = Tensor::read_npy("tests/test.npy")?; assert_eq!( npy.to_dtype(DType::U8)?.to_vec1::<u8>()?, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] ); Ok(()) } #[test] fn npz() -> Result<()> { let npz = Tensor::read_npz("tests/test.npz")?; assert_eq!(npz.len(), 2); assert_eq!(npz[0].0, "x"); assert_eq!(npz[1].0, "x_plus_one"); assert_eq!( npz[1].1.to_dtype(DType::U8)?.to_vec1::<u8>()?, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] ); Ok(()) }
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/display_tests.rs
use anyhow::Result; use candle_core::{DType, Device::Cpu, Tensor}; #[test] fn display_scalar() -> Result<()> { let t = Tensor::new(1234u32, &Cpu)?; let s = format!("{t}"); assert_eq!(&s, "[1234]\nTensor[[], u32]"); let t = t.to_dtype(DType::F32)?.neg()?; let s = format!("{}", (&t / 10.0)?); assert_eq!(&s, "[-123.4000]\nTensor[[], f32]"); let s = format!("{}", (&t / 1e8)?); assert_eq!(&s, "[-1.2340e-5]\nTensor[[], f32]"); let s = format!("{}", (&t * 1e8)?); assert_eq!(&s, "[-1.2340e11]\nTensor[[], f32]"); let s = format!("{}", (&t * 0.)?); assert_eq!(&s, "[0.]\nTensor[[], f32]"); Ok(()) } #[test] fn display_vector() -> Result<()> { let t = Tensor::new::<&[u32; 0]>(&[], &Cpu)?; let s = format!("{t}"); assert_eq!(&s, "[]\nTensor[[0], u32]"); let t = Tensor::new(&[0.1234567, 1.0, -1.2, 4.1, f64::NAN], &Cpu)?; let s = format!("{t}"); assert_eq!( &s, "[ 0.1235, 1.0000, -1.2000, 4.1000, NaN]\nTensor[[5], f64]" ); let t = (Tensor::ones(50, DType::F32, &Cpu)? * 42.)?; let s = format!("\n{t}"); let expected = r#" [42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42.] Tensor[[50], f32]"#; assert_eq!(&s, expected); let t = (Tensor::ones(11000, DType::F32, &Cpu)? * 42.)?; let s = format!("{t}"); assert_eq!( &s, "[42., 42., 42., ..., 42., 42., 42.]\nTensor[[11000], f32]" ); Ok(()) } #[test] fn display_multi_dim() -> Result<()> { let t = (Tensor::ones((200, 100), DType::F32, &Cpu)? * 42.)?; let s = format!("\n{t}"); let expected = r#" [[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]] Tensor[[200, 100], f32]"#; assert_eq!(&s, expected); let t = t.reshape(&[2, 1, 1, 100, 100])?; let t = format!("\n{t}"); let expected = r#" [[[[[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]]]], [[[[42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], ... [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.], [42., 42., 42., ..., 42., 42., 42.]]]]] Tensor[[2, 1, 1, 100, 100], f32]"#; assert_eq!(&t, expected); Ok(()) }
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/tests/layout_tests.rs
use candle::{test_device, Device, IndexOp, Result, Tensor}; use candle_core as candle; fn contiguous(device: &Device) -> Result<()> { let tensor = Tensor::arange(0u32, 24u32, device)?.reshape((2, 3, 4))?; assert_eq!( tensor.to_vec3::<u32>()?, &[ [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]], [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]] ] ); assert_eq!( tensor.t()?.contiguous()?.to_vec3::<u32>()?, &[ [[0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11]], [[12, 16, 20], [13, 17, 21], [14, 18, 22], [15, 19, 23]] ] ); assert_eq!( tensor.transpose(0, 1)?.contiguous()?.to_vec3::<u32>()?, &[ [[0, 1, 2, 3], [12, 13, 14, 15]], [[4, 5, 6, 7], [16, 17, 18, 19]], [[8, 9, 10, 11], [20, 21, 22, 23]] ] ); assert_eq!( tensor.transpose(0, 1)?.flatten_all()?.to_vec1::<u32>()?, &[0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7, 16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23] ); assert_eq!( tensor .i(1..)? .transpose(0, 1)? .contiguous()? .to_vec3::<u32>()?, &[[[12, 13, 14, 15]], [[16, 17, 18, 19]], [[20, 21, 22, 23]]] ); assert_eq!( tensor.transpose(0, 2)?.contiguous()?.to_vec3::<u32>()?, &[ [[0, 12], [4, 16], [8, 20]], [[1, 13], [5, 17], [9, 21]], [[2, 14], [6, 18], [10, 22]], [[3, 15], [7, 19], [11, 23]] ] ); Ok(()) } test_device!(contiguous, contiguous_cpu, contiguous_gpu, contiguous_metal); #[test] fn strided_blocks() -> Result<()> { use candle::Device::Cpu; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; match tensor.strided_blocks() { candle::StridedBlocks::SingleBlock { start_offset, len } => { assert_eq!(start_offset, 0); assert_eq!(len, 24); } candle::StridedBlocks::MultipleBlocks { .. } => { panic!("unexpected block structure") } }; let tensor = Tensor::arange(0u32, 26u32, &Cpu)? .i(2..)? .reshape((2, 3, 4))?; match tensor.strided_blocks() { candle::StridedBlocks::SingleBlock { start_offset, len } => { assert_eq!(start_offset, 2); assert_eq!(len, 24); } candle::StridedBlocks::MultipleBlocks { .. } => { panic!("unexpected block structure") } }; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; let tensor = tensor.i(1)?; match tensor.strided_blocks() { candle::StridedBlocks::SingleBlock { start_offset, len } => { assert_eq!(start_offset, 12); assert_eq!(len, 12); } candle::StridedBlocks::MultipleBlocks { .. } => { panic!("unexpected block structure") } }; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; let tensor = tensor.i((.., 1))?; match tensor.strided_blocks() { candle::StridedBlocks::SingleBlock { start_offset, len } => { assert_eq!(start_offset, 0); assert_eq!(len, 8); assert_eq!(tensor.to_vec2::<u32>()?, &[[4, 5, 6, 7], [16, 17, 18, 19]]); } candle::StridedBlocks::MultipleBlocks { .. } => { panic!("unexpected block structure") } }; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; match tensor.t()?.strided_blocks() { candle::StridedBlocks::SingleBlock { .. } => { panic!("unexpected block structure") } candle::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { assert_eq!(block_len, 1); assert_eq!( block_start_index.collect::<Vec<_>>(), &[ 0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11, 12, 16, 20, 13, 17, 21, 14, 18, 22, 15, 19, 23 ] ) } }; let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?; match tensor.transpose(0, 1)?.strided_blocks() { candle::StridedBlocks::SingleBlock { .. 
} => { panic!("unexpected block structure") } candle::StridedBlocks::MultipleBlocks { block_start_index, block_len, } => { assert_eq!(block_len, 4); assert_eq!( block_start_index.collect::<Vec<_>>(), &[0, 12, 4, 16, 8, 20] ) } }; Ok(()) }
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/examples/cuda_sum_benchmark.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use std::str::FromStr; use anyhow::Result; use candle_core::{Device, Tensor}; fn cos_sin(n: usize, device: &Device) -> Result<Tensor> { let thetas: Vec<_> = (0..n).map(|i| (i as f32 / n as f32)).collect(); let xs: Vec<_> = thetas.iter().map(|t| t.cos().abs()).collect(); let ys: Vec<_> = thetas.iter().map(|t| t.sin().abs()).collect(); let xs = Tensor::from_vec(xs, (n, 1), device)?; let ys = Tensor::from_vec(ys, (1, n), device)?; let ys = Tensor::cat(&[&ys, &ys, &ys, &ys, &ys, &ys], 1)?; Ok(xs.matmul(&ys)?) } fn main() -> Result<()> { let device = Device::new_cuda(0)?; let args = std::env::args().collect::<Vec<String>>(); let n = if args.len() < 2 { 2000usize } else { usize::from_str(&args[1])? }; let xys_cpu = cos_sin(n, &Device::Cpu)?; let xys = cos_sin(n, &device)?; println!("{xys_cpu:?} {xys:?}"); let sum_keepdim_cpu = xys_cpu.sum_keepdim(1)?; println!("{sum_keepdim_cpu}"); let sum_keepdim = xys.sum_keepdim(1)?; println!("{sum_keepdim}"); let start = std::time::Instant::now(); let n_iters = 100; let mut v = 0f32; for _i in 0..n_iters { let sum_keepdim = xys.sum_keepdim(1)?; let sum_keepdim = sum_keepdim.sum_keepdim(0)?; let sum_keepdim: f32 = sum_keepdim.reshape(&[])?.to_scalar()?; v += sum_keepdim; } let elapsed = start.elapsed(); if v > 0. { println!( "ran {n_iters} iterations, time per iter: {:?} ({v})", elapsed.div_f64(n_iters as f64) ); } Ok(()) }
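// A possible invocation sketch, assuming a CUDA-enabled build; the matrix size
// argument is optional and defaults to 2000:
//
//   cargo run --example cuda_sum_benchmark --release -- 4000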
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/examples/cuda_basics.rs
#[cfg(feature = "accelerate")] extern crate accelerate_src; #[cfg(feature = "mkl")] extern crate intel_mkl_src; use anyhow::Result; use candle_core::{Device, Tensor}; fn main() -> Result<()> { let device = Device::new_cuda(0)?; let in_t = Tensor::rand(-1f32, 1f32, (1, 3, 12, 7), &device)?; let k_t = Tensor::rand(-1f32, 1f32, (6, 3, 1, 1), &device)?; let out_t = in_t.conv2d(&k_t, 0, 1, 1, 1)?; println!("{out_t}"); let in_t = in_t.to_device(&Device::Cpu)?; let k_t = k_t.to_device(&Device::Cpu)?; let out_t2 = in_t.conv2d(&k_t, 0, 1, 1, 1)?; let diff = (out_t.to_device(&Device::Cpu)? - out_t2)? .sqr()? .sum_all()?; println!("{diff}"); let t = Tensor::randn(0f32, 1f32, (2, 4, 96, 96), &device)?; let w = Tensor::randn(0f32, 1f32, (320, 4, 3, 3), &device)?; let res = t.conv2d(&w, 1, 1, 1, 1)?; println!("{res:?}"); Ok(()) }
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/examples/basics.rs
#[cfg(feature = "mkl")] extern crate intel_mkl_src; #[cfg(feature = "accelerate")] extern crate accelerate_src; use anyhow::Result; use candle_core::{Device, Tensor}; fn main() -> Result<()> { let a = Tensor::new(&[[0.0f32, 1.0, 2.0], [3.0, 4.0, 5.0]], &Device::Cpu)?; let b = Tensor::new(&[[88.0f32, 99.0]], &Device::Cpu)?; let new_a = a.slice_scatter(&b, 1, 2)?; assert_eq!(a.to_vec2::<f32>()?, [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]); assert_eq!(new_a.to_vec2::<f32>()?, [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]); Ok(()) }
hf_public_repos/candle/candle-core
hf_public_repos/candle/candle-core/examples/tensor-tools.rs
use candle_core::quantized::{gguf_file, k_quants, QTensor}; use candle_core::{Device, Result, Tensor}; use clap::{Parser, Subcommand, ValueEnum}; use rayon::prelude::*; #[derive(ValueEnum, Debug, Clone)] enum QuantizationMode { /// The default quantization includes all 2d tensors, except the output tensor which always /// uses Q6_K. Llama, } impl QuantizationMode { fn quantize( &self, name: &str, tensor: QTensor, default: fn(&Tensor) -> Result<QTensor>, ) -> Result<QTensor> { match self { Self::Llama => { // Same behavior as the llama.cpp quantization. let should_quantize = name.ends_with(".weight") && tensor.rank() == 2; if should_quantize { let tensor = tensor.dequantize(&Device::Cpu)?; if name == "output.weight" { QTensor::quantize::<k_quants::BlockQ6K>(&tensor) } else { default(&tensor) } } else { Ok(tensor) } } } } } #[derive(ValueEnum, Debug, Clone)] enum Quantization { #[value(name = "q4_0")] Q4_0, #[value(name = "q4_1")] Q4_1, #[value(name = "q5_0")] Q5_0, #[value(name = "q5_1")] Q5_1, #[value(name = "q8_0")] Q8_0, #[value(name = "q8_1")] Q8_1, Q2k, Q3k, Q4k, Q5k, Q6k, Q8k, F16, F32, } #[derive(ValueEnum, Debug, Clone)] enum Format { Safetensors, Npz, Ggml, Gguf, Pth, Pickle, } impl Format { fn infer<P: AsRef<std::path::Path>>(p: P) -> Option<Self> { p.as_ref() .extension() .and_then(|e| e.to_str()) .and_then(|e| match e { // We don't infer any format for .bin as it can be used for ggml/gguf or pytorch. "safetensors" | "safetensor" => Some(Self::Safetensors), "npz" => Some(Self::Npz), "pth" | "pt" => Some(Self::Pth), "ggml" => Some(Self::Ggml), "gguf" => Some(Self::Gguf), _ => None, }) } } #[derive(Subcommand, Debug, Clone)] enum Command { Ls { files: Vec<std::path::PathBuf>, /// The file format to use; if unspecified, it is inferred from the file extension. #[arg(long, value_enum)] format: Option<Format>, /// Enable verbose mode. #[arg(short, long)] verbose: bool, }, Quantize { /// The input file(s), in gguf or safetensors format. in_file: Vec<std::path::PathBuf>, /// The output file, in gguf format. #[arg(long)] out_file: std::path::PathBuf, /// The quantization scheme to apply. #[arg(long, value_enum)] quantization: Quantization, /// Which tensors to quantize. #[arg(long, value_enum, default_value_t = QuantizationMode::Llama)] mode: QuantizationMode, }, } #[derive(Parser, Debug, Clone)] struct Args { #[command(subcommand)] command: Command, } fn run_ls(file: &std::path::PathBuf, format: Option<Format>, verbose: bool) -> Result<()> { let format = match format { Some(format) => format, None => match Format::infer(file) { Some(format) => format, None => { println!( "{file:?}: cannot infer format from file extension, use the --format flag" ); return Ok(()); } }, }; match format { Format::Npz => { let tensors = candle_core::npy::NpzTensors::new(file)?; let mut names = tensors.names(); names.sort(); for name in names { let shape_dtype = match tensors.get_shape_and_dtype(name) { Ok((shape, dtype)) => format!("[{shape:?}; {dtype:?}]"), Err(err) => err.to_string(), }; println!("{name}: {shape_dtype}") } } Format::Safetensors => { let tensors = unsafe { candle_core::safetensors::MmapedSafetensors::new(file)?
}; let mut tensors = tensors.tensors(); tensors.sort_by(|a, b| a.0.cmp(&b.0)); for (name, view) in tensors.iter() { let dtype = view.dtype(); let dtype = match candle_core::DType::try_from(dtype) { Ok(dtype) => format!("{dtype:?}"), Err(_) => format!("{dtype:?}"), }; let shape = view.shape(); println!("{name}: [{shape:?}; {dtype}]") } } Format::Pth => { let mut tensors = candle_core::pickle::read_pth_tensor_info(file, verbose)?; tensors.sort_by(|a, b| a.name.cmp(&b.name)); for tensor_info in tensors.iter() { println!( "{}: [{:?}; {:?}]", tensor_info.name, tensor_info.layout.shape(), tensor_info.dtype, ); if verbose { println!(" {:?}", tensor_info); } } } Format::Pickle => { let file = std::fs::File::open(file)?; let mut reader = std::io::BufReader::new(file); let mut stack = candle_core::pickle::Stack::empty(); stack.read_loop(&mut reader)?; for (i, obj) in stack.stack().iter().enumerate() { println!("{i} {obj:?}"); } } Format::Ggml => { let mut file = std::fs::File::open(file)?; let content = candle_core::quantized::ggml_file::Content::read(&mut file)?; let mut tensors = content.tensors.into_iter().collect::<Vec<_>>(); tensors.sort_by(|a, b| a.0.cmp(&b.0)); for (name, qtensor) in tensors.iter() { println!("{name}: [{:?}; {:?}]", qtensor.shape(), qtensor.dtype()); } } Format::Gguf => { let mut file = std::fs::File::open(file)?; let content = gguf_file::Content::read(&mut file)?; if verbose { let mut metadata = content.metadata.into_iter().collect::<Vec<_>>(); metadata.sort_by(|a, b| a.0.cmp(&b.0)); println!("metadata entries ({})", metadata.len()); for (key, value) in metadata.iter() { println!(" {key}: {value:?}"); } } let mut tensors = content.tensor_infos.into_iter().collect::<Vec<_>>(); tensors.sort_by(|a, b| a.0.cmp(&b.0)); for (name, info) in tensors.iter() { println!("{name}: [{:?}; {:?}]", info.shape, info.ggml_dtype); } } } Ok(()) } fn run_quantize_safetensors( in_files: &[std::path::PathBuf], out_file: std::path::PathBuf, q: Quantization, ) -> Result<()> { let mut out_file = std::fs::File::create(out_file)?; let mut tensors = std::collections::HashMap::new(); for in_file in in_files.iter() { let in_tensors = candle_core::safetensors::load(in_file, &Device::Cpu)?; tensors.extend(in_tensors) } println!("tensors: {}", tensors.len()); let quantize_fn = match q { Quantization::Q4_0 => QTensor::quantize::<k_quants::BlockQ4_0>, Quantization::Q4_1 => QTensor::quantize::<k_quants::BlockQ4_1>, Quantization::Q5_0 => QTensor::quantize::<k_quants::BlockQ5_0>, Quantization::Q5_1 => QTensor::quantize::<k_quants::BlockQ5_1>, Quantization::Q8_0 => QTensor::quantize::<k_quants::BlockQ8_0>, Quantization::Q8_1 => QTensor::quantize::<k_quants::BlockQ8_1>, Quantization::Q2k => QTensor::quantize::<k_quants::BlockQ2K>, Quantization::Q3k => QTensor::quantize::<k_quants::BlockQ3K>, Quantization::Q4k => QTensor::quantize::<k_quants::BlockQ4K>, Quantization::Q5k => QTensor::quantize::<k_quants::BlockQ5K>, Quantization::Q6k => QTensor::quantize::<k_quants::BlockQ6K>, Quantization::Q8k => QTensor::quantize::<k_quants::BlockQ8K>, Quantization::F16 => QTensor::quantize::<half::f16>, Quantization::F32 => QTensor::quantize::<f32>, }; let block_size = match q { Quantization::Q4_0 => k_quants::QK4_0, Quantization::Q4_1 => k_quants::QK4_1, Quantization::Q5_0 => k_quants::QK5_0, Quantization::Q5_1 => k_quants::QK5_1, Quantization::Q8_0 => k_quants::QK8_0, Quantization::Q8_1 => k_quants::QK8_1, Quantization::Q2k | Quantization::Q3k | Quantization::Q4k | Quantization::Q5k | Quantization::Q6k | Quantization::Q8k => 
k_quants::QK_K, Quantization::F16 | Quantization::F32 => 1, }; let qtensors = tensors .into_par_iter() .map(|(name, tensor)| { let should_quantize = tensor.rank() == 2 && tensor.dim(1)? % block_size == 0; println!(" quantizing {name} {tensor:?} {should_quantize}"); let tensor = if should_quantize { quantize_fn(&tensor)? } else { QTensor::quantize::<f32>(&tensor)? }; Ok((name, tensor)) }) .collect::<Result<Vec<_>>>()?; let qtensors = qtensors .iter() .map(|(k, v)| (k.as_str(), v)) .collect::<Vec<_>>(); gguf_file::write(&mut out_file, &[], &qtensors)?; Ok(()) } fn run_quantize( in_files: &[std::path::PathBuf], out_file: std::path::PathBuf, q: Quantization, qmode: QuantizationMode, ) -> Result<()> { if in_files.is_empty() { candle_core::bail!("no specified input files") } if let Some(extension) = out_file.extension() { if extension == "safetensors" { candle_core::bail!("the generated file cannot use the safetensors extension") } } if let Some(extension) = in_files[0].extension() { if extension == "safetensors" { return run_quantize_safetensors(in_files, out_file, q); } } if in_files.len() != 1 { candle_core::bail!("only a single in-file can be used when quantizing gguf files") } // Open the out file early so as to fail directly on missing directories etc. let mut out_file = std::fs::File::create(out_file)?; let mut in_ = std::fs::File::open(&in_files[0])?; let content = gguf_file::Content::read(&mut in_)?; println!("tensors: {}", content.tensor_infos.len()); let quantize_fn = match q { Quantization::Q4_0 => QTensor::quantize::<k_quants::BlockQ4_0>, Quantization::Q4_1 => QTensor::quantize::<k_quants::BlockQ4_1>, Quantization::Q5_0 => QTensor::quantize::<k_quants::BlockQ5_0>, Quantization::Q5_1 => QTensor::quantize::<k_quants::BlockQ5_1>, Quantization::Q8_0 => QTensor::quantize::<k_quants::BlockQ8_0>, Quantization::Q8_1 => QTensor::quantize::<k_quants::BlockQ8_1>, Quantization::Q2k => QTensor::quantize::<k_quants::BlockQ2K>, Quantization::Q3k => QTensor::quantize::<k_quants::BlockQ3K>, Quantization::Q4k => QTensor::quantize::<k_quants::BlockQ4K>, Quantization::Q5k => QTensor::quantize::<k_quants::BlockQ5K>, Quantization::Q6k => QTensor::quantize::<k_quants::BlockQ6K>, Quantization::Q8k => QTensor::quantize::<k_quants::BlockQ8K>, Quantization::F16 => QTensor::quantize::<half::f16>, Quantization::F32 => QTensor::quantize::<f32>, }; let qtensors = content .tensor_infos .par_iter() .map(|(name, _)| { println!(" quantizing {name}"); let mut in_file = std::fs::File::open(&in_files[0])?; let tensor = content.tensor(&mut in_file, name)?; let tensor = qmode.quantize(name, tensor, quantize_fn)?; Ok((name, tensor)) }) .collect::<Result<Vec<_>>>()?; let qtensors = qtensors .iter() .map(|(k, v)| (k.as_str(), v)) .collect::<Vec<_>>(); let metadata = content .metadata .iter() .map(|(k, v)| (k.as_str(), v)) .collect::<Vec<_>>(); gguf_file::write(&mut out_file, metadata.as_slice(), &qtensors)?; Ok(()) } fn main() -> anyhow::Result<()> { let args = Args::parse(); match args.command { Command::Ls { files, format, verbose, } => { let multiple_files = files.len() > 1; for file in files.iter() { if multiple_files { println!("--- {file:?} ---"); } run_ls(file, format.clone(), verbose)? } } Command::Quantize { in_file, out_file, quantization, mode, } => run_quantize(&in_file, out_file, quantization, mode)?, } Ok(()) }
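// A rough usage sketch, assuming the tool is run as a cargo example from the
// candle-core directory (the model file names below are hypothetical):
//
//   cargo run --example tensor-tools --release -- ls model.safetensors --verbose
//   cargo run --example tensor-tools --release -- quantize model.safetensors \
//       --out-file model.gguf --quantization q4k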
hf_public_repos/candle
hf_public_repos/candle/candle-book/book.toml
[book] authors = ["Nicolas Patry"] language = "en" multilingual = false src = "src" title = "Candle Documentation"
hf_public_repos/candle
hf_public_repos/candle/candle-book/Cargo.toml
[package] name = "candle-book" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true readme = "README.md" [dependencies] accelerate-src = { workspace = true, optional = true } candle = { path = "../candle-core", version = "0.3.1", package = "candle-core" } candle-datasets = { path = "../candle-datasets", version = "0.3.1" } candle-nn = { path = "../candle-nn", version = "0.3.1" } candle-transformers = { path = "../candle-transformers", version = "0.3.1" } candle-flash-attn = { path = "../candle-flash-attn", version = "0.3.1", optional = true } safetensors = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } num-traits = { workspace = true } intel-mkl-src = { workspace = true, optional = true } cudarc = { workspace = true, optional = true } half = { workspace = true, optional = true } image = { workspace = true, optional = true } anyhow = { workspace = true } tokio = "1.29.1" [dev-dependencies] byteorder = { workspace = true } hf-hub = { workspace = true, features=["tokio"]} clap = { workspace = true } memmap2 = { workspace = true } rand = { workspace = true } tokenizers = { workspace = true, features = ["onig"] } tracing = { workspace = true } tracing-chrome = { workspace = true } tracing-subscriber = { workspace = true } wav = { workspace = true } # Necessary to disambiguate with tokio in wasm examples which are 1.28.1 parquet = { workspace = true } image = { workspace = true } [build-dependencies] anyhow = { workspace = true } [features] default = []
hf_public_repos/candle/candle-book
hf_public_repos/candle/candle-book/src/README.md
# Introduction

{{#include ../../README.md:features}}

This book will introduce, step by step, how to use `candle`.
hf_public_repos/candle/candle-book
hf_public_repos/candle/candle-book/src/error_manage.md
# Error management

You might have seen in the code base a lot of `.unwrap()` or `?`.
If you're unfamiliar with Rust, check out the [Rust book](https://doc.rust-lang.org/book/ch09-02-recoverable-errors-with-result.html)
for more information.

What's important to know, though, is that if you want to find out *where* a particular operation failed, you can
simply set `RUST_BACKTRACE=1` to get the location of where the model actually failed.

Let's look at some failing code:

```rust,ignore
let x = Tensor::zeros((1, 784), DType::F32, &device)?;
let y = Tensor::zeros((1, 784), DType::F32, &device)?;
let z = x.matmul(&y)?;
```

This will print at runtime:

```bash
Error: ShapeMismatchBinaryOp { lhs: [1, 784], rhs: [1, 784], op: "matmul" }
```

After adding `RUST_BACKTRACE=1`:

```bash
Error: WithBacktrace { inner: ShapeMismatchBinaryOp { lhs: [1, 784], rhs: [1, 784], op: "matmul" }, backtrace: Backtrace [{ fn: "candle::error::Error::bt", file: "/home/nicolas/.cargo/git/checkouts/candle-5bb8ef7e0626d693/f291065/candle-core/src/error.rs", line: 200 }, { fn: "candle::tensor::Tensor::matmul", file: "/home/nicolas/.cargo/git/checkouts/candle-5bb8ef7e0626d693/f291065/candle-core/src/tensor.rs", line: 816 }, { fn: "myapp::main", file: "./src/main.rs", line: 29 }, { fn: "core::ops::function::FnOnce::call_once", file: "/rustc/8ede3aae28fe6e4d52b38157d7bfe0d3bceef225/library/core/src/ops/function.rs", line: 250 }, { fn: "std::sys_common::backtrace::__rust_begin_short_backtrace", file: "/rustc/8ede3aae28fe6e4d52b38157d7bfe0d3bceef225/library/std/src/sys_common/backtrace.rs", line: 135 }, { fn: "std::rt::lang_start::{{closure}}", file: "/rustc/8ede3aae28fe6e4d52b38157d7bfe0d3bceef225/library/std/src/rt.rs", line: 166 }, { fn: "core::ops::function::impls::<impl core::ops::function::FnOnce<A> for &F>::call_once", file: "/rustc/8ede3aae28fe6e4d52b38157d7bfe0d3bceef225/library/core/src/ops/function.rs", line: 284 }, { fn: "std::panicking::try::do_call", file: "/rustc/8ede3aae28fe6e4d52b38157d7bfe0d3bceef225/library/std/src/panicking.rs", line: 500 }, { fn: "std::panicking::try", file: "/rustc/8ede3aae28fe6e4d52b38157d7bfe0d3bceef225/library/std/src/panicking.rs", line: 464 }, { fn: "std::panic::catch_unwind", file: "/rustc/8ede3aae28fe6e4d52b38157d7bfe0d3bceef225/library/std/src/panic.rs", line: 142 }, { fn: "std::rt::lang_start_internal::{{closure}}", file: "/rustc/8ede3aae28fe6e4d52b38157d7bfe0d3bceef225/library/std/src/rt.rs", line: 148 }, { fn: "std::panicking::try::do_call", file: "/rustc/8ede3aae28fe6e4d52b38157d7bfe0d3bceef225/library/std/src/panicking.rs", line: 500 }, { fn: "std::panicking::try", file: "/rustc/8ede3aae28fe6e4d52b38157d7bfe0d3bceef225/library/std/src/panicking.rs", line: 464 }, { fn: "std::panic::catch_unwind", file: "/rustc/8ede3aae28fe6e4d52b38157d7bfe0d3bceef225/library/std/src/panic.rs", line: 142 }, { fn: "std::rt::lang_start_internal", file: "/rustc/8ede3aae28fe6e4d52b38157d7bfe0d3bceef225/library/std/src/rt.rs", line: 148 }, { fn: "std::rt::lang_start", file: "/rustc/8ede3aae28fe6e4d52b38157d7bfe0d3bceef225/library/std/src/rt.rs", line: 165 }, { fn: "main" }, { fn: "__libc_start_main" }, { fn: "_start" }] }
```

Not super pretty at the moment, but we can see that the error occurred at
`{ fn: "myapp::main", file: "./src/main.rs", line: 29 }`.

Another thing to note is that, since Rust is compiled, it is not necessarily as easy to recover proper
stacktraces, especially in release builds. We're using [`anyhow`](https://docs.rs/anyhow/latest/anyhow/) for that.
The library is still young; please [report](https://github.com/LaurentMazare/candle/issues) any issue
where detecting the origin of an error is difficult.

## Cuda error management

When running a model on Cuda, you might get a stacktrace that does not really represent the error.
The reason is that CUDA is async by nature, and therefore the error might be caught while you were sending
totally different kernels.

One way to avoid this is to set the `CUDA_LAUNCH_BLOCKING=1` environment variable.
This will force every kernel to be launched sequentially.
You might still see the error reported on another kernel, however, as the faulty kernel might exit without
an error while corrupting some pointer, with the actual error only surfacing later, e.g. when the
`CudaSlice` is dropped.

If this occurs, you can use [`compute-sanitizer`](https://docs.nvidia.com/compute-sanitizer/ComputeSanitizer/index.html).
This tool is like `valgrind` but for cuda. It will help locate the errors in the kernels.
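For reference, here is a sketch of how these tools are typically invoked from the shell; the binary path below is a placeholder for your own program:

```bash
# Serialize kernel launches so that the backtrace points at the faulty kernel.
CUDA_LAUNCH_BLOCKING=1 cargo run --release

# Run the compiled binary under compute-sanitizer, the valgrind-like checker for cuda.
compute-sanitizer ./target/release/myapp
```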
hf_public_repos/candle/candle-book
hf_public_repos/candle/candle-book/src/simplified.rs
//! # A simplified example in Rust of training a neural network and then using it, based on the Candle framework by Hugging Face. //! Author: Evgeny Igumnov 2023 igumnovnsk@gmail.com //! This program implements a neural network to predict the winner of the second round of elections based on the results of the first round. //! //! ## Key points: //! //! A multilayer perceptron with two hidden layers is used. The first hidden layer has 4 neurons, the second has 2 neurons. //! The input is a vector of 2 numbers - the percentage of votes for the first and second candidates in the first stage. //! The output is the number 0 or 1, where 1 means that the first candidate will win in the second stage and 0 means that they will lose. //! For training, samples with real data on the results of the first and second stages of different elections are used. //! The model is trained by backpropagation using gradient descent and the cross-entropy loss function. //! Model parameters (weights of neurons) are initialized randomly, then optimized during training. //! After training, the model is tested on a held-out sample to evaluate the accuracy. //! If the accuracy on the test set is below 100%, the model is considered underfit and the learning process is repeated. //! Thus, this neural network learns to find hidden relationships between the results of the first and second rounds of voting in order to make predictions for new data. #[rustfmt::skip] mod tests { use candle::{DType, Result, Tensor, D, Device}; use candle_nn::{loss, ops, Linear, Module, VarBuilder, VarMap, Optimizer}; // ANCHOR: book_training_simplified1 const VOTE_DIM: usize = 2; const RESULTS: usize = 1; const EPOCHS: usize = 10; const LAYER1_OUT_SIZE: usize = 4; const LAYER2_OUT_SIZE: usize = 2; const LEARNING_RATE: f64 = 0.05; #[derive(Clone)] pub struct Dataset { pub train_votes: Tensor, pub train_results: Tensor, pub test_votes: Tensor, pub test_results: Tensor, } struct MultiLevelPerceptron { ln1: Linear, ln2: Linear, ln3: Linear, } impl MultiLevelPerceptron { fn new(vs: VarBuilder) -> Result<Self> { let ln1 = candle_nn::linear(VOTE_DIM, LAYER1_OUT_SIZE, vs.pp("ln1"))?; let ln2 = candle_nn::linear(LAYER1_OUT_SIZE, LAYER2_OUT_SIZE, vs.pp("ln2"))?; let ln3 = candle_nn::linear(LAYER2_OUT_SIZE, RESULTS + 1, vs.pp("ln3"))?; Ok(Self { ln1, ln2, ln3 }) } fn forward(&self, xs: &Tensor) -> Result<Tensor> { let xs = self.ln1.forward(xs)?; let xs = xs.relu()?; let xs = self.ln2.forward(&xs)?; let xs = xs.relu()?; self.ln3.forward(&xs) } } // ANCHOR_END: book_training_simplified1 // ANCHOR: book_training_simplified3 #[tokio::test] async fn simplified() -> anyhow::Result<()> { let dev = Device::cuda_if_available(0)?; let train_votes_vec: Vec<u32> = vec![ 15, 10, 10, 15, 5, 12, 30, 20, 16, 12, 13, 25, 6, 14, 31, 21, ]; let train_votes_tensor = Tensor::from_vec(train_votes_vec.clone(), (train_votes_vec.len() / VOTE_DIM, VOTE_DIM), &dev)?.to_dtype(DType::F32)?; let train_results_vec: Vec<u32> = vec![ 1, 0, 0, 1, 1, 0, 0, 1, ]; let train_results_tensor = Tensor::from_vec(train_results_vec, train_votes_vec.len() / VOTE_DIM, &dev)?; let test_votes_vec: Vec<u32> = vec![ 13, 9, 8, 14, 3, 10, ]; let test_votes_tensor = Tensor::from_vec(test_votes_vec.clone(), (test_votes_vec.len() / VOTE_DIM, VOTE_DIM), &dev)?.to_dtype(DType::F32)?; let test_results_vec: Vec<u32> = vec![ 1, 0, 0, ]; let test_results_tensor = Tensor::from_vec(test_results_vec.clone(), test_results_vec.len(), &dev)?; let m = Dataset { train_votes: train_votes_tensor, train_results:
train_results_tensor, test_votes: test_votes_tensor, test_results: test_results_tensor, }; let trained_model: MultiLevelPerceptron; loop { println!("Trying to train neural network."); match train(m.clone(), &dev) { Ok(model) => { trained_model = model; break; }, Err(e) => { println!("Error: {}", e); continue; } } } let real_world_votes: Vec<u32> = vec![ 13, 22, ]; let tensor_test_votes = Tensor::from_vec(real_world_votes.clone(), (1, VOTE_DIM), &dev)?.to_dtype(DType::F32)?; let final_result = trained_model.forward(&tensor_test_votes)?; let result = final_result .argmax(D::Minus1)? .to_dtype(DType::F32)? .get(0).map(|x| x.to_scalar::<f32>())??; println!("real_life_votes: {:?}", real_world_votes); println!("neural_network_prediction_result: {:?}", result); Ok(()) } // ANCHOR_END: book_training_simplified3 // ANCHOR: book_training_simplified2 fn train(m: Dataset, dev: &Device) -> anyhow::Result<MultiLevelPerceptron> { let train_results = m.train_results.to_device(dev)?; let train_votes = m.train_votes.to_device(dev)?; let varmap = VarMap::new(); let vs = VarBuilder::from_varmap(&varmap, DType::F32, dev); let model = MultiLevelPerceptron::new(vs.clone())?; let mut sgd = candle_nn::SGD::new(varmap.all_vars(), LEARNING_RATE)?; let test_votes = m.test_votes.to_device(dev)?; let test_results = m.test_results.to_device(dev)?; let mut final_accuracy: f32 = 0.0; for epoch in 1..EPOCHS + 1 { let logits = model.forward(&train_votes)?; let log_sm = ops::log_softmax(&logits, D::Minus1)?; let loss = loss::nll(&log_sm, &train_results)?; sgd.backward_step(&loss)?; let test_logits = model.forward(&test_votes)?; let sum_ok = test_logits .argmax(D::Minus1)? .eq(&test_results)? .to_dtype(DType::F32)? .sum_all()? .to_scalar::<f32>()?; let test_accuracy = sum_ok / test_results.dims1()? as f32; final_accuracy = 100. * test_accuracy; println!("Epoch: {epoch:3} Train loss: {:8.5} Test accuracy: {:5.2}%", loss.to_scalar::<f32>()?, final_accuracy ); if final_accuracy == 100.0 { break; } } if final_accuracy < 100.0 { Err(anyhow::Error::msg("The model is not trained well enough.")) } else { Ok(model) } } // ANCHOR_END: book_training_simplified2 }
0
hf_public_repos/candle/candle-book
hf_public_repos/candle/candle-book/src/lib.rs
#[cfg(test)]
pub mod simplified;

#[cfg(test)]
mod tests {
    use anyhow::Result;
    use candle::{DType, Device, Tensor};
    use parquet::file::reader::SerializedFileReader;

    // NOTE: Waiting on https://github.com/rust-lang/mdBook/pull/1856
    #[rustfmt::skip]
    #[tokio::test]
    async fn book_hub_1() {
        // ANCHOR: book_hub_1
        use candle::Device;
        use hf_hub::api::tokio::Api;

        let api = Api::new().unwrap();
        let repo = api.model("bert-base-uncased".to_string());

        let weights_filename = repo.get("model.safetensors").await.unwrap();

        let weights = candle::safetensors::load(weights_filename, &Device::Cpu).unwrap();
        // ANCHOR_END: book_hub_1
        assert_eq!(weights.len(), 206);
    }

    #[rustfmt::skip]
    #[test]
    fn book_hub_2() {
        // ANCHOR: book_hub_2
        use candle::Device;
        use hf_hub::api::sync::Api;
        use memmap2::Mmap;
        use std::fs;

        let api = Api::new().unwrap();
        let repo = api.model("bert-base-uncased".to_string());
        let weights_filename = repo.get("model.safetensors").unwrap();

        let file = fs::File::open(weights_filename).unwrap();
        let mmap = unsafe { Mmap::map(&file).unwrap() };
        let weights = candle::safetensors::load_buffer(&mmap[..], &Device::Cpu).unwrap();
        // ANCHOR_END: book_hub_2
        assert_eq!(weights.len(), 206);
    }

    #[rustfmt::skip]
    #[test]
    fn book_hub_3() {
        // ANCHOR: book_hub_3
        use candle::{DType, Device, Tensor};
        use hf_hub::api::sync::Api;
        use memmap2::Mmap;
        use safetensors::slice::IndexOp;
        use safetensors::SafeTensors;
        use std::fs;

        let api = Api::new().unwrap();
        let repo = api.model("bert-base-uncased".to_string());
        let weights_filename = repo.get("model.safetensors").unwrap();

        let file = fs::File::open(weights_filename).unwrap();
        let mmap = unsafe { Mmap::map(&file).unwrap() };

        // Use safetensors directly
        let tensors = SafeTensors::deserialize(&mmap[..]).unwrap();
        let view = tensors
            .tensor("bert.encoder.layer.0.attention.self.query.weight")
            .unwrap();

        // We're going to load shard with rank 1, within a world_size of 4
        // We're going to split along dimension 0 doing VIEW[start..stop, :]
        let rank = 1;
        let world_size = 4;
        let dim = 0;
        let dtype = view.dtype();
        let mut tp_shape = view.shape().to_vec();
        let size = tp_shape[0];

        if size % world_size != 0 {
            panic!("The dimension is not divisible by `world_size`");
        }
        let block_size = size / world_size;
        let start = rank * block_size;
        let stop = (rank + 1) * block_size;

        // Everything is expressed in tensor dimensions;
        // byte offsets are handled automatically by safetensors.

        let iterator = view.slice(start..stop).unwrap();

        tp_shape[dim] = block_size;

        // Convert safetensors Dtype to candle DType
        let dtype: DType = dtype.try_into().unwrap();

        // TODO: Implement from_buffer_iterator so we can skip the extra CPU alloc.
        let raw: Vec<u8> = iterator.into_iter().flatten().cloned().collect();

        let tp_tensor = Tensor::from_raw_buffer(&raw, dtype, &tp_shape, &Device::Cpu).unwrap();
        // ANCHOR_END: book_hub_3
        assert_eq!(view.shape(), &[768, 768]);
        assert_eq!(tp_tensor.dims(), &[192, 768]);
    }

    #[rustfmt::skip]
    #[test]
    fn book_training_1() -> Result<()> {
        // ANCHOR: book_training_1
        use hf_hub::{api::sync::Api, Repo, RepoType};

        let dataset_id = "mnist".to_string();

        let api = Api::new()?;
        let repo = Repo::with_revision(
            dataset_id,
            RepoType::Dataset,
            "refs/convert/parquet".to_string(),
        );
        let repo = api.repo(repo);
        let test_parquet_filename = repo.get("mnist/test/0000.parquet")?;
        let train_parquet_filename = repo.get("mnist/train/0000.parquet")?;
        let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?)?;
        let train_parquet = SerializedFileReader::new(std::fs::File::open(train_parquet_filename)?)?;
        // ANCHOR_END: book_training_1
        // Ignore unused
        let _train = train_parquet;
        // ANCHOR: book_training_2
        for row in test_parquet {
            for (idx, (name, field)) in row?.get_column_iter().enumerate() {
                println!("Column id {idx}, name {name}, value {field}");
            }
        }
        // ANCHOR_END: book_training_2
        let test_parquet_filename = repo.get("mnist/test/0000.parquet")?;
        let train_parquet_filename = repo.get("mnist/train/0000.parquet")?;
        let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?)?;
        let train_parquet = SerializedFileReader::new(std::fs::File::open(train_parquet_filename)?)?;
        // ANCHOR: book_training_3
        let test_samples = 10_000;
        let mut test_buffer_images: Vec<u8> = Vec::with_capacity(test_samples * 784);
        let mut test_buffer_labels: Vec<u8> = Vec::with_capacity(test_samples);
        for row in test_parquet {
            for (_name, field) in row?.get_column_iter() {
                if let parquet::record::Field::Group(subrow) = field {
                    for (_name, field) in subrow.get_column_iter() {
                        if let parquet::record::Field::Bytes(value) = field {
                            let image = image::load_from_memory(value.data()).unwrap();
                            test_buffer_images.extend(image.to_luma8().as_raw());
                        }
                    }
                } else if let parquet::record::Field::Long(label) = field {
                    test_buffer_labels.push(*label as u8);
                }
            }
        }
        let test_images = (Tensor::from_vec(test_buffer_images, (test_samples, 784), &Device::Cpu)?.to_dtype(DType::F32)? / 255.)?;
        let test_labels = Tensor::from_vec(test_buffer_labels, (test_samples, ), &Device::Cpu)?;

        let train_samples = 60_000;
        let mut train_buffer_images: Vec<u8> = Vec::with_capacity(train_samples * 784);
        let mut train_buffer_labels: Vec<u8> = Vec::with_capacity(train_samples);
        for row in train_parquet {
            for (_name, field) in row?.get_column_iter() {
                if let parquet::record::Field::Group(subrow) = field {
                    for (_name, field) in subrow.get_column_iter() {
                        if let parquet::record::Field::Bytes(value) = field {
                            let image = image::load_from_memory(value.data()).unwrap();
                            train_buffer_images.extend(image.to_luma8().as_raw());
                        }
                    }
                } else if let parquet::record::Field::Long(label) = field {
                    train_buffer_labels.push(*label as u8);
                }
            }
        }
        let train_images = (Tensor::from_vec(train_buffer_images, (train_samples, 784), &Device::Cpu)?.to_dtype(DType::F32)?
/ 255.)?; let train_labels = Tensor::from_vec(train_buffer_labels, (train_samples, ), &Device::Cpu)?; let mnist = candle_datasets::vision::Dataset { train_images, train_labels, test_images, test_labels, labels: 10, }; // ANCHOR_END: book_training_3 assert_eq!(mnist.test_images.dims(), &[10_000, 784]); assert_eq!(mnist.test_labels.dims(), &[10_000]); assert_eq!(mnist.train_images.dims(), &[60_000, 784]); assert_eq!(mnist.train_labels.dims(), &[60_000]); Ok(()) } }
0
hf_public_repos/candle/candle-book
hf_public_repos/candle/candle-book/src/SUMMARY.md
# Summary [Introduction](README.md) # User Guide - [Installation](guide/installation.md) - [Hello World - MNIST](guide/hello_world.md) - [PyTorch cheatsheet](guide/cheatsheet.md) # Reference Guide - [Running a model](inference/inference.md) - [Using the hub](inference/hub.md) - [Error management](error_manage.md) - [Training](training/training.md) - [Simplified](training/simplified.md) - [MNIST](training/mnist.md) - [Fine-tuning]() - [Serialization]() - [Advanced Cuda usage]() - [Writing a custom kernel]() - [Porting a custom kernel]() - [Using MKL]() - [Creating apps]() - [Creating a WASM app]() - [Creating a REST api webserver]() - [Creating a desktop Tauri app]()
0
hf_public_repos/candle/candle-book
hf_public_repos/candle/candle-book/src/chapter_1.md
# Chapter 1
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/inference/inference.md
# Running a model

In order to run an existing model, you will need to download and use existing weights. Most models are already available on https://huggingface.co/ in [`safetensors`](https://github.com/huggingface/safetensors) format.

Let's get started by running an old model: `bert-base-uncased`.
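To make this concrete, here is a minimal sketch of what loading such weights looks like once the file is on disk (the path is a placeholder; the next chapter shows how to fetch the real file from the hub):

```rust
# extern crate candle_core;
use candle_core::Device;

// Load every tensor stored in a `safetensors` file onto the CPU.
let weights = candle_core::safetensors::load("model.safetensors", &Device::Cpu);
```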
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/inference/hub.md
# Using the hub

Install the [`hf-hub`](https://github.com/huggingface/hf-hub) crate:

```bash
cargo add hf-hub
```

Then let's start by downloading the [model file](https://huggingface.co/bert-base-uncased/tree/main).

```rust
# extern crate candle_core;
# extern crate hf_hub;
use hf_hub::api::sync::Api;
use candle_core::Device;

let api = Api::new().unwrap();
let repo = api.model("bert-base-uncased".to_string());

let weights = repo.get("model.safetensors").unwrap();

let weights = candle_core::safetensors::load(weights, &Device::Cpu);
```

We now have access to all the [tensors](https://huggingface.co/bert-base-uncased?show_tensors=true) within the file. You can check all the names of the tensors [here](https://huggingface.co/bert-base-uncased?show_tensors=true).

## Using async

`hf-hub` comes with an async API.

```bash
cargo add hf-hub --features tokio
```

```rust,ignore
# This is tested directly in examples crate because it needs external dependencies unfortunately:
# See [this](https://github.com/rust-lang/mdBook/issues/706)
{{#include ../lib.rs:book_hub_1}}
```

## Using in a real model

Now that we have our weights, we can use them in our BERT architecture:

```rust
# extern crate candle_core;
# extern crate candle_nn;
# extern crate hf_hub;
# use hf_hub::api::sync::Api;
#
# let api = Api::new().unwrap();
# let repo = api.model("bert-base-uncased".to_string());
#
# let weights = repo.get("model.safetensors").unwrap();
use candle_core::{Device, Tensor, DType};
use candle_nn::{Linear, Module};

let weights = candle_core::safetensors::load(weights, &Device::Cpu).unwrap();

let weight = weights.get("bert.encoder.layer.0.attention.self.query.weight").unwrap();
let bias = weights.get("bert.encoder.layer.0.attention.self.query.bias").unwrap();

let linear = Linear::new(weight.clone(), Some(bias.clone()));

let input_ids = Tensor::zeros((3, 768), DType::F32, &Device::Cpu).unwrap();
let output = linear.forward(&input_ids).unwrap();
```

For a full reference, you can check out the full [bert](https://github.com/LaurentMazare/candle/tree/main/candle-examples/examples/bert) example.

## Memory mapping

For more efficient loading, instead of reading the file, you could use [`memmap2`](https://docs.rs/memmap2/latest/memmap2/).

**Note**: Be careful with memory mapping: it seems to cause issues on [Windows and WSL](https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/5893), and it will definitely be slower on a network-mounted disk because it issues more read calls.

```rust,ignore
{{#include ../lib.rs:book_hub_2}}
```

**Note**: This operation is **unsafe**. [See the safety notice](https://docs.rs/memmap2/latest/memmap2/struct.Mmap.html#safety). In practice, model files should never be modified, and the mmaps should be mostly READONLY anyway, so the caveat most likely does not apply, but always keep it in mind.

## Tensor Parallel Sharding

When using multiple GPUs with tensor parallelism in order to get good latency, you can load only the part of the tensor you need. For that, you need to use [`safetensors`](https://crates.io/crates/safetensors) directly.

```bash
cargo add safetensors
```

```rust,ignore
{{#include ../lib.rs:book_hub_3}}
```
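The shard arithmetic in the snippet above is worth spelling out on its own. As a standalone sketch (the function name is just for illustration), each of the `world_size` ranks takes one contiguous block of rows:

```rust
/// Returns the `[start, stop)` row range owned by `rank`, assuming the
/// sharded dimension is evenly divisible by `world_size`.
fn shard_bounds(size: usize, rank: usize, world_size: usize) -> (usize, usize) {
    assert!(size % world_size == 0, "dimension not divisible by world_size");
    let block_size = size / world_size;
    (rank * block_size, (rank + 1) * block_size)
}

// With size = 768 and world_size = 4, rank 1 owns rows 192..384.
assert_eq!(shard_bounds(768, 1, 4), (192, 384));
```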
0
hf_public_repos/candle/candle-book/src/inference
hf_public_repos/candle/candle-book/src/inference/cuda/README.md
# Advanced Cuda usage
0
hf_public_repos/candle/candle-book/src/inference
hf_public_repos/candle/candle-book/src/inference/cuda/porting.md
# Porting a custom kernel
0
hf_public_repos/candle/candle-book/src/inference
hf_public_repos/candle/candle-book/src/inference/cuda/writing.md
# Writing a custom kernel
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/apps/README.md
# Creating apps
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/apps/dekstop.md
# Creating a desktop Tauri app
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/apps/wasm.md
# Creating a WASM app
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/apps/rest.md
# Creating a REST api webserver
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/guide/installation.md
# Installation

**With Cuda support**:

1. First, make sure that Cuda is correctly installed.
- `nvcc --version` should print information about your Cuda compiler driver.
- `nvidia-smi --query-gpu=compute_cap --format=csv` should print your GPU's compute capability, e.g. something like:

```bash
compute_cap
8.9
```

You can also compile the Cuda kernels for a specific compute cap using the `CUDA_COMPUTE_CAP=<compute cap>` environment variable.

If any of the above commands errors out, please make sure to update your Cuda version.

2. Create a new app and add [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) with Cuda support.

Start by creating a new cargo project:

```bash
cargo new myapp
cd myapp
```

Make sure to add the `candle-core` crate with the cuda feature:

```bash
cargo add --git https://github.com/huggingface/candle.git candle-core --features "cuda"
```

Run `cargo build` to make sure everything can be correctly built.

```bash
cargo build
```

**Without Cuda support**:

Create a new app and add [`candle-core`](https://github.com/huggingface/candle/tree/main/candle-core) as follows:

```bash
cargo new myapp
cd myapp
cargo add --git https://github.com/huggingface/candle.git candle-core
```

Finally, run `cargo build` to make sure everything can be correctly built.

```bash
cargo build
```

**With MKL support**:

You can also enable the `mkl` feature, which can provide faster inference on CPU. See [Using MKL](./advanced/mkl.md).
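**Sanity check**:

Once the crate is added, a minimal `src/main.rs` like the following sketch can be used to confirm that everything links and runs (on CPU here):

```rust
use candle_core::{Device, Tensor};

fn main() -> candle_core::Result<()> {
    // Build a tiny tensor and print it; if this runs, the install is fine.
    let t = Tensor::new(&[1f32, 2., 3.], &Device::Cpu)?;
    println!("{t}");
    Ok(())
}
```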
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/guide/cheatsheet.md
# PyTorch cheatsheet

{{#include ../../../README.md:cheatsheet}}
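As a taste of the mapping, here is one row of the cheatsheet expanded into a runnable snippet (a sketch; see the included table above for the full list):

```rust
# extern crate candle_core;
use candle_core::{Device, Tensor};

// PyTorch: torch.tensor([[1., 2.], [3., 4.]])
let t = Tensor::new(&[[1f32, 2.], [3., 4.]], &Device::Cpu).unwrap();
```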
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/guide/hello_world.md
# Hello world!

We will now create the hello world of the ML world: a model capable of solving the MNIST dataset.

Open `src/main.rs` and fill in this content:

```rust
# extern crate candle_core;
use candle_core::{Device, Result, Tensor};

struct Model {
    first: Tensor,
    second: Tensor,
}

impl Model {
    fn forward(&self, image: &Tensor) -> Result<Tensor> {
        let x = image.matmul(&self.first)?;
        let x = x.relu()?;
        x.matmul(&self.second)
    }
}

fn main() -> Result<()> {
    // Use Device::new_cuda(0)?; to use the GPU.
    let device = Device::Cpu;

    let first = Tensor::randn(0f32, 1.0, (784, 100), &device)?;
    let second = Tensor::randn(0f32, 1.0, (100, 10), &device)?;
    let model = Model { first, second };

    let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;

    let digit = model.forward(&dummy_image)?;
    println!("Digit {digit:?} digit");
    Ok(())
}
```

Everything should now run with:

```bash
cargo run --release
```

## Using a `Linear` layer.

Now that we have this, we might want to make things a bit more complex, for instance by adding `bias` and creating the classical `Linear` layer. We can do so as follows:

```rust
# extern crate candle_core;
# use candle_core::{Device, Result, Tensor};
struct Linear{
    weight: Tensor,
    bias: Tensor,
}
impl Linear{
    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let x = x.matmul(&self.weight)?;
        x.broadcast_add(&self.bias)
    }
}

struct Model {
    first: Linear,
    second: Linear,
}

impl Model {
    fn forward(&self, image: &Tensor) -> Result<Tensor> {
        let x = self.first.forward(image)?;
        let x = x.relu()?;
        self.second.forward(&x)
    }
}
```

This will change the model-running code into a new function:

```rust
# extern crate candle_core;
# use candle_core::{Device, Result, Tensor};
# struct Linear{
#     weight: Tensor,
#     bias: Tensor,
# }
# impl Linear{
#     fn forward(&self, x: &Tensor) -> Result<Tensor> {
#         let x = x.matmul(&self.weight)?;
#         x.broadcast_add(&self.bias)
#     }
# }
#
# struct Model {
#     first: Linear,
#     second: Linear,
# }
#
# impl Model {
#     fn forward(&self, image: &Tensor) -> Result<Tensor> {
#         let x = self.first.forward(image)?;
#         let x = x.relu()?;
#         self.second.forward(&x)
#     }
# }
fn main() -> Result<()> {
    // Use Device::new_cuda(0)?; to use the GPU.
    // Use Device::Cpu; to use the CPU.
    let device = Device::cuda_if_available(0)?;

    // Creating a dummy model
    let weight = Tensor::randn(0f32, 1.0, (784, 100), &device)?;
    let bias = Tensor::randn(0f32, 1.0, (100, ), &device)?;
    let first = Linear{weight, bias};
    let weight = Tensor::randn(0f32, 1.0, (100, 10), &device)?;
    let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?;
    let second = Linear{weight, bias};
    let model = Model { first, second };

    let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;

    // Inference on the model
    let digit = model.forward(&dummy_image)?;
    println!("Digit {digit:?} digit");
    Ok(())
}
```

Now that it works, this is a great way to create your own layers. But most of the classical layers are already implemented in [candle-nn](https://github.com/huggingface/candle/tree/main/candle-nn).

## Using `candle_nn`.

For instance [Linear](https://github.com/huggingface/candle/blob/main/candle-nn/src/linear.rs) is already there. This `Linear` is coded with the PyTorch layout in mind so as to better reuse existing models out there; it uses the transpose of the weights rather than the weights directly.
So instead we can simplify our example:

```bash
cargo add --git https://github.com/huggingface/candle.git candle-nn
```

And rewrite our example to use it:

```rust
# extern crate candle_core;
# extern crate candle_nn;
use candle_core::{Device, Result, Tensor};
use candle_nn::{Linear, Module};

struct Model {
    first: Linear,
    second: Linear,
}

impl Model {
    fn forward(&self, image: &Tensor) -> Result<Tensor> {
        let x = self.first.forward(image)?;
        let x = x.relu()?;
        self.second.forward(&x)
    }
}

fn main() -> Result<()> {
    // Use Device::new_cuda(0)?; to use the GPU.
    let device = Device::Cpu;

    // This has changed (784, 100) -> (100, 784) !
    let weight = Tensor::randn(0f32, 1.0, (100, 784), &device)?;
    let bias = Tensor::randn(0f32, 1.0, (100, ), &device)?;
    let first = Linear::new(weight, Some(bias));
    let weight = Tensor::randn(0f32, 1.0, (10, 100), &device)?;
    let bias = Tensor::randn(0f32, 1.0, (10, ), &device)?;
    let second = Linear::new(weight, Some(bias));
    let model = Model { first, second };

    let dummy_image = Tensor::randn(0f32, 1.0, (1, 784), &device)?;

    let digit = model.forward(&dummy_image)?;
    println!("Digit {digit:?} digit");
    Ok(())
}
```

Feel free to modify this example to use `Conv2d` to create a classical convnet instead (a sketch is given at the end of this page).

Now that we have the running dummy code we can get to more advanced topics:

- [For PyTorch users](../guide/cheatsheet.md)
- [Running existing models](../inference/inference.md)
- [Training models](../training/training.md)
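Here is a sketch of the `Conv2d` suggestion from above (hedged: the exact shapes and `Conv2dConfig` fields follow the `candle-nn` version used elsewhere in this book and may differ in yours):

```rust
# extern crate candle_core;
# extern crate candle_nn;
use candle_core::{Device, Result, Tensor};
use candle_nn::{Conv2d, Conv2dConfig, Module};

fn main() -> Result<()> {
    let device = Device::Cpu;

    // One conv layer: 1 input channel, 8 output channels, 3x3 kernel.
    let weight = Tensor::randn(0f32, 1.0, (8, 1, 3, 3), &device)?;
    let bias = Tensor::randn(0f32, 1.0, (8,), &device)?;
    let conv = Conv2d::new(weight, Some(bias), Conv2dConfig::default());

    // MNIST images viewed as (batch, channels, height, width).
    let dummy_image = Tensor::randn(0f32, 1.0, (1, 1, 28, 28), &device)?;
    let features = conv.forward(&dummy_image)?;
    println!("{:?}", features.dims()); // [1, 8, 26, 26] with the default config
    Ok(())
}
```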
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/training/finetuning.md
# Fine-tuning
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/training/mnist.md
# MNIST

Now that we have downloaded the MNIST parquet files, let's put them into a simple struct.

```rust,ignore
{{#include ../lib.rs:book_training_3}}
```

Parsing the files and loading the data into single tensors requires the whole dataset to fit in memory. It is quite rudimentary, but simple enough for a small dataset like MNIST.
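Once the struct is built, individual samples can be pulled back out for a quick sanity check. A hedged sketch, reusing the `mnist` value from the snippet above:

```rust,ignore
// Grab the first flattened training image and reshape it to 28x28.
let first = mnist.train_images.get(0)?;   // shape: (784,)
let as_image = first.reshape((28, 28))?;
println!("label: {}", mnist.train_labels.get(0)?.to_scalar::<u8>()?);
```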
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/training/serialization.md
# Serialization
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/training/simplified.md
# Simplified

## How it works

This program implements a neural network to predict the winner of the second round of elections based on the results of the first round.

Key points:

1. A multilayer perceptron with two hidden layers is used. The first hidden layer has 4 neurons, the second has 2 neurons.
2. The input is a vector of 2 numbers - the percentage of votes for the first and second candidates in the first stage.
3. The output is the number 0 or 1, where 1 means that the first candidate will win in the second stage, 0 means that they will lose.
4. For training, samples with real data on the results of the first and second stages of different elections are used.
5. The model is trained by backpropagation using gradient descent and the cross-entropy loss function.
6. Model parameters (weights of neurons) are initialized randomly, then optimized during training.
7. After training, the model is tested on a held-out sample to evaluate the accuracy.
8. If the accuracy on the test set is below 100%, the model is considered underfit and the training process is repeated.

Thus, this neural network learns to find hidden relationships between the results of the first and second rounds of voting in order to make predictions for new data.

```rust,ignore
{{#include ../simplified.rs:book_training_simplified1}}
```

```rust,ignore
{{#include ../simplified.rs:book_training_simplified2}}
```

```rust,ignore
{{#include ../simplified.rs:book_training_simplified3}}
```

## Example output

```bash
Trying to train neural network.
Epoch:   1 Train loss:  4.42555 Test accuracy:  0.00%
Epoch:   2 Train loss:  0.84677 Test accuracy: 33.33%
Epoch:   3 Train loss:  2.54335 Test accuracy: 33.33%
Epoch:   4 Train loss:  0.37806 Test accuracy: 33.33%
Epoch:   5 Train loss:  0.36647 Test accuracy: 100.00%
real_life_votes: [13, 22]
neural_network_prediction_result: 0.0
```
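As a side note, the two-step loss in the training loop (a `log_softmax` followed by `loss::nll`) is the usual decomposition of cross-entropy. A hedged sketch of the equivalence (`loss::cross_entropy` exists in recent `candle-nn` versions; check yours):

```rust,ignore
// What the training loop does:
let log_sm = ops::log_softmax(&logits, D::Minus1)?;
let loss = loss::nll(&log_sm, &train_results)?;
// Equivalent single call:
let loss = loss::cross_entropy(&logits, &train_results)?;
```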
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/training/training.md
# Training

Training starts with data. We're going to use the Hugging Face Hub and start with the hello world dataset of machine learning, MNIST.

Let's start with downloading `MNIST` from [huggingface](https://huggingface.co/datasets/mnist).

This requires [`hf-hub`](https://github.com/huggingface/hf-hub).

```bash
cargo add hf-hub
```

This is going to be very hands-on for now.

```rust,ignore
{{#include ../../../candle-examples/src/lib.rs:book_training_1}}
```

This uses the standardized `parquet` files from the `refs/convert/parquet` branch on every dataset. Our handles are now [`parquet::file::serialized_reader::SerializedFileReader`].

We can inspect the content of the files with:

```rust,ignore
{{#include ../../../candle-examples/src/lib.rs:book_training_2}}
```

You should see something like:

```bash
Column id 1, name label, value 6
Column id 0, name image, value {bytes: [137, ....]
Column id 1, name label, value 8
Column id 0, name image, value {bytes: [137, ....]
```

So each row contains 2 columns (image, label) with the image saved as bytes. Let's put them into a useful struct.
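Before building tensors out of the rows, it can be reassuring to count them first. A small hedged sketch, reusing the parquet handles from above:

```rust,ignore
let test_parquet = SerializedFileReader::new(std::fs::File::open(test_parquet_filename)?)?;
// Each iterator item is a `Result<Row>`; MNIST's test split has 10_000 rows.
let n_rows = test_parquet.into_iter().count();
println!("test rows: {n_rows}");
```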
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/cuda/README.md
# Advanced Cuda usage
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/cuda/porting.md
# Porting a custom kernel
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/cuda/writing.md
# Writing a custom kernel
0
hf_public_repos/candle/candle-book/src
hf_public_repos/candle/candle-book/src/advanced/mkl.md
# Using MKL
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/blip/README.md
## Running [BLIP Image Captioning](https://huggingface.co/Salesforce/blip-image-captioning-large) Example ### Vanilla JS and WebWorkers To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library: ```bash sh build-lib.sh ``` This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module: ```js import init, { Model } from "./build/m.js"; ``` The full example can be found under `./index.html`. All needed assets are fetched from the web, so no need to download anything. Finally, you can preview the example by running a local HTTP server. For example: ```bash python -m http.server ``` Then open `http://localhost:8000/index.html` in your browser.
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/blip/blipWorker.js
import init, { Model } from "./build/m.js"; async function fetchArrayBuffer(url, cacheFile = true) { if (!cacheFile) return new Uint8Array(await (await fetch(url)).arrayBuffer()); const cacheName = "blip-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class Blip { static instance = {}; static async getInstance( weightsURL, tokenizerURL, configURL, modelID, quantized ) { if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [weightsArrayU8, tokenizerArrayU8, configArrayU8] = await Promise.all([ fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), fetchArrayBuffer(configURL), ]); this.instance[modelID] = new Model( weightsArrayU8, tokenizerArrayU8, configArrayU8, quantized ); } else { self.postMessage({ status: "ready", message: "Model Already Loaded" }); } return this.instance[modelID]; } } self.addEventListener("message", async (event) => { const { weightsURL, tokenizerURL, configURL, modelID, imageURL, quantized } = event.data; try { self.postMessage({ status: "status", message: "Loading Blip Model..." }); const model = await Blip.getInstance( weightsURL, tokenizerURL, configURL, modelID, quantized ); self.postMessage({ status: "status", message: "Running Blip Inference...", }); const imageArrayU8 = await fetchArrayBuffer(imageURL, false); const output = model.generate_caption_from_image(imageArrayU8); self.postMessage({ status: "complete", message: "complete", output: output, }); } catch (e) { self.postMessage({ error: e }); } });
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/blip/index.html
<!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } </style> <title>Candle Blip Image Captioning Demo</title> <script src="https://cdn.tailwindcss.com"></script> <script type="module" src="./code.js"></script> <script type="module"> const MODELS = { blip_image_quantized_q4k: { base_url: "https://huggingface.co/lmz/candle-blip/resolve/main/", model: "blip-image-captioning-large-q4k.gguf", config: "config.json", tokenizer: "tokenizer.json", quantized: true, size: "271 MB", }, blip_image_quantized_q80: { base_url: "https://huggingface.co/lmz/candle-blip/resolve/main/", model: "blip-image-captioning-large-q80.gguf", config: "config.json", tokenizer: "tokenizer.json", quantized: true, size: "505 MB", }, blip_image_large: { base_url: "https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/refs%2Fpr%2F18/", model: "model.safetensors", config: "config.json", tokenizer: "tokenizer.json", quantized: false, size: "1.88 GB", }, }; const blipWorker = new Worker("./blipWorker.js", { type: "module", }); const outputStatusEl = document.querySelector("#output-status"); const outputCaptionEl = document.querySelector("#output-caption"); const modelSelectEl = document.querySelector("#model"); const clearBtn = document.querySelector("#clear-btn"); const fileUpload = document.querySelector("#file-upload"); const dropArea = document.querySelector("#drop-area"); const imagesExamples = document.querySelector("#image-select"); const canvas = document.querySelector("#canvas"); const ctxCanvas = canvas.getContext("2d"); let isCaptioning = false; let currentImageURL = null; clearBtn.addEventListener("click", () => { clearImageCanvas(); }); modelSelectEl.addEventListener("change", () => { if (currentImageURL) { runInference(currentImageURL); } }); //add event listener to file input fileUpload.addEventListener("input", async (e) => { const target = e.target; if (target.files.length > 0) { const href = URL.createObjectURL(target.files[0]); clearImageCanvas(); await drawImageCanvas(href); runInference(href); } }); // add event listener to drop-area dropArea.addEventListener("dragenter", (e) => { e.preventDefault(); dropArea.classList.add("border-blue-700"); }); dropArea.addEventListener("dragleave", (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); }); dropArea.addEventListener("dragover", (e) => { e.preventDefault(); }); dropArea.addEventListener("drop", async (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); const url = e.dataTransfer.getData("text/uri-list"); const files = e.dataTransfer.files; if (files.length > 0) { const href = URL.createObjectURL(files[0]); clearImageCanvas(); await drawImageCanvas(href); runInference(href); } else if (url) { clearImageCanvas(); await drawImageCanvas(url); runInference(url); } }); imagesExamples.addEventListener("click", async (e) => { if (isCaptioning) { return; } const target = e.target; if (target.nodeName === "IMG") { const href = target.src; clearImageCanvas(); await drawImageCanvas(href); runInference(href); } }); function clearImageCanvas() { ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); isCaptioning = false; clearBtn.disabled = true; canvas.parentElement.style.height = "auto"; outputStatusEl.hidden = 
false; outputCaptionEl.hidden = true; outputStatusEl.innerText = "Please select an image"; currentImageURL = null; } async function drawImageCanvas(imgURL) { if (!imgURL) { throw new Error("No image URL provided"); } return new Promise((resolve, reject) => { ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); const img = new Image(); img.crossOrigin = "anonymous"; img.onload = () => { canvas.width = img.width; canvas.height = img.height; ctxCanvas.drawImage(img, 0, 0); canvas.parentElement.style.height = canvas.offsetHeight + "px"; clearBtn.disabled = false; resolve(img); }; img.src = imgURL; currentImageURL = imgURL; }); } document.addEventListener("DOMContentLoaded", () => { for (const [id, model] of Object.entries(MODELS)) { const option = document.createElement("option"); option.value = id; option.innerText = `${id} (${model.size})`; modelSelectEl.appendChild(option); } }); async function getImageCaption( worker, weightsURL, tokenizerURL, configURL, modelID, imageURL, quantized, updateStatus = null ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, imageURL, quantized, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } function updateStatus(data) { if (data.status === "status") { outputStatusEl.innerText = data.message; } } async function runInference(imageURL) { if (isCaptioning || !imageURL) { alert("Please select an image first"); return; } outputStatusEl.hidden = false; outputCaptionEl.hidden = true; clearBtn.disabled = true; modelSelectEl.disabled = true; isCaptioning = true; const selectedModel = modelSelectEl.value; const model = MODELS[selectedModel]; const weightsURL = `${model.base_url}${model.model}`; const tokenizerURL = `${model.base_url}${model.tokenizer}`; const configURL = `${model.base_url}${model.config}`; const quantized = model.quantized; try { const time = performance.now(); const caption = await getImageCaption( blipWorker, weightsURL, tokenizerURL, configURL, selectedModel, imageURL, quantized, updateStatus ); outputStatusEl.hidden = true; outputCaptionEl.hidden = false; const totalTime = ((performance.now() - time)/1000).toFixed(2); outputCaptionEl.innerHTML = `${ caption.output }<br/><span class="text-xs">Inference time: ${totalTime} s</span>`; } catch (err) { console.error(err); outputStatusEl.hidden = false; outputCaptionEl.hidden = true; outputStatusEl.innerText = err.message; } clearBtn.disabled = false; modelSelectEl.disabled = false; isCaptioning = false; } </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-5 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle BLIP Image Captioning</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> <a href="https://huggingface.co/Salesforce/blip-image-captioning-large" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >BLIP Image Captioning </a> running in the browser using <a href="https://github.com/huggingface/candle/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >Candle</a >, a minimalist ML 
framework for Rust.
        </p>
        <p class="text-xs max-w-lg py-2">
          <b>Note:</b>
          The image captioning on the smallest model takes about 50 seconds; this
          will vary depending on your machine and the model size.
        </p>
      </div>

      <div>
        <label for="model" class="font-medium block">Models Options: </label>
        <select
          id="model"
          class="border-2 border-gray-500 rounded-md font-light interactive disabled:cursor-not-allowed w-full max-w-max"
        ></select>
      </div>

      <!-- drag and drop area -->
      <div class="grid gap-4 sm:grid-cols-2 py-4">
        <div class="relative max-w-lg">
          <div
            class="absolute w-full bottom-full flex justify-between items-center"
          >
            <div class="flex gap-2 w-full">
              <button
                id="clear-btn"
                disabled
                title="Clear Image"
                class="ml-auto text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center"
              >
                <svg
                  class=""
                  xmlns="http://www.w3.org/2000/svg"
                  viewBox="0 0 13 12"
                  height="1em"
                >
                  <path
                    d="M1.6.7 12 11.1M12 .7 1.6 11.1"
                    stroke="#2E3036"
                    stroke-width="2"
                  />
                </svg>
              </button>
            </div>
          </div>
          <div
            id="drop-area"
            class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative aspect-video w-full overflow-hidden"
          >
            <div
              class="flex flex-col items-center justify-center space-y-1 text-center"
            >
              <svg
                width="25"
                height="25"
                viewBox="0 0 25 25"
                fill="none"
                xmlns="http://www.w3.org/2000/svg"
              >
                <path
                  d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z"
                  fill="#000"
                />
              </svg>
              <div class="flex text-sm text-gray-600">
                <label
                  for="file-upload"
                  class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700"
                >
                  <span>Drag and drop your image here</span>
                  <span class="block text-xs">or</span>
                  <span class="block text-xs">Click to upload</span>
                </label>
              </div>
              <input
                id="file-upload"
                name="file-upload"
                type="file"
                class="sr-only"
              />
            </div>
            <canvas
              id="canvas"
              class="absolute pointer-events-none w-full"
            ></canvas>
          </div>
        </div>
        <div class="">
          <div
            class="h-full bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2"
          >
            <p
              id="output-caption"
              class="m-auto text-xl text-center p-2"
              hidden
            ></p>
            <span id="output-status" class="m-auto font-light">
              Please select an image
            </span>
          </div>
        </div>
      </div>

      <div>
        <div
          class="flex gap-3 items-center overflow-x-scroll"
          id="image-select"
        >
          <h3 class="font-medium">Examples:</h3>
          <img
            src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/sf.jpg"
            class="cursor-pointer w-24 h-24 object-cover"
          />
          <img
            src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg"
            class="cursor-pointer w-24 h-24 object-cover"
          />
          <img
            src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/000000000077.jpg"
            class="cursor-pointer w-24 h-24 object-cover"
          />
        </div>
      </div>
    </main>
  </body>
</html>
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/blip/build-lib.sh
cargo build --target wasm32-unknown-unknown --release wasm-bindgen ../../target/wasm32-unknown-unknown/release/m.wasm --out-dir build --target web
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/blip/Cargo.toml
[package] name = "candle-wasm-example-blip" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [dependencies] candle = { path = "../../candle-core", version = "0.3.1", package = "candle-core" } candle-nn = { path = "../../candle-nn", version = "0.3.1" } candle-transformers = { path = "../../candle-transformers", version = "0.3.1" } tokenizers = { workspace = true, features = ["unstable_wasm"] } num-traits = { workspace = true } # App crates. anyhow = { workspace = true } byteorder = { workspace = true } getrandom = { version = "0.2", features = ["js"] } image = { workspace = true } log = { workspace = true } safetensors = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } # Wasm specific crates. console_error_panic_hook = "0.1.7" wasm-bindgen = "0.2.87" js-sys = "0.3.64"
0
hf_public_repos/candle/candle-wasm-examples/blip
hf_public_repos/candle/candle-wasm-examples/blip/src/lib.rs
use wasm_bindgen::prelude::*; pub mod token_output_stream; #[wasm_bindgen] extern "C" { // Use `js_namespace` here to bind `console.log(..)` instead of just // `log(..)` #[wasm_bindgen(js_namespace = console)] pub fn log(s: &str); } #[macro_export] macro_rules! console_log { // Note that this is using the `log` function imported above during // `bare_bones` ($($t:tt)*) => ($crate::log(&format_args!($($t)*).to_string())) }
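// Illustrative usage (not compiled here): `console_log!` accepts the same
// format arguments as `println!` and forwards the resulting string to the
// browser console through the `log` binding above, e.g.
// `console_log!("loaded {} bytes", bytes.len());`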
0
hf_public_repos/candle/candle-wasm-examples/blip
hf_public_repos/candle/candle-wasm-examples/blip/src/token_output_stream.rs
use candle::Result; /// This is a wrapper around a tokenizer to ensure that tokens can be returned to the user in a /// streaming way rather than having to wait for the full decoding. pub struct TokenOutputStream { tokenizer: tokenizers::Tokenizer, tokens: Vec<u32>, prev_index: usize, current_index: usize, } impl TokenOutputStream { pub fn new(tokenizer: tokenizers::Tokenizer) -> Self { Self { tokenizer, tokens: Vec::new(), prev_index: 0, current_index: 0, } } pub fn into_inner(self) -> tokenizers::Tokenizer { self.tokenizer } fn decode(&self, tokens: &[u32]) -> Result<String> { match self.tokenizer.decode(tokens, true) { Ok(str) => Ok(str), Err(err) => candle::bail!("cannot decode: {err}"), } } // https://github.com/huggingface/text-generation-inference/blob/5ba53d44a18983a4de32d122f4cb46f4a17d9ef6/server/text_generation_server/models/model.py#L68 pub fn next_token(&mut self, token: u32) -> Result<Option<String>> { let prev_text = if self.tokens.is_empty() { String::new() } else { let tokens = &self.tokens[self.prev_index..self.current_index]; self.decode(tokens)? }; self.tokens.push(token); let text = self.decode(&self.tokens[self.prev_index..])?; if text.len() > prev_text.len() && text.chars().last().unwrap().is_ascii() { let text = text.split_at(prev_text.len()); self.prev_index = self.current_index; self.current_index = self.tokens.len(); Ok(Some(text.1.to_string())) } else { Ok(None) } } pub fn decode_rest(&self) -> Result<Option<String>> { let prev_text = if self.tokens.is_empty() { String::new() } else { let tokens = &self.tokens[self.prev_index..self.current_index]; self.decode(tokens)? }; let text = self.decode(&self.tokens[self.prev_index..])?; if text.len() > prev_text.len() { let text = text.split_at(prev_text.len()); Ok(Some(text.1.to_string())) } else { Ok(None) } } pub fn decode_all(&self) -> Result<String> { self.decode(&self.tokens) } pub fn get_token(&self, token_s: &str) -> Option<u32> { self.tokenizer.get_vocab(true).get(token_s).copied() } pub fn tokenizer(&self) -> &tokenizers::Tokenizer { &self.tokenizer } pub fn clear(&mut self) { self.tokens.clear(); self.prev_index = 0; self.current_index = 0; } }
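// Illustrative usage (not compiled here): feed token ids one at a time and
// print each fragment as soon as it becomes decodable, then flush the rest.
//
//   let mut stream = TokenOutputStream::new(tokenizer);
//   for &t in &generated_ids {
//       if let Some(piece) = stream.next_token(t)? {
//           print!("{piece}");
//       }
//   }
//   if let Some(rest) = stream.decode_rest()? {
//       print!("{rest}");
//   }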
0
hf_public_repos/candle/candle-wasm-examples/blip/src
hf_public_repos/candle/candle-wasm-examples/blip/src/bin/m.rs
use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use candle_transformers::models::blip; use candle_transformers::models::quantized_blip; use candle_wasm_example_blip::console_log; use candle_wasm_example_blip::token_output_stream::TokenOutputStream; use js_sys::Date; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; enum SelectedModel { M(blip::BlipForConditionalGeneration), Q(quantized_blip::BlipForConditionalGeneration), } impl SelectedModel { fn text_decoder_forward(&mut self, xs: &Tensor, img_xs: &Tensor) -> Result<Tensor, JsError> { match self { Self::M(m) => m .text_decoder() .forward(xs, img_xs) .map_err(|e| JsError::new(&e.to_string())), Self::Q(m) => m .text_decoder() .forward(xs, img_xs) .map_err(|e| JsError::new(&e.to_string())), } } fn reset_kv_cache(&mut self) { match self { Self::M(m) => m.reset_kv_cache(), Self::Q(m) => m.reset_kv_cache(), } } } #[wasm_bindgen] pub struct Model { model: SelectedModel, tokenizer: TokenOutputStream, } const SEP_TOKEN_ID: u32 = 102; #[wasm_bindgen] impl Model { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, quantized: bool, ) -> Result<Model, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let tokenizer = TokenOutputStream::new(tokenizer); let config: blip::Config = serde_json::from_slice(&config)?; let device = Device::Cpu; let start = Date::now(); let model: SelectedModel = if quantized { let vb = quantized_blip::VarBuilder::from_gguf_buffer(&weights)?; let model = quantized_blip::BlipForConditionalGeneration::new(&config, vb)?; SelectedModel::Q(model) } else { let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, &device)?; let model = blip::BlipForConditionalGeneration::new(&config, vb)?; SelectedModel::M(model) }; console_log!("model loaded in {:?}s", (Date::now() - start) / 1000.); Ok(Self { model, tokenizer }) } #[wasm_bindgen] pub fn generate_caption_from_image(&mut self, image: Vec<u8>) -> Result<String, JsError> { self.model.reset_kv_cache(); let device = Device::Cpu; console_log!("loading image as tensor"); let start = Date::now(); let image: Tensor = self.load_image(image)?.to_device(&device)?; console_log!("image loaded in {:?}s", (Date::now() - start) / 1000.); let start = Date::now(); let image_embeds: Tensor = match &mut self.model { SelectedModel::M(m) => image.unsqueeze(0)?.apply(m.vision_model())?, SelectedModel::Q(m) => image.unsqueeze(0)?.apply(m.vision_model())?, }; console_log!("image embedded in {:?}s", (Date::now() - start) / 1000.); let mut logits_processor = LogitsProcessor::new(299792458, None, None); let mut token_ids = vec![30522u32]; let mut text: String = "".to_string(); let start = Date::now(); for index in 0..1000 { let context_size = if index > 0 { 1 } else { token_ids.len() }; let start_pos = token_ids.len().saturating_sub(context_size); let input_ids = Tensor::new(&token_ids[start_pos..], &device)?.unsqueeze(0)?; let logits = self.model.text_decoder_forward(&input_ids, &image_embeds)?; let logits = logits.squeeze(0)?; let logits = logits.get(logits.dim(0)? - 1)?; let token = logits_processor.sample(&logits)?; if token == SEP_TOKEN_ID { break; } token_ids.push(token); if let Some(t) = self.tokenizer.next_token(token)? { text.push_str(&t); } } if let Some(rest) = self .tokenizer .decode_rest() .map_err(|m| JsError::new(&m.to_string()))? 
{ text.push_str(&rest); } console_log!("caption generated in {:?}s", (Date::now() - start) / 1000.); Ok(text) } } impl Model { fn load_image(&self, image: Vec<u8>) -> Result<Tensor, JsError> { let device = &Device::Cpu; let img = image::io::Reader::new(std::io::Cursor::new(image)) .with_guessed_format()? .decode() .map_err(|e| JsError::new(&e.to_string()))? .resize_to_fill(384, 384, image::imageops::FilterType::Triangle); let img = img.to_rgb8(); let data = img.into_raw(); let data = Tensor::from_vec(data, (384, 384, 3), device)?.permute((2, 0, 1))?; let mean = Tensor::new(&[0.48145466f32, 0.4578275, 0.40821073], device)?.reshape((3, 1, 1))?; let std = Tensor::new(&[0.26862954f32, 0.261_302_6, 0.275_777_1], device)?.reshape((3, 1, 1))?; (data.to_dtype(candle::DType::F32)? / 255.)? .broadcast_sub(&mean)? .broadcast_div(&std) .map_err(|e| JsError::new(&e.to_string())) } } fn main() { console_error_panic_hook::set_once(); }
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/segment-anything/README.md
## Running Segment Anything Example

Here, we provide an example of how to run Segment Anything using a Candle-compiled WASM binary and runtime.

### Vanilla JS and WebWorkers

To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library:

```bash
sh build-lib.sh
```

This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module:

```js
import init, { Model } from "./build/m.js";
```

The full example can be found under `./lib-example.html`. All needed assets are fetched from the web, so no need to download anything.
Finally, you can preview the example by running a local HTTP server. For example:

```bash
python -m http.server
```

Then open `http://localhost:8000/lib-example.html` in your browser.
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/segment-anything/build-lib.sh
cargo build --target wasm32-unknown-unknown --release wasm-bindgen ../../target/wasm32-unknown-unknown/release/m.wasm --out-dir build --target web
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/segment-anything/lib-example.html
<!DOCTYPE html>
<html>
  <head>
    <meta charset="UTF-8" />
    <meta name="viewport" content="width=device-width, initial-scale=1.0" />
    <title>Candle Segment Anything Model (SAM) Rust/WASM</title>
    <style>
      @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap");
      html,
      body {
        font-family: "Source Sans 3", sans-serif;
      }
    </style>
    <script src="https://cdn.tailwindcss.com"></script>
    <script type="module">
      // models base url
      const MODEL_BASEURL =
        "https://huggingface.co/lmz/candle-sam/resolve/main/";
      // model list
      const MODELS = {
        sam_mobile_tiny: {
          url: "mobile_sam-tiny-vitt.safetensors",
        },
        sam_base: {
          url: "sam_vit_b_01ec64.safetensors",
        },
      };
      const samWorker = new Worker("./samWorker.js", { type: "module" });

      async function segmentPoints(
        modelURL, // URL to the weights file
        modelID, // model ID
        imageURL, // URL to the image file
        points // {x, y} points to prompt image
      ) {
        return new Promise((resolve, reject) => {
          function messageHandler(event) {
            console.log(event.data);
            if ("status" in event.data) {
              updateStatus(event.data);
            }
            if ("error" in event.data) {
              samWorker.removeEventListener("message", messageHandler);
              reject(new Error(event.data.error));
            }
            if (event.data.status === "complete-embedding") {
              samWorker.removeEventListener("message", messageHandler);
              resolve();
            }
            if (event.data.status === "complete") {
              samWorker.removeEventListener("message", messageHandler);
              resolve(event.data.output);
            }
          }
          samWorker.addEventListener("message", messageHandler);
          samWorker.postMessage({
            modelURL,
            modelID,
            imageURL,
            points,
          });
        });
      }
      function updateStatus(statusMessage) {
        statusOutput.innerText = statusMessage.message;
      }

      let copyMaskURL = null;
      let copyImageURL = null;

      const clearBtn = document.querySelector("#clear-btn");
      const maskBtn = document.querySelector("#mask-btn");
      const undoBtn = document.querySelector("#undo-btn");
      const downloadBtn = document.querySelector("#download-btn");
      const canvas = document.querySelector("#canvas");
      const mask = document.querySelector("#mask");
      const ctxCanvas = canvas.getContext("2d");
      const ctxMask = mask.getContext("2d");
      const fileUpload = document.querySelector("#file-upload");
      const dropArea = document.querySelector("#drop-area");
      const dropButtons = document.querySelector("#drop-buttons");
      const imagesExamples = document.querySelector("#image-select");
      const modelSelection = document.querySelector("#model");
      const statusOutput = document.querySelector("#output-status");

      //add event listener to file input
      fileUpload.addEventListener("input", (e) => {
        const target = e.target;
        if (target.files.length > 0) {
          const href = URL.createObjectURL(target.files[0]);
          clearImageCanvas();
          copyImageURL = href;
          drawImageCanvas(href);
          setImageEmbeddings(href);
          togglePointMode(false);
        }
      });
      // add event listener to drop-area
      dropArea.addEventListener("dragenter", (e) => {
        e.preventDefault();
        dropArea.classList.add("border-blue-700");
      });
      dropArea.addEventListener("dragleave", (e) => {
        e.preventDefault();
        dropArea.classList.remove("border-blue-700");
      });
      dropArea.addEventListener("dragover", (e) => {
        e.preventDefault();
      });
      dropArea.addEventListener("drop", (e) => {
        e.preventDefault();
        dropArea.classList.remove("border-blue-700");
        const url = e.dataTransfer.getData("text/uri-list");
        const files = e.dataTransfer.files;
        if (files.length > 0) {
          const href =
URL.createObjectURL(files[0]); clearImageCanvas(); copyImageURL = href; drawImageCanvas(href); setImageEmbeddings(href); togglePointMode(false); } else if (url) { clearImageCanvas(); copyImageURL = url; drawImageCanvas(url); setImageEmbeddings(url); togglePointMode(false); } }); let hasImage = false; let isSegmenting = false; let isEmbedding = false; let currentImageURL = ""; let pointArr = []; let bgPointMode = false; //add event listener to image examples imagesExamples.addEventListener("click", (e) => { if (isEmbedding || isSegmenting) { return; } const target = e.target; if (target.nodeName === "IMG") { const href = target.src; clearImageCanvas(); copyImageURL = href; drawImageCanvas(href); setImageEmbeddings(href); } }); //add event listener to mask button maskBtn.addEventListener("click", () => { togglePointMode(); }); //add event listener to clear button clearBtn.addEventListener("click", () => { clearImageCanvas(); togglePointMode(false); pointArr = []; }); //add event listener to undo button undoBtn.addEventListener("click", () => { undoPoint(); }); // add event to download btn downloadBtn.addEventListener("click", async () => { // Function to load image blobs as Image elements asynchronously const loadImageAsync = (imageURL) => { return new Promise((resolve) => { const img = new Image(); img.onload = () => { resolve(img); }; img.crossOrigin = "anonymous"; img.src = imageURL; }); }; const originalImage = await loadImageAsync(copyImageURL); const maskImage = await loadImageAsync(copyMaskURL); // create main a board to draw const canvas = document.createElement("canvas"); const ctx = canvas.getContext("2d"); canvas.width = originalImage.width; canvas.height = originalImage.height; // Perform the mask operation ctx.drawImage(maskImage, 0, 0); ctx.globalCompositeOperation = "source-in"; ctx.drawImage(originalImage, 0, 0); // to blob const blobPromise = new Promise((resolve) => { canvas.toBlob(resolve); }); const blob = await blobPromise; const resultURL = URL.createObjectURL(blob); // download const link = document.createElement("a"); link.href = resultURL; link.download = "cutout.png"; link.click(); }); //add click event to canvas canvas.addEventListener("click", async (event) => { if (!hasImage || isEmbedding || isSegmenting) { return; } const backgroundMode = event.shiftKey ? 
bgPointMode^event.shiftKey : bgPointMode; const targetBox = event.target.getBoundingClientRect(); const x = (event.clientX - targetBox.left) / targetBox.width; const y = (event.clientY - targetBox.top) / targetBox.height; const ptsToRemove = []; for (const [idx, pts] of pointArr.entries()) { const d = Math.sqrt((pts[0] - x) ** 2 + (pts[1] - y) ** 2); if (d < 6 / targetBox.width) { ptsToRemove.push(idx); } } if (ptsToRemove.length > 0) { pointArr = pointArr.filter((_, idx) => !ptsToRemove.includes(idx)); } else { pointArr = [...pointArr, [x, y, !backgroundMode]]; } undoBtn.disabled = false; downloadBtn.disabled = false; if (pointArr.length == 0) { ctxMask.clearRect(0, 0, canvas.width, canvas.height); undoBtn.disabled = true; downloadBtn.disabled = true; return; } isSegmenting = true; const { maskURL } = await getSegmentationMask(pointArr); isSegmenting = false; copyMaskURL = maskURL; drawMask(maskURL, pointArr); }); async function undoPoint() { if (!hasImage || isEmbedding || isSegmenting) { return; } if (pointArr.length === 0) { return; } pointArr.pop(); if (pointArr.length === 0) { ctxMask.clearRect(0, 0, canvas.width, canvas.height); undoBtn.disabled = true; return; } isSegmenting = true; const { maskURL } = await getSegmentationMask(pointArr); isSegmenting = false; copyMaskURL = maskURL; drawMask(maskURL, pointArr); } function togglePointMode(mode) { bgPointMode = mode === undefined ? !bgPointMode : mode; maskBtn.querySelector("span").innerText = bgPointMode ? "Background Point" : "Mask Point"; if (bgPointMode) { maskBtn.querySelector("#mask-circle").setAttribute("hidden", ""); maskBtn.querySelector("#unmask-circle").removeAttribute("hidden"); } else { maskBtn.querySelector("#mask-circle").removeAttribute("hidden"); maskBtn.querySelector("#unmask-circle").setAttribute("hidden", ""); } } async function getSegmentationMask(points) { const modelID = modelSelection.value; const modelURL = MODEL_BASEURL + MODELS[modelID].url; const imageURL = currentImageURL; const { maskURL } = await segmentPoints( modelURL, modelID, imageURL, points ); return { maskURL }; } async function setImageEmbeddings(imageURL) { if (isEmbedding) { return; } canvas.classList.remove("cursor-pointer"); canvas.classList.add("cursor-wait"); clearBtn.disabled = true; const modelID = modelSelection.value; const modelURL = MODEL_BASEURL + MODELS[modelID].url; isEmbedding = true; await segmentPoints(modelURL, modelID, imageURL); canvas.classList.remove("cursor-wait"); canvas.classList.add("cursor-pointer"); clearBtn.disabled = false; isEmbedding = false; currentImageURL = imageURL; } function clearImageCanvas() { ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); ctxMask.clearRect(0, 0, canvas.width, canvas.height); hasImage = false; isEmbedding = false; isSegmenting = false; currentImageURL = ""; pointArr = []; clearBtn.disabled = true; canvas.parentElement.style.height = "auto"; dropButtons.classList.remove("invisible"); } function drawMask(maskURL, points) { if (!maskURL) { throw new Error("No mask URL provided"); } const img = new Image(); img.crossOrigin = "anonymous"; img.onload = () => { mask.width = canvas.width; mask.height = canvas.height; ctxMask.save(); ctxMask.drawImage(canvas, 0, 0); ctxMask.globalCompositeOperation = "source-atop"; ctxMask.fillStyle = "rgba(255, 0, 0, 0.6)"; ctxMask.fillRect(0, 0, canvas.width, canvas.height); ctxMask.globalCompositeOperation = "destination-in"; ctxMask.drawImage(img, 0, 0); ctxMask.globalCompositeOperation = "source-over"; for (const pt of points) { if (pt[2]) { 
ctxMask.fillStyle = "rgba(0, 255, 255, 1)"; } else { ctxMask.fillStyle = "rgba(255, 255, 0, 1)"; } ctxMask.beginPath(); ctxMask.arc( pt[0] * canvas.width, pt[1] * canvas.height, 3, 0, 2 * Math.PI ); ctxMask.fill(); } ctxMask.restore(); }; img.src = maskURL; } function drawImageCanvas(imgURL) { if (!imgURL) { throw new Error("No image URL provided"); } ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); ctxCanvas.clearRect(0, 0, canvas.width, canvas.height); const img = new Image(); img.crossOrigin = "anonymous"; img.onload = () => { canvas.width = img.width; canvas.height = img.height; ctxCanvas.drawImage(img, 0, 0); canvas.parentElement.style.height = canvas.offsetHeight + "px"; hasImage = true; clearBtn.disabled = false; dropButtons.classList.add("invisible"); }; img.src = imgURL; } const observer = new ResizeObserver((entries) => { for (let entry of entries) { if (entry.target === canvas) { canvas.parentElement.style.height = canvas.offsetHeight + "px"; } } }); observer.observe(canvas); </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]">🕯️</span> <div> <h1 class="text-5xl font-bold">Candle Segment Anything</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> Zero-shot image segmentation with <a href="https://segment-anything.com" class="underline hover:text-blue-500 hover:no-underline" target="_blank" >Segment Anything Model (SAM)</a > and <a href="https://github.com/ChaoningZhang/MobileSAM" class="underline hover:text-blue-500 hover:no-underline" target="_blank" >MobileSAM </a >. It runs in the browser with a WASM runtime built with <a href="https://github.com/huggingface/candle/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >Candle </a> </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light"> <option value="sam_mobile_tiny" selected> Mobile SAM Tiny (40.6 MB) </option> <option value="sam_base">SAM Base (375 MB)</option> </select> </div> <div> <p class="text-xs italic max-w-lg"> <b>Note:</b> The model's first run may take a few seconds as it loads and caches the model in the browser, and then creates the image embeddings. Any subsequent clicks on points will be significantly faster. 
</p> </div> <div class="relative max-w-2xl"> <div class="flex justify-between items-center"> <div class="px-2 rounded-md inline text-xs"> <span id="output-status" class="m-auto font-light"></span> </div> <div class="flex gap-2"> <button id="mask-btn" title="Toggle Mask Point and Background Point" class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center"> <span>Mask Point</span> <svg xmlns="http://www.w3.org/2000/svg" height="1em" viewBox="0 0 512 512"> <path id="mask-circle" d="M256 512a256 256 0 1 0 0-512 256 256 0 1 0 0 512z" /> <path id="unmask-circle" hidden d="M464 256a208 208 0 1 0-416 0 208 208 0 1 0 416 0zM0 256a256 256 0 1 1 512 0 256 256 0 1 1-512 0z" /> </svg> </button> <button id="undo-btn" disabled title="Undo Last Point" class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center"> <svg xmlns="http://www.w3.org/2000/svg" height="1em" viewBox="0 0 512 512"> <path d="M48.5 224H40a24 24 0 0 1-24-24V72a24 24 0 0 1 41-17l41.6 41.6a224 224 0 1 1-1 317.8 32 32 0 0 1 45.3-45.3 160 160 0 1 0 1-227.3L185 183a24 24 0 0 1-17 41H48.5z" /> </svg> </button> <button id="clear-btn" disabled title="Clear Image" class="text-xs bg-white rounded-md disabled:opacity-50 flex gap-1 items-center"> <svg class="" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 13 12" height="1em"> <path d="M1.6.7 12 11.1M12 .7 1.6 11.1" stroke="#2E3036" stroke-width="2" /> </svg> </button> </div> </div> <div id="drop-area" class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative p-20 w-full overflow-hidden"> <div id="drop-buttons" class="flex flex-col items-center justify-center space-y-1 text-center relative z-10"> <svg width="25" height="25" viewBox="0 0 25 25" fill="none" xmlns="http://www.w3.org/2000/svg"> <path d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z" fill="#000" /> </svg> <div class="flex text-sm text-gray-600"> <label for="file-upload" class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700"> <span>Drag and drop your image here</span> <span class="block text-xs">or</span> <span class="block text-xs">Click to upload</span> </label> </div> <input id="file-upload" name="file-upload" type="file" class="sr-only" /> </div> <canvas id="canvas" class="absolute w-full"></canvas> <canvas id="mask" class="pointer-events-none absolute w-full"></canvas> </div> <div class="text-right py-2"> <button id="share-btn" class="bg-white rounded-md hover:outline outline-orange-200 disabled:opacity-50 invisible"> <img src="https://huggingface.co/datasets/huggingface/badges/raw/main/share-to-community-sm.svg" /> </button> <button id="download-btn" title="Copy result (.png)" disabled class="p-1 px-2 text-xs font-medium bg-white rounded-2xl outline outline-gray-200 hover:outline-orange-200 disabled:opacity-50" > Download Cut-Out </button> </div> </div> <div> <div class="flex gap-3 items-center overflow-x-scroll" id="image-select"> <h3 class="font-medium">Examples:</h3> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/sf.jpg" class="cursor-pointer w-24 h-24 object-cover" /> <img 
src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/bike.jpeg" class="cursor-pointer w-24 h-24 object-cover" /> <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/candle/examples/000000000077.jpg" class="cursor-pointer w-24 h-24 object-cover" /> </div> </div> </main> </body> </html>
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/segment-anything/samWorker.js
//load the candle SAM Model wasm module import init, { Model } from "./build/m.js"; async function fetchArrayBuffer(url, cacheModel = true) { if (!cacheModel) return new Uint8Array(await (await fetch(url)).arrayBuffer()); const cacheName = "sam-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class SAMModel { static instance = {}; // keep current image embeddings state static imageArrayHash = {}; // Add a new property to hold the current modelID static currentModelID = null; static async getInstance(modelURL, modelID) { if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: `Loading Model ${modelID}`, }); const weightsArrayU8 = await fetchArrayBuffer(modelURL); this.instance[modelID] = new Model( weightsArrayU8, /tiny|mobile/.test(modelID) ); } else { self.postMessage({ status: "loading", message: "Model Already Loaded" }); } // Set the current modelID to the modelID that was passed in this.currentModelID = modelID; return this.instance[modelID]; } // Remove the modelID parameter from setImageEmbeddings static setImageEmbeddings(imageArrayU8) { // check if image embeddings are already set for this image and model const imageArrayHash = this.getSimpleHash(imageArrayU8); if ( this.imageArrayHash[this.currentModelID] === imageArrayHash && this.instance[this.currentModelID] ) { self.postMessage({ status: "embedding", message: "Embeddings Already Set", }); return; } this.imageArrayHash[this.currentModelID] = imageArrayHash; this.instance[this.currentModelID].set_image_embeddings(imageArrayU8); self.postMessage({ status: "embedding", message: "Embeddings Set" }); } static getSimpleHash(imageArrayU8) { // get simple hash of imageArrayU8 let imageArrayHash = 0; for (let i = 0; i < imageArrayU8.length; i += 100) { imageArrayHash ^= imageArrayU8[i]; } return imageArrayHash.toString(16); } } async function createImageCanvas( { mask_shape, mask_data }, // mask { original_width, original_height, width, height } // original image ) { const [_, __, shape_width, shape_height] = mask_shape; const maskCanvas = new OffscreenCanvas(shape_width, shape_height); // canvas for mask const maskCtx = maskCanvas.getContext("2d"); const canvas = new OffscreenCanvas(original_width, original_height); // canvas for creating mask with original image size const ctx = canvas.getContext("2d"); const imageData = maskCtx.createImageData( maskCanvas.width, maskCanvas.height ); const data = imageData.data; for (let p = 0; p < data.length; p += 4) { data[p] = 0; data[p + 1] = 0; data[p + 2] = 0; data[p + 3] = mask_data[p / 4] * 255; } maskCtx.putImageData(imageData, 0, 0); let sx, sy; if (original_height < original_width) { sy = original_height / original_width; sx = 1; } else { sy = 1; sx = original_width / original_height; } ctx.drawImage( maskCanvas, 0, 0, maskCanvas.width * sx, maskCanvas.height * sy, 0, 0, original_width, original_height ); const blob = await canvas.convertToBlob(); return URL.createObjectURL(blob); } self.addEventListener("message", async (event) => { const { modelURL, modelID, imageURL, points } = event.data; try { self.postMessage({ status: "loading", message: "Starting SAM" }); const sam = await SAMModel.getInstance(modelURL, modelID); self.postMessage({ status: "loading", 
message: "Loading Image" }); const imageArrayU8 = await fetchArrayBuffer(imageURL, false); self.postMessage({ status: "embedding", message: "Creating Embeddings" }); SAMModel.setImageEmbeddings(imageArrayU8); if (!points) { // no points only do the embeddings self.postMessage({ status: "complete-embedding", message: "Embeddings Complete", }); return; } self.postMessage({ status: "segmenting", message: "Segmenting" }); const { mask, image } = sam.mask_for_point({ points }); const maskDataURL = await createImageCanvas(mask, image); // Send the segment back to the main thread as JSON self.postMessage({ status: "complete", message: "Segmentation Complete", output: { maskURL: maskDataURL }, }); } catch (e) { self.postMessage({ error: e }); } });
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/segment-anything/Cargo.toml
[package] name = "candle-wasm-example-sam" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [dependencies] candle = { path = "../../candle-core", version = "0.3.1", package = "candle-core" } candle-nn = { path = "../../candle-nn", version = "0.3.1" } candle-transformers = { path = "../../candle-transformers", version = "0.3.1" } num-traits = { workspace = true } # App crates. anyhow = { workspace = true } byteorder = { workspace = true } getrandom = { version = "0.2", features = ["js"] } image = { workspace = true } log = { workspace = true } safetensors = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } # Wasm specific crates. console_error_panic_hook = "0.1.7" wasm-bindgen = "0.2.87" serde-wasm-bindgen = "0.6.0"
0
hf_public_repos/candle/candle-wasm-examples/segment-anything
hf_public_repos/candle/candle-wasm-examples/segment-anything/src/lib.rs
use candle_transformers::models::segment_anything::sam;
use wasm_bindgen::prelude::*;

pub use sam::{Sam, IMAGE_SIZE};

#[wasm_bindgen]
extern "C" {
    // Use `js_namespace` here to bind `console.log(..)` instead of just
    // `log(..)`.
    #[wasm_bindgen(js_namespace = console)]
    pub fn log(s: &str);
}

#[macro_export]
macro_rules! console_log {
    // Forward the formatted arguments to the `console.log` binding imported above.
    ($($t:tt)*) => ($crate::log(&format_args!($($t)*).to_string()))
}
0
hf_public_repos/candle/candle-wasm-examples/segment-anything/src
hf_public_repos/candle/candle-wasm-examples/segment-anything/src/bin/m.rs
use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_wasm_example_sam as sam; use wasm_bindgen::prelude::*; struct Embeddings { original_width: u32, original_height: u32, width: u32, height: u32, data: Tensor, } #[wasm_bindgen] pub struct Model { sam: sam::Sam, embeddings: Option<Embeddings>, } #[wasm_bindgen] impl Model { #[wasm_bindgen(constructor)] pub fn new(weights: Vec<u8>, use_tiny: bool) -> Result<Model, JsError> { console_error_panic_hook::set_once(); let dev = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, dev)?; let sam = if use_tiny { sam::Sam::new_tiny(vb)? // tiny vit_t } else { sam::Sam::new(768, 12, 12, &[2, 5, 8, 11], vb)? // sam_vit_b }; Ok(Self { sam, embeddings: None, }) } pub fn set_image_embeddings(&mut self, image_data: Vec<u8>) -> Result<(), JsError> { sam::console_log!("image data: {}", image_data.len()); let image_data = std::io::Cursor::new(image_data); let image = image::io::Reader::new(image_data) .with_guessed_format()? .decode() .map_err(candle::Error::wrap)?; let (original_height, original_width) = (image.height(), image.width()); let (height, width) = (original_height, original_width); let resize_longest = sam::IMAGE_SIZE as u32; let (height, width) = if height < width { let h = (resize_longest * height) / width; (h, resize_longest) } else { let w = (resize_longest * width) / height; (resize_longest, w) }; let image_t = { let img = image.resize_exact(width, height, image::imageops::FilterType::CatmullRom); let data = img.to_rgb8().into_raw(); Tensor::from_vec( data, (img.height() as usize, img.width() as usize, 3), &Device::Cpu, )? .permute((2, 0, 1))? }; let data = self.sam.embeddings(&image_t)?; self.embeddings = Some(Embeddings { original_width, original_height, width, height, data, }); Ok(()) } pub fn mask_for_point(&self, input: JsValue) -> Result<JsValue, JsError> { let input: PointsInput = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; let transformed_points = input.points; for &(x, y, _bool) in &transformed_points { if !(0.0..=1.0).contains(&x) { return Err(JsError::new(&format!( "x has to be between 0 and 1, got {}", x ))); } if !(0.0..=1.0).contains(&y) { return Err(JsError::new(&format!( "y has to be between 0 and 1, got {}", y ))); } } let embeddings = match &self.embeddings { None => Err(JsError::new("image embeddings have not been set"))?, Some(embeddings) => embeddings, }; let (mask, iou_predictions) = self.sam.forward_for_embeddings( &embeddings.data, embeddings.height as usize, embeddings.width as usize, &transformed_points, false, )?; let iou = iou_predictions.flatten(0, 1)?.to_vec1::<f32>()?[0]; let mask_shape = mask.dims().to_vec(); let mask_data = mask.ge(0f32)?.flatten_all()?.to_vec1::<u8>()?; let mask = Mask { iou, mask_shape, mask_data, }; let image = Image { original_width: embeddings.original_width, original_height: embeddings.original_height, width: embeddings.width, height: embeddings.height, }; Ok(serde_wasm_bindgen::to_value(&MaskImage { mask, image })?) 
} } #[derive(serde::Serialize, serde::Deserialize)] struct Mask { iou: f32, mask_shape: Vec<usize>, mask_data: Vec<u8>, } #[derive(serde::Serialize, serde::Deserialize)] struct Image { original_width: u32, original_height: u32, width: u32, height: u32, } #[derive(serde::Serialize, serde::Deserialize)] struct MaskImage { mask: Mask, image: Image, } #[derive(serde::Serialize, serde::Deserialize)] struct PointsInput { points: Vec<(f64, f64, bool)>, } fn main() { console_error_panic_hook::set_once(); }
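To make the exported surface concrete, here is a sketch of calling this wasm-bindgen `Model` directly from JavaScript, mirroring what `samWorker.js` does; the file names are placeholders for wherever the weights and image actually live.

```js
// Minimal sketch of using the wasm API above; asset names are placeholders.
import init, { Model } from "./build/m.js";

await init();
const weights = new Uint8Array(
  await (await fetch("sam_weights.safetensors")).arrayBuffer()
);
const model = new Model(weights, /* use_tiny= */ true);

const imageBytes = new Uint8Array(
  await (await fetch("image.jpg")).arrayBuffer()
);
model.set_image_embeddings(imageBytes);

// Points are (x, y, is_foreground) tuples with x and y normalized to [0, 1].
const { mask, image } = model.mask_for_point({ points: [[0.5, 0.5, true]] });
console.log(mask.iou, mask.mask_shape, image.original_width);
```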
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/whisper/whisperWorker.js
//load the candle Whisper decoder wasm module import init, { Decoder } from "./build/m.js"; async function fetchArrayBuffer(url) { const cacheName = "whisper-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class Whisper { static instance = {}; // Retrieve the Whisper model. When called for the first time, // this will load the model and save it for future use. static async getInstance(params) { const { weightsURL, modelID, tokenizerURL, mel_filtersURL, configURL, quantized, is_multilingual, timestamps, task, language, } = params; // load individual modelID only once if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [ weightsArrayU8, tokenizerArrayU8, mel_filtersArrayU8, configArrayU8, ] = await Promise.all([ fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), fetchArrayBuffer(mel_filtersURL), fetchArrayBuffer(configURL), ]); this.instance[modelID] = new Decoder( weightsArrayU8, tokenizerArrayU8, mel_filtersArrayU8, configArrayU8, quantized, is_multilingual, timestamps, task, language ); } else { self.postMessage({ status: "loading", message: "Model Already Loaded" }); } return this.instance[modelID]; } } self.addEventListener("message", async (event) => { const { weightsURL, modelID, tokenizerURL, configURL, mel_filtersURL, audioURL, } = event.data; try { self.postMessage({ status: "decoding", message: "Starting Decoder" }); let quantized = false; if (modelID.includes("quantized")) { quantized = true; } let is_multilingual = false; if (modelID.includes("multilingual")) { is_multilingual = true; } let timestamps = true; const decoder = await Whisper.getInstance({ weightsURL, modelID, tokenizerURL, mel_filtersURL, configURL, quantized, is_multilingual, timestamps, task: null, language: null, }); self.postMessage({ status: "decoding", message: "Loading Audio" }); const audioArrayU8 = await fetchArrayBuffer(audioURL); self.postMessage({ status: "decoding", message: "Running Decoder..." }); const segments = decoder.decode(audioArrayU8); // Send the segment back to the main thread as JSON self.postMessage({ status: "complete", message: "complete", output: JSON.parse(segments), }); } catch (e) { self.postMessage({ error: e }); } });
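A sketch of how a page can talk to this worker: the field names match the destructuring in the message handler above, and the asset paths are the ones used elsewhere in this example.

```js
const whisperWorker = new Worker("./whisperWorker.js", { type: "module" });

whisperWorker.addEventListener("message", (event) => {
  if ("error" in event.data) {
    console.error(event.data.error);
  } else if (event.data.status === "complete") {
    // `output` is the list of decoded segments.
    console.log(event.data.output);
  } else {
    console.log(event.data.status, event.data.message);
  }
});

whisperWorker.postMessage({
  weightsURL: "whisper-tiny.en/model.safetensors",
  modelID: "tiny_en",
  tokenizerURL: "whisper-tiny.en/tokenizer.json",
  configURL: "whisper-tiny.en/config.json",
  mel_filtersURL: "mel_filters.safetensors",
  audioURL: "audios/samples_jfk.wav",
});
```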
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/whisper/README.md
## Running Whisper Examples

Here, we provide two examples of how to run Whisper using a Candle-compiled WASM binary and runtimes.

### Pure Rust UI

To build and test the UI made in Rust you will need [Trunk](https://trunkrs.dev/#install).
From the `candle-wasm-examples/whisper` directory, first download the assets:

```bash
# Mel filters
wget -c https://huggingface.co/spaces/lmz/candle-whisper/resolve/main/mel_filters.safetensors
# Model and tokenizer tiny.en
wget -c https://huggingface.co/openai/whisper-tiny.en/resolve/main/model.safetensors -P whisper-tiny.en
wget -c https://huggingface.co/openai/whisper-tiny.en/raw/main/tokenizer.json -P whisper-tiny.en
wget -c https://huggingface.co/openai/whisper-tiny.en/raw/main/config.json -P whisper-tiny.en
# Model and tokenizer tiny multilingual
wget -c https://huggingface.co/openai/whisper-tiny/resolve/main/model.safetensors -P whisper-tiny
wget -c https://huggingface.co/openai/whisper-tiny/raw/main/tokenizer.json -P whisper-tiny
wget -c https://huggingface.co/openai/whisper-tiny/raw/main/config.json -P whisper-tiny
# Quantized
wget -c https://huggingface.co/lmz/candle-whisper/resolve/main/model-tiny-en-q80.gguf -P quantized
wget -c https://huggingface.co/lmz/candle-whisper/raw/main/tokenizer-tiny-en.json -P quantized
wget -c https://huggingface.co/lmz/candle-whisper/raw/main/config-tiny-en.json -P quantized
# Audio samples
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_gb0.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_a13.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_gb1.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_hp0.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_jfk.wav -P audios
wget -c https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/samples_mm0.wav -P audios
```

Then run the hot-reload server:

```bash
trunk serve --release --public-url / --port 8080
```

### Vanilla JS and WebWorkers

To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library:

```bash
sh build-lib.sh
```

This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module:

```js
import init, { Decoder } from "./build/m.js";
```

The full example can be found under `./lib-example.html`. All needed assets are fetched from the web, so there is no need to download anything.
Finally, you can preview the example by running a local HTTP server. For example:

```bash
python -m http.server
```

Then open `http://localhost:8000/lib-example.html` in your browser.
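The worker's `complete` message carries the decoded segments in its `output` field (see `Segment` in `src/worker.rs`): each one has a start time, a duration, and a decoding result `dr` containing the text. A minimal sketch of consuming it, assuming the worker was created as in `lib-example.html`:

```js
whisperWorker.addEventListener("message", (event) => {
  if (event.data.status === "complete") {
    for (const { start, duration, dr } of event.data.output) {
      console.log(`${start.toFixed(2)}s-${(start + duration).toFixed(2)}s: ${dr.text}`);
    }
  }
});
```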
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/whisper/index.html
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <title>Welcome to Candle!</title> <link data-trunk rel="copy-file" href="mel_filters.safetensors" /> <!-- samples --> <link data-trunk rel="copy-dir" href="audios" /> <!-- tiny.en --> <link data-trunk rel="copy-dir" href="whisper-tiny.en" /> <!-- tiny --> <link data-trunk rel="copy-dir" href="whisper-tiny" /> <!-- quantized --> <link data-trunk rel="copy-dir" href="quantized" /> <link data-trunk rel="rust" href="Cargo.toml" data-bin="app" data-type="main" /> <link data-trunk rel="rust" href="Cargo.toml" data-bin="worker" data-type="worker" /> <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,300italic,700,700italic" /> <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.css" /> <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/milligram/1.4.1/milligram.css" /> </head> <body></body> </html>
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/whisper/build-lib.sh
#!/bin/bash
cargo build --target wasm32-unknown-unknown --release
wasm-bindgen ../../target/wasm32-unknown-unknown/release/m.wasm --out-dir build --target web
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/whisper/lib-example.html
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle Whisper Rust/WASM</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module"> // base url for audio examples const AUDIO_BASE_URL = "https://huggingface.co/datasets/Narsil/candle-examples/resolve/main/"; // models base url const MODELS = { tiny_multilingual: { base_url: "https://huggingface.co/openai/whisper-tiny/resolve/main/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", size: "151 MB", }, tiny_en: { base_url: "https://huggingface.co/openai/whisper-tiny.en/resolve/main/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", size: "151 MB", }, tiny_quantized_multilingual_q80: { base_url: "https://huggingface.co/lmz/candle-whisper/resolve/main/", model: "model-tiny-q80.gguf", tokenizer: "tokenizer-tiny.json", config: "config-tiny.json", size: "41.5 MB", }, tiny_en_quantized_q80: { base_url: "https://huggingface.co/lmz/candle-whisper/resolve/main/", model: "model-tiny-q80.gguf", tokenizer: "tokenizer-tiny-en.json", config: "config-tiny-en.json", size: "41.8 MB", }, distil_medium_en: { base_url: "https://huggingface.co/distil-whisper/distil-medium.en/resolve/main/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", size: "789 MB", }, }; const modelEl = document.querySelector("#model"); Object.keys(MODELS).forEach((modelID) => { const model = MODELS[modelID]; const option = document.createElement("option"); option.value = modelID; option.textContent = `${modelID} (${model.size})`; modelEl.appendChild(option); }); const whisperWorker = new Worker("./whisperWorker.js", { type: "module", }); async function classifyAudio( weightsURL, // URL to the weights file modelID, // model ID tokenizerURL, // URL to the tokenizer file configURL, // model config URL mel_filtersURL, // URL to the mel filters file audioURL, // URL to the audio file updateStatus // function to update the status ) { return new Promise((resolve, reject) => { whisperWorker.postMessage({ weightsURL, modelID, tokenizerURL, configURL, mel_filtersURL, audioURL, }); function messageHandler(event) { console.log(event.data); if ("status" in event.data) { updateStatus(event.data); } if ("error" in event.data) { whisperWorker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { whisperWorker.removeEventListener("message", messageHandler); resolve(event.data); } } whisperWorker.addEventListener("message", messageHandler); }); } // keep track of the audio URL let audioURL = null; function setAudio(src) { const audio = document.querySelector("#audio"); audio.src = src; audio.controls = true; audio.hidden = false; document.querySelector("#detect").disabled = false; audioURL = src; } // add event listener to audio buttons document.querySelectorAll("#audios-select > button").forEach((target) => { target.addEventListener("click", (e) => { const value = target.dataset.value; const href = AUDIO_BASE_URL + value; setAudio(href); }); }); //add event listener to file input 
document.querySelector("#file-upload").addEventListener("change", (e) => { const target = e.target; if (target.files.length > 0) { const href = URL.createObjectURL(target.files[0]); setAudio(href); } }); // add event listener to drop-area const dropArea = document.querySelector("#drop-area"); dropArea.addEventListener("dragenter", (e) => { e.preventDefault(); dropArea.classList.add("border-blue-700"); }); dropArea.addEventListener("dragleave", (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); }); dropArea.addEventListener("dragover", (e) => { e.preventDefault(); dropArea.classList.add("border-blue-700"); }); dropArea.addEventListener("drop", (e) => { e.preventDefault(); dropArea.classList.remove("border-blue-700"); const url = e.dataTransfer.getData("text/uri-list"); const files = e.dataTransfer.files; if (files.length > 0) { const href = URL.createObjectURL(files[0]); setAudio(href); } else if (url) { setAudio(url); } }); // add event listener to detect button document.querySelector("#detect").addEventListener("click", async () => { if (audioURL === null) { return; } const modelID = modelEl.value; const model = MODELS[modelID]; const modelURL = model.base_url + model.model; const tokenizerURL = model.base_url + model.tokenizer; const configURL = model.base_url + model.config; classifyAudio( modelURL, modelID, tokenizerURL, configURL, "mel_filters.safetensors", audioURL, updateStatus ) .then((result) => { console.log("RESULT", result); const { output } = result; const text = output.map((segment) => segment.dr.text).join(" "); console.log(text); document.querySelector("#output-status").hidden = true; document.querySelector("#output-generation").hidden = false; document.querySelector("#output-generation").textContent = text; }) .catch((error) => { console.error(error); }); }); function updateStatus(data) { const { status, message } = data; const button = document.querySelector("#detect"); if (status === "decoding" || status === "loading") { button.disabled = true; button.textContent = message; } else if (status === "complete") { button.disabled = false; button.textContent = "Transcribe Audio"; } } </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle Whisper</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> Transcribe audio in the browser using rust/wasm with an audio file. 
This demo uses the <a href="https://huggingface.co/openai/" target="_blank" class="underline hover:text-blue-500 hover:no-underline"> OpenAI Whisper models </a> and WASM runtime built with <a href="https://github.com/huggingface/candle/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >Candle </a> </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light"> </select> </div> <!-- drag and drop area --> <div class="relative"> <div id="drop-area" class="flex flex-col items-center justify-center border-2 border-gray-300 border-dashed rounded-xl relative h-48 w-full overflow-hidden"> <div class="flex flex-col items-center justify-center space-y-1 text-center"> <svg width="25" height="25" viewBox="0 0 25 25" fill="none" xmlns="http://www.w3.org/2000/svg"> <path d="M3.5 24.3a3 3 0 0 1-1.9-.8c-.5-.5-.8-1.2-.8-1.9V2.9c0-.7.3-1.3.8-1.9.6-.5 1.2-.7 2-.7h18.6c.7 0 1.3.2 1.9.7.5.6.7 1.2.7 2v18.6c0 .7-.2 1.4-.7 1.9a3 3 0 0 1-2 .8H3.6Zm0-2.7h18.7V2.9H3.5v18.7Zm2.7-2.7h13.3c.3 0 .5 0 .6-.3v-.7l-3.7-5a.6.6 0 0 0-.6-.2c-.2 0-.4 0-.5.3l-3.5 4.6-2.4-3.3a.6.6 0 0 0-.6-.3c-.2 0-.4.1-.5.3l-2.7 3.6c-.1.2-.2.4 0 .7.1.2.3.3.6.3Z" fill="#000" /> </svg> <div class="flex text-sm text-gray-600"> <label for="file-upload" class="relative cursor-pointer bg-white rounded-md font-medium text-blue-950 hover:text-blue-700"> <span>Drag and drop your audio here</span> <span class="block text-xs">or</span> <span class="block text-xs">Click to upload</span> </label> </div> <input id="file-upload" name="file-upload" type="file" accept="audio/*" class="sr-only" /> </div> <audio id="audio" hidden controls class="w-full p-2 select-none"></audio> </div> </div> <div> <div class="flex flex-wrap gap-3 items-center" id="audios-select"> <h3 class="font-medium">Examples:</h3> <button data-value="samples_jfk.wav" class="text-gray-500 border border-gray-500 rounded-md p-2 underline hover:no-underline"> <span>jfk.wav</span> <span class="text-xs block"> (352 kB)</span> </button> <button data-value="samples_a13.wav" class="text-gray-500 border border-gray-500 rounded-md p-2 underline hover:no-underline"> <span>a13.wav</span> <span class="text-xs block"> (960 kB)</span> </button> <button data-value="samples_mm0.wav" class="text-gray-500 border border-gray-500 rounded-md p-2 underline hover:no-underline"> <span>mm0.wav</span> <span class="text-xs block new"> (957 kB)</span> </button> <button data-value="samples_gb0.wav" class="text-gray-500 border border-gray-500 rounded-md p-2 underline hover:no-underline"> <span>gb0.wav </span> <span class="text-xs block">(4.08 MB)</span> </button> <button data-value="samples_gb1.wav" class="text-gray-500 border border-gray-500 rounded-md p-2 underline hover:no-underline"> <span>gb1.wav </span> <span class="text-xs block">(6.36 MB)</span> </button> <button data-value="samples_hp0.wav" class="text-gray-500 border border-gray-500 rounded-md p-2 underline hover:no-underline"> <span>hp0.wav </span> <span class="text-xs block">(8.75 MB)</span> </button> </div> </div> <div> <button id="detect" disabled class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 px-4 rounded disabled:bg-gray-300 disabled:cursor-not-allowed"> Transcribe Audio </button> </div> <div> <h3 class="font-medium">Transcription:</h3> <div class="min-h-[250px] bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2"> <p hidden id="output-generation" class="grid-rows-2"></p> <span id="output-status" class="m-auto 
font-light" >No transcription results yet</span > </div> </div> </main> </body> </html>
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/whisper/main.js
import init, { run_app } from './pkg/candle_wasm_example_whisper.js'; async function main() { await init('/pkg/candle_wasm_example_whisper_bg.wasm'); run_app(); } main()
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/whisper/Cargo.toml
[package] name = "candle-wasm-example-whisper" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [dependencies] candle = { path = "../../candle-core", version = "0.3.1", package = "candle-core" } candle-nn = { path = "../../candle-nn", version = "0.3.1" } candle-transformers = { path = "../../candle-transformers", version = "0.3.1" } num-traits = { workspace = true } tokenizers = { workspace = true, features = ["unstable_wasm"] } # App crates. anyhow = { workspace = true } log = { workspace = true } rand = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } wav = { workspace = true } safetensors = { workspace = true } # Wasm specific crates. getrandom = { version = "0.2", features = ["js"] } gloo = "0.8" js-sys = "0.3.64" wasm-bindgen = "0.2.87" wasm-bindgen-futures = "0.4.37" wasm-logger = "0.2" yew-agent = "0.2.0" yew = { version = "0.20.0", features = ["csr"] } [dependencies.web-sys] version = "0.3.64" features = [ 'Blob', 'Document', 'Element', 'HtmlElement', 'Node', 'Window', 'Request', 'RequestCache', 'RequestInit', 'RequestMode', 'Response', 'Performance', ]
0
hf_public_repos/candle/candle-wasm-examples/whisper
hf_public_repos/candle/candle-wasm-examples/whisper/src/worker.rs
use crate::languages::LANGUAGES; use anyhow::Error as E; use candle::{safetensors::Load, DType, Device, IndexOp, Tensor, D}; use candle_nn::{ops::softmax, VarBuilder}; pub use candle_transformers::models::whisper::{self as m, Config}; use rand::{distributions::Distribution, rngs::StdRng, SeedableRng}; use serde::{Deserialize, Serialize}; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; use yew_agent::{HandlerId, Public, WorkerLink}; #[wasm_bindgen] extern "C" { // Use `js_namespace` here to bind `console.log(..)` instead of just // `log(..)` #[wasm_bindgen(js_namespace = console)] pub fn log(s: &str); } #[macro_export] macro_rules! console_log { // Note that this is using the `log` function imported above during // `bare_bones` ($($t:tt)*) => ($crate::worker::log(&format_args!($($t)*).to_string())) } pub const DTYPE: DType = DType::F32; pub enum Model { Normal(m::model::Whisper), Quantized(m::quantized_model::Whisper), } // Maybe we should use some traits rather than doing the dispatch for all these. impl Model { pub fn config(&self) -> &Config { match self { Self::Normal(m) => &m.config, Self::Quantized(m) => &m.config, } } pub fn encoder_forward(&mut self, x: &Tensor, flush: bool) -> candle::Result<Tensor> { match self { Self::Normal(m) => m.encoder.forward(x, flush), Self::Quantized(m) => m.encoder.forward(x, flush), } } pub fn decoder_forward( &mut self, x: &Tensor, xa: &Tensor, flush: bool, ) -> candle::Result<Tensor> { match self { Self::Normal(m) => m.decoder.forward(x, xa, flush), Self::Quantized(m) => m.decoder.forward(x, xa, flush), } } pub fn decoder_final_linear(&self, x: &Tensor) -> candle::Result<Tensor> { match self { Self::Normal(m) => m.decoder.final_linear(x), Self::Quantized(m) => m.decoder.final_linear(x), } } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct DecodingResult { pub tokens: Vec<u32>, pub text: String, pub avg_logprob: f64, pub no_speech_prob: f64, temperature: f64, compression_ratio: f64, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct Segment { pub start: f64, pub duration: f64, pub dr: DecodingResult, } pub struct Decoder { model: Model, rng: rand::rngs::StdRng, task: Option<Task>, language: Option<String>, is_multilingual: bool, mel_filters: Vec<f32>, timestamps: bool, tokenizer: Tokenizer, suppress_tokens: Tensor, sot_token: u32, transcribe_token: u32, translate_token: u32, eot_token: u32, no_speech_token: u32, no_timestamps_token: u32, } impl Decoder { #[allow(clippy::too_many_arguments)] fn new( model: Model, tokenizer: Tokenizer, mel_filters: Vec<f32>, device: &Device, task: Option<Task>, language: Option<String>, is_multilingual: bool, timestamps: bool, ) -> anyhow::Result<Self> { let suppress_tokens: Vec<f32> = (0..model.config().vocab_size as u32) .map(|i| { if model.config().suppress_tokens.contains(&i) { f32::NEG_INFINITY } else { 0f32 } }) .collect(); let no_timestamps_token = token_id(&tokenizer, m::NO_TIMESTAMPS_TOKEN)?; let suppress_tokens = Tensor::new(suppress_tokens.as_slice(), device)?; let sot_token = token_id(&tokenizer, m::SOT_TOKEN)?; let transcribe_token = token_id(&tokenizer, m::TRANSCRIBE_TOKEN)?; let translate_token = token_id(&tokenizer, m::TRANSLATE_TOKEN)?; let eot_token = token_id(&tokenizer, m::EOT_TOKEN)?; let no_speech_token = m::NO_SPEECH_TOKENS .iter() .find_map(|token| token_id(&tokenizer, token).ok()); let no_speech_token = match no_speech_token { None => anyhow::bail!("unable to find any non-speech token"), Some(n) => n, }; let seed = 299792458; Ok(Self { model, rng: 
StdRng::seed_from_u64(seed),
            tokenizer,
            mel_filters,
            task,
            timestamps,
            language,
            is_multilingual,
            suppress_tokens,
            sot_token,
            transcribe_token,
            translate_token,
            eot_token,
            no_speech_token,
            no_timestamps_token,
        })
    }

    fn decode(&mut self, mel: &Tensor, t: f64) -> anyhow::Result<DecodingResult> {
        let model = &mut self.model;
        let language_token = match (self.is_multilingual, &self.language) {
            (true, None) => Some(detect_language(model, &self.tokenizer, mel)?),
            (false, None) => None,
            (true, Some(language)) => {
                // Look the language token up by its `<|xx|>` form, e.g. `<|en|>`.
                match token_id(&self.tokenizer, &format!("<|{language}|>")) {
                    Ok(token_id) => Some(token_id),
                    Err(_) => anyhow::bail!("language {language} is not supported"),
                }
            }
            (false, Some(_)) => {
                anyhow::bail!("a language cannot be set for non-multilingual models")
            }
        };
        let audio_features = model.encoder_forward(mel, true)?;
        println!("audio features: {:?}", audio_features.dims());
        let sample_len = model.config().max_target_positions / 2;
        let mut sum_logprob = 0f64;
        let mut no_speech_prob = f64::NAN;
        let mut tokens = vec![self.sot_token];
        if let Some(language_token) = language_token {
            tokens.push(language_token);
        }
        match self.task {
            None | Some(Task::Transcribe) => tokens.push(self.transcribe_token),
            Some(Task::Translate) => tokens.push(self.translate_token),
        }
        if !self.timestamps {
            tokens.push(self.no_timestamps_token);
        }
        for i in 0..sample_len {
            let tokens_t = Tensor::new(tokens.as_slice(), mel.device())?;
            // The model expects a batch dim but this inference loop does not handle
            // it so we add it at this point.
            let tokens_t = tokens_t.unsqueeze(0)?;
            let ys = model.decoder_forward(&tokens_t, &audio_features, i == 0)?;
            // Extract the no speech probability on the first iteration by looking at the first
            // token logits and the probability for the according token.
            if i == 0 {
                let logits = model.decoder_final_linear(&ys.i(..1)?)?.i(0)?.i(0)?;
                no_speech_prob = softmax(&logits, 0)?
                    .i(self.no_speech_token as usize)?
                    .to_scalar::<f32>()? as f64;
            }
            let (_, seq_len, _) = ys.dims3()?;
            let logits = model
                .decoder_final_linear(&ys.i((..1, seq_len - 1..))?)?
                .i(0)?
                .i(0)?;
            // TODO: Besides suppress tokens, we should apply the heuristics from
            // ApplyTimestampRules, i.e.:
            // - Timestamps come in pairs, except before EOT.
            // - Timestamps should be non-decreasing.
            // - If the sum of the probabilities of timestamps is higher than any other tokens,
            //   only consider timestamps when sampling.
            // https://github.com/openai/whisper/blob/e8622f9afc4eba139bf796c210f5c01081000472/whisper/decoding.py#L439
            let logits = logits.broadcast_add(&self.suppress_tokens)?;
            let next_token = if t > 0f64 {
                let prs = softmax(&(&logits / t)?, 0)?;
                let logits_v: Vec<f32> = prs.to_vec1()?;
                let distr = rand::distributions::WeightedIndex::new(&logits_v)?;
                distr.sample(&mut self.rng) as u32
            } else {
                let logits_v: Vec<f32> = logits.to_vec1()?;
                logits_v
                    .iter()
                    .enumerate()
                    .max_by(|(_, u), (_, v)| u.total_cmp(v))
                    .map(|(i, _)| i as u32)
                    .unwrap()
            };
            tokens.push(next_token);
            let prob = softmax(&logits, candle::D::Minus1)?
                .i(next_token as usize)?
                .to_scalar::<f32>()?
as f64; if next_token == self.eot_token || tokens.len() > model.config().max_target_positions { break; } sum_logprob += prob.ln(); } let text = self.tokenizer.decode(&tokens, true).map_err(E::msg)?; let avg_logprob = sum_logprob / tokens.len() as f64; Ok(DecodingResult { tokens, text, avg_logprob, no_speech_prob, temperature: t, compression_ratio: f64::NAN, }) } fn decode_with_fallback(&mut self, segment: &Tensor) -> anyhow::Result<DecodingResult> { for (i, &t) in m::TEMPERATURES.iter().enumerate() { let dr: Result<DecodingResult, _> = self.decode(segment, t); if i == m::TEMPERATURES.len() - 1 { return dr; } // On errors, we try again with a different temperature. match dr { Ok(dr) => { let needs_fallback = dr.compression_ratio > m::COMPRESSION_RATIO_THRESHOLD || dr.avg_logprob < m::LOGPROB_THRESHOLD; if !needs_fallback || dr.no_speech_prob > m::NO_SPEECH_THRESHOLD { return Ok(dr); } } Err(err) => { console_log!("Error running at {t}: {err}") } } } unreachable!() } fn run(&mut self, mel: &Tensor) -> anyhow::Result<Vec<Segment>> { let (_, _, content_frames) = mel.dims3()?; let mut seek = 0; let mut segments = vec![]; while seek < content_frames { let time_offset = (seek * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64; let segment_size = usize::min(content_frames - seek, m::N_FRAMES); let mel_segment = mel.narrow(2, seek, segment_size)?; let segment_duration = (segment_size * m::HOP_LENGTH) as f64 / m::SAMPLE_RATE as f64; let dr = self.decode_with_fallback(&mel_segment)?; seek += segment_size; if dr.no_speech_prob > m::NO_SPEECH_THRESHOLD && dr.avg_logprob < m::LOGPROB_THRESHOLD { console_log!("no speech detected, skipping {seek} {dr:?}"); continue; } let segment = Segment { start: time_offset, duration: segment_duration, dr, }; console_log!("{seek}: {segment:?}"); segments.push(segment) } Ok(segments) } pub fn load(md: ModelData) -> anyhow::Result<Self> { let device = Device::Cpu; let tokenizer = Tokenizer::from_bytes(&md.tokenizer).map_err(E::msg)?; let mel_filters = safetensors::tensor::SafeTensors::deserialize(&md.mel_filters)?; let mel_filters = mel_filters.tensor("mel_80")?.load(&device)?; console_log!("loaded mel filters {:?}", mel_filters.shape()); let mel_filters = mel_filters.flatten_all()?.to_vec1::<f32>()?; let config: Config = serde_json::from_slice(&md.config)?; let model = if md.quantized { let vb = candle_transformers::quantized_var_builder::VarBuilder::from_gguf_buffer( &md.weights, )?; Model::Quantized(m::quantized_model::Whisper::load(&vb, config)?) } else { let vb = VarBuilder::from_buffered_safetensors(md.weights, m::DTYPE, &device)?; Model::Normal(m::model::Whisper::load(&vb, config)?) }; console_log!("done loading model"); let task = match md.task.as_deref() { Some("translate") => Some(Task::Translate), _ => Some(Task::Transcribe), }; let decoder = Self::new( model, tokenizer, mel_filters, &device, task, md.language, md.is_multilingual, md.timestamps, )?; Ok(decoder) } pub fn convert_and_run(&mut self, wav_input: &[u8]) -> anyhow::Result<Vec<Segment>> { let device = Device::Cpu; let mut wav_input = std::io::Cursor::new(wav_input); let (header, data) = wav::read(&mut wav_input)?; console_log!("loaded wav data: {header:?}"); if header.sampling_rate != m::SAMPLE_RATE as u32 { anyhow::bail!("wav file must have a {} sampling rate", m::SAMPLE_RATE); } let data = data.as_sixteen().expect("expected 16 bit wav file"); let pcm_data: Vec<_> = data[..data.len() / header.channel_count as usize] .iter() .map(|v| *v as f32 / 32768.) 
.collect(); console_log!("pcm data loaded {}", pcm_data.len()); let mel = crate::audio::pcm_to_mel(self.model.config(), &pcm_data, &self.mel_filters)?; let mel_len = mel.len(); let n_mels = self.model.config().num_mel_bins; let mel = Tensor::from_vec(mel, (1, n_mels, mel_len / n_mels), &device)?; console_log!("loaded mel: {:?}", mel.dims()); let segments = self.run(&mel)?; Ok(segments) } } /// Returns the token id for the selected language. pub fn detect_language(model: &mut Model, tokenizer: &Tokenizer, mel: &Tensor) -> Result<u32, E> { console_log!("detecting language"); let (_bsize, _, seq_len) = mel.dims3()?; let mel = mel.narrow( 2, 0, usize::min(seq_len, model.config().max_source_positions), )?; let device = mel.device(); let language_token_ids = LANGUAGES .iter() .map(|(t, _)| token_id(tokenizer, &format!("<|{t}|>"))) .map(|e| e.map_err(E::msg)) .collect::<Result<Vec<_>, E>>()?; let sot_token = token_id(tokenizer, m::SOT_TOKEN)?; let audio_features = model.encoder_forward(&mel, true)?; let tokens = Tensor::new(&[[sot_token]], device)?; let language_token_ids = Tensor::new(language_token_ids.as_slice(), device)?; let ys = model.decoder_forward(&tokens, &audio_features, true)?; let logits = model.decoder_final_linear(&ys.i(..1)?)?.i(0)?.i(0)?; let logits = logits.index_select(&language_token_ids, 0)?; let probs = candle_nn::ops::softmax(&logits, D::Minus1)?; let probs = probs.to_vec1::<f32>()?; let mut probs = LANGUAGES.iter().zip(probs.iter()).collect::<Vec<_>>(); probs.sort_by(|(_, p1), (_, p2)| p2.total_cmp(p1)); for ((_, language), p) in probs.iter().take(5) { println!("{language}: {p}") } let token = &format!("<|{}|>", probs[0].0 .0); let language = token_id(tokenizer, token)?; console_log!("detected language: {language} {token}"); Ok(language) } pub fn token_id(tokenizer: &Tokenizer, token: &str) -> candle::Result<u32> { match tokenizer.token_to_id(token) { None => candle::bail!("no token-id for {token}"), Some(id) => Ok(id), } } #[derive(Serialize, Deserialize, Clone, Copy, Debug)] pub enum Task { Transcribe, Translate, } // Communication to the worker happens through bincode, the model weights and configs are fetched // on the main thread and transfered via the following structure. 
#[derive(Serialize, Deserialize)] pub struct ModelData { pub weights: Vec<u8>, pub tokenizer: Vec<u8>, pub mel_filters: Vec<u8>, pub config: Vec<u8>, pub quantized: bool, pub timestamps: bool, pub is_multilingual: bool, pub language: Option<String>, pub task: Option<String>, } pub struct Worker { link: WorkerLink<Self>, decoder: Option<Decoder>, } #[derive(Serialize, Deserialize)] pub enum WorkerInput { ModelData(ModelData), DecodeTask { wav_bytes: Vec<u8> }, } #[derive(Serialize, Deserialize)] pub enum WorkerOutput { Decoded(Vec<Segment>), WeightsLoaded, } impl yew_agent::Worker for Worker { type Input = WorkerInput; type Message = (); type Output = Result<WorkerOutput, String>; type Reach = Public<Self>; fn create(link: WorkerLink<Self>) -> Self { Self { link, decoder: None, } } fn update(&mut self, _msg: Self::Message) { // no messaging } fn handle_input(&mut self, msg: Self::Input, id: HandlerId) { let output = match msg { WorkerInput::ModelData(md) => match Decoder::load(md) { Ok(decoder) => { self.decoder = Some(decoder); Ok(WorkerOutput::WeightsLoaded) } Err(err) => Err(format!("model creation error {err:?}")), }, WorkerInput::DecodeTask { wav_bytes } => match &mut self.decoder { None => Err("model has not been set".to_string()), Some(decoder) => decoder .convert_and_run(&wav_bytes) .map(WorkerOutput::Decoded) .map_err(|e| e.to_string()), }, }; self.link.respond(id, output); } fn name_of_resource() -> &'static str { "worker.js" } fn resource_path_is_relative() -> bool { true } }
0
hf_public_repos/candle/candle-wasm-examples/whisper
hf_public_repos/candle/candle-wasm-examples/whisper/src/audio.rs
// Audio processing code, adapted from whisper.cpp // https://github.com/ggerganov/whisper.cpp use super::worker; pub trait Float: num_traits::Float + num_traits::FloatConst + num_traits::NumAssign {} impl Float for f32 {} impl Float for f64 {} // https://github.com/ggerganov/whisper.cpp/blob/4774d2feb01a772a15de81ffc34b34a1f294f020/whisper.cpp#L2357 fn fft<T: Float>(inp: &[T]) -> Vec<T> { let n = inp.len(); let zero = T::zero(); if n == 1 { return vec![inp[0], zero]; } if n % 2 == 1 { return dft(inp); } let mut out = vec![zero; n * 2]; let mut even = Vec::with_capacity(n / 2); let mut odd = Vec::with_capacity(n / 2); for (i, &inp) in inp.iter().enumerate() { if i % 2 == 0 { even.push(inp) } else { odd.push(inp); } } let even_fft = fft(&even); let odd_fft = fft(&odd); let two_pi = T::PI() + T::PI(); let n_t = T::from(n).unwrap(); for k in 0..n / 2 { let k_t = T::from(k).unwrap(); let theta = two_pi * k_t / n_t; let re = theta.cos(); let im = -theta.sin(); let re_odd = odd_fft[2 * k]; let im_odd = odd_fft[2 * k + 1]; out[2 * k] = even_fft[2 * k] + re * re_odd - im * im_odd; out[2 * k + 1] = even_fft[2 * k + 1] + re * im_odd + im * re_odd; out[2 * (k + n / 2)] = even_fft[2 * k] - re * re_odd + im * im_odd; out[2 * (k + n / 2) + 1] = even_fft[2 * k + 1] - re * im_odd - im * re_odd; } out } // https://github.com/ggerganov/whisper.cpp/blob/4774d2feb01a772a15de81ffc34b34a1f294f020/whisper.cpp#L2337 fn dft<T: Float>(inp: &[T]) -> Vec<T> { let zero = T::zero(); let n = inp.len(); let two_pi = T::PI() + T::PI(); let mut out = Vec::with_capacity(2 * n); let n_t = T::from(n).unwrap(); for k in 0..n { let k_t = T::from(k).unwrap(); let mut re = zero; let mut im = zero; for (j, &inp) in inp.iter().enumerate() { let j_t = T::from(j).unwrap(); let angle = two_pi * k_t * j_t / n_t; re += inp * angle.cos(); im -= inp * angle.sin(); } out.push(re); out.push(im); } out } #[allow(clippy::too_many_arguments)] // https://github.com/ggerganov/whisper.cpp/blob/4774d2feb01a772a15de81ffc34b34a1f294f020/whisper.cpp#L2414 fn log_mel_spectrogram_w<T: Float>( ith: usize, hann: &[T], samples: &[T], filters: &[T], fft_size: usize, fft_step: usize, speed_up: bool, n_len: usize, n_mel: usize, n_threads: usize, ) -> Vec<T> { let n_fft = if speed_up { 1 + fft_size / 4 } else { 1 + fft_size / 2 }; let zero = T::zero(); let half = T::from(0.5).unwrap(); let mut fft_in = vec![zero; fft_size]; let mut mel = vec![zero; n_len * n_mel]; for i in (ith..n_len).step_by(n_threads) { let offset = i * fft_step; // apply Hanning window for j in 0..fft_size { fft_in[j] = if offset + j < samples.len() { hann[j] * samples[offset + j] } else { zero } } // FFT -> mag^2 let mut fft_out: Vec<T> = fft(&fft_in); for j in 0..fft_size { fft_out[j] = fft_out[2 * j] * fft_out[2 * j] + fft_out[2 * j + 1] * fft_out[2 * j + 1]; } for j in 1..fft_size / 2 { let v = fft_out[fft_size - j]; fft_out[j] += v; } if speed_up { // scale down in the frequency domain results in a speed up in the time domain for j in 0..n_fft { fft_out[j] = half * (fft_out[2 * j] + fft_out[2 * j + 1]); } } // mel spectrogram for j in 0..n_mel { let mut sum = zero; for k in 0..n_fft { sum += fft_out[k] * filters[j * n_fft + k]; } mel[j * n_len + i] = T::max(sum, T::from(1e-10).unwrap()).log10(); } } mel } fn log_mel_spectrogram_<T: Float + std::fmt::Display>( samples: &[T], filters: &[T], fft_size: usize, fft_step: usize, n_mel: usize, speed_up: bool, ) -> Vec<T> { let zero = T::zero(); let two_pi = T::PI() + T::PI(); let half = T::from(0.5).unwrap(); let one = T::from(1.0).unwrap(); 
let four = T::from(4.0).unwrap(); let fft_size_t = T::from(fft_size).unwrap(); let hann: Vec<T> = (0..fft_size) .map(|i| half * (one - ((two_pi * T::from(i).unwrap()) / fft_size_t).cos())) .collect(); let n_len = samples.len() / fft_step; // pad audio with at least one extra chunk of zeros let pad = 100 * worker::m::CHUNK_LENGTH / 2; let n_len = if n_len % pad != 0 { (n_len / pad + 1) * pad } else { n_len }; let n_len = n_len + pad; let samples = { let mut samples_padded = samples.to_vec(); let to_add = n_len * fft_step - samples.len(); samples_padded.extend(std::iter::repeat(zero).take(to_add)); samples_padded }; // Use a single thread for now. let mut mel = log_mel_spectrogram_w( 0, &hann, &samples, filters, fft_size, fft_step, speed_up, n_len, n_mel, 1, ); let mmax = mel .iter() .max_by(|&u, &v| u.partial_cmp(v).unwrap_or(std::cmp::Ordering::Greater)) .copied() .unwrap_or(zero) - T::from(8).unwrap(); for m in mel.iter_mut() { let v = T::max(*m, mmax); *m = v / four + one } mel } pub fn pcm_to_mel<T: Float + std::fmt::Display>( cfg: &worker::m::Config, samples: &[T], filters: &[T], ) -> anyhow::Result<Vec<T>> { let mel = log_mel_spectrogram_( samples, filters, worker::m::N_FFT, worker::m::HOP_LENGTH, cfg.num_mel_bins, false, ); Ok(mel) }
0
hf_public_repos/candle/candle-wasm-examples/whisper
hf_public_repos/candle/candle-wasm-examples/whisper/src/languages.rs
pub const LANGUAGES: [(&str, &str); 99] = [ ("en", "english"), ("zh", "chinese"), ("de", "german"), ("es", "spanish"), ("ru", "russian"), ("ko", "korean"), ("fr", "french"), ("ja", "japanese"), ("pt", "portuguese"), ("tr", "turkish"), ("pl", "polish"), ("ca", "catalan"), ("nl", "dutch"), ("ar", "arabic"), ("sv", "swedish"), ("it", "italian"), ("id", "indonesian"), ("hi", "hindi"), ("fi", "finnish"), ("vi", "vietnamese"), ("he", "hebrew"), ("uk", "ukrainian"), ("el", "greek"), ("ms", "malay"), ("cs", "czech"), ("ro", "romanian"), ("da", "danish"), ("hu", "hungarian"), ("ta", "tamil"), ("no", "norwegian"), ("th", "thai"), ("ur", "urdu"), ("hr", "croatian"), ("bg", "bulgarian"), ("lt", "lithuanian"), ("la", "latin"), ("mi", "maori"), ("ml", "malayalam"), ("cy", "welsh"), ("sk", "slovak"), ("te", "telugu"), ("fa", "persian"), ("lv", "latvian"), ("bn", "bengali"), ("sr", "serbian"), ("az", "azerbaijani"), ("sl", "slovenian"), ("kn", "kannada"), ("et", "estonian"), ("mk", "macedonian"), ("br", "breton"), ("eu", "basque"), ("is", "icelandic"), ("hy", "armenian"), ("ne", "nepali"), ("mn", "mongolian"), ("bs", "bosnian"), ("kk", "kazakh"), ("sq", "albanian"), ("sw", "swahili"), ("gl", "galician"), ("mr", "marathi"), ("pa", "punjabi"), ("si", "sinhala"), ("km", "khmer"), ("sn", "shona"), ("yo", "yoruba"), ("so", "somali"), ("af", "afrikaans"), ("oc", "occitan"), ("ka", "georgian"), ("be", "belarusian"), ("tg", "tajik"), ("sd", "sindhi"), ("gu", "gujarati"), ("am", "amharic"), ("yi", "yiddish"), ("lo", "lao"), ("uz", "uzbek"), ("fo", "faroese"), ("ht", "haitian creole"), ("ps", "pashto"), ("tk", "turkmen"), ("nn", "nynorsk"), ("mt", "maltese"), ("sa", "sanskrit"), ("lb", "luxembourgish"), ("my", "myanmar"), ("bo", "tibetan"), ("tl", "tagalog"), ("mg", "malagasy"), ("as", "assamese"), ("tt", "tatar"), ("haw", "hawaiian"), ("ln", "lingala"), ("ha", "hausa"), ("ba", "bashkir"), ("jw", "javanese"), ("su", "sundanese"), ];
0
hf_public_repos/candle/candle-wasm-examples/whisper
hf_public_repos/candle/candle-wasm-examples/whisper/src/lib.rs
pub const WITH_TIMER: bool = true;

// RAII helper around the browser's `console.time` / `console.timeEnd`:
// a timer is started when the value is created and stopped when it is dropped.
#[allow(dead_code)]
struct Timer {
    label: &'static str,
}

#[allow(dead_code)]
impl Timer {
    fn new(label: &'static str) -> Self {
        if WITH_TIMER {
            web_sys::console::time_with_label(label);
        }
        Self { label }
    }
}

impl Drop for Timer {
    fn drop(&mut self) {
        if WITH_TIMER {
            web_sys::console::time_end_with_label(self.label)
        }
    }
}

mod app;
mod audio;
pub mod languages;
pub mod worker;
pub use app::App;
pub use worker::Worker;
0
hf_public_repos/candle/candle-wasm-examples/whisper
hf_public_repos/candle/candle-wasm-examples/whisper/src/app.rs
use crate::console_log; use crate::worker::{ModelData, Segment, Worker, WorkerInput, WorkerOutput}; use js_sys::Date; use wasm_bindgen::prelude::*; use wasm_bindgen_futures::JsFuture; use yew::{html, Component, Context, Html}; use yew_agent::{Bridge, Bridged}; const SAMPLE_NAMES: [&str; 6] = [ "audios/samples_jfk.wav", "audios/samples_a13.wav", "audios/samples_gb0.wav", "audios/samples_gb1.wav", "audios/samples_hp0.wav", "audios/samples_mm0.wav", ]; async fn fetch_url(url: &str) -> Result<Vec<u8>, JsValue> { use web_sys::{Request, RequestCache, RequestInit, RequestMode, Response}; let window = web_sys::window().ok_or("window")?; let mut opts = RequestInit::new(); let opts = opts .method("GET") .mode(RequestMode::Cors) .cache(RequestCache::NoCache); let request = Request::new_with_str_and_init(url, opts)?; let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?; // `resp_value` is a `Response` object. assert!(resp_value.is_instance_of::<Response>()); let resp: Response = resp_value.dyn_into()?; let data = JsFuture::from(resp.blob()?).await?; let blob = web_sys::Blob::from(data); let array_buffer = JsFuture::from(blob.array_buffer()).await?; let data = js_sys::Uint8Array::new(&array_buffer).to_vec(); Ok(data) } pub enum Msg { Run(usize), UpdateStatus(String), SetDecoder(ModelData), WorkerInMsg(WorkerInput), WorkerOutMsg(Result<WorkerOutput, String>), } pub struct CurrentDecode { start_time: Option<f64>, } pub struct App { status: String, loaded: bool, segments: Vec<Segment>, current_decode: Option<CurrentDecode>, worker: Box<dyn Bridge<Worker>>, } async fn model_data_load() -> Result<ModelData, JsValue> { let quantized = false; let is_multilingual = false; let (tokenizer, mel_filters, weights, config) = if quantized { console_log!("loading quantized weights"); let tokenizer = fetch_url("quantized/tokenizer-tiny-en.json").await?; let mel_filters = fetch_url("mel_filters.safetensors").await?; let weights = fetch_url("quantized/model-tiny-en-q80.gguf").await?; let config = fetch_url("quantized/config-tiny-en.json").await?; (tokenizer, mel_filters, weights, config) } else { console_log!("loading float weights"); if is_multilingual { let mel_filters = fetch_url("mel_filters.safetensors").await?; let tokenizer = fetch_url("whisper-tiny/tokenizer.json").await?; let weights = fetch_url("whisper-tiny/model.safetensors").await?; let config = fetch_url("whisper-tiny/config.json").await?; (tokenizer, mel_filters, weights, config) } else { let mel_filters = fetch_url("mel_filters.safetensors").await?; let tokenizer = fetch_url("whisper-tiny.en/tokenizer.json").await?; let weights = fetch_url("whisper-tiny.en/model.safetensors").await?; let config = fetch_url("whisper-tiny.en/config.json").await?; (tokenizer, mel_filters, weights, config) } }; let timestamps = true; let _task = Some("transcribe".to_string()); console_log!("{}", weights.len()); Ok(ModelData { tokenizer, mel_filters, weights, config, quantized, timestamps, task: None, is_multilingual, language: None, }) } fn performance_now() -> Option<f64> { let window = web_sys::window()?; let performance = window.performance()?; Some(performance.now() / 1000.) 
}

impl Component for App {
    type Message = Msg;
    type Properties = ();

    fn create(ctx: &Context<Self>) -> Self {
        let status = "loading weights".to_string();
        let cb = {
            let link = ctx.link().clone();
            move |e| link.send_message(Self::Message::WorkerOutMsg(e))
        };
        let worker = Worker::bridge(std::rc::Rc::new(cb));
        Self {
            status,
            segments: vec![],
            current_decode: None,
            worker,
            loaded: false,
        }
    }

    fn rendered(&mut self, ctx: &Context<Self>, first_render: bool) {
        if first_render {
            ctx.link().send_future(async {
                match model_data_load().await {
                    Err(err) => {
                        let status = format!("{err:?}");
                        Msg::UpdateStatus(status)
                    }
                    Ok(model_data) => Msg::SetDecoder(model_data),
                }
            });
        }
    }

    fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
        match msg {
            Msg::SetDecoder(md) => {
                self.status = "weights loaded successfully!".to_string();
                self.loaded = true;
                console_log!("loaded weights");
                self.worker.send(WorkerInput::ModelData(md));
                true
            }
            Msg::Run(sample_index) => {
                let sample = SAMPLE_NAMES[sample_index];
                if self.current_decode.is_some() {
                    self.status = "already decoding some sample at the moment".to_string()
                } else {
                    let start_time = performance_now();
                    self.current_decode = Some(CurrentDecode { start_time });
                    self.status = format!("decoding {sample}");
                    self.segments.clear();
                    ctx.link().send_future(async move {
                        match fetch_url(sample).await {
                            Err(err) => {
                                let output = Err(format!("decoding error: {err:?}"));
                                // Mimic a worker output so as to release current_decode.
                                Msg::WorkerOutMsg(output)
                            }
                            Ok(wav_bytes) => {
                                Msg::WorkerInMsg(WorkerInput::DecodeTask { wav_bytes })
                            }
                        }
                    })
                }
                true
            }
            Msg::WorkerOutMsg(output) => {
                let dt = self.current_decode.as_ref().and_then(|current_decode| {
                    current_decode.start_time.and_then(|start_time| {
                        performance_now().map(|stop_time| stop_time - start_time)
                    })
                });
                self.current_decode = None;
                match output {
                    Ok(WorkerOutput::WeightsLoaded) => self.status = "weights loaded!".to_string(),
                    Ok(WorkerOutput::Decoded(segments)) => {
                        self.status = match dt {
                            None => "decoding succeeded!".to_string(),
                            Some(dt) => format!("decoding succeeded in {:.2}s", dt),
                        };
                        self.segments = segments;
                    }
                    Err(err) => {
                        self.status = format!("decoding error {err:?}");
                    }
                }
                true
            }
            Msg::WorkerInMsg(inp) => {
                self.worker.send(inp);
                true
            }
            Msg::UpdateStatus(status) => {
                self.status = status;
                true
            }
        }
    }

    fn view(&self, ctx: &Context<Self>) -> Html {
        html! {
            <div>
                <table>
                    <thead>
                        <tr>
                            <th>{"Sample"}</th>
                            <th></th>
                            <th></th>
                        </tr>
                    </thead>
                    <tbody>
                        {
                            SAMPLE_NAMES.iter().enumerate().map(|(i, name)| {
                                html! {
                                    <tr>
                                        <th>{name}</th>
                                        <th><audio controls=true src={format!("./{name}")}></audio></th>
                                        {
                                            if self.loaded {
                                                html!(<th><button class="button" onclick={ctx.link().callback(move |_| Msg::Run(i))}> { "run" }</button></th>)
                                            } else {
                                                html!()
                                            }
                                        }
                                    </tr>
                                }
                            }).collect::<Html>()
                        }
                    </tbody>
                </table>
                <h2>
                    {&self.status}
                </h2>
                {
                    if !self.loaded {
                        html! { <progress id="progress-bar" aria-label="loading weights…"></progress> }
                    } else if self.current_decode.is_some() {
                        html! { <progress id="progress-bar" aria-label="decoding…"></progress> }
                    } else {
                        html! {
                            <blockquote>
                                <p>
                                    {
                                        self.segments.iter().map(|segment| {
                                            html! {
                                                <>
                                                    <i>
                                                        {
                                                            format!("{:.2}s-{:.2}s: (avg-logprob: {:.4}, no-speech-prob: {:.4})",
                                                                segment.start,
                                                                segment.start + segment.duration,
                                                                segment.dr.avg_logprob,
                                                                segment.dr.no_speech_prob,
                                                            )
                                                        }
                                                    </i>
                                                    <br/>
                                                    {&segment.dr.text}
                                                    <br/>
                                                </>
                                            }
                                        }).collect::<Html>()
                                    }
                                </p>
                            </blockquote>
                        }
                    }
                }
                // Display the current date and time the page was rendered.
                <p class="footer">
                    { "Rendered: " }
                    { String::from(Date::new_0().to_string()) }
                </p>
            </div>
        }
    }
}
0
hf_public_repos/candle/candle-wasm-examples/whisper/src
hf_public_repos/candle/candle-wasm-examples/whisper/src/bin/worker.rs
use yew_agent::PublicWorker;

fn main() {
    candle_wasm_example_whisper::Worker::register();
}
0
hf_public_repos/candle/candle-wasm-examples/whisper/src
hf_public_repos/candle/candle-wasm-examples/whisper/src/bin/m.rs
use candle_wasm_example_whisper::worker::{Decoder as D, ModelData}; use wasm_bindgen::prelude::*; #[wasm_bindgen] pub struct Decoder { decoder: D, } #[wasm_bindgen] impl Decoder { #[wasm_bindgen(constructor)] #[allow(clippy::too_many_arguments)] pub fn new( weights: Vec<u8>, tokenizer: Vec<u8>, mel_filters: Vec<u8>, config: Vec<u8>, quantized: bool, is_multilingual: bool, timestamps: bool, task: Option<String>, language: Option<String>, ) -> Result<Decoder, JsError> { let decoder = D::load(ModelData { tokenizer, mel_filters, config, quantized, weights, is_multilingual, timestamps, task, language, }); match decoder { Ok(decoder) => Ok(Self { decoder }), Err(e) => Err(JsError::new(&e.to_string())), } } #[wasm_bindgen] pub fn decode(&mut self, wav_input: Vec<u8>) -> Result<String, JsError> { let segments = self .decoder .convert_and_run(&wav_input) .map_err(|e| JsError::new(&e.to_string()))?; let json = serde_json::to_string(&segments)?; Ok(json) } } fn main() {}
0
hf_public_repos/candle/candle-wasm-examples/whisper/src
hf_public_repos/candle/candle-wasm-examples/whisper/src/bin/app.rs
fn main() {
    wasm_logger::init(wasm_logger::Config::new(log::Level::Trace));
    yew::Renderer::<candle_wasm_example_whisper::App>::new().render();
}
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/t5/T5ModelConditionalGeneration.js
// Load the Candle T5 wasm module.
let init, ModelConditionalGeneration;

async function fetchArrayBuffer(url) {
  const cacheName = "t5-candle-cache";
  const cache = await caches.open(cacheName);
  const cachedResponse = await cache.match(url);
  if (cachedResponse) {
    const data = await cachedResponse.arrayBuffer();
    return new Uint8Array(data);
  }
  const res = await fetch(url, { cache: "force-cache" });
  cache.put(url, res.clone());
  return new Uint8Array(await res.arrayBuffer());
}

class ConditionalGeneration {
  static instance = {};

  static async getInstance(weightsURL, tokenizerURL, configURL, modelID) {
    if (modelID.includes("quantized")) {
      ({ default: init, ModelConditionalGeneration } = await import(
        "./build/m-quantized.js"
      ));
    } else {
      ({ default: init, ModelConditionalGeneration } = await import(
        "./build/m.js"
      ));
    }
    if (!this.instance[modelID]) {
      await init();
      self.postMessage({ status: "loading", message: "Loading Model" });
      const [weightsArrayU8, tokenizerArrayU8, configArrayU8] =
        await Promise.all([
          fetchArrayBuffer(weightsURL),
          fetchArrayBuffer(tokenizerURL),
          fetchArrayBuffer(configURL),
        ]);
      this.instance[modelID] = new ModelConditionalGeneration(
        weightsArrayU8,
        tokenizerArrayU8,
        configArrayU8
      );
    } else {
      self.postMessage({ status: "ready", message: "Model Already Loaded" });
    }
    return this.instance[modelID];
  }
}

self.addEventListener("message", async (event) => {
  const { weightsURL, tokenizerURL, configURL, modelID, prompt, params } =
    event.data;
  let {
    temperature = 0.0,
    seed = 299792458,
    repeat_penalty = 1.1,
    repeat_last_n = 64,
    top_p = 1,
  } = { ...params };
  try {
    self.postMessage({
      status: "ready",
      message: "Starting T5 Conditional Generation",
    });
    const model = await ConditionalGeneration.getInstance(
      weightsURL,
      tokenizerURL,
      configURL,
      modelID
    );
    self.postMessage({
      status: "decoding",
      message: "Decoding Prompt",
    });
    const output = model.decode({
      prompt,
      temperature,
      seed,
      top_p,
      repeat_penalty,
      repeat_last_n,
    });
    self.postMessage({
      status: "complete",
      message: "complete",
      output: output,
    });
  } catch (e) {
    self.postMessage({ error: e });
  }
});
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/t5/README.md
## Running T5 with Candle and WASM

Here, we provide an example of how to run T5 using a Candle-compiled WASM binary and runtime.

### Vanilla JS and WebWorkers

To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library:

```bash
sh build-lib.sh
```

This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module:

```js
import init, { ModelConditionalGeneration, ModelEncoder } from "./build/m.js";
```

For the quantized version, we need to import the quantized module:

```js
import init, { ModelConditionalGeneration, ModelEncoder } from "./build/m-quantized.js";
```

The full example can be found under `./index.html`. All needed assets are fetched from the web, so no need to download anything. Finally, you can preview the example by running a local HTTP server. For example:

```bash
python -m http.server
```

Then open `http://localhost:8000/index.html` in your browser.
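For reference, below is a minimal sketch of how a page might drive the conditional-generation worker directly. The message shape follows what `T5ModelConditionalGeneration.js` destructures from `event.data`; the model URLs, model ID, and prompt are placeholder assumptions lifted from the `t5_small_quantized` entry in `utils.js`, not the only valid values:

```js
// Hypothetical driver for T5ModelConditionalGeneration.js; the URLs and
// model ID are placeholders taken from the t5_small_quantized entry in utils.js.
const worker = new Worker("./T5ModelConditionalGeneration.js", { type: "module" });

const base = "https://huggingface.co/lmz/candle-quantized-t5/resolve/main/";
worker.postMessage({
  weightsURL: base + "model.gguf",
  tokenizerURL: base + "tokenizer.json",
  configURL: base + "config.json",
  modelID: "t5_small_quantized",
  prompt: "translate English to German: Today I'm going to eat Ice Cream",
  params: {
    temperature: 0,
    top_p: 1,
    repeat_penalty: 1.1,
    seed: BigInt(299792458),
  },
});

worker.addEventListener("message", (event) => {
  // The worker posts "loading"/"decoding" progress updates, then "complete"
  // with the generated text, or an { error } payload on failure.
  if (event.data.error) {
    console.error(event.data.error);
  } else if (event.data.status === "complete") {
    console.log(event.data.output.generation);
  }
});
```

`utils.js` wraps this same postMessage/listener exchange in a `generateText` promise, which is what `index.html` actually uses.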
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/t5/T5ModelEncoderWorker.js
// Load the Candle T5 wasm module.
let init, ModelEncoder;

async function fetchArrayBuffer(url) {
  const cacheName = "t5-candle-cache";
  const cache = await caches.open(cacheName);
  const cachedResponse = await cache.match(url);
  if (cachedResponse) {
    const data = await cachedResponse.arrayBuffer();
    return new Uint8Array(data);
  }
  const res = await fetch(url, { cache: "force-cache" });
  cache.put(url, res.clone());
  return new Uint8Array(await res.arrayBuffer());
}

class Encoder {
  static instance = {};

  static async getInstance(weightsURL, tokenizerURL, configURL, modelID) {
    if (modelID.includes("quantized")) {
      ({ default: init, ModelEncoder } = await import(
        "./build/m-quantized.js"
      ));
    } else {
      ({ default: init, ModelEncoder } = await import("./build/m.js"));
    }
    if (!this.instance[modelID]) {
      await init();
      self.postMessage({ status: "loading", message: "Loading Model" });
      const [weightsArrayU8, tokenizerArrayU8, configArrayU8] =
        await Promise.all([
          fetchArrayBuffer(weightsURL),
          fetchArrayBuffer(tokenizerURL),
          fetchArrayBuffer(configURL),
        ]);
      this.instance[modelID] = new ModelEncoder(
        weightsArrayU8,
        tokenizerArrayU8,
        configArrayU8
      );
    } else {
      self.postMessage({ status: "ready", message: "Model Already Loaded" });
    }
    return this.instance[modelID];
  }
}

self.addEventListener("message", async (event) => {
  const {
    weightsURL,
    tokenizerURL,
    configURL,
    modelID,
    sentences,
    normalize_embeddings,
  } = event.data;
  try {
    self.postMessage({ status: "ready", message: "Starting T5 Encoder" });
    const model = await Encoder.getInstance(
      weightsURL,
      tokenizerURL,
      configURL,
      modelID
    );
    self.postMessage({
      status: "encoding",
      message: "Encoding Sentences",
    });
    const output = model.decode({
      sentences: sentences,
      // Default to normalized embeddings when the flag is not provided.
      // (`?? true` rather than `|| true`, which would override an explicit `false`.)
      normalize_embeddings: normalize_embeddings ?? true,
    });
    self.postMessage({
      status: "complete",
      message: "complete",
      output: output,
    });
  } catch (e) {
    self.postMessage({ error: e });
  }
});
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/t5/index.html
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle T5</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } </style> <style type="text/tailwindcss"> .link { @apply underline hover:text-blue-500 hover:no-underline; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module"> import { getModelInfo, MODELS, extractEmbeddings, generateText, } from "./utils.js"; const t5ModelEncoderWorker = new Worker("./T5ModelEncoderWorker.js", { type: "module", }); const t5ModelConditionalGeneration = new Worker( "./T5ModelConditionalGeneration.js", { type: "module" } ); const formEl = document.querySelector("#form"); const modelEl = document.querySelector("#model"); const promptEl = document.querySelector("#prompt"); const temperatureEl = document.querySelector("#temperature"); const toppEL = document.querySelector("#top-p"); const repeatPenaltyEl = document.querySelector("#repeat_penalty"); const seedEl = document.querySelector("#seed"); const outputEl = document.querySelector("#output-generation"); const tasksEl = document.querySelector("#tasks"); let selectedTaskID = ""; document.addEventListener("DOMContentLoaded", () => { for (const [id, model] of Object.entries(MODELS)) { const option = document.createElement("option"); option.value = id; option.innerText = `${id} (${model.size})`; modelEl.appendChild(option); } populateTasks(modelEl.value); modelEl.addEventListener("change", (e) => { populateTasks(e.target.value); }); tasksEl.addEventListener("change", (e) => { const task = e.target.value; const modelID = modelEl.value; promptEl.value = MODELS[modelID].tasks[task].prefix; selectedTaskID = task; }); }); function populateTasks(modelID) { const tasks = MODELS[modelID].tasks; tasksEl.innerHTML = ""; for (const [task, params] of Object.entries(tasks)) { const div = document.createElement("div"); div.innerHTML = ` <input type="radio" name="task" id="${task}" class="font-light cursor-pointer" value="${task}" /> <label for="${task}" class="cursor-pointer"> ${params.prefix} </label> `; tasksEl.appendChild(div); } selectedTaskID = Object.keys(tasks)[0]; tasksEl.querySelector(`#${selectedTaskID}`).checked = true; } form.addEventListener("submit", (e) => { e.preventDefault(); const promptText = promptEl.value; const modelID = modelEl.value; const { modelURL, configURL, tokenizerURL, maxLength } = getModelInfo( modelID, selectedTaskID ); const params = { temperature: Number(temperatureEl.value), top_p: Number(toppEL.value), repetition_penalty: Number(repeatPenaltyEl.value), seed: BigInt(seedEl.value), max_length: maxLength, }; generateText( t5ModelConditionalGeneration, modelURL, tokenizerURL, configURL, modelID, promptText, params, (status) => { if (status.status === "loading") { outputEl.innerText = "Loading model..."; } if (status.status === "decoding") { outputEl.innerText = "Generating..."; } } ).then(({ output }) => { outputEl.innerText = output.generation; }); }); </script> </head> <body class="container max-w-4xl mx-auto p-4"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle T5 Transformer</h1> <h2 
class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> This demo showcase Text-To-Text Transfer Transformer (<a href="https://blog.research.google/2020/02/exploring-transfer-learning-with-t5.html" target="_blank" class="link" >T5</a >) models right in your browser, thanks to <a href="https://github.com/huggingface/candle/" target="_blank" class="link"> Candle </a> ML framework and rust/wasm. You can choose from a range of available models, including <a href="https://huggingface.co/t5-small" target="_blank" class="link"> t5-small</a >, <a href="https://huggingface.co/t5-base" target="_blank" class="link" >t5-base</a >, <a href="https://huggingface.co/google/flan-t5-small" target="_blank" class="link" >flan-t5-small</a >, several <a href="https://huggingface.co/lmz/candle-quantized-t5/tree/main" target="_blank" class="link"> t5 quantized gguf models</a >, and also a quantized <a href="https://huggingface.co/jbochi/candle-coedit-quantized/tree/main" target="_blank" class="link"> CoEdIT model for text rewrite</a >. </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light"></select> </div> <div> <h3 class="font-medium">Task Prefix:</h3> <form id="tasks" class="flex flex-col gap-1 my-2"></form> </div> <form id="form" class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center"> <input type="submit" hidden /> <input type="text" id="prompt" class="font-light w-full px-3 py-2 mx-1 resize-none outline-none" placeholder="Add prompt here, e.g. 'translate English to German: Today I'm going to eat Ice Cream'" value="translate English to German: Today I'm going to eat Ice Cream" /> <button class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed"> Run </button> </form> <div class="grid grid-cols-3 max-w-md items-center gap-3"> <label class="text-sm font-medium" for="temperature">Temperature</label> <input type="range" id="temperature" name="temperature" min="0" max="2" step="0.01" value="0.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 0.00</output > <label class="text-sm font-medium" for="top-p">Top-p</label> <input type="range" id="top-p" name="top-p" min="0" max="1" step="0.01" value="1.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 1.00</output > <label class="text-sm font-medium" for="repeat_penalty" >Repeat Penalty</label > <input type="range" id="repeat_penalty" name="repeat_penalty" min="1" max="2" step="0.01" value="1.10" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" >1.10</output > <label class="text-sm font-medium" for="seed">Seed</label> <input type="number" id="seed" name="seed" value="299792458" class="font-light border border-gray-700 text-right rounded-md p-2" /> <button id="run" onclick="document.querySelector('#seed').value = BigInt(Math.floor(Math.random() * 2**64-1))" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-1 w-[50px] rounded disabled:bg-gray-300 disabled:cursor-not-allowed text-sm"> Rand </button> </div> <div> <h3 class="font-medium">Generation:</h3> <div class="min-h-[250px] 
bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2 text-lg"> <p id="output-generation" class="grid-rows-2">No output yet</p> </div> </div> </main> </body> </html>
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/t5/utils.js
export async function extractEmbeddings( worker, weightsURL, tokenizerURL, configURL, modelID, sentences, updateStatus, normalize_embeddings = true ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, sentences, normalize_embeddings, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } export async function generateText( worker, weightsURL, tokenizerURL, configURL, modelID, prompt, params, updateStatus ) { return new Promise((resolve, reject) => { worker.postMessage({ weightsURL, tokenizerURL, configURL, modelID, prompt, params, }); function messageHandler(event) { if ("error" in event.data) { worker.removeEventListener("message", messageHandler); reject(new Error(event.data.error)); } if (event.data.status === "complete") { worker.removeEventListener("message", messageHandler); resolve(event.data); } if (updateStatus) updateStatus(event.data); } worker.addEventListener("message", messageHandler); }); } export const MODELS = { t5_small_quantized: { size: "64.4 MB", base_url: "https://huggingface.co/lmz/candle-quantized-t5/resolve/main/", model: "model.gguf", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, t5_small: { size: "242 MB", base_url: "https://huggingface.co/t5-small/resolve/main/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, flan_t5_small: { size: "308 MB", base_url: "https://huggingface.co/google/flan-t5-small/resolve/refs%2Fpr%2F14/", model: "model.safetensors", tokenizer: "tokenizer.json", config: "config.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, flan_t5_base_quantized: { size: "263 MB", base_url: "https://huggingface.co/lmz/candle-quantized-t5/resolve/main/", model: "model-flan-t5-base.gguf", tokenizer: "tokenizer.json", config: "config-flan-t5-base.json", tasks: { translation_en_to_de: { prefix: "translate English to German: ", max_length: 300, }, translation_en_to_fr: { prefix: "translate English to French: ", max_length: 300, }, translation_en_to_ro: { prefix: "translate English to Romanian: ", max_length: 300, }, summarization: { prefix: "summarize: ", max_length: 200 }, }, }, coedit_large_quantized: { size: "643 MB", base_url: "https://huggingface.co/jbochi/candle-coedit-quantized/resolve/main/", model: 
"model.gguf", tokenizer: "tokenizer.json", config: "config.json", tasks: { fluency: { prefix: "Fix the grammar: ", max_length: 300, }, coherence: { prefix: "Rewrite to make this easier to understand: ", max_length: 300, }, simplification: { prefix: "translate English to Romanian: ", max_length: 300, }, simplification: { prefix: "Paraphrase this: ", max_length: 300, }, formalization: { prefix: "Write this more formally: ", max_length: 300, }, neutralize: { prefix: "Write in a more neutral way: ", max_length: 300, }, }, }, }; export function getModelInfo(id, taskID) { const model = MODELS[id]; return { modelURL: model.base_url + model.model, configURL: model.base_url + model.config, tokenizerURL: model.base_url + model.tokenizer, maxLength: model.tasks[taskID].max_length, }; }
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/t5/build-lib.sh
cargo build --target wasm32-unknown-unknown --release
wasm-bindgen ../../target/wasm32-unknown-unknown/release/m.wasm --out-dir build --target web
wasm-bindgen ../../target/wasm32-unknown-unknown/release/m-quantized.wasm --out-dir build --target web
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/t5/Cargo.toml
[package] name = "candle-wasm-example-t5" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [dependencies] candle = { path = "../../candle-core", version = "0.3.1", package = "candle-core" } candle-nn = { path = "../../candle-nn", version = "0.3.1" } candle-transformers = { path = "../../candle-transformers", version = "0.3.1" } num-traits = { workspace = true } tokenizers = { workspace = true, features = ["unstable_wasm"] } # App crates. anyhow = { workspace = true } byteorder = { workspace = true } log = { workspace = true } rand = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } safetensors = { workspace = true } # Wasm specific crates. console_error_panic_hook = "0.1.7" getrandom = { version = "0.2", features = ["js"] } gloo = "0.8" js-sys = "0.3.64" wasm-bindgen = "0.2.87" serde-wasm-bindgen = "0.6.0"
0
hf_public_repos/candle/candle-wasm-examples/t5
hf_public_repos/candle/candle-wasm-examples/t5/src/lib.rs
use wasm_bindgen::prelude::*;

#[wasm_bindgen]
extern "C" {
    // Use `js_namespace` here to bind `console.log(..)` instead of just
    // `log(..)`
    #[wasm_bindgen(js_namespace = console)]
    pub fn log(s: &str);
}

#[macro_export]
macro_rules! console_log {
    // Note that this is using the `log` function imported above during
    // `bare_bones`
    ($($t:tt)*) => ($crate::log(&format_args!($($t)*).to_string()))
}
0
hf_public_repos/candle/candle-wasm-examples/t5/src
hf_public_repos/candle/candle-wasm-examples/t5/src/bin/m.rs
use candle::{DType, Device, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; pub use candle_transformers::models::t5::{Config, T5EncoderModel, T5ForConditionalGeneration}; use candle_wasm_example_t5::console_log; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; #[wasm_bindgen] pub struct ModelEncoder { model: T5EncoderModel, tokenizer: Tokenizer, } #[wasm_bindgen] pub struct ModelConditionalGeneration { model: T5ForConditionalGeneration, tokenizer: Tokenizer, config: Config, } #[wasm_bindgen] impl ModelConditionalGeneration { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, ) -> Result<ModelConditionalGeneration, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let device = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, device)?; let mut config: Config = serde_json::from_slice(&config)?; let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let model = T5ForConditionalGeneration::load(vb, &config)?; config.use_cache = false; Ok(Self { model, tokenizer, config, }) } pub fn decode(&mut self, input: JsValue) -> Result<JsValue, JsError> { let input: ConditionalGenerationParams = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; let device = &Device::Cpu; self.model.clear_kv_cache(); let mut output_token_ids = [self.config.pad_token_id as u32].to_vec(); let prompt = input.prompt; let repeat_penalty = input.repeat_penalty; let repeat_last_n = input.repeat_last_n; let seed = input.seed; let max_length = usize::clamp(input.max_length.unwrap_or(512), 0, 512); let temperature = if input.temperature <= 0. { None } else { Some(input.temperature) }; let top_p = if input.top_p <= 0. || input.top_p >= 1. { None } else { Some(input.top_p) }; let mut logits_processor = LogitsProcessor::new(seed, temperature, top_p); let tokens = self .tokenizer .encode(prompt, true) .map_err(|m| JsError::new(&m.to_string()))? .get_ids() .to_vec(); let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let encoder_output = self.model.encode(&input_token_ids)?; let mut decoded = String::new(); for index in 0.. { if output_token_ids.len() > max_length { break; } let decoder_token_ids = if index == 0 { Tensor::new(output_token_ids.as_slice(), device)?.unsqueeze(0)? } else { let last_token = *output_token_ids.last().unwrap(); Tensor::new(&[last_token], device)?.unsqueeze(0)? }; let logits = self .model .decode(&decoder_token_ids, &encoder_output)? .squeeze(0)?; let logits = if repeat_penalty == 1. { logits } else { let start_at = output_token_ids.len().saturating_sub(repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, repeat_penalty, &output_token_ids[start_at..], )? }; let next_token_id = logits_processor.sample(&logits)?; if next_token_id as usize == self.config.eos_token_id { break; } output_token_ids.push(next_token_id); if let Some(text) = self.tokenizer.id_to_token(next_token_id) { let text = text.replace('▁', " ").replace("<0x0A>", "\n"); decoded += &text; } } Ok(serde_wasm_bindgen::to_value( &ConditionalGenerationOutput { generation: decoded, }, )?) 
} } #[wasm_bindgen] impl ModelEncoder { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, ) -> Result<ModelEncoder, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let device = &Device::Cpu; let vb = VarBuilder::from_buffered_safetensors(weights, DType::F32, device)?; let mut config: Config = serde_json::from_slice(&config)?; config.use_cache = false; let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let model = T5EncoderModel::load(vb, &config)?; Ok(Self { model, tokenizer }) } pub fn decode(&mut self, input: JsValue) -> Result<JsValue, JsError> { let device = &Device::Cpu; let input: DecoderParams = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; self.model.clear_kv_cache(); let sentences = input.sentences; let normalize_embeddings = input.normalize_embeddings; let n_sentences = sentences.len(); let mut all_embeddings = Vec::with_capacity(n_sentences); for sentence in sentences { let tokens = self .tokenizer .encode(sentence, true) .map_err(|m| JsError::new(&m.to_string()))? .get_ids() .to_vec(); let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let embeddings = self.model.forward(&token_ids)?; console_log!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if normalize_embeddings { embeddings.broadcast_div(&embeddings.sqr()?.sum_keepdim(1)?.sqrt()?)? } else { embeddings }; console_log!("{:?}", embeddings.shape()); all_embeddings.push(embeddings.squeeze(0)?.to_vec1::<f32>()?); } Ok(serde_wasm_bindgen::to_value(&DecoderOutput { embeddings: all_embeddings, })?) } } #[derive(serde::Serialize, serde::Deserialize)] struct ConditionalGenerationOutput { generation: String, } #[derive(serde::Serialize, serde::Deserialize)] struct DecoderOutput { embeddings: Vec<Vec<f32>>, } #[derive(serde::Serialize, serde::Deserialize)] pub struct DecoderParams { sentences: Vec<String>, normalize_embeddings: bool, } #[derive(serde::Serialize, serde::Deserialize)] pub struct ConditionalGenerationParams { prompt: String, temperature: f64, seed: u64, top_p: f64, repeat_penalty: f32, repeat_last_n: usize, max_length: Option<usize>, } fn main() { console_error_panic_hook::set_once(); }
0
hf_public_repos/candle/candle-wasm-examples/t5/src
hf_public_repos/candle/candle-wasm-examples/t5/src/bin/m-quantized.rs
use candle::{Device, Tensor}; use candle_transformers::generation::LogitsProcessor; pub use candle_transformers::models::quantized_t5::{ Config, T5EncoderModel, T5ForConditionalGeneration, VarBuilder, }; use candle_wasm_example_t5::console_log; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; #[wasm_bindgen] pub struct ModelEncoder { model: T5EncoderModel, tokenizer: Tokenizer, } #[wasm_bindgen] pub struct ModelConditionalGeneration { model: T5ForConditionalGeneration, tokenizer: Tokenizer, config: Config, } #[wasm_bindgen] impl ModelConditionalGeneration { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, ) -> Result<ModelConditionalGeneration, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let vb = VarBuilder::from_gguf_buffer(&weights)?; let mut config: Config = serde_json::from_slice(&config)?; let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let model = T5ForConditionalGeneration::load(vb, &config)?; config.use_cache = false; Ok(Self { model, tokenizer, config, }) } pub fn decode(&mut self, input: JsValue) -> Result<JsValue, JsError> { let input: ConditionalGenerationParams = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; let device = &Device::Cpu; self.model.clear_kv_cache(); let mut output_token_ids = [self.config.pad_token_id as u32].to_vec(); let prompt = input.prompt; let repeat_penalty = input.repeat_penalty; let repeat_last_n = input.repeat_last_n; let seed = input.seed; let max_length = usize::clamp(input.max_length.unwrap_or(512), 0, 512); let temperature = if input.temperature <= 0. { None } else { Some(input.temperature) }; let top_p = if input.top_p <= 0. || input.top_p >= 1. { None } else { Some(input.top_p) }; let mut logits_processor = LogitsProcessor::new(seed, temperature, top_p); let tokens = self .tokenizer .encode(prompt, true) .map_err(|m| JsError::new(&m.to_string()))? .get_ids() .to_vec(); let input_token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let encoder_output = self.model.encode(&input_token_ids)?; let mut decoded = String::new(); for index in 0.. { if output_token_ids.len() > max_length { break; } let decoder_token_ids = if index == 0 { Tensor::new(output_token_ids.as_slice(), device)?.unsqueeze(0)? } else { let last_token = *output_token_ids.last().unwrap(); Tensor::new(&[last_token], device)?.unsqueeze(0)? }; let logits = self .model .decode(&decoder_token_ids, &encoder_output)? .squeeze(0)?; let logits = if repeat_penalty == 1. { logits } else { let start_at = output_token_ids.len().saturating_sub(repeat_last_n); candle_transformers::utils::apply_repeat_penalty( &logits, repeat_penalty, &output_token_ids[start_at..], )? }; let next_token_id = logits_processor.sample(&logits)?; if next_token_id as usize == self.config.eos_token_id { break; } output_token_ids.push(next_token_id); if let Some(text) = self.tokenizer.id_to_token(next_token_id) { let text = text.replace('▁', " ").replace("<0x0A>", "\n"); decoded += &text; } } Ok(serde_wasm_bindgen::to_value( &ConditionalGenerationOutput { generation: decoded, }, )?) 
} } #[wasm_bindgen] impl ModelEncoder { #[wasm_bindgen(constructor)] pub fn load( weights: Vec<u8>, tokenizer: Vec<u8>, config: Vec<u8>, ) -> Result<ModelEncoder, JsError> { console_error_panic_hook::set_once(); console_log!("loading model"); let vb = VarBuilder::from_gguf_buffer(&weights)?; let mut config: Config = serde_json::from_slice(&config)?; config.use_cache = false; let tokenizer = Tokenizer::from_bytes(&tokenizer).map_err(|m| JsError::new(&m.to_string()))?; let model = T5EncoderModel::load(vb, &config)?; Ok(Self { model, tokenizer }) } pub fn decode(&mut self, input: JsValue) -> Result<JsValue, JsError> { let device = &Device::Cpu; let input: DecoderParams = serde_wasm_bindgen::from_value(input).map_err(|m| JsError::new(&m.to_string()))?; self.model.clear_kv_cache(); let sentences = input.sentences; let normalize_embeddings = input.normalize_embeddings; let n_sentences = sentences.len(); let mut all_embeddings = Vec::with_capacity(n_sentences); for sentence in sentences { let tokens = self .tokenizer .encode(sentence, true) .map_err(|m| JsError::new(&m.to_string()))? .get_ids() .to_vec(); let token_ids = Tensor::new(&tokens[..], device)?.unsqueeze(0)?; let embeddings = self.model.forward(&token_ids)?; console_log!("generated embeddings {:?}", embeddings.shape()); // Apply some avg-pooling by taking the mean embedding value for all tokens (including padding) let (_n_sentence, n_tokens, _hidden_size) = embeddings.dims3()?; let embeddings = (embeddings.sum(1)? / (n_tokens as f64))?; let embeddings = if normalize_embeddings { embeddings.broadcast_div(&embeddings.sqr()?.sum_keepdim(1)?.sqrt()?)? } else { embeddings }; console_log!("{:?}", embeddings.shape()); all_embeddings.push(embeddings.squeeze(0)?.to_vec1::<f32>()?); } Ok(serde_wasm_bindgen::to_value(&DecoderOutput { embeddings: all_embeddings, })?) } } #[derive(serde::Serialize, serde::Deserialize)] struct ConditionalGenerationOutput { generation: String, } #[derive(serde::Serialize, serde::Deserialize)] struct DecoderOutput { embeddings: Vec<Vec<f32>>, } #[derive(serde::Serialize, serde::Deserialize)] pub struct DecoderParams { sentences: Vec<String>, normalize_embeddings: bool, } #[derive(serde::Serialize, serde::Deserialize)] pub struct ConditionalGenerationParams { prompt: String, temperature: f64, seed: u64, top_p: f64, repeat_penalty: f32, repeat_last_n: usize, max_length: Option<usize>, } fn main() { console_error_panic_hook::set_once(); }
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/llama2-c/llama2cWorker.js
import init, { Model } from "./build/m.js"; async function fetchArrayBuffer(url) { const cacheName = "llama2c-candle-cache"; const cache = await caches.open(cacheName); const cachedResponse = await cache.match(url); if (cachedResponse) { const data = await cachedResponse.arrayBuffer(); return new Uint8Array(data); } const res = await fetch(url, { cache: "force-cache" }); cache.put(url, res.clone()); return new Uint8Array(await res.arrayBuffer()); } class Llama2C { static instance = {}; static async getInstance(weightsURL, modelID, tokenizerURL) { // load individual modelID only once if (!this.instance[modelID]) { await init(); self.postMessage({ status: "loading", message: "Loading Model" }); const [weightsArrayU8, tokenizerArrayU8] = await Promise.all([ fetchArrayBuffer(weightsURL), fetchArrayBuffer(tokenizerURL), ]); this.instance[modelID] = new Model(weightsArrayU8, tokenizerArrayU8); } return this.instance[modelID]; } } let controller = null; self.addEventListener("message", (event) => { if (event.data.command === "start") { controller = new AbortController(); generate(event.data); } else if (event.data.command === "abort") { controller.abort(); } }); async function generate(data) { const { weightsURL, modelID, tokenizerURL, prompt, temp, top_p, repeatPenalty, seed, maxSeqLen, } = data; try { self.postMessage({ status: "loading", message: "Starting llama2.c" }); const model = await Llama2C.getInstance(weightsURL, modelID, tokenizerURL); self.postMessage({ status: "loading", message: "Initializing model" }); const firstToken = model.init_with_prompt( prompt, temp, top_p, repeatPenalty, seed ); const seq_len = model.get_seq_len(); let sentence = firstToken; let maxTokens = maxSeqLen ? maxSeqLen : seq_len - prompt.length - 1; let startTime = performance.now(); let tokensCount = 0; while (tokensCount < maxTokens) { await new Promise(async (resolve) => { if (controller && controller.signal.aborted) { self.postMessage({ status: "aborted", message: "Aborted", output: prompt + sentence, }); return; } const token = await model.next_token(); const tokensSec = ((tokensCount + 1) / (performance.now() - startTime)) * 1000; sentence += token; self.postMessage({ status: "generating", message: "Generating token", token: token, sentence: sentence, totalTime: performance.now() - startTime, tokensSec, prompt: prompt, }); setTimeout(resolve, 0); }); tokensCount++; } self.postMessage({ status: "complete", message: "complete", output: prompt + sentence, }); } catch (e) { self.postMessage({ error: e }); } }
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/llama2-c/README.md
## Running [llama2.c](https://github.com/karpathy/llama2.c) Examples

Here, we provide two examples of how to run [llama2.c](https://github.com/karpathy/llama2.c) written in Rust using a Candle-compiled WASM binary and runtime.

### Pure Rust UI

To build and test the UI made in Rust you will need [Trunk](https://trunkrs.dev/#install).

From the `candle-wasm-examples/llama2-c` directory run:

Download assets:

```bash
# Model and tokenizer
wget -c https://huggingface.co/spaces/lmz/candle-llama2/resolve/main/model.bin
wget -c https://huggingface.co/spaces/lmz/candle-llama2/resolve/main/tokenizer.json
```

Run hot reload server:

```bash
trunk serve --release --public-url / --port 8080
```

### Vanilla JS and WebWorkers

To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library:

```bash
sh build-lib.sh
```

This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module:

```js
import init, { Model } from "./build/m.js";
```

The full example can be found under `./lib-example.html`. All needed assets are fetched from the web, so no need to download anything. Finally, you can preview the example by running a local HTTP server. For example:

```bash
python -m http.server
```

Then open `http://localhost:8000/lib-example.html` in your browser.
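For orientation, here is a minimal sketch of calling the wasm `Model` directly, outside of a WebWorker. The call sequence mirrors `llama2cWorker.js`; the weights URL and prompt are placeholder assumptions (the worker normally receives them from `lib-example.html`, which points at the `karpathy/tinyllamas` repo on the Hub and a locally served `tokenizer.json`):

```js
// A hypothetical direct driver; the same constructor and generation loop
// as llama2cWorker.js, with placeholder asset URLs.
import init, { Model } from "./build/m.js";

async function run() {
  await init();
  const [weights, tokenizer] = await Promise.all([
    fetch("https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.bin")
      .then((r) => r.arrayBuffer())
      .then((b) => new Uint8Array(b)),
    fetch("tokenizer.json")
      .then((r) => r.arrayBuffer())
      .then((b) => new Uint8Array(b)),
  ]);
  const model = new Model(weights, tokenizer);
  const prompt = "Once upon a time";
  // init_with_prompt(prompt, temperature, top_p, repeat_penalty, seed)
  // returns the first generated token as a string.
  let sentence = model.init_with_prompt(prompt, 0.4, 1.0, 1.1, BigInt(299792458));
  // Keep generating until the model's sequence length is exhausted,
  // as the worker does.
  const maxTokens = model.get_seq_len() - prompt.length - 1;
  for (let i = 0; i < maxTokens; i++) {
    sentence += await model.next_token();
  }
  console.log(prompt + sentence);
}
run();
```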
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/llama2-c/index.html
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8" /> <title>Welcome to Candle!</title> <link data-trunk rel="copy-file" href="tokenizer.json" /> <link data-trunk rel="copy-file" href="model.bin" /> <link data-trunk rel="rust" href="Cargo.toml" data-bin="app" data-type="main" /> <link data-trunk rel="rust" href="Cargo.toml" data-bin="worker" data-type="worker" /> <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto:300,300italic,700,700italic"> <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/normalize/8.0.1/normalize.css"> <link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/milligram/1.4.1/milligram.css"> </head> <body></body> </html>
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/llama2-c/build-lib.sh
cargo build --target wasm32-unknown-unknown --release
wasm-bindgen ../../target/wasm32-unknown-unknown/release/m.wasm --out-dir build --target web
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/llama2-c/lib-example.html
<html> <head> <meta content="text/html;charset=utf-8" http-equiv="Content-Type" /> <title>Candle Llama.c Rust/WASM</title> </head> <body></body> </html> <!DOCTYPE html> <html> <head> <meta charset="UTF-8" /> <meta name="viewport" content="width=device-width, initial-scale=1.0" /> <style> @import url("https://fonts.googleapis.com/css2?family=Source+Code+Pro:wght@200;300;400&family=Source+Sans+3:wght@100;200;300;400;500;600;700;800;900&display=swap"); html, body { font-family: "Source Sans 3", sans-serif; } code, output, select, pre { font-family: "Source Code Pro", monospace; } </style> <script src="https://cdn.tailwindcss.com"></script> <script type="module"> // base url for audio examples const MODELS_BASE_URL = "https://huggingface.co/karpathy/tinyllamas/resolve/main"; // models base url const MODELS = { stories15M: { url: "stories15M.bin", seq_len: 256, }, stories42M: { url: "stories42M.bin", seq_len: 1024, }, stories110M: { url: "stories110M.bin", seq_len: 1024, }, }; const llamaWorker = new Worker("./llama2cWorker.js", { type: "module", }); async function generateSequence(controller) { const getValue = (id) => document.querySelector(`#${id}`).value; const modelID = getValue("model"); const model = MODELS[modelID]; const weightsURL = `${MODELS_BASE_URL}/${model.url}`; const prompt = getValue("prompt"); const temperature = getValue("temperature"); const topP = getValue("top-p"); const repeatPenalty = getValue("repeat_penalty"); const seed = getValue("seed"); const maxSeqLen = getValue("max-seq"); function updateStatus(data) { const outStatus = document.querySelector("#output-status"); const outGen = document.querySelector("#output-generation"); const outCounter = document.querySelector("#output-counter"); switch (data.status) { case "loading": outStatus.hidden = false; outStatus.textContent = data.message; outGen.hidden = true; outCounter.hidden = true; break; case "generating": const { message, prompt, sentence, tokensSec, totalTime } = data; outStatus.hidden = true; outCounter.hidden = false; outGen.hidden = false; outGen.innerHTML = `<span class="font-semibold">${prompt}</span>${sentence.replace( /\<s\>|\<\/s\>/g, "" )}`; outCounter.innerHTML = `${(totalTime / 1000).toFixed( 2 )}s (${tokensSec.toFixed(2)} tok/s)`; break; case "complete": outStatus.hidden = true; outGen.hidden = false; break; } } return new Promise((resolve, reject) => { llamaWorker.postMessage({ weightsURL, modelID, tokenizerURL: "tokenizer.json", prompt, temp: temperature, top_p: topP, repeatPenalty, seed: BigInt(seed), maxSeqLen, command: "start", }); const handleAbort = () => { llamaWorker.postMessage({ command: "abort" }); }; const handleMessage = (event) => { const { status, error, message, prompt, sentence } = event.data; if (status) updateStatus(event.data); if (error) { llamaWorker.removeEventListener("message", handleMessage); reject(new Error(error)); } if (status === "aborted") { llamaWorker.removeEventListener("message", handleMessage); resolve(event.data); } if (status === "complete") { llamaWorker.removeEventListener("message", handleMessage); resolve(event.data); } }; controller.signal.addEventListener("abort", handleAbort); llamaWorker.addEventListener("message", handleMessage); }); } const form = document.querySelector("#form"); const prompt = document.querySelector("#prompt"); const clearBtn = document.querySelector("#clear-btn"); const runBtn = document.querySelector("#run"); const modelSelect = document.querySelector("#model"); let runController = new AbortController(); let isRunning = false; 
modelSelect.addEventListener("change", (e) => { const model = MODELS[e.target.value]; document.querySelector("#max-seq").max = model.seq_len; document.querySelector("#max-seq").nextElementSibling.value = model.seq_len; }); form.addEventListener("submit", async (e) => { e.preventDefault(); if (isRunning) { stopRunning(); } else { startRunning(); await generateSequence(runController); stopRunning(); } }); function startRunning() { isRunning = true; runBtn.textContent = "Stop"; } function stopRunning() { runController.abort(); runController = new AbortController(); runBtn.textContent = "Run"; isRunning = false; } clearBtn.addEventListener("click", (e) => { e.preventDefault(); prompt.value = ""; clearBtn.classList.add("invisible"); runBtn.disabled = true; stopRunning(); }); prompt.addEventListener("input", (e) => { runBtn.disabled = false; if (e.target.value.length > 0) { clearBtn.classList.remove("invisible"); } else { clearBtn.classList.add("invisible"); } }); </script> </head> <body class="container max-w-4xl mx-auto p-4 text-gray-800"> <main class="grid grid-cols-1 gap-8 relative"> <span class="absolute text-5xl -ml-[1em]"> 🕯️ </span> <div> <h1 class="text-5xl font-bold">Candle Llama2.c</h1> <h2 class="text-2xl font-bold">Rust/WASM Demo</h2> <p class="max-w-lg"> <a href="https://github.com/karpathy/llama2.c" target="_blank" class="underline hover:text-blue-500 hover:no-underline" target="_blank" >Llama2.c</a > is Andrey Karpathy's C implementation of the Llama 2 LLM model in C. This demo uses <a href="https://github.com/huggingface/candle/" target="_blank" class="underline hover:text-blue-500 hover:no-underline" >Candle </a> to run Llama2.c in the browser using rust/wasm. </p> </div> <div> <label for="model" class="font-medium">Models Options: </label> <select id="model" class="border-2 border-gray-500 rounded-md font-light"> <option value="stories15M" selected>stories 15M (60.8 MB)</option> <option value="stories42M">stories 42M (167 MB)</option> <option value="stories110M">stories 110M (438 MB)</option> </select> </div> <form id="form" class="flex text-normal px-1 py-1 border border-gray-700 rounded-md items-center"> <input type="submit" hidden /> <input type="text" id="prompt" class="font-light w-full px-3 py-2 mx-1 resize-none outline-none" placeholder="Add your prompt here..." 
value="Once upon a time" /> <button id="clear-btn"> <svg fill="none" xmlns="http://www.w3.org/2000/svg" width="40" viewBox="0 0 70 40"> <path opacity=".5" d="M39 .2v40.2" stroke="#1F2937" /> <path d="M1.5 11.5 19 29.1m0-17.6L1.5 29.1" opacity=".5" stroke="#1F2937" stroke-width="2" /> </svg> </button> <button id="run" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-2 w-16 rounded disabled:bg-gray-300 disabled:cursor-not-allowed"> Run </button> </form> <details> <summary class="font-medium cursor-pointer">Advanced Options</summary> <div class="grid grid-cols-3 max-w-md items-center gap-3 py-3"> <label class="text-sm font-medium" for="max-seq" >Maximum length </label> <input type="range" id="max-seq" name="max-seq" min="1" max="256" step="1" value="200" oninput="this.nextElementSibling.value = Number(this.value)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 200</output > <label class="text-sm font-medium" for="temperature" >Temperature</label > <input type="range" id="temperature" name="temperature" min="0" max="2" step="0.01" value="0.40" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 0.40</output > <label class="text-sm font-medium" for="top-p">Top-p</label> <input type="range" id="top-p" name="top-p" min="0" max="1" step="0.01" value="1.00" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md"> 1.00</output > <label class="text-sm font-medium" for="repeat_penalty" >Repeat Penalty</label > <input type="range" id="repeat_penalty" name="repeat_penalty" min="1" max="2" step="0.01" value="1.10" oninput="this.nextElementSibling.value = Number(this.value).toFixed(2)" /> <output class="text-xs w-[50px] text-center font-light px-1 py-1 border border-gray-700 rounded-md" >1.10</output > <label class="text-sm font-medium" for="seed">Seed</label> <input type="number" id="seed" name="seed" value="299792458" class="font-light border border-gray-700 text-right rounded-md p-2" /> <button id="run" onclick="document.querySelector('#seed').value = BigInt(Math.floor(Math.random() * 2**64-1))" class="bg-gray-700 hover:bg-gray-800 text-white font-normal py-1 w-[50px] rounded disabled:bg-gray-300 disabled:cursor-not-allowed text-sm"> Rand </button> </div> </details> <div> <h3 class="font-medium">Generation:</h3> <div class="min-h-[250px] bg-slate-100 text-gray-500 p-4 rounded-md flex flex-col gap-2"> <div id="output-counter" hidden class="ml-auto font-semibold grid-rows-1 text-sm"></div> <p hidden id="output-generation" class="grid-rows-2"></p> <span id="output-status" class="m-auto font-light" >No output yet</span > </div> </div> </main> </body> </html>
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/llama2-c/Cargo.toml
[package] name = "candle-wasm-example-llama2" version.workspace = true edition.workspace = true description.workspace = true repository.workspace = true keywords.workspace = true categories.workspace = true license.workspace = true [dependencies] candle = { path = "../../candle-core", version = "0.3.1", package = "candle-core" } candle-nn = { path = "../../candle-nn", version = "0.3.1" } candle-transformers = { path = "../../candle-transformers", version = "0.3.1" } num-traits = { workspace = true } tokenizers = { workspace = true, features = ["unstable_wasm"] } # App crates. anyhow = { workspace = true } byteorder = { workspace = true } log = { workspace = true } rand = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } # Wasm specific crates. console_error_panic_hook = "0.1.7" getrandom = { version = "0.2", features = ["js"] } gloo = "0.8" js-sys = "0.3.64" wasm-bindgen = "0.2.87" wasm-bindgen-futures = "0.4.37" wasm-logger = "0.2" yew-agent = "0.2.0" yew = { version = "0.20.0", features = ["csr"] } [dependencies.web-sys] version = "0.3.64" features = [ 'Blob', 'Document', 'Element', 'HtmlElement', 'Node', 'Window', 'Request', 'RequestCache', 'RequestInit', 'RequestMode', 'Response', 'Performance', ]
0
hf_public_repos/candle/candle-wasm-examples/llama2-c
hf_public_repos/candle/candle-wasm-examples/llama2-c/src/worker.rs
use crate::model::{Cache, Config, Llama}; use byteorder::{LittleEndian, ReadBytesExt}; use candle::{DType, Device, IndexOp, Result, Shape, Tensor}; use candle_nn::VarBuilder; use candle_transformers::generation::LogitsProcessor; use serde::{Deserialize, Serialize}; use tokenizers::Tokenizer; use wasm_bindgen::prelude::*; use yew_agent::{HandlerId, Public, WorkerLink}; #[wasm_bindgen] extern "C" { // Use `js_namespace` here to bind `console.log(..)` instead of just // `log(..)` #[wasm_bindgen(js_namespace = console)] pub fn log(s: &str); } #[macro_export] macro_rules! console_log { // Note that this is using the `log` function imported above during // `bare_bones` ($($t:tt)*) => ($crate::worker::log(&format_args!($($t)*).to_string())) } // Communication to the worker happens through bincode, the model weights and configs are fetched // on the main thread and transfered via the following structure. #[derive(Serialize, Deserialize)] pub struct ModelData { pub tokenizer: Vec<u8>, pub model: Vec<u8>, } fn read_i32<R: std::io::Read>(r: &mut R) -> Result<i32> { let mut buf = [0u8; 4]; r.read_exact(&mut buf)?; Ok(i32::from_le_bytes(buf)) } fn read_tensor<R: std::io::Read, S: Into<Shape>>( r: &mut R, shape: S, dev: &Device, ) -> Result<Tensor> { let shape = shape.into(); let mut data_t = vec![0f32; shape.elem_count()]; r.read_f32_into::<LittleEndian>(&mut data_t)?; let tensor = Tensor::from_vec(data_t, shape, dev)?; Ok(tensor) } pub struct Model { pub cache: Cache, pub config: Config, pub llama: Llama, pub tokenizer: Tokenizer, } impl Model { fn run( &self, link: &WorkerLink<Worker>, id: HandlerId, temp: f64, top_p: f64, prompt: String, ) -> Result<()> { let dev = Device::Cpu; let temp = if temp <= 0. { None } else { Some(temp) }; let top_p = if top_p <= 0. || top_p >= 1.0 { None } else { Some(top_p) }; console_log!("temp: {temp:?} top_p: {top_p:?} prompt: {prompt}"); let mut logits_processor = LogitsProcessor::new(299792458, temp, top_p); let mut index_pos = 0; let mut tokens = self .tokenizer .encode(prompt.to_string(), true) .map_err(|m| candle::Error::Msg(m.to_string()))? .get_ids() .to_vec(); link.respond(id, Ok(WorkerOutput::Generated(prompt))); for index in 0.. { if tokens.len() >= self.config.seq_len { break; } let context_size = if self.cache.use_kv_cache && index > 0 { 1 } else { tokens.len() }; let ctxt = &tokens[tokens.len().saturating_sub(context_size)..]; let input = Tensor::new(ctxt, &dev)?.unsqueeze(0)?; let logits = self.llama.forward(&input, index_pos)?; let logits = logits.squeeze(0)?; index_pos += ctxt.len(); let next_token = logits_processor.sample(&logits)?; tokens.push(next_token); if let Some(text) = self.tokenizer.id_to_token(next_token) { let text = text.replace('▁', " ").replace("<0x0A>", "\n"); link.respond(id, Ok(WorkerOutput::Generated(text))); } } Ok(()) } } impl Config { fn from_reader<R: std::io::Read>(r: &mut R) -> Result<Self> { let dim = read_i32(r)? as usize; let hidden_dim = read_i32(r)? as usize; let n_layers = read_i32(r)? as usize; let n_heads = read_i32(r)? as usize; let n_kv_heads = read_i32(r)? as usize; let vocab_size = read_i32(r)? as usize; let seq_len = read_i32(r)? 
as usize; Ok(Self { dim, hidden_dim, n_layers, n_heads, n_kv_heads, vocab_size, seq_len, norm_eps: 1e-5, }) } pub fn head_size(&self) -> usize { self.dim / self.n_heads } } struct TransformerWeights { // token embedding table token_embedding_table: Tensor, // (vocab_size, dim) // weights for rmsnorms rms_att_weight: Tensor, // (layer, dim) rmsnorm weights rms_ffn_weight: Tensor, // (layer, dim) // weights for matmuls wq: Tensor, // (layer, dim, dim) wk: Tensor, // (layer, dim, dim) wv: Tensor, // (layer, dim, dim) wo: Tensor, // (layer, dim, dim) // weights for ffn w1: Tensor, // (layer, hidden_dim, dim) w2: Tensor, // (layer, dim, hidden_dim) w3: Tensor, // (layer, hidden_dim, dim) // final rmsnorm rms_final_weight: Tensor, // (dim,) // freq_cis for RoPE relatively positional embeddings freq_cis_real: Tensor, // (seq_len, head_size/2) freq_cis_imag: Tensor, // (seq_len, head_size/2) } impl TransformerWeights { fn from_reader<R: std::io::Read>(r: &mut R, c: &Config, dev: &Device) -> Result<Self> { let token_embedding_table = read_tensor(r, (c.vocab_size, c.dim), dev)?; let rms_att_weight = read_tensor(r, (c.n_layers, c.dim), dev)?; let wq = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wk = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wv = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let wo = read_tensor(r, (c.n_layers, c.dim, c.dim), dev)?; let rms_ffn_weight = read_tensor(r, (c.n_layers, c.dim), dev)?; let w1 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?; let w2 = read_tensor(r, (c.n_layers, c.dim, c.hidden_dim), dev)?; let w3 = read_tensor(r, (c.n_layers, c.hidden_dim, c.dim), dev)?; let rms_final_weight = read_tensor(r, c.dim, dev)?; let head_size = c.head_size(); let freq_cis_real = read_tensor(r, (c.seq_len, head_size / 2), dev)?; let freq_cis_imag = read_tensor(r, (c.seq_len, head_size / 2), dev)?; Ok(Self { token_embedding_table, rms_att_weight, wq, wk, wv, wo, rms_ffn_weight, w1, w2, w3, rms_final_weight, freq_cis_real, freq_cis_imag, }) } fn var_builder(&self, cfg: &Config, device: &Device) -> Result<VarBuilder> { let mut ws = std::collections::HashMap::new(); let mut insert = |name: &str, t: Tensor| { ws.insert(name.to_string(), t); }; insert("rot.freq_cis_real", self.freq_cis_real.clone()); insert("rot.freq_cis_imag", self.freq_cis_imag.clone()); insert( "model.embed_tokens.weight", self.token_embedding_table.clone(), ); insert("lm_head.weight", self.token_embedding_table.clone()); insert("model.norm.weight", self.rms_final_weight.clone()); for layer in 0..cfg.n_layers { ws.insert( format!("model.layers.{layer}.self_attn.q_proj.weight"), self.wq.i(layer)?, ); ws.insert( format!("model.layers.{layer}.self_attn.k_proj.weight"), self.wk.i(layer)?, ); ws.insert( format!("model.layers.{layer}.self_attn.v_proj.weight"), self.wv.i(layer)?, ); ws.insert( format!("model.layers.{layer}.self_attn.o_proj.weight"), self.wo.i(layer)?, ); ws.insert( format!("model.layers.{layer}.mlp.gate_proj.weight"), self.w1.i(layer)?, ); ws.insert( format!("model.layers.{layer}.mlp.down_proj.weight"), self.w2.i(layer)?, ); ws.insert( format!("model.layers.{layer}.mlp.up_proj.weight"), self.w3.i(layer)?, ); ws.insert( format!("model.layers.{layer}.input_layernorm.weight"), self.rms_att_weight.i(layer)?, ); ws.insert( format!("model.layers.{layer}.post_attention_layernorm.weight"), self.rms_ffn_weight.i(layer)?, ); } let vb = VarBuilder::from_tensors(ws, DType::F32, device); Ok(vb) } } impl Model { pub fn load(md: ModelData) -> Result<Self> { let dev = Device::Cpu; let mut 
model = std::io::Cursor::new(md.model); let config = Config::from_reader(&mut model)?; let weights = TransformerWeights::from_reader(&mut model, &config, &dev)?; let vb = weights.var_builder(&config, &dev)?; let cache = Cache::new(true, &config, vb.pp("rot"))?; let llama = Llama::load(vb, &cache, &config)?; let tokenizer = Tokenizer::from_bytes(&md.tokenizer).map_err(|m| candle::Error::Msg(m.to_string()))?; Ok(Self { cache, config, llama, tokenizer, }) } } pub struct Worker { link: WorkerLink<Self>, model: Option<Model>, } #[derive(Serialize, Deserialize)] pub enum WorkerInput { ModelData(ModelData), Run(f64, f64, String), } #[derive(Serialize, Deserialize)] pub enum WorkerOutput { Generated(String), GenerationDone(std::result::Result<(), String>), WeightsLoaded, } impl yew_agent::Worker for Worker { type Input = WorkerInput; type Message = (); type Output = std::result::Result<WorkerOutput, String>; type Reach = Public<Self>; fn create(link: WorkerLink<Self>) -> Self { Self { link, model: None } } fn update(&mut self, _msg: Self::Message) { // no messaging } fn handle_input(&mut self, msg: Self::Input, id: HandlerId) { let output = match msg { WorkerInput::ModelData(md) => match Model::load(md) { Ok(model) => { self.model = Some(model); Ok(WorkerOutput::WeightsLoaded) } Err(err) => Err(format!("model creation error {err:?}")), }, WorkerInput::Run(temp, top_p, prompt) => match &mut self.model { None => Err("model has not been set yet".to_string()), Some(model) => { { let mut cache = model.cache.kvs.lock().unwrap(); for elem in cache.iter_mut() { *elem = None } } let result = model .run(&self.link, id, temp, top_p, prompt) .map_err(|e| e.to_string()); Ok(WorkerOutput::GenerationDone(result)) } }, }; self.link.respond(id, output); } fn name_of_resource() -> &'static str { "worker.js" } fn resource_path_is_relative() -> bool { true } }
0
hf_public_repos/candle/candle-wasm-examples/llama2-c
hf_public_repos/candle/candle-wasm-examples/llama2-c/src/lib.rs
mod app;
pub mod model;
pub mod worker;
pub use app::App;
pub use worker::Worker;
0
hf_public_repos/candle/candle-wasm-examples/llama2-c
hf_public_repos/candle/candle-wasm-examples/llama2-c/src/app.rs
use crate::console_log;
use crate::worker::{ModelData, Worker, WorkerInput, WorkerOutput};
use std::str::FromStr;
use wasm_bindgen::prelude::*;
use wasm_bindgen_futures::JsFuture;
use yew::{html, Component, Context, Html};
use yew_agent::{Bridge, Bridged};

async fn fetch_url(url: &str) -> Result<Vec<u8>, JsValue> {
    use web_sys::{Request, RequestCache, RequestInit, RequestMode, Response};
    let window = web_sys::window().ok_or("window")?;
    let mut opts = RequestInit::new();
    let opts = opts
        .method("GET")
        .mode(RequestMode::Cors)
        .cache(RequestCache::NoCache);

    let request = Request::new_with_str_and_init(url, opts)?;

    let resp_value = JsFuture::from(window.fetch_with_request(&request)).await?;

    // `resp_value` is a `Response` object.
    assert!(resp_value.is_instance_of::<Response>());
    let resp: Response = resp_value.dyn_into()?;
    let data = JsFuture::from(resp.blob()?).await?;
    let blob = web_sys::Blob::from(data);
    let array_buffer = JsFuture::from(blob.array_buffer()).await?;
    let data = js_sys::Uint8Array::new(&array_buffer).to_vec();
    Ok(data)
}

pub enum Msg {
    Refresh,
    Run,
    UpdateStatus(String),
    SetModel(ModelData),
    WorkerInMsg(WorkerInput),
    WorkerOutMsg(Result<WorkerOutput, String>),
}

pub struct CurrentDecode {
    start_time: Option<f64>,
}

pub struct App {
    status: String,
    loaded: bool,
    temperature: std::rc::Rc<std::cell::RefCell<f64>>,
    top_p: std::rc::Rc<std::cell::RefCell<f64>>,
    prompt: std::rc::Rc<std::cell::RefCell<String>>,
    generated: String,
    n_tokens: usize,
    current_decode: Option<CurrentDecode>,
    worker: Box<dyn Bridge<Worker>>,
}

async fn model_data_load() -> Result<ModelData, JsValue> {
    let tokenizer = fetch_url("tokenizer.json").await?;
    let model = fetch_url("model.bin").await?;
    console_log!("{}", model.len());
    Ok(ModelData { tokenizer, model })
}

fn performance_now() -> Option<f64> {
    let window = web_sys::window()?;
    let performance = window.performance()?;
    Some(performance.now() / 1000.)
}

impl Component for App {
    type Message = Msg;
    type Properties = ();

    fn create(ctx: &Context<Self>) -> Self {
        let status = "loading weights".to_string();
        let cb = {
            let link = ctx.link().clone();
            move |e| link.send_message(Self::Message::WorkerOutMsg(e))
        };
        let worker = Worker::bridge(std::rc::Rc::new(cb));
        Self {
            status,
            n_tokens: 0,
            temperature: std::rc::Rc::new(std::cell::RefCell::new(0.)),
            top_p: std::rc::Rc::new(std::cell::RefCell::new(1.0)),
            prompt: std::rc::Rc::new(std::cell::RefCell::new("".to_string())),
            generated: String::new(),
            current_decode: None,
            worker,
            loaded: false,
        }
    }

    fn rendered(&mut self, ctx: &Context<Self>, first_render: bool) {
        if first_render {
            ctx.link().send_future(async {
                match model_data_load().await {
                    Err(err) => {
                        let status = format!("{err:?}");
                        Msg::UpdateStatus(status)
                    }
                    Ok(model_data) => Msg::SetModel(model_data),
                }
            });
        }
    }

    fn update(&mut self, ctx: &Context<Self>, msg: Self::Message) -> bool {
        match msg {
            Msg::SetModel(md) => {
                self.status = "weights loaded successfully!".to_string();
                self.loaded = true;
                console_log!("loaded weights");
                self.worker.send(WorkerInput::ModelData(md));
                true
            }
            Msg::Run => {
                if self.current_decode.is_some() {
                    self.status = "already generating some sample at the moment".to_string()
                } else {
                    let start_time = performance_now();
                    self.current_decode = Some(CurrentDecode { start_time });
                    self.status = "generating...".to_string();
                    self.n_tokens = 0;
                    self.generated.clear();
                    let temp = *self.temperature.borrow();
                    let top_p = *self.top_p.borrow();
                    let prompt = self.prompt.borrow().clone();
                    console_log!("temp: {}, top_p: {}, prompt: {}", temp, top_p, prompt);
                    ctx.link()
                        .send_message(Msg::WorkerInMsg(WorkerInput::Run(temp, top_p, prompt)))
                }
                true
            }
            Msg::WorkerOutMsg(output) => {
                match output {
                    Ok(WorkerOutput::WeightsLoaded) => self.status = "weights loaded!".to_string(),
                    Ok(WorkerOutput::GenerationDone(Err(err))) => {
                        self.status = format!("error in worker process: {err}");
                        self.current_decode = None
                    }
                    Ok(WorkerOutput::GenerationDone(Ok(()))) => {
                        let dt = self.current_decode.as_ref().and_then(|current_decode| {
                            current_decode.start_time.and_then(|start_time| {
                                performance_now().map(|stop_time| stop_time - start_time)
                            })
                        });
                        self.status = match dt {
                            None => "generation succeeded!".to_string(),
                            Some(dt) => format!(
                                "generation succeeded in {:.2}s ({:.1} ms/token)",
                                dt,
                                dt * 1000.0 / (self.n_tokens as f64)
                            ),
                        };
                        self.current_decode = None
                    }
                    Ok(WorkerOutput::Generated(token)) => {
                        self.n_tokens += 1;
                        self.generated.push_str(&token)
                    }
                    Err(err) => {
                        self.status = format!("error in worker {err:?}");
                    }
                }
                true
            }
            Msg::WorkerInMsg(inp) => {
                self.worker.send(inp);
                true
            }
            Msg::UpdateStatus(status) => {
                self.status = status;
                true
            }
            Msg::Refresh => true,
        }
    }

    fn view(&self, ctx: &Context<Self>) -> Html {
        use yew::TargetCast;
        let temperature = self.temperature.clone();
        let oninput_temperature = ctx.link().callback(move |e: yew::InputEvent| {
            let input: web_sys::HtmlInputElement = e.target_unchecked_into();
            if let Ok(temp) = f64::from_str(&input.value()) {
                *temperature.borrow_mut() = temp
            }
            Msg::Refresh
        });
        let top_p = self.top_p.clone();
        let oninput_top_p = ctx.link().callback(move |e: yew::InputEvent| {
            let input: web_sys::HtmlInputElement = e.target_unchecked_into();
            if let Ok(top_p_input) = f64::from_str(&input.value()) {
                *top_p.borrow_mut() = top_p_input
            }
            Msg::Refresh
        });
        let prompt = self.prompt.clone();
        let oninput_prompt = ctx.link().callback(move |e: yew::InputEvent| {
            let input: web_sys::HtmlInputElement = e.target_unchecked_into();
            *prompt.borrow_mut() = input.value();
            Msg::Refresh
        });
        html! {
            <div style="margin: 2%;">
                <div><p>{"Running "}
                <a href="https://github.com/karpathy/llama2.c" target="_blank">{"llama2.c"}</a>
                {" in the browser using rust/wasm with "}
                <a href="https://github.com/huggingface/candle" target="_blank">{"candle!"}</a>
                </p>
                <p>{"Once the weights have loaded, click on the run button to start generating content."}
                </p>
                </div>
                {"temperature \u{00a0} "}
                <input type="range" min="0." max="1.2" step="0.1" value={self.temperature.borrow().to_string()} oninput={oninput_temperature} id="temp"/>
                {format!(" \u{00a0} {}", self.temperature.borrow())}
                <br/>
                {"top_p \u{00a0} "}
                <input type="range" min="0." max="1.0" step="0.05" value={self.top_p.borrow().to_string()} oninput={oninput_top_p} id="top_p"/>
                {format!(" \u{00a0} {}", self.top_p.borrow())}
                <br/>
                {"prompt: "}<input type="text" value={self.prompt.borrow().to_string()} oninput={oninput_prompt} id="prompt"/>
                <br/>
                {
                    if self.loaded {
                        html!(<button class="button" onclick={ctx.link().callback(move |_| Msg::Run)}> { "run" }</button>)
                    } else {
                        html! { <progress id="progress-bar" aria-label="Loading weights..."></progress> }
                    }
                }
                <br/>
                <h3>
                  {&self.status}
                </h3>
                {
                    if self.current_decode.is_some() {
                        html! { <progress id="progress-bar" aria-label="generating…"></progress> }
                    } else {
                        html! {}
                    }
                }
                <blockquote>
                <p> { self.generated.chars().map(|c|
                    if c == '\r' || c == '\n' {
                        html! { <br/> }
                    } else {
                        html! { {c} }
                    }).collect::<Html>()
                } </p>
                </blockquote>
            </div>
        }
    }
}
0
hf_public_repos/candle/candle-wasm-examples/llama2-c
hf_public_repos/candle/candle-wasm-examples/llama2-c/src/model.rs
use candle::{DType, Device, IndexOp, Result, Tensor, D};
use candle_nn::{
    embedding, linear_no_bias as linear, rms_norm, Embedding, Linear, Module, RmsNorm, VarBuilder,
};
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

#[derive(Debug, Clone)]
pub struct Config {
    pub dim: usize,        // transformer dimension
    pub hidden_dim: usize, // for ffn layers
    pub n_layers: usize,   // number of layers
    pub n_heads: usize,    // number of query heads
    pub n_kv_heads: usize, // number of key/value heads (can be < query heads because of multiquery)
    pub vocab_size: usize, // vocabulary size, usually 256 (byte-level)
    pub seq_len: usize,    // max sequence length
    pub norm_eps: f64,
}

#[derive(Clone)]
pub struct Cache {
    masks: Arc<Mutex<HashMap<usize, Tensor>>>,
    pub use_kv_cache: bool,
    #[allow(clippy::type_complexity)]
    pub kvs: Arc<Mutex<Vec<Option<(Tensor, Tensor)>>>>,
    cos: Tensor,
    sin: Tensor,
    device: Device,
}

impl Cache {
    pub fn new(use_kv_cache: bool, cfg: &Config, vb: VarBuilder) -> Result<Self> {
        let freq_cis_real = vb.get((cfg.seq_len, cfg.head_size() / 2), "freq_cis_real")?;
        let freq_cis_imag = vb.get((cfg.seq_len, cfg.head_size() / 2), "freq_cis_imag")?;
        let cos = freq_cis_real.reshape((cfg.seq_len, cfg.head_size() / 2, 1))?;
        let sin = freq_cis_imag.reshape((cfg.seq_len, cfg.head_size() / 2, 1))?;
        Ok(Self {
            masks: Arc::new(Mutex::new(HashMap::new())),
            use_kv_cache,
            kvs: Arc::new(Mutex::new(vec![None; cfg.n_layers])),
            cos,
            sin,
            device: vb.device().clone(),
        })
    }

    fn mask(&self, t: usize) -> Result<Tensor> {
        let mut masks = self.masks.lock().unwrap();
        if let Some(mask) = masks.get(&t) {
            Ok(mask.clone())
        } else {
            // Build (and memoize) a t x t causal mask: 1 above the diagonal,
            // i.e. for the positions that should be masked out.
            let mask: Vec<_> = (0..t)
                .flat_map(|i| (0..t).map(move |j| u8::from(j > i)))
                .collect();
            let mask = Tensor::from_slice(&mask, (t, t), &self.device)?;
            masks.insert(t, mask.clone());
            Ok(mask)
        }
    }
}

struct CausalSelfAttention {
    q_proj: Linear,
    k_proj: Linear,
    v_proj: Linear,
    o_proj: Linear,
    n_head: usize,
    n_key_value_head: usize,
    head_dim: usize,
    cache: Cache,
}

impl CausalSelfAttention {
    fn apply_rotary_emb(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
        let (b_sz, seq_len, h, n_embd) = x.dims4()?;
        let cos = self.cache.cos.i(index_pos..index_pos + seq_len)?;
        let sin = self.cache.sin.i(index_pos..index_pos + seq_len)?;
        let cos = cos.unsqueeze(1)?;
        let sin = sin.unsqueeze(1)?;
        let cos = cos.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?;
        let sin = sin.broadcast_as((b_sz, seq_len, 1, n_embd / 2, 1))?;
        let x = x.reshape((b_sz, seq_len, h, n_embd / 2, 2))?;
        let x0 = x.narrow(D::Minus1, 0, 1)?;
        let x1 = x.narrow(D::Minus1, 1, 1)?;
        let dst0 = (x0.broadcast_mul(&cos)? - x1.broadcast_mul(&sin)?)?;
        let dst1 = (x0.broadcast_mul(&sin)? + x1.broadcast_mul(&cos)?)?;
        let rope = Tensor::cat(&[&dst0, &dst1], D::Minus1)?.reshape((b_sz, seq_len, h, n_embd))?;
        Ok(rope)
    }

    fn forward(&self, x: &Tensor, index_pos: usize, block_idx: usize) -> Result<Tensor> {
        let (b_sz, seq_len, n_embd) = x.dims3()?;
        let q = self.q_proj.forward(x)?;
        let k = self.k_proj.forward(x)?;
        let v = self.v_proj.forward(x)?;

        let q = q.reshape((b_sz, seq_len, self.n_head, self.head_dim))?;
        let k = k.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?;
        let mut v = v.reshape((b_sz, seq_len, self.n_key_value_head, self.head_dim))?;

        let q = self.apply_rotary_emb(&q, index_pos)?;
        let mut k = self.apply_rotary_emb(&k, index_pos)?;

        if self.cache.use_kv_cache {
            let mut cache = self.cache.kvs.lock().unwrap();
            if let Some((cache_k, cache_v)) = &cache[block_idx] {
                k = Tensor::cat(&[cache_k, &k], 1)?.contiguous()?;
                v = Tensor::cat(&[cache_v, &v], 1)?.contiguous()?;
            }
            cache[block_idx] = Some((k.clone(), v.clone()))
        }

        let k = self.repeat_kv(k)?;
        let v = self.repeat_kv(v)?;

        let q = q.transpose(1, 2)?.contiguous()?;
        let k = k.transpose(1, 2)?.contiguous()?;
        let v = v.transpose(1, 2)?.contiguous()?;

        let att = (q.matmul(&k.t()?)? / (self.head_dim as f64).sqrt())?;
        let mask = self.cache.mask(seq_len)?.broadcast_as(att.shape())?;
        let att = masked_fill(&att, &mask, f32::NEG_INFINITY)?;
        let att = candle_nn::ops::softmax(&att, D::Minus1)?;
        // Convert to contiguous as matmul doesn't support strided `v` tensors for now.
        let y = att.matmul(&v.contiguous()?)?;
        let y = y.transpose(1, 2)?.reshape(&[b_sz, seq_len, n_embd])?;
        let y = self.o_proj.forward(&y)?;
        Ok(y)
    }

    fn repeat_kv(&self, x: Tensor) -> Result<Tensor> {
        let n_rep = self.n_head / self.n_key_value_head;
        if n_rep == 1 {
            Ok(x)
        } else {
            let (b_sz, seq_len, n_kv_head, head_dim) = x.dims4()?;
            let x = x
                .unsqueeze(3)?
                .expand((b_sz, seq_len, n_kv_head, n_rep, head_dim))?
                .reshape((b_sz, seq_len, n_kv_head * n_rep, head_dim))?;
            Ok(x)
        }
    }

    fn load(vb: VarBuilder, cache: &Cache, cfg: &Config) -> Result<Self> {
        let size_in = cfg.dim;
        let size_q = (cfg.dim / cfg.n_heads) * cfg.n_heads;
        let size_kv = (cfg.dim / cfg.n_heads) * cfg.n_kv_heads;
        let q_proj = linear(size_in, size_q, vb.pp("q_proj"))?;
        let k_proj = linear(size_in, size_kv, vb.pp("k_proj"))?;
        let v_proj = linear(size_in, size_kv, vb.pp("v_proj"))?;
        let o_proj = linear(size_q, size_in, vb.pp("o_proj"))?;
        Ok(Self {
            q_proj,
            k_proj,
            v_proj,
            o_proj,
            n_head: cfg.n_heads,
            n_key_value_head: cfg.n_kv_heads,
            head_dim: cfg.dim / cfg.n_heads,
            cache: cache.clone(),
        })
    }
}

fn masked_fill(on_false: &Tensor, mask: &Tensor, on_true: f32) -> Result<Tensor> {
    let shape = mask.shape();
    let on_true = Tensor::new(on_true, on_false.device())?.broadcast_as(shape.dims())?;
    let m = mask.where_cond(&on_true, on_false)?;
    Ok(m)
}

struct Mlp {
    c_fc1: Linear,
    c_fc2: Linear,
    c_proj: Linear,
}

impl Mlp {
    fn new(c_fc1: Linear, c_fc2: Linear, c_proj: Linear) -> Self {
        Self {
            c_fc1,
            c_fc2,
            c_proj,
        }
    }

    fn forward(&self, x: &Tensor) -> Result<Tensor> {
        let x = (candle_nn::ops::silu(&self.c_fc1.forward(x)?)? * self.c_fc2.forward(x)?)?;
        self.c_proj.forward(&x)
    }

    fn load(vb: VarBuilder, cfg: &Config) -> Result<Self> {
        let h_size = cfg.dim;
        let i_size = cfg.hidden_dim;
        let c_fc1 = linear(h_size, i_size, vb.pp("gate_proj"))?;
        let c_fc2 = linear(h_size, i_size, vb.pp("up_proj"))?;
        let c_proj = linear(i_size, h_size, vb.pp("down_proj"))?;
        Ok(Self::new(c_fc1, c_fc2, c_proj))
    }
}

struct Block {
    rms_1: RmsNorm,
    attn: CausalSelfAttention,
    rms_2: RmsNorm,
    mlp: Mlp,
}

impl Block {
    fn new(rms_1: RmsNorm, attn: CausalSelfAttention, rms_2: RmsNorm, mlp: Mlp) -> Self {
        Self {
            rms_1,
            attn,
            rms_2,
            mlp,
        }
    }

    fn forward(&self, x: &Tensor, index_pos: usize, block_idx: usize) -> Result<Tensor> {
        let residual = x;
        let x = self.rms_1.forward(x)?;
        let x = (self.attn.forward(&x, index_pos, block_idx)? + residual)?;
        let residual = &x;
        let x = (self.mlp.forward(&self.rms_2.forward(&x)?)? + residual)?;
        Ok(x)
    }

    fn load(vb: VarBuilder, cache: &Cache, cfg: &Config) -> Result<Self> {
        let attn = CausalSelfAttention::load(vb.pp("self_attn"), cache, cfg)?;
        let mlp = Mlp::load(vb.pp("mlp"), cfg)?;
        let input_layernorm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("input_layernorm"))?;
        let post_attention_layernorm =
            rms_norm(cfg.dim, cfg.norm_eps, vb.pp("post_attention_layernorm"))?;
        Ok(Self::new(
            input_layernorm,
            attn,
            post_attention_layernorm,
            mlp,
        ))
    }
}

pub struct Llama {
    wte: Embedding,
    blocks: Vec<Block>,
    ln_f: RmsNorm,
    lm_head: Linear,
}

impl Llama {
    fn new(wte: Embedding, blocks: Vec<Block>, ln_f: RmsNorm, lm_head: Linear) -> Self {
        Self {
            wte,
            blocks,
            ln_f,
            lm_head,
        }
    }

    pub fn forward(&self, x: &Tensor, index_pos: usize) -> Result<Tensor> {
        let (_b_sz, seq_len) = x.dims2()?;
        let mut x = self.wte.forward(x)?;
        for (block_idx, block) in self.blocks.iter().enumerate() {
            x = block.forward(&x, index_pos, block_idx)?;
        }
        let x = self.ln_f.forward(&x)?;
        // Only the logits for the last position are needed for sampling.
        let x = x.i((.., seq_len - 1, ..))?;
        let logits = self.lm_head.forward(&x)?;
        logits.to_dtype(DType::F32)
    }

    pub fn load(vb: VarBuilder, cache: &Cache, cfg: &Config) -> Result<Self> {
        let wte = embedding(cfg.vocab_size, cfg.dim, vb.pp("model.embed_tokens"))?;
        let lm_head = linear(cfg.dim, cfg.vocab_size, vb.pp("lm_head"))?;
        let norm = rms_norm(cfg.dim, cfg.norm_eps, vb.pp("model.norm"))?;
        let blocks: Vec<_> = (0..cfg.n_layers)
            .map(|i| Block::load(vb.pp(&format!("model.layers.{i}")), cache, cfg).unwrap())
            .collect();
        Ok(Self::new(wte, blocks, norm, lm_head))
    }
}
0
hf_public_repos/candle/candle-wasm-examples/llama2-c/src
hf_public_repos/candle/candle-wasm-examples/llama2-c/src/bin/worker.rs
use yew_agent::PublicWorker;

fn main() {
    console_error_panic_hook::set_once();
    candle_wasm_example_llama2::Worker::register();
}
0
hf_public_repos/candle/candle-wasm-examples/llama2-c/src
hf_public_repos/candle/candle-wasm-examples/llama2-c/src/bin/m.rs
use candle::{Device, Tensor};
use candle_transformers::generation::LogitsProcessor;
use candle_wasm_example_llama2::worker::{Model as M, ModelData};
use wasm_bindgen::prelude::*;

#[wasm_bindgen]
pub struct Model {
    inner: M,
    logits_processor: LogitsProcessor,
    tokens: Vec<u32>,
    repeat_penalty: f32,
}

impl Model {
    fn process(&mut self, tokens: &[u32]) -> candle::Result<String> {
        const REPEAT_LAST_N: usize = 64;
        let dev = Device::Cpu;
        let input = Tensor::new(tokens, &dev)?.unsqueeze(0)?;
        let logits = self.inner.llama.forward(&input, tokens.len())?;
        let logits = logits.squeeze(0)?;
        let logits = if self.repeat_penalty == 1. || tokens.is_empty() {
            logits
        } else {
            let start_at = self.tokens.len().saturating_sub(REPEAT_LAST_N);
            candle_transformers::utils::apply_repeat_penalty(
                &logits,
                self.repeat_penalty,
                &self.tokens[start_at..],
            )?
        };

        let next_token = self.logits_processor.sample(&logits)?;
        self.tokens.push(next_token);
        let text = match self.inner.tokenizer.id_to_token(next_token) {
            Some(text) => text.replace('▁', " ").replace("<0x0A>", "\n"),
            None => "".to_string(),
        };
        Ok(text)
    }
}

#[wasm_bindgen]
impl Model {
    #[wasm_bindgen(constructor)]
    pub fn new(weights: Vec<u8>, tokenizer: Vec<u8>) -> Result<Model, JsError> {
        let model = M::load(ModelData {
            tokenizer,
            model: weights,
        });
        let logits_processor = LogitsProcessor::new(299792458, None, None);
        match model {
            Ok(inner) => Ok(Self {
                inner,
                logits_processor,
                tokens: vec![],
                repeat_penalty: 1.,
            }),
            Err(e) => Err(JsError::new(&e.to_string())),
        }
    }

    #[wasm_bindgen]
    pub fn get_seq_len(&mut self) -> usize {
        self.inner.config.seq_len
    }

    #[wasm_bindgen]
    pub fn init_with_prompt(
        &mut self,
        prompt: String,
        temp: f64,
        top_p: f64,
        repeat_penalty: f32,
        seed: u64,
    ) -> Result<String, JsError> {
        // First reset the cache.
        {
            let mut cache = self.inner.cache.kvs.lock().unwrap();
            for elem in cache.iter_mut() {
                *elem = None
            }
        }
        let temp = if temp <= 0. { None } else { Some(temp) };
        let top_p = if top_p <= 0. || top_p >= 1. {
            None
        } else {
            Some(top_p)
        };
        self.logits_processor = LogitsProcessor::new(seed, temp, top_p);
        self.repeat_penalty = repeat_penalty;
        self.tokens.clear();
        let tokens = self
            .inner
            .tokenizer
            .encode(prompt, true)
            .map_err(|m| JsError::new(&m.to_string()))?
            .get_ids()
            .to_vec();
        let text = self
            .process(&tokens)
            .map_err(|m| JsError::new(&m.to_string()))?;
        Ok(text)
    }

    #[wasm_bindgen]
    pub fn next_token(&mut self) -> Result<String, JsError> {
        let last_token = *self.tokens.last().unwrap();
        let text = self
            .process(&[last_token])
            .map_err(|m| JsError::new(&m.to_string()))?;
        Ok(text)
    }
}

fn main() {}
0
hf_public_repos/candle/candle-wasm-examples/llama2-c/src
hf_public_repos/candle/candle-wasm-examples/llama2-c/src/bin/app.rs
fn main() {
    wasm_logger::init(wasm_logger::Config::new(log::Level::Trace));
    console_error_panic_hook::set_once();
    yew::Renderer::<candle_wasm_example_llama2::App>::new().render();
}
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/bert/README.md
## Running BERT with Candle and WASM

Here, we provide an example of how to run BERT using a Candle-compiled WASM binary and runtime.

### Vanilla JS and WebWorkers

To build and test the UI made in Vanilla JS and WebWorkers, first we need to build the WASM library:

```bash
sh build-lib.sh
```

This will bundle the library under `./build` and we can import it inside our WebWorker like a normal JS module:

```js
import init, { Model } from "./build/m.js";
```

The full example can be found under `./lib-example.html`. All needed assets are fetched from the web, so no need to download anything. Finally, you can preview the example by running a local HTTP server. For example:

```bash
python -m http.server
```

Then open `http://localhost:8000/lib-example.html` in your browser.
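
### Driving the worker from the page

For reference, here is a minimal sketch of how the helpers in `utils.js` can drive `bertWorker.js` from the main page. The model ID, the input sentences, and the way the embeddings are consumed are placeholders — adapt them to your page (see `./lib-example.html` for the full version):

```js
import { getEmbeddings, getModelInfo } from "./utils.js";

// bertWorker.js uses ES module imports, so it must be started as a module worker.
const worker = new Worker("./bertWorker.js", { type: "module" });

const modelID = "intfloat_e5_small_v2";
const { modelURL, tokenizerURL, configURL, document_prefix } =
  getModelInfo(modelID);

// The e5 models expect a prefix in front of each sentence.
const sentences = ["Hello world", "Bonjour le monde"].map(
  (s) => document_prefix + s
);

const { output } = await getEmbeddings(
  worker,
  modelURL, // passed as the `weightsURL` parameter
  tokenizerURL,
  configURL,
  modelID,
  sentences,
  (data) => console.log(data.status) // optional progress callback
);
console.log(output); // one embedding vector per input sentence
```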
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/bert/utils.js
export async function getEmbeddings(
  worker,
  weightsURL,
  tokenizerURL,
  configURL,
  modelID,
  sentences,
  updateStatus = null
) {
  return new Promise((resolve, reject) => {
    worker.postMessage({
      weightsURL,
      tokenizerURL,
      configURL,
      modelID,
      sentences,
    });
    function messageHandler(event) {
      if ("error" in event.data) {
        worker.removeEventListener("message", messageHandler);
        reject(new Error(event.data.error));
      }
      if (event.data.status === "complete") {
        worker.removeEventListener("message", messageHandler);
        resolve(event.data);
      }
      if (updateStatus) updateStatus(event.data);
    }
    worker.addEventListener("message", messageHandler);
  });
}

const MODELS = {
  intfloat_e5_small_v2: {
    base_url: "https://huggingface.co/intfloat/e5-small-v2/resolve/main/",
    search_prefix: "query: ",
    document_prefix: "passage: ",
  },
  intfloat_e5_base_v2: {
    base_url: "https://huggingface.co/intfloat/e5-base-v2/resolve/main/",
    search_prefix: "query: ",
    document_prefix: "passage: ",
  },
  intfloat_multilingual_e5_small: {
    base_url:
      "https://huggingface.co/intfloat/multilingual-e5-small/resolve/main/",
    search_prefix: "query: ",
    document_prefix: "passage: ",
  },
  sentence_transformers_all_MiniLM_L6_v2: {
    base_url:
      "https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2/resolve/refs%2Fpr%2F21/",
    search_prefix: "",
    document_prefix: "",
  },
  sentence_transformers_all_MiniLM_L12_v2: {
    base_url:
      "https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2/resolve/refs%2Fpr%2F4/",
    search_prefix: "",
    document_prefix: "",
  },
};

export function getModelInfo(id) {
  return {
    modelURL: MODELS[id].base_url + "model.safetensors",
    configURL: MODELS[id].base_url + "config.json",
    tokenizerURL: MODELS[id].base_url + "tokenizer.json",
    search_prefix: MODELS[id].search_prefix,
    document_prefix: MODELS[id].document_prefix,
  };
}

export function cosineSimilarity(vec1, vec2) {
  const dot = vec1.reduce((acc, val, i) => acc + val * vec2[i], 0);
  const a = Math.sqrt(vec1.reduce((acc, val) => acc + val * val, 0));
  const b = Math.sqrt(vec2.reduce((acc, val) => acc + val * val, 0));
  return dot / (a * b);
}

export async function getWikiText(article) {
  // thanks to wikipedia for the API
  const URL = `https://en.wikipedia.org/w/api.php?action=query&prop=extracts&exlimit=1&titles=${article}&explaintext=1&exsectionformat=plain&format=json&origin=*`;
  return fetch(URL, {
    method: "GET",
    headers: {
      Accept: "application/json",
    },
  })
    .then((r) => r.json())
    .then((data) => {
      const pages = data.query.pages;
      const pageId = Object.keys(pages)[0];
      const extract = pages[pageId].extract;
      if (extract === undefined || extract === "") {
        throw new Error("No article found");
      }
      return extract;
    })
    .catch((error) => console.error("Error:", error));
}
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/bert/bertWorker.js
// Load the Candle BERT WASM module.
import init, { Model } from "./build/m.js";

async function fetchArrayBuffer(url) {
  const cacheName = "bert-candle-cache";
  const cache = await caches.open(cacheName);
  const cachedResponse = await cache.match(url);
  if (cachedResponse) {
    const data = await cachedResponse.arrayBuffer();
    return new Uint8Array(data);
  }
  const res = await fetch(url, { cache: "force-cache" });
  await cache.put(url, res.clone());
  return new Uint8Array(await res.arrayBuffer());
}

class Bert {
  static instance = {};

  static async getInstance(weightsURL, tokenizerURL, configURL, modelID) {
    if (!this.instance[modelID]) {
      await init();

      self.postMessage({ status: "loading", message: "Loading Model" });
      const [weightsArrayU8, tokenizerArrayU8, configArrayU8] =
        await Promise.all([
          fetchArrayBuffer(weightsURL),
          fetchArrayBuffer(tokenizerURL),
          fetchArrayBuffer(configURL),
        ]);

      this.instance[modelID] = new Model(
        weightsArrayU8,
        tokenizerArrayU8,
        configArrayU8
      );
    } else {
      self.postMessage({ status: "ready", message: "Model Already Loaded" });
    }
    return this.instance[modelID];
  }
}

self.addEventListener("message", async (event) => {
  const {
    weightsURL,
    tokenizerURL,
    configURL,
    modelID,
    sentences,
    normalize = true,
  } = event.data;
  try {
    self.postMessage({ status: "ready", message: "Starting Bert Model" });
    const model = await Bert.getInstance(
      weightsURL,
      tokenizerURL,
      configURL,
      modelID
    );
    self.postMessage({
      status: "embedding",
      message: "Calculating Embeddings",
    });
    const output = model.get_embeddings({
      sentences: sentences,
      normalize_embeddings: normalize,
    });

    self.postMessage({
      status: "complete",
      message: "complete",
      output: output.data,
    });
  } catch (e) {
    self.postMessage({ error: e });
  }
});
0
hf_public_repos/candle/candle-wasm-examples
hf_public_repos/candle/candle-wasm-examples/bert/build-lib.sh
cargo build --target wasm32-unknown-unknown --release
wasm-bindgen ../../target/wasm32-unknown-unknown/release/m.wasm --out-dir build --target web
0