repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/tests/conv_tests.rs | candle-core/tests/conv_tests.rs | use anyhow::Result;
use candle_core::{test_device, test_utils, Device, IndexOp, Tensor};
/* This test is based on the following script.
import torch
torch.manual_seed(4242)
t = torch.randn((1, 4, 5))
w = torch.randn((2, 4, 3))
print(t.flatten())
print(w.flatten())
res = torch.nn.functional.conv1d(t, w)
print(res.flatten())
res = torch.nn.functional.conv1d(t, w, padding=1)
print(res.flatten())
w_t = w.transpose(0, 1)
res = torch.nn.functional.conv_transpose1d(t, w_t)
print(res.shape)
print(res)
res = torch.nn.functional.conv_transpose1d(t, w_t, groups=2)
print(res.shape)
print(res)
*/
fn conv1d(dev: &Device) -> Result<()> {
let t = Tensor::new(
&[
0.4056f32, -0.8689, -0.0773, -1.5630, 1.2279, -0.9287, -1.7030, 0.1370, 0.1866, 0.4145,
1.8025, -0.1536, 2.2013, -0.6836, 0.2477, 1.3127, -0.6957, 0.3278, -1.0124, 0.5599,
],
dev,
)?
.reshape((1, 4, 5))?;
let w = Tensor::new(
&[
-0.8404f32, -0.3490, 0.0130, 1.3123, 0.1763, -1.9249, 1.4270, 0.9421, 0.8670, -0.7181,
-1.1111, 0.8869, -1.2429, 1.8357, 1.6052, -1.3844, 0.3951, -1.2036, 0.6686, 1.6261,
-0.6451, -0.0840, -1.4247, 0.5512,
],
dev,
)?
.reshape((2, 4, 3))?;
let res = t.conv1d(&w, 0, 1, 1, 1)?;
assert_eq!(res.dims(), [1, 2, 3]);
assert_eq!(
test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
[2.6357, -1.3336, 4.1393, -1.1784, 3.5675, 0.5069]
);
let res = t.conv1d(&w, /*padding*/ 1, 1, 1, 1)?;
assert_eq!(res.dims(), [1, 2, 5]);
// Same as pytorch default padding: use zeros.
assert_eq!(
test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
[2.4509, 2.6357, -1.3336, 4.1393, 0.5657, 1.8091, -1.1784, 3.5675, 0.5069, 3.3352]
);
let res = {
let t = Tensor::cat(&[&t.zeros_like()?, &t, &t.zeros_like()?], 0)?;
t.conv1d(&w, /*padding*/ 1, 1, 1, 1)?
};
assert_eq!(res.dims(), [3, 2, 5]);
// Same as pytorch default padding: use zeros.
assert_eq!(
test_utils::to_vec1_round(&res.i(0)?.flatten_all()?, 4)?,
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
);
assert_eq!(
test_utils::to_vec1_round(&res.i(1)?.flatten_all()?, 4)?,
[2.4509, 2.6357, -1.3336, 4.1393, 0.5657, 1.8091, -1.1784, 3.5675, 0.5069, 3.3352]
);
let w = w.transpose(0, 1)?;
// The CPU kernels applied in the contiguous and non contiguous cases are different.
for w in [w.clone(), w.contiguous()?] {
let res = t.conv_transpose1d(&w, 0, 0, 1, 1, 1)?;
assert_eq!(res.dims(), [1, 2, 7]);
assert_eq!(
test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
[
0.0699, -1.2899, 8.3018, 5.5873, 2.4572, -2.6143, -0.0706, 1.8765, 4.8318, 1.1538,
4.7076, -5.9745, -0.8276, 1.621
],
);
let res = t.conv_transpose1d(&w, 0, 0, 1, 1, 2)?;
assert_eq!(res.dims(), [1, 4, 7]);
assert_eq!(
test_utils::to_vec2_round(&res.squeeze(0)?, 4)?,
[
[-1.5596, -1.8099, 2.0407, 4.8764, -0.1743, -0.735, -0.7819],
[0.7816, 3.8152, -0.5926, 2.2515, -5.1844, -0.3157, 1.4721],
[1.6295, 0.52, 6.2611, 0.7109, 2.6315, -1.8793, 0.7113],
[1.0949, 1.0166, 1.7464, 2.4561, -0.79, -0.5119, 0.1488]
]
);
}
Ok(())
}
fn conv1d_small(dev: &Device) -> Result<()> {
let t = Tensor::new(&[0.4056f32, -0.8689, -0.0773, -1.5630], dev)?.reshape((1, 1, 4))?;
let w = Tensor::new(&[1f32, 0., 0.], dev)?.reshape((1, 1, 3))?;
let res = t.conv1d(&w, 0, 1, 1, 1)?;
assert_eq!(res.dims(), [1, 1, 2]);
assert_eq!(
test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
[0.4056, -0.8689]
);
let res = t.conv1d(&w, /*padding*/ 1, 1, 1, 1)?;
assert_eq!(res.dims(), [1, 1, 4]);
assert_eq!(
test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
[0.0, 0.4056, -0.8689, -0.0773],
);
Ok(())
}
/* This test is based on the following script.
import torch
torch.manual_seed(4242)
t = torch.randn((1, 4, 5, 5))
w = torch.randn((2, 4, 3, 3))
print(t.flatten())
print(w.flatten())
res = torch.nn.functional.conv2d(t, w)
print(res.flatten())
w_t = w.transpose(0, 1)
res = torch.nn.functional.conv_transpose2d(t, w_t)
print(res.shape)
print(res)
res = torch.nn.functional.conv2d(t, w, dilation=2)
print(res.shape)
print(res[0])
res = torch.nn.functional.conv_transpose2d(t, w_t, dilation=2)
print(res.shape)
print(res)
*/
fn conv2d(dev: &Device) -> Result<()> {
let t = Tensor::new(
&[
0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997, 3.0616,
1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843, 0.2395,
1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013, -0.6836,
0.2477, 1.3127, -0.2260, 0.2622, -1.2974, -0.8140, -0.8404, -0.3490, 0.0130, 1.3123,
1.7569, -0.3956, -1.8255, 0.1727, -0.3538, 2.6941, 1.0529, 0.4219, -0.2071, 1.1586,
0.4717, 0.3865, -0.5690, -0.5010, -0.1310, 0.7796, 0.6630, -0.2021, 2.6090, 0.2049,
0.6466, -0.5042, -0.0603, -1.6538, -1.2429, 1.8357, 1.6052, -1.3844, 0.3323, -1.3712,
0.9634, -0.4799, -0.6451, -0.0840, -1.4247, 0.5512, -0.1747, -0.5509, -0.3742, 0.3790,
-0.4431, -0.4720, -0.7890, 0.2620, 0.7875, 0.5377, -0.6779, -0.8088, 1.9098, 1.2006,
-0.8, -0.4983, 1.5480, 0.8265, -0.1025, 0.5138, 0.5748, 0.3821, -0.4607, 0.0085,
],
dev,
)?;
let w = Tensor::new(
&[
-0.9325f32, 0.6451, -0.8537, 0.2378, 0.8764, -0.1832, 0.2987, -0.6488, -0.2273,
-2.4184, -0.1192, -0.4821, -0.5079, -0.5766, -2.4729, 1.6734, 0.4558, 0.2851, 1.1514,
-0.9013, 1.0662, -0.1817, -0.0259, 0.1709, 0.5367, 0.7513, 0.8086, -2.2586, -0.5027,
0.9141, -1.3086, -1.3343, -1.5669, -0.1657, 0.7958, 0.1432, 0.3896, -0.4501, 0.1667,
0.0714, -0.0952, 1.2970, -0.1674, -0.3178, 1.0677, 0.3060, 0.7080, 0.1914, 1.1679,
-0.3602, 1.9265, -1.8626, -0.5112, -0.0982, 0.2621, 0.6565, 0.5908, 1.0089, -0.1646,
1.8032, -0.6286, 0.2016, -0.3370, 1.2555, 0.8009, -0.6488, -0.4652, -1.5685, 1.5860,
0.5583, 0.4623, 0.6026,
],
dev,
)?;
let t = t.reshape((1, 4, 5, 5))?;
let w = w.reshape((2, 4, 3, 3))?;
let res = t.conv2d(&w, 0, 1, 1, 1)?;
assert_eq!(res.dims(), [1, 2, 3, 3]);
assert_eq!(
test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
[
-4.2812, 2.0923, 5.2187, 7.5184, 0.752, -14.9426, 10.0087, 4.391, 0.2918, 1.6715,
10.389, 3.6023, -4.2808, 0.2672, 5.3646, -5.2023, -2.1955, -9.4075
]
);
let res = {
let t = Tensor::cat(&[&t.zeros_like()?, &t, &t.zeros_like()?], 0)?;
t.conv2d(&w, 0, 1, 1, 1)?
};
assert_eq!(res.dims(), [3, 2, 3, 3]);
assert_eq!(
test_utils::to_vec1_round(&res.i(0)?.flatten_all()?, 4)?,
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]
);
assert_eq!(
test_utils::to_vec1_round(&res.i(1)?.flatten_all()?, 4)?,
[
-4.2812, 2.0923, 5.2187, 7.5184, 0.752, -14.9426, 10.0087, 4.391, 0.2918, 1.6715,
10.389, 3.6023, -4.2808, 0.2672, 5.3646, -5.2023, -2.1955, -9.4075
]
);
let res = t.conv_transpose2d(&w.transpose(0, 1)?, 0, 0, 1, 1)?;
assert_eq!(res.dims(), [1, 2, 7, 7]);
assert_eq!(
test_utils::to_vec3_round(&res.i(0)?, 4)?,
[
[
[-1.9918, 2.6797, -0.4599, -1.6037, 1.4131, -2.4012, 2.9277],
[1.8016, -3.5361, 1.0757, 3.5395, -8.2168, -3.2023, 0.5375],
[0.8243, 1.8675, 7.8929, -4.0746, -6.4415, 5.1139, 1.6889],
[0.2722, 8.9679, 3.3477, 1.8514, -4.2896, -3.8228, -7.5632],
[-8.5412, -5.8142, -7.1587, -1.6095, 0.4651, 0.2748, -2.0985],
[2.0833, -0.6482, -12.1692, -4.1284, -2.9765, -0.0656, -4.5114],
[5.307, 2.6957, 2.3087, 1.0478, 0.7808, -1.1519, -0.9579]
],
[
[1.089, 0.1872, -0.6408, -0.9897, 0.8503, 1.1019, -0.9211],
[-0.1741, -0.2915, 4.2472, 1.9417, 1.65, 0.6303, -4.7131],
[1.6555, 2.4026, -2.9293, 2.9953, 0.5328, 3.5873, -0.9621],
[-1.4289, -3.2787, 4.1747, -6.0341, -4.6341, -5.7945, 4.142],
[7.5973, 6.4431, 5.9872, 2.1639, -8.6566, 3.3143, -3.4059],
[-0.8775, -3.048, 11.6543, 0.6442, 2.3218, -0.4765, 1.1516],
[-5.5423, -2.5188, 1.0754, -0.0563, -2.9386, -1.1504, 1.0171]
]
]
);
// Dilations.
let res = t.conv2d(&w, 0, 1, 2, 1)?;
assert_eq!(res.dims(), [1, 2, 1, 1]);
assert_eq!(
test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
[2.45, -2.3504],
);
// Transpose and dilations.
let res = t.conv_transpose2d(&w.transpose(0, 1)?, 0, 0, 1, 2)?;
assert_eq!(res.dims(), [1, 2, 9, 9]);
assert_eq!(
test_utils::to_vec3_round(&res.i(0)?, 4)?,
[
[
[-1.9918, 3.1652, -0.6778, -4.3442, 4.4351, 0.6652, -3.0124, -0.6031, 2.9277],
[2.7036, -1.7156, -0.3969, 1.0516, 1.6381, -2.8886, -0.205, 2.4682, -1.0499],
[-0.9459, 3.1631, 3.707, -4.8369, -8.5166, -1.4496, -2.7559, -3.2698, 1.4376],
[-0.2157, 3.7786, -2.0252, -4.2633, 3.6731, -1.5142, 5.9391, -0.2622, -0.141],
[-6.8121, -3.1744, 1.5945, 3.0637, -9.6088, 1.4446, 2.9489, -3.0082, -7.3822],
[0.2371, 3.3303, 0.3861, 2.2646, -4.6784, 4.1235, -0.0109, 0.3176, -0.03],
[-2.5339, -2.9564, -3.4518, -4.4594, -9.1873, -1.9709, -0.4676, 0.51, -3.5024],
[4.007, 0.3067, -2.2954, 1.1105, -0.1992, 1.6372, -2.9268, 0.2807, -1.2787],
[5.307, 1.1317, 1.3518, 0.9049, 3.8116, -0.4075, -0.8874, -0.2241, -0.9579]
],
[
[1.089, -0.6483, 0.0726, -0.4752, -1.3283, 1.7103, 1.0703, 0.1076, -0.9211],
[-0.8629, 0.1376, 0.3202, 2.0955, 0.9696, 2.8988, -1.0012, 1.5049, -0.1278],
[1.9286, -1.5255, -2.9563, 2.4589, 3.3611, -0.6951, 0.3525, -1.7724, -5.9861],
[1.1226, 2.1561, 3.6417, 4.7546, -0.692, 4.4126, -5.1902, 6.0805, 2.3185],
[1.0111, 0.3604, 0.6432, -3.6605, 7.9517, -9.2955, -5.2988, -3.7803, -2.0642],
[3.3172, -1.7967, -3.6576, -2.0942, 1.3158, 0.112, -1.7405, 2.9167, 0.7957],
[5.1001, 1.8995, -1.8639, 1.1262, 9.9629, 2.683, -3.6319, -1.1607, 0.5856],
[-4.8445, -0.5642, 4.2317, 0.0856, 1.2267, -0.5712, 1.736, 1.0997, 0.6908],
[-5.5423, -1.1831, -1.2176, 0.0843, 0.0446, -0.7545, -2.4798, -0.0827, 1.0171]
]
]
);
Ok(())
}
/* This test is based on the following script.
import torch
torch.manual_seed(4242)
t = torch.randn((1, 2, 3, 3))
w = torch.randn((1, 2, 1, 1))
print(t.flatten())
print(w.flatten())
res = torch.nn.functional.conv2d(t, w)
print(res.flatten())
w_t = w.transpose(0, 1)
res = torch.nn.functional.conv_transpose2d(t, w_t)
print(res.shape)
print(res.flatten())
t_t = w.transpose(0, 1)
res = torch.nn.functional.conv_transpose2d(t_t, w)
print(res.shape)
print(res.flatten())
*/
fn conv2d_small(dev: &Device) -> Result<()> {
let t = Tensor::new(
&[
0.4056f32, -0.8689, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.1866, 0.4145,
-0.6266, 0.3529, 2.2013, -0.6836, 0.2477, 1.3127, -0.6957, 0.3278,
],
dev,
)?;
let w = Tensor::new(&[-0.9259f32, 1.3017], dev)?;
let t = t.reshape((1, 2, 3, 3))?;
let w = w.reshape((1, 2, 1, 1))?;
let res = t.conv2d(&w, 0, 1, 1, 1)?;
assert_eq!(res.dims(), [1, 1, 3, 3]);
assert_eq!(
test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
[0.164, -0.0111, -0.1742, 2.6437, -2.0268, 1.1823, 3.2855, -1.0324, 0.2539]
);
let res = t.conv2d(&w, 2, 1, 1, 1)?;
assert_eq!(res.dims(), [1, 1, 7, 7]);
assert_eq!(
test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
[
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1640,
-0.0111, -0.1742, 0.0, 0.0, 0.0, 0.0, 2.6437, -2.0268, 1.1823, 0.0, 0.0, 0.0, 0.0,
3.2855, -1.0324, 0.2539, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
0.0, 0.0, 0.0, 0.0
]
);
let res = t.conv_transpose2d(&w.transpose(0, 1)?, 0, 0, 1, 1)?;
assert_eq!(res.dims(), [1, 1, 3, 3]);
assert_eq!(
test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
[0.164, -0.0111, -0.1742, 2.6437, -2.0268, 1.1823, 3.2855, -1.0324, 0.2539],
);
let res = t.transpose(0, 1)?.conv_transpose2d(&w, 0, 0, 1, 1)?;
assert_eq!(res.dims(), [2, 2, 3, 3]);
assert_eq!(
test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
[
-0.3755, 0.8045, -0.6336, -0.2218, -1.1369, 0.8599, 1.5768, -0.1268, -0.1728, 0.528,
-1.131, 0.8908, 0.3118, 1.5984, -1.2089, -2.2168, 0.1783, 0.2429, -0.3838, 0.5802,
-0.3268, -2.0382, 0.6329, -0.2293, -1.2154, 0.6441, -0.3035, 0.5396, -0.8156, 0.4594,
2.8654, -0.8898, 0.3224, 1.7087, -0.9056, 0.4267
]
);
Ok(())
}
fn conv2d_smaller(dev: &Device) -> Result<()> {
let t = Tensor::new(
&[
0.4056f32, -0.8689, 0.6843, 0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.1866,
],
dev,
)?;
let w = Tensor::new(&[1f32, 1., 1., 1., 1., 1., 1., 1., 1.], dev)?;
let t = t.reshape((1, 1, 3, 3))?;
let w = w.reshape((1, 1, 3, 3))?;
let res = t.conv2d(&w, 0, 1, 1, 1)?;
assert_eq!(res.dims(), [1, 1, 1, 1]);
assert_eq!(
test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
[-0.6197]
);
Ok(())
}
/* This test is based on the following script.
import torch
torch.manual_seed(4242)
t = torch.randn((1, 2, 4, 2))
w = torch.randn((1, 2, 1, 1))
print(t.flatten())
print(w.flatten())
res = torch.nn.functional.conv2d(t, w)
print(res.flatten())
*/
fn conv2d_non_square(dev: &Device) -> Result<()> {
let t = Tensor::new(
&[
0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997, 3.0616,
1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699,
],
dev,
)?;
let w = Tensor::new(&[-1.1351f32, 1.3841], dev)?;
let t = t.reshape((1, 2, 4, 2))?;
let w = w.reshape((1, 2, 1, 1))?;
let res = t.conv2d(&w, 0, 1, 1, 1)?;
assert_eq!(res.dims(), [1, 1, 4, 2]);
assert_eq!(
test_utils::to_vec1_round(&res.flatten_all()?, 4)?,
[0.2312, 5.2238, 2.3772, 1.9076, 2.0256, -0.5776, -1.6028, -1.467]
);
Ok(())
}
/*
import torch
torch.manual_seed(4242)
t = torch.randn((1, 4, 5, 5), requires_grad=True)
w = torch.randn((2, 4, 3, 3), requires_grad=True)
print(t.flatten())
print(w.flatten())
res = torch.nn.functional.conv2d(t, w)
print(res.flatten())
loss = (res ** 2).sum()
print(loss)
loss.backward()
print(t.grad.shape)
print(t.grad.flatten())
print(w.grad.shape)
print(w.grad.flatten())
t.grad.zero_()
w.grad.zero_()
res = torch.nn.functional.conv2d(t, w, stride=2)
print(res.flatten())
loss = (res ** 2).sum()
print(loss)
loss.backward()
print(t.grad.shape)
print(t.grad[0])
print(w.grad.shape)
print(w.grad[0])
*/
fn conv2d_grad(dev: &Device) -> Result<()> {
// conv-transposes are not implemented for metal
use candle_core::Var;
let t = Var::from_slice(
&[
0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997, 3.0616,
1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843, 0.2395,
1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013, -0.6836,
0.2477, 1.3127, -0.2260, 0.2622, -1.2974, -0.8140, -0.8404, -0.3490, 0.0130, 1.3123,
1.7569, -0.3956, -1.8255, 0.1727, -0.3538, 2.6941, 1.0529, 0.4219, -0.2071, 1.1586,
0.4717, 0.3865, -0.5690, -0.5010, -0.1310, 0.7796, 0.6630, -0.2021, 2.6090, 0.2049,
0.6466, -0.5042, -0.0603, -1.6538, -1.2429, 1.8357, 1.6052, -1.3844, 0.3323, -1.3712,
0.9634, -0.4799, -0.6451, -0.0840, -1.4247, 0.5512, -0.1747, -0.5509, -0.3742, 0.3790,
-0.4431, -0.4720, -0.7890, 0.2620, 0.7875, 0.5377, -0.6779, -0.8088, 1.9098, 1.2006,
-0.8, -0.4983, 1.5480, 0.8265, -0.1025, 0.5138, 0.5748, 0.3821, -0.4607, 0.0085,
],
(1, 4, 5, 5),
dev,
)?;
let w = Var::from_slice(
&[
-0.9325f32, 0.6451, -0.8537, 0.2378, 0.8764, -0.1832, 0.2987, -0.6488, -0.2273,
-2.4184, -0.1192, -0.4821, -0.5079, -0.5766, -2.4729, 1.6734, 0.4558, 0.2851, 1.1514,
-0.9013, 1.0662, -0.1817, -0.0259, 0.1709, 0.5367, 0.7513, 0.8086, -2.2586, -0.5027,
0.9141, -1.3086, -1.3343, -1.5669, -0.1657, 0.7958, 0.1432, 0.3896, -0.4501, 0.1667,
0.0714, -0.0952, 1.2970, -0.1674, -0.3178, 1.0677, 0.3060, 0.7080, 0.1914, 1.1679,
-0.3602, 1.9265, -1.8626, -0.5112, -0.0982, 0.2621, 0.6565, 0.5908, 1.0089, -0.1646,
1.8032, -0.6286, 0.2016, -0.3370, 1.2555, 0.8009, -0.6488, -0.4652, -1.5685, 1.5860,
0.5583, 0.4623, 0.6026,
],
(2, 4, 3, 3),
dev,
)?;
let res = t.conv2d(&w, 0, 1, 1, 1)?;
let loss = res.sqr()?.sum_all()?;
assert_eq!(test_utils::to_vec0_round(&loss, 2)?, 741.12f32);
let grads = loss.backward()?;
let grad_t = grads.get(&t).unwrap();
let grad_w = grads.get(&w).unwrap();
assert_eq!(grad_t.dims(), [1, 4, 5, 5]);
assert_eq!(grad_w.dims(), [2, 4, 3, 3]);
assert_eq!(
test_utils::to_vec1_round(&grad_t.flatten_all()?, 2)?,
[
9.29, -2.84, -5.71, 3.38, -7.71, -19.15, 7.02, 29.1, 9.34, 34.73, -22.87, 24.35,
-39.88, -14.01, 21.08, 9.94, 13.63, -34.68, 11.21, -6.26, 7.72, -6.32, -16.64, -1.08,
-20.22, 21.73, -0.37, -4.06, 5.82, -3.65, -30.73, 14.55, 87.7, 31.6, 4.53, -89.78,
-75.37, -57.43, -7.56, 92.96, 18.79, -4.63, -159.75, -42.47, -47.26, 52.88, 37.32,
49.0, 12.82, 2.01, -8.98, 20.18, 16.62, 12.06, 15.38, 20.0, 2.57, -15.22, 72.62,
-10.75, 2.25, -31.2, 3.75, -0.2, 9.76, -0.68, 5.21, -40.44, -22.59, -61.61, 17.28,
20.41, 37.55, 5.23, 6.81, 23.54, 23.62, -9.99, -9.13, 4.87, -35.06, -26.1, 63.48,
25.81, -39.21, -70.68, -46.96, 2.33, 41.81, 82.42, -28.63, -11.78, -35.33, -10.28,
-28.57, -9.13, 7.21, -9.05, -9.62, -11.25
]
);
assert_eq!(
test_utils::to_vec1_round(&grad_w.flatten_all()?, 2)?,
[
-28.92, -22.88, -141.23, 73.35, 61.07, 47.81, -20.0, -73.71, -41.82, -13.59, 21.5,
28.72, 28.57, -46.85, -90.19, 143.61, 16.68, 7.43, 18.88, -90.81, -20.29, 54.79, 82.63,
22.94, 77.81, -16.39, -13.2, 9.34, -40.39, -26.62, 5.33, -60.91, 9.09, -59.37, 7.08,
58.64, 5.55, 20.52, 2.5, -17.25, -6.8, 22.21, 30.15, -7.52, -37.46, 5.67, 22.58, 9.03,
47.05, 17.61, 37.31, -98.13, -14.61, -4.8, -6.36, 44.69, 23.34, 8.37, -13.52, 80.05,
-34.24, -16.36, -12.31, 1.92, -33.62, -14.1, -49.23, -7.39, 11.5, -9.98, 9.66, 29.6
]
);
// Same as before but with stride.
let res = t.conv2d(&w, 0, 2, 1, 1)?;
let loss = res.sqr()?.sum_all()?;
assert_eq!(test_utils::to_vec0_round(&loss, 2)?, 277.16f32);
let grads = loss.backward()?;
let grad_t = grads.get(&t).unwrap();
let grad_w = grads.get(&w).unwrap();
assert_eq!(grad_t.dims(), [1, 4, 5, 5]);
assert_eq!(grad_w.dims(), [2, 4, 3, 3]);
assert_eq!(
test_utils::to_vec3_round(&grad_t.i(0)?, 2)?,
[
[
[9.29, -7.03, 0.94, 3.49, -7.71],
[-1.8, -7.82, 8.9, 8.46, 7.43],
[-25.84, 22.09, -19.27, -0.22, 1.69],
[4.02, 18.53, -18.37, 2.3, -24.51],
[7.72, -9.68, -12.34, 5.6, -20.22]
],
[
[21.73, 3.39, -18.27, 3.86, -3.65],
[8.25, 3.73, 30.73, -8.61, -11.93],
[-72.15, -15.36, -17.53, -12.32, -1.61],
[-22.32, -7.79, -91.82, 6.44, -37.69],
[52.88, 14.44, 42.75, 9.88, 2.01]
],
[
[-8.98, 9.91, 6.75, -4.68, 15.38],
[4.93, -0.33, 9.94, -1.46, 14.78],
[13.62, -30.63, 3.96, -3.58, -4.48],
[-14.13, 1.19, -34.43, 3.08, -33.83],
[17.28, 12.94, 31.83, -3.35, 6.81]
],
[
[23.54, 6.98, -24.52, 0.52, 4.87],
[9.65, 6.18, 1.71, -25.23, -4.93],
[-54.99, -23.66, 3.19, -3.73, 18.58],
[-21.35, -10.39, -39.88, 28.73, -30.76],
[-9.13, 11.12, -14.0, -8.23, -11.25]
]
]
);
assert_eq!(
test_utils::to_vec3_round(&grad_w.i(0)?, 2)?,
[
[
[28.34, -7.91, -45.75],
[21.03, 3.86, 29.86],
[0.72, -36.58, -35.28]
],
[
[-16.04, 11.53, -16.38],
[29.62, -16.32, -48.35],
[57.5, 28.29, 25.81]
],
[
[2.93, -19.6, 1.57],
[27.15, 53.88, -24.64],
[12.74, -22.6, -26.2]
],
[
[-0.18, -14.86, -6.82],
[-19.55, -2.72, 45.9],
[-2.54, 36.97, 27.11]
]
]
);
// Replicate the issue from https://github.com/huggingface/candle/issues/1212
let res = t.i((.., .., 0..4, 0..4))?.conv2d(&w, 0, 2, 1, 1)?;
let loss = res.sqr()?.sum_all()?;
assert_eq!(test_utils::to_vec0_round(&loss, 2)?, 21.12f32);
let grads = loss.backward()?;
let grad_t = grads.get(&t).unwrap();
let grad_w = grads.get(&w).unwrap();
assert_eq!(grad_t.dims(), [1, 4, 5, 5]);
assert_eq!(grad_w.dims(), [2, 4, 3, 3]);
assert_eq!(
test_utils::to_vec3_round(&grad_t.i(0)?, 2)?,
[
[
[9.29, -7.03, 7.87, 0.0, 0.0],
[-1.8, -7.82, 5.9, 0.0, 0.0],
[-3.12, 4.49, 5.52, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0]
],
[
[21.73, 3.39, 4.77, 0.0, 0.0],
[8.25, 3.73, 27.61, 0.0, 0.0],
[-20.55, -5.61, -2.77, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0]
],
[
[-8.98, 9.91, -7.15, 0.0, 0.0],
[4.93, -0.33, 4.56, 0.0, 0.0],
[-6.7, -5.76, -8.05, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0]
],
[
[23.54, 6.98, -10.0, 0.0, 0.0],
[9.65, 6.18, 18.72, 0.0, 0.0],
[3.29, -5.27, 0.79, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0]
]
]
);
assert_eq!(
test_utils::to_vec3_round(&grad_w.i(0)?, 2)?,
[
[
[-3.47, 7.44, 0.66],
[12.89, -3.4, -9.29],
[-14.16, -0.83, 7.14]
],
[
[-3.23, 5.37, -3.02],
[-2.12, -11.24, 1.94],
[6.97, 7.2, 2.99]
],
[
[-4.04, -3.31, 4.87],
[-6.68, -5.68, 1.73],
[-5.54, 4.32, 0.52]
],
[[-4.72, 1.5, 4.72], [3.79, 4.04, 6.76], [-4.6, 5.8, 6.93]]
]
);
// Conv Transpose 2d Test
//tested against following python
// import torch
// torch.manual_seed(4242)
// padding = 4
// outpadding = 2
// dilation = 3
// stride = 3
// input = torch.randn((1, 4, 7, 5), requires_grad=True)
// kernel = torch.randn((4, 2, 3, 5), requires_grad=True)
// print("input", input.flatten())
// print("kernel", kernel.flatten())
// res = torch.nn.functional.conv_transpose2d(
// input,
// kernel,
// stride=stride,
// padding=padding,
// dilation=dilation,
// output_padding=outpadding,
// )
// res.retain_grad()
// print(res.shape)
// loss = (res**2).sum()
// print(loss)
// loss.backward()
// print(input.grad.shape)
// print("input grad", torch.round(input.grad, decimals=1))
// print(kernel.grad.shape)
// print("kernel grad", torch.round(kernel.grad.flatten(), decimals=1))
let padding = 4;
let outpadding = 2;
let dilation = 3;
let stride = 3;
let t = Var::from_slice(
&[
0.4056_f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997,
3.0616, 1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843,
0.2395, 1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013,
-0.6836, 0.2477, 1.3127, -0.2260, 0.2622, -1.2974, -0.8140, -0.8404, -0.3490, 0.0130,
1.3123, 1.7569, -0.3956, -1.8255, 0.1727, -0.3538, 2.6941, 1.0529, 0.4219, -0.2071,
1.1586, 0.4717, 0.3865, -0.5690, -0.5010, -0.1310, 0.7796, 0.6630, -0.2021, 2.6090,
0.2049, 0.6466, -0.5042, -0.0603, -1.6538, -1.2429, 1.8357, 1.6052, -1.3844, 0.3323,
-1.3712, 0.9634, -0.4799, -0.6451, -0.0840, -1.4247, 0.5512, -0.1747, -0.5509, -0.3742,
0.3790, -0.4431, -0.4720, -0.7890, 0.2620, 0.5411, -1.1715, -2.4997, 2.3249, -0.8912,
-0.4733, -0.5701, -2.8888, -1.4112, -0.5471, -0.9234, -1.1660, 0.4189, -0.7465,
-0.6473, 0.1402, 0.7875, 0.5377, -0.6779, -0.8088, -0.4864, -0.2312, 0.9279, 0.1264,
1.5480, 0.8265, -0.1025, 0.5138, -0.2512, 0.1576, 1.2705, 0.3641, -0.9325, 0.6451,
-0.8537, 0.2378, 0.1794, 0.2752, -0.3687, -1.1149, -0.1410, -0.5829, -0.0892, 1.4258,
-2.2789, 0.5270, 0.1825, 1.7007, -0.5263, -0.2954, 0.4440, 0.5537, 0.3492, 0.6186,
1.6475, 0.2219,
],
(1, 4, 7, 5),
dev,
)?;
#[rustfmt::skip]
let w = Var::from_slice(
&[
-1.1744_f32, 0.3266, 2.5893, 1.0142, 0.1763, 0.7752, 0.6604, 0.2029, -0.2145, 0.7234,
-0.3441, -1.5400, -0.6333, 0.6613, 0.2083, 0.6230, -1.7002, 0.3393, 0.4049, 1.0762,
0.2723, 1.4181, 0.0029, -0.2122, 1.7668, 1.4168, 0.3320, -0.2719, 0.7932, -0.7204,
0.4447, 0.1211, 0.5908, 1.0089, -0.1646, 1.8033, -0.6286, 0.2016, -0.3370, 1.2555,
0.8009, -0.6488, -0.4652, -1.5685, 1.5860, 0.5583, 0.4623, 0.6026, 0.8828, 2.4990,
0.6811, -0.3369, 1.3320, 1.7669, -1.1067, 1.2958, -0.9415, -0.9655, -0.4462, 0.7181,
0.5181, -1.1658, -1.8467, -0.7763, 1.2769, 0.8651, 0.9890, 1.5092, 0.7207, -0.8481,
0.7417, 0.3375, -1.2685, 1.4572, 1.0915, 0.1093, -0.8550, -0.5831, -0.6309, -0.2509,
0.5220, -0.0914, 0.7900, 0.1096, 0.3258, 0.2723, -1.0942, -0.3393, -0.1653, 0.5732,
-0.8014, 1.8194, -1.9023, 0.2127, 1.8636, -0.8979, 0.1927, -0.2778, 0.3105, 0.0071,
-1.1823, 0.2476, -0.7178, -1.3821, 1.0769, -0.4376, -0.9967, -0.1227, 1.6197, -1.0604,
0.1372, 0.8141, -0.6163, 0.7304, -0.8285, 2.0636, -0.7176, 0.2495, -0.2581, -0.4478,
],
(4, 2, 3, 5),
dev,
)?;
let res = t.conv_transpose2d(&w, padding, outpadding, stride, dilation)?;
let loss = res.sqr()?.sum_all()?;
assert_eq!(test_utils::to_vec0_round(&loss, 0)?, 2904.0);
let grads = loss.backward()?;
let grad_t = grads.get(&t).unwrap();
let grad_w = grads.get(&w).unwrap();
assert_eq!(grad_t.dims(), [1, 4, 7, 5]);
assert_eq!(grad_w.dims(), [4, 2, 3, 5]);
assert_eq!(
test_utils::to_vec1_round(&grad_w.flatten_all()?, 1)?,
[
// torch gets 89.1
-89.0, -135.3, 136.7, 102.0, -53.4, 117.9, 118.6, -43.9, -218.0, -58.5, -114.3, -150.0,
-15.6, 172.1, 66.3, -64.3, -27.9, -19.8, 31.7, 62.1, 5.5, 92.6, 28.2, -29.6, 55.9,
52.7, -72.7, -119.8, 53.8, -25.5, 128.8, 19.3, 68.0, 190.9, -64.1, -86.2, -111.2,
106.6, -67.7, 37.8, 115.9, 50.4, -77.7, -54.9, 22.3, -4.6, 89.8, 61.7, 122.4, 192.6,
-27.8, -104.6, 57.0, 166.4, 27.1, 6.1, 18.7, -93.2, 31.5, 168.2, -3.7, -99.5, -55.5,
-10.8, 17.5, 20.8, 16.9, 43.8, 42.0, -89.2, 18.8, -9.6, -84.1, 212.6, 19.7, -50.0,
-52.0, -40.0, -166.6, -73.2, -10.8, -73.3, 31.5, -23.4, -79.3, -27.0, -84.4, -42.9,
-20.3, 51.8, -16.7, 76.3, -120.5, -65.8, 96.5, -10.7, -45.9, -88.1, 65.4, -7.0, -1.5,
92.8, -25.1, -114.2, -5.8, -14.8, -51.2, -20.7, 54.2, -79.8, 47.7, -29.2, -8.8, 53.5,
-28.4, 85.0, -18.3, 107.0, 28.3, -71.8
]
);
assert_eq!(
test_utils::to_vec3_round(&grad_t.i(0)?, 1)?,
[
[
[32.3, -41.6, -24.0, 14.1, 17.6],
[-11.8, 72.5, 87.6, 46.4, 61.5],
[115.0, 108.5, -48.6, -63.4, -50.0],
[51.3, 5.4, 31.3, 91.1, -30.9],
[52.7, 92.8, -68.0, -47.0, 83.0],
// pytorch gets -107.1
[-10.2, -107.0, -5.4, 213.1, -31.4],
[-2.4, 65.1, 9.2, -146.2, -24.2]
],
[
[-72.6, -63.9, -61.9, 45.3, 33.0],
[79.3, -0.5, -26.2, 78.2, 42.7],
[90.9, 141.6, 40.1, -62.7, 37.0],
[32.8, 198.2, -0.8, -31.1, 27.3],
// torch gets 48.0
[34.5, 34.9, -47.9, 127.6, -12.3],
[-61.4, -3.2, -2.9, -10.9, -16.6],
[74.6, 60.1, -68.9, 34.5, -50.4]
],
[
[37.5, -56.9, -43.6, -13.5, -9.9],
[40.0, 97.3, 28.6, 14.2, -30.1],
[-22.3, -126.3, -68.8, -8.2, 26.1],
[-32.9, 37.3, 108.5, -54.8, 29.6],
[34.9, -176.9, -125.0, -28.3, -13.9],
[-54.9, 142.6, 62.1, -80.4, -65.6],
[7.4, -91.1, -67.6, 35.0, 39.7]
],
[
[-57.2, -40.9, -10.1, 32.6, 29.4],
[18.7, -18.0, 29.5, -1.2, 59.2],
[-14.0, -74.4, 19.8, -117.0, 58.2],
[-21.8, 163.5, -71.1, -99.0, 80.9],
[-58.9, -10.9, 93.8, -139.6, 98.0],
// torch gets 54.5
[-54.4, 135.3, 6.0, -79.1, 134.6],
[27.5, -76.0, 43.4, -2.8, -7.8]
]
]
);
// Test the same, but then with the following properties, t & w are unmodified.
let padding = 1;
let outpadding = 1;
let dilation = 1;
let stride = 2;
let res = t.conv_transpose2d(&w, padding, outpadding, stride, dilation)?;
let loss = res.sqr()?.sum_all()?;
assert_eq!(test_utils::to_vec0_round(&loss, 0)?, 3627.0); // torch gives 3626.8560
let grads = loss.backward()?;
let grad_t = grads.get(&t).unwrap();
let grad_w = grads.get(&w).unwrap();
assert_eq!(grad_t.dims(), [1, 4, 7, 5]);
assert_eq!(grad_w.dims(), [4, 2, 3, 5]);
#[rustfmt::skip]
assert_eq!(
test_utils::to_vec3_round(&grad_t.i(0)?, 1)?,
[
[
[ 13.2, -40.7, -9.7, -47.3, -82.7],
[ -98.2, 9.7, 57.7, -6.2, 180.7],
[ 100.2, 24.1, 3.7, -100.5, -48.1],
[ -0.3, 13.5, -2.9, 80.0, -49.8],
[ 47.2, -25.6, -74.4, 61.2, -18.4],
[ 4.6, -69.5, 27.9, 66.5, -88.1],
// 4th column on next row; torch is 4.2
[ -12.0, 79.2, -40.0, 4.1, -97.1],
],
[
[ -42.2, -36.5, -51.1, 7.5, 32.3],
[ 74.1, -44.6, -68.8, 19.5, 7.7],
[ 137.1, 54.2, 153.8, -58.0, 45.5],
[ 24.4, -56.8, 9.7, -41.0, -14.5],
[ -3.7, 72.6, 8.3, 134.8, 40.5],
[ 43.2, -56.9, -47.5, -89.4, -95.4],
[ 68.2, 108.1, -80.0, 57.0, -121.1]
],
[
[ 31.1, -11.4, -34.8, 33.1, -44.2],
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | true |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/tests/pth_tests.rs | candle-core/tests/pth_tests.rs | /// Regression test for pth files not loading on Windows.
#[test]
fn test_pth() {
let tensors = candle_core::pickle::PthTensors::new("tests/test.pt", None).unwrap();
tensors.get("test").unwrap().unwrap();
}
#[test]
fn test_pth_with_key() {
let tensors =
candle_core::pickle::PthTensors::new("tests/test_with_key.pt", Some("model_state_dict"))
.unwrap();
tensors.get("test").unwrap().unwrap();
}
#[test]
fn test_pth_fortran_contiguous() {
let tensors =
candle_core::pickle::PthTensors::new("tests/fortran_tensor_3d.pth", None).unwrap();
let tensor = tensors.get("tensor_fortran").unwrap().unwrap();
assert_eq!(tensor.dims3().unwrap(), (2, 3, 4));
assert_eq!(
tensor.to_vec3::<i64>().unwrap(),
[
[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
[[13, 14, 15, 16], [17, 18, 19, 20], [21, 22, 23, 24]]
]
);
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/tests/custom_op_tests.rs | candle-core/tests/custom_op_tests.rs | use candle_core::backend::BackendStorage;
use candle_core::cpu_backend;
use candle_core::test_utils::to_vec1_round;
use candle_core::{CpuStorage, CustomOp1, DType, Device, Error, Layout, Result, Shape, Tensor};
fn fwd<T: num_traits::Float>(v: T, alpha: f64) -> T {
if v.is_sign_positive() {
v
} else {
let alpha = T::from(alpha).unwrap_or(T::nan());
(v.exp() - T::one()) * alpha
}
}
struct Elu {
alpha: f64,
}
impl CustomOp1 for Elu {
fn name(&self) -> &'static str {
"elu"
}
fn cpu_fwd(&self, s: &CpuStorage, l: &Layout) -> Result<(CpuStorage, Shape)> {
let storage = candle_core::map_dtype!(
"elu",
s,
|s| cpu_backend::unary_map(s, l, |v| fwd(v, self.alpha)),
(F8E4M3, BF16, F16, F32, F64)
);
Ok((storage, l.shape().clone()))
}
}
#[test]
fn custom_op1_no_backward() -> Result<()> {
let cpu = &Device::Cpu;
let t = Tensor::arange(0u32, 12u32, cpu)?.to_dtype(DType::F32)?;
let t = (t - 5.)?;
let elu_t = t.apply_op1_no_bwd(&Elu { alpha: 1. })?;
assert_eq!(
to_vec1_round(&elu_t, 4)?,
&[-0.9933, -0.9817, -0.9502, -0.8647, -0.6321, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
);
Ok(())
}
// Define a similar struct as Elu but with backward support.
fn bwd<T: num_traits::Float>(v: T, alpha: f64) -> T {
if v.is_sign_positive() {
T::one()
} else {
let alpha = T::from(alpha).unwrap_or(T::nan());
v.exp() * alpha
}
}
struct EluBackward {
alpha: f64,
}
impl CustomOp1 for EluBackward {
fn name(&self) -> &'static str {
"elu-bwd"
}
fn cpu_fwd(&self, s: &CpuStorage, l: &Layout) -> Result<(CpuStorage, Shape)> {
let storage = candle_core::map_dtype!(
"elu-bwd",
s,
|s| cpu_backend::unary_map(s, l, |v| bwd(v, self.alpha)),
(F8E4M3, BF16, F16, F32, F64)
);
Ok((storage, l.shape().clone()))
}
}
struct EluWithBackward(Elu);
impl EluWithBackward {
fn new(alpha: f64) -> Self {
Self(Elu { alpha })
}
}
impl CustomOp1 for EluWithBackward {
fn name(&self) -> &'static str {
"elu"
}
fn cpu_fwd(&self, s: &CpuStorage, l: &Layout) -> Result<(CpuStorage, Shape)> {
self.0.cpu_fwd(s, l)
}
fn bwd(&self, arg: &Tensor, _res: &Tensor, grad_res: &Tensor) -> Result<Option<Tensor>> {
let alpha = self.0.alpha;
let bwd = arg.apply_op1(EluBackward { alpha })?;
Ok(Some(grad_res.mul(&bwd)?))
}
}
#[test]
fn custom_op1_with_backward() -> Result<()> {
let cpu = &Device::Cpu;
let t = candle_core::Var::new(&[-2f32, 0f32, 2f32], cpu)?;
let elu_t = t.apply_op1(EluWithBackward::new(2.))?;
assert_eq!(to_vec1_round(&elu_t, 4)?, &[-1.7293, 0.0, 2.0]);
let grads = elu_t.backward()?;
let grad_x = grads.get(&t).unwrap();
assert_eq!(to_vec1_round(grad_x, 4)?, [0.2707, 1.0, 1.0]);
Ok(())
}
impl candle_core::InplaceOp1 for Elu {
fn name(&self) -> &'static str {
"elu"
}
fn cpu_fwd(&self, s: &mut CpuStorage, _l: &Layout) -> Result<()> {
let alpha = self.alpha;
match s {
CpuStorage::F8E4M3(s) => s.iter_mut().for_each(|v| *v = fwd(*v, alpha)),
CpuStorage::BF16(s) => s.iter_mut().for_each(|v| *v = fwd(*v, alpha)),
CpuStorage::F16(s) => s.iter_mut().for_each(|v| *v = fwd(*v, alpha)),
CpuStorage::F32(s) => s.iter_mut().for_each(|v| *v = fwd(*v, alpha)),
CpuStorage::F64(s) => s.iter_mut().for_each(|v| *v = fwd(*v, alpha)),
_ => candle_core::bail!("unsupported dtype for inplace elu"),
}
Ok(())
}
}
#[test]
fn inplace_op1() -> Result<()> {
let cpu = &Device::Cpu;
let t = Tensor::arange(0u32, 12u32, cpu)?.to_dtype(DType::F32)?;
let t = (t - 5.)?;
t.inplace_op1(&Elu { alpha: 1. })?;
assert_eq!(
to_vec1_round(&t, 4)?,
&[-0.9933, -0.9817, -0.9502, -0.8647, -0.6321, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
);
Ok(())
}
#[cfg(all(feature = "ug", any(feature = "cuda", feature = "metal")))]
#[allow(clippy::approx_constant)]
#[test]
fn ug_op() -> Result<()> {
let kernel = {
use candle_ug::lang::op;
let layout = candle_ug::Layout::from_shape(&[12]);
let ptr = op::Arg::ptr(candle_ug::DType::F32);
let src = op::load(ptr.id(), layout.clone(), candle_ug::DType::F32)?;
let src = op::unary(op::UnaryOp::Exp, src)?;
let st = op::store(ptr.id(), layout, src)?;
let kernel = op::Kernel::new("exp".to_string(), vec![ptr], vec![st]);
let opts: candle_ug::lower_op::Opts = Default::default();
kernel.lower(&opts)?
};
let device = if candle_core::utils::cuda_is_available() {
Device::new_cuda(0)?
} else if candle_core::utils::metal_is_available() {
Device::new_metal(0)?
} else {
candle_core::bail!("metal/cuda is mandatory for this test")
};
let op = candle_core::UgIOp1::new("test", kernel, &device)?;
let t = Tensor::arange(0u32, 12u32, &device)?.to_dtype(DType::F32)?;
t.inplace_op1(&op)?;
assert_eq!(
to_vec1_round(&t, 2)?,
&[
1.0, 2.72, 7.39, 20.09, 54.6, 148.41, 403.43, 1096.63, 2980.96, 8103.08, 22026.47,
59874.13
]
);
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/tests/matmul_tests.rs | candle-core/tests/matmul_tests.rs | use candle_core::{test_device, DType, Device, IndexOp, Result, Tensor};
fn matmul(device: &Device) -> Result<()> {
let data = vec![1.0f32, 2.0, 3.0, 4.0];
let a = Tensor::from_slice(&data, (2, 2), device)?;
let data = vec![1.0f32, 2.0, 3.0, 4.0];
let b = Tensor::from_slice(&data, (2, 2), device)?;
let c = a.matmul(&b)?;
assert_eq!(c.to_vec2::<f32>()?, &[[7.0f32, 10.0], [15.0, 22.0]]);
let data = vec![1.0f32, 2.0];
let a = Tensor::from_slice(&data, (2, 1), device)?;
let data = vec![3.0f32, 4.0];
let b = Tensor::from_slice(&data, (1, 2), device)?;
let c = a.matmul(&b)?;
assert_eq!(c.to_vec2::<f32>()?, &[&[3.0, 4.0], &[6.0, 8.0]]);
let data: Vec<_> = (0..6).map(|i| i as f32).collect();
let a = Tensor::from_slice(&data, (2, 3), device)?;
let data: Vec<_> = (0..6).map(|i| (i + 2) as f32).collect();
let b = Tensor::from_slice(&data, (3, 2), device)?;
let c = a.matmul(&b)?;
assert_eq!(c.to_vec2::<f32>()?, &[&[16., 19.], &[52., 64.]]);
let data: Vec<_> = (0..12).map(|i| i as f32).collect();
let a = Tensor::from_slice(&data, (2, 2, 3), device)?;
let data: Vec<_> = (0..12).map(|i| (i + 2) as f32).collect();
let b = Tensor::from_slice(&data, (2, 3, 2), device)?;
let expected = [[[16., 19.], [52., 64.]], [[214., 235.], [304., 334.]]];
let c = a.matmul(&b)?;
assert_eq!(c.to_vec3::<f32>()?, &expected);
// Also perform the matmul on contiguous transposed versions.
let a_tt = a.t()?.contiguous()?.t()?;
assert!(!a_tt.is_contiguous());
assert_eq!(a.dims(), a_tt.dims());
assert_eq!(a_tt.stride(), &[6, 1, 2]);
let b_tt = b.t()?.contiguous()?.t()?;
assert!(!b_tt.is_contiguous());
assert_eq!(b.dims(), b_tt.dims());
assert_eq!(b_tt.stride(), &[6, 1, 3]);
assert_eq!(a_tt.matmul(&b)?.to_vec3::<f32>()?, &expected);
assert_eq!(a.matmul(&b_tt)?.to_vec3::<f32>()?, &expected);
assert_eq!(a_tt.matmul(&b_tt)?.to_vec3::<f32>()?, &expected);
Ok(())
}
fn matmul_bf16(device: &Device) -> Result<()> {
if !device.supports_bf16() {
return Ok(());
}
let data = vec![1.0f32, 2.0, 3.0, 4.0];
let a = Tensor::from_slice(&data, (2, 2), device)?.to_dtype(DType::BF16)?;
let data = vec![1.0f32, 2.0, 3.0, 4.0];
let b = Tensor::from_slice(&data, (2, 2), device)?.to_dtype(DType::BF16)?;
let c = a.matmul(&b)?.to_dtype(DType::F32)?;
assert_eq!(c.to_vec2::<f32>()?, &[[7.0f32, 10.0], [15.0, 22.0]]);
Ok(())
}
fn broadcast_matmul(device: &Device) -> Result<()> {
let lhs = Tensor::randn(0f32, 1f32, (3, 1, 4, 5), device)?;
let rhs = Tensor::randn(0f32, 1f32, (6, 5, 2), device)?;
let out = lhs.broadcast_matmul(&rhs)?;
assert_eq!(out.dims(), &[3, 6, 4, 2]);
for idx1 in 0..3 {
for idx2 in 0..6 {
let out = out.i((idx1, idx2))?;
let lhs = lhs.i((idx1, 0))?;
let rhs = rhs.i(idx2)?;
let out2 = lhs.matmul(&rhs);
let sum_diff2 = (out - out2)?.sqr()?.sum_all()?;
// With cuda, we see errors of up to ~1e-12.
assert!(sum_diff2.to_vec0::<f32>()? < 1e-6)
}
}
Ok(())
}
#[test]
fn tensor_dot() -> Result<()> {
let lhs = Tensor::new(&[1., 2., 3.], &Device::Cpu)?;
let rhs = Tensor::new(&[4., 5., 6.], &Device::Cpu)?;
let expected = Tensor::new(32., &Device::Cpu)?;
let dot_ret = lhs.dot(&rhs)?;
candle_core::test_utils::assert_tensor_eq(&dot_ret, &expected)?;
Ok(())
}
#[test]
fn tensor_mv() -> Result<()> {
let mat = Tensor::new(&[[1., 2., 3.], [4., 5., 6.]], &Device::Cpu)?;
let vec = Tensor::new(&[1., 1., 1.], &Device::Cpu)?;
let expected = Tensor::new(&[6., 15.], &Device::Cpu)?;
let mv_ret = mat.mv(&vec)?;
candle_core::test_utils::assert_tensor_eq(&mv_ret, &expected)?;
Ok(())
}
// https://github.com/huggingface/candle/issues/1948
fn squeeze_mm(device: &Device) -> Result<()> {
let seq_len = 8_usize;
let a = Tensor::zeros((1, seq_len, 16), DType::F32, device)?;
let x = a.i((.., seq_len - 1, ..))?;
let w = Tensor::zeros((32, 16), DType::F32, device)?.t()?;
let x = x.matmul(&w)?;
assert_eq!(x.dims(), &[1, 32]);
Ok(())
}
// https://github.com/huggingface/candle/issues/1992
fn mm_layout(device: &Device) -> Result<()> {
let a = Tensor::arange(0f32, 16f32, device)?.reshape((1, 1, 4, 4))?;
let b = Tensor::arange(0f32, 8f32, device)?.reshape((1, 1, 4, 2))?;
let mm1 = a.matmul(&b)?;
// Forces the layout to be:
// shape: [1, 1, 4, 2], stride: [8, 2, 2, 1], start_offset: 0
// This is still a contiguous matrix but matmul checks are only the two last dimensions have
// non 1 sizes but matmul check may be reluctant to handle it.
let b = b.transpose(1, 2)?.force_contiguous()?.transpose(1, 2)?;
let mm2 = a.matmul(&b)?;
let diff = (mm1 - mm2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
assert_eq!(diff, 0.);
Ok(())
}
test_device!(matmul, matmul_cpu, matmul_gpu, matmul_metal);
test_device!(
matmul_bf16,
matmul_bf16_cpu,
matmul_bf16_gpu,
matmul_bf16_metal
);
test_device!(
broadcast_matmul,
broadcast_matmul_cpu,
broadcast_matmul_gpu,
broadcast_matmul_metal
);
test_device!(squeeze_mm, squeeze_mm_cpu, squeeze_mm_gpu, squeeze_mm_metal);
test_device!(mm_layout, mm_layout_cpu, mm_layout_gpu, mm_layout_metal);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/tests/layout_tests.rs | candle-core/tests/layout_tests.rs | use candle::{test_device, Device, IndexOp, Result, Tensor};
use candle_core as candle;
fn contiguous(device: &Device) -> Result<()> {
let tensor = Tensor::arange(0u32, 24u32, device)?.reshape((2, 3, 4))?;
assert_eq!(
tensor.to_vec3::<u32>()?,
&[
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]],
[[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]
]
);
assert_eq!(
tensor.t()?.contiguous()?.to_vec3::<u32>()?,
&[
[[0, 4, 8], [1, 5, 9], [2, 6, 10], [3, 7, 11]],
[[12, 16, 20], [13, 17, 21], [14, 18, 22], [15, 19, 23]]
]
);
assert_eq!(
tensor.transpose(0, 1)?.contiguous()?.to_vec3::<u32>()?,
&[
[[0, 1, 2, 3], [12, 13, 14, 15]],
[[4, 5, 6, 7], [16, 17, 18, 19]],
[[8, 9, 10, 11], [20, 21, 22, 23]]
]
);
assert_eq!(
tensor.transpose(0, 1)?.flatten_all()?.to_vec1::<u32>()?,
&[0, 1, 2, 3, 12, 13, 14, 15, 4, 5, 6, 7, 16, 17, 18, 19, 8, 9, 10, 11, 20, 21, 22, 23]
);
assert_eq!(
tensor
.i(1..)?
.transpose(0, 1)?
.contiguous()?
.to_vec3::<u32>()?,
&[[[12, 13, 14, 15]], [[16, 17, 18, 19]], [[20, 21, 22, 23]]]
);
assert_eq!(
tensor.transpose(0, 2)?.contiguous()?.to_vec3::<u32>()?,
&[
[[0, 12], [4, 16], [8, 20]],
[[1, 13], [5, 17], [9, 21]],
[[2, 14], [6, 18], [10, 22]],
[[3, 15], [7, 19], [11, 23]]
]
);
Ok(())
}
test_device!(contiguous, contiguous_cpu, contiguous_gpu, contiguous_metal);
#[test]
fn strided_blocks() -> Result<()> {
use candle::Device::Cpu;
let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?;
match tensor.strided_blocks() {
candle::StridedBlocks::SingleBlock { start_offset, len } => {
assert_eq!(start_offset, 0);
assert_eq!(len, 24);
}
candle::StridedBlocks::MultipleBlocks { .. } => {
panic!("unexpected block structure")
}
};
let tensor = Tensor::arange(0u32, 26u32, &Cpu)?
.i(2..)?
.reshape((2, 3, 4))?;
match tensor.strided_blocks() {
candle::StridedBlocks::SingleBlock { start_offset, len } => {
assert_eq!(start_offset, 2);
assert_eq!(len, 24);
}
candle::StridedBlocks::MultipleBlocks { .. } => {
panic!("unexpected block structure")
}
};
let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?;
let tensor = tensor.i(1)?;
match tensor.strided_blocks() {
candle::StridedBlocks::SingleBlock { start_offset, len } => {
assert_eq!(start_offset, 12);
assert_eq!(len, 12);
}
candle::StridedBlocks::MultipleBlocks { .. } => {
panic!("unexpected block structure")
}
};
let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?;
let tensor = tensor.i((.., 1))?.contiguous()?;
match tensor.strided_blocks() {
candle::StridedBlocks::SingleBlock { start_offset, len } => {
assert_eq!(start_offset, 0);
assert_eq!(len, 8);
assert_eq!(tensor.to_vec2::<u32>()?, &[[4, 5, 6, 7], [16, 17, 18, 19]]);
}
candle::StridedBlocks::MultipleBlocks { .. } => {
panic!("unexpected block structure")
}
};
let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?;
let tensor = tensor.i((.., 1))?;
match tensor.strided_blocks() {
candle::StridedBlocks::SingleBlock { .. } => {
panic!("unexpected block structure")
}
candle::StridedBlocks::MultipleBlocks {
block_len,
block_start_index,
} => {
assert_eq!(block_len, 4);
assert_eq!(block_start_index.collect::<Vec<_>>(), &[4, 16])
}
};
let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?;
match tensor.t()?.strided_blocks() {
candle::StridedBlocks::SingleBlock { .. } => {
panic!("unexpected block structure")
}
candle::StridedBlocks::MultipleBlocks {
block_start_index,
block_len,
} => {
assert_eq!(block_len, 1);
assert_eq!(
block_start_index.collect::<Vec<_>>(),
&[
0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11, 12, 16, 20, 13, 17, 21, 14, 18, 22, 15,
19, 23
]
)
}
};
let tensor = Tensor::arange(0u32, 24u32, &Cpu)?.reshape((2, 3, 4))?;
match tensor.transpose(0, 1)?.strided_blocks() {
candle::StridedBlocks::SingleBlock { .. } => {
panic!("unexpected block structure")
}
candle::StridedBlocks::MultipleBlocks {
block_start_index,
block_len,
} => {
assert_eq!(block_len, 4);
assert_eq!(
block_start_index.collect::<Vec<_>>(),
&[0, 12, 4, 16, 8, 20]
)
}
};
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/tests/serialization_tests.rs | candle-core/tests/serialization_tests.rs | use candle_core::{DType, Result, Tensor};
struct TmpFile(std::path::PathBuf);
impl TmpFile {
fn create(base: &str) -> TmpFile {
let filename = std::env::temp_dir().join(format!(
"candle-{}-{}-{:?}",
base,
std::process::id(),
std::thread::current().id(),
));
TmpFile(filename)
}
}
impl std::convert::AsRef<std::path::Path> for TmpFile {
fn as_ref(&self) -> &std::path::Path {
self.0.as_path()
}
}
impl Drop for TmpFile {
fn drop(&mut self) {
std::fs::remove_file(&self.0).unwrap()
}
}
#[test]
fn npy() -> Result<()> {
let npy = Tensor::read_npy("tests/test.npy")?;
assert_eq!(
npy.to_dtype(DType::U8)?.to_vec1::<u8>()?,
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
);
Ok(())
}
#[test]
fn npz() -> Result<()> {
let npz = Tensor::read_npz("tests/test.npz")?;
assert_eq!(npz.len(), 2);
assert_eq!(npz[0].0, "x");
assert_eq!(npz[1].0, "x_plus_one");
assert_eq!(
npz[1].1.to_dtype(DType::U8)?.to_vec1::<u8>()?,
[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
);
Ok(())
}
#[test]
fn safetensors() -> Result<()> {
use candle_core::safetensors::Load;
let tmp_file = TmpFile::create("st");
let t = Tensor::arange(0f32, 24f32, &candle_core::Device::Cpu)?;
t.save_safetensors("t", &tmp_file)?;
// Load from file.
let st = candle_core::safetensors::load(&tmp_file, &candle_core::Device::Cpu)?;
let t2 = st.get("t").unwrap();
let diff = (&t - t2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
assert_eq!(diff, 0f32);
// Load from bytes.
let bytes = std::fs::read(tmp_file)?;
let st = candle_core::safetensors::SliceSafetensors::new(&bytes)?;
let t2 = st.get("t").unwrap().load(&candle_core::Device::Cpu);
let diff = (&t - t2)?.abs()?.sum_all()?.to_vec0::<f32>()?;
assert_eq!(diff, 0f32);
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/tests/display_tests.rs | candle-core/tests/display_tests.rs | use anyhow::Result;
use candle_core::{DType, Device::Cpu, Tensor};
#[test]
fn display_scalar() -> Result<()> {
let t = Tensor::new(1234u32, &Cpu)?;
let s = format!("{t}");
assert_eq!(&s, "[1234]\nTensor[[], u32]");
let t = t.to_dtype(DType::F32)?.neg()?;
let s = format!("{}", (&t / 10.0)?);
assert_eq!(&s, "[-123.4000]\nTensor[[], f32]");
let s = format!("{}", (&t / 1e8)?);
assert_eq!(&s, "[-1.2340e-5]\nTensor[[], f32]");
let s = format!("{}", (&t * 1e8)?);
assert_eq!(&s, "[-1.2340e11]\nTensor[[], f32]");
let s = format!("{}", (&t * 0.)?);
assert_eq!(&s, "[0.]\nTensor[[], f32]");
Ok(())
}
#[test]
fn display_vector() -> Result<()> {
let t = Tensor::new::<&[u32; 0]>(&[], &Cpu)?;
let s = format!("{t}");
assert_eq!(&s, "[]\nTensor[[0], u32]");
let t = Tensor::new(&[0.1234567, 1.0, -1.2, 4.1, f64::NAN], &Cpu)?;
let s = format!("{t}");
assert_eq!(
&s,
"[ 0.1235, 1.0000, -1.2000, 4.1000, NaN]\nTensor[[5], f64]"
);
let t = (Tensor::ones(50, DType::F32, &Cpu)? * 42.)?;
let s = format!("\n{t}");
let expected = r#"
[42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42.,
42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42.,
42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42., 42.,
42., 42.]
Tensor[[50], f32]"#;
assert_eq!(&s, expected);
let t = (Tensor::ones(11000, DType::F32, &Cpu)? * 42.)?;
let s = format!("{t}");
assert_eq!(
&s,
"[42., 42., 42., ..., 42., 42., 42.]\nTensor[[11000], f32]"
);
Ok(())
}
#[test]
fn display_multi_dim() -> Result<()> {
let t = (Tensor::ones((200, 100), DType::F32, &Cpu)? * 42.)?;
let s = format!("\n{t}");
let expected = r#"
[[42., 42., 42., ..., 42., 42., 42.],
[42., 42., 42., ..., 42., 42., 42.],
[42., 42., 42., ..., 42., 42., 42.],
...
[42., 42., 42., ..., 42., 42., 42.],
[42., 42., 42., ..., 42., 42., 42.],
[42., 42., 42., ..., 42., 42., 42.]]
Tensor[[200, 100], f32]"#;
assert_eq!(&s, expected);
let t = t.reshape(&[2, 1, 1, 100, 100])?;
let t = format!("\n{t}");
let expected = r#"
[[[[[42., 42., 42., ..., 42., 42., 42.],
[42., 42., 42., ..., 42., 42., 42.],
[42., 42., 42., ..., 42., 42., 42.],
...
[42., 42., 42., ..., 42., 42., 42.],
[42., 42., 42., ..., 42., 42., 42.],
[42., 42., 42., ..., 42., 42., 42.]]]],
[[[[42., 42., 42., ..., 42., 42., 42.],
[42., 42., 42., ..., 42., 42., 42.],
[42., 42., 42., ..., 42., 42., 42.],
...
[42., 42., 42., ..., 42., 42., 42.],
[42., 42., 42., ..., 42., 42., 42.],
[42., 42., 42., ..., 42., 42., 42.]]]]]
Tensor[[2, 1, 1, 100, 100], f32]"#;
assert_eq!(&t, expected);
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/tests/pool_tests.rs | candle-core/tests/pool_tests.rs | use candle_core::{test_device, test_utils, Device, IndexOp, Result, Tensor};
// https://github.com/huggingface/candle/issues/364
fn avg_pool2d(dev: &Device) -> Result<()> {
let data: Vec<f32> = vec![
1., 1., 1., 1., 0., 0., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
];
let t = Tensor::from_vec(data, (1, 1, 4, 4), dev)?;
let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?;
assert_eq!(pool.to_vec2::<f32>()?, [[0.5f32, 1.], [1., 1.]]);
let data: Vec<f32> = vec![
1., 2., 1., 3., 0., 0., 1., 1., 1., 1., 1., 1., 5., 1., 1., 1.,
];
let t = Tensor::from_vec(data, (1, 1, 2, 8), dev)?;
let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?;
assert_eq!(pool.to_vec2::<f32>()?, [[5. / 4., 6. / 4., 6. / 4., 1.]]);
Ok(())
}
fn max_pool2d(dev: &Device) -> Result<()> {
let data: Vec<f32> = vec![
1., 2., 1., 3., 0., 0., 1., 1., 1., 1., 1., 1., 5., 1., 1., 1.,
];
let t = Tensor::from_vec(data, (1, 1, 4, 4), dev)?;
let pool = t.max_pool2d(2)?.squeeze(0)?.squeeze(0)?;
assert_eq!(pool.to_vec2::<f32>()?, [[2f32, 3.], [5., 1.]]);
let t = t.reshape((1, 1, 2, 8))?;
let pool = t.max_pool2d(2)?.squeeze(0)?.squeeze(0)?;
assert_eq!(pool.to_vec2::<f32>()?, [[2.0, 3.0, 5.0, 1.0]]);
Ok(())
}
/* This test corresponds to the following PyTorch script.
import torch
torch.manual_seed(4242)
t = torch.randn((1, 2, 4, 4))
print(t.flatten())
res = torch.nn.functional.avg_pool2d(t, 2)
print(res)
*/
fn avg_pool2d_pytorch(dev: &Device) -> Result<()> {
if dev.is_metal() {
return Ok(());
}
let t = Tensor::new(
&[
0.4056f32, -0.8689, -0.0773, -1.5630, -2.8012, -1.5059, 0.3972, 1.0852, 0.4997, 3.0616,
1.6541, 0.0964, -0.8338, -1.6523, -0.8323, -0.1699, 0.0823, 0.3526, 0.6843, 0.2395,
1.2279, -0.9287, -1.7030, 0.1370, 0.6047, 0.3770, -0.6266, 0.3529, 2.2013, -0.6836,
0.2477, 1.3127,
],
dev,
)?
.reshape((1, 2, 4, 4))?;
let pool = t.avg_pool2d(2)?.squeeze(0)?;
assert_eq!(
test_utils::to_vec3_round(&pool, 4)?,
[
[[-1.1926, -0.0395], [0.2688, 0.1871]],
[[0.1835, -0.1606], [0.6249, 0.3217]]
]
);
let pool = t.avg_pool2d(3)?.squeeze(0)?;
assert_eq!(
test_utils::to_vec3_round(&pool, 4)?,
[[[0.085]], [[0.0078]]]
);
let t = t.reshape((1, 1, 4, 8))?;
let pool = t.avg_pool2d(2)?.squeeze(0)?.squeeze(0)?;
assert_eq!(
test_utils::to_vec2_round(&pool, 4)?,
[
[0.7745, 0.0276, -1.6983, 0.12],
[0.3542, 0.1625, 0.4542, -0.0014]
]
);
Ok(())
}
fn upsample_nearest2d(dev: &Device) -> Result<()> {
let t = Tensor::arange(0f32, 6f32, dev)?.reshape((1, 1, 2, 3))?;
let upsampled = t.upsample_nearest2d(4, 6)?.i(0)?.i(0)?;
assert_eq!(
t.i(0)?.i(0)?.to_vec2::<f32>()?,
[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]
);
assert_eq!(
upsampled.to_vec2::<f32>()?,
[
[0.0, 0.0, 1.0, 1.0, 2.0, 2.0],
[0.0, 0.0, 1.0, 1.0, 2.0, 2.0],
[3.0, 3.0, 4.0, 4.0, 5.0, 5.0],
[3.0, 3.0, 4.0, 4.0, 5.0, 5.0]
]
);
Ok(())
}
test_device!(avg_pool2d, avg_pool2d_cpu, avg_pool2d_gpu, avg_pool2d_metal);
test_device!(
avg_pool2d_pytorch,
avg_pool2d_pytorch_cpu,
avg_pool2d_pytorch_gpu,
avg_pool2d_pytorch_metal
);
test_device!(max_pool2d, max_pool2d_cpu, max_pool2d_gpu, max_pool2d_metal);
test_device!(
upsample_nearest2d,
upsample_nearest2d_cpu,
upsample_nearest2d_gpu,
upsample_nearest2d_metal
);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/tests/bilinear_tests.rs | candle-core/tests/bilinear_tests.rs | use candle_core::{test_device, Device, IndexOp, Result, Tensor};
// ============================================================================
// PyTorch Exact Comparison Tests
// ============================================================================
// These tests compare against exact PyTorch outputs to ensure correctness
/* Test corresponds to PyTorch:
import torch
import torch.nn.functional as F
input = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
output = F.interpolate(input, size=(8, 8), mode='bilinear', align_corners=False)
*/
fn bilinear_pytorch_2x_upscale(dev: &Device) -> Result<()> {
let input = Tensor::arange(0f32, 16f32, dev)?.reshape((1, 1, 4, 4))?;
let output = input.upsample_bilinear2d(8, 8, false)?;
// PyTorch expected output (verified from PyTorch 2.10.0)
let expected = Tensor::new(
&[
0.0000f32, 0.2500, 0.7500, 1.2500, 1.7500, 2.2500, 2.7500, 3.0000, 1.0000, 1.2500,
1.7500, 2.2500, 2.7500, 3.2500, 3.7500, 4.0000, 3.0000, 3.2500, 3.7500, 4.2500, 4.7500,
5.2500, 5.7500, 6.0000, 5.0000, 5.2500, 5.7500, 6.2500, 6.7500, 7.2500, 7.7500, 8.0000,
7.0000, 7.2500, 7.7500, 8.2500, 8.7500, 9.2500, 9.7500, 10.0000, 9.0000, 9.2500,
9.7500, 10.2500, 10.7500, 11.2500, 11.7500, 12.0000, 11.0000, 11.2500, 11.7500,
12.2500, 12.7500, 13.2500, 13.7500, 14.0000, 12.0000, 12.2500, 12.7500, 13.2500,
13.7500, 14.2500, 14.7500, 15.0000,
],
dev,
)?
.reshape((1, 1, 8, 8))?;
let diff = (&output - &expected)?.abs()?.flatten_all()?.max(0)?;
let max_diff = diff.to_vec0::<f32>()?;
assert!(
max_diff < 1e-4,
"Max difference {} exceeds threshold 1e-4",
max_diff
);
Ok(())
}
/* Test corresponds to PyTorch:
import torch
import torch.nn.functional as F
input = torch.arange(64, dtype=torch.float32).reshape(1, 1, 8, 8)
output = F.interpolate(input, size=(4, 4), mode='bilinear', align_corners=False)
*/
fn bilinear_pytorch_downscale(dev: &Device) -> Result<()> {
let input = Tensor::arange(0f32, 64f32, dev)?.reshape((1, 1, 8, 8))?;
let output = input.upsample_bilinear2d(4, 4, false)?;
// PyTorch expected output
let expected = Tensor::new(
&[
4.5f32, 6.5, 8.5, 10.5, 20.5, 22.5, 24.5, 26.5, 36.5, 38.5, 40.5, 42.5, 52.5, 54.5,
56.5, 58.5,
],
dev,
)?
.reshape((1, 1, 4, 4))?;
let diff = (&output - &expected)?.abs()?.flatten_all()?.max(0)?;
let max_diff = diff.to_vec0::<f32>()?;
assert!(
max_diff < 1e-4,
"Max difference {} exceeds threshold 1e-4",
max_diff
);
Ok(())
}
/* Test corresponds to PyTorch:
import torch
import torch.nn.functional as F
torch.manual_seed(42)
input = torch.randn(1, 2, 4, 4, dtype=torch.float32)
output = F.interpolate(input, size=(8, 8), mode='bilinear', align_corners=False)
*/
fn bilinear_pytorch_multi_channel(dev: &Device) -> Result<()> {
// Using fixed seed data from PyTorch (seed=42)
let input = Tensor::new(
&[
// Channel 0
1.9269f32, 1.4873, 0.9007, -2.1055, 0.6784, -1.2345, -0.0431, -1.6047, -0.7521, 1.6487,
-0.3925, -1.4036, -0.7279, -0.5594, -0.7688, 0.7624, // Channel 1
1.6423f32, -0.1596, -0.4974, 0.4396, -0.7581, 1.0783, 0.8008, 1.6806, 1.2791, 1.2964,
0.6105, 1.3347, -0.2316, 0.0418, -0.2516, 0.8599,
],
dev,
)?
.reshape((1, 2, 4, 4))?;
let output = input.upsample_bilinear2d(8, 8, false)?;
assert_eq!(output.dims(), &[1, 2, 8, 8]);
// Verify output is finite and in reasonable range
let output_vec = output.flatten_all()?.to_vec1::<f32>()?;
for &val in &output_vec {
assert!(val.is_finite(), "Output contains non-finite value");
}
// Check first row of channel 0 from PyTorch output
let output_ch0_row0 = output.i((0, 0, 0, ..))?.to_vec1::<f32>()?;
let expected_ch0_row0 = [
1.9269f32, 1.8170, 1.5972, 1.3406, 1.0474, 0.1492, -1.3540, -2.1055,
];
for (i, (&out, &exp)) in output_ch0_row0
.iter()
.zip(expected_ch0_row0.iter())
.enumerate()
{
let diff = (out - exp).abs();
assert!(
diff < 1e-3,
"Channel 0, row 0, index {} differs: got {}, expected {}, diff {}",
i,
out,
exp,
diff
);
}
// Check first row of channel 1 from PyTorch output
let output_ch1_row0 = output.i((0, 1, 0, ..))?.to_vec1::<f32>()?;
let expected_ch1_row0 = [
1.6423f32, 1.1918, 0.2909, -0.2440, -0.4129, -0.2632, 0.2053, 0.4396,
];
for (i, (&out, &exp)) in output_ch1_row0
.iter()
.zip(expected_ch1_row0.iter())
.enumerate()
{
let diff = (out - exp).abs();
assert!(
diff < 1e-3,
"Channel 1, row 0, index {} differs: got {}, expected {}, diff {}",
i,
out,
exp,
diff
);
}
Ok(())
}
/* Test corresponds to PyTorch:
import torch
import torch.nn.functional as F
input = torch.tensor([[[[1.0, 2.0], [3.0, 4.0]]]], dtype=torch.float32)
output = F.interpolate(input, size=(4, 4), mode='bilinear', align_corners=True)
*/
fn bilinear_pytorch_align_corners_true(dev: &Device) -> Result<()> {
let input = Tensor::from_vec(vec![1.0f32, 2.0, 3.0, 4.0], (1, 1, 2, 2), dev)?;
let output = input.upsample_bilinear2d(4, 4, true)?;
// PyTorch expected output with align_corners=True
let expected = Tensor::new(
&[
1.0f32, 1.3333, 1.6667, 2.0, 1.6667, 2.0, 2.3333, 2.6667, 2.3333, 2.6667, 3.0, 3.3333,
3.0, 3.3333, 3.6667, 4.0,
],
dev,
)?
.reshape((1, 1, 4, 4))?;
let diff = (&output - &expected)?.abs()?.flatten_all()?.max(0)?;
let max_diff = diff.to_vec0::<f32>()?;
assert!(
max_diff < 1e-3,
"Max difference {} exceeds threshold 1e-3",
max_diff
);
// Verify corners are exactly preserved with align_corners=True
let output_vec = output.flatten_all()?.to_vec1::<f32>()?;
assert!(
(output_vec[0] - 1.0).abs() < 1e-5,
"Top-left corner not preserved"
);
assert!(
(output_vec[3] - 2.0).abs() < 1e-5,
"Top-right corner not preserved"
);
assert!(
(output_vec[12] - 3.0).abs() < 1e-5,
"Bottom-left corner not preserved"
);
assert!(
(output_vec[15] - 4.0).abs() < 1e-5,
"Bottom-right corner not preserved"
);
Ok(())
}
/* Test corresponds to PyTorch:
import torch
import torch.nn.functional as F
input = torch.arange(16, dtype=torch.float32).reshape(1, 1, 4, 4)
output = F.interpolate(input, scale_factor=2.0, mode='bilinear', align_corners=False)
*/
fn bilinear_pytorch_scale_factor(dev: &Device) -> Result<()> {
let input = Tensor::arange(0f32, 16f32, dev)?.reshape((1, 1, 4, 4))?;
let output_scale = input.upsample_bilinear2d_with_scale(2.0, 2.0, false)?;
let output_size = input.upsample_bilinear2d(8, 8, false)?;
// scale_factor=2.0 should produce identical results to size=(8, 8)
let diff = (&output_scale - &output_size)?
.abs()?
.flatten_all()?
.max(0)?;
let max_diff = diff.to_vec0::<f32>()?;
assert!(
max_diff < 1e-6,
"scale_factor and size methods differ by {}",
max_diff
);
Ok(())
}
/* Test corresponds to PyTorch:
import torch
import torch.nn.functional as F
input = torch.arange(24, dtype=torch.float32).reshape(1, 1, 4, 6)
output = F.interpolate(input, size=(8, 12), mode='bilinear', align_corners=False)
*/
fn bilinear_pytorch_non_square_exact(dev: &Device) -> Result<()> {
let input = Tensor::arange(0f32, 24f32, dev)?.reshape((1, 1, 4, 6))?;
let output = input.upsample_bilinear2d(8, 12, false)?;
// PyTorch expected output (verified from PyTorch 2.10.0)
#[rustfmt::skip]
let expected = Tensor::new(
&[
0.0f32, 0.25, 0.75, 1.25, 1.75, 2.25, 2.75, 3.25, 3.75, 4.25, 4.75, 5.0,
1.5, 1.75, 2.25, 2.75, 3.25, 3.75, 4.25, 4.75, 5.25, 5.75, 6.25, 6.5,
4.5, 4.75, 5.25, 5.75, 6.25, 6.75, 7.25, 7.75, 8.25, 8.75, 9.25, 9.5,
7.5, 7.75, 8.25, 8.75, 9.25, 9.75, 10.25, 10.75, 11.25, 11.75, 12.25, 12.5,
10.5, 10.75, 11.25, 11.75, 12.25, 12.75, 13.25, 13.75, 14.25, 14.75, 15.25, 15.5,
13.5, 13.75, 14.25, 14.75, 15.25, 15.75, 16.25, 16.75, 17.25, 17.75, 18.25, 18.5,
16.5, 16.75, 17.25, 17.75, 18.25, 18.75, 19.25, 19.75, 20.25, 20.75, 21.25, 21.5,
18.0, 18.25, 18.75, 19.25, 19.75, 20.25, 20.75, 21.25, 21.75, 22.25, 22.75, 23.0,
],
dev,
)?
.reshape((1, 1, 8, 12))?;
let diff = (&output - &expected)?.abs()?.flatten_all()?.max(0)?;
let max_diff = diff.to_vec0::<f32>()?;
assert!(
max_diff < 1e-4,
"Max difference {} exceeds threshold 1e-4",
max_diff
);
Ok(())
}
/* Test corresponds to PyTorch:
import torch
import torch.nn.functional as F
input = torch.tensor([[[[5.0]]]], dtype=torch.float32)
output = F.interpolate(input, size=(3, 3), mode='bilinear', align_corners=False)
*/
fn bilinear_pytorch_tiny_1x1_to_3x3(dev: &Device) -> Result<()> {
let input = Tensor::new(&[5.0f32], dev)?.reshape((1, 1, 1, 1))?;
let output = input.upsample_bilinear2d(3, 3, false)?;
// PyTorch expected output: all values should be 5.0
let expected = Tensor::new(&[5.0f32, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0], dev)?
.reshape((1, 1, 3, 3))?;
let diff = (&output - &expected)?.abs()?.flatten_all()?.max(0)?;
let max_diff = diff.to_vec0::<f32>()?;
assert!(
max_diff < 1e-6,
"Max difference {} exceeds threshold 1e-6",
max_diff
);
Ok(())
}
/* Test corresponds to PyTorch:
import torch
import torch.nn.functional as F
input = torch.tensor([[[[2.0, 8.0]]]], dtype=torch.float32)
output = F.interpolate(input, size=(3, 6), mode='bilinear', align_corners=False)
*/
fn bilinear_pytorch_tiny_1x2_to_3x6(dev: &Device) -> Result<()> {
let input = Tensor::new(&[2.0f32, 8.0], dev)?.reshape((1, 1, 1, 2))?;
let output = input.upsample_bilinear2d(3, 6, false)?;
// PyTorch expected output
#[rustfmt::skip]
let expected = Tensor::new(
&[
2.0f32, 2.0, 4.0, 6.0, 8.0, 8.0,
2.0, 2.0, 4.0, 6.0, 8.0, 8.0,
2.0, 2.0, 4.0, 6.0, 8.0, 8.0,
],
dev,
)?
.reshape((1, 1, 3, 6))?;
let diff = (&output - &expected)?.abs()?.flatten_all()?.max(0)?;
let max_diff = diff.to_vec0::<f32>()?;
assert!(
max_diff < 1e-6,
"Max difference {} exceeds threshold 1e-6",
max_diff
);
Ok(())
}
/* Test corresponds to PyTorch:
import torch
import torch.nn.functional as F
torch.manual_seed(123)
input = torch.randn(1, 1, 64, 64, dtype=torch.float32)
output = F.interpolate(input, size=(128, 128), mode='bilinear', align_corners=False)
*/
fn bilinear_pytorch_large_64x64_to_128x128(dev: &Device) -> Result<()> {
// Test large tensor for numerical stability
// We'll just verify dimensions and that output is finite
use candle_core::DType;
let input = Tensor::randn(0f32, 1f32, (1, 1, 64, 64), dev)?;
let output = input.upsample_bilinear2d(128, 128, false)?;
assert_eq!(output.dims(), &[1, 1, 128, 128]);
assert_eq!(output.dtype(), DType::F32);
// Verify all values are finite
let output_vec = output.flatten_all()?.to_vec1::<f32>()?;
for &val in &output_vec {
assert!(
val.is_finite(),
"Large tensor output contains non-finite value"
);
}
// Verify output is in reasonable range (should be similar to input range)
let min_val = output_vec.iter().copied().fold(f32::INFINITY, f32::min);
let max_val = output_vec.iter().copied().fold(f32::NEG_INFINITY, f32::max);
assert!(
min_val > -10.0 && max_val < 10.0,
"Large tensor output values out of expected range: min={}, max={}",
min_val,
max_val
);
Ok(())
}
// ============================================================================
// Dimension and Shape Tests (Consolidated)
// ============================================================================
// These tests verify correct output dimensions for various input configurations
fn bilinear_output_dimensions(dev: &Device) -> Result<()> {
// Test 1: Non-square dimensions
let t1 = Tensor::arange(0f32, 32f32, dev)?.reshape((1, 1, 4, 8))?;
let out1 = t1.upsample_bilinear2d(6, 12, false)?;
assert_eq!(out1.dims(), &[1, 1, 6, 12], "Non-square upscale failed");
// Test 2: Batch processing
let t2 = Tensor::arange(0f32, 192f32, dev)?.reshape((4, 3, 4, 4))?;
let out2 = t2.upsample_bilinear2d(8, 8, false)?;
assert_eq!(out2.dims(), &[4, 3, 8, 8], "Batch processing failed");
// Test 3: Asymmetric scale factors
let t3 = Tensor::arange(0f32, 16f32, dev)?.reshape((1, 1, 4, 4))?;
let out3 = t3.upsample_bilinear2d_with_scale(2.0, 3.0, false)?;
assert_eq!(out3.dims(), &[1, 1, 8, 12], "Asymmetric scale failed");
// Test 4: Fractional scale factors
let t4 = Tensor::arange(0f32, 16f32, dev)?.reshape((1, 1, 4, 4))?;
let out4 = t4.upsample_bilinear2d_with_scale(1.5, 1.5, false)?;
assert_eq!(out4.dims(), &[1, 1, 6, 6], "Fractional scale failed");
// Test 5: Single pixel output
let t5 = Tensor::arange(0f32, 16f32, dev)?.reshape((1, 1, 4, 4))?;
let out5 = t5.upsample_bilinear2d(1, 1, false)?;
assert_eq!(out5.dims(), &[1, 1, 1, 1], "Single pixel output failed");
let val = out5.flatten_all()?.to_vec1::<f32>()?[0];
assert!(val.is_finite(), "Single pixel value is not finite");
// Test 6: Large scale factor
let t6 = Tensor::arange(0f32, 4f32, dev)?.reshape((1, 1, 2, 2))?;
let out6 = t6.upsample_bilinear2d_with_scale(5.0, 5.0, false)?;
assert_eq!(out6.dims(), &[1, 1, 10, 10], "Large scale factor failed");
Ok(())
}
// ============================================================================
// Special Behavior Tests
// ============================================================================
fn bilinear_identity(dev: &Device) -> Result<()> {
// Test that upsampling to the same size returns an identical tensor
let t = Tensor::arange(0f32, 16f32, dev)?.reshape((1, 1, 4, 4))?;
let output = t.upsample_bilinear2d(4, 4, false)?;
let diff = (&t - &output)?.abs()?.flatten_all()?.max(0)?;
assert!(diff.to_vec0::<f32>()? < 1e-6);
Ok(())
}
fn bilinear_align_corners_difference(dev: &Device) -> Result<()> {
// Test that align_corners parameter produces different results
let t = Tensor::arange(0f32, 16f32, dev)?.reshape((1, 1, 4, 4))?;
let output_false = t.upsample_bilinear2d(8, 8, false)?;
let output_true = t.upsample_bilinear2d(8, 8, true)?;
// Results should be different between align_corners modes
let diff = (&output_false - &output_true)?.abs()?.sum_all()?;
assert!(diff.to_vec0::<f32>()? > 0.1);
Ok(())
}
// ============================================================================
// Test Device Macros
// ============================================================================
// PyTorch exact comparison tests
test_device!(
bilinear_pytorch_2x_upscale,
bilinear_pytorch_2x_upscale_cpu,
bilinear_pytorch_2x_upscale_gpu,
bilinear_pytorch_2x_upscale_metal
);
test_device!(
bilinear_pytorch_downscale,
bilinear_pytorch_downscale_cpu,
bilinear_pytorch_downscale_gpu,
bilinear_pytorch_downscale_metal
);
test_device!(
bilinear_pytorch_multi_channel,
bilinear_pytorch_multi_channel_cpu,
bilinear_pytorch_multi_channel_gpu,
bilinear_pytorch_multi_channel_metal
);
test_device!(
bilinear_pytorch_align_corners_true,
bilinear_pytorch_align_corners_true_cpu,
bilinear_pytorch_align_corners_true_gpu,
bilinear_pytorch_align_corners_true_metal
);
test_device!(
bilinear_pytorch_scale_factor,
bilinear_pytorch_scale_factor_cpu,
bilinear_pytorch_scale_factor_gpu,
bilinear_pytorch_scale_factor_metal
);
test_device!(
bilinear_pytorch_non_square_exact,
bilinear_pytorch_non_square_exact_cpu,
bilinear_pytorch_non_square_exact_gpu,
bilinear_pytorch_non_square_exact_metal
);
test_device!(
bilinear_pytorch_tiny_1x1_to_3x3,
bilinear_pytorch_tiny_1x1_to_3x3_cpu,
bilinear_pytorch_tiny_1x1_to_3x3_gpu,
bilinear_pytorch_tiny_1x1_to_3x3_metal
);
test_device!(
bilinear_pytorch_tiny_1x2_to_3x6,
bilinear_pytorch_tiny_1x2_to_3x6_cpu,
bilinear_pytorch_tiny_1x2_to_3x6_gpu,
bilinear_pytorch_tiny_1x2_to_3x6_metal
);
test_device!(
bilinear_pytorch_large_64x64_to_128x128,
bilinear_pytorch_large_64x64_to_128x128_cpu,
bilinear_pytorch_large_64x64_to_128x128_gpu,
bilinear_pytorch_large_64x64_to_128x128_metal
);
// Dimension tests (consolidated)
test_device!(
bilinear_output_dimensions,
bilinear_output_dimensions_cpu,
bilinear_output_dimensions_gpu,
bilinear_output_dimensions_metal
);
// Special behavior tests
test_device!(
bilinear_identity,
bilinear_identity_cpu,
bilinear_identity_gpu,
bilinear_identity_metal
);
test_device!(
bilinear_align_corners_difference,
bilinear_align_corners_difference_cpu,
bilinear_align_corners_difference_gpu,
bilinear_align_corners_difference_metal
);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/tests/tensor_tests.rs | candle-core/tests/tensor_tests.rs | use candle_core::{test_device, test_utils, DType, Device, IndexOp, Result, Tensor, D};
use float8::F8E4M3;
fn zeros(device: &Device) -> Result<()> {
let tensor = Tensor::zeros((5, 2), DType::F32, device)?;
let (dim1, dim2) = tensor.dims2()?;
assert_eq!(dim1, 5);
assert_eq!(dim2, 2);
Ok(())
}
fn ones(device: &Device) -> Result<()> {
assert_eq!(
Tensor::ones((2, 3), DType::U8, device)?.to_vec2::<u8>()?,
[[1, 1, 1], [1, 1, 1]],
);
assert_eq!(
Tensor::ones((2, 3), DType::U32, device)?.to_vec2::<u32>()?,
[[1, 1, 1], [1, 1, 1]],
);
assert_eq!(
Tensor::ones((2, 3), DType::I64, device)?.to_vec2::<i64>()?,
[[1, 1, 1], [1, 1, 1]],
);
assert_eq!(
Tensor::ones((2, 3), DType::F32, device)?.to_vec2::<f32>()?,
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
);
if !device.is_metal() {
assert_eq!(
Tensor::ones((2, 3), DType::F64, device)?.to_vec2::<f64>()?,
[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],
);
}
assert_eq!(
Tensor::ones((2, 3), DType::F16, device)?.to_vec2::<half::f16>()?,
[
[
half::f16::from_f32(1.0),
half::f16::from_f32(1.0),
half::f16::from_f32(1.0)
],
[
half::f16::from_f32(1.0),
half::f16::from_f32(1.0),
half::f16::from_f32(1.0)
]
],
);
assert_eq!(
Tensor::ones((2, 3), DType::BF16, device)?.to_vec2::<half::bf16>()?,
[
[
half::bf16::from_f32(1.0),
half::bf16::from_f32(1.0),
half::bf16::from_f32(1.0)
],
[
half::bf16::from_f32(1.0),
half::bf16::from_f32(1.0),
half::bf16::from_f32(1.0)
]
],
);
if !device.is_metal() {
assert_eq!(
Tensor::ones((2, 3), DType::F8E4M3, device)?.to_vec2::<F8E4M3>()?,
[
[
F8E4M3::from_f32(1.),
F8E4M3::from_f32(1.),
F8E4M3::from_f32(1.)
],
[
F8E4M3::from_f32(1.),
F8E4M3::from_f32(1.),
F8E4M3::from_f32(1.)
]
],
);
}
Ok(())
}
fn full(device: &Device) -> Result<()> {
let tensor = Tensor::zeros((3, 4), DType::U32, device)?;
tensor.const_set(42u32.into())?;
assert_eq!(
tensor.to_vec2::<u32>()?,
[[42, 42, 42, 42], [42, 42, 42, 42], [42, 42, 42, 42]]
);
tensor.i((.., 2))?.const_set(1337u32.into())?;
assert_eq!(
tensor.to_vec2::<u32>()?,
[[42, 42, 1337, 42], [42, 42, 1337, 42], [42, 42, 1337, 42]]
);
tensor.i((2, ..))?.const_set(1u32.into())?;
assert_eq!(
tensor.to_vec2::<u32>()?,
[[42, 42, 1337, 42], [42, 42, 1337, 42], [1, 1, 1, 1]]
);
Ok(())
}
fn const_set(device: &Device) -> Result<()> {
assert_eq!(
Tensor::full(42u32, (2, 3), device)?.to_vec2::<u32>()?,
[[42, 42, 42], [42, 42, 42]],
);
Ok(())
}
fn arange(device: &Device) -> Result<()> {
assert_eq!(
Tensor::arange(0u8, 5u8, device)?.to_vec1::<u8>()?,
[0, 1, 2, 3, 4],
);
assert_eq!(
Tensor::arange_step(0u8, 5u8, 2, device)?.to_vec1::<u8>()?,
[0, 2, 4],
);
assert_eq!(
Tensor::arange_step(0u8, 5u8, 3, device)?.to_vec1::<u8>()?,
[0, 3],
);
assert_eq!(
Tensor::arange_step(5i64, 0i64, -1, device)?.to_vec1::<i64>()?,
[5, 4, 3, 2, 1],
);
if !device.is_metal() {
assert_eq!(
Tensor::arange_step(
F8E4M3::from_f32(0.),
F8E4M3::from_f32(5.),
F8E4M3::from_f32(2.),
device
)?
.to_vec1::<F8E4M3>()?,
[
F8E4M3::from_f32(0.),
F8E4M3::from_f32(2.),
F8E4M3::from_f32(4.),
],
);
}
Ok(())
}
fn add_mul(device: &Device) -> Result<()> {
let tensor = Tensor::new(&[3f32, 1., 4.], device)?;
let dim1 = tensor.dims1()?;
assert_eq!(dim1, 3);
let content: Vec<f32> = tensor.to_vec1()?;
assert_eq!(content, [3., 1., 4.]);
let tensor = Tensor::add(&tensor, &tensor)?;
let content: Vec<f32> = tensor.to_vec1()?;
assert_eq!(content, [6., 2., 8.]);
let tensor = Tensor::mul(&tensor, &tensor)?;
let content: Vec<f32> = tensor.to_vec1()?;
assert_eq!(content, [36., 4., 64.]);
Ok(())
}
fn tensor_2d(device: &Device) -> Result<()> {
let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
let tensor = Tensor::new(data, device)?;
let dims = tensor.dims2()?;
assert_eq!(dims, (2, 5));
let content: Vec<Vec<f32>> = tensor.to_vec2()?;
assert_eq!(content, data);
Ok(())
}
fn clamp(device: &Device) -> Result<()> {
let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
let tensor = Tensor::new(data, device)?;
let tensor = tensor.clamp(1.5, 6.2)?;
assert_eq!(
tensor.to_vec2::<f32>()?,
[[3.0, 1.5, 4.0, 1.5, 5.0], [2.0, 1.5, 6.2, 6.2, 2.0]],
);
Ok(())
}
fn asort(device: &Device) -> Result<()> {
let data = &[[3f32, 1., 4., 1.1, 5.], [2.1, 1., 7., 8., 2.]];
let tensor = Tensor::new(data, device)?;
let indexes = tensor.arg_sort_last_dim(true)?;
assert_eq!(
indexes.to_vec2::<u32>()?,
[[1, 3, 0, 2, 4], [1, 4, 0, 2, 3]],
);
let indexes = tensor.arg_sort_last_dim(false)?;
assert_eq!(
indexes.to_vec2::<u32>()?,
[[4, 2, 0, 3, 1], [3, 2, 0, 4, 1]],
);
let (sorted, indexes) = tensor.sort_last_dim(true)?;
assert_eq!(
indexes.to_vec2::<u32>()?,
[[1, 3, 0, 2, 4], [1, 4, 0, 2, 3]],
);
assert_eq!(
sorted.to_vec2::<f32>()?,
[[1.0, 1.1, 3.0, 4.0, 5.0], [1.0, 2.0, 2.1, 7.0, 8.0]]
);
let (sorted, indexes) = tensor.sort_last_dim(false)?;
assert_eq!(
indexes.to_vec2::<u32>()?,
[[4, 2, 0, 3, 1], [3, 2, 0, 4, 1]],
);
assert_eq!(
sorted.to_vec2::<f32>()?,
[[5.0, 4.0, 3.0, 1.1, 1.0], [8.0, 7.0, 2.1, 2.0, 1.0]]
);
Ok(())
}
/// Test sorting a large tensor that exceeds 1024 elements.
fn asort_big(device: &Device) -> Result<()> {
// Skip on metal for now
if device.is_metal() {
return Ok(());
}
const SIZE: usize = 2000;
let data: Vec<f32> = (0..SIZE).map(|x| (SIZE - x) as f32).collect();
let tensor = Tensor::new(data.as_slice(), device)?;
let indexes = tensor.arg_sort_last_dim(true)?;
let expected_indexes: Vec<u32> = (0..SIZE).rev().map(|x| x as u32).collect();
assert_eq!(indexes.to_vec1::<u32>()?, expected_indexes);
let indexes = tensor.arg_sort_last_dim(false)?;
let expected_indexes: Vec<u32> = (0..SIZE).map(|x| x as u32).collect();
assert_eq!(indexes.to_vec1::<u32>()?, expected_indexes);
Ok(())
}
fn unary_op(device: &Device) -> Result<()> {
let data = &[[-3f32, 1., 4., -0.1, 0.5], [2.7, -1.8, -0.28, 1.8, 2.8]];
let tensor = Tensor::new(data, device)?;
assert_eq!(
test_utils::to_vec2_round(&tensor.gelu()?, 4)?,
[
[-0.0036, 0.8412, 3.9999, -0.046, 0.3457],
[2.6911, -0.0647, -0.1091, 1.7353, 2.7933]
]
);
let t_f16 = tensor.to_dtype(DType::F16)?.gelu()?.to_dtype(DType::F32)?;
let max_diff = (tensor.gelu()? - t_f16)?.flatten_all()?.max(0)?;
assert!(max_diff.to_vec0::<f32>()? < 5e-3);
assert_eq!(
test_utils::to_vec2_round(&tensor.gelu_erf()?, 4)?,
[
[-0.004, 0.8413, 3.9999, -0.046, 0.3457],
[2.6906, -0.0647, -0.1091, 1.7353, 2.7928]
]
);
assert_eq!(
test_utils::to_vec2_round(&tensor.erf()?, 4)?,
[
[-1.0, 0.8427, 1.0, -0.1125, 0.5205],
[0.9999, -0.9891, -0.3079, 0.9891, 0.9999]
]
);
assert_eq!(
test_utils::to_vec2_round(&tensor.silu()?, 4)?,
[
[-0.1423, 0.7311, 3.9281, -0.0475, 0.3112],
[2.53, -0.2553, -0.1205, 1.5447, 2.6395]
]
);
assert_eq!(
test_utils::to_vec2_round(&tensor.ceil()?, 4)?,
[[-3.0, 1.0, 4.0, -0.0, 1.0], [3.0, -1.0, -0.0, 2.0, 3.0]]
);
assert_eq!(
test_utils::to_vec2_round(&tensor.floor()?, 4)?,
[[-3.0, 1.0, 4.0, -1.0, 0.0], [2.0, -2.0, -1.0, 1.0, 2.0]]
);
assert_eq!(
test_utils::to_vec2_round(&tensor.round()?, 4)?,
[[-3.0, 1.0, 4.0, -0.0, 1.0], [3.0, -2.0, -0.0, 2.0, 3.0]]
);
let tensor = Tensor::new(&[2997.9246, 314.15926f32], device)?;
assert_eq!(
test_utils::to_vec1_round(&tensor.round_to(2)?, 4)?,
[2997.92, 314.16]
);
assert_eq!(
test_utils::to_vec1_round(&tensor.round_to(-2)?, 4)?,
[3000.0, 300.]
);
let tensor = Tensor::new(
&[-1.01f32, -0.9, -0.1, 0.0, -0.0, 0.1, 0.9, 1.0, 1.1],
device,
)?;
assert_eq!(
tensor.sign()?.to_vec1::<f32>()?,
[-1., -1., -1., 0., 0., 1., 1., 1., 1.]
);
let tensor = Tensor::new(&[-1.0f32, 0., -2., 3.], device)?;
let y = tensor.elu(2.)?;
assert_eq!(
test_utils::to_vec1_round(&y, 4)?,
[-1.2642, 0.0000, -1.7293, 3.0000]
);
// This test failed on metal prior to the following PR:
// https://github.com/huggingface/candle/pull/2490
let y = tensor.reshape((2, 2))?.t()?.elu(2.)?.flatten_all()?;
assert_eq!(
test_utils::to_vec1_round(&y, 4)?,
[-1.2642, -1.7293, 0.0000, 3.0000]
);
Ok(())
}
fn binary_op(device: &Device) -> Result<()> {
let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
let tensor1 = Tensor::new(data, device)?;
let data2 = &[[5f32, 5., 5., 5., 5.], [2., 1., 7., 8., 2.]];
let tensor2 = Tensor::new(data2, device)?;
let tensor = (&tensor1 + (&tensor1 * &tensor1)? / (&tensor1 + &tensor2))?;
let dims = tensor.dims2()?;
assert_eq!(dims, (2, 5));
let content: Vec<Vec<f32>> = tensor.to_vec2()?;
assert_eq!(content[0], [4.125, 1.1666666, 5.7777777, 1.1666666, 7.5]);
assert_eq!(content[1], [3.0, 1.5, 10.5, 12.0, 3.0]);
#[allow(clippy::eq_op)]
let tensor = (&tensor - &tensor)?;
let content: Vec<Vec<f32>> = tensor.to_vec2()?;
assert_eq!(content[0], [0., 0., 0., 0., 0.]);
let min = tensor1.minimum(&(&tensor2 * 0.5)?)?;
let max = tensor1.maximum(&(&tensor2 * 0.5)?)?;
assert_eq!(
min.to_vec2::<f32>()?,
[[2.5, 1.0, 2.5, 1.0, 2.5], [1.0, 0.5, 3.5, 4.0, 1.0]],
);
assert_eq!(
max.to_vec2::<f32>()?,
[[3.0, 2.5, 4.0, 2.5, 5.0], [2.0, 1.0, 7.0, 8.0, 2.0]]
);
Ok(())
}
fn ternary_op(device: &Device) -> Result<()> {
let data = &[[0u8, 1, 0, 1, 0], [1, 1, 1, 0, 0]];
let ids = Tensor::new(data, device)?;
let data = &[[0f32, 1., 2., 3., 4.], [5., 6., 7., 8., 9.]];
let a = Tensor::new(data, device)?;
let data = &[[10f32, 11., 12., 13., 14.], [15., 16., 17., 18., 19.]];
let b = Tensor::new(data, device)?;
let tensor = ids.where_cond(&a, &b)?;
let dims = tensor.dims();
assert_eq!(dims, [2, 5]);
let result: Vec<f32> = tensor.flatten_all()?.to_vec1()?;
assert_eq!(result, [10., 1., 12., 3., 14., 5., 6., 7., 18., 19.]);
Ok(())
}
fn transpose(device: &Device) -> Result<()> {
let data = &[[3f32, 1., 4., 1., 5.], [2., 1., 7., 8., 2.]];
let tensor = Tensor::new(data, device)?.t()?;
let dims = tensor.dims2()?;
assert_eq!(dims, (5, 2));
assert_eq!(
tensor.to_vec2::<f32>()?,
&[[3f32, 2.], [1., 1.], [4., 7.], [1., 8.], [5., 2.]]
);
assert_eq!(tensor.t()?.to_vec2::<f32>()?, data);
assert_eq!(tensor.contiguous()?.t()?.to_vec2::<f32>()?, data);
assert_eq!(((tensor + 1.)?.t()? - 1.)?.to_vec2::<f32>()?, data);
Ok(())
}
fn var(device: &Device) -> Result<()> {
// Values taken from https://pytorch.org/docs/stable/generated/torch.var.html
let data = &[
[0.2035f32, 1.2959, 1.8101, -0.4644],
[1.5027, -0.3270, 0.5905, 0.6538],
[-1.5745, 1.3330, -0.5596, -0.6548],
[0.1264, -0.5080, 1.6420, 0.1992],
];
let tensor = Tensor::new(data, device)?;
assert_eq!(
test_utils::to_vec2_round(&tensor.var_keepdim(1)?, 4)?,
&[[1.0631], [0.559], [1.4893], [0.8258]]
);
Ok(())
}
fn sum(device: &Device) -> Result<()> {
let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
let tensor = Tensor::new(data, device)?;
assert_eq!(
tensor.sum_keepdim(2)?.to_vec3::<u32>()?,
&[[[8], [15]], [[10], [18]]]
);
assert_eq!(
tensor.sum_keepdim(0)?.to_vec3::<u32>()?,
&[[[5, 2, 11], [9, 7, 17]]],
);
assert_eq!(tensor.sum_keepdim((0, 2, 1))?.to_vec3::<u32>()?, &[[[51]]],);
assert_eq!(
tensor.t()?.sum_keepdim(1)?.t()?.to_vec3::<u32>()?,
&[[[8], [15]], [[10], [18]]]
);
assert_eq!(
tensor.sum_keepdim((2, 1))?.to_vec3::<u32>()?,
&[[[8 + 15]], [[10 + 18]]]
);
let data: Vec<u32> = (0..4000u32).collect();
let tensor = Tensor::new(data.as_slice(), device)?;
assert_eq!(tensor.sum_keepdim(0)?.to_vec1::<u32>()?, &[7998000]);
let tensor = tensor.reshape((2000, 2))?;
assert_eq!(tensor.sum_keepdim((0, 1))?.to_vec2::<u32>()?, &[[7998000]]);
assert_eq!(
tensor.sum_keepdim(0)?.sum_keepdim(1)?.to_vec2::<u32>()?,
&[[7998000]]
);
assert_eq!(
tensor.sum_keepdim(1)?.sum_keepdim(0)?.to_vec2::<u32>()?,
&[[7998000]]
);
assert_eq!(
tensor.sum_keepdim(0)?.to_vec2::<u32>()?,
&[[3998000, 4000000]]
);
// Make the tensor non contiguous.
let tensor = tensor.t()?.contiguous()?.t()?;
assert_eq!(tensor.sum_keepdim((0, 1))?.to_vec2::<u32>()?, &[[7998000]]);
assert_eq!(
tensor.sum_keepdim(0)?.sum_keepdim(1)?.to_vec2::<u32>()?,
&[[7998000]]
);
assert_eq!(
tensor.sum_keepdim(1)?.sum_keepdim(0)?.to_vec2::<u32>()?,
&[[7998000]]
);
assert_eq!(
tensor.sum_keepdim(0)?.to_vec2::<u32>()?,
&[[3998000, 4000000]]
);
let t1 = tensor.reshape((200, 5, 4))?;
let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
for tensor in [t1, t2] {
assert_eq!(
tensor.sum_keepdim((0, 1, 2))?.to_vec3::<u32>()?,
&[[[7998000]]]
);
assert_eq!(
tensor
.sum_keepdim(0)?
.sum_keepdim(2)?
.sum_keepdim(1)?
.to_vec3::<u32>()?,
&[[[7998000]]]
);
assert_eq!(
tensor
.sum_keepdim(0)?
.sum_keepdim((1, 2))?
.to_vec3::<u32>()?,
&[[[7998000]]]
);
assert_eq!(
tensor
.sum_keepdim(1)?
.sum_keepdim((0, 2))?
.to_vec3::<u32>()?,
&[[[7998000]]]
);
assert_eq!(
tensor.sum_keepdim(0)?.to_vec3::<u32>()?,
&[[
[398000, 398200, 398400, 398600],
[398800, 399000, 399200, 399400],
[399600, 399800, 400000, 400200],
[400400, 400600, 400800, 401000],
[401200, 401400, 401600, 401800]
]]
);
}
Ok(())
}
fn min(device: &Device) -> Result<()> {
let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
let tensor = Tensor::new(data, device)?;
assert_eq!(
tensor.min_keepdim(2)?.to_vec3::<u32>()?,
&[[[1], [1]], [[1], [2]]]
);
assert_eq!(
tensor.min_keepdim(0)?.to_vec3::<u32>()?,
&[[[2, 1, 4], [1, 2, 8]]],
);
let data: Vec<u32> = (200..4000u32).collect();
let tensor = Tensor::new(data.as_slice(), device)?;
assert_eq!(tensor.min_keepdim(0)?.to_vec1::<u32>()?, &[200]);
let tensor = tensor.reshape((1900, 2))?;
assert_eq!(
tensor.min_keepdim(0)?.min_keepdim(1)?.to_vec2::<u32>()?,
&[[200]]
);
assert_eq!(
tensor.min_keepdim(1)?.min_keepdim(0)?.to_vec2::<u32>()?,
&[[200]]
);
assert_eq!(tensor.min_keepdim(0)?.to_vec2::<u32>()?, &[[200, 201]]);
// Make the tensor non contiguous.
let tensor = tensor.t()?.contiguous()?.t()?;
assert_eq!(
tensor.min_keepdim(0)?.min_keepdim(1)?.to_vec2::<u32>()?,
&[[200]]
);
assert_eq!(
tensor.min_keepdim(1)?.min_keepdim(0)?.to_vec2::<u32>()?,
&[[200]]
);
assert_eq!(tensor.min_keepdim(0)?.to_vec2::<u32>()?, &[[200, 201]]);
let t1 = tensor.reshape((190, 5, 4))?;
let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
for tensor in [t1, t2] {
assert_eq!(
tensor
.min_keepdim(0)?
.min_keepdim(2)?
.min_keepdim(1)?
.to_vec3::<u32>()?,
&[[[200]]]
);
assert_eq!(
tensor.min_keepdim(0)?.to_vec3::<u32>()?,
&[[
[200, 201, 202, 203],
[204, 205, 206, 207],
[208, 209, 210, 211],
[212, 213, 214, 215],
[216, 217, 218, 219]
]]
);
}
Ok(())
}
fn max(device: &Device) -> Result<()> {
let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
let tensor = Tensor::new(data, device)?;
assert_eq!(
tensor.max_keepdim(2)?.to_vec3::<u32>()?,
&[[[4], [9]], [[7], [8]]]
);
assert_eq!(
tensor.max_keepdim(0)?.to_vec3::<u32>()?,
&[[[3, 1, 7], [8, 5, 9]]],
);
let data: Vec<u32> = (200..4000u32).collect();
let tensor = Tensor::new(data.as_slice(), device)?;
assert_eq!(tensor.max_keepdim(0)?.to_vec1::<u32>()?, &[3999]);
let tensor = tensor.reshape((1900, 2))?;
assert_eq!(
tensor.max_keepdim(0)?.max_keepdim(1)?.to_vec2::<u32>()?,
&[[3999]]
);
assert_eq!(
tensor.max_keepdim(1)?.max_keepdim(0)?.to_vec2::<u32>()?,
&[[3999]]
);
assert_eq!(tensor.max_keepdim(0)?.to_vec2::<u32>()?, &[[3998, 3999]]);
// Make the tensor non contiguous.
let tensor = tensor.t()?.contiguous()?.t()?;
assert_eq!(
tensor.max_keepdim(0)?.max_keepdim(1)?.to_vec2::<u32>()?,
&[[3999]]
);
assert_eq!(
tensor.max_keepdim(1)?.max_keepdim(0)?.to_vec2::<u32>()?,
&[[3999]]
);
assert_eq!(tensor.max_keepdim(0)?.to_vec2::<u32>()?, &[[3998, 3999]]);
let t1 = tensor.reshape((190, 5, 4))?;
let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
for tensor in [t1, t2] {
assert_eq!(
tensor
.max_keepdim(0)?
.max_keepdim(2)?
.max_keepdim(1)?
.to_vec3::<u32>()?,
&[[[3999]]]
);
assert_eq!(
tensor.max_keepdim(0)?.to_vec3::<u32>()?,
&[[
[3980, 3981, 3982, 3983],
[3984, 3985, 3986, 3987],
[3988, 3989, 3990, 3991],
[3992, 3993, 3994, 3995],
[3996, 3997, 3998, 3999]
]]
);
}
Ok(())
}
fn argmin(device: &Device) -> Result<()> {
let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
let tensor = Tensor::new(data, device)?;
assert_eq!(
tensor.argmin_keepdim(2)?.to_vec3::<u32>()?,
&[[[1], [0]], [[1], [1]]]
);
assert_eq!(
tensor.argmin_keepdim(0)?.to_vec3::<u32>()?,
&[[[1, 0, 0], [0, 1, 1]]],
);
let data: Vec<u32> = (200..4000u32).collect();
let tensor = Tensor::new(data.as_slice(), device)?;
assert_eq!(tensor.argmin_keepdim(0)?.to_vec1::<u32>()?, &[0]);
let tensor = tensor.reshape((1900, 2))?;
assert_eq!(
tensor
.argmin_keepdim(0)?
.argmin_keepdim(1)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(
tensor
.argmin_keepdim(1)?
.argmin_keepdim(0)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(tensor.argmin_keepdim(0)?.to_vec2::<u32>()?, &[[0, 0]]);
// Make the tensor non contiguous.
let tensor = tensor.t()?.contiguous()?.t()?;
assert_eq!(
tensor
.argmin_keepdim(0)?
.argmin_keepdim(1)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(
tensor
.argmin_keepdim(1)?
.argmin_keepdim(0)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(tensor.argmin_keepdim(0)?.to_vec2::<u32>()?, &[[0, 0]]);
let t1 = tensor.reshape((190, 5, 4))?;
let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
for tensor in [t1, t2] {
assert_eq!(
tensor
.argmin_keepdim(0)?
.argmin_keepdim(2)?
.argmin_keepdim(1)?
.to_vec3::<u32>()?,
&[[[0]]]
);
assert_eq!(
tensor.argmin_keepdim(0)?.to_vec3::<u32>()?,
&[[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
]]
);
}
Ok(())
}
fn argmax(device: &Device) -> Result<()> {
let data = &[[[3u32, 1, 4], [1, 5, 9]], [[2, 1, 7], [8, 2, 8]]];
let tensor = Tensor::new(data, device)?;
assert_eq!(
tensor.argmax_keepdim(2)?.to_vec3::<u32>()?,
&[[[2], [2]], [[2], [0]]]
);
assert_eq!(
tensor.argmax_keepdim(0)?.to_vec3::<u32>()?,
&[[[0, 0, 1], [1, 0, 0]]],
);
let data: Vec<u32> = (200..4000u32).collect();
let tensor = Tensor::new(data.as_slice(), device)?;
assert_eq!(tensor.argmax_keepdim(0)?.to_vec1::<u32>()?, &[3799]);
let tensor = tensor.reshape((1900, 2))?;
assert_eq!(
tensor
.argmax_keepdim(0)?
.argmax_keepdim(1)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(
tensor
.argmax_keepdim(1)?
.argmax_keepdim(0)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(tensor.argmax_keepdim(0)?.to_vec2::<u32>()?, &[[1899, 1899]]);
// Make the tensor non contiguous.
let tensor = tensor.t()?.contiguous()?.t()?;
assert_eq!(
tensor
.argmax_keepdim(0)?
.argmax_keepdim(1)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(
tensor
.argmax_keepdim(1)?
.argmax_keepdim(0)?
.to_vec2::<u32>()?,
&[[0]]
);
assert_eq!(tensor.argmax_keepdim(0)?.to_vec2::<u32>()?, &[[1899, 1899]]);
let t1 = tensor.reshape((190, 5, 4))?;
let t2 = t1.transpose(0, 2)?.contiguous()?.transpose(0, 2)?;
for tensor in [t1, t2] {
assert_eq!(
tensor
.argmax_keepdim(0)?
.argmax_keepdim(2)?
.argmax_keepdim(1)?
.to_vec3::<u32>()?,
&[[[0]]]
);
assert_eq!(
tensor.argmax_keepdim(0)?.to_vec3::<u32>()?,
&[[
[189, 189, 189, 189],
[189, 189, 189, 189],
[189, 189, 189, 189],
[189, 189, 189, 189],
[189, 189, 189, 189],
]]
);
}
Ok(())
}
fn narrow(device: &Device) -> Result<()> {
let data = &[[[3f32, 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]];
let tensor = Tensor::new(data, device)?;
assert_eq!(
tensor.narrow(2, 1, 2)?.to_vec3::<f32>()?,
&[[[1.0, 4.0], [5.0, 9.0]], [[1.0, 7.0], [2.0, 8.0]]],
);
assert_eq!(
tensor.narrow(1, 1, 1)?.to_vec3::<f32>()?,
&[[[1.0, 5.0, 9.0]], [[8.0, 2.0, 8.0]]],
);
assert_eq!(
tensor.narrow(0, 0, 1)?.to_vec3::<f32>()?,
&[[[3.0, 1.0, 4.0], [1.0, 5.0, 9.0]]],
);
assert_eq!(
tensor.narrow(0, 1, 1)?.to_vec3::<f32>()?,
&[[[2.0, 1.0, 7.0], [8.0, 2.0, 8.0]]],
);
// The following has been checked against PyTorch via:
// import torch
// t = torch.tensor([[[3., 1., 4.], [1., 5., 9.]], [[2., 1., 7.], [8., 2., 8.]]])
// t.transpose(-1, -2).narrow(1, 1, 2)
assert_eq!(
tensor.t()?.narrow(1, 1, 2)?.to_vec3::<f32>()?,
&[[[1.0, 5.0], [4.0, 9.0]], [[1.0, 2.0], [7.0, 8.0]]],
);
Ok(())
}
fn broadcast(device: &Device) -> Result<()> {
let data = &[3f32, 1., 4.];
let tensor = Tensor::new(data, device)?;
assert_eq!(
tensor.broadcast_left((3, 1))?.to_vec3::<f32>()?,
&[[[3.0, 1.0, 4.0]], [[3.0, 1.0, 4.0]], [[3.0, 1.0, 4.0]]]
);
Ok(())
}
fn slice_set(device: &Device) -> Result<()> {
let (b, h, max_t, d) = (2, 4, 7, 3);
let cache = Tensor::zeros((b, h, max_t, d), DType::F32, device)?;
let tensor = Tensor::randn(0f32, 1f32, (b, h, 4, d), device)?;
cache.slice_set(&tensor, 2, 0)?;
let cache_t = cache.narrow(2, 0, 4)?;
let diff = (cache_t - &tensor)?.abs()?.sum_all()?.to_vec0::<f32>()?;
assert_eq!(diff, 0.);
cache.slice_set(&tensor, 2, 1)?;
let cache_t = cache.narrow(2, 1, 4)?;
let diff = (cache_t - &tensor)?.abs()?.sum_all()?.to_vec0::<f32>()?;
assert_eq!(diff, 0.);
let ones = Tensor::ones((b, h, 1, d), DType::F32, device)?;
cache.slice_set(&ones, 2, 6)?;
let diff = cache.narrow(2, 5, 1)?.abs()?.sum_all()?.to_vec0::<f32>()?;
assert_eq!(diff, 0.);
let diff = (cache.narrow(2, 6, 1)? - 1.)?
.abs()?
.sum_all()?
.to_vec0::<f32>()?;
assert_eq!(diff, 0.);
// This used to create a deadlock rather than returning an actual error.
assert!(cache.slice_set(&cache, 0, 0).is_err());
Ok(())
}
fn cat(device: &Device) -> Result<()> {
// 1D
let t1 = Tensor::new(&[3f32, 1., 4.], device)?;
let t2 = Tensor::new(&[1f32, 5., 9., 2.], device)?;
let t3 = Tensor::new(&[6f32, 5., 3., 5., 8., 9.], device)?;
assert_eq!(Tensor::cat(&[&t1], 0)?.to_vec1::<f32>()?, [3f32, 1., 4.],);
assert_eq!(
Tensor::cat(&[&t1, &t2], 0)?.to_vec1::<f32>()?,
[3f32, 1., 4., 1., 5., 9., 2.],
);
assert_eq!(
Tensor::cat(&[&t1, &t2, &t3], 0)?.to_vec1::<f32>()?,
[3f32, 1., 4., 1., 5., 9., 2., 6., 5., 3., 5., 8., 9.],
);
// 2D
let data = &[[3f32, 1., 4., 1., 5.], [2., 7., 1., 8., 2.]];
let t1 = Tensor::new(data, device)?;
let data2 = &[[5f32, 5., 5., 5., 5.], [2., 7., 1., 8., 2.]];
let t2 = Tensor::new(data2, device)?;
assert_eq!(
Tensor::cat(&[&t1, &t2], 0)?.to_vec2::<f32>()?,
[
[3.0, 1.0, 4.0, 1.0, 5.0],
[2.0, 7.0, 1.0, 8.0, 2.0],
[5.0, 5.0, 5.0, 5.0, 5.0],
[2.0, 7.0, 1.0, 8.0, 2.0]
]
);
// PyTorch equivalent:
// import torch
// t1 = torch.tensor([[3, 1, 4, 1, 5], [2, 7, 1, 8, 2]])
// t2 = torch.tensor([[5]*5, [2, 7, 1, 8, 2]])
// torch.cat([t1.t(), t2.t()], dim=1).t()
assert_eq!(
Tensor::cat(&[&t1.t()?, &t2.t()?], 1)?
.t()?
.to_vec2::<f32>()?,
[
[3.0, 1.0, 4.0, 1.0, 5.0],
[2.0, 7.0, 1.0, 8.0, 2.0],
[5.0, 5.0, 5.0, 5.0, 5.0],
[2.0, 7.0, 1.0, 8.0, 2.0]
]
);
assert_eq!(
Tensor::cat(&[&t1, &t2], 1)?.to_vec2::<f32>()?,
[
[3.0, 1.0, 4.0, 1.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0],
[2.0, 7.0, 1.0, 8.0, 2.0, 2.0, 7.0, 1.0, 8.0, 2.0]
]
);
// 3D
let t1 = Tensor::arange(0, 48i64, device)?.reshape((2, 6, 4))?;
let t2 = Tensor::arange(100, 124i64, device)?.reshape((2, 3, 4))?;
let t3 = Tensor::arange(10000, 10032i64, device)?.reshape((2, 4, 4))?;
let t_cat = Tensor::cat(&[&t1, &t2, &t3], 1)?;
let t1 = t1.t()?.contiguous()?.t()?;
let t2 = t2.t()?.contiguous()?.t()?;
let t3 = t3.t()?.contiguous()?.t()?;
let t_cat2 = Tensor::cat(&[&t1, &t2, &t3], 1)?;
let diff = t_cat.eq(&t_cat2)?.to_dtype(DType::F32)?.sum_all()?;
assert_eq!(diff.to_vec0::<f32>()?, 104.0);
assert_eq!(t_cat.i((0, 0, 0))?.to_vec0::<i64>()?, 0);
assert_eq!(t_cat.i((0, 4, 0))?.to_vec0::<i64>()?, 16);
assert_eq!(t_cat.i((0, 5, 0))?.to_vec0::<i64>()?, 20);
assert_eq!(t_cat.i((1, 5, 0))?.to_vec0::<i64>()?, 44);
assert_eq!(t_cat.i((0, 6, 0))?.to_vec0::<i64>()?, 100);
assert_eq!(t_cat.i((1, 6, 0))?.to_vec0::<i64>()?, 112);
assert_eq!(t_cat.i((0, 6, 1))?.to_vec0::<i64>()?, 101);
assert_eq!(t_cat.i((0, 7, 1))?.to_vec0::<i64>()?, 105);
assert_eq!(t_cat.i((0, 12, 1))?.to_vec0::<i64>()?, 10013);
assert_eq!(t_cat.i((1, 12, 3))?.to_vec0::<i64>()?, 10031);
Ok(())
}
fn embeddings(device: &Device) -> Result<()> {
let ids = Tensor::new(&[0u32, 2u32, 1u32], device)?;
let t = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], device)?;
let hs = t.embedding(&ids)?;
assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]);
let hs = t.index_select(&ids, 0)?;
assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]);
let hs = t.index_select(&ids.to_dtype(DType::I64)?, 0)?;
assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 1.0], [4.0, 5.0], [2.0, 3.0]]);
let ids = Tensor::new(&[u32::MAX, 2u32, u32::MAX], device)?;
let hs = t.index_select(&ids, 0)?;
assert_eq!(hs.to_vec2::<f32>()?, &[[0.0, 0.0], [4.0, 5.0], [0.0, 0.0]]);
Ok(())
}
#[test]
fn index_select_fail() -> Result<()> {
// Check that an error is properly reported on out of bounds.
let ids = Tensor::new(&[4u32, 2u32, 1u32], &Device::Cpu)?;
let t = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], &Device::Cpu)?;
let hs = t.index_select(&ids, 0);
assert!(hs.is_err());
Ok(())
}
// The test below triggers an unwinding panic as there is a panic within the
// #[cfg(feature = "cuda")]
// #[test]
// #[should_panic]
// fn index_select_fail_gpu() {
// // Check that a panic happens for out of bounds in cuda
// if let Ok(device) = Device::new_cuda(0) {
// if let Ok(ids) = Tensor::new(&[4u32, 2u32, 1u32], &device) {
// if let Ok(t) = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], &device) {
// let _ = t.index_select(&ids, 0);
// }
// }
// }
// }
fn cmp(device: &Device) -> Result<()> {
let t1 = Tensor::new(&[[0f32, 1f32], [2f32, 3f32], [4f32, 5f32]], device)?;
let t2 = Tensor::new(&[[1f32, 0f32], [3f32, 3f32], [4f32, 7f32]], device)?;
assert_eq!(t1.eq(&t2)?.to_vec2::<u8>()?, &[[0, 0], [0, 1], [1, 0]]);
assert_eq!(t1.ne(&t2)?.to_vec2::<u8>()?, &[[1, 1], [1, 0], [0, 1]]);
assert_eq!(t1.le(&t2)?.to_vec2::<u8>()?, &[[1, 0], [1, 1], [1, 1]]);
assert_eq!(t1.lt(&t2)?.to_vec2::<u8>()?, &[[1, 0], [1, 0], [0, 1]]);
assert_eq!(t1.gt(&t2)?.to_vec2::<u8>()?, &[[0, 1], [0, 0], [0, 0]]);
assert_eq!(t1.ge(&t2)?.to_vec2::<u8>()?, &[[0, 1], [0, 1], [1, 0]]);
Ok(())
}
fn index_select(device: &Device) -> Result<()> {
let ids = Tensor::new(&[0u32, 2u32, 1u32], device)?;
let t = Tensor::arange(0f32, 12f32, device)?.reshape((4, 3))?;
assert_eq!(
t.to_vec2::<f32>()?,
&[
[0.0, 1.0, 2.0],
[3.0, 4.0, 5.0],
[6.0, 7.0, 8.0],
[9.0, 10.0, 11.0]
]
);
for dtype in [DType::U8, DType::U32, DType::I64] {
let ids = ids.to_dtype(dtype)?;
let hs = t.index_select(&ids, 1)?;
assert_eq!(
hs.to_vec2::<f32>()?,
&[
[0.0, 2.0, 1.0],
[3.0, 5.0, 4.0],
[6.0, 8.0, 7.0],
[9.0, 11.0, 10.0]
]
);
let hs = t.index_select(&ids, 0)?;
assert_eq!(
hs.to_vec2::<f32>()?,
&[[0.0, 1.0, 2.0], [6.0, 7.0, 8.0], [3.0, 4.0, 5.0]]
);
// Prior to https://github.com/huggingface/candle/pull/1022
// There would be a bug where the last values in the result tensor would be set to 0.
let ids = Tensor::new(&[0u32, 2u32, 1u32, 0u32, 2u32, 1u32], device)?;
let hs = t.index_select(&ids, 0)?;
assert_eq!(
hs.to_vec2::<f32>()?,
&[
[0.0, 1.0, 2.0],
[6.0, 7.0, 8.0],
[3.0, 4.0, 5.0],
[0.0, 1.0, 2.0],
[6.0, 7.0, 8.0],
[3.0, 4.0, 5.0],
]
);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | true |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/benches/bench_main.rs | candle-core/benches/bench_main.rs | mod benchmarks;
use criterion::criterion_main;
criterion_main!(
benchmarks::affine::benches,
benchmarks::binary::benches,
benchmarks::broadcast::benches,
benchmarks::copy::benches,
benchmarks::conv_transpose2d::benches,
benchmarks::matmul::benches,
benchmarks::qmatmul::benches,
benchmarks::random::benches,
benchmarks::reduce::benches,
benchmarks::unary::benches,
benchmarks::where_cond::benches,
);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/benches/benchmarks/conv_transpose2d.rs | candle-core/benches/benchmarks/conv_transpose2d.rs | use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{DType, Device, Tensor};
use criterion::{criterion_group, Criterion, Throughput};
use std::hint::black_box;
use std::time::Instant;
fn run(
x: &Tensor,
k: &Tensor,
padding: usize,
output_padding: usize,
stride: usize,
dilation: usize,
) {
x.conv_transpose2d(k, padding, output_padding, stride, dilation)
.unwrap();
}
fn run_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) {
let t = Tensor::arange(0.0f32, 10000.0, device)
.unwrap()
.reshape((1, 4, 50, 50))
.unwrap()
.to_dtype(dtype)
.unwrap();
let kernel = Tensor::arange(0.0f32, 100.0, device)
.unwrap()
.reshape((4, 1, 5, 5))
.unwrap()
.to_dtype(dtype)
.unwrap();
let flops = t.dims().iter().product::<usize>() * dtype.size_in_bytes();
let mut group = c.benchmark_group(device.bench_name(name));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run(black_box(&t), black_box(&kernel), 1, 0, 1, 2);
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn criterion_benchmark(c: &mut Criterion) {
let handler = BenchDeviceHandler::new().unwrap();
for device in handler.devices {
run_benchmark(c, &device, DType::F32, "conv_transpose2d_f32");
run_benchmark(c, &device, DType::F16, "conv_transpose2d_f16");
run_benchmark(c, &device, DType::BF16, "conv_transpose2d_bf16");
}
}
criterion_group!(benches, criterion_benchmark);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/benches/benchmarks/unary.rs | candle-core/benches/benchmarks/unary.rs | use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{DType, Device, Tensor};
use criterion::{criterion_group, Criterion, Throughput};
use std::hint::black_box;
use std::time::Instant;
fn run_sqrt(a: &Tensor) {
a.sqrt().unwrap();
}
fn run_unary_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) {
let b = 1;
let m = 1024;
let k = 1024;
let tensor = Tensor::arange(0.0f32, (b * m * k) as f32, device)
.unwrap()
.to_dtype(dtype)
.unwrap()
.reshape((b, m, k))
.unwrap();
let flops = b * m * k * dtype.size_in_bytes();
let mut group = c.benchmark_group(device.bench_name(name));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run_sqrt(black_box(&tensor));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn run_cast(a: &Tensor, dtype: DType) {
a.to_dtype(dtype).unwrap();
}
fn run_cast_benchmark(
c: &mut Criterion,
device: &Device,
dtype: DType,
to_dtype: DType,
name: &str,
) {
let b = 1;
let m = 1024;
let k = 1024;
let tensor = Tensor::arange(0.0f32, (b * m * k) as f32, device)
.unwrap()
.to_dtype(dtype)
.unwrap()
.reshape((b, m, k))
.unwrap();
let flops = b * m * k * dtype.size_in_bytes();
let mut group = c.benchmark_group(device.bench_name(name));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run_cast(black_box(&tensor), black_box(to_dtype));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn criterion_benchmark(c: &mut Criterion) {
let handler = BenchDeviceHandler::new().unwrap();
for device in handler.devices {
for dtype in [DType::F32, DType::BF16, DType::F16] {
let to_dtype = if matches!(dtype, DType::F32) {
DType::F16
} else {
DType::F32
};
let name = format!("cast_{}_{}", dtype.as_str(), to_dtype.as_str());
run_cast_benchmark(c, &device, dtype, to_dtype, &name);
}
for dtype in [DType::F32, DType::BF16, DType::F16] {
let name = format!("sqrt_{dtype:?}");
run_unary_benchmark(c, &device, dtype, &name);
}
}
}
criterion_group!(benches, criterion_benchmark);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/benches/benchmarks/random.rs | candle-core/benches/benchmarks/random.rs | use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{DType, Device, Tensor};
use criterion::{criterion_group, Criterion, Throughput};
use std::hint::black_box;
use std::time::Instant;
fn rand_uniform(a: &Tensor) {
a.rand_like(-1.0, 123.0).unwrap();
}
fn rand_normal(a: &Tensor) {
a.randn_like(100.0, 15.0).unwrap();
}
fn run_random_bench(c: &mut Criterion, device: &Device) {
let b = 1;
let rows = 2048;
let cols = 2048;
let dtype = DType::F32;
let tensor = Tensor::zeros((b, rows, cols), dtype, device).unwrap();
let flops = b * rows * cols * dtype.size_in_bytes();
let mut group = c.benchmark_group(device.bench_name("random_uniform"));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |benches| {
benches.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
rand_uniform(black_box(&tensor));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
let tensor = Tensor::zeros((b, rows, cols), dtype, device).unwrap();
let mut group = c.benchmark_group(device.bench_name("random_normal"));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |benches| {
benches.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
rand_normal(black_box(&tensor));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn criterion_benchmark(c: &mut Criterion) {
let handler = BenchDeviceHandler::new().unwrap();
for device in handler.devices {
run_random_bench(c, &device);
}
}
criterion_group!(benches, criterion_benchmark);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/benches/benchmarks/where_cond.rs | candle-core/benches/benchmarks/where_cond.rs | use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{DType, Device, Tensor};
use criterion::{criterion_group, Criterion, Throughput};
use std::hint::black_box;
use std::time::Instant;
fn run(a: &Tensor, b: &Tensor, c: &Tensor) {
a.where_cond(b, c).unwrap();
}
const fn create_cond_arr<const N: usize>() -> [u8; N] {
let mut arr = [0u8; N];
let mut i = 0;
while i < N {
arr[i] = (i % 2) as u8;
i += 1;
}
arr
}
const B: usize = 1;
const M: usize = 1024;
const K: usize = 1024;
const SIZE: usize = B * M * K;
static DATA: [u8; SIZE] = create_cond_arr::<SIZE>();
fn run_where_cond_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) {
let tensor = Tensor::from_slice(DATA.as_slice(), (B, M, K), device).unwrap();
let on_true = Tensor::ones((B, M, K), dtype, device).unwrap();
let on_false = Tensor::zeros((B, M, K), dtype, device).unwrap();
let elements = B * M * K;
// E.g. 2 f32 tensors + 1 u8 tensor
let flops = (2 * elements * dtype.size_in_bytes()) + elements;
let mut group = c.benchmark_group(device.bench_name(name));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run(
black_box(&tensor),
black_box(&on_true),
black_box(&on_false),
);
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn criterion_benchmark(c: &mut Criterion) {
let device = BenchDeviceHandler::new().unwrap();
for d in device.devices {
run_where_cond_benchmark(c, &d, DType::F32, "where_cond_f32");
run_where_cond_benchmark(c, &d, DType::BF16, "where_cond_bf16");
run_where_cond_benchmark(c, &d, DType::F16, "where_cond_f16");
}
}
criterion_group!(benches, criterion_benchmark);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/benches/benchmarks/broadcast.rs | candle-core/benches/benchmarks/broadcast.rs | use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{DType, Device, Tensor};
use criterion::{criterion_group, Criterion, Throughput};
use std::hint::black_box;
use std::time::Instant;
fn run(w: &Tensor, bias: &Tensor) {
w.broadcast_add(bias).unwrap();
}
fn run_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) {
// We simulate a candle-nn style conv2d + bias forward pass.
let batch_size = 1;
let ch = 1;
let m = 126;
let bias_size = 128;
let x = Tensor::ones((batch_size, ch, m, m), dtype, device).unwrap();
let bias = Tensor::ones((1, bias_size, 1, 1), dtype, device).unwrap();
let flops = batch_size * ch * m * bias_size * dtype.size_in_bytes();
let mut group = c.benchmark_group(device.bench_name(name));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run(black_box(&x), black_box(&bias));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn criterion_benchmark(c: &mut Criterion) {
let handler = BenchDeviceHandler::new().unwrap();
for device in handler.devices {
run_benchmark(c, &device, DType::F32, "broadcast_add_f32");
run_benchmark(c, &device, DType::F16, "broadcast_add_f16");
run_benchmark(c, &device, DType::BF16, "broadcast_add_bf16");
}
}
criterion_group!(benches, criterion_benchmark);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/benches/benchmarks/qmatmul.rs | candle-core/benches/benchmarks/qmatmul.rs | use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{
quantized::{self, GgmlDType, QMatMul},
Device, Module, Tensor,
};
use criterion::{criterion_group, Criterion, Throughput};
use std::hint::black_box;
use std::time::Instant;
fn run(matmul: &QMatMul, x: &Tensor) {
matmul.forward(x).unwrap();
}
fn run_bench(c: &mut Criterion, device: &Device, dtype: GgmlDType) {
let b = 1;
let m = 1;
let n = 1024;
let k = 1024;
let lhs = (0..(m * k))
.map(|v| v as f32 / (m * k) as f32)
.collect::<Vec<_>>();
let rhs = (0..(k * n))
.map(|v| v as f32 / (n * k) as f32)
.collect::<Vec<_>>();
let lhs = Tensor::from_slice(&lhs, (m, k), device).unwrap();
let rhs = Tensor::from_slice(&rhs, (k, n), device).unwrap();
let qtensor = quantized::QTensor::quantize(&rhs.t().unwrap(), dtype).unwrap();
let matmul = quantized::QMatMul::from_qtensor(qtensor).unwrap();
let flops = b * m * n * k;
let mut group = c.benchmark_group(device.bench_name(format!("qmatmul_{dtype:?}")));
group.sample_size(200);
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run(black_box(&matmul), black_box(&lhs));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn criterion_benchmark(c: &mut Criterion) {
let handler = BenchDeviceHandler::new().unwrap();
for device in handler.devices {
for dtype in [
GgmlDType::F32,
GgmlDType::F16,
GgmlDType::Q4_0,
GgmlDType::Q4_1,
GgmlDType::Q5_0,
GgmlDType::Q5_1,
GgmlDType::Q8_0,
GgmlDType::Q2K,
GgmlDType::Q3K,
GgmlDType::Q4K,
GgmlDType::Q5K,
GgmlDType::Q6K,
] {
run_bench(c, &device, dtype);
}
}
}
criterion_group!(benches, criterion_benchmark);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/benches/benchmarks/reduce.rs | candle-core/benches/benchmarks/reduce.rs | use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{DType, Device, Tensor};
use criterion::{criterion_group, Criterion, Throughput};
use half::{bf16, f16};
use std::hint::black_box;
use std::time::Instant;
fn run_sum(a: &Tensor) {
a.sum_keepdim(2).unwrap();
}
fn run_arg_min(a: &Tensor) {
a.argmin_keepdim(2).unwrap();
}
fn criterion_benchmark(c: &mut Criterion) {
let handler = BenchDeviceHandler::new().unwrap();
let (lo, up) = (-1000.0f32, 1000.0f32);
for device in handler.devices {
run_reduce(c, &device, (lo, up), false);
run_reduce(c, &device, (f16::from_f32(lo), f16::from_f32(up)), false);
run_reduce(c, &device, (bf16::from_f32(lo), bf16::from_f32(up)), false);
run_arg_reduce(c, &device, (lo, up), false);
run_arg_reduce(c, &device, (f16::from_f32(lo), f16::from_f32(up)), false);
run_arg_reduce(c, &device, (bf16::from_f32(lo), bf16::from_f32(up)), false);
run_reduce(c, &device, (lo, up), true);
run_reduce(c, &device, (f16::from_f32(lo), f16::from_f32(up)), true);
run_reduce(c, &device, (bf16::from_f32(lo), bf16::from_f32(up)), true);
run_arg_reduce(c, &device, (lo, up), true);
run_arg_reduce(c, &device, (f16::from_f32(lo), f16::from_f32(up)), true);
run_arg_reduce(c, &device, (bf16::from_f32(lo), bf16::from_f32(up)), true);
}
}
fn run_reduce<T: candle_core::FloatDType>(
c: &mut Criterion,
device: &Device,
(lo, up): (T, T),
strided: bool,
) {
let b = 1;
let m = 1024;
let k = 1024;
let a = if strided {
Tensor::rand(lo, up, (b, m, k), device)
.unwrap()
.transpose(0, 2)
.unwrap()
} else {
Tensor::rand(lo, up, (b, m, k), device).unwrap()
};
let flops = b * m * k * T::DTYPE.size_in_bytes();
let name = match T::DTYPE {
DType::F32 => {
if strided {
"reduce_f32_strided"
} else {
"reduce_f32"
}
}
DType::F16 => {
if strided {
"reduce_f16_strided"
} else {
"reduce_f16"
}
}
DType::BF16 => {
if strided {
"reduce_bf16_strided"
} else {
"reduce_bf16"
}
}
_ => "unknown",
};
let mut group = c.benchmark_group(device.bench_name(name));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run_sum(black_box(&a));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn run_arg_reduce<T: candle_core::FloatDType>(
c: &mut Criterion,
device: &Device,
(lo, up): (T, T),
strided: bool,
) {
let b = 1;
let m = 1024;
let k = 1024;
let a = if strided {
Tensor::rand(lo, up, (b, m, k), device)
.unwrap()
.transpose(0, 2)
.unwrap()
} else {
Tensor::rand(lo, up, (b, m, k), device).unwrap()
};
let flops = b * m * k * T::DTYPE.size_in_bytes();
let name = match T::DTYPE {
DType::F32 => {
if strided {
"arg_reduce_f32_strided"
} else {
"arg_reduce_f32"
}
}
DType::F16 => {
if strided {
"arg_reduce_f16_strided"
} else {
"arg_reduce_f16"
}
}
DType::BF16 => {
if strided {
"arg_reduce_bf16_strided"
} else {
"arg_reduce_bf16"
}
}
_ => "unknown",
};
let mut group = c.benchmark_group(device.bench_name(name));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run_arg_min(black_box(&a));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
criterion_group!(benches, criterion_benchmark);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/benches/benchmarks/copy.rs | candle-core/benches/benchmarks/copy.rs | use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{Device, Tensor, WithDType};
use criterion::{criterion_group, Criterion, Throughput};
use std::hint::black_box;
use std::time::Instant;
fn run_copy_mask_benchmark<D: WithDType>(c: &mut Criterion, device: &Device, name: &str) {
let batch_size = 128;
let in_seq_len = 1;
let kv_seq_len = 1024;
let attn_mask = vec![vec![vec![D::zero(); kv_seq_len]; in_seq_len]; batch_size];
let size_in_bytes = batch_size * in_seq_len * kv_seq_len * D::DTYPE.size_in_bytes();
let mut group = c.benchmark_group(device.bench_name(name));
group.throughput(Throughput::Bytes(size_in_bytes as u64));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let attn_masks = vec![attn_mask.clone(); iters as usize];
let start = Instant::now();
for attn_mask in attn_masks.into_iter() {
let tensor = Tensor::new(black_box(attn_mask), device).unwrap();
black_box(tensor);
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn criterion_benchmark(c: &mut Criterion) {
let handler = BenchDeviceHandler::new().unwrap();
for device in handler.devices {
run_copy_mask_benchmark::<f32>(c, &device, "copy_mask");
}
}
criterion_group!(benches, criterion_benchmark);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/benches/benchmarks/mod.rs | candle-core/benches/benchmarks/mod.rs | pub(crate) mod affine;
pub(crate) mod binary;
pub(crate) mod broadcast;
pub(crate) mod conv_transpose2d;
pub(crate) mod copy;
pub(crate) mod matmul;
pub(crate) mod qmatmul;
pub(crate) mod random;
pub(crate) mod reduce;
pub(crate) mod unary;
pub(crate) mod where_cond;
use candle_core::{Device, Result};
pub(crate) trait BenchDevice {
fn sync(&self) -> Result<()>;
fn bench_name<S: Into<String>>(&self, name: S) -> String;
}
impl BenchDevice for Device {
fn sync(&self) -> Result<()> {
match self {
Device::Cpu => Ok(()),
Device::Cuda(device) => {
#[cfg(feature = "cuda")]
{
use candle_core::backend::BackendDevice;
return Ok(device.synchronize()?);
}
#[cfg(not(feature = "cuda"))]
panic!("Cuda device without cuda feature enabled: {device:?}")
}
Device::Metal(device) => {
#[cfg(feature = "metal")]
return device.wait_until_completed();
#[cfg(not(feature = "metal"))]
panic!("Metal device without metal feature enabled: {device:?}")
}
}
}
fn bench_name<S: Into<String>>(&self, name: S) -> String {
match self {
Device::Cpu => {
let cpu_type = if cfg!(feature = "accelerate") {
"accelerate"
} else if cfg!(feature = "mkl") {
"mkl"
} else {
"cpu"
};
format!("{}_{}", cpu_type, name.into())
}
Device::Cuda(_) => format!("cuda_{}", name.into()),
Device::Metal(_) => format!("metal_{}", name.into()),
}
}
}
struct BenchDeviceHandler {
devices: Vec<Device>,
}
impl BenchDeviceHandler {
pub fn new() -> Result<Self> {
let mut devices = Vec::new();
if cfg!(feature = "metal") {
devices.push(Device::new_metal(0)?);
} else if cfg!(feature = "cuda") {
devices.push(Device::new_cuda(0)?);
} else {
devices.push(Device::Cpu);
}
Ok(Self { devices })
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/benches/benchmarks/matmul.rs | candle-core/benches/benchmarks/matmul.rs | use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{DType, Device, Tensor};
use criterion::{criterion_group, Criterion, Throughput};
use std::hint::black_box;
use std::time::Instant;
fn run(a: &Tensor, b: &Tensor) {
a.matmul(&b.t().unwrap()).unwrap();
}
fn run_bench(c: &mut Criterion, device: &Device) {
let b = 1;
let m = 1;
let n = 2048;
let k = 2048;
let dtype = DType::F32;
let lhs = Tensor::zeros((b, m, k), dtype, device).unwrap();
let rhs = Tensor::zeros((b, n, k), dtype, device).unwrap();
let flops = b * m * n * k;
let mut group = c.benchmark_group(device.bench_name("matmul"));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run(black_box(&lhs), black_box(&rhs));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn criterion_benchmark(c: &mut Criterion) {
let handler = BenchDeviceHandler::new().unwrap();
for device in handler.devices {
run_bench(c, &device);
}
}
criterion_group!(benches, criterion_benchmark);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/benches/benchmarks/affine.rs | candle-core/benches/benchmarks/affine.rs | use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{DType, Device, Tensor};
use criterion::{criterion_group, Criterion, Throughput};
use std::hint::black_box;
use std::time::Instant;
fn run(a: &Tensor) {
a.affine(12.34, 56.78).unwrap();
}
fn run_affine_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) {
let b = 1;
let m = 1024;
let k = 1024;
let tensor = Tensor::zeros((b, m, k), dtype, device).unwrap();
let flops = b * m * k * dtype.size_in_bytes();
let mut group = c.benchmark_group(device.bench_name(name));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run(black_box(&tensor));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn criterion_benchmark(c: &mut Criterion) {
let handler = BenchDeviceHandler::new().unwrap();
for device in handler.devices {
run_affine_benchmark(c, &device, DType::F32, "affine_f32");
run_affine_benchmark(c, &device, DType::F16, "affine_f16");
run_affine_benchmark(c, &device, DType::BF16, "affine_bf16");
#[cfg(not(feature = "metal"))]
run_affine_benchmark(c, &device, DType::F8E4M3, "affine_fp8");
}
}
criterion_group!(benches, criterion_benchmark);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/benches/benchmarks/binary.rs | candle-core/benches/benchmarks/binary.rs | use crate::benchmarks::{BenchDevice, BenchDeviceHandler};
use candle_core::{DType, Device, Tensor};
use criterion::{criterion_group, Criterion, Throughput};
use std::hint::black_box;
use std::time::Instant;
fn run(lhs: &Tensor, rhs: &Tensor) -> Tensor {
lhs.mul(rhs).unwrap()
}
fn run_unary_benchmark(c: &mut Criterion, device: &Device, dtype: DType, name: &str) {
let b = 1;
let m = 1024;
let k = 1024;
let lhs = Tensor::arange(0.0f32, (b * m * k) as f32, device)
.unwrap()
.to_dtype(dtype)
.unwrap()
.reshape((b, m, k))
.unwrap();
let rhs = Tensor::arange(0.0f32, (b * m * k) as f32, device)
.unwrap()
.to_dtype(dtype)
.unwrap()
.reshape((b, m, k))
.unwrap();
let flops = 2 * b * m * k * dtype.size_in_bytes();
let mut group = c.benchmark_group(device.bench_name(name));
group.throughput(Throughput::Bytes(flops as u64));
group.bench_function("iter", move |b| {
b.iter_custom(|iters| {
let start = Instant::now();
for _i in 0..iters {
run(black_box(&lhs), black_box(&rhs));
}
device.sync().unwrap();
start.elapsed()
})
});
group.finish();
}
fn criterion_benchmark(c: &mut Criterion) {
let handler = BenchDeviceHandler::new().unwrap();
for device in handler.devices {
for dtype in [DType::F32, DType::BF16, DType::F16] {
let name = format!("binary_mul_{dtype:?}");
run_unary_benchmark(c, &device, dtype, &name);
}
}
}
criterion_group!(benches, criterion_benchmark);
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/examples/cuda_basics.rs | candle-core/examples/cuda_basics.rs | #[cfg(feature = "accelerate")]
extern crate accelerate_src;
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
use anyhow::Result;
use candle_core::{Device, Tensor};
// xs: [1024, 64, 1924], c Tensor[dims 128, 64, 8; f32, cuda:0] Conv1dConfig { padding: 0, stride: 4, dilation: 1, groups: 1 }
fn main() -> Result<()> {
let device = Device::new_cuda(0)?;
let x = Tensor::randn(0f32, 1.0, (1024, 64, 1924), &device)?;
let c = Tensor::randn(0f32, 1.0, (128, 64, 8), &device)?;
let _x1 = x.conv1d(&c, 0, 4, 1, 1)?;
drop(_x1);
for _ in 0..20 {
let start_time = std::time::Instant::now();
let _x1 = x.conv1d(&c, 0, 4, 1, 1)?;
device.synchronize()?;
println!("conv1d: {:?}", start_time.elapsed());
}
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/examples/metal_basics.rs | candle-core/examples/metal_basics.rs | #[cfg(feature = "accelerate")]
extern crate accelerate_src;
#[cfg(feature = "mkl")]
extern crate intel_mkl_src;
use anyhow::Result;
use candle_core::{Device, Tensor};
fn main() -> Result<()> {
// This requires the code to be run with MTL_CAPTURE_ENABLED=1
let device = Device::new_metal(0)?;
let metal_device = match &device {
Device::Metal(m) => m,
_ => anyhow::bail!("unexpected device"),
};
metal_device.capture("/tmp/candle.gputrace")?;
// This first synchronize ensures that a new command buffer gets created after setting up the
// capture scope.
device.synchronize()?;
let x = Tensor::randn(0f32, 1.0, (128, 128), &device)?;
let x1 = x.add(&x)?;
println!("{x1:?}");
// This second synchronize ensures that the command buffer gets committed before the end of the
// capture scope.
device.synchronize()?;
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/examples/basics.rs | candle-core/examples/basics.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use anyhow::Result;
use candle_core::{Device, Tensor};
fn main() -> Result<()> {
let a = Tensor::new(&[[0.0f32, 1.0, 2.0], [3.0, 4.0, 5.0]], &Device::Cpu)?;
let b = Tensor::new(&[[88.0f32], [99.0]], &Device::Cpu)?;
let new_a = a.slice_scatter(&b, 1, 2)?;
assert_eq!(a.to_vec2::<f32>()?, [[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]);
assert_eq!(
new_a.to_vec2::<f32>()?,
[[0.0, 1.0, 88.0], [3.0, 4.0, 99.0]]
);
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-core/examples/cuda_sum_benchmark.rs | candle-core/examples/cuda_sum_benchmark.rs | #[cfg(feature = "mkl")]
extern crate intel_mkl_src;
#[cfg(feature = "accelerate")]
extern crate accelerate_src;
use std::str::FromStr;
use anyhow::Result;
use candle_core::{Device, Tensor};
fn cos_sin(n: usize, device: &Device) -> Result<Tensor> {
let thetas: Vec<_> = (0..n).map(|i| i as f32 / n as f32).collect();
let xs: Vec<_> = thetas.iter().map(|t| t.cos().abs()).collect();
let ys: Vec<_> = thetas.iter().map(|t| t.sin().abs()).collect();
let xs = Tensor::from_vec(xs, (n, 1), device)?;
let ys = Tensor::from_vec(ys, (1, n), device)?;
let ys = Tensor::cat(&[&ys, &ys, &ys, &ys, &ys, &ys], 1)?;
Ok(xs.matmul(&ys)?)
}
fn main() -> Result<()> {
let device = Device::new_cuda(0)?;
let args = std::env::args().collect::<Vec<String>>();
let n = if args.len() < 2 {
2000usize
} else {
usize::from_str(&args[1])?
};
let xys_cpu = cos_sin(n, &Device::Cpu)?;
let xys = cos_sin(n, &device)?;
println!("{xys_cpu:?} {xys:?}");
let sum_keepdim_cpu = xys_cpu.sum_keepdim(1)?;
println!("{sum_keepdim_cpu}");
let sum_keepdim = xys.sum_keepdim(1)?;
println!("{sum_keepdim}");
let start = std::time::Instant::now();
let n_iters = 100;
let mut v = 0f32;
for _i in 0..n_iters {
let sum_keepdim = xys.sum_keepdim(1)?;
let sum_keepdim = sum_keepdim.sum_keepdim(0)?;
let sum_keepdim: f32 = sum_keepdim.reshape(&[])?.to_scalar()?;
v += sum_keepdim;
}
let elapsed = start.elapsed();
if v > 0. {
println!(
"ran {n_iters} iterations, time per iter: {:?} ({v})",
elapsed.div_f64(n_iters as f64)
);
}
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-flash-attn-v3/build.rs | candle-flash-attn-v3/build.rs | // build.rs
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright (c) 2024 Michael Feil
// adapted from https://github.com/huggingface/candle-flash-attn-v1 , Oliver Dehaene
// adapted further in 2025 by Eric Buehler for candle repo.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use anyhow::{anyhow, Context, Result};
use candle_flash_attn_build::{cutlass_include_arg, fetch_cutlass};
use rayon::prelude::*;
use std::path::PathBuf;
use std::str::FromStr;
const CUDA_NVCC_FLAGS: Option<&'static str> = option_env!("CUDA_NVCC_FLAGS");
const KERNEL_FILES: &[&str] = &[
"flash_api.cu",
"flash_fwd_hdim64_fp16_sm90.cu",
"flash_fwd_hdim64_bf16_sm90.cu",
"flash_fwd_hdim128_fp16_sm90.cu",
"flash_fwd_hdim128_bf16_sm90.cu",
"flash_fwd_hdim256_fp16_sm90.cu",
"flash_fwd_hdim256_bf16_sm90.cu",
// "flash_bwd_hdim64_fp16_sm90.cu",
// "flash_bwd_hdim96_fp16_sm90.cu",
// "flash_bwd_hdim128_fp16_sm90.cu",
// commented out in main repo: // "flash_bwd_hdim256_fp16_sm90.cu",
// "flash_bwd_hdim64_bf16_sm90.cu",
// "flash_bwd_hdim96_bf16_sm90.cu",
// "flash_bwd_hdim128_bf16_sm90.cu",
// "flash_fwd_hdim64_e4m3_sm90.cu",
// "flash_fwd_hdim128_e4m3_sm90.cu",
// "flash_fwd_hdim256_e4m3_sm90.cu",
"flash_fwd_hdim64_fp16_gqa2_sm90.cu",
"flash_fwd_hdim64_fp16_gqa4_sm90.cu",
"flash_fwd_hdim64_fp16_gqa8_sm90.cu",
"flash_fwd_hdim64_fp16_gqa16_sm90.cu",
"flash_fwd_hdim64_fp16_gqa32_sm90.cu",
"flash_fwd_hdim128_fp16_gqa2_sm90.cu",
"flash_fwd_hdim128_fp16_gqa4_sm90.cu",
"flash_fwd_hdim128_fp16_gqa8_sm90.cu",
"flash_fwd_hdim128_fp16_gqa16_sm90.cu",
"flash_fwd_hdim128_fp16_gqa32_sm90.cu",
"flash_fwd_hdim256_fp16_gqa2_sm90.cu",
"flash_fwd_hdim256_fp16_gqa4_sm90.cu",
"flash_fwd_hdim256_fp16_gqa8_sm90.cu",
"flash_fwd_hdim256_fp16_gqa16_sm90.cu",
"flash_fwd_hdim256_fp16_gqa32_sm90.cu",
"flash_fwd_hdim64_bf16_gqa2_sm90.cu",
"flash_fwd_hdim64_bf16_gqa4_sm90.cu",
"flash_fwd_hdim64_bf16_gqa8_sm90.cu",
"flash_fwd_hdim64_bf16_gqa16_sm90.cu",
"flash_fwd_hdim64_bf16_gqa32_sm90.cu",
"flash_fwd_hdim128_bf16_gqa2_sm90.cu",
"flash_fwd_hdim128_bf16_gqa4_sm90.cu",
"flash_fwd_hdim128_bf16_gqa8_sm90.cu",
"flash_fwd_hdim128_bf16_gqa16_sm90.cu",
"flash_fwd_hdim128_bf16_gqa32_sm90.cu",
"flash_fwd_hdim256_bf16_gqa2_sm90.cu",
"flash_fwd_hdim256_bf16_gqa4_sm90.cu",
"flash_fwd_hdim256_bf16_gqa8_sm90.cu",
"flash_fwd_hdim256_bf16_gqa16_sm90.cu",
"flash_fwd_hdim256_bf16_gqa32_sm90.cu",
// "flash_fwd_hdim64_e4m3_gqa2_sm90.cu",
// "flash_fwd_hdim64_e4m3_gqa4_sm90.cu",
// "flash_fwd_hdim64_e4m3_gqa8_sm90.cu",
// "flash_fwd_hdim64_e4m3_gqa16_sm90.cu",
// "flash_fwd_hdim64_e4m3_gqa32_sm90.cu",
// "flash_fwd_hdim128_e4m3_gqa2_sm90.cu",
// "flash_fwd_hdim128_e4m3_gqa4_sm90.cu",
// "flash_fwd_hdim128_e4m3_gqa8_sm90.cu",
// "flash_fwd_hdim128_e4m3_gqa16_sm90.cu",
// "flash_fwd_hdim128_e4m3_gqa32_sm90.cu",
// "flash_fwd_hdim256_e4m3_gqa2_sm90.cu",
// "flash_fwd_hdim256_e4m3_gqa4_sm90.cu",
// "flash_fwd_hdim256_e4m3_gqa8_sm90.cu",
// "flash_fwd_hdim256_e4m3_gqa16_sm90.cu",
// "flash_fwd_hdim256_e4m3_gqa32_sm90.cu",
];
const CUTLASS_COMMIT: &str = "4c42f73fdab5787e3bb57717f35a8cb1b3c0dc6d";
fn main() -> Result<()> {
// Use RAYON_NUM_THREADS or else default to the number of physical CPUs
let num_cpus = std::env::var("RAYON_NUM_THREADS").map_or_else(
|_| num_cpus::get_physical(),
|s| usize::from_str(&s).unwrap_or_else(|_| num_cpus::get_physical()),
);
// limit to 16 cpus to not use to much ram on large servers
let num_cpus = num_cpus.min(16);
rayon::ThreadPoolBuilder::new()
.num_threads(num_cpus)
.build_global()
.unwrap();
// Telling Cargo that if any of these files changes, rebuild.
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-env-changed=CUDA_COMPUTE_CAP");
println!("cargo:rerun-if-env-changed=CANDLE_NVCC_CCBIN");
for file in KERNEL_FILES {
println!("cargo:rerun-if-changed=hkernel/{file}");
}
println!("cargo:rerun-if-changed=kernels/**.h");
println!("cargo:rerun-if-changed=kernels/**.hpp");
println!("cargo:rerun-if-changed=kernels/**.cpp");
let out_dir = PathBuf::from(std::env::var("OUT_DIR").context("OUT_DIR not set")?);
// You can optionally allow an environment variable to cache the compiled artifacts.
// If not found, we compile into the standard OUT_DIR.
let build_dir = match std::env::var("CANDLE_FLASH_ATTN_BUILD_DIR") {
Err(_) => out_dir.clone(),
Ok(build_dir) => {
let path = PathBuf::from(build_dir);
path.canonicalize().map_err(|_| {
anyhow!(
"Directory doesn't exist: {} (the current directory is {})",
path.display(),
std::env::current_dir().unwrap().display()
)
})?
}
};
// Ensure we set CUDA_INCLUDE_DIR for our crates that might rely on it.
// Fetch cutlass headers on-demand
let cutlass_dir = fetch_cutlass(&out_dir, CUTLASS_COMMIT)?;
let cutlass_include: &'static str = Box::leak(cutlass_include_arg(&cutlass_dir).into_boxed_str());
set_cuda_include_dir()?;
// If set, pass along the custom compiler for NVCC
let ccbin_env = std::env::var("CANDLE_NVCC_CCBIN").ok();
// Determine the GPU architecture we’re targeting, e.g. 90 for `sm_90`.
let compute_cap = compute_cap()?;
// assert compute cap is sm90
assert!(compute_cap == 90, "Compute capability must be 90 (90a)");
// Our final library name
let out_file = build_dir.join("libflashattentionv3.a");
// Construct the list of (input_file -> output_object_file)
let kernel_dir = PathBuf::from("hkernel");
let cu_files: Vec<(PathBuf, PathBuf)> = KERNEL_FILES
.iter()
.map(|f| {
let mut obj_file = out_dir.join(f);
obj_file.set_extension("o");
(kernel_dir.join(f), obj_file)
})
.collect();
// Decide whether to skip recompile if outputs are up to date.
// This is a simplistic approach,
// so feel free to refine if you need more robust up-to-date checks.
let out_modified = out_file
.metadata()
.and_then(|m| m.modified())
.ok()
.unwrap_or_else(|| std::time::SystemTime::UNIX_EPOCH);
let should_compile = !out_file.exists()
|| cu_files.iter().any(|(input, _)| {
let input_modified = input
.metadata()
.and_then(|m| m.modified())
.unwrap_or(std::time::SystemTime::UNIX_EPOCH);
input_modified.duration_since(out_modified).is_ok() // True if input_modified >= out_modified
});
if should_compile {
// 1) Compile each .cu/.cpp -> .o
cu_files
.par_iter()
.try_for_each(|(input, obj)| -> Result<()> {
let mut command = std::process::Command::new("nvcc");
// Optimization and standard
command.arg("-O3");
command.arg("-std=c++17");
// GPU architecture, hard code sm_90a instead of sm90
command.arg(format!("--gpu-architecture={}", "sm_90a"));
// Compile to object file
command.arg("-c");
command.args(["-o", obj.to_str().unwrap()]);
// Default stream per-thread
command.args(["--default-stream", "per-thread"]);
// Include path
command.arg(&cutlass_include);
// Undefine CUDA “no half/bfloat” macros
command.arg("-U__CUDA_NO_HALF_OPERATORS__");
command.arg("-U__CUDA_NO_HALF_CONVERSIONS__");
command.arg("-U__CUDA_NO_BFLOAT16_OPERATORS__");
command.arg("-U__CUDA_NO_BFLOAT16_CONVERSIONS__");
command.arg("-U__CUDA_NO_BFLOAT162_OPERATORS__");
command.arg("-U__CUDA_NO_BFLOAT162_CONVERSIONS__");
// Enable relaxed/extended lambda and fast math
command.arg("--expt-relaxed-constexpr");
command.arg("--expt-extended-lambda");
command.arg("--use_fast_math");
// PTXAS options: verbose output, register usage info, etc.
command.arg("--ptxas-options=-v");
command.arg("--ptxas-options=--verbose,--register-usage-level=10,--warn-on-local-memory-usage");
// Additional debug/performance flags
command.arg("-lineinfo");
command.arg("-DCUTLASS_DEBUG_TRACE_LEVEL=0");
command.arg("-DNDEBUG");
// https://github.com/EricLBuehler/mistral.rs/issues/941
command.arg("-D_USE_MATH_DEFINES");
if let Some(ccbin_path) = &ccbin_env {
command.arg("-allow-unsupported-compiler");
command.args(["-ccbin", ccbin_path]);
}
// Add the source file
command.arg(input);
// https://github.com/EricLBuehler/mistral.rs/issues/286
if let Some(cuda_nvcc_flags_env) = CUDA_NVCC_FLAGS {
command.arg("--compiler-options");
command.arg(cuda_nvcc_flags_env);
}
let output = command
.spawn()
.with_context(|| format!("Failed to spawn nvcc for {input:?}"))?
.wait_with_output()
.with_context(|| format!("Failed during nvcc invocation for {input:?}"))?;
if !output.status.success() {
return Err(anyhow!(
"nvcc error:\nCommand: {:?}\nstdout:\n{}\nstderr:\n{}",
command,
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
));
}
Ok(())
})?;
// 2) Create static library from the .o files
let obj_files = cu_files
.iter()
.map(|(_, obj)| obj.clone())
.collect::<Vec<_>>();
let mut command = std::process::Command::new("nvcc");
command.arg("--lib");
command.args(["-o", out_file.to_str().unwrap()]);
command.args(obj_files);
let output = command
.spawn()
.context("Failed spawning nvcc to archive .o files")?
.wait_with_output()
.context("Failed during nvcc archive step")?;
if !output.status.success() {
return Err(anyhow!(
"nvcc error (archiving):\nCommand: {:?}\nstdout:\n{}\nstderr:\n{}",
command,
String::from_utf8_lossy(&output.stdout),
String::from_utf8_lossy(&output.stderr)
));
}
}
// Finally, instruct cargo to link your library
println!("cargo:rustc-link-search={}", build_dir.display());
println!("cargo:rustc-link-lib=static=flashattentionv3");
// Link required system libs
println!("cargo:rustc-link-lib=dylib=cudart");
println!("cargo:rustc-link-lib=dylib=stdc++");
Ok(())
}
/// This function attempts to find a CUDA toolkit root that contains `include/cuda.h`,
/// and prints that path as `CUDA_INCLUDE_DIR`.
fn set_cuda_include_dir() -> Result<()> {
// Adapted from cudarc build.rs
let env_vars = [
"CUDA_PATH",
"CUDA_ROOT",
"CUDA_TOOLKIT_ROOT_DIR",
"CUDNN_LIB",
];
let env_vars = env_vars
.into_iter()
.filter_map(|v| std::env::var(v).ok())
.map(Into::<PathBuf>::into);
let common_roots = [
"/usr",
"/usr/local/cuda",
"/opt/cuda",
"/usr/lib/cuda",
"C:/Program Files/NVIDIA GPU Computing Toolkit",
"C:/CUDA",
];
let candidates = env_vars.chain(common_roots.into_iter().map(Into::into));
let root = candidates
.filter(|path| path.join("include").join("cuda.h").is_file())
.next()
.ok_or_else(|| anyhow!("Cannot find a valid CUDA root with include/cuda.h"))?;
println!(
"cargo:rustc-env=CUDA_INCLUDE_DIR={}",
root.join("include").display()
);
Ok(())
}
/// Determine the compute capability we should target.
/// If the user sets `CUDA_COMPUTE_CAP` we trust that.
/// Otherwise, we attempt to parse it from `nvidia-smi`.
fn compute_cap() -> Result<usize> {
if let Ok(compute_cap_str) = std::env::var("CUDA_COMPUTE_CAP") {
let cc = compute_cap_str
.parse::<usize>()
.context("Failed to parse CUDA_COMPUTE_CAP")?;
Ok(cc)
} else {
// parse from nvidia-smi
let output = std::process::Command::new("nvidia-smi")
.args(["--query-gpu=compute_cap", "--format=csv"])
.output()
.context("Failed to run nvidia-smi. Make sure it's in PATH.")?;
let stdout = String::from_utf8_lossy(&output.stdout);
let mut lines = stdout.lines();
if lines.next().unwrap_or("") != "compute_cap" {
return Err(anyhow!("Unexpected output from nvidia-smi: {stdout}"));
}
if let Some(cap_line) = lines.next() {
// e.g. "9.0" -> "90"
let cc_str = cap_line.trim().replace('.', "");
let cc = cc_str.parse::<usize>()?;
Ok(cc)
} else {
Err(anyhow!("nvidia-smi did not return a compute_cap line"))
}
}
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-flash-attn-v3/src/lib.rs | candle-flash-attn-v3/src/lib.rs | // SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright (c) 2024 Michael Feil
// 2025 adjusted by Eric Buehler for candle repo.
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod ffi;
use candle::backend::BackendStorage;
use candle::cuda_backend::cudarc::driver::DevicePtr;
use candle::{CpuStorage, DType, Layout, Result, Shape, Tensor};
use half::{bf16, f16};
fn round_multiple(x: usize, m: usize) -> usize {
(x + m - 1) / m * m
}
pub struct FlashAttn {
pub softmax_scale: f32,
pub alibi_slopes: Option<Tensor>,
pub window_size_left: Option<usize>,
pub window_size_right: Option<usize>,
pub use_gqa_packing: bool,
}
impl FlashAttn {
fn cuda_fwd_t<
T: candle::cuda_backend::CudaDType + candle::cuda_backend::cudarc::driver::DeviceRepr,
>(
&self,
q: &candle::CudaStorage,
q_l: &Layout,
k: &candle::CudaStorage,
k_l: &Layout,
v: &candle::CudaStorage,
v_l: &Layout,
is_bf16: bool,
) -> Result<(candle::CudaStorage, Shape)> {
// https://github.com/Dao-AILab/flash-attention/blob/0dfb28174333d9eefb7c1dd4292690a8458d1e89/hopper/flash_api.cpp
let dev = q.device();
let out_shape = q_l.shape().clone();
let out_l = Layout::contiguous(&out_shape);
let q = q.as_cuda_slice::<T>()?;
let k = k.as_cuda_slice::<T>()?;
let v = v.as_cuda_slice::<T>()?;
let q = q.slice(q_l.start_offset()..);
let k = k.slice(k_l.start_offset()..);
let v = v.slice(v_l.start_offset()..);
let q_stride = q_l.stride();
let k_stride = k_l.stride();
let v_stride = v_l.stride();
let o_stride = out_l.stride();
let q_rank = q_stride.len();
let k_rank = k_stride.len();
let v_rank = v_stride.len();
let o_rank = o_stride.len();
if q_rank != 4 || k_rank != 4 || v_rank != 4 {
candle::bail!(
"flash-attn-v3 expects input tensors of rank 4 (q: {q_rank}, k: {k_rank}, v: {v_rank}"
)
}
if q_stride[q_rank - 1] != 1 {
candle::bail!("the last dim of q must be contiguous {q_stride:?}")
}
if k_stride[k_rank - 1] != 1 {
candle::bail!("the last dim of k must be contiguous {k_stride:?}")
}
if v_stride[v_rank - 1] != 1 {
candle::bail!("the last dim of v must be contiguous {v_stride:?}")
}
let (b_sz, seqlen_q, num_heads, head_size_og) = q_l.shape().dims4()?;
let (_b_sz, seqlen_k, num_heads_k, _head_size_og) = k_l.shape().dims4()?;
let expected_kv = (b_sz, seqlen_k, num_heads_k, head_size_og);
if expected_kv != k_l.shape().dims4()? {
candle::bail!("shape mismatch q {:?} and k {:?}", q_l.shape(), k_l.shape())
}
if expected_kv != v_l.shape().dims4()? {
candle::bail!("shape mismatch q {:?} and v {:?}", q_l.shape(), v_l.shape())
}
if head_size_og > 256 {
candle::bail!("only supports head dimension at most 256 (got {head_size_og})")
}
if !(head_size_og == 256 || head_size_og == 128 || head_size_og == 64) {
candle::bail!("only supports head dimension 64, 128 and 256 (got {head_size_og})")
}
if head_size_og % 8 != 0 {
// TODO: Handle head sizes that are not a multiple of 8 via some padding.
candle::bail!("only supports head sizes that are a multiple of 8 (got {head_size_og})")
}
if num_heads % num_heads_k != 0 {
candle::bail!("number of k/v heads {num_heads_k} must divide number of heads in query {num_heads}")
}
let use_gqa_packing = match num_heads_k / num_heads {
2 | 4 | 8 | 16 | 32 => self.use_gqa_packing as i32,
_ => 0,
};
let stream = dev.cuda_stream();
let alibi_slopes_ptr = if let Some(alibi_slopes) = &self.alibi_slopes {
if alibi_slopes.dtype() != DType::F32 {
candle::bail!(
"DType mismatch alibi_slopes {:?}, expected {:?}",
alibi_slopes.dtype(),
DType::F32
);
}
let (alibi_slopes, alibi_slopes_layout) = alibi_slopes.storage_and_layout();
if num_heads != alibi_slopes_layout.shape().dims1()? {
candle::bail!(
"shape mismatch alibi_slopes {:?}, expected {:?}",
alibi_slopes_layout.shape(),
(num_heads)
);
}
let alibi_slopes = match &*alibi_slopes {
candle::Storage::Cuda(c) => c.as_cuda_slice::<f32>()?,
_ => candle::bail!("alibi_slopes must be a cuda tensor"),
};
let alibi_slopes = alibi_slopes.slice(alibi_slopes_layout.start_offset()..);
// Dropping the guard here doesn't seem very safe.
let (ptr, _guard) = alibi_slopes.device_ptr(&stream);
ptr as *const core::ffi::c_void
} else {
std::ptr::null()
};
// if window_size_left > self.max_seqlen_k or None => -1
let mut window_size_left = self
.window_size_left
.filter(|v| v <= &seqlen_k)
.map(|v| v as i32)
.unwrap_or(-1);
// if window_size_right > self.max_seqlen_k or None => -1
let mut window_size_right = self
.window_size_right
.filter(|v| v <= &seqlen_k)
.map(|v| v as i32)
.unwrap_or(-1);
let head_size = round_multiple(head_size_og, 8);
let head_size_rounded = round_multiple(head_size, 32);
let seqlen_q_rounded = round_multiple(seqlen_q, 128);
let seqlen_k_rounded = round_multiple(seqlen_k, 128);
let elem_count = out_shape.elem_count();
let dst = unsafe { dev.alloc::<T>(elem_count) }?;
let softmax_lse = dev.alloc_zeros::<f32>(b_sz * 128 * num_heads * seqlen_q)?;
let is_bf16 = if is_bf16 { 1 } else { 0 };
// Causal is the special case where window_size_right == 0 and window_size_left < 0.
// Local is the more general case where window_size_right >= 0 or window_size_left >= 0.
let is_causal = if window_size_left < 0 && window_size_right == 0 {
1
} else {
0
};
if window_size_left < 0 && window_size_right >= 0 {
window_size_left = seqlen_k as i32;
}
if window_size_left >= 0 && window_size_right < 0 {
window_size_right = seqlen_k as i32;
}
unsafe {
let (q_ptr, _guard) = q.device_ptr(&stream);
let (k_ptr, _guard) = k.device_ptr(&stream);
let (v_ptr, _guard) = v.device_ptr(&stream);
let (dst_ptr, _guard) = dst.device_ptr(&stream);
let (softmax_lse_ptr, _guard) = softmax_lse.device_ptr(&stream);
ffi::run_mha(
q_ptr as *const core::ffi::c_void,
k_ptr as *const core::ffi::c_void,
v_ptr as *const core::ffi::c_void,
dst_ptr as *const core::ffi::c_void,
softmax_lse_ptr as *const core::ffi::c_void,
/* alibi_slopes_ptr */ alibi_slopes_ptr,
/* cu_seqlens_q_ptr */ std::ptr::null(),
/* cu_seqlens_k_ptr */ std::ptr::null(),
/* q_batch_stride */ q_stride[0] as u32,
/* k_batch_stride */ k_stride[0] as u32,
/* v_batch_stride */ v_stride[0] as u32,
/* o_batch_stride */ o_stride[0] as u32,
/* alibi_slopes_batch_stride */ 0,
/* q_row_stride */ q_stride[q_rank - 3] as u32,
/* k_row_stride */ k_stride[k_rank - 3] as u32,
/* v_row_stride */ v_stride[v_rank - 3] as u32,
/* o_row_stride */ o_stride[o_rank - 3] as u32,
/* q_head_stride */ q_stride[q_rank - 2] as u32,
/* k_head_stride */ k_stride[k_rank - 2] as u32,
/* v_head_stride */ v_stride[v_rank - 2] as u32,
/* o_head_stride */ o_stride[o_rank - 2] as u32,
/* b */ b_sz as u32,
/* h */ num_heads as u32,
/* h_k */ num_heads_k as u32,
/* d */ head_size as u32,
/* d_rounded */ head_size_rounded as u32,
/* softmax_scale*/ self.softmax_scale,
/* seqlen_q */ seqlen_q as u32,
/* seqlen_k */ seqlen_k as u32,
/* seqlen_q_rounded */ seqlen_q_rounded as u32,
/* seqlen_k_rounded */ seqlen_k_rounded as u32,
/* is_bf16 */ is_bf16,
/* is_causal */ is_causal,
/* unpadded_lse */ 0,
/* use_gqa_packing */ use_gqa_packing,
/* window_size_left */ window_size_left,
/* window_size_right */ window_size_right,
/* total_q, dummy */ 0u32,
/* total_k, dummy */ 0u32,
)
}
let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev.clone());
Ok((dst, out_shape))
}
}
impl candle::CustomOp3 for FlashAttn {
fn name(&self) -> &'static str {
"flash-attn-v3"
}
fn cpu_fwd(
&self,
_: &CpuStorage,
_: &Layout,
_: &CpuStorage,
_: &Layout,
_: &CpuStorage,
_: &Layout,
) -> Result<(CpuStorage, Shape)> {
candle::bail!("no cpu support for flash-attn-v3")
}
fn cuda_fwd(
&self,
q: &candle::CudaStorage,
q_l: &Layout,
k: &candle::CudaStorage,
k_l: &Layout,
v: &candle::CudaStorage,
v_l: &Layout,
) -> Result<(candle::CudaStorage, Shape)> {
match q.dtype() {
candle::DType::F16 => self.cuda_fwd_t::<f16>(q, q_l, k, k_l, v, v_l, false),
candle::DType::BF16 => self.cuda_fwd_t::<bf16>(q, q_l, k, k_l, v, v_l, true),
dt => candle::bail!("flash-attn-v3 is only supported for f16/bf16 ({dt:?})"),
}
}
}
/// Flash-attention v3 layer.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `use_gqa_packing` - enables dedicated kernels for GQA packing if head sizes are compatible.
/// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`.
pub fn flash_attn(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
causal: bool,
use_gqa_packing: bool,
) -> Result<Tensor> {
let window_size_left = None;
let window_size_right = if causal { Some(0) } else { None };
let op = FlashAttn {
softmax_scale,
alibi_slopes: None,
window_size_left,
window_size_right,
use_gqa_packing,
};
q.apply_op3(k, v, op)
}
/// Flash-attention v3 layer.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `window_size_left` - Limit left attention to value tokens.
/// * `window_size_right` - Limit right attention to value tokens.
/// * `use_gqa_packing` - enables dedicated kernels for GQA packing if head sizes are compatible.
///
/// # Causal mask
///
/// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result
/// of `Q @ K^T`
///
/// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`.
pub fn flash_attn_windowed(
q: &Tensor,
k: &Tensor,
v: &Tensor,
softmax_scale: f32,
window_size_left: Option<usize>,
window_size_right: Option<usize>,
use_gqa_packing: bool,
) -> Result<Tensor> {
let op = FlashAttn {
softmax_scale,
alibi_slopes: None,
window_size_left,
window_size_right,
use_gqa_packing,
};
q.apply_op3(k, v, op)
}
/// Flash-attention v3 layer.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`.
/// * `use_gqa_packing` - enables dedicated kernels for GQA packing if head sizes are compatible.
/// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`.
pub fn flash_attn_alibi(
q: &Tensor,
k: &Tensor,
v: &Tensor,
alibi_slopes: &Tensor,
softmax_scale: f32,
causal: bool,
use_gqa_packing: bool,
) -> Result<Tensor> {
let window_size_left = None;
let window_size_right = if causal { Some(0) } else { None };
let op = FlashAttn {
softmax_scale,
alibi_slopes: Some(alibi_slopes.clone()),
window_size_left,
window_size_right,
use_gqa_packing,
};
q.apply_op3(k, v, op)
}
/// Flash-attention v3 layer.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(batch, seq_len_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(batch, seq_len_kv, num_heads_kv, head_size)`.
/// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`.
/// * `window_size_left` - Limit left attention to value tokens.
/// * `window_size_right` - Limit right attention to value tokens.
/// * `use_gqa_packing` - enables dedicated kernels for GQA packing if head sizes are compatible.
///
/// # Causal mask
///
/// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result
/// of `Q @ K^T`
///
/// The resulting tensor has dimensions `(batch, seq_len_q, num_heads_q, head_size)`.
pub fn flash_attn_alibi_windowed(
q: &Tensor,
k: &Tensor,
v: &Tensor,
alibi_slopes: &Tensor,
softmax_scale: f32,
window_size_left: Option<usize>,
window_size_right: Option<usize>,
use_gqa_packing: bool,
) -> Result<Tensor> {
let op = FlashAttn {
softmax_scale,
alibi_slopes: Some(alibi_slopes.clone()),
window_size_left,
window_size_right,
use_gqa_packing,
};
q.apply_op3(k, v, op)
}
struct FlashAttnVarLen {
pub softmax_scale: f32,
pub max_seqlen_q: usize,
pub max_seqlen_k: usize,
pub seqlens_q: Tensor,
pub seqlens_k: Tensor,
pub alibi_slopes: Option<Tensor>,
pub window_size_left: Option<usize>,
pub window_size_right: Option<usize>,
pub use_gqa_packing: bool,
}
impl FlashAttnVarLen {
fn cuda_fwd_t<
T: candle::cuda_backend::CudaDType + candle::cuda_backend::cudarc::driver::DeviceRepr,
>(
&self,
q: &candle::CudaStorage,
q_l: &Layout,
k: &candle::CudaStorage,
k_l: &Layout,
v: &candle::CudaStorage,
v_l: &Layout,
is_bf16: bool,
) -> Result<(candle::CudaStorage, Shape)> {
// https://github.com/Dao-AILab/flash-attention/blob/0dfb28174333d9eefb7c1dd4292690a8458d1e89/hopper/flash_api.cpp
let dev = q.device();
let out_shape = q_l.shape().clone();
let out_l = Layout::contiguous(&out_shape);
let (seqlens_q, seqlens_q_layout) = self.seqlens_q.storage_and_layout();
let seqlens_q = match &*seqlens_q {
candle::Storage::Cuda(c) => c.as_cuda_slice::<u32>()?, // Should be i32!
_ => candle::bail!("seqlens_q must be a cuda tensor"),
};
let seqlens_q = match seqlens_q_layout.contiguous_offsets() {
Some((o1, o2)) => seqlens_q.slice(o1..o2),
None => candle::bail!("seqlens_q has to be contiguous"),
};
let (seqlens_k, seqlens_k_layout) = self.seqlens_k.storage_and_layout();
let seqlens_k = match &*seqlens_k {
candle::Storage::Cuda(c) => c.as_cuda_slice::<u32>()?, // Should be i32!
_ => candle::bail!("seqlens_k must be a cuda tensor"),
};
let seqlens_k = match seqlens_k_layout.contiguous_offsets() {
Some((o1, o2)) => seqlens_k.slice(o1..o2),
None => candle::bail!("seqlens_k has to be contiguous"),
};
let q = q.as_cuda_slice::<T>()?;
let k = k.as_cuda_slice::<T>()?;
let v = v.as_cuda_slice::<T>()?;
let q = q.slice(q_l.start_offset()..);
let k = k.slice(k_l.start_offset()..);
let v = v.slice(v_l.start_offset()..);
let q_stride = q_l.stride();
let k_stride = k_l.stride();
let v_stride = v_l.stride();
let o_stride = out_l.stride();
let q_rank = q_stride.len();
let k_rank = k_stride.len();
let v_rank = v_stride.len();
let o_rank = o_stride.len();
if q_rank != 3 || k_rank != 3 || v_rank != 3 {
candle::bail!(
"flash-attn-v3-varlen expects input tensors of rank 3 (q: {q_rank}, k: {k_rank}, v: {v_rank}"
)
}
if q_stride[q_rank - 1] != 1 {
candle::bail!("the last dim of q must be contiguous {q_stride:?}")
}
if k_stride[k_rank - 1] != 1 {
candle::bail!("the last dim of k must be contiguous {k_stride:?}")
}
if v_stride[v_rank - 1] != 1 {
candle::bail!("the last dim of v must be contiguous {v_stride:?}")
}
let (total_q, num_heads, head_size_og) = q_l.shape().dims3()?;
let (total_k, num_heads_k, _head_size_og) = k_l.shape().dims3()?;
let expected_kv = (total_k, num_heads_k, head_size_og);
if expected_kv != k_l.shape().dims3()? {
candle::bail!("shape mismatch q {:?} and k {:?}", q_l.shape(), k_l.shape())
}
if expected_kv != v_l.shape().dims3()? {
candle::bail!("shape mismatch q {:?} and v {:?}", q_l.shape(), v_l.shape())
}
if head_size_og > 256 {
candle::bail!("only supports head dimension at most 256 (got {head_size_og})")
}
if !(head_size_og == 256 || head_size_og == 128 || head_size_og == 64) {
candle::bail!("only supports head dimension 64, 128 and 256 (got {head_size_og})")
}
if head_size_og % 8 != 0 {
// TODO: Handle head sizes that are not a multiple of 8 via some padding.
candle::bail!("only supports head sizes that are a multiple of 8 (got {head_size_og})")
}
if num_heads % num_heads_k != 0 {
candle::bail!("number of k/v heads {num_heads_k} must divide number of heads in query {num_heads}")
}
let use_gqa_packing = match num_heads_k / num_heads {
2 | 4 | 8 | 16 | 32 => self.use_gqa_packing as i32,
_ => 0,
};
let nseqlens_q = seqlens_q_layout.shape().dims1()?;
if nseqlens_q < 2 {
candle::bail!("seqlens_q should have a len >= 2 {nseqlens_q}")
}
let nseqlens_k = seqlens_k_layout.shape().dims1()?;
if nseqlens_k != nseqlens_q {
candle::bail!("seqlens_q and seqlens_k should have the same number of elements {nseqlens_q} <> {nseqlens_k}")
}
let batch_size = nseqlens_q - 1;
let stream = dev.cuda_stream();
let alibi_slopes_ptr = if let Some(alibi_slopes) = &self.alibi_slopes {
if alibi_slopes.dtype() != DType::F32 {
candle::bail!(
"DType mismatch alibi_slopes {:?}, expected {:?}",
alibi_slopes.dtype(),
DType::F32
);
}
let (alibi_slopes, alibi_slopes_layout) = alibi_slopes.storage_and_layout();
if num_heads != alibi_slopes_layout.shape().dims1()? {
candle::bail!(
"shape mismatch alibi_slopes {:?}, expected {:?}",
alibi_slopes_layout.shape(),
(num_heads)
);
}
let alibi_slopes = match &*alibi_slopes {
candle::Storage::Cuda(c) => c.as_cuda_slice::<f32>()?,
_ => candle::bail!("alibi_slopes must be a cuda tensor"),
};
let alibi_slopes = alibi_slopes.slice(alibi_slopes_layout.start_offset()..);
// Dropping the guard here doesn't seem very safe.
let (ptr, _guard) = alibi_slopes.device_ptr(&stream);
ptr as *const core::ffi::c_void
} else {
std::ptr::null()
};
// if window_size_left > self.max_seqlen_k or None => -1
let mut window_size_left = self
.window_size_left
.filter(|v| v <= &self.max_seqlen_k)
.map(|v| v as i32)
.unwrap_or(-1);
if window_size_left < self.max_seqlen_k as i32 {
window_size_left = self.max_seqlen_k.clone() as i32;
}
// if window_size_right > self.max_seqlen_k or None => -1
let mut window_size_right = self
.window_size_right
.filter(|v| v <= &self.max_seqlen_k)
.map(|v| v as i32)
.unwrap_or(-1);
if window_size_right < self.max_seqlen_k as i32 {
window_size_right = self.max_seqlen_k.clone() as i32;
}
let head_size = round_multiple(head_size_og, 8);
let head_size_rounded = round_multiple(head_size, 32);
let seqlen_q_rounded = round_multiple(self.max_seqlen_q, 128);
let seqlen_k_rounded = round_multiple(self.max_seqlen_k, 128);
let elem_count = out_shape.elem_count();
let dst = unsafe { dev.alloc::<T>(elem_count) }?;
let softmax_lse = dev.alloc_zeros::<f32>(num_heads * total_q)?;
let is_bf16 = if is_bf16 { 1 } else { 0 };
// Causal is the special case where window_size_right == 0 and window_size_left < 0.
// Local is the more general case where window_size_right >= 0 or window_size_left >= 0.
let is_causal = if window_size_left < 0 && window_size_right == 0 {
1
} else {
0
};
if window_size_left < 0 && window_size_right >= 0 {
window_size_left = self.max_seqlen_k as i32;
}
if window_size_left >= 0 && window_size_right < 0 {
window_size_right = self.max_seqlen_k as i32;
}
unsafe {
let (q_ptr, _guard) = q.device_ptr(&stream);
let (k_ptr, _guard) = k.device_ptr(&stream);
let (v_ptr, _guard) = v.device_ptr(&stream);
let (dst_ptr, _guard) = dst.device_ptr(&stream);
let (softmax_lse_ptr, _guard) = softmax_lse.device_ptr(&stream);
let (seqlens_q_ptr, _guard) = seqlens_q.device_ptr(&stream);
let (seqlens_k_ptr, _guard) = seqlens_k.device_ptr(&stream);
ffi::run_mha(
q_ptr as *const core::ffi::c_void,
k_ptr as *const core::ffi::c_void,
v_ptr as *const core::ffi::c_void,
dst_ptr as *const core::ffi::c_void,
softmax_lse_ptr as *const core::ffi::c_void,
/* alibi_slopes_ptr */ alibi_slopes_ptr,
/* cu_seqlens_q_ptr */ seqlens_q_ptr as *const i32,
/* cu_seqlens_k_ptr */ seqlens_k_ptr as *const i32,
/* q_batch_stride */ 0,
/* k_batch_stride */ 0,
/* v_batch_stride */ 0,
/* o_batch_stride */ 0,
/* alibi_slopes_batch_stride */ 0,
/* q_row_stride */ q_stride[q_rank - 3] as u32,
/* k_row_stride */ k_stride[k_rank - 3] as u32,
/* v_row_stride */ v_stride[v_rank - 3] as u32,
/* o_row_stride */ o_stride[o_rank - 3] as u32,
/* q_head_stride */ q_stride[q_rank - 2] as u32,
/* k_head_stride */ k_stride[k_rank - 2] as u32,
/* v_head_stride */ v_stride[v_rank - 2] as u32,
/* o_head_stride */ o_stride[o_rank - 2] as u32,
/* b */ batch_size as u32,
/* h */ num_heads as u32,
/* h_k */ num_heads_k as u32,
/* d */ head_size as u32,
/* d_rounded */ head_size_rounded as u32,
/* softmax_scale*/ self.softmax_scale,
/* seqlen_q */ self.max_seqlen_q as u32,
/* seqlen_k */ self.max_seqlen_k as u32,
/* seqlen_q_rounded */ seqlen_q_rounded as u32,
/* seqlen_k_rounded */ seqlen_k_rounded as u32,
/* is_bf16 */ is_bf16,
/* is_causal */ is_causal,
/* unpadded_lse */ 1,
/* use_gqa_packing */ use_gqa_packing,
/* window_size_left */ window_size_left,
/* window_size_right */ window_size_right,
/* total_q */ total_q as u32,
/* total_k */ total_k as u32,
)
}
let dst = candle::CudaStorage::wrap_cuda_slice(dst, dev.clone());
Ok((dst, out_shape))
}
}
impl candle::CustomOp3 for FlashAttnVarLen {
fn name(&self) -> &'static str {
"flash-attn-v3-varlen"
}
fn cpu_fwd(
&self,
_: &CpuStorage,
_: &Layout,
_: &CpuStorage,
_: &Layout,
_: &CpuStorage,
_: &Layout,
) -> Result<(CpuStorage, Shape)> {
candle::bail!("no cpu support for flash-attn-v3")
}
fn cuda_fwd(
&self,
q: &candle::CudaStorage,
q_l: &Layout,
k: &candle::CudaStorage,
k_l: &Layout,
v: &candle::CudaStorage,
v_l: &Layout,
) -> Result<(candle::CudaStorage, Shape)> {
match q.dtype() {
candle::DType::F16 => self.cuda_fwd_t::<f16>(q, q_l, k, k_l, v, v_l, false),
candle::DType::BF16 => self.cuda_fwd_t::<bf16>(q, q_l, k, k_l, v, v_l, true),
dt => candle::bail!("flash-attn-v3 is only supported for f16/bf16 ({dt:?})"),
}
}
}
#[allow(clippy::too_many_arguments)]
/// Flash-attention v3 layer with variable-length batching.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q.
/// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v.
/// * `max_seqlen_q` - The maximum query sequence length for q in the batch.
/// * `max_seqlen_k` - The maximum query sequence length for k and v in the batch.
/// * `use_gqa_packing` - enables dedicated kernels for GQA packing if head sizes are compatible.
///
/// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`,
/// `seqlen_1 + seqlen_2`, etc.
///
/// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`.
pub fn flash_attn_varlen(
q: &Tensor,
k: &Tensor,
v: &Tensor,
seqlens_q: &Tensor,
seqlens_k: &Tensor,
max_seqlen_q: usize,
max_seqlen_k: usize,
softmax_scale: f32,
causal: bool,
use_gqa_packing: bool,
) -> Result<Tensor> {
let window_size_left = None;
let window_size_right = if causal { Some(0) } else { None };
let op = FlashAttnVarLen {
softmax_scale,
max_seqlen_q,
max_seqlen_k,
seqlens_q: seqlens_q.clone(),
seqlens_k: seqlens_k.clone(),
alibi_slopes: None,
window_size_left,
window_size_right,
use_gqa_packing,
};
q.apply_op3(k, v, op)
}
#[allow(clippy::too_many_arguments)]
/// Flash-attention v3 layer with variable-length batching.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `seqlens_q` - The cumulative lengths of the sequences in the batch, used to index in q.
/// * `seqlens_k` - The cumulative lengths of the sequences in the batch, used to index in k and v.
/// * `max_seqlen_q` - The maximum query sequence length for q in the batch.
/// * `max_seqlen_k` - The maximum query sequence length for k and v in the batch.
/// * `window_size_left` - Limit left attention to value tokens.
/// * `window_size_right` - Limit right attention to value tokens.
/// * `use_gqa_packing` - enables dedicated kernels for GQA packing if head sizes are compatible.
///
/// `seqlens_q` and `seqlens_k` contain `batch_size + 1` elements, typically `0`, `seqlen_1`,
/// `seqlen_1 + seqlen_2`, etc.
///
/// The resulting tensor has dimensions `(total_q, num_heads_q, head_size)`.
///
/// # Causal mask
///
/// `window_size_left=None` with `window_size_right=Some(0)` applies a causal mask to the result
/// of `Q @ K^T`
pub fn flash_attn_varlen_windowed(
q: &Tensor,
k: &Tensor,
v: &Tensor,
seqlens_q: &Tensor,
seqlens_k: &Tensor,
max_seqlen_q: usize,
max_seqlen_k: usize,
softmax_scale: f32,
window_size_left: Option<usize>,
window_size_right: Option<usize>,
use_gqa_packing: bool,
) -> Result<Tensor> {
let op = FlashAttnVarLen {
softmax_scale,
max_seqlen_q,
max_seqlen_k,
seqlens_q: seqlens_q.clone(),
seqlens_k: seqlens_k.clone(),
alibi_slopes: None,
window_size_left,
window_size_right,
use_gqa_packing,
};
q.apply_op3(k, v, op)
}
#[allow(clippy::too_many_arguments)]
/// Flash-attention v3 layer with variable-length batching.
///
/// This implements scaled dot-product attention, `softmax(Q @ K^T . softmax_scale) @ V`.
/// Multi-query and grouped-query attention are supported by using tensors k and v with fewer heads
/// than q, the number of heads in k and v has to be divisible by the number of heads in q.
///
/// # Arguments
///
/// * `q` - Query tensor with shape `(total_q, num_heads_q, head_size)`.
/// * `k` - Key tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `v` - Value tensor with shape `(total_kv, num_heads_kv, head_size)`.
/// * `alibi_slopes` - Alibi slopes tensor with shape `(num_heads_q)`.
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | true |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-flash-attn-v3/src/ffi.rs | candle-flash-attn-v3/src/ffi.rs | // SPDX-License-Identifier: Apache-2.0 OR MIT
// Copyright (c) 2024 Michael Feil
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use core::ffi::{c_int, c_void};
extern "C" {
pub(crate) fn run_mha(
q_ptr: *const c_void,
k_ptr: *const c_void,
v_ptr: *const c_void,
o_ptr: *const c_void,
softmax_lse_ptr: *const c_void,
alibi_slopes_ptr: *const c_void,
cu_seqlens_q_ptr: *const i32,
cu_seqlens_k_ptr: *const i32,
q_batch_stride: u32,
k_batch_stride: u32,
v_batch_stride: u32,
o_batch_stride: u32,
alibi_slopes_batch_stride: u32,
q_row_stride: u32,
k_row_stride: u32,
v_row_stride: u32,
o_row_stride: u32,
q_head_stride: u32,
k_head_stride: u32,
v_head_stride: u32,
o_head_stride: u32,
b: u32,
h: u32,
h_k: u32,
d: u32,
d_rounded: u32,
softmax_scale: f32,
seqlen_q: u32,
seqlen_k: u32,
seqlen_q_rounded: u32,
seqlen_k_rounded: u32,
is_bf16: c_int,
is_causal: c_int,
unpadded_lse: c_int,
use_gqa_packing: c_int,
window_size_left: c_int,
window_size_right: c_int,
total_q: u32,
total_k: u32,
);
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-flash-attn-v3/tests/flash_attn_tests.rs | candle-flash-attn-v3/tests/flash_attn_tests.rs | use anyhow::Result;
use candle_flash_attn_v3;
use candle::{DType, Device, IndexOp, Tensor, D};
use rstest::rstest;
fn to_vec3_round(t: Tensor, digits: i32) -> Result<Vec<Vec<Vec<f32>>>> {
let b = 10f32.powi(digits);
let t = t.to_vec3::<f32>()?;
let t = t
.iter()
.map(|t| {
t.iter()
.map(|t| t.iter().map(|t| f32::round(t * b) / b).collect())
.collect()
})
.collect();
Ok(t)
}
fn fa_acausal(q: &Tensor, k: &Tensor, v: &Tensor, softmax_scale: f32) -> Result<Tensor> {
let in_dtype = q.dtype();
let q = q.to_dtype(DType::F32)?;
let k = k.to_dtype(DType::F32)?;
let v = v.to_dtype(DType::F32)?;
let att = (q.matmul(&k.t()?)? * softmax_scale as f64)?;
let att = candle_nn::ops::softmax(&att, D::Minus1)?;
// Convert to contiguous as matmul doesn't support strided vs for now.
let output = att.matmul(&v.contiguous()?)?.to_dtype(in_dtype)?;
Ok(output)
}
#[test]
fn flash_attn_acausal() -> Result<()> {
let device = Device::new_cuda(0)?;
let q = Tensor::arange(0u32, 3 * 2 * 64, &device)?
.to_dtype(DType::F16)?
.reshape((1, 3, 2, 64))?;
let k = (&q / 400.)?;
let v = (&q / 500.)?;
let q = (&q / 300.)?;
let ys1 = fa_acausal(&q, &k, &v, 0.5)?;
let ys1 = ys1.i(0)?.to_dtype(DType::F32)?;
let ys2 = {
let q = q.transpose(1, 2)?;
let k = k.transpose(1, 2)?;
let v = v.transpose(1, 2)?;
candle_flash_attn_v3::flash_attn(&q, &k, &v, 0.5, false, false)?.transpose(1, 2)?
};
let ys2 = ys2.i(0)?.to_dtype(DType::F32)?;
let diff = ys1.sub(&ys2)?.abs()?.flatten_all()?.max(0)?;
assert_eq!(ys2.dims(), &[3, 2, 64]);
assert_eq!(
to_vec3_round(ys2, 4)?,
&[
[
[
0.0808, 0.0828, 0.0848, 0.0869, 0.0889, 0.0908, 0.0928, 0.0948, 0.0969, 0.0989,
0.1008, 0.1028, 0.1049, 0.1069, 0.1088, 0.1108, 0.1129, 0.1149, 0.1168, 0.1188,
0.1208, 0.1229, 0.1249, 0.1268, 0.1288, 0.1309, 0.1328, 0.1349, 0.1368, 0.1388,
0.1409, 0.1428, 0.1449, 0.1469, 0.1488, 0.1509, 0.1528, 0.1548, 0.1569, 0.1588,
0.1609, 0.1628, 0.1648, 0.1669, 0.1688, 0.1709, 0.1729, 0.1748, 0.1769, 0.1788,
0.1809, 0.1829, 0.1848, 0.1869, 0.1888, 0.1908, 0.1929, 0.1948, 0.1969, 0.1989,
0.2008, 0.2029, 0.205, 0.2069
],
[
0.1071, 0.1091, 0.1111, 0.113, 0.1151, 0.1171, 0.1191, 0.1211, 0.123, 0.1251,
0.1271, 0.129, 0.1311, 0.1331, 0.135, 0.1371, 0.139, 0.1411, 0.1431, 0.145,
0.1471, 0.149, 0.1511, 0.1531, 0.155, 0.1571, 0.1591, 0.1611, 0.1631, 0.165,
0.1671, 0.1691, 0.1711, 0.1731, 0.175, 0.1771, 0.1791, 0.181, 0.1831, 0.1851,
0.1871, 0.1891, 0.191, 0.1931, 0.1951, 0.1971, 0.1991, 0.201, 0.2031, 0.2051,
0.2072, 0.2091, 0.2111, 0.2131, 0.2151, 0.217, 0.2191, 0.2211, 0.2231, 0.2251,
0.2271, 0.229, 0.2312, 0.2332
]
],
[
[
0.3765, 0.3784, 0.3804, 0.3823, 0.3843, 0.3862, 0.3884, 0.3904, 0.3923, 0.3943,
0.3962, 0.3984, 0.4004, 0.4023, 0.4043, 0.4063, 0.4084, 0.4104, 0.4124, 0.4143,
0.4163, 0.4185, 0.4204, 0.4224, 0.4243, 0.4263, 0.4285, 0.4304, 0.4324, 0.4343,
0.4363, 0.4385, 0.4404, 0.4424, 0.4443, 0.4463, 0.4485, 0.4504, 0.4524, 0.4543,
0.4563, 0.4585, 0.4604, 0.4624, 0.4644, 0.4663, 0.4683, 0.4705, 0.4724, 0.4744,
0.4763, 0.4783, 0.4805, 0.4824, 0.4844, 0.4863, 0.4883, 0.4905, 0.4922, 0.4946,
0.4966, 0.4985, 0.5005, 0.5024
],
[
0.3816, 0.3835, 0.3855, 0.3875, 0.3894, 0.3914, 0.3936, 0.3955, 0.3975, 0.3994,
0.4014, 0.4036, 0.4055, 0.4075, 0.4094, 0.4114, 0.4136, 0.4155, 0.4175, 0.4194,
0.4214, 0.4236, 0.4255, 0.4275, 0.4294, 0.4314, 0.4336, 0.4355, 0.4375, 0.4395,
0.4414, 0.4436, 0.4456, 0.4475, 0.4495, 0.4514, 0.4536, 0.4556, 0.4575, 0.4595,
0.4614, 0.4636, 0.4656, 0.4675, 0.4695, 0.4714, 0.4734, 0.4756, 0.4775, 0.4795,
0.4814, 0.4834, 0.4856, 0.4875, 0.4895, 0.4915, 0.4934, 0.4956, 0.4973, 0.4998,
0.5015, 0.5034, 0.5054, 0.5073
]
],
[
[
0.6392, 0.6411, 0.6431, 0.6455, 0.6475, 0.6494, 0.6514, 0.6533, 0.6553, 0.6572,
0.6592, 0.6611, 0.6631, 0.6655, 0.6675, 0.6694, 0.6714, 0.6733, 0.6753, 0.6772,
0.6792, 0.6812, 0.6831, 0.6851, 0.6875, 0.6895, 0.6914, 0.6934, 0.6953, 0.6973,
0.6992, 0.7012, 0.7031, 0.7051, 0.7075, 0.7095, 0.7114, 0.7134, 0.7153, 0.7173,
0.7192, 0.7212, 0.7231, 0.7251, 0.7275, 0.7295, 0.7314, 0.7334, 0.7354, 0.7373,
0.7393, 0.7412, 0.7432, 0.7451, 0.7476, 0.7495, 0.7515, 0.7534, 0.7554, 0.7573,
0.7593, 0.7612, 0.7632, 0.7651
],
[
0.6396, 0.6416, 0.6436, 0.646, 0.6479, 0.6499, 0.6519, 0.6538, 0.6558, 0.6577,
0.6597, 0.6616, 0.6636, 0.666, 0.668, 0.6699, 0.6719, 0.6738, 0.6758, 0.6777,
0.6797, 0.6816, 0.6836, 0.6855, 0.688, 0.6899, 0.6919, 0.6938, 0.6958, 0.6978,
0.6997, 0.7017, 0.7036, 0.7056, 0.708, 0.71, 0.7119, 0.7139, 0.7158, 0.7178,
0.7197, 0.7217, 0.7236, 0.7256, 0.728, 0.73, 0.7319, 0.7339, 0.7358, 0.7378,
0.7397, 0.7417, 0.7437, 0.7456, 0.748, 0.75, 0.752, 0.7539, 0.7559, 0.7578,
0.7598, 0.7617, 0.7637, 0.7656
]
]
]
);
assert!(diff.to_vec0::<f32>()?.abs() < 1e-5);
Ok(())
}
#[test]
fn flash_attn_acausal_gqa() -> Result<()> {
let device = Device::new_cuda(0)?;
let n_h = 4usize;
let n_h_k = 1usize;
let q = Tensor::arange(0u32, (n_h * 2 * 64) as u32, &device)?
.to_dtype(DType::F16)?
.reshape((1, n_h, 2, 64))?;
let gqa = q.clone().i((.., ..n_h_k, .., ..))?;
assert_eq!(gqa.dims(), &[1, n_h_k, 2, 64]);
let q = (q.clone() / 1000.)?;
let k_gqa = (&gqa / 400.)?;
let v_gqa = (&gqa / 500.)?;
// let gqa_repeat = gqa.repeat((1, (n_h / n_h_k) as usize, 1, 1))?;
// assert_eq!(gqa_repeat.dims(), &[1, n_h, 2, 64]);
// let k = (&gqa_repeat / 400.)?;
// let v = (&gqa_repeat / 500.)?;
// let ys1 = fa_acausal(&q, &k, &v, 0.5)?;
// let ys1 = ys1.i(0)?.to_dtype(DType::F32)?;
// assert_eq!(ys1.dims(), &[n_h, 2, 64]);
let ys2 = {
let q = q.transpose(1, 2)?;
let k_gqa = k_gqa.transpose(1, 2)?;
let v_gqa = v_gqa.transpose(1, 2)?;
candle_flash_attn_v3::flash_attn(&q, &k_gqa, &v_gqa, 0.125, false, true)?
.transpose(1, 2)?
};
let ys2 = ys2.i(0)?.to_dtype(DType::F32)?;
assert_eq!(ys2.dims(), &[n_h, 2, 64]);
assert_eq!(
to_vec3_round(ys2.clone(), 4)?,
&[
[
[
0.0653, 0.0673, 0.0693, 0.0713, 0.0734, 0.0753, 0.0773, 0.0793, 0.0813, 0.0834,
0.0853, 0.0873, 0.0894, 0.0913, 0.0933, 0.0953, 0.0973, 0.0994, 0.1013, 0.1033,
0.1053, 0.1073, 0.1094, 0.1113, 0.1133, 0.1154, 0.1173, 0.1194, 0.1213, 0.1233,
0.1254, 0.1273, 0.1294, 0.1313, 0.1333, 0.1354, 0.1373, 0.1393, 0.1414, 0.1433,
0.1454, 0.1473, 0.1493, 0.1514, 0.1533, 0.1554, 0.1573, 0.1593, 0.1614, 0.1633,
0.1654, 0.1674, 0.1693, 0.1714, 0.1733, 0.1753, 0.1774, 0.1793, 0.1814, 0.1833,
0.1853, 0.1874, 0.1895, 0.1914
],
[
0.0679, 0.0699, 0.072, 0.0739, 0.076, 0.0779, 0.0799, 0.082, 0.0839, 0.086,
0.088, 0.0899, 0.092, 0.0939, 0.0959, 0.098, 0.0999, 0.102, 0.1039, 0.106,
0.108, 0.1099, 0.112, 0.114, 0.1159, 0.118, 0.1199, 0.122, 0.124, 0.126,
0.1279, 0.13, 0.132, 0.134, 0.136, 0.1379, 0.14, 0.142, 0.144, 0.146, 0.1479,
0.1499, 0.152, 0.1539, 0.1559, 0.158, 0.1599, 0.162, 0.1639, 0.1659, 0.168,
0.1699, 0.172, 0.174, 0.1759, 0.178, 0.1799, 0.182, 0.184, 0.1859, 0.188,
0.1899, 0.192, 0.194
]
],
[
[
0.0706, 0.0725, 0.0746, 0.0765, 0.0786, 0.0806, 0.0825, 0.0846, 0.0865, 0.0886,
0.0906, 0.0925, 0.0946, 0.0966, 0.0985, 0.1006, 0.1025, 0.1046, 0.1066, 0.1085,
0.1106, 0.1125, 0.1146, 0.1166, 0.1185, 0.1206, 0.1226, 0.1246, 0.1266, 0.1285,
0.1306, 0.1326, 0.1346, 0.1366, 0.1385, 0.1406, 0.1426, 0.1445, 0.1466, 0.1486,
0.1506, 0.1526, 0.1545, 0.1566, 0.1586, 0.1606, 0.1626, 0.1646, 0.1666, 0.1686,
0.1707, 0.1726, 0.1746, 0.1766, 0.1786, 0.1805, 0.1826, 0.1846, 0.1866, 0.1886,
0.1906, 0.1925, 0.1947, 0.1967
],
[
0.0731, 0.0751, 0.0771, 0.0791, 0.0812, 0.0831, 0.0851, 0.0872, 0.0891, 0.0912,
0.0931, 0.0951, 0.0972, 0.0991, 0.1011, 0.1031, 0.1051, 0.1072, 0.1091, 0.1111,
0.1132, 0.1151, 0.1172, 0.1191, 0.1212, 0.1232, 0.1251, 0.1272, 0.1292, 0.1311,
0.1332, 0.1351, 0.1372, 0.1392, 0.1411, 0.1432, 0.1451, 0.1471, 0.1492, 0.1511,
0.1532, 0.1552, 0.1571, 0.1592, 0.1611, 0.1632, 0.1652, 0.1671, 0.1692, 0.1711,
0.1732, 0.1752, 0.1771, 0.1792, 0.1812, 0.1831, 0.1852, 0.1871, 0.1892, 0.1912,
0.1931, 0.1951, 0.1973, 0.1992
]
],
[
[
0.0757, 0.0776, 0.0797, 0.0817, 0.0837, 0.0857, 0.0876, 0.0897, 0.0917, 0.0938,
0.0957, 0.0977, 0.0997, 0.1017, 0.1036, 0.1057, 0.1077, 0.1097, 0.1117, 0.1136,
0.1157, 0.1177, 0.1198, 0.1217, 0.1237, 0.1257, 0.1277, 0.1298, 0.1317, 0.1337,
0.1357, 0.1377, 0.1398, 0.1417, 0.1437, 0.1458, 0.1477, 0.1497, 0.1517, 0.1537,
0.1558, 0.1577, 0.1597, 0.1617, 0.1637, 0.1658, 0.1677, 0.1697, 0.1718, 0.1737,
0.1758, 0.1777, 0.1797, 0.1818, 0.1837, 0.1857, 0.1877, 0.1897, 0.1918, 0.1937,
0.1957, 0.1976, 0.1998, 0.2018
],
[
0.0782, 0.0802, 0.0822, 0.0842, 0.0862, 0.0882, 0.0902, 0.0922, 0.0942, 0.0963,
0.0982, 0.1002, 0.1022, 0.1042, 0.1062, 0.1082, 0.1102, 0.1122, 0.1142, 0.1162,
0.1182, 0.1202, 0.1223, 0.1242, 0.1262, 0.1283, 0.1302, 0.1322, 0.1343, 0.1362,
0.1383, 0.1403, 0.1422, 0.1443, 0.1462, 0.1482, 0.1503, 0.1522, 0.1543, 0.1563,
0.1582, 0.1603, 0.1622, 0.1643, 0.1663, 0.1682, 0.1703, 0.1722, 0.1743, 0.1763,
0.1782, 0.1803, 0.1823, 0.1843, 0.1863, 0.1882, 0.1903, 0.1923, 0.1943, 0.1963,
0.1982, 0.2002, 0.2023, 0.2043
]
],
[
[
0.0807, 0.0826, 0.0847, 0.0867, 0.0887, 0.0907, 0.0927, 0.0947, 0.0967, 0.0987,
0.1007, 0.1027, 0.1047, 0.1067, 0.1086, 0.1107, 0.1127, 0.1147, 0.1167, 0.1187,
0.1207, 0.1227, 0.1247, 0.1267, 0.1287, 0.1307, 0.1327, 0.1348, 0.1367, 0.1387,
0.1407, 0.1427, 0.1448, 0.1467, 0.1487, 0.1508, 0.1527, 0.1547, 0.1567, 0.1587,
0.1608, 0.1627, 0.1647, 0.1667, 0.1687, 0.1708, 0.1727, 0.1747, 0.1768, 0.1787,
0.1808, 0.1827, 0.1847, 0.1868, 0.1887, 0.1907, 0.1927, 0.1947, 0.1968, 0.1987,
0.2007, 0.2026, 0.2048, 0.2068
],
[
0.0831, 0.0851, 0.0871, 0.0891, 0.0911, 0.0931, 0.0951, 0.0971, 0.0991, 0.1011,
0.1031, 0.1051, 0.1071, 0.1091, 0.1111, 0.1131, 0.1151, 0.1171, 0.1191, 0.1211,
0.1231, 0.1251, 0.1271, 0.1292, 0.1311, 0.1332, 0.1351, 0.1371, 0.1392, 0.1411,
0.1432, 0.1451, 0.1471, 0.1492, 0.1511, 0.1531, 0.1552, 0.1571, 0.1592, 0.1611,
0.1631, 0.1652, 0.1671, 0.1692, 0.1711, 0.1731, 0.1752, 0.1771, 0.1792, 0.1812,
0.1831, 0.1852, 0.1871, 0.1891, 0.1912, 0.1931, 0.1952, 0.1971, 0.1991, 0.2012,
0.2031, 0.2051, 0.2072, 0.2092
]
]
]
);
Ok(())
}
#[test]
fn flash_attn_varlen() -> Result<()> {
let device = Device::new_cuda(0)?;
let q = Tensor::arange(0u32, 3 * 2 * 64, &device)?
.to_dtype(DType::F16)?
.reshape((3, 2, 64))?;
let k = (&q / 400.)?;
let v = (&q / 500.)?;
let q = (&q / 300.)?;
let seqlens_q = Tensor::new(&[0u32, 2u32], &device)?;
// let seqlens_k: Tensor = Tensor::new(&[0u32, 3u32], &device)?;
let ys = {
let q = q.transpose(0, 1)?;
let k = k.transpose(0, 1)?;
let v = v.transpose(0, 1)?;
candle_flash_attn_v3::flash_attn_varlen(
&q, &k, &v, &seqlens_q, &seqlens_q, 2, 2, 0.5, false, false,
)?
.transpose(0, 1)?
};
let ys = ys.to_dtype(DType::F32)?;
assert_eq!(ys.dims(), &[3, 2, 64]);
assert_eq!(
to_vec3_round(ys, 4)?,
&[
[
[
0.0808, 0.0828, 0.0848, 0.0869, 0.0889, 0.0908, 0.0928, 0.0948, 0.0969, 0.0989,
0.1008, 0.1028, 0.1049, 0.1069, 0.1088, 0.1108, 0.1129, 0.1149, 0.1168, 0.1188,
0.1208, 0.1229, 0.1249, 0.1268, 0.1288, 0.1309, 0.1328, 0.1349, 0.1368, 0.1388,
0.1409, 0.1428, 0.1449, 0.1469, 0.1488, 0.1509, 0.1528, 0.1548, 0.1569, 0.1588,
0.1609, 0.1628, 0.1648, 0.1669, 0.1688, 0.1709, 0.1729, 0.1748, 0.1769, 0.1788,
0.1809, 0.1829, 0.1848, 0.1869, 0.1888, 0.1908, 0.1929, 0.1948, 0.1969, 0.1989,
0.2008, 0.2029, 0.205, 0.2069
],
[
0.1071, 0.1091, 0.1111, 0.113, 0.1151, 0.1171, 0.1191, 0.1211, 0.123, 0.1251,
0.1271, 0.129, 0.1311, 0.1331, 0.135, 0.1371, 0.139, 0.1411, 0.1431, 0.145,
0.1471, 0.149, 0.1511, 0.1531, 0.155, 0.1571, 0.1591, 0.1611, 0.1631, 0.165,
0.1671, 0.1691, 0.1711, 0.1731, 0.175, 0.1771, 0.1791, 0.181, 0.1831, 0.1851,
0.1871, 0.1891, 0.191, 0.1931, 0.1951, 0.1971, 0.1991, 0.201, 0.2031, 0.2051,
0.2072, 0.2091, 0.2111, 0.2131, 0.2151, 0.217, 0.2191, 0.2211, 0.2231, 0.2251,
0.2271, 0.229, 0.2312, 0.2332
]
],
[
[
0.3765, 0.3784, 0.3804, 0.3823, 0.3843, 0.3862, 0.3884, 0.3904, 0.3923, 0.3943,
0.3962, 0.3984, 0.4004, 0.4023, 0.4043, 0.4063, 0.4084, 0.4104, 0.4124, 0.4143,
0.4163, 0.4185, 0.4204, 0.4224, 0.4243, 0.4263, 0.4285, 0.4304, 0.4324, 0.4343,
0.4363, 0.4385, 0.4404, 0.4424, 0.4443, 0.4463, 0.4485, 0.4504, 0.4524, 0.4543,
0.4563, 0.4585, 0.4604, 0.4624, 0.4644, 0.4663, 0.4683, 0.4705, 0.4724, 0.4744,
0.4763, 0.4783, 0.4805, 0.4824, 0.4844, 0.4863, 0.4883, 0.4905, 0.4922, 0.4946,
0.4966, 0.4985, 0.5005, 0.5024
],
[
0.3816, 0.3835, 0.3855, 0.3875, 0.3894, 0.3914, 0.3936, 0.3955, 0.3975, 0.3994,
0.4014, 0.4036, 0.4055, 0.4075, 0.4094, 0.4114, 0.4136, 0.4155, 0.4175, 0.4194,
0.4214, 0.4236, 0.4255, 0.4275, 0.4294, 0.4314, 0.4336, 0.4355, 0.4375, 0.4395,
0.4414, 0.4436, 0.4456, 0.4475, 0.4495, 0.4514, 0.4536, 0.4556, 0.4575, 0.4595,
0.4614, 0.4636, 0.4656, 0.4675, 0.4695, 0.4714, 0.4734, 0.4756, 0.4775, 0.4795,
0.4814, 0.4834, 0.4856, 0.4875, 0.4895, 0.4915, 0.4934, 0.4956, 0.4973, 0.4998,
0.5015, 0.5034, 0.5054, 0.5073
]
],
[
[
0.6392, 0.6411, 0.6431, 0.6455, 0.6475, 0.6494, 0.6514, 0.6533, 0.6553, 0.6572,
0.6592, 0.6611, 0.6631, 0.6655, 0.6675, 0.6694, 0.6714, 0.6733, 0.6753, 0.6772,
0.6792, 0.6812, 0.6831, 0.6851, 0.6875, 0.6895, 0.6914, 0.6934, 0.6953, 0.6973,
0.6992, 0.7012, 0.7031, 0.7051, 0.7075, 0.7095, 0.7114, 0.7134, 0.7153, 0.7173,
0.7192, 0.7212, 0.7231, 0.7251, 0.7275, 0.7295, 0.7314, 0.7334, 0.7354, 0.7373,
0.7393, 0.7412, 0.7432, 0.7451, 0.7476, 0.7495, 0.7515, 0.7534, 0.7554, 0.7573,
0.7593, 0.7612, 0.7632, 0.7651
],
[
0.6396, 0.6416, 0.6436, 0.646, 0.6479, 0.6499, 0.6519, 0.6538, 0.6558, 0.6577,
0.6597, 0.6616, 0.6636, 0.666, 0.668, 0.6699, 0.6719, 0.6738, 0.6758, 0.6777,
0.6797, 0.6816, 0.6836, 0.6855, 0.688, 0.6899, 0.6919, 0.6938, 0.6958, 0.6978,
0.6997, 0.7017, 0.7036, 0.7056, 0.708, 0.71, 0.7119, 0.7139, 0.7158, 0.7178,
0.7197, 0.7217, 0.7236, 0.7256, 0.728, 0.73, 0.7319, 0.7339, 0.7358, 0.7378,
0.7397, 0.7417, 0.7437, 0.7456, 0.748, 0.75, 0.752, 0.7539, 0.7559, 0.7578,
0.7598, 0.7617, 0.7637, 0.7656
]
]
]
);
Ok(())
}
#[rstest(
head_dim => [64, 128, 256],
seq_len => [2, 4, 9],
use_gqa_packing => [false], // true does not make sense, as its reset to falser in the function
)]
fn flash_attn_varlen_param(head_dim: usize, seq_len: usize, use_gqa_packing: bool) -> Result<()> {
let device = Device::new_cuda(0)?;
// Adjust the shape so it reflects seq_len.
// Here, we make q of shape (3, seq_len, head_dim).
let q = Tensor::arange(0u32, (3 * seq_len * head_dim) as u32, &device)?
.to_dtype(DType::F16)?
.reshape((3, seq_len, head_dim))?;
// divide by max value to have expected magnitude of error.
let k = (&q / ((head_dim * seq_len) as f64 * 4.))?;
let v = (&q / ((head_dim * seq_len) as f64 * 2.))?;
let q = (&q / ((head_dim * seq_len) as f64 * 3.))?;
// For varlen, we need start/end offsets for each “batch element.”
// In this test, we have only 1 “batch element,” so let's do `[0, seq_len]`.
let seqlens_q = Tensor::new(&[0u32, seq_len as u32], &device)?;
let seqlens_k = Tensor::new(&[0u32, seq_len as u32], &device)?;
let ys = {
let q = q.transpose(0, 1)?;
let k = k.transpose(0, 1)?;
let v = v.transpose(0, 1)?;
candle_flash_attn_v3::flash_attn_varlen(
&q,
&k,
&v,
&seqlens_q,
&seqlens_k,
seq_len, // max_seqlen_q
seq_len, // max_seqlen_k
0.5, // softmax scale
false, // causal
use_gqa_packing, // use_gqa_packing
)?
.transpose(0, 1)? // bring it back to (3, seq_len, head_dim)
};
let ys = ys.to_dtype(DType::F32)?;
assert_eq!(ys.dims(), &[3, seq_len, head_dim]);
let ys2 = {
// reference implementation
let q = q.unsqueeze(0)?;
let k = k.unsqueeze(0)?;
let v = v.unsqueeze(0)?;
let y = fa_acausal(&q, &k, &v, 0.5)?;
y.i(0)?.to_dtype(DType::F32)?
};
let diff = ys.sub(&ys2)?.abs()?.flatten_all()?.max(0)?;
assert!(diff.to_vec0::<f32>()?.abs() < 5e-3);
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-onnx/build.rs | candle-onnx/build.rs | use std::io::Result;
fn main() -> Result<()> {
prost_build::compile_protos(&["src/onnx.proto3"], &["src/"])?;
Ok(())
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-onnx/src/lib.rs | candle-onnx/src/lib.rs | use candle::Result;
use prost::Message;
pub mod onnx {
include!(concat!(env!("OUT_DIR"), "/onnx.rs"));
}
pub mod eval;
pub use eval::{dtype, simple_eval};
pub fn read_file<P: AsRef<std::path::Path>>(p: P) -> Result<onnx::ModelProto> {
let buf = std::fs::read(p)?;
onnx::ModelProto::decode(buf.as_slice()).map_err(candle::Error::wrap)
}
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | false |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-onnx/src/eval.rs | candle-onnx/src/eval.rs | use crate::onnx::attribute_proto::AttributeType;
use crate::onnx::tensor_proto::DataType;
use crate::onnx::{self, GraphProto};
use candle::Module;
use candle::{bail, DType, Device, IndexOp, Result, Tensor};
use candle_nn::activation::PReLU;
use std::collections::{HashMap, HashSet};
pub type Value = Tensor;
pub fn dtype(dt: DataType) -> Option<DType> {
match dt {
DataType::Uint8 => Some(DType::U8),
DataType::Uint32 => Some(DType::U32),
DataType::Int64 => Some(DType::I64),
DataType::Float16 => Some(DType::F16),
DataType::Float => Some(DType::F32),
DataType::Double => Some(DType::F64),
DataType::Bool => Some(DType::U8),
_ => None,
}
}
trait Attr {
const TYPE: AttributeType;
fn get(attr: &onnx::AttributeProto) -> Result<&Self>;
}
trait AttrOwned: Sized {
const TYPE: AttributeType;
fn get(attr: &onnx::AttributeProto) -> Result<Self>;
}
impl Attr for i64 {
const TYPE: AttributeType = AttributeType::Int;
fn get(attr: &onnx::AttributeProto) -> Result<&Self> {
Ok(&attr.i)
}
}
impl Attr for f32 {
const TYPE: AttributeType = AttributeType::Float;
fn get(attr: &onnx::AttributeProto) -> Result<&Self> {
Ok(&attr.f)
}
}
impl Attr for [i64] {
const TYPE: AttributeType = AttributeType::Ints;
fn get(attr: &onnx::AttributeProto) -> Result<&Self> {
Ok(attr.ints.as_slice())
}
}
impl Attr for str {
const TYPE: AttributeType = AttributeType::String;
fn get(attr: &onnx::AttributeProto) -> Result<&Self> {
std::str::from_utf8(&attr.s).map_err(candle::Error::wrap)
}
}
impl Attr for GraphProto {
const TYPE: AttributeType = AttributeType::Graph;
fn get(attr: &onnx::AttributeProto) -> Result<&Self> {
attr.g
.as_ref()
.ok_or_else(|| candle::Error::Msg("attribute does not contain graph".to_string()))
}
}
impl AttrOwned for Vec<String> {
const TYPE: AttributeType = AttributeType::Strings;
fn get(attr: &onnx::AttributeProto) -> Result<Self> {
let mut ret = vec![];
for bytes in attr.strings.iter() {
let s = String::from_utf8(bytes.clone()).map_err(candle::Error::wrap)?;
ret.push(s);
}
Ok(ret)
}
}
impl AttrOwned for Tensor {
const TYPE: AttributeType = AttributeType::Tensor;
fn get(attr: &onnx::AttributeProto) -> Result<Self> {
let tensor_proto = match &attr.t {
Some(value) => value,
None => bail!(
"attribute {} was of type TENSOR, but no tensor was found",
attr.name
),
};
let data_type = match DataType::try_from(tensor_proto.data_type) {
Ok(value) => value,
Err(_) => bail!(
"attribute {} of type TENSOR was an invalid data_type number {}",
attr.name,
tensor_proto.data_type
),
};
let dtype = match dtype(data_type) {
Some(value) => value,
None => bail!(
"attribute {} of type TENSOR has an unsupported data_type {}",
attr.name,
data_type.as_str_name()
),
};
let mut dims = Vec::with_capacity(tensor_proto.dims.len());
for dim in &tensor_proto.dims {
if dim < &0 {
bail!(
"attribute {} of type TENSOR has a negative dimension, which is unsupported",
attr.name
)
}
dims.push(*dim as usize)
}
Tensor::from_raw_buffer(&tensor_proto.raw_data, dtype, &dims, &Device::Cpu)
}
}
fn get_attr_<'a>(node: &'a onnx::NodeProto, name: &str) -> Result<&'a onnx::AttributeProto> {
match node.attribute.iter().find(|attr| attr.name == name) {
None => {
bail!(
"cannot find the '{name}' attribute in '{}' for {}",
node.op_type,
node.name
)
}
Some(dt) => Ok(dt),
}
}
fn get_attr<'a, T: Attr + ?Sized>(node: &'a onnx::NodeProto, name: &str) -> Result<&'a T> {
let attr = get_attr_(node, name)?;
if attr.r#type() != T::TYPE {
bail!(
"unsupported type {:?} for '{name}' attribute in '{}' for {}",
attr.r#type,
node.op_type,
node.name
)
}
T::get(attr)
}
fn get_attr_opt<'a, T: Attr + ?Sized>(
node: &'a onnx::NodeProto,
name: &str,
) -> Result<Option<&'a T>> {
match node.attribute.iter().find(|attr| attr.name == name) {
None => Ok(None),
Some(attr) => {
if attr.r#type() != T::TYPE {
bail!(
"unsupported type {:?} for '{name}' attribute in '{}' for {}",
attr.r#type,
node.op_type,
node.name
)
}
let val = T::get(attr)?;
Ok(Some(val))
}
}
}
fn get_attr_opt_owned<T: AttrOwned>(node: &onnx::NodeProto, name: &str) -> Result<Option<T>> {
match node.attribute.iter().find(|attr| attr.name == name) {
None => Ok(None),
Some(attr) => {
if attr.r#type() != T::TYPE {
bail!(
"unsupported type {:?} for '{name}' attribute in '{}' for {}",
attr.r#type,
node.op_type,
node.name
)
}
let val = T::get(attr)?;
Ok(Some(val))
}
}
}
pub fn get_tensor(t: &onnx::TensorProto, name: &str) -> Result<Tensor> {
let dims: Vec<usize> = t.dims.iter().map(|&x| x as usize).collect();
match DataType::try_from(t.data_type) {
Ok(DataType::Int32) => {
if t.int32_data.is_empty() {
let len = t.raw_data.len() / 4;
let data: &[i32] =
unsafe { std::slice::from_raw_parts(t.raw_data.as_ptr() as *const i32, len) };
let data = data.iter().map(|v| *v as i64).collect::<Vec<_>>();
Tensor::from_vec(data, len, &Device::Cpu)
} else {
let data = t.int32_data.iter().map(|v| *v as i64).collect::<Vec<_>>();
Tensor::from_vec(data, t.int32_data.len(), &Device::Cpu)
}
}
Ok(dt) => match dtype(dt) {
Some(dt) => {
if dt == DType::F32 && !t.float_data.is_empty() {
Tensor::from_slice(&t.float_data, dims.as_slice(), &Device::Cpu)
} else if dt == DType::F64 && !t.double_data.is_empty() {
Tensor::from_slice(&t.double_data, dims.as_slice(), &Device::Cpu)
} else if dt == DType::I64 && !t.int64_data.is_empty() {
Tensor::from_slice(&t.int64_data, dims.as_slice(), &Device::Cpu)
} else {
Tensor::from_raw_buffer(
t.raw_data.as_slice(),
dt,
dims.as_slice(),
&Device::Cpu,
)
}
}
None => {
bail!("unsupported 'value' data-type {dt:?} for {name}")
}
},
Err(_) => {
bail!("unsupported 'value' data-type {} for {name}", t.data_type,)
}
}
}
// This function provides a direct evaluation of the proto.
// Longer-term, we should first convert the proto to an intermediate representation of the compute
// graph so as to make multiple evaluations more efficient.
// An example upside of this would be to remove intermediary values when they are not needed
// anymore.
pub fn simple_eval(
model: &onnx::ModelProto,
mut inputs: HashMap<String, Value>,
) -> Result<HashMap<String, Value>> {
let graph = match &model.graph {
None => bail!("no graph defined in proto"),
Some(graph) => graph,
};
simple_eval_(graph, &mut inputs)
}
fn simple_eval_(
graph: &onnx::GraphProto,
values: &mut HashMap<String, Value>,
) -> Result<HashMap<String, Value>> {
for t in graph.initializer.iter() {
let tensor = get_tensor(t, t.name.as_str())?;
values.insert(t.name.to_string(), tensor);
}
for input in graph.input.iter() {
let input_type = match &input.r#type {
Some(input_type) => input_type,
None => continue,
};
let input_type = match &input_type.value {
Some(input_type) => input_type,
None => continue,
};
let tensor_type = match input_type {
onnx::type_proto::Value::TensorType(tt) => tt,
_ => continue,
};
let tensor = match values.get(&input.name) {
None => bail!("missing input {}", input.name),
Some(tensor) => tensor,
};
let dt = match DataType::try_from(tensor_type.elem_type) {
Ok(dt) => match dtype(dt) {
Some(dt) => dt,
None => {
bail!("unsupported 'value' data-type {dt:?} for {}", input.name)
}
},
type_ => bail!("unsupported input type {type_:?}"),
};
match &tensor_type.shape {
None => continue,
Some(shape) => {
if shape.dim.len() != tensor.rank() {
bail!(
"unexpected rank for {}, got {:?}, expected {:?}",
input.name,
shape.dim,
tensor.shape()
)
}
for (idx, (d, &dim)) in shape.dim.iter().zip(tensor.dims().iter()).enumerate() {
match &d.value {
Some(onnx::tensor_shape_proto::dimension::Value::DimValue(v)) => {
if *v as usize != dim {
bail!(
"unexpected dim {idx} for {}, got {:?}, expected {:?}",
input.name,
shape.dim,
tensor.shape()
)
}
}
// We do not check equality constraints for the DimParam dimensions for now.
Some(onnx::tensor_shape_proto::dimension::Value::DimParam(_)) | None => (),
}
}
}
};
if dt != tensor.dtype() {
bail!(
"unexpected dtype for {}, got {:?}, expected {dt:?}",
input.name,
tensor.dtype()
)
}
}
// The nodes are topologically sorted so we can just process them in order.
for node in graph.node.iter() {
let get = |input_name: &str| match values.get(input_name) {
Some(value) => Ok(value),
None => bail!("cannot find {input_name} for op '{}'", node.name),
};
let get_opt = |i: usize| {
node.input
.get(i)
.filter(|s: &&String| !s.is_empty())
.map(|s| get(s))
};
// TODO: Validate node.input for each operator.
match node.op_type.as_str() {
"Add" => {
let input0 = get(&node.input[0])?;
let input1 = get(&node.input[1])?;
let output = input0.broadcast_add(input1)?;
values.insert(node.output[0].clone(), output);
}
"Sub" => {
let input0 = get(&node.input[0])?;
let input1 = get(&node.input[1])?;
let output = input0.broadcast_sub(input1)?;
values.insert(node.output[0].clone(), output);
}
"Mul" => {
let input0 = get(&node.input[0])?;
let input1 = get(&node.input[1])?;
let output = input0.broadcast_mul(input1)?;
values.insert(node.output[0].clone(), output);
}
"Div" => {
let input0 = get(&node.input[0])?;
let input1 = get(&node.input[1])?;
let output = input0.broadcast_div(input1)?;
values.insert(node.output[0].clone(), output);
}
"Pow" => {
let input0 = get(&node.input[0])?;
let input1 = get(&node.input[1])?;
// HACK: current implementation of broadcast_pow cannot handle negative base,
// so we use powf where we can, which *does* correctly handle negative base.
if let Ok(exp) = to_scalar_flexible::<f64>(&input1.to_dtype(DType::F64)?) {
let output = input0.powf(exp)?;
values.insert(node.output[0].clone(), output);
} else {
let output = input0.broadcast_pow(input1)?;
values.insert(node.output[0].clone(), output);
}
}
"Exp" => {
let xs = get(&node.input[0])?;
let output = xs.exp()?;
values.insert(node.output[0].clone(), output);
}
"Equal" => {
let input0 = get(&node.input[0])?;
let input1 = get(&node.input[1])?;
let output = input0.broadcast_eq(input1)?;
values.insert(node.output[0].clone(), output);
}
"Not" => {
let xs = get(&node.input[0])?;
let xs = xs.eq(&xs.zeros_like()?)?;
values.insert(node.output[0].clone(), xs);
}
"MatMul" => {
let input0 = get(&node.input[0])?;
let input1 = get(&node.input[1])?;
let output = input0.broadcast_matmul(input1)?;
values.insert(node.output[0].clone(), output);
}
"Reshape" => {
let input0 = get(&node.input[0])?;
let input1 = get(&node.input[1])?.to_vec1::<i64>()?;
// TODO: Check that there is at most a single -1 or 0, handle other neg values.
let mut other_than_minus1 = 1usize;
for &v in input1.iter() {
if v != -1 && v != 0 {
other_than_minus1 *= v as usize
}
}
let input1 = input1
.iter()
.enumerate()
.map(|(idx, &v)| match v {
-1 => Ok(input0.elem_count() / other_than_minus1),
0 => input0.dim(idx),
_ => Ok(v as usize),
})
.collect::<Result<Vec<usize>>>()?;
let output = input0.reshape(input1)?;
values.insert(node.output[0].clone(), output);
}
"LogSoftmax" => {
let input = get(&node.input[0])?;
let output = match get_attr_opt::<i64>(node, "axis")? {
None => candle_nn::ops::softmax_last_dim(input)?,
Some(&axis) => {
let axis = input.normalize_axis(axis)?;
candle_nn::ops::log_softmax(input, axis)?
}
};
values.insert(node.output[0].clone(), output);
}
"Softmax" => {
let input = get(&node.input[0])?;
let output = match get_attr_opt::<i64>(node, "axis")? {
None => candle_nn::ops::softmax_last_dim(input)?,
Some(&axis) => {
let axis = input.normalize_axis(axis)?;
candle_nn::ops::softmax(input, axis)?
}
};
values.insert(node.output[0].clone(), output);
}
"Transpose" => {
let input = get(&node.input[0])?;
let output = match get_attr_opt::<[i64]>(node, "perm")? {
None => input.t()?,
Some(perm) => {
let perm = perm.iter().map(|&v| v as usize).collect::<Vec<_>>();
input.permute(perm)?.contiguous()?
}
};
values.insert(node.output[0].clone(), output);
}
"Dropout" => {
let input = get(&node.input[0])?;
// Do not apply dropout at the moment, consider that we're only doing inference.
values.insert(node.output[0].clone(), input.clone());
}
"MaxPool" => {
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#MaxPool
let dilations = get_attr_opt::<[i64]>(node, "dilations")?;
let kernel_shape = get_attr::<[i64]>(node, "kernel_shape")?;
let pads = get_attr_opt::<[i64]>(node, "pads")?;
let strides = get_attr_opt::<[i64]>(node, "strides")?;
let auto_pad = get_attr_opt::<str>(node, "auto_pad")?;
match auto_pad {
None | Some("NOTSET") => (),
Some(s) => bail!("unsupported auto_pad {s}"),
};
if let Some(d) = dilations {
if d.iter().any(|&v| v != 1) {
bail!("MaxPool with dilation != 1, {dilations:?}")
}
}
if let Some(d) = pads {
if d.iter().any(|&v| v != 0) {
bail!("MaxPool with pads != 0, {pads:?}")
}
}
let xs = get(&node.input[0])?;
let (k1, k2) = match kernel_shape {
[k1, k2] => (*k1 as usize, *k2 as usize),
_ => bail!("only 2d MaxPool is supported, kernel shape {kernel_shape:?}"),
};
let ys = match strides {
None => xs.max_pool2d((k1, k2))?,
Some([s1, s2]) => {
xs.max_pool2d_with_stride((k1, k2), (*s1 as usize, *s2 as usize))?
}
Some(strides) => bail!("only 2d MaxPool is supported, strides {strides:?}"),
};
values.insert(node.output[0].clone(), ys);
}
"AveragePool" => {
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#AveragePool
let dilations = get_attr_opt::<[i64]>(node, "dilations")?;
let kernel_shape = get_attr::<[i64]>(node, "kernel_shape")?;
let pads = get_attr_opt::<[i64]>(node, "pads")?;
let strides = get_attr_opt::<[i64]>(node, "strides")?;
let auto_pad = get_attr_opt::<str>(node, "auto_pad")?;
match auto_pad {
None | Some("NOTSET") => (),
Some(s) => bail!("unsupported auto_pad {s}"),
};
if let Some(d) = dilations {
if d.iter().any(|&v| v != 1) {
bail!("AvgPool with dilation != 1, {dilations:?}")
}
}
if let Some(d) = pads {
if d.iter().any(|&v| v != 0) {
bail!("AvgPool with pads != 0, {pads:?}")
}
}
let xs = get(&node.input[0])?;
let (k1, k2) = match kernel_shape {
[k1, k2] => (*k1 as usize, *k2 as usize),
_ => bail!("only 2d AvgPool is supported, kernel shape {kernel_shape:?}"),
};
let ys = match strides {
None => xs.avg_pool2d((k1, k2))?,
Some([s1, s2]) => {
xs.avg_pool2d_with_stride((k1, k2), (*s1 as usize, *s2 as usize))?
}
Some(strides) => bail!("only 2d AvgPool is supported, strides {strides:?}"),
};
values.insert(node.output[0].clone(), ys);
}
"BatchNormalization" => {
let training_mode = get_attr_opt::<i64>(node, "training_mode")?;
if training_mode.copied().unwrap_or(0) != 0 {
bail!("training mode is not supported for BatchNorm")
}
let eps = get_attr_opt::<f32>(node, "epsilon")?
.copied()
.unwrap_or(1e-5);
let xs = get(&node.input[0])?;
let weight = get(&node.input[1])?;
let bias = get(&node.input[2])?;
let running_mean = get(&node.input[3])?;
let running_var = get(&node.input[4])?;
let target_shape: Vec<usize> = xs
.dims()
.iter()
.enumerate()
.map(|(idx, v)| if idx == 1 { *v } else { 1 })
.collect();
let target_shape = target_shape.as_slice();
let xs = xs
.broadcast_sub(&running_mean.reshape(target_shape)?)?
.broadcast_div(&(running_var.reshape(target_shape)? + eps as f64)?.sqrt()?)?;
let weight = weight.reshape(target_shape)?;
let bias = bias.reshape(target_shape)?;
let xs = xs.broadcast_mul(&weight)?.broadcast_add(&bias)?;
values.insert(node.output[0].clone(), xs);
}
"Squeeze" => {
let xs = get(&node.input[0])?;
let mut axes = if node.input.len() <= 1 {
// contract all the dimensions with size 1 except the batch dim.
xs.dims()
.iter()
.enumerate()
.flat_map(|(idx, &s)| if s == 1 && idx > 0 { Some(idx) } else { None })
.collect()
} else {
get(&node.input[1])?
.to_vec1::<i64>()?
.iter()
.map(|&i| xs.normalize_axis(i))
.collect::<Result<Vec<_>>>()?
};
axes.sort();
let mut xs = xs.clone();
for &axis in axes.iter().rev() {
xs = xs.squeeze(axis)?
}
values.insert(node.output[0].clone(), xs);
}
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#ConstantOfShape
"ConstantOfShape" => {
let input = get(&node.input[0])?;
let value = get_attr_opt_owned::<Tensor>(node, "value")?.unwrap_or(Tensor::zeros(
(),
DType::F32,
&Device::Cpu,
)?);
let shape_vec: Vec<usize> = input
.to_vec1::<i64>()?
.iter()
.map(|&x| x as usize)
.collect();
let xs = Tensor::ones(shape_vec, value.dtype(), input.device())?
.broadcast_mul(&value)?;
values.insert(node.output[0].clone(), xs);
}
"Unsqueeze" => {
let xs = get(&node.input[0])?;
let axes = match get_attr_opt::<[i64]>(node, "axes")? {
Some(axis) => axis.to_vec(),
None => get(&node.input[1])?.to_vec1::<i64>()?,
};
let mut axes = axes
.iter()
.map(|&i| {
if i == xs.rank() as i64 {
Ok(xs.rank())
} else if i < 0 {
// normalize_axis doesn't work correctly here
// because we actually want normalized with respect
// to the final size, not the current (off by one)
Ok(xs.rank() - (-i as usize) + 1)
} else {
xs.normalize_axis(i)
}
})
.collect::<Result<Vec<_>>>()?;
axes.sort();
let mut xs = xs.clone();
for &axis in axes.iter().rev() {
xs = xs.unsqueeze(axis)?
}
values.insert(node.output[0].clone(), xs);
}
"Clip" => {
let xs = get(&node.input[0])?;
let xs = if let Some(mins) = get_opt(1) {
xs.broadcast_maximum(mins?)?
} else {
xs.clone()
};
let xs = if let Some(maxs) = get_opt(2) {
xs.broadcast_minimum(maxs?)?
} else {
xs.clone()
};
values.insert(node.output[0].clone(), xs);
}
"Gather" => {
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#Gather
let xs = get(&node.input[0])?;
let indices = get(&node.input[1])?;
let axis = get_attr_opt::<i64>(node, "axis")?.copied().unwrap_or(0);
let axis = xs.normalize_axis(axis)?;
// index_select does not support negative indices, so normalize them
// to positive indices.
let indices = &{
let zeros = Tensor::zeros(indices.shape(), indices.dtype(), indices.device())?;
let max = Tensor::new(xs.dims()[axis] as i64, indices.device())?
.to_dtype(indices.dtype())?;
let mask = indices.lt(&zeros)?;
mask.to_dtype(indices.dtype())?
.broadcast_mul(&max)?
.add(indices)?
};
// In Pytorch or Numpy this can be done by indexing the xs tensor using the indices
// tensor directly, but candle does not support tensor indexing at the moment, so
// some workarounds must be done.
let xs = match indices.dims() {
[] => {
let index = indices.to_vec0::<i64>()? as usize;
xs.narrow(axis, index, 1)?.squeeze(axis)?
}
[_] => xs.index_select(indices, axis)?,
[first, _] => {
let mut v = Vec::with_capacity(*first);
for i in 0..*first {
v.push(xs.index_select(&indices.get(i)?, axis)?)
}
Tensor::stack(&v, axis)?
}
_ => {
// TODO: Provide an op to handle the ONNX generalized gather op ideally in a
// differentiable way.
todo!("implement gather for {xs:?} {indices:?} axis {axis}")
}
};
values.insert(node.output[0].clone(), xs);
}
// https://onnx.ai/onnx/operators/onnx__GatherElements.html#gatherelements
// A Note to fellow lurkers:
// The numpy based `gather_elements` implementation in `onnx` tests [here](https://github.com/onnx/onnx/blob/main/onnx/backend/test/case/node/gatherelements.py)
// and examples is incorrect.
// Use `torch.gather` for the validating/ verifying against the proper behaviour
"GatherElements" => {
let data = get(&node.input[0])?;
let indices = get(&node.input[1])?;
let rank = data.rank();
if rank != indices.rank() {
bail!("indices must have same rank as input data. Data rank [{}] != indices rank [{}]", data.rank(), indices.rank());
}
let axis = {
let axis_i64 = get_attr_opt::<i64>(node, "axis")?.copied().unwrap_or(0);
let axis = data.normalize_axis(axis_i64)?;
if axis >= rank {
bail!(
"axis ({}) out of accepted range [-rank, rank-1] which was [-{rank}, {}]",
axis_i64,
rank - 1
)
}
axis
};
// index_select does not support negative indices, so normalize them
// to positive indices.
let indices = &{
let zeros = Tensor::zeros(indices.shape(), indices.dtype(), indices.device())?;
let max = Tensor::new(data.dims()[axis] as i64, indices.device())?
.to_dtype(indices.dtype())?;
let mask = indices.lt(&zeros)?;
mask.to_dtype(indices.dtype())?
.broadcast_mul(&max)?
.add(indices)?
};
values.insert(node.output[0].clone(), data.gather(indices, axis)?);
}
"Shape" => {
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#Shape
let xs = get(&node.input[0])?;
let start = get_attr_opt::<i64>(node, "start")?.copied().unwrap_or(0);
let end = get_attr_opt::<i64>(node, "end")?.copied().unwrap_or(-1);
let start = xs.normalize_axis(start)?;
let end = xs.normalize_axis(end)?;
let mut dims = vec![];
for idx in start..=end {
dims.push(xs.dim(idx)? as i64)
}
let dims = Tensor::from_vec(dims, xs.rank(), xs.device())?;
values.insert(node.output[0].clone(), dims);
}
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#Size
"Size" => {
let data = get(&node.input[0])?;
let size: usize = data.dims().iter().product();
let output = Tensor::from_slice(&[size as i64], (), data.device())?;
values.insert(node.output[0].clone(), output);
}
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#Sqrt
"Sqrt" => {
let xs = get(&node.input[0])?;
let output = xs.sqrt()?;
values.insert(node.output[0].clone(), output);
}
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#Range
"Range" => {
let start = get(&node.input[0])?;
let limit = get(&node.input[1])?;
let delta = get(&node.input[2])?;
macro_rules! arange_step {
($t: ty) => {
Tensor::arange_step(
to_vec0_flexible::<$t>(start)?,
to_vec0_flexible::<$t>(limit)?,
to_vec0_flexible::<$t>(delta)?,
&Device::Cpu,
)?
};
}
let output = match start.dtype() {
DType::U8 => arange_step!(u8),
DType::U32 => arange_step!(u32),
DType::I64 => arange_step!(i64),
DType::BF16 => arange_step!(f32),
DType::F16 => arange_step!(f32),
DType::F32 => arange_step!(f32),
DType::F64 => arange_step!(f64),
DType::F8E4M3 => arange_step!(f32),
DType::I32
| DType::I16
| DType::F6E2M3
| DType::F6E3M2
| DType::F4
| DType::F8E8M0 => {
bail!("unsupported Range type i32/i16/f6e2m3/f6e3m2/f4/f8e8m0")
}
};
values.insert(node.output[0].clone(), output);
}
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#Greater
"Greater" => {
let a = get(&node.input[0])?;
let b = get(&node.input[1])?;
let output = a.broadcast_gt(b)?;
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | true |
huggingface/candle | https://github.com/huggingface/candle/blob/a4ad7c79666958c38b9afc0e0c3e3499ab8991d8/candle-onnx/tests/ops.rs | candle-onnx/tests/ops.rs | use candle::test_utils::to_vec2_round;
use candle::{DType, Device, NdArray, Result, Tensor};
use candle_onnx::onnx::attribute_proto::AttributeType;
use candle_onnx::onnx::tensor_proto::DataType;
use candle_onnx::onnx::tensor_shape_proto::{dimension, Dimension};
use candle_onnx::onnx::{type_proto, TensorProto, TensorShapeProto, TypeProto};
use candle_onnx::onnx::{AttributeProto, GraphProto, ModelProto, NodeProto, ValueInfoProto};
use candle_onnx::simple_eval;
use std::collections::HashMap;
const INPUT_X: &str = "x";
const INPUT_Y: &str = "y";
const INPUT_A: &str = "a";
const OUTPUT_Z: &str = "z";
fn create_model_proto_with_graph(graph: Option<GraphProto>) -> ModelProto {
ModelProto {
metadata_props: vec![],
training_info: vec![],
functions: vec![],
ir_version: 0,
opset_import: vec![],
producer_name: "".to_string(),
producer_version: "".to_string(),
domain: "".to_string(),
model_version: 0,
doc_string: "".to_string(),
graph,
}
}
#[test]
fn test_evaluation_fails_without_defined_graph() -> Result<()> {
let manual_graph = create_model_proto_with_graph(None);
let inputs: HashMap<String, Tensor> = HashMap::new();
match candle_onnx::simple_eval(&manual_graph, inputs) {
Err(err) => assert_eq!(err.to_string(), "no graph defined in proto"),
Ok(_) => panic!("Expected an error due to undefined graph"),
}
Ok(())
}
// "Add"
#[test]
fn test_add_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Add".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let first = z.to_vec1::<f64>()?[0];
assert_eq!(first, 4.0f64);
Ok(())
}
// "Sub"
#[test]
fn test_sub_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Sub".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let first = z.to_vec1::<f64>()?[0];
assert_eq!(first, 0.0f64);
Ok(())
}
// "Mul"
#[test]
fn test_mul_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Mul".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let first = z.to_vec1::<f64>()?[0];
assert_eq!(first, 4.0f64);
Ok(())
}
// "Div"
#[test]
fn test_div_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Div".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let first = z.to_vec1::<f64>()?[0];
assert_eq!(first, 1.0f64);
Ok(())
}
// "Exp"
#[test]
fn test_exp_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Exp".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(vec![-1.0f32, 0.0f32, 1.0f32, 2.0f32], &[2, 2], &Device::Cpu)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(results[0][0], 0.36787944f32);
assert_eq!(results[0][1], 1.0f32);
assert_eq!(results[1], vec![std::f32::consts::E, 7.389056f32]);
Ok(())
}
// "Equal"
#[test]
fn test_equal_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Equal".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
inputs.insert(INPUT_Y.to_string(), Tensor::new(&[2.], &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let first = z.to_dtype(candle::DType::U8)?.to_vec1::<u8>()?.to_vec()[0];
assert_eq!(first, 1);
Ok(())
}
// "Not"
#[test]
fn test_not_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Not".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(&[0.], &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let first = z.to_dtype(candle::DType::U8)?.to_vec1::<u8>()?.to_vec()[0];
assert_eq!(first, 1);
Ok(())
}
// "MatMul"
#[test]
fn test_matmul_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "MatMul".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(
INPUT_X.to_string(),
Tensor::from_vec(
//
vec![1.0f32, 2.0f32, 3.0f32, 4.0f32],
&[2, 2],
&Device::Cpu,
)?,
);
inputs.insert(
INPUT_Y.to_string(),
Tensor::from_vec(
//
vec![5.0f32, 6.0f32, 7.0f32, 8.0f32],
&[2, 2],
&Device::Cpu,
)?,
);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(results, vec![vec![19.0, 22.0], vec![43.0, 50.0]]);
Ok(())
}
// "Reshape"
#[test]
fn test_reshape_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Reshape".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
//
vec![1.0f32, 2.0f32, 3.0f32, 4.0f32],
&[2, 2],
&Device::Cpu,
)?;
let y = Tensor::from_vec(
//
vec![4i64],
&[1],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
inputs.insert(INPUT_Y.to_string(), y);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec1::<f32>()?;
assert_eq!(results, vec![1.0, 2.0, 3.0, 4.0]);
Ok(())
}
// "LogSoftmax"
#[test]
fn test_logsoftmax_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "LogSoftmax".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
//
vec![1.0f32, 2.0f32, 3.0f32, 4.0f32],
&[2, 2],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(
results,
vec![vec![0.26894143, 0.7310586], vec![0.26894143, 0.7310586]]
);
Ok(())
}
// "Softmax"
#[test]
fn test_softmax_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Softmax".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
//
vec![1.0f32, 2.0f32, 3.0f32, 4.0f32],
&[2, 2],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(
results,
vec![vec![0.26894143, 0.7310586], vec![0.26894143, 0.7310586]]
);
Ok(())
}
// "Transpose"
#[test]
fn test_transpose_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Transpose".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
//
vec![1.0f32, 2.0f32, 3.0f32, 4.0f32],
&[2, 2],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(results, vec![vec![1.0, 3.0], vec![2.0, 4.0]]);
Ok(())
}
// "Dropout"
#[test]
fn test_dropout_operation() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Dropout".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
//
vec![1.0f32, 2.0f32, 3.0f32, 4.0f32],
&[2, 2],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(results, vec![vec![1.0, 2.0], vec![3.0, 4.0]]);
Ok(())
}
// "Flatten"
#[test]
fn test_flatten_operation() -> Result<()> {
let mut att_axis = AttributeProto {
name: "axis".to_string(),
ref_attr_name: "axis".to_string(),
i: 0,
doc_string: "axis".to_string(),
r#type: 2,
f: 0.0,
s: vec![],
t: None,
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
};
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Flatten".to_string(),
domain: "".to_string(),
attribute: vec![att_axis.clone()],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
vec![
1.0f32, 2.0f32, 3.0f32, 4.0f32, 5.0f32, 6.0f32, 7.0f32, 8.0f32,
],
&[2, 2, 2],
&Device::Cpu,
)?;
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), x);
let eval = candle_onnx::simple_eval(&manual_graph, inputs.clone())?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(results, vec![vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]]);
att_axis.i = 1;
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Flatten".to_string(),
domain: "".to_string(),
attribute: vec![att_axis.clone()],
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![
ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
},
ValueInfoProto {
name: INPUT_Y.to_string(),
doc_string: "".to_string(),
r#type: None,
},
],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
let results = z.to_vec2::<f32>()?;
assert_eq!(
results,
vec![vec![1.0, 2.0, 3.0, 4.0], vec![5.0, 6.0, 7.0, 8.0]]
);
Ok(())
}
// Below are ops that are implemented but not tested yet
// "MaxPool"
// #[test]
// "AveragePool"
// #[test]
// "BatchNormalization"
// #[test]
// "Squeeze"
// #[test]
// "ConstantOfShape"
#[test]
fn test_constant_of_shape() -> Result<()> {
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-31
test(
&[4i64, 3, 2],
Some(1.),
&[
[[1., 1.], [1., 1.], [1., 1.]],
[[1., 1.], [1., 1.], [1., 1.]],
[[1., 1.], [1., 1.], [1., 1.]],
[[1., 1.], [1., 1.], [1., 1.]],
],
)?;
// https://github.com/onnx/onnx/blob/main/docs/Operators.md#examples-31
test(&[1i64], Some(0i64), &[0i64])?;
// "value" defaults to 0 f32
test(&[4i64], None as Option<i64>, &[0., 0., 0., 0.])?;
fn test(
input: impl NdArray,
value: Option<impl NdArray>,
expected: impl NdArray,
) -> Result<()> {
let mut attribute = vec![];
if let Some(value) = value {
let tensor = Tensor::new(value, &Device::Cpu)?;
let (value, data_type) = match tensor.dtype() {
DType::U8 => (
tensor.to_vec0::<u8>()?.to_le_bytes().to_vec(),
DataType::Uint8,
),
DType::U32 => (
tensor.to_vec0::<u32>()?.to_le_bytes().to_vec(),
DataType::Uint32,
),
DType::I64 => (
tensor.to_vec0::<i64>()?.to_le_bytes().to_vec(),
DataType::Int64,
),
DType::F32 => (
tensor.to_vec0::<f32>()?.to_le_bytes().to_vec(),
DataType::Float,
),
DType::F64 => (
tensor.to_vec0::<f64>()?.to_le_bytes().to_vec(),
DataType::Double,
),
_ => panic!("unsupported DType in test"),
};
let tensor = TensorProto {
data_type: data_type.into(),
dims: tensor.dims().iter().map(|v| *v as i64).collect(),
raw_data: value,
segment: None,
float_data: vec![],
int32_data: vec![],
string_data: vec![],
int64_data: vec![],
name: "".to_string(),
doc_string: "".to_string(),
external_data: vec![],
data_location: 0,
double_data: vec![],
uint64_data: vec![],
};
attribute.push(AttributeProto {
name: "value".to_string(),
ref_attr_name: "value".to_string(),
i: 0,
doc_string: "value".to_string(),
r#type: AttributeType::Tensor.into(),
f: 0.0,
s: vec![],
t: Some(tensor),
g: None,
sparse_tensor: None,
tp: None,
floats: vec![],
ints: vec![],
strings: vec![],
tensors: vec![],
graphs: vec![],
sparse_tensors: vec![],
type_protos: vec![],
})
}
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "ConstantOfShape".to_string(),
domain: "".to_string(),
attribute,
input: vec![INPUT_X.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let mut inputs: HashMap<String, Tensor> = HashMap::new();
inputs.insert(INPUT_X.to_string(), Tensor::new(input, &Device::Cpu)?);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval
.get(OUTPUT_Z)
.expect("Output 'z' not found")
.to_dtype(DType::F64)?;
let expected = Tensor::new(expected, &Device::Cpu)?.to_dtype(DType::F64)?;
match expected.dims().len() {
0 => assert_eq!(z.to_vec0::<f64>()?, expected.to_vec0::<f64>()?),
1 => assert_eq!(z.to_vec1::<f64>()?, expected.to_vec1::<f64>()?),
2 => assert_eq!(z.to_vec2::<f64>()?, expected.to_vec2::<f64>()?),
3 => assert_eq!(z.to_vec3::<f64>()?, expected.to_vec3::<f64>()?),
_ => unreachable!(),
};
Ok(())
}
Ok(())
}
// "Unsqueeze"
#[test]
fn test_unsqueeze() -> Result<()> {
let manual_graph = create_model_proto_with_graph(Some(GraphProto {
node: vec![NodeProto {
op_type: "Unsqueeze".to_string(),
domain: "".to_string(),
attribute: vec![],
input: vec![INPUT_X.to_string(), INPUT_Y.to_string()],
output: vec![OUTPUT_Z.to_string()],
name: "".to_string(),
doc_string: "".to_string(),
}],
name: "".to_string(),
initializer: vec![],
input: vec![],
output: vec![ValueInfoProto {
name: OUTPUT_Z.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
value_info: vec![ValueInfoProto {
name: INPUT_X.to_string(),
doc_string: "".to_string(),
r#type: None,
}],
doc_string: "".to_string(),
sparse_initializer: vec![],
quantization_annotation: vec![],
}));
let x = Tensor::from_vec(
vec![
1.0f32, 2.0f32, //
3.0f32, 4.0f32, //
],
&[2, 2],
&Device::Cpu,
)?;
let y = Tensor::from_vec(vec![-1i64], &[1], &Device::Cpu)?;
let inputs = HashMap::from_iter([(INPUT_X.to_string(), x.clone()), (INPUT_Y.to_string(), y)]);
let eval = candle_onnx::simple_eval(&manual_graph, inputs)?;
assert_eq!(eval.len(), 1);
let z = eval.get(OUTPUT_Z).expect("Output 'z' not found");
assert_eq!(z.dims(), &[2, 2, 1]);
assert_eq!(
z.flatten_all()?.to_vec1::<f32>()?,
x.flatten_all()?.to_vec1::<f32>()?
);
Ok(())
}
// "Clip"
// #[test]
// "Gather"
#[test]
fn test_gather_operation() -> Result<()> {
// test taken from https://onnx.ai/onnx/operators/onnx__Gather.html#summary.
test(
&[[1.0, 1.2], [2.3, 3.4], [4.5, 5.7]],
&[[0i64, 1], [1, 2]],
0,
&[[[1.0, 1.2], [2.3, 3.4]], [[2.3, 3.4], [4.5, 5.7]]],
)?;
// test taken from https://onnx.ai/onnx/operators/onnx__Gather.html#summary.
test(
&[[1.0, 1.2, 1.9], [2.3, 3.4, 3.9], [4.5, 5.7, 5.9]],
&[[0i64, 2]],
1,
&[[[1.0, 1.9]], [[2.3, 3.9]], [[4.5, 5.9]]],
)?;
// all the tests below are generated from numpy.take, which works like
// onnx's Gather operation.
test(&[1.0, 2.0, 3.0, 4.0], 3i64, 0, 4.0)?;
test(&[[1.0, 2.0, 3.0, 4.0]], 3i64, 1, &[4.0])?;
test(
&[[1.0], [2.0], [3.0], [4.0]],
&[3i64, 2],
0,
&[[4.0], [3.0]],
)?;
test(
&[
[[1.0, 2.0], [3.0, 4.0]],
[[5.0, 6.0], [7.0, 8.0]],
[[9.0, 10.0], [11.0, 12.0]],
[[13.0, 14.0], [15.0, 16.0]],
],
1i64,
0,
&[[5.0, 6.0], [7.0, 8.0]],
)?;
test(
&[
[[1.0, 2.0], [3.0, 4.0]],
| rust | Apache-2.0 | a4ad7c79666958c38b9afc0e0c3e3499ab8991d8 | 2026-01-04T15:42:50.663313Z | true |
messense/aliyundrive-webdav | https://github.com/messense/aliyundrive-webdav/blob/6e8eba62b4e50acf89681e3a67a3a693186dcd05/src/webdav.rs | src/webdav.rs | use std::future::Future;
use std::io;
use std::net::ToSocketAddrs;
use std::path::PathBuf;
use std::pin::Pin;
use std::task::{Context, Poll};
use anyhow::Result;
use dav_server::{body::Body, DavConfig, DavHandler};
use headers::{authorization::Basic, Authorization, HeaderMapExt};
use hyper::{service::Service, Request, Response};
use tracing::{error, info};
#[cfg(feature = "rustls-tls")]
use {
futures_util::stream::StreamExt,
hyper::server::accept,
hyper::server::conn::AddrIncoming,
std::fs::File,
std::future::ready,
std::path::Path,
std::sync::Arc,
tls_listener::{SpawningHandshakes, TlsListener},
tokio_rustls::rustls::{Certificate, PrivateKey, ServerConfig},
tokio_rustls::TlsAcceptor,
};
pub struct WebDavServer {
pub host: String,
pub port: u16,
pub auth_user: Option<String>,
pub auth_password: Option<String>,
pub tls_config: Option<(PathBuf, PathBuf)>,
pub handler: DavHandler,
}
impl WebDavServer {
pub async fn serve(self) -> Result<()> {
let addr = (self.host, self.port)
.to_socket_addrs()
.unwrap()
.next()
.ok_or_else(|| io::Error::from(io::ErrorKind::AddrNotAvailable))?;
#[cfg(feature = "rustls-tls")]
if let Some((tls_cert, tls_key)) = self.tls_config {
let incoming = TlsListener::new(
SpawningHandshakes(tls_acceptor(&tls_key, &tls_cert)?),
AddrIncoming::bind(&addr)?,
)
.filter(|conn| {
if let Err(err) = conn {
error!("TLS error: {:?}", err);
ready(false)
} else {
ready(true)
}
});
let server = hyper::Server::builder(accept::from_stream(incoming)).serve(MakeSvc {
auth_user: self.auth_user,
auth_password: self.auth_password,
handler: self.handler,
});
info!("listening on https://{}", addr);
let _ = server.await.map_err(|e| error!("server error: {}", e));
return Ok(());
}
#[cfg(not(feature = "rustls-tls"))]
if self.tls_config.is_some() {
anyhow::bail!("TLS is not supported in this build.");
}
let server = hyper::Server::bind(&addr).serve(MakeSvc {
auth_user: self.auth_user,
auth_password: self.auth_password,
handler: self.handler,
});
info!("listening on http://{}", server.local_addr());
let _ = server.await.map_err(|e| error!("server error: {}", e));
Ok(())
}
}
#[derive(Clone)]
pub struct AliyunDriveWebDav {
auth_user: Option<String>,
auth_password: Option<String>,
handler: DavHandler,
}
impl Service<Request<hyper::Body>> for AliyunDriveWebDav {
type Response = Response<Body>;
type Error = hyper::Error;
#[allow(clippy::type_complexity)]
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Request<hyper::Body>) -> Self::Future {
let should_auth = self.auth_user.is_some() && self.auth_password.is_some();
let dav_server = self.handler.clone();
let auth_user = self.auth_user.clone();
let auth_pwd = self.auth_password.clone();
Box::pin(async move {
if should_auth {
let auth_user = auth_user.unwrap();
let auth_pwd = auth_pwd.unwrap();
let user = match req.headers().typed_get::<Authorization<Basic>>() {
Some(Authorization(basic))
if basic.username() == auth_user && basic.password() == auth_pwd =>
{
basic.username().to_string()
}
Some(_) | None => {
// return a 401 reply.
let response = hyper::Response::builder()
.status(401)
.header("WWW-Authenticate", "Basic realm=\"aliyundrive-webdav\"")
.body(Body::from("Authentication required".to_string()))
.unwrap();
return Ok(response);
}
};
let config = DavConfig::new().principal(user);
Ok(dav_server.handle_with(config, req).await)
} else {
Ok(dav_server.handle(req).await)
}
})
}
}
pub struct MakeSvc {
pub auth_user: Option<String>,
pub auth_password: Option<String>,
pub handler: DavHandler,
}
impl<T> Service<T> for MakeSvc {
type Response = AliyunDriveWebDav;
type Error = hyper::Error;
#[allow(clippy::type_complexity)]
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>> + Send>>;
fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, _: T) -> Self::Future {
let auth_user = self.auth_user.clone();
let auth_password = self.auth_password.clone();
let handler = self.handler.clone();
let fut = async move {
Ok(AliyunDriveWebDav {
auth_user,
auth_password,
handler,
})
};
Box::pin(fut)
}
}
#[cfg(feature = "rustls-tls")]
fn tls_acceptor(key: &Path, cert: &Path) -> anyhow::Result<TlsAcceptor> {
let mut key_reader = io::BufReader::new(File::open(key)?);
let mut cert_reader = io::BufReader::new(File::open(cert)?);
let key = PrivateKey(private_keys(&mut key_reader)?.remove(0));
let certs = rustls_pemfile::certs(&mut cert_reader)?
.into_iter()
.map(Certificate)
.collect();
let mut config = ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(certs, key)?;
config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
Ok(Arc::new(config).into())
}
#[cfg(feature = "rustls-tls")]
fn private_keys(rd: &mut dyn io::BufRead) -> Result<Vec<Vec<u8>>, io::Error> {
use rustls_pemfile::{read_one, Item};
let mut keys = Vec::<Vec<u8>>::new();
loop {
match read_one(rd)? {
None => return Ok(keys),
Some(Item::RSAKey(key)) => keys.push(key),
Some(Item::PKCS8Key(key)) => keys.push(key),
Some(Item::ECKey(key)) => keys.push(key),
_ => {}
};
}
}
| rust | MIT | 6e8eba62b4e50acf89681e3a67a3a693186dcd05 | 2026-01-04T15:44:31.325646Z | false |
messense/aliyundrive-webdav | https://github.com/messense/aliyundrive-webdav/blob/6e8eba62b4e50acf89681e3a67a3a693186dcd05/src/main.rs | src/main.rs | use std::env;
use std::path::PathBuf;
use anyhow::bail;
use clap::{Parser, Subcommand};
use dav_server::{memls::MemLs, DavHandler};
#[cfg(unix)]
use futures_util::stream::StreamExt;
use self_update::cargo_crate_version;
use tracing::{debug, info, warn};
use tracing_subscriber::EnvFilter;
#[cfg(unix)]
use {signal_hook::consts::signal::*, signal_hook_tokio::Signals};
use cache::Cache;
use drive::{read_refresh_token, AliyunDrive, DriveConfig, DriveType};
use vfs::AliyunDriveFileSystem;
use webdav::WebDavServer;
mod cache;
mod drive;
mod login;
mod vfs;
mod webdav;
#[derive(Parser, Debug)]
#[command(name = "aliyundrive-webdav", about, version, author)]
#[command(args_conflicts_with_subcommands = true)]
struct Opt {
/// Listen host
#[arg(long, env = "HOST", default_value = "0.0.0.0")]
host: String,
/// Listen port
#[arg(short, env = "PORT", long, default_value = "8080")]
port: u16,
/// Aliyun drive client_id
#[arg(long, env = "CLIENT_ID")]
client_id: Option<String>,
/// Aliyun drive client_secret
#[arg(long, env = "CLIENT_SECRET")]
client_secret: Option<String>,
/// Aliyun drive type
#[arg(long, env = "DRIVE_TYPE")]
drive_type: Option<DriveType>,
/// Aliyun drive refresh token
#[arg(short, long, env = "REFRESH_TOKEN")]
refresh_token: Option<String>,
/// WebDAV authentication username
#[arg(short = 'U', long, env = "WEBDAV_AUTH_USER")]
auth_user: Option<String>,
/// WebDAV authentication password
#[arg(short = 'W', long, env = "WEBDAV_AUTH_PASSWORD")]
auth_password: Option<String>,
/// Automatically generate index.html
#[arg(short = 'I', long)]
auto_index: bool,
/// Read/download buffer size in bytes, defaults to 10MB
#[arg(short = 'S', long, default_value = "10485760")]
read_buffer_size: usize,
/// Upload buffer size in bytes, defaults to 16MB
#[arg(long, default_value = "16777216")]
upload_buffer_size: usize,
/// Directory entries cache size
#[arg(long, default_value = "1000")]
cache_size: u64,
/// Directory entries cache expiration time in seconds
#[arg(long, default_value = "600")]
cache_ttl: u64,
/// Root directory path
#[arg(long, env = "WEBDAV_ROOT", default_value = "/")]
root: String,
/// Working directory, refresh_token will be stored in there if specified
#[arg(short = 'w', long)]
workdir: Option<PathBuf>,
/// Delete file permanently instead of trashing it
#[arg(long)]
no_trash: bool,
/// Enable read only mode
#[arg(long)]
read_only: bool,
/// TLS certificate file path
#[arg(long, env = "TLS_CERT")]
tls_cert: Option<PathBuf>,
/// TLS private key file path
#[arg(long, env = "TLS_KEY")]
tls_key: Option<PathBuf>,
/// Prefix to be stripped off when handling request.
#[arg(long, env = "WEBDAV_STRIP_PREFIX")]
strip_prefix: Option<String>,
/// Enable debug log
#[arg(long)]
debug: bool,
/// Disable self auto upgrade
#[arg(long)]
no_self_upgrade: bool,
/// Skip uploading same size file
#[arg(long)]
skip_upload_same_size: bool,
/// Prefer downloading using HTTP protocol
#[arg(long)]
prefer_http_download: bool,
/// Enable 302 redirect when possible
#[arg(long)]
redirect: bool,
#[command(subcommand)]
subcommands: Option<Commands>,
}
#[derive(Subcommand, Debug)]
enum Commands {
/// Scan QRCode
#[command(subcommand)]
Qr(QrCommand),
}
#[derive(Subcommand, Debug)]
enum QrCommand {
/// Scan QRCode login to get a token
Login,
/// Generate a QRCode
Generate,
/// Query the QRCode login result
#[command(arg_required_else_help = true)]
Query {
/// Query parameter sid
#[arg(long)]
sid: String,
},
}
#[tokio::main(flavor = "multi_thread")]
async fn main() -> anyhow::Result<()> {
#[cfg(feature = "native-tls-vendored")]
openssl_probe::init_ssl_cert_env_vars();
let opt = Opt::parse();
if env::var("RUST_LOG").is_err() {
if opt.debug {
env::set_var("RUST_LOG", "aliyundrive_webdav=debug,reqwest=debug");
} else {
env::set_var("RUST_LOG", "aliyundrive_webdav=info,reqwest=warn");
}
}
tracing_subscriber::fmt()
.with_env_filter(EnvFilter::from_default_env())
.with_timer(tracing_subscriber::fmt::time::time())
.init();
let workdir = opt
.workdir
.or_else(|| dirs::cache_dir().map(|c| c.join("aliyundrive-webdav")));
let refresh_token_host = if opt.client_id.is_none() || opt.client_secret.is_none() {
env::var("ALIYUNDRIVE_OAUTH_SERVER")
.unwrap_or_else(|_| "https://aliyundrive-oauth.messense.me".to_string())
} else {
"https://openapi.aliyundrive.com".to_string()
};
let drive_config = DriveConfig {
api_base_url: "https://openapi.aliyundrive.com".to_string(),
refresh_token_host,
workdir,
client_id: opt.client_id.clone(),
client_secret: opt.client_secret.clone(),
drive_type: opt.drive_type.clone(),
};
// subcommands
if let Some(Commands::Qr(qr)) = opt.subcommands.as_ref() {
match qr {
QrCommand::Login => {
let refresh_token = login(drive_config.clone(), 120).await?;
println!("\nrefresh_token:\n\n{}", refresh_token)
}
QrCommand::Generate => {
let scanner = login::QrCodeScanner::new(drive_config.clone()).await?;
let data = scanner.scan().await?;
println!("{}", serde_json::to_string_pretty(&data)?);
}
QrCommand::Query { sid } => {
let scanner = login::QrCodeScanner::new(drive_config.clone()).await?;
let query_result = scanner.query(sid).await?;
if query_result.is_success() {
let code = query_result.auth_code.unwrap();
let refresh_token = scanner.fetch_refresh_token(&code).await?;
println!("{}", refresh_token)
}
}
}
return Ok(());
}
if env::var("NO_SELF_UPGRADE").is_err() && !opt.no_self_upgrade {
tokio::task::spawn_blocking(move || {
if let Err(e) = check_for_update(opt.debug) {
debug!("failed to check for update: {}", e);
}
})
.await?;
}
let auth_user = opt.auth_user;
let auth_password = opt.auth_password;
if (auth_user.is_some() && auth_password.is_none())
|| (auth_user.is_none() && auth_password.is_some())
{
bail!("auth-user and auth-password must be specified together.");
}
let tls_config = match (opt.tls_cert, opt.tls_key) {
(Some(cert), Some(key)) => Some((cert, key)),
(None, None) => None,
_ => bail!("tls-cert and tls-key must be specified together."),
};
let refresh_token_from_file = if let Some(dir) = drive_config.workdir.as_ref() {
read_refresh_token(dir).await.ok()
} else {
None
};
let refresh_token = if opt.refresh_token.is_none()
&& refresh_token_from_file.is_none()
&& atty::is(atty::Stream::Stdout)
{
login(drive_config.clone(), 30).await?
} else {
let token = opt.refresh_token.unwrap_or_default();
if !token.is_empty() && token.split('.').count() < 3 {
bail!("Invalid refresh token value found in `--refresh-token` argument");
}
token
};
let drive = AliyunDrive::new(drive_config, refresh_token).await?;
let mut fs = AliyunDriveFileSystem::new(drive, opt.root, opt.cache_size, opt.cache_ttl)?;
fs.set_no_trash(opt.no_trash)
.set_read_only(opt.read_only)
.set_upload_buffer_size(opt.upload_buffer_size)
.set_skip_upload_same_size(opt.skip_upload_same_size)
.set_prefer_http_download(opt.prefer_http_download);
debug!("aliyundrive file system initialized");
#[cfg(unix)]
let dir_cache = fs.dir_cache.clone();
let mut dav_server_builder = DavHandler::builder()
.filesystem(Box::new(fs))
.locksystem(MemLs::new())
.read_buf_size(opt.read_buffer_size)
.autoindex(opt.auto_index)
.redirect(opt.redirect);
if let Some(prefix) = opt.strip_prefix {
dav_server_builder = dav_server_builder.strip_prefix(prefix);
}
let dav_server = dav_server_builder.build_handler();
debug!(
read_buffer_size = opt.read_buffer_size,
auto_index = opt.auto_index,
"webdav handler initialized"
);
let server = WebDavServer {
host: opt.host,
port: opt.port,
auth_user,
auth_password,
tls_config,
handler: dav_server,
};
#[cfg(not(unix))]
server.serve().await?;
#[cfg(unix)]
{
let signals = Signals::new([SIGHUP])?;
let handle = signals.handle();
let signals_task = tokio::spawn(handle_signals(signals, dir_cache));
server.serve().await?;
// Terminate the signal stream.
handle.close();
signals_task.await?;
}
Ok(())
}
#[cfg(unix)]
async fn handle_signals(mut signals: Signals, dir_cache: Cache) {
while let Some(signal) = signals.next().await {
match signal {
SIGHUP => {
dir_cache.invalidate_all();
info!("directory cache invalidated by SIGHUP");
}
_ => unreachable!(),
}
}
}
async fn login(drive_config: DriveConfig, timeout: u64) -> anyhow::Result<String> {
const SLEEP: u64 = 3;
let scanner = login::QrCodeScanner::new(drive_config).await?;
// 返回二维码内容结果集
let sid = scanner.scan().await?.sid;
// 需要生成二维码的内容
let qrcode_content = format!("https://www.aliyundrive.com/o/oauth/authorize?sid={sid}");
// 打印二维码
qr2term::print_qr(&qrcode_content)?;
info!("Please scan the qrcode to login in {} seconds", timeout);
let loop_count = timeout / SLEEP;
for _i in 0..loop_count {
tokio::time::sleep(tokio::time::Duration::from_secs(SLEEP)).await;
// 模拟轮训查询二维码状态
let query_result = scanner.query(&sid).await?;
if !query_result.is_success() {
continue;
}
let code = query_result.auth_code.unwrap();
let refresh_token = scanner.fetch_refresh_token(&code).await?;
return Ok(refresh_token);
}
bail!("Login failed")
}
fn check_for_update(show_output: bool) -> anyhow::Result<()> {
use self_update::update::UpdateStatus;
#[cfg(unix)]
use std::os::unix::process::CommandExt;
use std::process::Command;
let auth_token = env::var("GITHUB_TOKEN")
.unwrap_or_else(|_| env::var("HOMEBREW_GITHUB_API_TOKEN").unwrap_or_default());
let status = self_update::backends::github::Update::configure()
.repo_owner("messense")
.repo_name("aliyundrive-webdav")
.bin_name("aliyundrive-webdav")
.target(if cfg!(target_os = "macos") {
"apple-darwin"
} else {
self_update::get_target()
})
.auth_token(&auth_token)
.show_output(show_output)
.show_download_progress(true)
.no_confirm(true)
.current_version(cargo_crate_version!())
.build()?
.update_extended()?;
if let UpdateStatus::Updated(ref release) = status {
if let Some(body) = &release.body {
if !body.trim().is_empty() {
info!("aliyundrive-webdav upgraded to {}:\n", release.version);
info!("{}", body);
} else {
info!("aliyundrive-webdav upgraded to {}", release.version);
}
}
} else {
info!("aliyundrive-webdav is up-to-date");
}
if status.updated() {
warn!("Respawning...");
let current_exe = env::current_exe();
let mut command = Command::new(current_exe?);
command.args(env::args().skip(1)).env("NO_SELF_UPGRADE", "");
#[cfg(unix)]
{
let err = command.exec();
bail!(err);
}
#[cfg(windows)]
{
let status = command.spawn().and_then(|mut c| c.wait())?;
bail!("aliyundrive-webdav upgraded");
}
}
Ok(())
}
| rust | MIT | 6e8eba62b4e50acf89681e3a67a3a693186dcd05 | 2026-01-04T15:44:31.325646Z | false |
messense/aliyundrive-webdav | https://github.com/messense/aliyundrive-webdav/blob/6e8eba62b4e50acf89681e3a67a3a693186dcd05/src/cache.rs | src/cache.rs | use std::path::Path;
use std::time::Duration;
use moka::future::Cache as MokaCache;
use tracing::debug;
use crate::drive::AliyunFile;
#[derive(Clone)]
pub struct Cache {
inner: MokaCache<String, Vec<AliyunFile>>,
}
impl Cache {
pub fn new(max_capacity: u64, ttl: u64) -> Self {
let inner = MokaCache::builder()
.max_capacity(max_capacity)
.time_to_live(Duration::from_secs(ttl))
.build();
Self { inner }
}
pub fn get(&self, key: &str) -> Option<Vec<AliyunFile>> {
debug!(key = %key, "cache: get");
self.inner.get(key)
}
pub async fn insert(&self, key: String, value: Vec<AliyunFile>) {
debug!(key = %key, "cache: insert");
self.inner.insert(key, value).await;
}
pub async fn invalidate(&self, path: &Path) {
let key = path.to_string_lossy().into_owned();
debug!(path = %path.display(), key = %key, "cache: invalidate");
self.inner.invalidate(&key).await;
}
pub async fn invalidate_parent(&self, path: &Path) {
if let Some(parent) = path.parent() {
self.invalidate(parent).await;
}
}
pub fn invalidate_all(&self) {
debug!("cache: invalidate all");
self.inner.invalidate_all();
}
}
| rust | MIT | 6e8eba62b4e50acf89681e3a67a3a693186dcd05 | 2026-01-04T15:44:31.325646Z | false |
messense/aliyundrive-webdav | https://github.com/messense/aliyundrive-webdav/blob/6e8eba62b4e50acf89681e3a67a3a693186dcd05/src/vfs.rs | src/vfs.rs | use std::collections::HashMap;
use std::fmt::{Debug, Formatter};
use std::io::{Cursor, SeekFrom, Write};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use anyhow::Result;
use bytes::{Buf, BufMut, Bytes, BytesMut};
use dashmap::DashMap;
use dav_server::{
davpath::DavPath,
fs::{
DavDirEntry, DavFile, DavFileSystem, DavMetaData, FsError, FsFuture, FsStream, OpenOptions,
ReadDirMeta,
},
};
use futures_util::future::{ready, FutureExt};
use path_slash::PathBufExt;
use tracing::{debug, error, trace, warn};
use zip::write::{FileOptions, ZipWriter};
use crate::{
cache::Cache,
drive::{model::GetFileDownloadUrlResponse, AliyunDrive, AliyunFile, DateTime, FileType},
};
#[derive(Clone)]
pub struct AliyunDriveFileSystem {
drive: AliyunDrive,
pub(crate) dir_cache: Cache,
uploading: Arc<DashMap<String, Vec<AliyunFile>>>,
root: PathBuf,
no_trash: bool,
read_only: bool,
upload_buffer_size: usize,
skip_upload_same_size: bool,
prefer_http_download: bool,
}
impl AliyunDriveFileSystem {
#[allow(clippy::too_many_arguments)]
pub fn new(drive: AliyunDrive, root: String, cache_size: u64, cache_ttl: u64) -> Result<Self> {
let dir_cache = Cache::new(cache_size, cache_ttl);
debug!("dir cache initialized");
let root = if root.starts_with('/') {
PathBuf::from(root)
} else {
Path::new("/").join(root)
};
Ok(Self {
drive,
dir_cache,
uploading: Arc::new(DashMap::new()),
root,
no_trash: false,
read_only: false,
upload_buffer_size: 16 * 1024 * 1024,
skip_upload_same_size: false,
prefer_http_download: false,
})
}
pub fn set_read_only(&mut self, read_only: bool) -> &mut Self {
self.read_only = read_only;
self
}
pub fn set_no_trash(&mut self, no_trash: bool) -> &mut Self {
self.no_trash = no_trash;
self
}
pub fn set_upload_buffer_size(&mut self, upload_buffer_size: usize) -> &mut Self {
self.upload_buffer_size = upload_buffer_size;
self
}
pub fn set_skip_upload_same_size(&mut self, skip_upload_same_size: bool) -> &mut Self {
self.skip_upload_same_size = skip_upload_same_size;
self
}
pub fn set_prefer_http_download(&mut self, prefer_http_download: bool) -> &mut Self {
self.prefer_http_download = prefer_http_download;
self
}
fn find_in_cache(&self, path: &Path) -> Result<Option<AliyunFile>, FsError> {
if let Some(parent) = path.parent() {
let parent_str = parent.to_string_lossy();
let file_name = path
.file_name()
.ok_or(FsError::NotFound)?
.to_string_lossy()
.into_owned();
let file = self.dir_cache.get(&parent_str).and_then(|files| {
for file in &files {
if file.name == file_name {
return Some(file.clone());
}
}
None
});
Ok(file)
} else {
let root = AliyunFile::new_root();
Ok(Some(root))
}
}
async fn get_file(&self, path: PathBuf) -> Result<Option<AliyunFile>, FsError> {
let path_str = path.to_slash_lossy();
let file = self.find_in_cache(&path)?;
if let Some(file) = file {
trace!(path = %path.display(), file_id = %file.id, "file found in cache");
Ok(Some(file))
} else {
trace!(path = %path.display(), "file not found in cache");
if let Ok(Some(file)) = self.drive.get_by_path(&path_str).await {
return Ok(Some(file));
}
// path may contain whitespaces which get_by_path can't handle
// so we try to find it in directory
let parts: Vec<&str> = path_str.split('/').collect();
let parts_len = parts.len();
let filename = parts[parts_len - 1];
let mut prefix = PathBuf::from("/");
for part in &parts[0..parts_len - 1] {
let parent = prefix.join(part);
prefix = parent.clone();
let files = self.read_dir_and_cache(parent).await?;
if let Some(file) = files.iter().find(|f| f.name == filename) {
trace!(path = %path.display(), file_id = %file.id, "file found in cache");
return Ok(Some(file.clone()));
}
}
Ok(None)
}
}
async fn read_dir_and_cache(&self, path: PathBuf) -> Result<Vec<AliyunFile>, FsError> {
let path_str = path.to_slash_lossy();
let parent_file_id = if path_str == "/" {
"root".to_string()
} else {
match self.find_in_cache(&path) {
Ok(Some(file)) => file.id,
_ => match self.drive.get_by_path(&path_str).await {
Ok(Some(file)) => file.id,
Ok(None) => return Err(FsError::NotFound),
Err(err) => {
error!(path = %path_str, error = %err, "get_by_path failed");
return Err(FsError::GeneralFailure);
}
},
}
};
let mut files = if let Some(files) = self.dir_cache.get(&path_str) {
debug!(path = %path_str, "read_dir cache hit");
files
} else {
let res = self
.list_files_and_cache(path_str.to_string(), parent_file_id.clone())
.await;
match res {
Ok(files) => {
debug!(path = %path_str, "read_dir cache miss");
files
}
Err(err) => {
if let Some(req_err) = err.downcast_ref::<reqwest::Error>() {
if matches!(req_err.status(), Some(reqwest::StatusCode::NOT_FOUND)) {
debug!(path = %path_str, "read_dir not found");
return Err(FsError::NotFound);
} else {
error!(path = %path_str, error = %err, "list_files_and_cache failed");
return Err(FsError::GeneralFailure);
}
} else {
error!(path = %path_str, error = %err, "list_files_and_cache failed");
return Err(FsError::GeneralFailure);
}
}
}
};
let uploading_files = self.list_uploading_files(&parent_file_id);
if !uploading_files.is_empty() {
debug!("added {} uploading files", uploading_files.len());
files.extend(uploading_files);
}
Ok(files)
}
fn list_uploading_files(&self, parent_file_id: &str) -> Vec<AliyunFile> {
self.uploading
.get(parent_file_id)
.map(|val_ref| val_ref.value().clone())
.unwrap_or_default()
}
fn remove_uploading_file(&self, parent_file_id: &str, name: &str) {
if let Some(mut files) = self.uploading.get_mut(parent_file_id) {
if let Some(index) = files.iter().position(|x| x.name == name) {
files.swap_remove(index);
}
}
}
async fn list_files_and_cache(
&self,
path_str: String,
parent_file_id: String,
) -> Result<Vec<AliyunFile>> {
let files = self.drive.list_all(&parent_file_id).await?;
self.cache_dir(path_str, files.clone()).await;
Ok(files)
}
async fn cache_dir(&self, dir_path: String, files: Vec<AliyunFile>) {
trace!(path = %dir_path, count = files.len(), "cache dir");
self.dir_cache.insert(dir_path, files).await;
}
fn normalize_dav_path(&self, dav_path: &DavPath) -> PathBuf {
let path = dav_path.as_pathbuf();
if self.root.parent().is_none() || path.starts_with(&self.root) {
return path;
}
let rel_path = dav_path.as_rel_ospath();
if rel_path == Path::new("") {
return self.root.clone();
}
self.root.join(rel_path)
}
}
impl DavFileSystem for AliyunDriveFileSystem {
fn open<'a>(
&'a self,
dav_path: &'a DavPath,
options: OpenOptions,
) -> FsFuture<Box<dyn DavFile>> {
let path = self.normalize_dav_path(dav_path);
let mode = if options.write { "write" } else { "read" };
debug!(path = %path.display(), mode = %mode, "fs: open");
async move {
if options.append {
// Can't support open in write-append mode
error!(path = %path.display(), "unsupported write-append mode");
return Err(FsError::NotImplemented);
}
let parent_path = path.parent().ok_or(FsError::NotFound)?;
let parent_file = self
.get_file(parent_path.to_path_buf())
.await?
.ok_or(FsError::NotFound)?;
let sha1 = options.checksum.and_then(|c| {
if let Some((algo, hash)) = c.split_once(':') {
if algo.eq_ignore_ascii_case("sha1") {
Some(hash.to_string())
} else {
None
}
} else {
None
}
});
let mut dav_file = if let Some(file) = self.get_file(path.clone()).await? {
if options.write && options.create_new {
return Err(FsError::Exists);
}
if options.write && self.read_only {
return Err(FsError::Forbidden);
}
AliyunDavFile::new(
self.clone(),
file,
parent_file.id,
parent_path.to_path_buf(),
options.size.unwrap_or_default(),
sha1,
)
} else if options.write && (options.create || options.create_new) {
if self.read_only {
return Err(FsError::Forbidden);
}
let size = options.size;
let name = dav_path
.file_name()
.ok_or(FsError::GeneralFailure)?
.to_string();
// 忽略 macOS 上的一些特殊文件
if name == ".DS_Store" || name.starts_with("._") {
return Err(FsError::NotFound);
}
let now = SystemTime::now();
let file = AliyunFile {
name,
id: "".to_string(),
r#type: FileType::File,
created_at: DateTime::new(now),
updated_at: DateTime::new(now),
size: size.unwrap_or(0),
url: None,
content_hash: None,
};
let mut uploading = self.uploading.entry(parent_file.id.clone()).or_default();
uploading.push(file.clone());
AliyunDavFile::new(
self.clone(),
file,
parent_file.id,
parent_path.to_path_buf(),
size.unwrap_or(0),
sha1,
)
} else {
return Err(FsError::NotFound);
};
dav_file.http_download = self.prefer_http_download;
Ok(Box::new(dav_file) as Box<dyn DavFile>)
}
.boxed()
}
fn read_dir<'a>(
&'a self,
path: &'a DavPath,
_meta: ReadDirMeta,
) -> FsFuture<FsStream<Box<dyn DavDirEntry>>> {
let path = self.normalize_dav_path(path);
debug!(path = %path.display(), "fs: read_dir");
async move {
let files = self.read_dir_and_cache(path.clone()).await?;
let mut v: Vec<Box<dyn DavDirEntry>> = Vec::with_capacity(files.len());
for file in files {
v.push(Box::new(file));
}
let stream = futures_util::stream::iter(v);
Ok(Box::pin(stream) as FsStream<Box<dyn DavDirEntry>>)
}
.boxed()
}
fn metadata<'a>(&'a self, path: &'a DavPath) -> FsFuture<Box<dyn DavMetaData>> {
let path = self.normalize_dav_path(path);
debug!(path = %path.display(), "fs: metadata");
async move {
let file = self.get_file(path).await?.ok_or(FsError::NotFound)?;
Ok(Box::new(file) as Box<dyn DavMetaData>)
}
.boxed()
}
fn create_dir<'a>(&'a self, dav_path: &'a DavPath) -> FsFuture<()> {
let path = self.normalize_dav_path(dav_path);
debug!(path = %path.display(), "fs: create_dir");
async move {
if self.read_only {
return Err(FsError::Forbidden);
}
let parent_path = path.parent().ok_or(FsError::NotFound)?;
let parent_file = self
.get_file(parent_path.to_path_buf())
.await?
.ok_or(FsError::NotFound)?;
if !matches!(parent_file.r#type, FileType::Folder) {
return Err(FsError::Forbidden);
}
if let Some(name) = path.file_name() {
let name = name.to_string_lossy().into_owned();
self.drive
.create_folder(&parent_file.id, &name)
.await
.map_err(|err| {
error!(path = %path.display(), error = %err, "create folder failed");
FsError::GeneralFailure
})?;
self.dir_cache.invalidate(parent_path).await;
Ok(())
} else {
Err(FsError::Forbidden)
}
}
.boxed()
}
fn remove_dir<'a>(&'a self, dav_path: &'a DavPath) -> FsFuture<()> {
let path = self.normalize_dav_path(dav_path);
debug!(path = %path.display(), "fs: remove_dir");
async move {
if self.read_only {
return Err(FsError::Forbidden);
}
let file = self
.get_file(path.clone())
.await?
.ok_or(FsError::NotFound)?;
if !matches!(file.r#type, FileType::Folder) {
return Err(FsError::Forbidden);
}
self.drive
.remove_file(&file.id, !self.no_trash)
.await
.map_err(|err| {
error!(path = %path.display(), error = %err, "remove directory failed");
FsError::GeneralFailure
})?;
self.dir_cache.invalidate(&path).await;
self.dir_cache.invalidate_parent(&path).await;
Ok(())
}
.boxed()
}
fn remove_file<'a>(&'a self, dav_path: &'a DavPath) -> FsFuture<()> {
let path = self.normalize_dav_path(dav_path);
debug!(path = %path.display(), "fs: remove_file");
async move {
if self.read_only {
return Err(FsError::Forbidden);
}
let file = self
.get_file(path.clone())
.await?
.ok_or(FsError::NotFound)?;
if !matches!(file.r#type, FileType::File) {
return Err(FsError::Forbidden);
}
self.drive
.remove_file(&file.id, !self.no_trash)
.await
.map_err(|err| {
error!(path = %path.display(), error = %err, "remove file failed");
FsError::GeneralFailure
})?;
self.dir_cache.invalidate_parent(&path).await;
Ok(())
}
.boxed()
}
fn copy<'a>(&'a self, from_dav: &'a DavPath, to_dav: &'a DavPath) -> FsFuture<()> {
let from = self.normalize_dav_path(from_dav);
let to = self.normalize_dav_path(to_dav);
debug!(from = %from.display(), to = %to.display(), "fs: copy");
async move {
if self.read_only {
return Err(FsError::Forbidden);
}
let file = self
.get_file(from.clone())
.await?
.ok_or(FsError::NotFound)?;
let to_parent_file = self
.get_file(to.parent().unwrap().to_path_buf())
.await?
.ok_or(FsError::NotFound)?;
self.drive
.copy_file(&file.id, &to_parent_file.id)
.await
.map_err(|err| {
error!(from = %from.display(), to = %to.display(), error = %err, "copy file failed");
FsError::GeneralFailure
})?;
self.dir_cache.invalidate(&to).await;
self.dir_cache.invalidate_parent(&to).await;
Ok(())
}
.boxed()
}
fn rename<'a>(&'a self, from_dav: &'a DavPath, to_dav: &'a DavPath) -> FsFuture<()> {
let from = self.normalize_dav_path(from_dav);
let to = self.normalize_dav_path(to_dav);
debug!(from = %from.display(), to = %to.display(), "fs: rename");
async move {
if self.read_only {
return Err(FsError::Forbidden);
}
let is_dir;
if from.parent() == to.parent() {
// rename
if let Some(name) = to.file_name() {
let file = self
.get_file(from.clone())
.await?
.ok_or(FsError::NotFound)?;
is_dir = matches!(file.r#type, FileType::Folder);
let name = name.to_string_lossy().into_owned();
self.drive
.rename_file(&file.id, &name)
.await
.map_err(|err| {
error!(from = %from.display(), to = %to.display(), error = %err, "rename file failed");
FsError::GeneralFailure
})?;
} else {
return Err(FsError::Forbidden);
}
} else {
// move
let file = self
.get_file(from.clone())
.await?
.ok_or(FsError::NotFound)?;
is_dir = matches!(file.r#type, FileType::Folder);
let to_parent_file = self
.get_file(to.parent().unwrap().to_path_buf())
.await?
.ok_or(FsError::NotFound)?;
let new_name = to_dav.file_name();
self.drive
.move_file(&file.id, &to_parent_file.id, new_name)
.await
.map_err(|err| {
error!(from = %from.display(), to = %to.display(), error = %err, "move file failed");
FsError::GeneralFailure
})?;
}
if is_dir {
self.dir_cache.invalidate(&from).await;
}
self.dir_cache.invalidate_parent(&from).await;
self.dir_cache.invalidate_parent(&to).await;
Ok(())
}
.boxed()
}
fn get_quota(&self) -> FsFuture<(u64, Option<u64>)> {
debug!("fs: get_quota");
async move {
let (used, total) = self.drive.get_quota().await.map_err(|err| {
error!(error = %err, "get quota failed");
FsError::GeneralFailure
})?;
Ok((used, Some(total)))
}
.boxed()
}
fn have_props<'a>(
&'a self,
_path: &'a DavPath,
) -> std::pin::Pin<Box<dyn futures_util::Future<Output = bool> + Send + 'a>> {
Box::pin(ready(true))
}
fn get_prop(&self, dav_path: &DavPath, prop: dav_server::fs::DavProp) -> FsFuture<Vec<u8>> {
let path = self.normalize_dav_path(dav_path);
let prop_name = match prop.prefix.as_ref() {
Some(prefix) => format!("{}:{}", prefix, prop.name),
None => prop.name.to_string(),
};
debug!(path = %path.display(), prop = %prop_name, "fs: get_prop");
async move {
if prop.namespace.as_deref() == Some("http://owncloud.org/ns")
&& prop.name == "checksums"
{
let file = self.get_file(path).await?.ok_or(FsError::NotFound)?;
if let Some(sha1) = file.content_hash {
let xml = format!(
r#"<?xml version="1.0"?>
<oc:checksums xmlns:d="DAV:" xmlns:nc="http://nextcloud.org/ns" xmlns:oc="http://owncloud.org/ns">
<oc:checksum>sha1:{}</oc:checksum>
</oc:checksums>
"#,
sha1
);
return Ok(xml.into_bytes());
}
}
Err(FsError::NotImplemented)
}
.boxed()
}
}
#[derive(Debug, Clone)]
struct UploadState {
size: u64,
buffer: BytesMut,
chunk_count: u64,
chunk: u64,
upload_id: String,
upload_urls: Vec<String>,
sha1: Option<String>,
}
impl Default for UploadState {
fn default() -> Self {
Self {
size: 0,
buffer: BytesMut::new(),
chunk_count: 0,
chunk: 1,
upload_id: String::new(),
upload_urls: Vec::new(),
sha1: None,
}
}
}
struct AliyunDavFile {
fs: AliyunDriveFileSystem,
file: AliyunFile,
parent_file_id: String,
parent_dir: PathBuf,
current_pos: u64,
upload_state: UploadState,
http_download: bool,
}
impl Debug for AliyunDavFile {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
f.debug_struct("AliyunDavFile")
.field("file", &self.file)
.field("parent_file_id", &self.parent_file_id)
.field("current_pos", &self.current_pos)
.field("upload_state", &self.upload_state)
.finish()
}
}
impl AliyunDavFile {
fn new(
fs: AliyunDriveFileSystem,
file: AliyunFile,
parent_file_id: String,
parent_dir: PathBuf,
size: u64,
sha1: Option<String>,
) -> Self {
Self {
fs,
file,
parent_file_id,
parent_dir,
current_pos: 0,
upload_state: UploadState {
size,
sha1,
..Default::default()
},
http_download: false,
}
}
async fn get_download_url(&self) -> Result<GetFileDownloadUrlResponse, FsError> {
self.fs.drive.get_download_url(&self.file.id).await.map_err(|err| {
error!(file_id = %self.file.id, file_name = %self.file.name, error = %err, "get download url failed");
FsError::GeneralFailure
})
}
async fn prepare_for_upload(&mut self) -> Result<bool, FsError> {
if self.upload_state.chunk_count == 0 {
let size = self.upload_state.size;
debug!(file_name = %self.file.name, size = size, "prepare for upload");
if !self.file.id.is_empty() {
if let Some(content_hash) = self.file.content_hash.as_ref() {
if let Some(sha1) = self.upload_state.sha1.as_ref() {
if content_hash.eq_ignore_ascii_case(sha1) {
debug!(file_name = %self.file.name, sha1 = %sha1, "skip uploading same content hash file");
return Ok(false);
}
}
}
if self.fs.skip_upload_same_size && self.file.size == size {
debug!(file_name = %self.file.name, size = size, "skip uploading same size file");
return Ok(false);
}
// existing file, delete before upload
if let Err(err) = self
.fs
.drive
.remove_file(&self.file.id, !self.fs.no_trash)
.await
{
error!(file_name = %self.file.name, error = %err, "delete file before upload failed");
}
}
// TODO: create parent folders?
let upload_buffer_size = self.fs.upload_buffer_size as u64;
let chunk_count =
size / upload_buffer_size + if size % upload_buffer_size != 0 { 1 } else { 0 };
self.upload_state.chunk_count = chunk_count;
let res = self
.fs
.drive
.create_file_with_proof(&self.file.name, &self.parent_file_id, size, chunk_count)
.await
.map_err(|err| {
error!(file_name = %self.file.name, error = %err, "create file with proof failed");
FsError::GeneralFailure
})?;
self.file.id = res.file_id.clone();
let Some(upload_id) = res.upload_id else {
error!("create file with proof failed: missing upload_id");
return Err(FsError::GeneralFailure);
};
self.upload_state.upload_id = upload_id;
let upload_urls: Vec<_> = res
.part_info_list
.into_iter()
.map(|x| x.upload_url)
.collect();
if upload_urls.is_empty() {
error!(file_id = %self.file.id, file_name = %self.file.name, "empty upload urls");
return Err(FsError::GeneralFailure);
}
self.upload_state.upload_urls = upload_urls;
}
Ok(true)
}
async fn maybe_upload_chunk(&mut self, remaining: bool) -> Result<(), FsError> {
let chunk_size = if remaining {
// last chunk size maybe less than upload_buffer_size
self.upload_state.buffer.remaining()
} else {
self.fs.upload_buffer_size
};
let current_chunk = self.upload_state.chunk;
if chunk_size > 0
&& self.upload_state.buffer.remaining() >= chunk_size
&& current_chunk <= self.upload_state.chunk_count
{
let chunk_data = self.upload_state.buffer.split_to(chunk_size);
debug!(
file_id = %self.file.id,
file_name = %self.file.name,
size = self.upload_state.size,
"upload part {}/{}",
current_chunk,
self.upload_state.chunk_count
);
let mut upload_url = &self.upload_state.upload_urls[current_chunk as usize - 1];
let upload_data = chunk_data.freeze();
let mut res = self.fs.drive.upload(upload_url, upload_data.clone()).await;
if let Err(ref err) = res {
if err.to_string().contains("expired") {
warn!(
file_id = %self.file.id,
file_name = %self.file.name,
upload_url = %upload_url,
"upload url expired"
);
if let Ok(part_info_list) = self
.fs
.drive
.get_upload_url(
&self.file.id,
&self.upload_state.upload_id,
self.upload_state.chunk_count,
)
.await
{
let upload_urls: Vec<_> =
part_info_list.into_iter().map(|x| x.upload_url).collect();
self.upload_state.upload_urls = upload_urls;
upload_url = &self.upload_state.upload_urls[current_chunk as usize - 1];
// retry upload
res = self.fs.drive.upload(upload_url, upload_data).await;
}
}
res.map_err(|err| {
error!(
file_id = %self.file.id,
file_name = %self.file.name,
upload_url = %upload_url,
size = self.upload_state.size,
error = %err,
"upload file chunk {} failed",
current_chunk
);
FsError::GeneralFailure
})?;
}
self.upload_state.chunk += 1;
}
Ok(())
}
}
impl DavFile for AliyunDavFile {
fn metadata(&'_ mut self) -> FsFuture<'_, Box<dyn DavMetaData>> {
debug!(file_id = %self.file.id, file_name = %self.file.name, "file: metadata");
async move {
// 阿里云盘接口没有 .livp 格式文件下载地址
// 我们用 heic 和 mov 文件生成 zip 文件还原 .livp 文件
// 故需要重新计算文件大小
if self.file.name.ends_with(".livp") {
if let Some(file) = self
.fs
.drive
.get_file(&self.file.id)
.await
.map_err(|_| FsError::GeneralFailure)?
{
Ok(Box::new(file) as Box<dyn DavMetaData>)
} else {
Err(FsError::NotFound)
}
} else {
let file = self.file.clone();
Ok(Box::new(file) as Box<dyn DavMetaData>)
}
}
.boxed()
}
fn redirect_url(&mut self) -> FsFuture<Option<String>> {
debug!(file_id = %self.file.id, file_name = %self.file.name, "file: redirect_url");
async move {
if self.file.id.is_empty() {
return Err(FsError::NotFound);
}
let download_url = self.file.url.take();
let download_url = if let Some(mut url) = download_url {
if is_url_expired(&url) {
debug!(url = %url, "download url expired");
url = self.get_download_url().await?.url;
}
url
} else {
let res = self.get_download_url().await?;
res.url
};
if !download_url.is_empty() {
self.file.url = Some(download_url.clone());
if !download_url.contains("x-oss-additional-headers=referer") {
return Ok(Some(download_url));
}
}
Ok(None)
}
.boxed()
}
fn write_buf(&'_ mut self, buf: Box<dyn Buf + Send>) -> FsFuture<'_, ()> {
debug!(file_id = %self.file.id, file_name = %self.file.name, "file: write_buf");
async move {
if self.prepare_for_upload().await? {
self.upload_state.buffer.put(buf);
self.maybe_upload_chunk(false).await?;
}
Ok(())
}
.boxed()
}
fn write_bytes(&mut self, buf: Bytes) -> FsFuture<()> {
debug!(file_id = %self.file.id, file_name = %self.file.name, size = buf.len(), "file: write_bytes");
async move {
if self.prepare_for_upload().await? {
self.upload_state.buffer.extend_from_slice(&buf);
self.maybe_upload_chunk(false).await?;
}
Ok(())
}
.boxed()
}
fn read_bytes(&mut self, count: usize) -> FsFuture<Bytes> {
debug!(
file_id = %self.file.id,
file_name = %self.file.name,
pos = self.current_pos,
count = count,
size = self.file.size,
"file: read_bytes",
);
async move {
if self.file.id.is_empty() {
// upload in progress
return Err(FsError::NotFound);
}
let download_url = self.file.url.take();
| rust | MIT | 6e8eba62b4e50acf89681e3a67a3a693186dcd05 | 2026-01-04T15:44:31.325646Z | true |
messense/aliyundrive-webdav | https://github.com/messense/aliyundrive-webdav/blob/6e8eba62b4e50acf89681e3a67a3a693186dcd05/src/drive/model.rs | src/drive/model.rs | use std::collections::HashMap;
use std::ops;
use std::time::SystemTime;
use ::time::{format_description::well_known::Rfc3339, OffsetDateTime};
use serde::{Deserialize, Deserializer, Serialize};
#[derive(Debug, Clone, Deserialize)]
pub struct RefreshTokenResponse {
pub access_token: String,
pub refresh_token: String,
pub expires_in: u64,
pub token_type: String,
}
#[derive(Debug, Clone, Deserialize)]
pub struct GetDriveInfoResponse {
pub default_drive_id: String,
pub resource_drive_id: Option<String>,
pub backup_drive_id: Option<String>,
}
#[derive(Debug, Clone, Serialize)]
pub struct ListFileRequest<'a> {
pub drive_id: &'a str,
pub parent_file_id: &'a str,
pub limit: u64,
pub fields: &'a str,
pub order_by: &'a str,
pub order_direction: &'a str,
pub marker: Option<&'a str>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct ListFileResponse {
pub items: Vec<ListFileItem>,
pub next_marker: String,
}
#[derive(Debug, Clone, Deserialize)]
pub struct ListFileItem {
pub name: String,
pub category: Option<String>,
#[serde(rename = "file_id")]
pub id: String,
pub r#type: FileType,
pub created_at: DateTime,
pub updated_at: DateTime,
pub size: Option<u64>,
pub url: Option<String>,
pub content_hash: Option<String>,
}
#[derive(Debug, Clone, Serialize)]
pub struct GetFileByPathRequest<'a> {
pub drive_id: &'a str,
pub file_path: &'a str,
}
#[derive(Debug, Clone, Serialize)]
pub struct GetFileRequest<'a> {
pub drive_id: &'a str,
pub file_id: &'a str,
}
#[derive(Debug, Clone, Deserialize)]
pub struct StreamInfo {
pub size: u64,
}
#[derive(Debug, Clone, Deserialize)]
pub struct GetFileResponse {
pub name: String,
pub file_extension: String,
#[serde(rename = "file_id")]
pub id: String,
pub r#type: FileType,
pub created_at: DateTime,
pub updated_at: DateTime,
#[serde(default)]
pub size: u64,
pub streams_info: HashMap<String, StreamInfo>,
}
impl From<GetFileResponse> for AliyunFile {
fn from(res: GetFileResponse) -> AliyunFile {
let size = if res.file_extension != "livp" || res.streams_info.is_empty() {
res.size
} else {
let name = res.name.replace(".livp", "");
let mut zip_size = 0;
for (typ, info) in &res.streams_info {
let name_len = format!("{}.{}", name, typ).len() as u64;
// local file header size
zip_size += 30;
zip_size += name_len;
// file size
zip_size += info.size;
// central directory entry size
zip_size += 46;
zip_size += name_len;
}
// End of central directory size
zip_size += 22;
zip_size
};
AliyunFile {
name: res.name,
id: res.id,
r#type: res.r#type,
created_at: res.created_at,
updated_at: res.updated_at,
size,
url: None,
content_hash: None,
}
}
}
#[derive(Debug, Clone, Serialize)]
pub struct GetFileDownloadUrlRequest<'a> {
pub drive_id: &'a str,
pub file_id: &'a str,
pub expire_sec: usize,
}
#[derive(Debug, Clone, Deserialize)]
pub struct GetFileDownloadUrlResponse {
pub url: String,
#[serde(default)]
pub streams_url: HashMap<String, String>,
pub expiration: String,
pub method: String,
}
#[derive(Debug, Clone, Serialize)]
pub struct TrashRequest<'a> {
pub drive_id: &'a str,
pub file_id: &'a str,
}
#[derive(Debug, Clone, Serialize)]
pub struct DeleteFileRequest<'a> {
pub drive_id: &'a str,
pub file_id: &'a str,
}
#[derive(Debug, Clone, Serialize)]
pub struct CreateFolderRequest<'a> {
pub check_name_mode: &'a str,
pub drive_id: &'a str,
pub name: &'a str,
pub parent_file_id: &'a str,
pub r#type: &'a str,
}
#[derive(Debug, Clone, Serialize)]
pub struct RenameFileRequest<'a> {
pub drive_id: &'a str,
pub file_id: &'a str,
pub name: &'a str,
}
#[derive(Debug, Clone, Serialize)]
pub struct MoveFileRequest<'a> {
pub drive_id: &'a str,
pub file_id: &'a str,
pub to_parent_file_id: &'a str,
pub new_name: Option<&'a str>,
}
#[derive(Debug, Clone, Serialize)]
pub struct CopyFileRequest<'a> {
pub drive_id: &'a str,
pub file_id: &'a str,
pub to_parent_file_id: &'a str,
pub auto_rename: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct UploadPartInfo {
pub part_number: u64,
#[serde(skip_serializing_if = "String::is_empty")]
pub upload_url: String,
}
#[derive(Debug, Clone, Serialize)]
pub struct CreateFileWithProofRequest<'a> {
pub check_name_mode: &'a str,
pub content_hash: &'a str,
pub content_hash_name: &'a str,
pub drive_id: &'a str,
pub name: &'a str,
pub parent_file_id: &'a str,
pub proof_code: &'a str,
pub proof_version: &'a str,
pub size: u64,
pub part_info_list: Vec<UploadPartInfo>,
pub r#type: &'a str,
}
#[derive(Debug, Clone, Deserialize)]
pub struct CreateFileWithProofResponse {
#[serde(default)]
pub part_info_list: Vec<UploadPartInfo>,
pub file_id: String,
pub upload_id: Option<String>,
pub file_name: String,
}
#[derive(Debug, Clone, Serialize)]
pub struct CompleteUploadRequest<'a> {
pub drive_id: &'a str,
pub file_id: &'a str,
pub upload_id: &'a str,
}
#[derive(Debug, Clone, Serialize)]
pub struct GetUploadUrlRequest<'a> {
pub drive_id: &'a str,
pub file_id: &'a str,
pub upload_id: &'a str,
pub part_info_list: Vec<UploadPartInfo>,
}
#[derive(Debug, Clone, Deserialize)]
pub struct SpaceInfo {
pub total_size: u64,
pub used_size: u64,
}
#[derive(Debug, Clone, Deserialize)]
pub struct GetSpaceInfoResponse {
pub personal_space_info: SpaceInfo,
}
#[derive(Debug, Clone)]
pub struct DateTime(SystemTime);
impl DateTime {
pub fn new(st: SystemTime) -> Self {
Self(st)
}
}
impl<'a> Deserialize<'a> for DateTime {
fn deserialize<D: Deserializer<'a>>(deserializer: D) -> Result<Self, D::Error> {
let dt = OffsetDateTime::parse(<&str>::deserialize(deserializer)?, &Rfc3339)
.map_err(serde::de::Error::custom)?;
Ok(Self(dt.into()))
}
}
impl ops::Deref for DateTime {
type Target = SystemTime;
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[derive(Debug, Clone, Copy, Deserialize)]
#[serde(rename_all = "lowercase")]
pub enum FileType {
Folder,
File,
}
#[derive(Debug, Clone, Deserialize)]
pub struct AliyunFile {
pub name: String,
#[serde(rename = "file_id")]
pub id: String,
pub r#type: FileType,
pub created_at: DateTime,
pub updated_at: DateTime,
#[serde(default)]
pub size: u64,
pub url: Option<String>,
pub content_hash: Option<String>,
}
impl AliyunFile {
pub fn new_root() -> Self {
let now = SystemTime::now();
Self {
name: "/".to_string(),
id: "root".to_string(),
r#type: FileType::Folder,
created_at: DateTime(now),
updated_at: DateTime(now),
size: 0,
url: None,
content_hash: None,
}
}
}
impl From<ListFileItem> for AliyunFile {
fn from(f: ListFileItem) -> Self {
Self {
name: f.name,
id: f.id,
r#type: f.r#type,
created_at: f.created_at,
updated_at: f.updated_at,
size: f.size.unwrap_or_default(),
// 文件列表接口返回的图片下载地址经常是有问题的, 不使用它
url: if matches!(f.category.as_deref(), Some("image")) {
None
} else {
f.url
},
content_hash: f.content_hash,
}
}
}
| rust | MIT | 6e8eba62b4e50acf89681e3a67a3a693186dcd05 | 2026-01-04T15:44:31.325646Z | false |
messense/aliyundrive-webdav | https://github.com/messense/aliyundrive-webdav/blob/6e8eba62b4e50acf89681e3a67a3a693186dcd05/src/drive/mod.rs | src/drive/mod.rs | use std::collections::HashMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use anyhow::{bail, Context, Result};
use bytes::Bytes;
use clap::ValueEnum;
use dav_server::fs::{DavDirEntry, DavMetaData, FsFuture, FsResult};
use futures_util::future::FutureExt;
use reqwest::{
header::{HeaderMap, HeaderValue},
IntoUrl, StatusCode,
};
use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};
use reqwest_retry::{policies::ExponentialBackoff, RetryTransientMiddleware};
use serde::de::DeserializeOwned;
use serde::Serialize;
use tokio::{
sync::{oneshot, RwLock},
time,
};
use tracing::{debug, error, info, warn};
pub mod model;
use model::*;
pub use model::{AliyunFile, DateTime, FileType};
const ORIGIN: &str = "https://www.aliyundrive.com";
const REFERER: &str = "https://www.aliyundrive.com/";
const UA: &str = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.83 Safari/537.36";
/// Aliyundrive drive type
#[derive(Debug, Clone, Copy, ValueEnum)]
pub enum DriveType {
/// Resource drive
Resource,
/// Backup drive
Backup,
/// Default drive
Default,
}
#[derive(Debug, Clone)]
pub struct DriveConfig {
pub api_base_url: String,
pub refresh_token_host: String,
pub workdir: Option<PathBuf>,
pub client_id: Option<String>,
pub client_secret: Option<String>,
pub drive_type: Option<DriveType>,
}
#[derive(Debug, Clone)]
struct Credentials {
refresh_token: String,
access_token: Option<String>,
}
#[derive(Debug, Clone)]
pub struct AliyunDrive {
config: DriveConfig,
client: ClientWithMiddleware,
credentials: Arc<RwLock<Credentials>>,
drive_id: Option<String>,
}
impl AliyunDrive {
pub async fn new(config: DriveConfig, refresh_token: String) -> Result<Self> {
let refresh_token_is_empty = refresh_token.is_empty();
let credentials = Credentials {
refresh_token,
access_token: None,
};
let mut headers = HeaderMap::new();
headers.insert("Origin", HeaderValue::from_static(ORIGIN));
headers.insert("Referer", HeaderValue::from_static(REFERER));
if let Ok(canary_env) = std::env::var("ALIYUNDRIVE_CANARY") {
// 灰度环境:gray
headers.insert("X-Canary", HeaderValue::from_str(&canary_env)?);
}
let retry_policy = ExponentialBackoff::builder()
.backoff_exponent(2)
.retry_bounds(Duration::from_millis(100), Duration::from_secs(5))
.build_with_max_retries(3);
let client = reqwest::Client::builder()
.user_agent(UA)
.default_headers(headers)
// OSS closes idle connections after 60 seconds,
// so we can close idle connections ahead of time to prevent re-using them.
// See also https://github.com/hyperium/hyper/issues/2136
.pool_idle_timeout(Duration::from_secs(50))
.connect_timeout(Duration::from_secs(10))
.timeout(Duration::from_secs(30))
.build()?;
let client = ClientBuilder::new(client)
.with(RetryTransientMiddleware::new_with_policy(retry_policy))
.build();
let drive_type = config.drive_type.clone();
let mut drive = Self {
config,
client,
credentials: Arc::new(RwLock::new(credentials)),
drive_id: None,
};
let (tx, rx) = oneshot::channel();
// schedule update token task
let refresh_token_from_file = if let Some(dir) = drive.config.workdir.as_ref() {
read_refresh_token(dir).await.ok()
} else {
None
};
if refresh_token_is_empty && refresh_token_from_file.is_none() {
bail!("No refresh token provided! \n📝 Please specify refresh token from `--refresh-token` CLI option.");
}
let client = drive.clone();
tokio::spawn(async move {
let mut delay_seconds = 7000;
match client
.do_refresh_token_with_retry(refresh_token_from_file)
.await
{
Ok(res) => {
// token usually expires in 7200s, refresh earlier
delay_seconds = res.expires_in - 200;
if tx.send(res.access_token).is_err() {
error!("send access_token failed");
}
}
Err(err) => {
error!("refresh token failed: {}", err);
tx.send(String::new()).unwrap();
}
}
loop {
time::sleep(time::Duration::from_secs(delay_seconds)).await;
if let Err(err) = client.do_refresh_token_with_retry(None).await {
error!("refresh token failed: {}", err);
}
}
});
let access_token = rx.await?;
if access_token.is_empty() {
bail!("get access_token failed");
}
let drive_type_str = match drive_type {
Some(DriveType::Resource) => "resource",
Some(DriveType::Backup) => "backup",
Some(DriveType::Default) | None => "default",
};
let drive_id = drive
.get_drive_id(drive_type)
.await
.context("get drive id failed")?;
info!(drive_id = %drive_id, "found {} drive", drive_type_str);
drive.drive_id = Some(drive_id);
Ok(drive)
}
async fn save_refresh_token(&self, refresh_token: &str) -> Result<()> {
if let Some(dir) = self.config.workdir.as_ref() {
tokio::fs::create_dir_all(dir).await?;
let refresh_token_file = dir.join("refresh_token");
tokio::fs::write(refresh_token_file, refresh_token).await?;
}
Ok(())
}
async fn do_refresh_token(&self, refresh_token: &str) -> Result<RefreshTokenResponse> {
let mut data = HashMap::new();
data.insert("refresh_token", refresh_token);
data.insert("grant_type", "refresh_token");
if let Some(client_id) = self.config.client_id.as_ref() {
data.insert("client_id", client_id);
}
if let Some(client_secret) = self.config.client_secret.as_ref() {
data.insert("client_secret", client_secret);
}
let res = self
.client
.post(format!(
"{}/oauth/access_token",
&self.config.refresh_token_host
))
.json(&data)
.send()
.await?;
match res.error_for_status_ref() {
Ok(_) => {
let res = res.json::<RefreshTokenResponse>().await?;
info!("refresh token succeed");
debug!(
refresh_token = %res.refresh_token,
"new refresh token"
);
Ok(res)
}
Err(err) => {
let msg = res.text().await?;
let context = format!("{}: {}", err, msg);
Err(err).context(context)
}
}
}
async fn do_refresh_token_with_retry(
&self,
refresh_token_from_file: Option<String>,
) -> Result<RefreshTokenResponse> {
let mut last_err = None;
let mut refresh_token = self.refresh_token().await;
for _ in 0..10 {
match self.do_refresh_token(&refresh_token).await {
Ok(res) => {
let mut cred = self.credentials.write().await;
cred.refresh_token = res.refresh_token.clone();
cred.access_token = Some(res.access_token.clone());
if let Err(err) = self.save_refresh_token(&res.refresh_token).await {
error!(error = %err, "save refresh token failed");
}
return Ok(res);
}
Err(err) => {
let mut should_warn = true;
let mut should_retry = match err.downcast_ref::<reqwest::Error>() {
Some(e) => {
e.is_connect()
|| e.is_timeout()
|| matches!(e.status(), Some(StatusCode::TOO_MANY_REQUESTS))
}
None => false,
};
// retry if command line refresh_token is invalid but we also have
// refresh_token from file
if let Some(refresh_token_from_file) = refresh_token_from_file.as_ref() {
if !should_retry && &refresh_token != refresh_token_from_file {
refresh_token = refresh_token_from_file.trim().to_string();
should_retry = true;
// don't warn if we are gonna try refresh_token from file
should_warn = false;
}
}
if should_retry {
if should_warn {
warn!(error = %err, "refresh token failed, will wait and retry");
}
last_err = Some(err);
time::sleep(Duration::from_secs(1)).await;
continue;
} else {
last_err = Some(err);
break;
}
}
}
}
Err(last_err.unwrap())
}
async fn refresh_token(&self) -> String {
let cred = self.credentials.read().await;
cred.refresh_token.clone()
}
async fn access_token(&self) -> Result<String> {
let cred = self.credentials.read().await;
cred.access_token.clone().context("missing access_token")
}
fn drive_id(&self) -> Result<&str> {
self.drive_id.as_deref().context("missing drive_id")
}
async fn request<T, U>(&self, url: String, req: &T) -> Result<Option<U>>
where
T: Serialize + ?Sized,
U: DeserializeOwned,
{
let mut access_token = self.access_token().await?;
let url = reqwest::Url::parse(&url)?;
let res = self
.client
.post(url.clone())
.bearer_auth(&access_token)
.json(&req)
.send()
.await?;
match res.error_for_status_ref() {
Ok(_) => {
if res.status() == StatusCode::NO_CONTENT {
return Ok(None);
}
// let res = res.text().await?;
// println!("{}: {}", url, res);
// let res = serde_json::from_str(&res)?;
let res = res.json::<U>().await?;
Ok(Some(res))
}
Err(err) => {
let err_msg = res.text().await?;
debug!(error = %err_msg, url = %url, "request failed");
match err.status() {
Some(
status_code
@
// 4xx
(StatusCode::UNAUTHORIZED
| StatusCode::REQUEST_TIMEOUT
| StatusCode::TOO_MANY_REQUESTS
// 5xx
| StatusCode::INTERNAL_SERVER_ERROR
| StatusCode::BAD_GATEWAY
| StatusCode::SERVICE_UNAVAILABLE
| StatusCode::GATEWAY_TIMEOUT),
) => {
if status_code == StatusCode::UNAUTHORIZED {
// refresh token and retry
let token_res = self.do_refresh_token_with_retry(None).await?;
access_token = token_res.access_token;
} else {
// wait for a while and retry
time::sleep(Duration::from_secs(1)).await;
}
let res = self
.client
.post(url)
.bearer_auth(&access_token)
.json(&req)
.send()
.await?
.error_for_status()?;
if res.status() == StatusCode::NO_CONTENT {
return Ok(None);
}
let res = res.json::<U>().await?;
Ok(Some(res))
}
_ => Err(err.into()),
}
}
}
}
pub async fn get_drive_id(&self, drive_type: Option<DriveType>) -> Result<String> {
let req = HashMap::<String, String>::new();
let res: GetDriveInfoResponse = self
.request(
format!("{}/adrive/v1.0/user/getDriveInfo", self.config.api_base_url),
&req,
)
.await
.and_then(|res| res.context("expect response"))?;
let drive_id = match drive_type {
Some(DriveType::Resource) => res.resource_drive_id.unwrap_or_else(|| {
warn!("resource drive not found, use default drive instead");
res.default_drive_id
}),
Some(DriveType::Backup) => res.backup_drive_id.unwrap_or_else(|| {
warn!("backup drive not found, use default drive instead");
res.default_drive_id
}),
Some(DriveType::Default) | None => res.default_drive_id,
};
Ok(drive_id)
}
pub async fn get_file(&self, file_id: &str) -> Result<Option<AliyunFile>> {
let drive_id = self.drive_id()?;
debug!(drive_id = %drive_id, file_id = %file_id, "get file");
let req = GetFileRequest { drive_id, file_id };
let res: Result<GetFileResponse> = self
.request(
format!("{}/adrive/v1.0/openFile/get", self.config.api_base_url),
&req,
)
.await
.and_then(|res| res.context("expect response"));
match res {
Ok(file) => Ok(Some(file.into())),
Err(err) => {
if let Some(req_err) = err.downcast_ref::<reqwest::Error>() {
if matches!(req_err.status(), Some(StatusCode::NOT_FOUND)) {
Ok(None)
} else {
Err(err)
}
} else {
Err(err)
}
}
}
}
pub async fn get_by_path(&self, path: &str) -> Result<Option<AliyunFile>> {
let drive_id = self.drive_id()?;
debug!(drive_id = %drive_id, path = %path, "get file by path");
if path == "/" || path.is_empty() {
return Ok(Some(AliyunFile::new_root()));
}
let req = GetFileByPathRequest {
drive_id,
file_path: path,
};
let res: Result<AliyunFile> = self
.request(
format!(
"{}/adrive/v1.0/openFile/get_by_path",
self.config.api_base_url
),
&req,
)
.await
.and_then(|res| res.context("expect response"));
match res {
Ok(file) => Ok(Some(file)),
Err(_) => Ok(None),
}
}
pub async fn list_all(&self, parent_file_id: &str) -> Result<Vec<AliyunFile>> {
let mut files = Vec::new();
let mut marker = None;
loop {
let res = self.list(parent_file_id, marker.as_deref()).await?;
files.extend(res.items.into_iter().map(|f| f.into()));
if res.next_marker.is_empty() {
break;
}
marker = Some(res.next_marker);
}
Ok(files)
}
pub async fn list(
&self,
parent_file_id: &str,
marker: Option<&str>,
) -> Result<ListFileResponse> {
let drive_id = self.drive_id()?;
debug!(drive_id = %drive_id, parent_file_id = %parent_file_id, marker = ?marker, "list file");
let req = ListFileRequest {
drive_id,
parent_file_id,
limit: 200,
fields: "*",
order_by: "updated_at",
order_direction: "DESC",
marker,
};
self.request(
format!("{}/adrive/v1.0/openFile/list", self.config.api_base_url),
&req,
)
.await
.and_then(|res| res.context("expect response"))
}
pub async fn download<U: IntoUrl>(&self, url: U, range: Option<(u64, usize)>) -> Result<Bytes> {
use reqwest::header::RANGE;
let url = url.into_url()?;
let res = if let Some((start_pos, size)) = range {
let end_pos = start_pos + size as u64 - 1;
debug!(url = %url, start = start_pos, end = end_pos, "download file");
let range = format!("bytes={}-{}", start_pos, end_pos);
self.client
.get(url)
.header(RANGE, range)
.send()
.await?
.error_for_status()?
} else {
debug!(url = %url, "download file");
self.client.get(url).send().await?.error_for_status()?
};
Ok(res.bytes().await?)
}
pub async fn get_download_url(&self, file_id: &str) -> Result<GetFileDownloadUrlResponse> {
debug!(file_id = %file_id, "get download url");
let req = GetFileDownloadUrlRequest {
drive_id: self.drive_id()?,
file_id,
expire_sec: 14400, // 4 hours
};
let res: GetFileDownloadUrlResponse = self
.request(
format!(
"{}/adrive/v1.0/openFile/getDownloadUrl",
self.config.api_base_url
),
&req,
)
.await?
.context("expect response")?;
Ok(res)
}
async fn trash(&self, file_id: &str) -> Result<()> {
debug!(file_id = %file_id, "trash file");
let req = TrashRequest {
drive_id: self.drive_id()?,
file_id,
};
let res: Result<Option<serde::de::IgnoredAny>> = self
.request(
format!(
"{}/adrive/v1.0/openFile/recyclebin/trash",
self.config.api_base_url
),
&req,
)
.await;
if let Err(err) = res {
if let Some(req_err) = err.downcast_ref::<reqwest::Error>() {
// Ignore 404 and 400 status codes
if !matches!(
req_err.status(),
Some(StatusCode::NOT_FOUND | StatusCode::BAD_REQUEST)
) {
return Err(err);
}
}
}
Ok(())
}
async fn delete_file(&self, file_id: &str) -> Result<()> {
debug!(file_id = %file_id, "delete file");
let req = TrashRequest {
drive_id: self.drive_id()?,
file_id,
};
let res: Result<Option<serde::de::IgnoredAny>> = self
.request(
format!("{}/adrive/v1.0/openFile/delete", self.config.api_base_url),
&req,
)
.await;
if let Err(err) = res {
if let Some(req_err) = err.downcast_ref::<reqwest::Error>() {
// Ignore 404 and 400 status codes
if !matches!(
req_err.status(),
Some(StatusCode::NOT_FOUND | StatusCode::BAD_REQUEST)
) {
return Err(err);
}
}
}
Ok(())
}
pub async fn remove_file(&self, file_id: &str, trash: bool) -> Result<()> {
if trash {
self.trash(file_id).await?;
} else {
self.delete_file(file_id).await?;
}
Ok(())
}
pub async fn create_folder(&self, parent_file_id: &str, name: &str) -> Result<()> {
debug!(parent_file_id = %parent_file_id, name = %name, "create folder");
let req = CreateFolderRequest {
check_name_mode: "refuse",
drive_id: self.drive_id()?,
name,
parent_file_id,
r#type: "folder",
};
let _res: Option<serde::de::IgnoredAny> = self
.request(
format!("{}/adrive/v1.0/openFile/create", self.config.api_base_url),
&req,
)
.await?;
Ok(())
}
pub async fn rename_file(&self, file_id: &str, name: &str) -> Result<()> {
debug!(file_id = %file_id, name = %name, "rename file");
let req = RenameFileRequest {
drive_id: self.drive_id()?,
file_id,
name,
};
let _res: Option<serde::de::IgnoredAny> = self
.request(
format!("{}/adrive/v1.0/openFile/update", self.config.api_base_url),
&req,
)
.await?;
Ok(())
}
pub async fn move_file(
&self,
file_id: &str,
to_parent_file_id: &str,
new_name: Option<&str>,
) -> Result<()> {
debug!(file_id = %file_id, to_parent_file_id = %to_parent_file_id, "move file");
let drive_id = self.drive_id()?;
let req = MoveFileRequest {
drive_id,
file_id,
to_parent_file_id,
new_name,
};
let _res: Option<serde::de::IgnoredAny> = self
.request(
format!("{}/adrive/v1.0/openFile/move", self.config.api_base_url),
&req,
)
.await?;
Ok(())
}
pub async fn copy_file(&self, file_id: &str, to_parent_file_id: &str) -> Result<()> {
debug!(file_id = %file_id, to_parent_file_id = %to_parent_file_id, "copy file");
let drive_id = self.drive_id()?;
let req = CopyFileRequest {
drive_id,
file_id,
to_parent_file_id,
auto_rename: false,
};
let _res: Option<serde::de::IgnoredAny> = self
.request(
format!("{}/adrive/v1.0/openFile/copy", self.config.api_base_url),
&req,
)
.await?;
Ok(())
}
pub async fn create_file_with_proof(
&self,
name: &str,
parent_file_id: &str,
size: u64,
chunk_count: u64,
) -> Result<CreateFileWithProofResponse> {
debug!(name = %name, parent_file_id = %parent_file_id, size = size, "create file with proof");
let drive_id = self.drive_id()?;
let part_info_list = (1..=chunk_count)
.map(|part_number| UploadPartInfo {
part_number,
upload_url: String::new(),
})
.collect();
let req = CreateFileWithProofRequest {
check_name_mode: "refuse",
content_hash: "",
content_hash_name: "none",
drive_id,
name,
parent_file_id,
proof_code: "",
proof_version: "v1",
size,
part_info_list,
r#type: "file",
};
let res: CreateFileWithProofResponse = self
.request(
format!("{}/adrive/v1.0/openFile/create", self.config.api_base_url),
&req,
)
.await?
.context("expect response")?;
Ok(res)
}
pub async fn complete_file_upload(&self, file_id: &str, upload_id: &str) -> Result<()> {
debug!(file_id = %file_id, upload_id = %upload_id, "complete file upload");
let drive_id = self.drive_id()?;
let req = CompleteUploadRequest {
drive_id,
file_id,
upload_id,
};
let _res: Option<serde::de::IgnoredAny> = self
.request(
format!("{}/adrive/v1.0/openFile/complete", self.config.api_base_url),
&req,
)
.await?;
Ok(())
}
pub async fn upload(&self, url: &str, body: Bytes) -> Result<()> {
let res = self.client.put(url).body(body).send().await?;
if let Err(err) = res.error_for_status_ref() {
let detail = res
.text()
.await
.unwrap_or_else(|_| "unknown error".to_string());
bail!("{}: {}", err, detail);
}
Ok(())
}
pub async fn get_upload_url(
&self,
file_id: &str,
upload_id: &str,
chunk_count: u64,
) -> Result<Vec<UploadPartInfo>> {
debug!(file_id = %file_id, upload_id = %upload_id, "get upload url");
let drive_id = self.drive_id()?;
let part_info_list = (1..=chunk_count)
.map(|part_number| UploadPartInfo {
part_number,
upload_url: String::new(),
})
.collect();
let req = GetUploadUrlRequest {
drive_id,
file_id,
upload_id,
part_info_list,
};
let res: CreateFileWithProofResponse = self
.request(
format!(
"{}/adrive/v1.0/openFile/getUploadUrl",
self.config.api_base_url
),
&req,
)
.await?
.context("expect response")?;
Ok(res.part_info_list)
}
pub async fn get_quota(&self) -> Result<(u64, u64)> {
let drive_id = self.drive_id()?;
let mut data = HashMap::new();
data.insert("drive_id", drive_id);
let res: GetSpaceInfoResponse = self
.request(
format!("{}/adrive/v1.0/user/getSpaceInfo", self.config.api_base_url),
&data,
)
.await?
.context("expect response")?;
Ok((
res.personal_space_info.used_size,
res.personal_space_info.total_size,
))
}
}
impl DavMetaData for AliyunFile {
fn len(&self) -> u64 {
self.size
}
fn modified(&self) -> FsResult<SystemTime> {
Ok(*self.updated_at)
}
fn is_dir(&self) -> bool {
matches!(self.r#type, FileType::Folder)
}
fn created(&self) -> FsResult<SystemTime> {
Ok(*self.created_at)
}
}
impl DavDirEntry for AliyunFile {
fn name(&self) -> Vec<u8> {
self.name.as_bytes().to_vec()
}
fn metadata(&self) -> FsFuture<Box<dyn DavMetaData>> {
async move { Ok(Box::new(self.clone()) as Box<dyn DavMetaData>) }.boxed()
}
}
pub async fn read_refresh_token(workdir: &Path) -> Result<String> {
let file = workdir.join("refresh_token");
let token = tokio::fs::read_to_string(&file).await?;
if token.split('.').count() < 3 {
bail!(
"Please remove outdated refresh_token cache for v1.x at {}",
file.display(),
);
}
Ok(token)
}
| rust | MIT | 6e8eba62b4e50acf89681e3a67a3a693186dcd05 | 2026-01-04T15:44:31.325646Z | false |
messense/aliyundrive-webdav | https://github.com/messense/aliyundrive-webdav/blob/6e8eba62b4e50acf89681e3a67a3a693186dcd05/src/login/model.rs | src/login/model.rs | use std::str::FromStr;
use serde::{Deserialize, Serialize};
#[derive(Debug, Clone, Serialize)]
pub struct QrCodeRequest {
#[serde(skip_serializing_if = "Option::is_none")]
pub client_id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub client_secret: Option<String>,
pub scopes: Vec<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub width: Option<u32>,
#[serde(skip_serializing_if = "Option::is_none")]
pub height: Option<u32>,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct QrCodeResponse {
#[serde(rename = "qrCodeUrl")]
pub qr_code_url: String,
pub sid: String,
}
#[derive(Debug, Clone, Copy, Eq, PartialEq)]
pub enum QrCodeStatus {
WaitLogin,
ScanSuccess,
LoginSuccess,
QrCodeExpired,
}
impl FromStr for QrCodeStatus {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
use QrCodeStatus::*;
match s {
"WaitLogin" => Ok(WaitLogin),
"ScanSuccess" => Ok(ScanSuccess),
"LoginSuccess" => Ok(LoginSuccess),
_ => Ok(QrCodeExpired),
}
}
}
impl<'de> Deserialize<'de> for QrCodeStatus {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
FromStr::from_str(&s).map_err(serde::de::Error::custom)
}
}
#[derive(Debug, Clone, Deserialize)]
pub struct QrCodeStatusResponse {
pub status: QrCodeStatus,
#[serde(rename = "authCode")]
pub auth_code: Option<String>,
}
impl QrCodeStatusResponse {
pub fn is_success(&self) -> bool {
matches!(self.status, QrCodeStatus::LoginSuccess)
}
}
#[derive(Debug, Clone, Serialize)]
pub struct AuthorizationCodeRequest {
#[serde(skip_serializing_if = "Option::is_none")]
pub client_id: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub client_secret: Option<String>,
pub grant_type: String,
pub code: String,
}
#[derive(Debug, Clone, Deserialize)]
pub struct AuthorizationCodeResponse {
// pub token_type: String,
// pub access_token: String,
pub refresh_token: String,
// pub expires_in: usize,
}
| rust | MIT | 6e8eba62b4e50acf89681e3a67a3a693186dcd05 | 2026-01-04T15:44:31.325646Z | false |
messense/aliyundrive-webdav | https://github.com/messense/aliyundrive-webdav/blob/6e8eba62b4e50acf89681e3a67a3a693186dcd05/src/login/mod.rs | src/login/mod.rs | pub mod model;
use crate::drive::DriveConfig;
use crate::login::model::*;
pub struct QrCodeScanner {
client: reqwest::Client,
drive_config: DriveConfig,
}
impl QrCodeScanner {
pub async fn new(drive_config: DriveConfig) -> anyhow::Result<Self> {
let client = reqwest::Client::builder()
.pool_idle_timeout(std::time::Duration::from_secs(50))
.connect_timeout(std::time::Duration::from_secs(10))
.timeout(std::time::Duration::from_secs(30))
.build()?;
Ok(Self {
client,
drive_config,
})
}
}
impl QrCodeScanner {
pub async fn scan(&self) -> anyhow::Result<QrCodeResponse> {
let req = QrCodeRequest {
client_id: self.drive_config.client_id.clone(),
client_secret: self.drive_config.client_secret.clone(),
scopes: vec![
"user:base".to_string(),
"file:all:read".to_string(),
"file:all:write".to_string(),
],
width: None,
height: None,
};
let url =
if self.drive_config.client_id.is_none() || self.drive_config.client_secret.is_none() {
format!(
"{}/oauth/authorize/qrcode",
&self.drive_config.refresh_token_host
)
} else {
"https://openapi.aliyundrive.com/oauth/authorize/qrcode".to_string()
};
let resp = self.client.post(url).json(&req).send().await?;
let resp = resp.json::<QrCodeResponse>().await?;
Ok(resp)
}
pub async fn query(&self, sid: &str) -> anyhow::Result<QrCodeStatusResponse> {
let url = format!("https://openapi.aliyundrive.com/oauth/qrcode/{sid}/status");
let resp = self.client.get(url).send().await?;
let resp = resp.json::<QrCodeStatusResponse>().await?;
Ok(resp)
}
pub async fn fetch_refresh_token(&self, code: &str) -> anyhow::Result<String> {
let req = AuthorizationCodeRequest {
client_id: self.drive_config.client_id.clone(),
client_secret: self.drive_config.client_secret.clone(),
grant_type: "authorization_code".to_string(),
code: code.to_string(),
};
let url =
if self.drive_config.client_id.is_none() || self.drive_config.client_secret.is_none() {
format!(
"{}/oauth/access_token",
&self.drive_config.refresh_token_host
)
} else {
"https://openapi.aliyundrive.com/oauth/access_token".to_string()
};
let resp = self.client.post(url).json(&req).send().await?;
let resp = resp.json::<AuthorizationCodeResponse>().await?;
Ok(resp.refresh_token)
}
}
| rust | MIT | 6e8eba62b4e50acf89681e3a67a3a693186dcd05 | 2026-01-04T15:44:31.325646Z | false |
messense/aliyundrive-webdav | https://github.com/messense/aliyundrive-webdav/blob/6e8eba62b4e50acf89681e3a67a3a693186dcd05/backend/src/main.rs | backend/src/main.rs | use std::env;
use std::time::Duration;
use axum::{
body::Body,
extract::{Json, State},
http::{HeaderValue, StatusCode},
response::{IntoResponse, Response},
routing::post,
Router,
};
use reqwest::Client;
use serde::Deserialize;
use tokio;
#[derive(Deserialize)]
struct QrCodeRequest {
scopes: Vec<String>,
width: Option<u32>,
height: Option<u32>,
}
#[derive(Deserialize)]
struct AuthorizationRequest {
grant_type: String,
code: Option<String>,
refresh_token: Option<String>,
}
#[derive(Clone)]
struct AppState {
client: Client,
}
#[tokio::main]
async fn main() {
// Create a shared reqwest client
let client = reqwest::Client::builder()
.connect_timeout(Duration::from_secs(10))
.read_timeout(Duration::from_secs(30))
.build()
.unwrap();
// Create the application state
let state = AppState { client };
let app = Router::new()
.route("/oauth/authorize/qrcode", post(qrcode))
.route("/oauth/access_token", post(access_token))
.with_state(state);
let addr = "0.0.0.0:8080";
println!("Server running on {}", addr);
let listener = tokio::net::TcpListener::bind(addr).await.unwrap();
axum::serve(listener, app).await.unwrap();
}
async fn qrcode(
State(state): State<AppState>,
Json(payload): Json<QrCodeRequest>
) -> Result<impl IntoResponse, StatusCode> {
let client_id = env::var("ALIYUNDRIVE_CLIENT_ID").unwrap_or_default();
let client_secret = env::var("ALIYUNDRIVE_CLIENT_SECRET").unwrap_or_default();
let client = &state.client;
match client
.post("https://openapi.aliyundrive.com/oauth/authorize/qrcode")
.json(&serde_json::json!({
"client_id": client_id,
"client_secret": client_secret,
"scopes": payload.scopes,
"width": payload.width,
"height": payload.height,
}))
.send()
.await
{
Ok(res) => {
let status = res.status();
let headers = res.headers().clone();
let content_type = headers
.get("content-type")
.unwrap_or(&HeaderValue::from_static("application/json"))
.to_str()
.unwrap_or("application/json")
.to_string();
let body = res.bytes().await.unwrap_or_default();
Ok(Response::builder()
.status(status)
.header("Content-Type", content_type)
.body(Body::from(body))
.unwrap())
}
Err(_) => return Err(StatusCode::INTERNAL_SERVER_ERROR),
}
}
async fn access_token(
State(state): State<AppState>,
Json(payload): Json<AuthorizationRequest>,
) -> Result<impl IntoResponse, StatusCode> {
if payload.code.is_none() && payload.refresh_token.is_none() {
return Err(StatusCode::BAD_REQUEST);
}
let client_id = env::var("ALIYUNDRIVE_CLIENT_ID").unwrap_or_default();
let client_secret = env::var("ALIYUNDRIVE_CLIENT_SECRET").unwrap_or_default();
let client = &state.client;
match client
.post("https://openapi.aliyundrive.com/oauth/access_token")
.json(&serde_json::json!({
"client_id": client_id,
"client_secret": client_secret,
"grant_type": payload.grant_type,
"code": payload.code,
"refresh_token": payload.refresh_token,
}))
.send()
.await
{
Ok(res) => {
let status = res.status();
let headers = res.headers().clone();
let content_type = headers
.get("content-type")
.unwrap_or(&HeaderValue::from_static("application/json"))
.to_str()
.unwrap_or("application/json")
.to_string();
let body = res.bytes().await.unwrap_or_default();
Ok(Response::builder()
.status(status)
.header("Content-Type", content_type)
.body(Body::from(body))
.unwrap())
}
Err(_) => return Err(StatusCode::INTERNAL_SERVER_ERROR),
}
}
| rust | MIT | 6e8eba62b4e50acf89681e3a67a3a693186dcd05 | 2026-01-04T15:44:31.325646Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/build.rs | crates/lib/build.rs | use std::env;
fn main() {
let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap();
let dist_url = if target_os == "windows" {
option_env!("PKGX_DIST_URL").unwrap_or("https://dist.pkgx.dev/v2")
} else {
option_env!("PKGX_DIST_URL").unwrap_or("https://dist.pkgx.dev")
};
let default_pantry_tarball_filename = "pantry.tar.xz";
let pantry_url =
option_env!("PKGX_PANTRY_TARBALL_FILENAME").unwrap_or(default_pantry_tarball_filename);
println!("cargo:rustc-env=PKGX_DIST_URL={dist_url}");
println!("cargo:rustc-env=PKGX_PANTRY_TARBALL_FILENAME={pantry_url}");
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/config.rs | crates/lib/src/config.rs | use std::env;
use std::io;
use std::path::PathBuf;
#[derive(Debug)]
pub struct Config {
pub pantry_dir: PathBuf,
pub pantry_db_file: PathBuf,
pub dist_url: String,
pub pkgx_dir: PathBuf,
}
impl Config {
pub fn new() -> io::Result<Self> {
let pantry_dir = get_pantry_dir()?;
let pantry_db_file: PathBuf = get_pantry_db_file()?;
let dist_url = get_dist_url();
let pkgx_dir = get_pkgx_dir()?;
Ok(Self {
pantry_dir,
pantry_db_file,
dist_url,
pkgx_dir,
})
}
}
fn get_dist_url() -> String {
if let Ok(env_url) = env::var("PKGX_DIST_URL") {
return env_url;
}
env!("PKGX_DIST_URL").to_string()
}
#[allow(non_snake_case)]
fn get_PKGX_PANTRY_DIR() -> Option<PathBuf> {
if let Ok(env_dir) = env::var("PKGX_PANTRY_DIR") {
let path = PathBuf::from(env_dir);
if path.is_absolute() {
Some(path)
} else if let Ok(cwd) = env::current_dir() {
Some(cwd.join(path))
} else {
None
}
} else {
None
}
}
fn get_pantry_dir() -> io::Result<PathBuf> {
if let Some(path) = get_PKGX_PANTRY_DIR() {
Ok(path)
} else if let Some(path) = dirs_next::data_local_dir() {
Ok(path.join("pkgx/pantry"))
} else {
Err(io::Error::new(
io::ErrorKind::NotFound,
"Could not determine cache directory",
))
}
}
fn get_pkgx_dir() -> io::Result<PathBuf> {
if let Ok(path) = env::var("PKGX_DIR") {
let path = PathBuf::from(path);
if path.is_absolute() {
return Ok(path);
}
}
let default = dirs_next::home_dir().map(|x| x.join(".pkgx"));
if default.clone().is_some_and(|x| x.exists()) {
Ok(default.unwrap())
} else if let Ok(xdg) = env::var("XDG_DATA_HOME") {
Ok(PathBuf::from(xdg).join("pkgx"))
} else {
Ok(default.unwrap())
}
}
fn get_pantry_db_file() -> io::Result<PathBuf> {
if let Some(path) = get_PKGX_PANTRY_DIR() {
Ok(path.join("pantry.2.db"))
} else if let Some(path) = dirs_next::cache_dir() {
Ok(path.join("pkgx/pantry.2.db"))
} else {
Err(io::Error::new(
io::ErrorKind::NotFound,
"Could not determine data directory",
))
}
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/cellar.rs | crates/lib/src/cellar.rs | use crate::config::Config;
use crate::types::{Installation, Package, PackageReq};
use libsemverator::semver::Semver as Version;
use std::error::Error;
use std::path::PathBuf;
use tokio::fs;
pub async fn ls(project: &str, config: &Config) -> Result<Vec<Installation>, Box<dyn Error>> {
let d = config.pkgx_dir.join(project);
match fs::metadata(&d).await {
Ok(metadata) => {
if !metadata.is_dir() {
return Err(format!("err: expected directory: {:?}", d).into());
}
}
Err(e) => {
if e.kind() == std::io::ErrorKind::NotFound {
return Ok(vec![]);
} else {
return Err(e.into());
}
}
}
let mut rv = vec![];
let mut entries = fs::read_dir(&d).await?;
while let Some(entry) = entries.next_entry().await? {
let path = entry.path();
let name = entry.file_name().to_string_lossy().to_string();
if !name.starts_with('v') || name == "var" {
continue;
}
if !fs::symlink_metadata(&path).await?.is_dir() {
continue;
}
if let Ok(version) = Version::parse(&name[1..]) {
rv.push(Installation {
path,
pkg: Package {
project: project.to_string(),
version,
},
});
}
}
Ok(rv)
}
pub async fn resolve(
pkgreq: &PackageReq,
config: &Config,
) -> Result<Option<Installation>, Box<dyn Error>> {
Ok(ls(&pkgreq.project, config)
.await?
.iter()
.filter(|i| pkgreq.constraint.satisfies(&i.pkg.version))
.max_by_key(|i| i.pkg.version.clone())
.cloned())
}
pub fn dst(pkg: &Package, config: &Config) -> PathBuf {
config
.pkgx_dir
.join(pkg.project.clone())
.join(format!("v{}", pkg.version.raw))
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/platform_case_aware_env_key.rs | crates/lib/src/platform_case_aware_env_key.rs | #[cfg(windows)]
use std::{
fmt,
hash::{Hash, Hasher},
};
#[cfg(windows)]
#[derive(Clone)]
pub struct CaseInsensitiveKey(pub String);
#[cfg(windows)]
impl PartialEq for CaseInsensitiveKey {
fn eq(&self, other: &Self) -> bool {
self.0.eq_ignore_ascii_case(&other.0)
}
}
#[cfg(windows)]
impl fmt::Display for CaseInsensitiveKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.0)
}
}
#[cfg(windows)]
impl Eq for CaseInsensitiveKey {}
#[cfg(windows)]
impl Hash for CaseInsensitiveKey {
fn hash<H: Hasher>(&self, state: &mut H) {
self.0.to_lowercase().hash(state);
}
}
#[cfg(windows)]
impl fmt::Debug for CaseInsensitiveKey {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", self.0)
}
}
#[cfg(windows)]
pub type PlatformCaseAwareEnvKey = CaseInsensitiveKey;
#[cfg(not(windows))]
pub type PlatformCaseAwareEnvKey = String;
#[cfg(windows)]
pub fn construct_platform_case_aware_env_key(key: String) -> PlatformCaseAwareEnvKey {
CaseInsensitiveKey(key)
}
#[cfg(not(windows))]
pub fn construct_platform_case_aware_env_key(key: String) -> PlatformCaseAwareEnvKey {
key
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/inventory.rs | crates/lib/src/inventory.rs | use crate::client::build_client;
use crate::config::Config;
use crate::types::{host, Package, PackageReq};
use libsemverator::semver::Semver as Version;
use reqwest::Url;
use std::error::Error;
// Select function to pick a version
pub async fn select(rq: &PackageReq, config: &Config) -> Result<Option<Version>, Box<dyn Error>> {
let versions = ls(&rq.project, config).await?;
Ok(versions
.iter()
.filter(|v| rq.constraint.satisfies(v))
.max()
.cloned())
}
// Get function to fetch available versions
pub async fn ls(project: &String, config: &Config) -> Result<Vec<Version>, Box<dyn Error>> {
let base_url = config.dist_url.clone();
let (platform, arch) = host();
let url = Url::parse(&format!(
"{}/{}/{}/{}/versions.txt",
base_url, project, platform, arch
))?;
let rsp = build_client()?
.get(url.clone())
.send()
.await?
.error_for_status()?;
let releases = rsp.text().await?;
let mut versions: Vec<Version> = releases
.lines()
.map(Version::parse)
.filter_map(Result::ok)
.collect();
if versions.is_empty() {
return Err(Box::new(std::io::Error::new(
std::io::ErrorKind::NotFound,
format!("No inventory for {}", project),
)));
}
if project == "openssl.org" {
// Workaround: Remove specific version
let excluded_version = Version::parse("1.1.118")?;
versions.retain(|x| x != &excluded_version);
}
Ok(versions)
}
//TODO xz bottles are preferred
pub fn get_url(pkg: &Package, config: &Config) -> String {
let (platform, arch) = host();
format!(
"{}/{}/{}/{}/v{}.tar.xz",
config.dist_url, pkg.project, platform, arch, pkg.version.raw
)
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/lib.rs | crates/lib/src/lib.rs | mod cellar;
mod client;
pub mod config;
pub mod env;
pub mod hydrate;
mod install;
pub mod install_multi;
pub mod inventory;
mod pantry;
pub mod pantry_db;
pub mod platform_case_aware_env_key;
pub mod resolve;
pub mod sync;
pub mod types;
pub mod utils;
pub type Version = libsemverator::semver::Semver;
pub type VersionRange = libsemverator::range::Range;
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/sync.rs | crates/lib/src/sync.rs | use crate::{client::build_client, config::Config, pantry_db};
use async_compression::tokio::bufread::XzDecoder;
use fs2::FileExt;
use futures::TryStreamExt;
use rusqlite::Connection;
use std::{error::Error, fs::OpenOptions, path::PathBuf};
use tokio_tar::Archive;
use tokio_util::compat::FuturesAsyncReadCompatExt;
#[allow(clippy::all)]
pub fn should(config: &Config) -> Result<bool, Box<dyn Error>> {
if !config.pantry_dir.join("projects").is_dir() {
Ok(true)
} else {
// the file always exists because we create the connection
// but will be 0 bytes if we need to fill it
Ok(std::fs::metadata(&config.pantry_db_file)?.len() == 0)
}
}
// doesn’t replace pantry clone, will build db
// essential for working in a local pantry clone with PKGX_PANTRY_DIR set
pub async fn ensure(config: &Config, conn: &mut Connection) -> Result<(), Box<dyn Error>> {
if !config.pantry_dir.join("projects").is_dir() {
replace(config, conn).await
} else {
let lockfile = lock(config)?;
pantry_db::cache(config, conn)?;
FileExt::unlock(&lockfile)?;
Ok(())
}
}
pub async fn update(config: &Config, conn: &mut Connection) -> Result<(), Box<dyn Error>> {
if std::env::var("PKGX_PANTRY_DIR").is_ok() {
return Err("PKGX_PANTRY_DIR is set, refusing to update pantry")?;
}
replace(config, conn).await
}
async fn replace(config: &Config, conn: &mut Connection) -> Result<(), Box<dyn Error>> {
let url = format!(
"{}/{}",
config.dist_url,
env!("PKGX_PANTRY_TARBALL_FILENAME")
);
let lockfile = lock(config)?;
download_and_extract_pantry(&url, &config.pantry_dir).await?;
pantry_db::cache(config, conn)?;
FileExt::unlock(&lockfile)?;
Ok(())
}
async fn download_and_extract_pantry(url: &str, dest: &PathBuf) -> Result<(), Box<dyn Error>> {
let rsp = build_client()?.get(url).send().await?.error_for_status()?;
let stream = rsp.bytes_stream();
let stream = stream
.map_err(|e| futures::io::Error::new(futures::io::ErrorKind::Other, e))
.into_async_read();
let stream = stream.compat();
let decoder = XzDecoder::new(stream);
// Step 3: Extract the tar archive
let mut archive = Archive::new(decoder);
archive.unpack(dest).await?;
Ok(())
}
fn lock(config: &Config) -> Result<std::fs::File, Box<dyn Error>> {
std::fs::create_dir_all(&config.pantry_dir)?;
#[cfg(not(windows))]
let lockfile = OpenOptions::new().read(true).open(&config.pantry_dir)?;
#[cfg(windows)]
let lockfile = OpenOptions::new()
.read(true)
.create(true)
.truncate(true)
.write(true)
.open(config.pantry_dir.join("lockfile"))?;
lockfile.lock_exclusive()?;
Ok(lockfile)
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/pantry_db.rs | crates/lib/src/pantry_db.rs | use std::{collections::HashMap, error::Error};
use rusqlite::{params, Connection};
use crate::{config::Config, pantry, types::PackageReq};
pub fn cache(config: &Config, conn: &mut Connection) -> Result<(), Box<dyn Error>> {
conn.execute_batch(
"
PRAGMA synchronous = OFF;
PRAGMA journal_mode = MEMORY;
PRAGMA temp_store = MEMORY;
DROP TABLE IF EXISTS provides;
DROP TABLE IF EXISTS dependencies;
DROP TABLE IF EXISTS companions;
DROP TABLE IF EXISTS runtime_env;
DROP TABLE IF EXISTS aliases;
CREATE TABLE provides (
project TEXT,
program TEXT
);
CREATE TABLE dependencies (
project TEXT,
pkgspec TEXT
);
CREATE TABLE companions (
project TEXT,
pkgspec TEXT
);
CREATE TABLE runtime_env (
project TEXT,
envline TEXT
);
CREATE TABLE aliases (
project TEXT,
alias TEXT
);
CREATE INDEX idx_project ON provides(project);
CREATE INDEX idx_program ON provides(program);
CREATE INDEX idx_project_dependencies ON dependencies(project);
CREATE INDEX idx_project_companions ON companions(project);
CREATE INDEX idx_alias_project ON aliases(alias);
",
)?;
let tx = conn.transaction()?;
for pkg in pantry::ls(config) {
for mut program in pkg.programs {
program = std::path::Path::new(&program)
.file_name()
.unwrap()
.to_str()
.unwrap()
.to_string();
tx.execute(
"INSERT INTO provides (project, program) VALUES (?1, ?2);",
params![pkg.project, program],
)?;
}
if let Some(display_name) = pkg.display_name {
tx.execute(
"INSERT INTO aliases (project, alias) VALUES (?1, ?2);",
params![pkg.project, display_name],
)?;
}
for dep in pkg.deps {
tx.execute(
"INSERT INTO dependencies (project, pkgspec) VALUES (?1, ?2);",
params![pkg.project, dep.to_string()],
)?;
}
for companion in pkg.companions {
tx.execute(
"INSERT INTO companions (project, pkgspec) VALUES (?1, ?2);",
params![pkg.project, companion.to_string()],
)?;
}
for (key, value) in pkg.env {
tx.execute(
"INSERT INTO runtime_env (project, envline) VALUES (?1, ?2);",
params![pkg.project, format!("{}={}", key, value)],
)?;
}
}
tx.commit()?;
Ok(())
}
pub fn deps_for_project(
project: &String,
conn: &Connection,
) -> Result<Vec<PackageReq>, Box<dyn Error>> {
let mut stmt = conn.prepare("SELECT pkgspec FROM dependencies WHERE project = ?1")?;
let rv = stmt.query_map(params![project], |row| {
let pkgspec: String = row.get(0)?;
let pkgrq = PackageReq::parse(&pkgspec).unwrap(); //FIXME unwrap()
Ok(pkgrq)
})?;
Ok(rv.collect::<Result<Vec<_>, _>>()?)
}
pub fn which(cmd: &String, conn: &Connection) -> Result<Vec<String>, rusqlite::Error> {
let mut stmt = conn.prepare("SELECT project FROM provides WHERE program = ?1")?;
let mut rv = Vec::new();
let mut rows = stmt.query(params![cmd])?;
while let Some(row) = rows.next()? {
rv.push(row.get(0)?);
}
Ok(rv)
}
pub fn projects_for_symbol(
symbol: &String,
conn: &Connection,
) -> Result<Vec<String>, rusqlite::Error> {
let mut stmt = conn.prepare(
"
SELECT project FROM provides WHERE program = ?1
UNION
SELECT project FROM aliases WHERE LOWER(alias) = LOWER(?1);",
)?;
let mut rv = Vec::new();
let mut rows = stmt.query(params![symbol])?;
while let Some(row) = rows.next()? {
rv.push(row.get(0)?);
}
Ok(rv)
}
pub fn runtime_env_for_project(
project: &String,
conn: &Connection,
) -> Result<HashMap<String, String>, Box<dyn Error>> {
let sql = "SELECT envline FROM runtime_env WHERE project = ?1";
let mut stmt = conn.prepare(sql)?;
let mut rows = stmt.query(params![project])?;
let mut env = HashMap::new();
while let Some(row) = rows.next()? {
let envline: String = row.get(0)?;
let (key, value) = envline.split_once('=').unwrap();
env.insert(key.to_string(), value.to_string());
}
Ok(env)
}
pub fn companions_for_projects(
projects: &[String],
conn: &Connection,
) -> Result<Vec<PackageReq>, Box<dyn Error>> {
if projects.is_empty() {
return Ok(Vec::new());
}
// Generate placeholders for the IN clause (?, ?, ?, ...)
let placeholders = projects.iter().map(|_| "?").collect::<Vec<_>>().join(", ");
let query = format!(
"SELECT pkgspec FROM companions WHERE project IN ({})",
placeholders
);
let mut stmt = conn.prepare(&query)?;
let companions = stmt.query_map(
rusqlite::params_from_iter(projects.iter()), // Efficiently bind the projects
|row| {
let pkgspec: String = row.get(0)?;
let pkgrq = PackageReq::parse(&pkgspec).unwrap(); //TODO handle error!
Ok(pkgrq)
},
)?;
// Collect results into a Vec<PackageReq>, propagating errors
Ok(companions.collect::<Result<Vec<_>, _>>()?)
}
pub fn programs_for_project(
project: &String,
conn: &Connection,
) -> Result<Vec<String>, rusqlite::Error> {
let mut stmt = conn.prepare("SELECT program FROM provides WHERE project = ?1")?;
let mut rv = Vec::new();
let mut rows = stmt.query(params![project])?;
while let Some(row) = rows.next()? {
rv.push(row.get(0)?);
}
Ok(rv)
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/hydrate.rs | crates/lib/src/hydrate.rs | use crate::types::PackageReq;
use libsemverator::range::Range as VersionReq;
use std::collections::{HashMap, HashSet};
use std::error::Error;
#[derive(Clone)]
struct Node {
parent: Option<Box<Node>>,
pkg: PackageReq,
children: HashSet<String>,
}
impl Node {
fn new(pkg: PackageReq, parent: Option<Box<Node>>) -> Self {
Self {
parent,
pkg,
children: HashSet::new(),
}
}
fn count(&self) -> usize {
let mut count = 0;
let mut node = self.parent.as_ref();
while let Some(parent_node) = node {
count += 1;
node = parent_node.parent.as_ref();
}
count
}
}
/// Hydrates dependencies and returns a topologically sorted list of packages.
pub async fn hydrate<F>(
input: &Vec<PackageReq>,
get_deps: F,
) -> Result<Vec<PackageReq>, Box<dyn Error>>
where
F: Fn(String) -> Result<Vec<PackageReq>, Box<dyn Error>>,
{
let dry = condense(input);
let mut graph: HashMap<String, Box<Node>> = HashMap::new();
let mut stack: Vec<Box<Node>> = vec![];
let mut additional_unicodes: Vec<VersionReq> = vec![];
for pkg in dry.iter() {
let node = graph
.entry(pkg.project.clone())
.or_insert_with(|| Box::new(Node::new(pkg.clone(), None)));
node.pkg.constraint = intersect_constraints(&node.pkg.constraint, &pkg.constraint)?;
stack.push(node.clone());
}
while let Some(mut current) = stack.pop() {
for child_pkg in get_deps(current.pkg.project.clone())? {
let child_node = graph
.entry(child_pkg.project.clone())
.or_insert_with(|| Box::new(Node::new(child_pkg.clone(), Some(current.clone()))));
let intersection =
intersect_constraints(&child_node.pkg.constraint, &child_pkg.constraint);
if let Ok(constraint) = intersection {
child_node.pkg.constraint = constraint;
current.children.insert(child_node.pkg.project.clone());
stack.push(child_node.clone());
} else if child_pkg.project == "unicode.org" {
// we handle unicode.org for now to allow situations like:
// https://github.com/pkgxdev/pantry/issues/4104
// https://github.com/pkgxdev/pkgx/issues/899
additional_unicodes.push(child_pkg.constraint);
} else {
return Err(intersection.unwrap_err());
}
}
}
let mut pkgs: Vec<&Box<Node>> = graph.values().collect();
pkgs.sort_by_key(|node| node.count());
let mut pkgs: Vec<PackageReq> = pkgs.into_iter().map(|node| node.pkg.clone()).collect();
// see above explanation
for constraint in additional_unicodes {
let pkg = PackageReq {
project: "unicode.org".to_string(),
constraint,
};
pkgs.push(pkg);
}
Ok(pkgs)
}
/// Condenses a list of `PackageRequirement` by intersecting constraints for duplicates.
fn condense(pkgs: &Vec<PackageReq>) -> Vec<PackageReq> {
let mut out: Vec<PackageReq> = vec![];
for pkg in pkgs {
if let Some(existing) = out.iter_mut().find(|p| p.project == pkg.project) {
existing.constraint = intersect_constraints(&existing.constraint, &pkg.constraint)
.expect("Failed to intersect constraints");
} else {
out.push(pkg.clone());
}
}
out
}
/// Intersects two version constraints.
fn intersect_constraints(a: &VersionReq, b: &VersionReq) -> Result<VersionReq, Box<dyn Error>> {
a.intersect(b).map_err(|e| e.into())
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/env.rs | crates/lib/src/env.rs | use std::{
collections::{HashMap, HashSet},
error::Error,
path::PathBuf,
};
#[cfg(unix)]
use std::str::FromStr;
use crate::{
platform_case_aware_env_key::{construct_platform_case_aware_env_key, PlatformCaseAwareEnvKey},
types::Installation,
};
#[cfg(unix)]
pub const SEP: &str = ":";
#[cfg(windows)]
pub const SEP: &str = ";";
pub fn map(installations: &Vec<Installation>) -> HashMap<String, Vec<String>> {
let mut vars: HashMap<EnvKey, OrderedSet<PathBuf>> = HashMap::new();
let projects: HashSet<&str> = installations
.iter()
.map(|i| i.pkg.project.as_str())
.collect();
for installation in installations {
for key in EnvKey::iter() {
if let Some(suffixes) = suffixes(&key) {
for suffix in suffixes {
let path = installation.path.join(suffix);
if path.is_dir() {
vars.entry(key.clone())
.or_insert_with(OrderedSet::new)
.add(path);
}
}
}
}
if projects.contains("cmake.org") {
vars.entry(EnvKey::CmakePrefixPath)
.or_insert_with(OrderedSet::new)
.add(installation.path.clone());
}
}
// don’t break `man`
#[cfg(unix)]
if vars.contains_key(&EnvKey::Manpath) {
vars.get_mut(&EnvKey::Manpath)
.unwrap()
.add(PathBuf::from_str("/usr/share/man").unwrap());
}
// https://github.com/pkgxdev/libpkgx/issues/70
#[cfg(unix)]
if vars.contains_key(&EnvKey::XdgDataDirs) {
let set = vars.get_mut(&EnvKey::XdgDataDirs).unwrap();
set.add(PathBuf::from_str("/usr/local/share").unwrap());
set.add(PathBuf::from_str("/usr/share").unwrap());
}
let mut rv: HashMap<String, Vec<String>> = HashMap::new();
for (key, set) in vars {
let set = set
.items
.iter()
.map(|p| p.to_string_lossy().to_string())
.collect();
rv.insert(key.as_ref().to_string(), set);
}
rv
}
use rusqlite::Connection;
use strum::IntoEnumIterator;
use strum_macros::{AsRefStr, EnumIter, EnumString};
#[derive(Debug, EnumString, AsRefStr, PartialEq, Eq, Hash, Clone, EnumIter)]
#[strum(serialize_all = "SCREAMING_SNAKE_CASE")]
enum EnvKey {
Path,
Manpath,
PkgConfigPath,
#[cfg(unix)]
LibraryPath,
#[cfg(unix)]
LdLibraryPath,
#[cfg(unix)]
Cpath,
XdgDataDirs,
CmakePrefixPath,
#[cfg(target_os = "macos")]
DyldFallbackLibraryPath,
SslCertFile,
#[cfg(unix)]
Ldflags,
PkgxDir,
AclocalPath,
#[cfg(windows)]
Lib,
#[cfg(windows)]
Include,
}
struct OrderedSet<T: Eq + std::hash::Hash + Clone> {
items: Vec<T>,
set: HashSet<T>,
}
impl<T: Eq + std::hash::Hash + Clone> OrderedSet<T> {
fn new() -> Self {
OrderedSet {
items: Vec::new(),
set: HashSet::new(),
}
}
fn add(&mut self, item: T) {
if self.set.insert(item.clone()) {
self.items.push(item);
}
}
}
fn suffixes(key: &EnvKey) -> Option<Vec<&'static str>> {
match key {
EnvKey::Path => Some(vec!["bin", "sbin"]),
EnvKey::Manpath => Some(vec!["man", "share/man"]),
EnvKey::PkgConfigPath => Some(vec!["share/pkgconfig", "lib/pkgconfig"]),
EnvKey::XdgDataDirs => Some(vec!["share"]),
EnvKey::AclocalPath => Some(vec!["share/aclocal"]),
#[cfg(unix)]
EnvKey::LibraryPath | EnvKey::LdLibraryPath => Some(vec!["lib", "lib64"]),
#[cfg(target_os = "macos")]
EnvKey::DyldFallbackLibraryPath => Some(vec!["lib", "lib64"]),
#[cfg(unix)]
EnvKey::Cpath => Some(vec!["include"]),
EnvKey::CmakePrefixPath | EnvKey::SslCertFile | EnvKey::PkgxDir => None,
#[cfg(unix)]
EnvKey::Ldflags => None,
#[cfg(windows)]
EnvKey::Lib => Some(vec!["lib"]),
#[cfg(windows)]
EnvKey::Include => Some(vec!["include"]),
}
}
pub fn mix(input: HashMap<String, Vec<String>>) -> HashMap<PlatformCaseAwareEnvKey, String> {
let mut rv: HashMap<PlatformCaseAwareEnvKey, String> = HashMap::new();
for (key, value) in std::env::vars() {
rv.insert(construct_platform_case_aware_env_key(key), value);
}
for (key, value) in input.iter() {
let key = &construct_platform_case_aware_env_key(key.clone());
if let Some(values) = rv.get(key) {
rv.insert(key.clone(), format!("{}{}{}", value.join(SEP), SEP, values));
} else {
rv.insert(key.clone(), value.join(SEP));
}
}
rv
}
pub fn mix_runtime(
input: &HashMap<PlatformCaseAwareEnvKey, String>,
installations: &Vec<Installation>,
conn: &Connection,
) -> Result<HashMap<PlatformCaseAwareEnvKey, String>, Box<dyn Error>> {
let mut output: HashMap<PlatformCaseAwareEnvKey, String> = input
.iter()
.map(|(k, v)| (k.clone(), format!("{}{}${}", v, SEP, k)))
.collect();
for installation in installations.clone() {
let runtime_env =
crate::pantry_db::runtime_env_for_project(&installation.pkg.project, conn)?;
for (key, runtime_value) in runtime_env {
let runtime_value = expand_moustaches(&runtime_value, &installation, installations);
let insert_key = construct_platform_case_aware_env_key(key.clone());
let new_value = if let Some(curr_value) = output.get(&insert_key) {
if runtime_value.contains(&format!("${}", key)) {
runtime_value.replace(&format!("${}", key), curr_value)
} else {
// parent env overrides runtime env if the runtime env
// has no capacity to include the parent env
curr_value.clone()
}
} else if runtime_value.contains(&format!("${}", key)) {
runtime_value
} else {
format!("${{{}:-{}}}", key, runtime_value)
};
output.insert(insert_key, new_value);
}
}
Ok(output)
}
pub fn expand_moustaches(input: &str, pkg: &Installation, deps: &Vec<Installation>) -> String {
let mut output = input.to_string();
if output.starts_with("${{") {
output.replace_range(..1, "");
}
output = output.replace("{{prefix}}", &pkg.path.to_string_lossy());
output = output.replace("{{version}}", &format!("{}", &pkg.pkg.version));
output = output.replace("{{version.major}}", &format!("{}", pkg.pkg.version.major));
output = output.replace("{{version.minor}}", &format!("{}", pkg.pkg.version.minor));
output = output.replace("{{version.patch}}", &format!("{}", pkg.pkg.version.patch));
output = output.replace(
"{{version.marketing}}",
&format!("{}.{}", pkg.pkg.version.major, pkg.pkg.version.minor),
);
for dep in deps {
let prefix = format!("deps.{}", dep.pkg.project);
output = output.replace(
&format!("{{{{{}.prefix}}}}", prefix),
&dep.path.to_string_lossy(),
);
output = output.replace(
&format!("{{{{{}.version}}}}", prefix),
&format!("{}", &dep.pkg.version),
);
output = output.replace(
&format!("{{{{{}.version.major}}}}", prefix),
&format!("{}", dep.pkg.version.major),
);
output = output.replace(
&format!("{{{{{}.version.minor}}}}", prefix),
&format!("{}", dep.pkg.version.minor),
);
output = output.replace(
&format!("{{{{{}.version.patch}}}}", prefix),
&format!("{}", dep.pkg.version.patch),
);
output = output.replace(
&format!("{{{{{}.version.marketing}}}}", prefix),
&format!("{}.{}", dep.pkg.version.major, dep.pkg.version.minor),
);
}
output
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/client.rs | crates/lib/src/client.rs | use std::env;
use reqwest::{Client, ClientBuilder};
#[cfg(not(any(target_os = "macos", target_os = "windows")))]
const CERT: &[u8] = include_bytes!("amazon_root_ca1.pem");
#[cfg(not(any(target_os = "macos", target_os = "windows")))]
pub fn build_client() -> Result<Client, Box<dyn std::error::Error>> {
let mut builder = ClientBuilder::new();
let bndl = reqwest::Certificate::from_pem_bundle(CERT)?;
for cert in bndl {
builder = builder.add_root_certificate(cert);
}
builder = builder.user_agent(get_user_agent());
Ok(builder.build()?)
}
#[cfg(any(target_os = "macos", target_os = "windows"))]
pub fn build_client() -> Result<Client, Box<dyn std::error::Error>> {
Ok(ClientBuilder::new().user_agent(get_user_agent()).build()?)
}
fn get_user_agent() -> String {
let version = env!("CARGO_PKG_VERSION");
let os = std::env::consts::OS;
let arch = std::env::consts::ARCH;
let group = env::var("PKGX_USER_AGENT_GROUP");
let name = if group.is_ok() {
format!("pkgx[{}]", group.unwrap())
} else {
"pkgx".to_string()
};
format!("{name}/{version} ({os}; {arch})")
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/install.rs | crates/lib/src/install.rs | use async_compression::tokio::bufread::XzDecoder;
use fs2::FileExt;
use std::{
error::Error,
fs::{self, OpenOptions},
};
use tempfile::tempdir_in;
use tokio::task;
use tokio_tar::Archive;
// Compatibility trait lets us call `compat()` on a futures::io::AsyncRead
// to convert it into a tokio::io::AsyncRead.
use tokio_util::compat::FuturesAsyncReadCompatExt;
// Lets us call into_async_read() to convert a futures::stream::Stream into a
// futures::io::AsyncRead.
use futures::stream::TryStreamExt;
use crate::{
cellar,
client::build_client,
config::Config,
inventory,
types::{Installation, Package},
};
pub enum InstallEvent {
DownloadSize(u64), // Total size of the download in bytes
Progress(u64), // we downloaded n bytes
}
//TODO set UserAgent
pub async fn install<F>(
pkg: &Package,
config: &Config,
mut event_callback: Option<F>,
) -> Result<Installation, Box<dyn Error>>
where
F: FnMut(InstallEvent) + Send + 'static,
{
let shelf = config.pkgx_dir.join(&pkg.project);
fs::create_dir_all(&shelf)?;
#[cfg(windows)]
let lockfile = OpenOptions::new()
.read(true)
.write(true)
.create(true)
.truncate(true)
.open(shelf.join("lockfile"))?;
#[cfg(not(windows))]
let lockfile = OpenOptions::new()
.read(true) // Open the directory in read-only mode
.open(shelf.clone())?;
task::spawn_blocking({
let lockfile = lockfile.try_clone()?;
move || {
lockfile
.lock_exclusive()
.expect("unexpected error: install locking failed");
}
})
.await?;
let dst_path = cellar::dst(pkg, config);
// did another instance of pkgx install us while we waited for the lock?
// if so, we’re good: eject
if dst_path.is_dir() {
FileExt::unlock(&lockfile)?;
return Ok(Installation {
path: dst_path,
pkg: pkg.clone(),
});
}
let url = inventory::get_url(pkg, config);
let client = build_client()?;
let rsp = client.get(url).send().await?.error_for_status()?;
let total_size = rsp
.content_length()
.ok_or("Failed to get content length from response")?;
if let Some(cb) = event_callback.as_mut() {
cb(InstallEvent::DownloadSize(total_size));
}
let stream = rsp.bytes_stream();
//TODO we don’t want to add inspect_ok to the stream at all in --silent mode
// ^^ but the borrow checker despises us with a venom I can barely articulate if we try
let stream = stream.inspect_ok(move |chunk| {
if let Some(cb) = event_callback.as_mut() {
cb(InstallEvent::Progress(chunk.len() as u64));
}
});
let stream = stream
.map_err(|e| futures::io::Error::new(futures::io::ErrorKind::Other, e))
.into_async_read();
let stream = stream.compat();
// Step 2: Create a XZ decoder
let decoder = XzDecoder::new(stream);
// Step 3: Make a temporary directory to extract the tarball into
let temp_dir = tempdir_in(config.pkgx_dir.join(&pkg.project))?;
// Step 4: Extract the tar archive
let mut archive = Archive::new(decoder);
archive.unpack(&temp_dir).await?;
// Step 5: atomically move from temp dir to installation location
let partial_path = format!("{}/v{}", pkg.project, pkg.version.raw);
fs::rename(temp_dir.path().join(&partial_path), &dst_path)?;
let installation = Installation {
path: dst_path,
pkg: pkg.clone(),
};
#[cfg(not(windows))]
symlink(&installation, config).await?;
// ^^ you need admin privs to symlink on windows (wtf)
FileExt::unlock(&lockfile)?;
Ok(installation)
}
#[cfg(not(windows))]
use {
libsemverator::range::Range as VersionReq, libsemverator::semver::Semver as Version,
std::collections::VecDeque, std::path::Path, std::path::PathBuf,
};
#[cfg(not(windows))]
async fn symlink(installation: &Installation, config: &Config) -> Result<(), Box<dyn Error>> {
let mut versions: VecDeque<(Version, PathBuf)> = cellar::ls(&installation.pkg.project, config)
.await?
.into_iter()
.map(|entry| (entry.pkg.version, entry.path))
.collect();
versions.make_contiguous().sort_by(|a, b| a.0.cmp(&b.0));
if versions.is_empty() {
return Err(format!("no versions for package {}", installation.pkg.project).into());
}
let shelf = installation.path.parent().unwrap();
let newest = versions.back().unwrap(); // Safe as we've checked it's not empty
let v_mm = format!(
"{}.{}",
installation.pkg.version.major, installation.pkg.version.minor
);
let minor_range = if installation.pkg.version.major > 0 {
VersionReq::caret(&v_mm)?
} else {
VersionReq::parse(&format!(
">={},<0.{}",
v_mm,
installation.pkg.version.minor + 1
))?
};
let most_minor = versions
.iter()
.filter(|(version, _)| minor_range.satisfies(version))
.next_back()
.ok_or_else(|| {
anyhow::anyhow!(
"Could not find most minor version for {}",
installation.pkg.project
)
})?;
if most_minor.0 != installation.pkg.version {
return Ok(());
}
make_symlink(shelf, &format!("v{}", v_mm), installation).await?;
// bug in semverator
let major_range = VersionReq::parse(&format!("^{}", installation.pkg.version.major))?;
let most_major = versions
.iter()
.filter(|(version, _)| major_range.satisfies(version))
.next_back()
.ok_or_else(|| anyhow::anyhow!("Could not find most major version"))?;
if most_major.0 != installation.pkg.version {
return Ok(());
}
make_symlink(
shelf,
&format!("v{}", installation.pkg.version.major),
installation,
)
.await?;
if installation.pkg.version == newest.0 {
make_symlink(shelf, "v*", installation).await?;
}
Ok(())
}
#[cfg(not(windows))]
async fn make_symlink(
shelf: &Path,
symname: &str,
installation: &Installation,
) -> Result<(), Box<dyn Error>> {
let symlink_path = shelf.join(symname);
if symlink_path.is_symlink() {
if let Err(err) = fs::remove_file(&symlink_path) {
if err.kind() != std::io::ErrorKind::NotFound {
return Err(err.into());
}
}
}
let target = installation
.path
.file_name()
.ok_or_else(|| anyhow::anyhow!("Could not get the base name of the installation path"))?;
#[cfg(not(windows))]
std::os::unix::fs::symlink(target, &symlink_path)?;
#[cfg(windows)]
std::os::windows::fs::symlink_dir(target, symlink_path)?;
Ok(())
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/types.rs | crates/lib/src/types.rs | use lazy_static::lazy_static;
use libsemverator::range::Range as VersionReq;
use libsemverator::semver::Semver as Version;
use serde::ser::SerializeStruct;
use serde::{Serialize, Serializer};
use std::error::Error;
use std::fmt;
//TODO regex is probs not most efficient (but do perf tests if you change it)
lazy_static! {
static ref PACKAGE_REGEX: Regex = Regex::new(r"^(.+?)(([\^=~<>@].+)|\*)?$").unwrap();
}
#[derive(Debug, Clone, serde::Serialize)]
pub struct Package {
pub project: String,
pub version: Version,
}
impl fmt::Display for Package {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}={}", self.project, &self.version)
}
}
#[derive(Debug, Clone)]
pub struct PackageReq {
pub project: String,
pub constraint: VersionReq,
}
use regex::Regex;
impl PackageReq {
pub fn parse(pkgspec: &str) -> Result<Self, Box<dyn Error>> {
let input = pkgspec.trim();
let captures = PACKAGE_REGEX
.captures(input)
.ok_or_else(|| format!("invalid pkgspec: {}", input))?;
let project = captures.get(1).unwrap().as_str().to_string();
let str = if let Some(cap) = captures.get(2) {
let cap = cap.as_str();
if cap.trim() == "" {
"*"
} else {
cap
}
} else {
"*"
};
let constraint = VersionReq::parse(str)?;
Ok(Self {
project,
constraint,
})
}
}
impl fmt::Display for PackageReq {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.constraint.raw == "*" {
write!(f, "{}", self.project)
} else {
write!(f, "{}{}", self.project, &self.constraint)
}
}
}
#[derive(Debug, Clone)]
pub struct Installation {
pub path: std::path::PathBuf,
pub pkg: Package,
}
impl Serialize for Installation {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut state = serializer.serialize_struct("MyType", 3)?;
state.serialize_field("path", &self.path)?;
state.serialize_field("project", &self.pkg.project)?;
state.serialize_field("version", &self.pkg.version)?;
state.end()
}
}
// These are only used per build at present
#[allow(dead_code)]
pub enum Host {
Darwin,
Linux,
Windows,
}
// These are only used per build at present
#[allow(dead_code)]
pub enum Arch {
Arm64,
X86_64,
}
pub fn host() -> (Host, Arch) {
#[cfg(target_os = "macos")]
let host = Host::Darwin;
#[cfg(target_os = "linux")]
let host = Host::Linux;
#[cfg(windows)]
let host = Host::Windows;
#[cfg(target_arch = "aarch64")]
let arch = Arch::Arm64;
#[cfg(target_arch = "x86_64")]
let arch = Arch::X86_64;
#[cfg(not(any(target_arch = "aarch64", target_arch = "x86_64")))]
panic!("Unsupported architecture");
(host, arch)
}
impl fmt::Display for Host {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let os_str = match self {
Host::Linux => "linux",
Host::Darwin => "darwin",
Host::Windows => "windows",
};
write!(f, "{}", os_str)
}
}
impl fmt::Display for Arch {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let os_str = match self {
Arch::Arm64 => "aarch64",
Arch::X86_64 => "x86-64",
};
write!(f, "{}", os_str)
}
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/pantry.rs | crates/lib/src/pantry.rs | use crate::{config::Config, types::PackageReq};
use libsemverator::range::Range as VersionReq;
use serde::Deserialize;
use serde::Deserializer;
use std::collections::HashMap;
use std::fs;
use std::path::PathBuf;
pub struct PantryEntry {
pub project: String,
pub deps: Vec<PackageReq>,
pub programs: Vec<String>,
pub companions: Vec<PackageReq>,
pub env: HashMap<String, String>,
pub display_name: Option<String>,
}
impl PantryEntry {
fn from_path(path: &PathBuf, pantry_dir: &PathBuf) -> Result<Self, Box<dyn std::error::Error>> {
let project = path
.parent()
.unwrap()
.strip_prefix(pantry_dir)
.unwrap()
.to_str()
.unwrap()
.to_string();
#[cfg(windows)]
let project = project.replace("\\", "/");
Self::from_raw_entry(RawPantryEntry::from_path(path)?, project)
}
fn from_raw_entry(
entry: RawPantryEntry,
project: String,
) -> Result<Self, Box<dyn std::error::Error>> {
let deps = if let Some(deps) = entry.dependencies {
deps.0
.iter()
.map(|(project, constraint)| {
VersionReq::parse(constraint).map(|constraint| PackageReq {
project: project.clone(),
constraint,
})
})
.collect::<Result<Vec<_>, _>>()?
} else {
vec![]
};
let programs = if let Some(provides) = entry.provides {
provides.0
} else {
vec![]
};
let companions = if let Some(companions) = entry.companions {
companions
.0
.iter()
.map(|(k, v)| {
VersionReq::parse(v).map(|constraint| PackageReq {
project: k.clone(),
constraint,
})
})
.collect::<Result<Vec<_>, _>>()?
} else {
vec![]
};
let env = if let Some(runtime) = entry.runtime {
runtime.env
} else {
HashMap::new()
};
let display_name = entry.display_name;
Ok(Self {
deps,
project,
env,
companions,
programs,
display_name,
})
}
}
pub struct PackageEntryIterator {
stack: Vec<PathBuf>, // stack for directories to visit
pantry_dir: PathBuf,
}
impl PackageEntryIterator {
pub fn new(pantry_dir: PathBuf) -> Self {
Self {
stack: vec![pantry_dir.clone()],
pantry_dir,
}
}
}
impl Iterator for PackageEntryIterator {
type Item = PantryEntry;
fn next(&mut self) -> Option<Self::Item> {
while let Some(path) = self.stack.pop() {
if path.is_dir() {
// push subdirectories and files into the stack
if let Ok(entries) = fs::read_dir(&path) {
for entry in entries.flatten() {
self.stack.push(entry.path());
}
}
} else if path.file_name() == Some("package.yml".as_ref()) {
if let Ok(entry) = PantryEntry::from_path(&path, &self.pantry_dir) {
return Some(entry);
} else if cfg!(debug_assertions) {
eprintln!("parse failure: {:?}", path);
}
}
}
None
}
}
pub fn ls(config: &Config) -> PackageEntryIterator {
PackageEntryIterator::new(config.pantry_dir.join("projects"))
}
#[derive(Debug, Deserialize)]
struct RawPantryEntry {
dependencies: Option<Deps>,
provides: Option<Provides>,
companions: Option<Deps>,
runtime: Option<Runtime>,
#[serde(rename = "display-name")]
display_name: Option<String>,
}
#[derive(Debug)]
struct Runtime {
env: HashMap<String, String>,
}
impl<'de> Deserialize<'de> for Runtime {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[cfg(target_os = "macos")]
let platform_key = "darwin";
#[cfg(target_os = "linux")]
let platform_key = "linux";
#[cfg(target_os = "windows")]
let platform_key = "windows";
#[cfg(target_arch = "aarch64")]
let arch_key = "aarch64";
#[cfg(target_arch = "x86_64")]
let arch_key = "x86-64";
fn stringify(value: serde_yaml::Value) -> Option<String> {
match value {
serde_yaml::Value::String(s) => Some(s.clone()),
serde_yaml::Value::Number(n) => Some(n.to_string()),
serde_yaml::Value::Bool(b) => Some(b.to_string()),
_ => None,
}
}
let mut result = HashMap::new();
let root: HashMap<String, serde_yaml::Value> = Deserialize::deserialize(deserializer)?;
if let Some(env) = root.get("env").and_then(|x| x.as_mapping()).cloned() {
for (key, value) in env {
if key == "linux" || key == "darwin" || key == "windows" {
// If the key is platform-specific, only include values for the current platform
if key == platform_key {
if let serde_yaml::Value::Mapping(value) = value {
for (key, value) in value {
if let (Some(key), Some(value)) = (stringify(key), stringify(value))
{
result.insert(key, value);
}
}
}
}
} else if key == "aarch64" || key == "x86-64" {
if key == arch_key {
if let serde_yaml::Value::Mapping(value) = value {
for (key, value) in value {
if let (Some(key), Some(value)) = (stringify(key), stringify(value))
{
result.insert(key, value);
}
}
}
}
} else if let (Some(key), Some(value)) = (stringify(key), stringify(value)) {
result.insert(key, value);
}
}
}
Ok(Runtime { env: result })
}
}
#[derive(Debug)]
struct Deps(HashMap<String, String>);
impl<'de> Deserialize<'de> for Deps {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
// Deserialize the map as a generic HashMap
let full_map: HashMap<String, serde_yaml::Value> = Deserialize::deserialize(deserializer)?;
// Determine the current platform
#[cfg(target_os = "macos")]
let platform_key = "darwin";
#[cfg(target_os = "linux")]
let platform_key = "linux";
#[cfg(target_os = "windows")]
let platform_key = "windows";
// Create the result map
let mut result = HashMap::new();
fn handle_value(input: &serde_yaml::Value) -> Option<String> {
match input {
serde_yaml::Value::String(s) => Some(if s.chars().next().unwrap().is_numeric() {
format!("^{}", s)
} else {
s.clone()
}),
serde_yaml::Value::Number(n) => Some(format!("^{}", n)),
_ => None,
}
}
for (key, value) in full_map {
if key == "linux" || key == "darwin" || key == "windows" {
// If the key is platform-specific, only include values for the current platform
if key == platform_key {
if let serde_yaml::Value::Mapping(platform_values) = value {
for (k, v) in platform_values {
if let (serde_yaml::Value::String(k), Some(v)) = (k, handle_value(&v)) {
result.insert(k, v);
}
}
}
}
} else if let Some(value) = handle_value(&value) {
result.insert(key, value);
}
}
Ok(Deps(result))
}
}
#[derive(Debug)]
struct Provides(Vec<String>);
impl<'de> Deserialize<'de> for Provides {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
// Define an enum to capture the possible YAML structures
#[derive(Deserialize)]
#[serde(untagged)]
enum ProvidesHelper {
List(Vec<String>),
Map(HashMap<String, Vec<String>>),
}
match ProvidesHelper::deserialize(deserializer)? {
ProvidesHelper::List(list) => Ok(Provides(list)),
ProvidesHelper::Map(map) => {
#[cfg(target_os = "macos")]
let key = "darwin";
#[cfg(target_os = "linux")]
let key = "linux";
#[cfg(windows)]
let key = "windows";
if let Some(values) = map.get(key) {
Ok(Provides(values.clone()))
} else {
Ok(Provides(Vec::new())) // Return an empty Vec if the key isn't found
}
}
}
}
}
impl RawPantryEntry {
fn from_path(path: &PathBuf) -> Result<Self, Box<dyn std::error::Error>> {
let content = fs::read_to_string(path)?;
Ok(serde_yaml::from_str(&content)?)
}
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/utils.rs | crates/lib/src/utils.rs | #[cfg(not(windows))]
use std::os::unix::fs::PermissionsExt;
use std::{error::Error, path::Path};
pub async fn find_program(arg: &str, paths: &Vec<String>) -> Result<String, Box<dyn Error>> {
if Path::new(arg).is_absolute() {
return Ok(arg.to_string());
} else if arg.contains("/") {
return Ok(std::env::current_dir()
.unwrap()
.join(arg)
.to_str()
.unwrap()
.to_string());
}
for path in paths {
#[cfg(unix)]
let full_path = Path::new(&path).join(arg);
#[cfg(unix)]
if full_path.is_file() {
if let Ok(metadata) = full_path.metadata() {
if metadata.permissions().mode() & 0o111 != 0 {
return Ok(full_path.to_str().unwrap().to_string());
}
}
}
#[cfg(windows)]
for ext in ["exe", "bat", "cmd"].iter() {
let full_path = Path::new(&path).join(format!("{}.{}", arg, ext));
if full_path.is_file() {
return Ok(full_path.to_str().unwrap().to_string());
}
}
}
Err(format!("cmd not found: {}", arg).into())
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/resolve.rs | crates/lib/src/resolve.rs | use crate::config::Config;
use crate::types::{Installation, Package, PackageReq};
use crate::{cellar, inventory};
use std::error::Error;
#[derive(Debug, Default)]
pub struct Resolution {
/// fully resolved list (includes both installed and pending)
pub pkgs: Vec<Package>,
/// already installed packages
pub installed: Vec<Installation>,
/// these are the pkgs that aren’t yet installed
pub pending: Vec<Package>,
}
//TODO no need to take array since it doesn’t consider anything
use futures::stream::{FuturesUnordered, StreamExt};
pub async fn resolve(
reqs: &Vec<PackageReq>,
config: &Config,
) -> Result<Resolution, Box<dyn Error>> {
let mut rv = Resolution::default();
// Create a FuturesUnordered to run the tasks concurrently
let mut futures = FuturesUnordered::new();
for req in reqs {
futures.push(async move {
if let Some(installation) = cellar::resolve(req, config).await? {
Ok::<_, Box<dyn Error>>((
Some((installation.clone(), installation.pkg.clone())),
None,
))
} else if let Some(version) = inventory::select(req, config).await? {
let pkg = Package {
project: req.project.clone(),
version,
};
Ok::<_, Box<dyn Error>>((None, Some(pkg)))
} else {
Err(Box::new(ResolveError { pkg: req.clone() }) as Box<dyn Error>)
}
});
}
// Process the results as they are completed
while let Some(result) = futures.next().await {
match result? {
(Some((installation, pkg)), None) => {
rv.installed.push(installation);
rv.pkgs.push(pkg);
}
(None, Some(pkg)) => {
rv.pkgs.push(pkg.clone());
rv.pending.push(pkg);
}
_ => unreachable!(), // This should not happen
}
}
Ok(rv)
}
use std::fmt;
#[derive(Debug)]
pub struct ResolveError {
pub pkg: PackageReq, // Holds the package or requirement
}
impl Error for ResolveError {}
impl fmt::Display for ResolveError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "not-found: pkg: {:?}", self.pkg)
}
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/lib/src/install_multi.rs | crates/lib/src/install_multi.rs | use std::error::Error;
use std::sync::Arc;
use crate::install::{install, InstallEvent};
use crate::types::{Installation, Package};
use futures::stream::FuturesUnordered;
use futures::StreamExt;
use crate::config::Config;
pub trait ProgressBarExt {
fn inc(&self, n: u64);
fn inc_length(&self, n: u64);
}
pub async fn install_multi(
pending: &[Package],
config: &Config,
pb: Option<Arc<impl ProgressBarExt + Send + Sync + 'static>>,
) -> Result<Vec<Installation>, Box<dyn Error>> {
pending
.iter()
.map(|pkg| {
install(
pkg,
config,
pb.clone().map(|pb| {
move |event| match event {
InstallEvent::DownloadSize(size) => {
pb.inc_length(size);
}
InstallEvent::Progress(chunk) => {
pb.inc(chunk);
}
}
}),
)
})
.collect::<FuturesUnordered<_>>()
.collect::<Vec<_>>()
.await
.into_iter()
.collect()
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/cli/src/help.rs | crates/cli/src/help.rs | use regex::Regex;
fn dim(input: &str) -> String {
// Placeholder function for "dim" styling
format!("\x1b[2m{}\x1b[0m", input)
}
pub fn usage() -> String {
#[cfg(target_os = "macos")]
let open = "open";
#[cfg(windows)]
let open = "foo";
#[cfg(target_os = "linux")]
let open = "xdg-open";
let usage = r##"
usage:
pkgx [+pkg@x.y…] <program|path> [--] [arg…]
examples:
$ pkgx gum format "# hello world" "sup?"
$ pkgx node@18 --eval 'console.log("hello world")'
$ pkgx +openssl cargo build
modes:
$ pkgx --query bun # could you run `bun`? (-Q)
$ pkgx --help # hi mom!
$ pkgx --version
flags:
-q, --quiet # suppress brief informational messages
-qq, --silent # no chat. no errors. just execute.
-j, --json=v2 # output JSON (if sensible)
-C, --chdir <d> # change directory first
--sync # sync first (note: rarely if ever needed)
-v # print version and continue
more:
$ OPEN https://docs.pkgx.sh
"##;
let usage = usage
.replace('[', &dim("["))
.replace(']', &dim("]"))
.replace('<', &dim("<"))
.replace('>', &dim(">"))
.replace('$', &dim("$"))
.replace('|', &dim("|"))
.replace("OPEN", open);
let re = Regex::new("(?m) #.*$").unwrap();
re.replace_all(&usage, |caps: ®ex::Captures| {
dim(caps.get(0).unwrap().as_str())
})
.trim()
.to_string()
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/cli/src/args.rs | crates/cli/src/args.rs | use console::style;
pub enum Mode {
X,
Help,
Version,
Query,
}
pub struct Flags {
pub quiet: bool,
pub silent: bool,
pub json: Option<isize>,
pub version_n_continue: bool,
pub shebang: bool,
pub sync: bool,
pub chdir: Option<String>,
}
pub struct Args {
pub plus: Vec<String>,
pub args: Vec<String>,
pub find_program: bool,
pub mode: Mode,
pub flags: Flags,
}
pub fn parse() -> Args {
let mut mode = Mode::X;
let mut plus = Vec::new();
let mut args = Vec::new();
let mut silent: bool = false;
let mut quiet: bool = false;
let mut json = None;
let mut find_program = false;
let mut collecting_args = false;
let mut version_n_continue = false;
let mut shebang = false;
let mut sync = false;
let mut chdir = None;
let json_latest_v: isize = 2;
let mut args_iter = std::env::args().skip(1);
while let Some(arg) = args_iter.next() {
if collecting_args {
args.push(arg);
} else if arg.starts_with('+') {
plus.push(arg.trim_start_matches('+').to_string());
} else if arg == "--" {
find_program = false;
collecting_args = true;
} else if arg.starts_with("--") {
match arg.as_str() {
"--shebang" => shebang = true,
"--json" => {
if !silent {
eprintln!(
"{} use --json=v{}",
style("warning: --json is not stable").yellow(),
json_latest_v
);
}
json = Some(2);
}
"--chdir" | "--cd" => chdir = args_iter.next(),
"--json=v1" => json = Some(1),
"--json=v2" => json = Some(2),
"--silent" => silent = true,
"--help" => mode = Mode::Help,
"--version" => mode = Mode::Version,
"--quiet" => quiet = true,
"--query" => mode = Mode::Query,
"--sync" => sync = true,
"--shellcode" => {
if !silent {
eprintln!("{}", style("⨯ migration required").red());
eprintln!(
"{} pkgx^2 is now exclusively focused on executing packages",
style("│").red()
);
eprintln!(
"{} you need to migrate to the new, independent `dev` command",
style("│").red()
);
eprintln!("{} run the following:", style("│").red());
eprintln!(
"{} pkgx pkgx^1 deintegrate && pkgx dev integrate",
style("╰─➤").red()
);
}
std::process::exit(1);
}
_ => panic!("unknown argument {}", arg),
}
} else if arg.starts_with('-') {
// spit arg into characters
for c in arg.chars().skip(1) {
match c {
'q' => {
if quiet {
silent = true
} else {
quiet = true
}
}
'h' => mode = Mode::Help,
's' => silent = true,
'j' => json = Some(json_latest_v),
'v' => version_n_continue = true,
'!' => shebang = true,
'Q' => mode = Mode::Query,
'C' => chdir = args_iter.next(),
_ => panic!("unknown argument: -{}", c),
}
}
} else {
find_program = !arg.contains('/');
collecting_args = true;
args.push(arg);
}
}
Args {
plus,
args,
find_program,
mode,
flags: Flags {
shebang,
silent,
json,
quiet,
version_n_continue,
sync,
chdir,
},
}
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/cli/src/execve.rs | crates/cli/src/execve.rs | #[cfg(unix)]
use nix::unistd::execve as nix_execve;
#[cfg(unix)]
use std::ffi::CString;
use libpkgx::platform_case_aware_env_key::PlatformCaseAwareEnvKey;
use std::{collections::HashMap, error::Error};
#[cfg(unix)]
pub fn execve(
cmd: String,
mut args: Vec<String>,
env: HashMap<PlatformCaseAwareEnvKey, String>,
) -> Result<(), Box<dyn Error>> {
// Convert the command to a CString
let c_command = CString::new(cmd.clone())
.map_err(|e| format!("Failed to convert command to CString: {}", e))?;
// execve expects the command to be the first argument (yes, as well)
args.insert(0, cmd);
// Convert the arguments to CStrings and collect them into a Vec
let c_args: Vec<CString> = args
.iter()
.map(|arg| {
CString::new(arg.clone())
.map_err(|e| format!("Failed to convert argument to CString: {}", e))
})
.collect::<Result<_, _>>()?;
// Convert the environment to a Vec of `KEY=VALUE` strings
let env_vars: Vec<String> = env
.iter()
.map(|(key, value)| format!("{}={}", key, value))
.collect();
// Convert the environment variables to CStrings and collect them into a Vec
let c_env: Vec<CString> = env_vars
.iter()
.map(|env| {
CString::new(env.clone())
.map_err(|e| format!("Failed to convert environment variable to CString: {}", e))
})
.collect::<Result<_, _>>()?;
// Replace the process with the new command, arguments, and environment
let execve_result = nix_execve(&c_command, &c_args, &c_env);
if execve_result.is_err() {
let errno = execve_result.unwrap_err();
return Err(format!("execve failed with errno: {}", errno).into());
}
Ok(())
}
#[cfg(windows)]
use std::process::{exit, Command};
#[cfg(windows)]
pub fn execve(
cmd: String,
args: Vec<String>,
env: HashMap<PlatformCaseAwareEnvKey, String>,
) -> Result<(), Box<dyn Error>> {
let status = Command::new(cmd)
.args(args)
.envs(env.iter().map(|(k, v)| (&k.0, v)))
.spawn()?
.wait()?;
exit(status.code().unwrap_or(1));
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/cli/src/which.rs | crates/cli/src/which.rs | use libpkgx::types::PackageReq;
use rusqlite::Connection;
#[derive(Debug)]
pub enum WhichError {
CmdNotFound(String),
MultipleProjects(String, Vec<String>),
DbError(rusqlite::Error),
}
impl std::fmt::Display for WhichError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
WhichError::CmdNotFound(cmd) => write!(f, "cmd not found: {}", cmd),
WhichError::MultipleProjects(cmd, projects) => {
write!(f, "multiple projects found for {}: {:?}", cmd, projects)
}
WhichError::DbError(err) => write!(f, "db error: {}", err),
}
}
}
impl std::error::Error for WhichError {}
pub async fn which(
cmd: &String,
conn: &Connection,
pkgs: &[PackageReq],
) -> Result<String, WhichError> {
let candidates =
libpkgx::pantry_db::projects_for_symbol(cmd, conn).map_err(WhichError::DbError)?;
if candidates.len() == 1 {
Ok(candidates[0].clone())
} else if candidates.is_empty() {
Err(WhichError::CmdNotFound(cmd.clone()))
} else {
let selected_pkgs = candidates
.clone()
.into_iter()
.filter(|candidate| {
pkgs.iter().any(|pkg| {
let PackageReq { project, .. } = pkg;
project == candidate
})
})
.collect::<Vec<String>>();
if selected_pkgs.len() == 1 {
Ok(selected_pkgs[0].clone())
} else {
Err(WhichError::MultipleProjects(cmd.clone(), candidates))
}
}
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/cli/src/dump.rs | crates/cli/src/dump.rs | use std::{collections::HashMap, path::PathBuf, vec};
use libpkgx::{
env::expand_moustaches, pantry_db,
platform_case_aware_env_key::construct_platform_case_aware_env_key, types::Installation,
};
use serde::Serialize;
use serde_json::json;
pub fn dump(
conn: rusqlite::Connection,
installations: Vec<Installation>,
flags: &crate::args::Flags,
) -> Result<(), Box<dyn std::error::Error>> {
if let Some(v) = flags.json {
if v < 2 {
let env = libpkgx::env::map(&installations);
let mut runtime_env = HashMap::new();
for pkg in installations.clone() {
let pkg_runtime_env =
libpkgx::pantry_db::runtime_env_for_project(&pkg.pkg.project, &conn)?;
if !pkg_runtime_env.is_empty() {
runtime_env.insert(pkg.pkg.project, pkg_runtime_env);
}
}
let json = json!({
"pkgs": installations,
"env": env,
"runtime_env": runtime_env
});
println!("{}", json);
} else {
let mut pkgs: HashMap<String, JsonV2Pkg> = HashMap::new();
for installation in installations.clone() {
let env = libpkgx::env::map(&vec![installation.clone()]);
let project = installation.pkg.project.clone();
let mut runtime_env = libpkgx::pantry_db::runtime_env_for_project(&project, &conn)?;
for (installation_key, installation_value) in runtime_env.clone() {
let installation_value =
expand_moustaches(&installation_value, &installation, &installations);
runtime_env.insert(installation_key, installation_value);
}
let programs = pantry_db::programs_for_project(&project, &conn)?;
let companions = pantry_db::companions_for_projects(&[project.clone()], &conn)?
.iter()
.map(|c| c.to_string())
.collect::<Vec<String>>();
let pkg = JsonV2Pkg {
path: installation.path,
project,
version: installation.pkg.version,
env,
runtime_env,
programs,
companions,
};
pkgs.insert(pkg.project.clone(), pkg);
}
let json = json!({
"pkgs": pkgs, "env": libpkgx::env::map(&installations)
});
println!("{}", json);
}
} else {
let env = libpkgx::env::map(&installations);
let env = env
.iter()
.map(|(k, v)| {
(
construct_platform_case_aware_env_key(k.clone()),
v.join(":"),
)
})
.collect();
let env = libpkgx::env::mix_runtime(&env, &installations, &conn)?;
for (key, value) in env {
println!(
"{}=\"{}\"",
key,
value.replace(&format!(":${}", key), &format!("${{{}:+:${}}}", key, key))
);
}
}
Ok(())
}
#[derive(Serialize)]
struct JsonV2Pkg {
project: String,
version: libpkgx::Version,
#[serde(skip_serializing_if = "HashMap::is_empty")]
env: HashMap<String, Vec<String>>,
#[serde(skip_serializing_if = "HashMap::is_empty")]
runtime_env: HashMap<String, String>,
path: PathBuf,
#[serde(skip_serializing_if = "Vec::is_empty")]
programs: Vec<String>,
#[serde(skip_serializing_if = "Vec::is_empty")]
companions: Vec<String>,
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/cli/src/resolve.rs | crates/cli/src/resolve.rs | use libpkgx::{
config::Config,
hydrate::hydrate,
install_multi::install_multi,
pantry_db, sync,
types::{Installation, PackageReq},
VersionRange,
};
use rusqlite::Connection;
use crate::{
spinner::Spinner,
which::{which, WhichError},
};
pub async fn resolve(
args: &mut [String],
plus: &[String],
find_program: bool,
config: &Config,
conn: &mut Connection,
did_sync: bool,
spinner: &mut Spinner,
) -> std::result::Result<(Vec<Installation>, Vec<PackageReq>), Box<dyn std::error::Error>> {
spinner.set_message("resolving pkg graph…");
let mut pkgs = vec![];
for pkgspec in plus {
let mut pkgspec = parse_pkgspec(pkgspec)?;
if !config
.pantry_dir
.join("projects")
.join(pkgspec.project())
.is_dir()
{
let project = which(&pkgspec.project(), conn, &pkgs).await?;
pkgspec.set_project(project);
}
pkgs.push(pkgspec.pkgreq(config).await);
}
if find_program {
let mut pkgspec = parse_pkgspec(&args[0])?;
let cmd = pkgspec.project();
args[0] = cmd.clone(); // invoke eg. `node` rather than eg. `node@20`
let project = match which(&cmd, conn, &pkgs).await {
Err(WhichError::CmdNotFound(cmd)) => {
if !did_sync {
spinner.set_message(&format!("{} not found, syncing…", cmd));
sync::update(config, conn).await?; // cmd not found ∴ sync in case it is new
spinner.set_message("resolving pkg graph…");
which(&cmd, conn, &pkgs).await
} else {
Err(WhichError::CmdNotFound(cmd))
}
}
Err(err) => Err(err),
Ok(project) => Ok(project),
}?;
pkgspec.set_project(project.clone());
pkgs.push(pkgspec.pkgreq(config).await);
}
let companions = pantry_db::companions_for_projects(
&pkgs
.iter()
.map(|project| project.project.clone())
.collect::<Vec<_>>(),
conn,
)?;
pkgs.extend(companions);
let graph = hydrate(&pkgs, |project| pantry_db::deps_for_project(&project, conn)).await?;
let resolution = libpkgx::resolve::resolve(&graph, config).await?;
let mut installations = resolution.installed;
if !resolution.pending.is_empty() {
let installed = install_multi(&resolution.pending, config, spinner.arc()).await?;
installations.extend(installed);
}
Ok((installations, graph))
}
enum Pkgspec {
Req(PackageReq),
Latest(String),
}
impl Pkgspec {
fn project(&self) -> String {
match self {
Pkgspec::Req(req) => req.project.clone(),
Pkgspec::Latest(project) => project.clone(),
}
}
fn set_project(&mut self, project: String) {
match self {
Pkgspec::Req(req) => req.project = project,
Pkgspec::Latest(_) => *self = Pkgspec::Latest(project),
}
}
async fn constraint(&self, config: &Config) -> VersionRange {
match self {
Pkgspec::Req(req) => req.constraint.clone(),
Pkgspec::Latest(project) => match libpkgx::inventory::ls(project, config).await {
Ok(versions) if !versions.is_empty() => {
VersionRange::from_semver(versions.iter().max().unwrap()).unwrap()
}
_ => VersionRange::any(),
},
}
}
async fn pkgreq(&self, config: &Config) -> PackageReq {
let project = self.project();
let constraint = self.constraint(config).await;
PackageReq {
project,
constraint,
}
}
}
fn parse_pkgspec(pkgspec: &str) -> Result<Pkgspec, Box<dyn std::error::Error>> {
if let Some(project) = pkgspec.strip_suffix("@latest") {
Ok(Pkgspec::Latest(project.to_string()))
} else {
let pkgspec = PackageReq::parse(pkgspec)?;
Ok(Pkgspec::Req(pkgspec))
}
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/cli/src/x.rs | crates/cli/src/x.rs | use std::{collections::HashMap, result::Result};
use libpkgx::{
platform_case_aware_env_key::{construct_platform_case_aware_env_key, PlatformCaseAwareEnvKey},
types::{Installation, PackageReq},
utils,
};
use regex::Regex;
use crate::args::Flags;
pub async fn exec(
find_program: bool,
mut args: Vec<String>,
installations: Vec<Installation>,
env: HashMap<String, Vec<String>>,
flags: Flags,
conn: rusqlite::Connection,
graph: Vec<PackageReq>,
) -> Result<
(
String,
Vec<String>,
HashMap<PlatformCaseAwareEnvKey, String>,
),
Box<dyn std::error::Error>,
> {
let pkgx_lvl = std::env::var("PKGX_LVL")
.unwrap_or("0".to_string())
.parse()
.unwrap_or(0)
+ 1;
if pkgx_lvl >= 10 {
return Err("PKGX_LVL exceeded: https://github.com/orgs/pkgxdev/discussions/11".into());
}
let cmd = if find_program {
utils::find_program(&args.remove(0), &env["PATH"]).await?
} else if args[0].contains('/') {
// user specified a path to program which we should use
args.remove(0)
} else {
// user wants a system tool, eg. pkgx +wget -- git clone
// NOTE we still check the injected PATH since they may have added the tool anyway
// it’s just this route allows the user to get a non-error for delegating through to the system
let mut paths = vec![];
if let Some(pkgpaths) = env.get("PATH") {
paths.append(&mut pkgpaths.clone());
}
if let Ok(syspaths) = std::env::var("PATH") {
#[cfg(windows)]
let sep = ";";
#[cfg(not(windows))]
let sep = ":";
paths.extend(
syspaths
.split(sep)
.map(|x| x.to_string())
.collect::<Vec<String>>(),
);
}
utils::find_program(&args.remove(0), &paths).await?
};
let env = libpkgx::env::mix(env);
let mut env = libpkgx::env::mix_runtime(&env, &installations, &conn)?;
let re = Regex::new(r"^\$\{\w+:-([^}]+)\}$").unwrap();
#[cfg(unix)]
let sep = ":";
#[cfg(windows)]
let sep = ";";
for (key, value) in env.clone() {
if let Some(caps) = re.captures(&value) {
env.insert(key, caps.get(1).unwrap().as_str().to_string());
} else {
let cleaned_value = value
.replace(&format!("{}${}", sep, key), "")
.replace(&format!("${}{}", key, sep), "")
.replace(&format!("; ${}", key), "") // one pantry instance of this
.replace(&format!("${}", key), "");
env.insert(key, cleaned_value);
}
}
// fork bomb protection
env.insert(
construct_platform_case_aware_env_key("PKGX_LVL".to_string()),
pkgx_lvl.to_string(),
);
env.insert(
construct_platform_case_aware_env_key("PKGX_VERSION".to_string()),
env!("CARGO_PKG_VERSION").to_string(),
);
// TODO should be output by +syntax too
env.insert(
construct_platform_case_aware_env_key("PKGX_ENV".to_string()),
graph
.iter()
.map(|pkg| format!("{}", pkg))
.collect::<Vec<String>>()
.join(libpkgx::env::SEP),
);
if flags.shebang {
// removes the filename of the shebang script
args.remove(0);
}
Ok((cmd, args, env))
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/cli/src/query.rs | crates/cli/src/query.rs | use std::error::Error;
use libpkgx::pantry_db;
use rusqlite::{params, Connection};
pub fn query(args: &Vec<String>, silent: bool, conn: &Connection) -> Result<(), Box<dyn Error>> {
if args.is_empty() {
let mut stmt = conn.prepare("SELECT program FROM provides")?;
let mut rows = stmt.query(params![])?;
while let Some(row) = rows.next()? {
let program: String = row.get(0)?;
println!("{}", program);
}
} else {
let mut fail = false;
for arg in args {
let projects = pantry_db::which(arg, conn)?;
if projects.is_empty() && silent {
std::process::exit(1);
} else if projects.is_empty() {
println!("{} not found", arg);
fail = true;
} else if !silent {
println!("{}", projects.join(", "));
}
}
if fail {
std::process::exit(1);
}
}
Ok(())
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/cli/src/main.rs | crates/cli/src/main.rs | mod args;
mod dump;
mod execve;
mod help;
mod query;
mod resolve;
mod spinner;
#[cfg(test)]
mod tests;
mod which;
mod x;
use execve::execve;
use libpkgx::{config::Config, sync};
use spinner::Spinner;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
let args::Args {
plus,
mut args,
mode,
flags,
find_program,
} = args::parse();
if let Some(dir) = &flags.chdir {
std::env::set_current_dir(dir)?;
}
if flags.version_n_continue {
eprintln!("{}", get_version_string(flags.json.is_some()));
}
match mode {
args::Mode::Help => {
println!("{}", help::usage());
Ok(())
}
args::Mode::Version => {
println!("{}", get_version_string(flags.json.is_some()));
Ok(())
}
args::Mode::Query => {
let (conn, _, _, _) = setup(&flags).await?;
query::query(&args, flags.silent, &conn)
}
args::Mode::X => {
let (mut conn, did_sync, config, mut spinner) = setup(&flags).await?;
let (installations, graph) = resolve::resolve(
&mut args,
&plus,
find_program,
&config,
&mut conn,
did_sync,
&mut spinner,
)
.await?;
if !args.is_empty() {
let env = libpkgx::env::map(&installations);
let (cmd, args, env) =
x::exec(find_program, args, installations, env, flags, conn, graph).await?;
spinner.finish_and_clear();
execve(cmd, args, env)?;
Ok(())
} else if !plus.is_empty() {
spinner.finish_and_clear();
dump::dump(conn, installations, &flags)?;
Ok(())
} else if flags.version_n_continue || flags.sync {
Ok(())
} else {
spinner.finish_and_clear();
eprintln!("{}", help::usage());
std::process::exit(2);
}
}
}
}
async fn setup(
flags: &args::Flags,
) -> Result<(rusqlite::Connection, bool, Config, Spinner), Box<dyn std::error::Error>> {
let config = Config::new()?;
std::fs::create_dir_all(config.pantry_db_file.parent().unwrap())?;
let mut conn = rusqlite::Connection::open(&config.pantry_db_file)?;
let mut spinner = Spinner::new(flags.quiet, flags.silent);
let did_sync = if flags.sync || sync::should(&config)? {
spinner.set_message("syncing pkg-db…");
sync::ensure(&config, &mut conn).await?;
true
} else {
false
};
Ok((conn, did_sync, config, spinner))
}
fn get_version_string(json: bool) -> String {
if !json {
format!("pkgx {}", env!("CARGO_PKG_VERSION"))
} else {
format!(
"{{\"program\": \"pkgx\", \"version\": \"{}\"}}",
env!("CARGO_PKG_VERSION")
)
}
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/cli/src/spinner.rs | crates/cli/src/spinner.rs | use std::{sync::Arc, time::Duration};
use libpkgx::install_multi::ProgressBarExt;
pub struct Spinner {
quiet: bool,
silent: bool,
bar: Option<indicatif::ProgressBar>,
}
impl Spinner {
pub fn new(quiet: bool, silent: bool) -> Spinner {
Self {
bar: None,
quiet,
silent,
}
}
pub fn set_message(&mut self, msg: &str) {
if self.silent || self.quiet {
return;
}
if let Some(bar) = &self.bar {
bar.set_message(msg.to_string());
} else {
let bar = indicatif::ProgressBar::new_spinner();
bar.set_message(msg.to_string());
bar.enable_steady_tick(Duration::from_millis(100));
self.bar = Some(bar);
}
}
pub fn finish_and_clear(&self) {
if let Some(bar) = &self.bar {
bar.finish_and_clear();
}
}
pub fn arc(&self) -> Option<Arc<impl ProgressBarExt + Send + Sync + 'static>> {
if let Some(bar) = &self.bar {
configure_bar(bar);
Some(Arc::new(MultiProgressBar { pb: bar.clone() }))
} else {
None
}
}
}
use indicatif::{ProgressBar, ProgressState, ProgressStyle};
use std::fmt::Write;
struct MultiProgressBar {
pb: ProgressBar,
}
impl libpkgx::install_multi::ProgressBarExt for MultiProgressBar {
fn inc(&self, n: u64) {
self.pb.inc(n);
}
fn inc_length(&self, n: u64) {
self.pb.inc_length(n);
}
}
// ProgressBar is Send + Sync
unsafe impl Send for MultiProgressBar {}
unsafe impl Sync for MultiProgressBar {}
fn configure_bar(pb: &ProgressBar) {
pb.set_length(1_000_000); // prevent progress var jump where we start receiving bytes before we know the length
pb.set_style(
ProgressStyle::with_template(
"{elapsed:.dim} ❲{wide_bar:.red}❳ {percent}% {bytes_per_sec:.dim} {bytes:.dim}",
)
.unwrap()
.with_key("elapsed", |state: &ProgressState, w: &mut dyn Write| {
let s = state.elapsed().as_secs_f64();
let precision = precision(s);
write!(w, "{:.precision$}s", s, precision = precision).unwrap()
})
.with_key("bytes", |state: &ProgressState, w: &mut dyn Write| {
let (right, divisor) = pretty_size(state.len().unwrap());
let left = state.pos() as f64 / divisor as f64;
let leftprecision = precision(left);
write!(
w,
"{:.precision$}/{}",
left,
right,
precision = leftprecision
)
.unwrap()
})
.progress_chars("⚯ "),
);
pb.enable_steady_tick(Duration::from_millis(50));
}
// pub(crate) for tests (FIXME)
pub(crate) fn pretty_size(n: u64) -> (String, u64) {
let units = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"];
// number of 1024s
let thousands = n.max(1).ilog(1024).clamp(0, units.len() as u32 - 1) as usize;
// size in the appropriate unit
let size = n as f64 / 1024.0f64.powi(thousands as i32);
// the divisor to get back to bytes
let divisor = 1024u64.pow(thousands as u32);
// number of decimal places to show (0 if we're bytes. no fractional bytes. come on.)
let precision = if thousands == 0 { 0 } else { precision(size) };
let formatted = format!(
"{:.precision$} {}",
size,
units[thousands],
precision = precision
);
(formatted, divisor)
}
// pub(crate) for tests (FIXME)
pub(crate) fn precision(n: f64) -> usize {
// 1 > 1.00, 10 > 10.0, 100 > 100
2 - (n.log10().clamp(0.0, 2.0) as usize)
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/cli/src/tests/mod.rs | crates/cli/src/tests/mod.rs | mod main;
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
pkgxdev/pkgx | https://github.com/pkgxdev/pkgx/blob/838eecd24a027fa11921dfc0d7e867c30dcf1e78/crates/cli/src/tests/main.rs | crates/cli/src/tests/main.rs | use crate::spinner::{precision, pretty_size};
#[test]
fn test_pretty_size() {
assert_eq!(pretty_size(0), ("0 B".to_string(), 1));
assert_eq!(pretty_size(1), ("1 B".to_string(), 1));
assert_eq!(pretty_size(1024), ("1.00 KiB".to_string(), 1024));
assert_eq!(
pretty_size(1024 * 1024),
("1.00 MiB".to_string(), 1024 * 1024)
);
assert_eq!(
pretty_size(1024 * 1024 * 1024),
("1.00 GiB".to_string(), 1024 * 1024 * 1024)
);
assert_eq!(
pretty_size(1024 * 1024 * 1024 * 1024),
("1.00 TiB".to_string(), 1024 * 1024 * 1024 * 1024)
);
assert_eq!(
pretty_size(1024 * 1024 * 1024 * 1024 * 1024),
("1.00 PiB".to_string(), 1024 * 1024 * 1024 * 1024 * 1024)
);
assert_eq!(
pretty_size(1024 * 1024 * 1024 * 1024 * 1024 * 1024),
(
"1.00 EiB".to_string(),
1024 * 1024 * 1024 * 1024 * 1024 * 1024
)
);
// these are bigger than u64
// assert_eq!(
// pretty_size(1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024),
// (
// "1 ZiB".to_string(),
// 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024
// )
// );
// assert_eq!(
// pretty_size(1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024),
// (
// "1 YiB".to_string(),
// 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024 * 1024
// )
// );
assert_eq!(pretty_size(5000), ("4.88 KiB".to_string(), 1024));
assert_eq!(pretty_size(5120), ("5.00 KiB".to_string(), 1024));
assert_eq!(
pretty_size(1024 * 1024 + 1),
("1.00 MiB".to_string(), 1024 * 1024)
);
assert_eq!(
pretty_size(35_245 * 1024),
("34.4 MiB".to_string(), 1024 * 1024)
);
assert_eq!(
pretty_size(356_245 * 1024 + 1),
("348 MiB".to_string(), 1024 * 1024)
);
}
#[test]
fn test_precision() {
assert_eq!(precision(1.0), 2);
assert_eq!(precision(1.1), 2);
assert_eq!(precision(9.99), 2);
assert_eq!(precision(10.0), 1);
assert_eq!(precision(10.1), 1);
assert_eq!(precision(99.9), 1);
assert_eq!(precision(100.0), 0);
assert_eq!(precision(100.1), 0);
assert_eq!(precision(999.9), 0);
}
| rust | Apache-2.0 | 838eecd24a027fa11921dfc0d7e867c30dcf1e78 | 2026-01-04T15:44:50.555963Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/db.rs | src/db.rs | use rusqlite::Connection;
use crate::client::{Client, RequestResult};
fn create_db(conn: &Connection) -> Result<usize, rusqlite::Error> {
conn.execute(
"CREATE TABLE IF NOT EXISTS oha (
url TEXT NOT NULL,
start REAL NOT NULL,
start_latency_correction REAL,
end REAL NOT NULL,
duration REAL NOT NULL,
status INTEGER NOT NULL,
len_bytes INTEGER NOT NULL,
run INTEGER NOT NULL
)",
(),
)
}
pub fn store(
client: &Client,
db_url: &str,
start: std::time::Instant,
request_records: &[RequestResult],
run: u64,
) -> Result<usize, rusqlite::Error> {
let mut conn = Connection::open(db_url)?;
create_db(&conn)?;
let t = conn.transaction()?;
let mut affected_rows = 0;
for request in request_records {
let req = client.generate_request(&mut request.rng.clone()).unwrap().1;
let url = req.uri();
affected_rows += t.execute(
"INSERT INTO oha (url, start, start_latency_correction, end, duration, status, len_bytes, run) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
(
url.to_string(),
(request.start - start).as_secs_f64(),
request.start_latency_correction.map(|d| (d - start).as_secs_f64()),
(request.end - start).as_secs_f64(),
request.duration().as_secs_f64(),
request.status.as_u16() as i64,
request.len_bytes,
run
),
)?;
}
t.commit()?;
Ok(affected_rows)
}
#[cfg(test)]
mod test_db {
use rand::SeedableRng;
use super::*;
#[test]
fn test_store() {
let run = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap()
.as_secs();
let start = std::time::Instant::now();
let test_val = RequestResult {
rng: SeedableRng::seed_from_u64(0),
status: hyper::StatusCode::OK,
len_bytes: 100,
start_latency_correction: None,
start: std::time::Instant::now(),
connection_time: None,
first_byte: None,
end: std::time::Instant::now(),
};
let test_vec = vec![test_val.clone(), test_val.clone()];
let client = Client::default();
let result = store(&client, ":memory:", start, &test_vec, run);
assert_eq!(result.unwrap(), 2);
}
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/lib.rs | src/lib.rs | use anyhow::Context;
use aws_auth::AwsSignatureConfig;
use bytes::Bytes;
use clap::Parser;
use crossterm::tty::IsTty;
use hickory_resolver::config::{ResolverConfig, ResolverOpts};
use humantime::Duration;
use hyper::{
HeaderMap,
http::{self, header::HeaderName, header::HeaderValue},
};
use printer::{PrintConfig, PrintMode};
use rand_regex::Regex;
use ratatui::crossterm;
use result_data::ResultData;
use std::{
env,
fs::File,
io::{BufRead, BufReader, Read},
path::{Path, PathBuf},
pin::Pin,
sync::Arc,
};
use timescale::TimeScale;
use url::Url;
use url_generator::UrlGenerator;
mod aws_auth;
mod cli;
mod client;
#[cfg(feature = "http3")]
mod client_h3;
mod curl_compat;
mod db;
mod histogram;
mod monitor;
mod pcg64si;
mod printer;
mod request_generator;
mod result_data;
mod timescale;
mod tls_config;
mod url_generator;
#[cfg(not(target_env = "msvc"))]
use tikv_jemallocator::Jemalloc;
use crate::{
cli::{ConnectToEntry, parse_header},
request_generator::{BodyGenerator, Proxy, RequestGenerator},
};
#[cfg(not(target_env = "msvc"))]
#[global_allocator]
static GLOBAL: Jemalloc = Jemalloc;
#[derive(Parser)]
#[command(version, about, long_about = None)]
#[command(arg_required_else_help(true))]
#[command(styles = clap_cargo::style::CLAP_STYLING)]
pub struct Opts {
#[arg(help = "Target URL or file with multiple URLs.")]
url: String,
#[arg(
help = "Number of requests to run. Accepts plain numbers or suffixes: k = 1,000, m = 1,000,000 (e.g. 10k, 1m).",
short = 'n',
default_value = "200",
conflicts_with = "duration",
value_parser = cli::parse_n_requests
)]
n_requests: usize,
#[arg(
help = "Number of connections to run concurrently. You may should increase limit to number of open files for larger `-c`.",
short = 'c',
default_value = "50"
)]
n_connections: usize,
#[arg(
help = "Number of parallel requests to send on HTTP/2. `oha` will run c * p concurrent workers in total.",
short = 'p',
default_value = "1"
)]
n_http2_parallel: usize,
#[arg(
help = "Duration of application to send requests.
On HTTP/1, When the duration is reached, ongoing requests are aborted and counted as \"aborted due to deadline\"
You can change this behavior with `-w` option.
Currently, on HTTP/2, When the duration is reached, ongoing requests are waited. `-w` option is ignored.
Examples: -z 10s -z 3m.",
short = 'z',
conflicts_with = "n_requests"
)]
duration: Option<Duration>,
#[arg(
help = "When the duration is reached, ongoing requests are waited",
short,
long,
default_value = "false",
requires = "duration"
)]
wait_ongoing_requests_after_deadline: bool,
#[arg(help = "Rate limit for all, in queries per second (QPS)", short = 'q', conflicts_with_all = ["burst_duration", "burst_requests"])]
query_per_second: Option<f64>,
#[arg(
help = "Introduce delay between a predefined number of requests.
Note: If qps is specified, burst will be ignored",
long = "burst-delay",
requires = "burst_requests",
conflicts_with = "query_per_second"
)]
burst_duration: Option<Duration>,
#[arg(
help = "Rates of requests for burst. Default is 1
Note: If qps is specified, burst will be ignored",
long = "burst-rate",
requires = "burst_duration",
conflicts_with = "query_per_second"
)]
burst_requests: Option<usize>,
#[arg(
help = "Generate URL by rand_regex crate but dot is disabled for each query e.g. http://127.0.0.1/[a-z][a-z][0-9]. Currently dynamic scheme, host and port with keep-alive do not work well. See https://docs.rs/rand_regex/latest/rand_regex/struct.Regex.html for details of syntax.",
default_value = "false",
long
)]
rand_regex_url: bool,
#[arg(
help = "Read the URLs to query from a file",
default_value = "false",
long
)]
urls_from_file: bool,
#[arg(
help = "A parameter for the '--rand-regex-url'. The max_repeat parameter gives the maximum extra repeat counts the x*, x+ and x{n,} operators will become.",
default_value = "4",
long,
requires = "rand_regex_url"
)]
max_repeat: u32,
#[arg(
help = "Dump target Urls <DUMP_URLS> times to debug --rand-regex-url",
long
)]
dump_urls: Option<usize>,
#[arg(
help = "Correct latency to avoid coordinated omission problem. It's ignored if -q is not set.",
long = "latency-correction"
)]
latency_correction: bool,
#[arg(help = "No realtime tui", long = "no-tui")]
no_tui: bool,
#[arg(help = "Frame per second for tui.", default_value = "16", long = "fps")]
fps: usize,
#[arg(
help = "HTTP method",
short = 'm',
long = "method",
default_value = "GET"
)]
method: http::Method,
#[arg(help = "Custom HTTP header. Examples: -H \"foo: bar\"", short = 'H', value_parser = parse_header)]
headers: Vec<(HeaderName, HeaderValue)>,
#[arg(
help = "Custom Proxy HTTP header. Examples: --proxy-header \"foo: bar\"",
long = "proxy-header",
value_parser = parse_header
)]
proxy_headers: Vec<(HeaderName, HeaderValue)>,
#[arg(help = "Timeout for each request. Default to infinite.", short = 't')]
timeout: Option<humantime::Duration>,
#[arg(
help = "Timeout for establishing a new connection. Default to 5s.",
long = "connect-timeout",
default_value = "5s"
)]
connect_timeout: humantime::Duration,
#[arg(help = "HTTP Accept Header.", short = 'A')]
accept_header: Option<String>,
#[arg(help = "HTTP request body.", short = 'd', conflicts_with_all = ["body_path", "body_path_lines", "form"])]
body_string: Option<String>,
#[arg(help = "HTTP request body from file.", short = 'D', conflicts_with_all = ["body_string", "body_path_lines", "form"])]
body_path: Option<std::path::PathBuf>,
#[arg(help = "HTTP request body from file line by line.", short = 'Z', conflicts_with_all = ["body_string", "body_path", "form"])]
body_path_lines: Option<std::path::PathBuf>,
#[arg(
help = "Specify HTTP multipart POST data (curl compatible). Examples: -F 'name=value' -F 'file=@path/to/file'",
short = 'F',
long = "form",
conflicts_with_all = ["body_string", "body_path", "body_path_lines"]
)]
form: Vec<String>,
#[arg(help = "Content-Type.", short = 'T')]
content_type: Option<String>,
#[arg(
help = "Basic authentication (username:password), or AWS credentials (access_key:secret_key)",
short = 'a'
)]
basic_auth: Option<String>,
#[arg(help = "AWS session token", long = "aws-session")]
aws_session: Option<String>,
#[arg(
help = "AWS SigV4 signing params (format: aws:amz:region:service)",
long = "aws-sigv4"
)]
aws_sigv4: Option<String>,
#[arg(help = "HTTP proxy", short = 'x')]
proxy: Option<Url>,
#[arg(
help = "HTTP version to connect to proxy. Available values 0.9, 1.0, 1.1, 2.",
long = "proxy-http-version"
)]
proxy_http_version: Option<String>,
#[arg(
help = "Use HTTP/2 to connect to proxy. Shorthand for --proxy-http-version=2",
long = "proxy-http2"
)]
proxy_http2: bool,
#[arg(
help = "HTTP version. Available values 0.9, 1.0, 1.1, 2, 3",
long = "http-version"
)]
http_version: Option<String>,
#[arg(help = "Use HTTP/2. Shorthand for --http-version=2", long = "http2")]
http2: bool,
#[arg(help = "HTTP Host header", long = "host")]
host: Option<String>,
#[arg(help = "Disable compression.", long = "disable-compression")]
disable_compression: bool,
#[arg(
help = "Limit for number of Redirect. Set 0 for no redirection. Redirection isn't supported for HTTP/2.",
default_value = "10",
short = 'r',
long = "redirect"
)]
redirect: usize,
#[arg(
help = "Disable keep-alive, prevents re-use of TCP connections between different HTTP requests. This isn't supported for HTTP/2.",
long = "disable-keepalive"
)]
disable_keepalive: bool,
#[arg(
help = "*Not* perform a DNS lookup at beginning to cache it",
long = "no-pre-lookup",
default_value = "false"
)]
no_pre_lookup: bool,
#[arg(help = "Lookup only ipv6.", long = "ipv6")]
ipv6: bool,
#[arg(help = "Lookup only ipv4.", long = "ipv4")]
ipv4: bool,
#[arg(
help = "(TLS) Use the specified certificate file to verify the peer. Native certificate store is used even if this argument is specified.",
long
)]
cacert: Option<PathBuf>,
#[arg(
help = "(TLS) Use the specified client certificate file. --key must be also specified",
long,
requires = "key"
)]
cert: Option<PathBuf>,
#[arg(
help = "(TLS) Use the specified client key file. --cert must be also specified",
long,
requires = "cert"
)]
key: Option<PathBuf>,
#[arg(help = "Accept invalid certs.", long = "insecure")]
insecure: bool,
#[arg(
help = "Override DNS resolution and default port numbers with strings like 'example.org:443:localhost:8443'
Note: if used several times for the same host:port:target_host:target_port, a random choice is made",
long = "connect-to"
)]
connect_to: Vec<ConnectToEntry>,
#[arg(
help = "Disable the color scheme.",
alias = "disable-color",
long = "no-color",
env = "NO_COLOR"
)]
no_color: bool,
#[cfg(unix)]
#[arg(
help = "Connect to a unix socket instead of the domain in the URL. Only for non-HTTPS URLs.",
long = "unix-socket",
group = "socket-type"
)]
unix_socket: Option<std::path::PathBuf>,
#[cfg(feature = "vsock")]
#[arg(
help = "Connect to a VSOCK socket using 'cid:port' instead of the domain in the URL. Only for non-HTTPS URLs.",
long = "vsock-addr",
value_parser = cli::parse_vsock_addr,
group = "socket-type"
)]
vsock_addr: Option<tokio_vsock::VsockAddr>,
#[arg(
help = "Include a response status code successful or not successful breakdown for the time histogram and distribution statistics",
long = "stats-success-breakdown"
)]
stats_success_breakdown: bool,
#[arg(
help = "Write succeeded requests to sqlite database url E.G test.db",
long = "db-url"
)]
db_url: Option<String>,
#[arg(
long,
help = "Perform a single request and dump the request and response"
)]
debug: bool,
#[arg(
help = "Output file to write the results to. If not specified, results are written to stdout.",
long,
short
)]
output: Option<PathBuf>,
#[arg(help = "Output format", long, default_value = "text")]
output_format: Option<PrintMode>,
#[arg(
help = "Time unit to be used. If not specified, the time unit is determined automatically. This option affects only text format.",
long,
short = 'u'
)]
time_unit: Option<TimeScale>,
}
pub async fn run(mut opts: Opts) -> anyhow::Result<()> {
let work_mode = opts.work_mode();
// Parse AWS credentials from basic auth if AWS signing is requested
let aws_config = if let Some(signing_params) = opts.aws_sigv4 {
if let Some(auth) = &opts.basic_auth {
let parts: Vec<&str> = auth.split(':').collect();
if parts.len() != 2 {
anyhow::bail!("Invalid AWS credentials format. Expected access_key:secret_key");
}
let access_key = parts[0];
let secret_key = parts[1];
let session_token = opts.aws_session.take();
Some(AwsSignatureConfig::new(
access_key,
secret_key,
&signing_params,
session_token,
)?)
} else {
anyhow::bail!("AWS credentials (--auth) required when using --aws-sigv4");
}
} else {
None
};
let parse_http_version = |is_http2: bool, version: Option<&str>| match (is_http2, version) {
(true, Some(_)) => anyhow::bail!("--http2 and --http-version are exclusive"),
(true, None) => Ok(http::Version::HTTP_2),
(false, Some(http_version)) => match http_version.trim() {
"0.9" => Ok(http::Version::HTTP_09),
"1.0" => Ok(http::Version::HTTP_10),
"1.1" => Ok(http::Version::HTTP_11),
"2.0" | "2" => Ok(http::Version::HTTP_2),
#[cfg(feature = "http3")]
"3.0" | "3" => Ok(http::Version::HTTP_3),
#[cfg(not(feature = "http3"))]
"3.0" | "3" => anyhow::bail!(
"Your Oha instance has not been built with HTTP/3 support. Try recompiling with the feature enabled."
),
_ => anyhow::bail!("Unknown HTTP version. Valid versions are 0.9, 1.0, 1.1, 2, 3"),
},
(false, None) => Ok(http::Version::HTTP_11),
};
let http_version: http::Version = parse_http_version(opts.http2, opts.http_version.as_deref())?;
let proxy_http_version: http::Version =
parse_http_version(opts.proxy_http2, opts.proxy_http_version.as_deref())?;
let url_generator = if opts.rand_regex_url {
// Almost URL has dot in domain, so disable dot in regex for convenience.
let dot_disabled: String = opts
.url
.chars()
.map(|c| {
if c == '.' {
regex_syntax::escape(".")
} else {
c.to_string()
}
})
.collect();
UrlGenerator::new_dynamic(Regex::compile(&dot_disabled, opts.max_repeat)?)
} else if opts.urls_from_file {
let path = Path::new(opts.url.as_str());
let file = File::open(path)?;
let reader = std::io::BufReader::new(file);
let urls: Vec<Url> = reader
.lines()
.map_while(Result::ok)
.filter(|line| !line.trim().is_empty())
.map(|url_str| Url::parse(&url_str))
.collect::<Result<Vec<_>, _>>()?;
UrlGenerator::new_multi_static(urls)
} else {
UrlGenerator::new_static(Url::parse(&opts.url)?)
};
if let Some(n) = opts.dump_urls {
let mut rng = rand::rng();
for _ in 0..n {
let url = url_generator.generate(&mut rng)?;
println!("{url}");
}
return Ok(());
}
let url = url_generator.generate(&mut rand::rng())?;
// Process form data or regular body first
let has_form_data = !opts.form.is_empty();
let (body_generator, form_content_type): (BodyGenerator, Option<String>) = if has_form_data {
let mut form = curl_compat::Form::new();
for form_str in opts.form {
let part: curl_compat::FormPart = form_str
.parse()
.with_context(|| format!("Failed to parse form data: {form_str}"))?;
form.add_part(part);
}
let form_body = form.body();
let content_type = form.content_type();
(BodyGenerator::Static(form_body.into()), Some(content_type))
} else if let Some(body_string) = opts.body_string {
(BodyGenerator::Static(body_string.into()), None)
} else if let Some(body_path) = opts.body_path {
let mut buf = Vec::new();
std::fs::File::open(body_path)?.read_to_end(&mut buf)?;
(BodyGenerator::Static(buf.into()), None)
} else if let Some(body_path_lines) = opts.body_path_lines {
let lines = BufReader::new(std::fs::File::open(body_path_lines)?)
.lines()
.map_while(Result::ok)
.map(Bytes::from)
.collect::<Vec<_>>();
(BodyGenerator::Random(lines), None)
} else {
(BodyGenerator::Static(Bytes::new()), None)
};
// Set method to POST if form data is used and method is GET
let method = if has_form_data && opts.method == http::Method::GET {
http::Method::POST
} else {
opts.method
};
let headers = {
let mut headers: http::header::HeaderMap = Default::default();
// Accept all
headers.insert(
http::header::ACCEPT,
http::header::HeaderValue::from_static("*/*"),
);
// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Accept-Encoding
if !opts.disable_compression {
headers.insert(
http::header::ACCEPT_ENCODING,
http::header::HeaderValue::from_static("gzip, compress, deflate, br"),
);
}
// User agent
headers
.entry(http::header::USER_AGENT)
.or_insert(HeaderValue::from_static(concat!(
"oha/",
env!("CARGO_PKG_VERSION")
)));
if let Some(h) = opts.accept_header {
headers.insert(http::header::ACCEPT, HeaderValue::from_bytes(h.as_bytes())?);
}
if let Some(h) = opts.content_type.or(form_content_type) {
headers.insert(
http::header::CONTENT_TYPE,
HeaderValue::from_bytes(h.as_bytes())?,
);
}
if let Some(h) = opts.host {
headers.insert(http::header::HOST, HeaderValue::from_bytes(h.as_bytes())?);
}
if let Some(auth) = opts.basic_auth {
let u_p = auth.splitn(2, ':').collect::<Vec<_>>();
anyhow::ensure!(u_p.len() == 2, anyhow::anyhow!("Parse auth"));
let mut header_value = b"Basic ".to_vec();
{
use std::io::Write;
let username = u_p[0];
let password = if u_p[1].is_empty() {
None
} else {
Some(u_p[1])
};
let mut encoder = base64::write::EncoderWriter::new(
&mut header_value,
&base64::engine::general_purpose::STANDARD,
);
// The unwraps here are fine because Vec::write* is infallible.
write!(encoder, "{username}:").unwrap();
if let Some(password) = password {
write!(encoder, "{password}").unwrap();
}
}
headers.insert(
http::header::AUTHORIZATION,
HeaderValue::from_bytes(&header_value)?,
);
}
if opts.disable_keepalive && http_version == http::Version::HTTP_11 {
headers.insert(http::header::CONNECTION, HeaderValue::from_static("close"));
}
for (k, v) in opts.headers.into_iter() {
headers.insert(k, v);
}
headers
};
let proxy_headers = opts.proxy_headers.into_iter().collect::<HeaderMap<_>>();
let ip_strategy = match (opts.ipv4, opts.ipv6) {
(false, false) => {
if cfg!(target_os = "macos") && (url.host_str() == Some("localhost")) {
// #784
// On macOS, localhost resolves to ::1 first, So web servers that bind to localhost tend to listen ipv6 only.
// So prefer ipv6 on macos for localhost.
hickory_resolver::config::LookupIpStrategy::Ipv6thenIpv4
} else {
Default::default()
}
}
(true, false) => hickory_resolver::config::LookupIpStrategy::Ipv4Only,
(false, true) => hickory_resolver::config::LookupIpStrategy::Ipv6Only,
(true, true) => hickory_resolver::config::LookupIpStrategy::Ipv4AndIpv6,
};
let (config, mut resolver_opts) = system_resolv_conf()?;
resolver_opts.ip_strategy = ip_strategy;
let resolver = hickory_resolver::Resolver::builder_with_config(
config,
hickory_resolver::name_server::TokioConnectionProvider::default(),
)
.with_options(resolver_opts)
.build();
let cacert = opts.cacert.as_deref().map(std::fs::read).transpose()?;
let client_auth = match (opts.cert, opts.key) {
(Some(cert), Some(key)) => Some((std::fs::read(cert)?, std::fs::read(key)?)),
(None, None) => None,
// Not possible because of clap requires
_ => anyhow::bail!("Both --cert and --key must be specified"),
};
let url = url.into_owned();
let client = Arc::new(client::Client {
request_generator: RequestGenerator {
url_generator,
https: url.scheme() == "https",
version: http_version,
aws_config,
method,
headers,
body_generator,
http_proxy: if opts.proxy.is_some() && url.scheme() == "http" {
Some(Proxy {
headers: proxy_headers.clone(),
version: proxy_http_version,
})
} else {
None
},
},
proxy_http_version,
proxy_headers,
dns: client::Dns {
resolver,
connect_to: opts.connect_to,
},
timeout: opts.timeout.map(|d| d.into()),
connect_timeout: opts.connect_timeout.into(),
redirect_limit: opts.redirect,
disable_keepalive: opts.disable_keepalive,
proxy_url: opts.proxy,
#[cfg(unix)]
unix_socket: opts.unix_socket,
#[cfg(feature = "vsock")]
vsock_addr: opts.vsock_addr,
#[cfg(feature = "rustls")]
rustls_configs: tls_config::RuslsConfigs::new(
opts.insecure,
cacert.as_deref(),
client_auth
.as_ref()
.map(|(cert, key)| (cert.as_slice(), key.as_slice())),
),
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
native_tls_connectors: tls_config::NativeTlsConnectors::new(
opts.insecure,
cacert.as_deref(),
client_auth
.as_ref()
.map(|(cert, key)| (cert.as_slice(), key.as_slice())),
),
});
if !opts.no_pre_lookup {
client.pre_lookup().await?;
}
let no_tui = opts.no_tui || !std::io::stdout().is_tty() || opts.debug;
let print_config = {
let mode = opts.output_format.unwrap_or_default();
let disable_style = opts.no_color || !std::io::stdout().is_tty() || opts.output.is_some();
let output: Box<dyn std::io::Write + Send + 'static> = if let Some(output) = opts.output {
Box::new(File::create(output)?)
} else {
Box::new(std::io::stdout())
};
PrintConfig {
mode,
output,
disable_style,
stats_success_breakdown: opts.stats_success_breakdown,
time_unit: opts.time_unit,
}
};
let run = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)?
.as_secs();
let start = std::time::Instant::now();
let data_collect_future: Pin<Box<dyn std::future::Future<Output = (ResultData, PrintConfig)>>> =
match work_mode {
WorkMode::Debug => {
let mut print_config = print_config;
client::work_debug(&mut print_config.output, client).await?;
return Ok(());
}
WorkMode::FixedNumber {
n_requests,
n_connections,
n_http2_parallel,
query_limit: None,
latency_correction: _,
} if no_tui => {
// Use optimized worker of no_tui mode.
let (result_tx, result_rx) = kanal::unbounded();
client::fast::work(
client.clone(),
result_tx,
n_requests,
n_connections,
n_http2_parallel,
)
.await;
Box::pin(async move {
let mut res = ResultData::default();
for r in result_rx {
res.merge(r);
}
(res, print_config)
})
}
WorkMode::Until {
duration,
n_connections,
n_http2_parallel,
query_limit: None,
latency_correction: _,
wait_ongoing_requests_after_deadline,
} if no_tui => {
// Use optimized worker of no_tui mode.
let (result_tx, result_rx) = kanal::unbounded();
client::fast::work_until(
client.clone(),
result_tx,
start + duration,
n_connections,
n_http2_parallel,
wait_ongoing_requests_after_deadline,
)
.await;
Box::pin(async move {
let mut res = ResultData::default();
for r in result_rx {
res.merge(r);
}
(res, print_config)
})
}
mode => {
let (result_tx, result_rx) = kanal::unbounded();
let data_collector = if no_tui {
// When `--no-tui` is enabled, just collect all data.
let token = tokio_util::sync::CancellationToken::new();
let result_rx_ctrl_c = result_rx.clone();
let token_ctrl_c = token.clone();
let ctrl_c = tokio::spawn(async move {
tokio::select! {
_ = tokio::signal::ctrl_c() => {
let mut all: ResultData = Default::default();
let mut buf = Vec::new();
let _ = result_rx_ctrl_c.drain_into(&mut buf);
for res in buf {
all.push(res);
}
let _ = printer::print_result(print_config, start, &all, start.elapsed());
std::process::exit(libc::EXIT_SUCCESS);
}
_ = token_ctrl_c.cancelled() => {
print_config
}
}
});
Box::pin(async move {
token.cancel();
let config = ctrl_c.await.unwrap();
let mut all = ResultData::default();
while let Ok(res) = result_rx.recv() {
all.push(res);
}
(all, config)
})
as Pin<Box<dyn std::future::Future<Output = (ResultData, PrintConfig)>>>
} else {
// Spawn monitor future which draws realtime tui
let join_handle = tokio::spawn(
monitor::Monitor {
print_config,
end_line: opts
.duration
.map(|d| monitor::EndLine::Duration(d.into()))
.unwrap_or(monitor::EndLine::NumQuery(opts.n_requests)),
report_receiver: result_rx,
start,
fps: opts.fps,
disable_color: opts.no_color,
time_unit: opts.time_unit,
}
.monitor(),
);
Box::pin(async { join_handle.await.unwrap().unwrap() })
as Pin<Box<dyn std::future::Future<Output = (ResultData, PrintConfig)>>>
};
match mode {
WorkMode::Debug => unreachable!("Must be already handled"),
WorkMode::FixedNumber {
n_requests,
n_connections,
n_http2_parallel,
query_limit,
latency_correction,
} => {
if let Some(query_limit) = query_limit {
if latency_correction {
client::work_with_qps(
client.clone(),
result_tx,
query_limit,
n_requests,
n_connections,
n_http2_parallel,
)
.await;
} else {
client::work_with_qps_latency_correction(
client.clone(),
result_tx,
query_limit,
n_requests,
n_connections,
n_http2_parallel,
)
.await;
}
} else {
client::work(
client.clone(),
result_tx,
n_requests,
n_connections,
n_http2_parallel,
)
.await;
}
}
WorkMode::Until {
duration,
n_connections,
n_http2_parallel,
query_limit,
latency_correction,
wait_ongoing_requests_after_deadline,
} => {
if let Some(query_limit) = query_limit {
if latency_correction {
client::work_until_with_qps_latency_correction(
client.clone(),
result_tx,
query_limit,
start,
start + duration,
n_connections,
n_http2_parallel,
wait_ongoing_requests_after_deadline,
)
.await;
} else {
client::work_until_with_qps(
client.clone(),
result_tx,
query_limit,
start,
start + duration,
n_connections,
n_http2_parallel,
wait_ongoing_requests_after_deadline,
)
.await;
}
} else {
client::work_until(
client.clone(),
result_tx,
start + duration,
n_connections,
n_http2_parallel,
wait_ongoing_requests_after_deadline,
)
.await;
}
}
}
data_collector
}
};
let duration = start.elapsed();
let (res, print_config) = data_collect_future.await;
printer::print_result(print_config, start, &res, duration)?;
if let Some(db_url) = opts.db_url {
eprintln!("Storing results to {db_url}");
db::store(&client, &db_url, start, res.success(), run)?;
}
Ok(())
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | true |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/timescale.rs | src/timescale.rs | use std::{fmt, time::Duration};
#[derive(Clone, Copy, PartialEq, Eq, Debug, PartialOrd, Ord)]
pub enum TimeScale {
Nanosecond, // 1e-9
Microsecond, // 1e-6
Millisecond, // 1e-3
Second, // 1
TenSeconds, // 10
Minute, // 60
TenMinutes, // 600
Hour, // 3600
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct TimeLabel {
pub x: usize,
pub timescale: TimeScale,
}
impl fmt::Display for TimeScale {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TimeScale::Nanosecond => write!(f, "ns"),
TimeScale::Microsecond => write!(f, "us"),
TimeScale::Millisecond => write!(f, "ms"),
TimeScale::Second => write!(f, "sec"),
TimeScale::TenSeconds => write!(f, "10 sec"),
TimeScale::Minute => write!(f, "min"),
TimeScale::TenMinutes => write!(f, "10 min"),
TimeScale::Hour => write!(f, "hr"),
}
}
}
impl fmt::Display for TimeLabel {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TimeLabel {
x,
timescale: TimeScale::Nanosecond,
} => write!(f, "{x}ns"),
TimeLabel {
x,
timescale: TimeScale::Microsecond,
} => write!(f, "{x}us"),
TimeLabel {
x,
timescale: TimeScale::Millisecond,
} => write!(f, "{x}ms"),
TimeLabel {
x,
timescale: TimeScale::Second,
} => write!(f, "{x}s"),
TimeLabel {
x,
timescale: TimeScale::TenSeconds,
} => write!(f, "{}s", 10 * x),
TimeLabel {
x,
timescale: TimeScale::Minute,
} => write!(f, "{x}m"),
TimeLabel {
x,
timescale: TimeScale::TenMinutes,
} => write!(f, "{}m", 10 * x),
TimeLabel {
x,
timescale: TimeScale::Hour,
} => write!(f, "{x}h"),
}
}
}
impl clap::ValueEnum for TimeScale {
fn value_variants<'a>() -> &'a [Self] {
&[
Self::Nanosecond,
Self::Microsecond,
Self::Millisecond,
Self::Second,
Self::Minute,
Self::Hour,
]
}
fn to_possible_value(&self) -> Option<clap::builder::PossibleValue> {
match self {
TimeScale::Nanosecond => Some(clap::builder::PossibleValue::new("ns")),
TimeScale::Microsecond => Some(clap::builder::PossibleValue::new("us")),
TimeScale::Millisecond => Some(clap::builder::PossibleValue::new("ms")),
TimeScale::Second => Some(clap::builder::PossibleValue::new("s")),
TimeScale::Minute => Some(clap::builder::PossibleValue::new("m")),
TimeScale::Hour => Some(clap::builder::PossibleValue::new("h")),
TimeScale::TenSeconds | TimeScale::TenMinutes => None,
}
}
}
impl TimeScale {
pub fn as_secs_f64(&self) -> f64 {
match self {
TimeScale::Nanosecond => 1e-9,
TimeScale::Microsecond => 1e-6,
TimeScale::Millisecond => 1e-3,
TimeScale::Second => 1.0,
TimeScale::TenSeconds => 10.0,
TimeScale::Minute => 60.0,
TimeScale::TenMinutes => 10.0 * 60.0,
TimeScale::Hour => 60.0 * 60.0,
}
}
/// From seconds as f64
pub fn from_f64(seconds: f64) -> Self {
for ts in &[
TimeScale::Hour,
TimeScale::TenMinutes,
TimeScale::Minute,
TimeScale::TenSeconds,
TimeScale::Second,
TimeScale::Millisecond,
TimeScale::Microsecond,
TimeScale::Nanosecond,
] {
if seconds > ts.as_secs_f64() {
return *ts;
}
}
TimeScale::Nanosecond
}
pub fn from_elapsed(duration: Duration) -> Self {
Self::from_f64(duration.as_secs_f64())
}
pub fn inc(&self) -> Self {
match self {
TimeScale::Nanosecond => TimeScale::Microsecond,
TimeScale::Microsecond => TimeScale::Millisecond,
TimeScale::Millisecond => TimeScale::Second,
TimeScale::Second => TimeScale::TenSeconds,
TimeScale::TenSeconds => TimeScale::Minute,
TimeScale::Minute => TimeScale::TenMinutes,
TimeScale::TenMinutes => TimeScale::Hour,
TimeScale::Hour => TimeScale::Hour,
}
}
pub fn dec(&self) -> Self {
match self {
TimeScale::Nanosecond => TimeScale::Nanosecond,
TimeScale::Microsecond => TimeScale::Nanosecond,
TimeScale::Millisecond => TimeScale::Microsecond,
TimeScale::Second => TimeScale::Millisecond,
TimeScale::TenSeconds => TimeScale::Second,
TimeScale::Minute => TimeScale::TenSeconds,
TimeScale::TenMinutes => TimeScale::Minute,
TimeScale::Hour => TimeScale::TenMinutes,
}
}
}
#[cfg(test)]
mod tests {
use super::*;
fn assert_timescale_correct_for_seconds_range(
range: [f64; 2],
expected_timescale: TimeScale,
expected_timescale_str: &str,
expected_timescale_as_secs: f64,
) {
for durations in range {
let timescale = TimeScale::from_elapsed(Duration::from_secs_f64(durations));
assert_eq!(timescale, expected_timescale);
assert_eq!(format!("{timescale}"), expected_timescale_str);
assert_eq!(timescale.as_secs_f64(), expected_timescale_as_secs);
}
}
#[test]
fn test_timescale_ranges() {
assert_timescale_correct_for_seconds_range(
[f64::MIN_POSITIVE, 1e-6],
TimeScale::Nanosecond,
"ns",
1e-9,
);
assert_timescale_correct_for_seconds_range(
[0.000_001_1, 1e-3],
TimeScale::Microsecond,
"us",
1e-6,
);
assert_timescale_correct_for_seconds_range(
[0.001_1, 1.0],
TimeScale::Millisecond,
"ms",
1e-3,
);
assert_timescale_correct_for_seconds_range([1.1, 10.0], TimeScale::Second, "sec", 1.0);
assert_timescale_correct_for_seconds_range(
[10.1, 60.0],
TimeScale::TenSeconds,
"10 sec",
10.0,
);
assert_timescale_correct_for_seconds_range([60.1, 600.0], TimeScale::Minute, "min", 60.0);
assert_timescale_correct_for_seconds_range(
[600.1, 3600.0],
TimeScale::TenMinutes,
"10 min",
600.0,
);
assert_timescale_correct_for_seconds_range(
[3600.1, 31536000.0],
TimeScale::Hour,
"hr",
3600.0,
);
}
#[test]
fn test_timescale_inc() {
let timescale = TimeScale::from_elapsed(Duration::from_secs_f64(1e-10));
let timescale_microsecond = timescale.inc();
assert_eq!(timescale_microsecond, TimeScale::Microsecond);
let timescale_millisecond = timescale_microsecond.inc();
assert_eq!(timescale_millisecond, TimeScale::Millisecond);
let timescale_second = timescale_millisecond.inc();
assert_eq!(timescale_second, TimeScale::Second);
let timescale_ten_seconds = timescale_second.inc();
assert_eq!(timescale_ten_seconds, TimeScale::TenSeconds);
let timescale_minute = timescale_ten_seconds.inc();
assert_eq!(timescale_minute, TimeScale::Minute);
let timescale_ten_minutes = timescale_minute.inc();
assert_eq!(timescale_ten_minutes, TimeScale::TenMinutes);
let timescale_hour = timescale_ten_minutes.inc();
assert_eq!(timescale_hour, TimeScale::Hour);
}
#[test]
fn test_timescale_dec() {
let timescale = TimeScale::from_elapsed(Duration::from_secs_f64(31536000.0));
let timescale_ten_minutes = timescale.dec();
assert_eq!(timescale_ten_minutes, TimeScale::TenMinutes);
let timescale_minute = timescale_ten_minutes.dec();
assert_eq!(timescale_minute, TimeScale::Minute);
let timescale_ten_seconds = timescale_minute.dec();
assert_eq!(timescale_ten_seconds, TimeScale::TenSeconds);
let timescale_second = timescale_ten_seconds.dec();
assert_eq!(timescale_second, TimeScale::Second);
let timescale_millisecond = timescale_second.dec();
assert_eq!(timescale_millisecond, TimeScale::Millisecond);
let timescale_microsecond = timescale_millisecond.dec();
assert_eq!(timescale_microsecond, TimeScale::Microsecond);
let timescale_nanosecond = timescale_microsecond.dec();
assert_eq!(timescale_nanosecond, TimeScale::Nanosecond);
}
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/aws_auth.rs | src/aws_auth.rs | use anyhow::Result;
use bytes::Bytes;
use hyper::{
HeaderMap,
header::{self, HeaderName},
};
use thiserror::Error;
use url::Url;
pub struct AwsSignatureConfig {
pub access_key: String,
pub secret_key: String,
pub session_token: Option<String>,
pub service: String,
pub region: String,
}
#[derive(Error, Debug)]
pub enum AwsSignatureError {
#[error("URL must contain a host {0}")]
NoHost(Url),
#[error("Invalid host header name {0}")]
InvalidHost(String),
#[error("Invalid authorization header name {0}")]
InvalidAuthorization(String),
}
// Initialize unsignable headers as a static constant
static UNSIGNABLE_HEADERS: [HeaderName; 8] = [
header::ACCEPT,
header::ACCEPT_ENCODING,
header::USER_AGENT,
header::EXPECT,
header::RANGE,
header::CONNECTION,
HeaderName::from_static("presigned-expires"),
HeaderName::from_static("x-amzn-trace-id"),
];
impl AwsSignatureConfig {
pub fn sign_request(
&self,
method: &str,
headers: &mut HeaderMap,
url: &Url,
body: &Bytes,
) -> Result<(), AwsSignatureError> {
let datetime = chrono::Utc::now();
let header_amz_date = datetime
.format("%Y%m%dT%H%M%SZ")
.to_string()
.parse()
.unwrap();
if !headers.contains_key(header::HOST) {
let host = url
.host_str()
.ok_or_else(|| AwsSignatureError::NoHost(url.clone()))?;
headers.insert(
header::HOST,
host.parse()
.map_err(|_| AwsSignatureError::InvalidHost(host.to_string()))?,
);
}
headers.insert("x-amz-date", header_amz_date);
if let Some(session_token) = &self.session_token {
headers.insert("x-amz-security-token", session_token.parse().unwrap());
}
headers.remove(header::AUTHORIZATION);
//remove and store headers in a vec from unsignable_headers
let removed_headers: Vec<(header::HeaderName, header::HeaderValue)> = UNSIGNABLE_HEADERS
.iter()
.filter_map(|k| headers.remove(k).map(|v| (k.clone(), v)))
.collect();
headers.insert(
header::CONTENT_LENGTH,
body.len().to_string().parse().unwrap(),
);
let aws_sign = aws_sign_v4::AwsSign::new(
method,
url.as_str(),
&datetime,
headers,
&self.region,
&self.access_key,
&self.secret_key,
&self.service,
body,
);
let signature = aws_sign.sign();
//insert headers
for (key, value) in removed_headers {
headers.insert(key, value);
}
headers.insert(
header::AUTHORIZATION,
signature
.parse()
.map_err(|_| AwsSignatureError::InvalidAuthorization(signature.to_string()))?,
);
Ok(())
}
pub fn new(
access_key: &str,
secret_key: &str,
signing_params: &str,
session_token: Option<String>,
) -> Result<Self, anyhow::Error> {
let parts: Vec<&str> = signing_params
.strip_prefix("aws:amz:")
.unwrap_or_default()
.split(':')
.collect();
if parts.len() != 2 {
anyhow::bail!("Invalid AWS signing params format. Expected aws:amz:region:service");
}
Ok(Self {
access_key: access_key.into(),
secret_key: secret_key.into(),
session_token,
region: parts[0].to_string(),
service: parts[1].to_string(),
})
}
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/cli.rs | src/cli.rs | use hyper::http::header::{HeaderName, HeaderValue};
use std::str::FromStr;
pub fn parse_header(s: &str) -> Result<(HeaderName, HeaderValue), anyhow::Error> {
let header = s.splitn(2, ':').collect::<Vec<_>>();
anyhow::ensure!(header.len() == 2, anyhow::anyhow!("Parse header"));
let name = HeaderName::from_str(header[0])?;
let value = HeaderValue::from_str(header[1].trim_start_matches(' '))?;
Ok::<(HeaderName, HeaderValue), anyhow::Error>((name, value))
}
pub fn parse_n_requests(s: &str) -> Result<usize, String> {
let s = s.trim().to_lowercase();
if let Some(num) = s.strip_suffix('k') {
num.parse::<f64>()
.map(|n| (n * 1000_f64) as usize)
.map_err(|e| e.to_string())
} else if let Some(num) = s.strip_suffix('m') {
num.parse::<f64>()
.map(|n| (n * 1_000_000_f64) as usize)
.map_err(|e| e.to_string())
} else {
s.parse::<usize>().map_err(|e| e.to_string())
}
}
/// An entry specified by `connect-to` to override DNS resolution and default
/// port numbers. For example, `example.org:80:localhost:5000` will connect to
/// `localhost:5000` whenever `http://example.org` is requested.
#[derive(Clone, Debug)]
pub struct ConnectToEntry {
pub requested_host: String,
pub requested_port: u16,
pub target_host: String,
pub target_port: u16,
}
impl FromStr for ConnectToEntry {
type Err = String;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let expected_syntax: &str = "syntax for --connect-to is host:port:target_host:target_port";
let (s, target_port) = s.rsplit_once(':').ok_or(expected_syntax)?;
let (s, target_host) = if s.ends_with(']') {
// ipv6
let i = s.rfind(":[").ok_or(expected_syntax)?;
(&s[..i], &s[i + 1..])
} else {
s.rsplit_once(':').ok_or(expected_syntax)?
};
let (requested_host, requested_port) = s.rsplit_once(':').ok_or(expected_syntax)?;
Ok(ConnectToEntry {
requested_host: requested_host.into(),
requested_port: requested_port.parse().map_err(|err| {
format!("requested port must be an u16, but got {requested_port}: {err}")
})?,
target_host: target_host.into(),
target_port: target_port.parse().map_err(|err| {
format!("target port must be an u16, but got {target_port}: {err}")
})?,
})
}
}
#[cfg(feature = "vsock")]
pub fn parse_vsock_addr(s: &str) -> Result<tokio_vsock::VsockAddr, String> {
let (cid, port) = s
.split_once(':')
.ok_or("syntax for --vsock-addr is cid:port")?;
Ok(tokio_vsock::VsockAddr::new(
cid.parse()
.map_err(|err| format!("cid must be a u32, but got {cid}: {err}"))?,
port.parse()
.map_err(|err| format!("port must be a u32, but got {port}: {err}"))?,
))
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/pcg64si.rs | src/pcg64si.rs | // https://github.com/imneme/pcg-c
use rand::{RngCore, SeedableRng};
use rand_core::impls;
#[derive(Debug, Copy, Clone)]
#[repr(transparent)]
pub struct Pcg64Si {
state: u64,
}
impl RngCore for Pcg64Si {
fn next_u32(&mut self) -> u32 {
self.next_u64() as u32
}
fn next_u64(&mut self) -> u64 {
let old_state = self.state;
self.state = self
.state
.wrapping_mul(6364136223846793005)
.wrapping_add(1442695040888963407);
let word =
((old_state >> ((old_state >> 59) + 5)) ^ old_state).wrapping_mul(12605985483714917081);
(word >> 43) ^ word
}
fn fill_bytes(&mut self, dest: &mut [u8]) {
impls::fill_bytes_via_next(self, dest)
}
}
impl SeedableRng for Pcg64Si {
type Seed = [u8; 8];
fn from_seed(seed: Self::Seed) -> Pcg64Si {
Pcg64Si {
state: u64::from_le_bytes(seed),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::collections::HashSet;
// For a given seed the RNG is deterministic
// thus we can perform some basic tests consistently
#[test]
fn test_rng_next() {
let mut rng = Pcg64Si::from_seed([1, 2, 3, 4, 5, 6, 7, 8]);
let mut values_set: HashSet<u32> = HashSet::new();
// Generate 1000 values modulus 100 (so each value is between 0 and 99)
for _ in 0..1000 {
values_set.insert(rng.next_u32() % 100);
}
// Expect to generate every number between 0 and 99 (the generated values are somewhat evenly distributed)
assert_eq!(values_set.len(), 100);
}
#[test]
fn test_rng_from_seed() {
// Different seeds should result in a different RNG state
let rng1 = Pcg64Si::from_seed([1, 2, 3, 4, 5, 6, 7, 8]);
let rng2 = Pcg64Si::from_seed([1, 2, 3, 4, 5, 6, 7, 7]);
assert_ne!(rng1.state, rng2.state);
}
#[test]
fn test_rng_fill_bytes() {
// This uses the next_u64/u32 functions underneath, so don't need to test the pseudo randomness again
let mut array: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0];
let mut rng = Pcg64Si::from_seed([1, 2, 3, 4, 5, 6, 7, 8]);
rng.fill_bytes(&mut array);
assert_ne!(array, [0, 0, 0, 0, 0, 0, 0, 0]);
}
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/client.rs | src/client.rs | use bytes::Bytes;
#[cfg(test)]
use hickory_resolver::config::{ResolverConfig, ResolverOpts};
use http_body_util::{BodyExt, Full};
use hyper::{Method, Request, http};
use hyper_util::rt::{TokioExecutor, TokioIo};
use rand::prelude::*;
use std::{
borrow::Cow,
io::Write,
sync::{
Arc,
atomic::{AtomicBool, Ordering::Relaxed},
},
time::Instant,
};
use thiserror::Error;
use tokio::{
io::{AsyncRead, AsyncWrite},
net::TcpStream,
};
use url::{ParseError, Url};
use crate::{
ConnectToEntry,
pcg64si::Pcg64Si,
request_generator::{RequestGenerationError, RequestGenerator},
url_generator::UrlGeneratorError,
};
#[cfg(feature = "http3")]
use crate::client_h3::send_debug_request_http3;
type SendRequestHttp1 = hyper::client::conn::http1::SendRequest<Full<Bytes>>;
type SendRequestHttp2 = hyper::client::conn::http2::SendRequest<Full<Bytes>>;
fn format_host_port(host: &str, port: u16) -> String {
if host.contains(':') && !(host.starts_with('[') && host.ends_with(']')) {
format!("[{host}]:{port}")
} else {
format!("{host}:{port}")
}
}
#[derive(Debug, Clone, Copy)]
pub struct ConnectionTime {
pub dns_lookup: std::time::Duration,
pub dialup: std::time::Duration,
}
#[derive(Debug, Clone)]
/// a result for a request
pub struct RequestResult {
pub rng: Pcg64Si,
// When the query should started
pub start_latency_correction: Option<std::time::Instant>,
/// When the query started
pub start: std::time::Instant,
/// DNS + dialup
/// None when reuse connection
pub connection_time: Option<ConnectionTime>,
/// First body byte received
pub first_byte: Option<std::time::Instant>,
/// When the query ends
pub end: std::time::Instant,
/// HTTP status
pub status: http::StatusCode,
/// Length of body
pub len_bytes: usize,
}
impl RequestResult {
/// Duration the request takes.
pub fn duration(&self) -> std::time::Duration {
self.end - self.start_latency_correction.unwrap_or(self.start)
}
}
// encapsulates the HTTP generation of the work type. Used internally only for conditional logic.
#[derive(Debug, Clone, Copy, PartialEq)]
enum HttpWorkType {
H1,
H2,
#[cfg(feature = "http3")]
H3,
}
pub struct Dns {
pub connect_to: Vec<ConnectToEntry>,
pub resolver:
hickory_resolver::Resolver<hickory_resolver::name_server::TokioConnectionProvider>,
}
impl Dns {
fn select_connect_to<'a, R: Rng>(
&'a self,
host: &str,
port: u16,
rng: &mut R,
) -> Option<&'a ConnectToEntry> {
self.connect_to
.iter()
.filter(|entry| entry.requested_port == port && entry.requested_host == host)
.collect::<Vec<_>>()
.choose(rng)
.copied()
}
/// Perform a DNS lookup for a given url and returns (ip_addr, port)
async fn lookup<R: Rng>(
&self,
url: &Url,
rng: &mut R,
) -> Result<(std::net::IpAddr, u16), ClientError> {
let host = url.host_str().ok_or(ClientError::HostNotFound)?;
let port = url
.port_or_known_default()
.ok_or(ClientError::PortNotFound)?;
// Try to find an override (passed via `--connect-to`) that applies to this (host, port),
// choosing one randomly if several match.
let (host, port) = if let Some(entry) = self.select_connect_to(host, port, rng) {
(entry.target_host.as_str(), entry.target_port)
} else {
(host, port)
};
let host = if host.starts_with('[') && host.ends_with(']') {
// host is [ipv6] format
// remove first [ and last ]
&host[1..host.len() - 1]
} else {
host
};
// Perform actual DNS lookup, either on the original (host, port), or
// on the (host, port) specified with `--connect-to`.
let addrs = self
.resolver
.lookup_ip(host)
.await
.map_err(Box::new)?
.iter()
.collect::<Vec<_>>();
let addr = *addrs.choose(rng).ok_or(ClientError::DNSNoRecord)?;
Ok((addr, port))
}
}
#[derive(Error, Debug)]
pub enum ClientError {
#[error("failed to get port from URL")]
PortNotFound,
#[error("failed to get host from URL")]
HostNotFound,
#[error("No record returned from DNS")]
DNSNoRecord,
#[error("Redirection limit has reached")]
TooManyRedirect,
#[error(transparent)]
// Use Box here because ResolveError is big.
Resolve(#[from] Box<hickory_resolver::ResolveError>),
#[cfg(feature = "native-tls")]
#[error(transparent)]
NativeTls(#[from] native_tls::Error),
#[cfg(feature = "rustls")]
#[error(transparent)]
Rustls(#[from] rustls::Error),
#[cfg(feature = "rustls")]
#[error(transparent)]
InvalidDnsName(#[from] rustls_pki_types::InvalidDnsNameError),
#[error(transparent)]
Io(#[from] std::io::Error),
#[error(transparent)]
Http(#[from] http::Error),
#[error(transparent)]
Hyper(#[from] hyper::Error),
#[error(transparent)]
InvalidUriParts(#[from] http::uri::InvalidUriParts),
#[error(transparent)]
InvalidHeaderValue(#[from] http::header::InvalidHeaderValue),
#[error("Failed to get header from builder")]
GetHeaderFromBuilder,
#[error(transparent)]
HeaderToStr(#[from] http::header::ToStrError),
#[error(transparent)]
InvalidUri(#[from] http::uri::InvalidUri),
#[error("timeout")]
Timeout,
#[error("aborted due to deadline")]
Deadline,
#[error(transparent)]
UrlGenerator(#[from] UrlGeneratorError),
#[error(transparent)]
UrlParse(#[from] ParseError),
#[error("Request generation error: {0}")]
RequestGeneration(#[from] RequestGenerationError),
#[cfg(feature = "http3")]
#[error(transparent)]
Http3(#[from] crate::client_h3::Http3Error),
}
pub struct Client {
pub request_generator: RequestGenerator,
pub proxy_http_version: http::Version,
pub proxy_headers: http::header::HeaderMap,
pub dns: Dns,
pub timeout: Option<std::time::Duration>,
pub connect_timeout: std::time::Duration,
pub redirect_limit: usize,
pub disable_keepalive: bool,
pub proxy_url: Option<Url>,
#[cfg(unix)]
pub unix_socket: Option<std::path::PathBuf>,
#[cfg(feature = "vsock")]
pub vsock_addr: Option<tokio_vsock::VsockAddr>,
#[cfg(feature = "rustls")]
pub rustls_configs: crate::tls_config::RuslsConfigs,
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
pub native_tls_connectors: crate::tls_config::NativeTlsConnectors,
}
#[cfg(test)]
impl Default for Client {
fn default() -> Self {
use crate::request_generator::BodyGenerator;
let (resolver_config, resolver_opts) = crate::system_resolv_conf()
.unwrap_or_else(|_| (ResolverConfig::default(), ResolverOpts::default()));
let resolver = hickory_resolver::Resolver::builder_with_config(
resolver_config,
hickory_resolver::name_server::TokioConnectionProvider::default(),
)
.with_options(resolver_opts)
.build();
Self {
request_generator: RequestGenerator {
url_generator: crate::url_generator::UrlGenerator::new_static(
"http://example.com".parse().unwrap(),
),
https: false,
http_proxy: None,
method: http::Method::GET,
version: http::Version::HTTP_11,
headers: http::header::HeaderMap::new(),
body_generator: BodyGenerator::Static(Bytes::new()),
aws_config: None,
},
proxy_http_version: http::Version::HTTP_11,
proxy_headers: http::header::HeaderMap::new(),
dns: Dns {
resolver,
connect_to: Vec::new(),
},
timeout: None,
connect_timeout: std::time::Duration::from_secs(5),
redirect_limit: 0,
disable_keepalive: false,
proxy_url: None,
#[cfg(unix)]
unix_socket: None,
#[cfg(feature = "vsock")]
vsock_addr: None,
#[cfg(feature = "rustls")]
rustls_configs: crate::tls_config::RuslsConfigs::new(false, None, None),
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
native_tls_connectors: crate::tls_config::NativeTlsConnectors::new(false, None, None),
}
}
}
struct ClientStateHttp1 {
rng: Pcg64Si,
send_request: Option<SendRequestHttp1>,
}
impl Default for ClientStateHttp1 {
fn default() -> Self {
Self {
rng: SeedableRng::from_os_rng(),
send_request: None,
}
}
}
struct ClientStateHttp2 {
rng: Pcg64Si,
send_request: SendRequestHttp2,
}
pub enum QueryLimit {
Qps(f64),
Burst(std::time::Duration, usize),
}
// To avoid dynamic dispatch
// I'm not sure how much this is effective
pub(crate) enum Stream {
Tcp(TcpStream),
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
Tls(tokio_native_tls::TlsStream<TcpStream>),
#[cfg(feature = "rustls")]
// Box for large variant
Tls(Box<tokio_rustls::client::TlsStream<TcpStream>>),
#[cfg(unix)]
Unix(tokio::net::UnixStream),
#[cfg(feature = "vsock")]
Vsock(tokio_vsock::VsockStream),
#[cfg(feature = "http3")]
Quic(quinn::Connection),
}
impl Stream {
async fn handshake_http1(self, with_upgrade: bool) -> Result<SendRequestHttp1, ClientError> {
match self {
Stream::Tcp(stream) => {
let (send_request, conn) =
hyper::client::conn::http1::handshake(TokioIo::new(stream)).await?;
if with_upgrade {
tokio::spawn(conn.with_upgrades());
} else {
tokio::spawn(conn);
}
Ok(send_request)
}
Stream::Tls(stream) => {
let (send_request, conn) =
hyper::client::conn::http1::handshake(TokioIo::new(stream)).await?;
if with_upgrade {
tokio::spawn(conn.with_upgrades());
} else {
tokio::spawn(conn);
}
Ok(send_request)
}
#[cfg(unix)]
Stream::Unix(stream) => {
let (send_request, conn) =
hyper::client::conn::http1::handshake(TokioIo::new(stream)).await?;
if with_upgrade {
tokio::spawn(conn.with_upgrades());
} else {
tokio::spawn(conn);
}
Ok(send_request)
}
#[cfg(feature = "vsock")]
Stream::Vsock(stream) => {
let (send_request, conn) =
hyper::client::conn::http1::handshake(TokioIo::new(stream)).await?;
if with_upgrade {
tokio::spawn(conn.with_upgrades());
} else {
tokio::spawn(conn);
}
Ok(send_request)
}
#[cfg(feature = "http3")]
Stream::Quic(_) => {
panic!("quic is not supported in http1")
}
}
}
async fn handshake_http2(self) -> Result<SendRequestHttp2, ClientError> {
let mut builder = hyper::client::conn::http2::Builder::new(TokioExecutor::new());
builder
// from nghttp2's default
.initial_stream_window_size((1 << 30) - 1)
.initial_connection_window_size((1 << 30) - 1);
match self {
Stream::Tcp(stream) => {
let (send_request, conn) = builder.handshake(TokioIo::new(stream)).await?;
tokio::spawn(conn);
Ok(send_request)
}
Stream::Tls(stream) => {
let (send_request, conn) = builder.handshake(TokioIo::new(stream)).await?;
tokio::spawn(conn);
Ok(send_request)
}
#[cfg(unix)]
Stream::Unix(stream) => {
let (send_request, conn) = builder.handshake(TokioIo::new(stream)).await?;
tokio::spawn(conn);
Ok(send_request)
}
#[cfg(feature = "vsock")]
Stream::Vsock(stream) => {
let (send_request, conn) = builder.handshake(TokioIo::new(stream)).await?;
tokio::spawn(conn);
Ok(send_request)
}
#[cfg(feature = "http3")]
Stream::Quic(_) => {
panic!("quic is not supported in http2")
}
}
}
}
impl Client {
#[inline]
fn is_http2(&self) -> bool {
self.request_generator.version == http::Version::HTTP_2
}
#[inline]
fn is_proxy_http2(&self) -> bool {
self.proxy_http_version == http::Version::HTTP_2
}
fn is_work_http2(&self) -> bool {
if self.proxy_url.is_some() {
if self.request_generator.https {
self.is_http2()
} else {
self.is_proxy_http2()
}
} else {
self.is_http2()
}
}
fn work_type(&self) -> HttpWorkType {
#[cfg(feature = "http3")]
if self.request_generator.version == http::Version::HTTP_3 {
return HttpWorkType::H3;
}
if self.is_work_http2() {
HttpWorkType::H2
} else {
HttpWorkType::H1
}
}
/// Perform a DNS lookup to cache it
/// This is useful to avoid DNS lookup latency at the first concurrent requests
pub async fn pre_lookup(&self) -> Result<(), ClientError> {
// If the client is using a unix socket, we don't need to do a DNS lookup
#[cfg(unix)]
if self.unix_socket.is_some() {
return Ok(());
}
// If the client is using a vsock address, we don't need to do a DNS lookup
#[cfg(feature = "vsock")]
if self.vsock_addr.is_some() {
return Ok(());
}
let mut rng = StdRng::from_os_rng();
let url = self.request_generator.url_generator.generate(&mut rng)?;
// It automatically caches the result
self.dns.lookup(&url, &mut rng).await?;
Ok(())
}
#[allow(clippy::type_complexity)]
pub fn generate_request<R: Rng + Copy>(
&self,
rng: &mut R,
) -> Result<(Cow<'_, Url>, Request<Full<Bytes>>, R), ClientError> {
let snapshot = *rng;
let (url, mut req) = self.request_generator.generate(rng)?;
if self.proxy_url.is_some() && req.uri().scheme_str() == Some("http") {
if let Some(authority) = req.uri().authority() {
let requested_host = authority.host();
let requested_port = authority.port_u16().unwrap_or(80);
if let Some(entry) = self
.dns
.select_connect_to(requested_host, requested_port, rng)
{
let new_authority: http::uri::Authority =
format_host_port(entry.target_host.as_str(), entry.target_port).parse()?;
let mut parts = req.uri().clone().into_parts();
parts.authority = Some(new_authority);
let new_uri = http::Uri::from_parts(parts)?;
*req.uri_mut() = new_uri;
}
}
}
Ok((url, req, snapshot))
}
/**
* Returns a stream of the underlying transport. NOT a HTTP client
*/
pub(crate) async fn client<R: Rng>(
&self,
url: &Url,
rng: &mut R,
http_version: http::Version,
) -> Result<(Instant, Stream), ClientError> {
let timeout_duration = self.connect_timeout;
#[cfg(feature = "http3")]
if http_version == http::Version::HTTP_3 {
let addr = self.dns.lookup(url, rng).await?;
let dns_lookup = Instant::now();
let stream = tokio::time::timeout(timeout_duration, self.quic_client(addr, url)).await;
return match stream {
Ok(Ok(stream)) => Ok((dns_lookup, stream)),
Ok(Err(err)) => Err(err),
Err(_) => Err(ClientError::Timeout),
};
}
if url.scheme() == "https" {
let addr = self.dns.lookup(url, rng).await?;
let dns_lookup = Instant::now();
// If we do not put a timeout here then the connections attempts will
// linger long past the configured timeout
let stream =
tokio::time::timeout(timeout_duration, self.tls_client(addr, url, http_version))
.await;
return match stream {
Ok(Ok(stream)) => Ok((dns_lookup, stream)),
Ok(Err(err)) => Err(err),
Err(_) => Err(ClientError::Timeout),
};
}
#[cfg(unix)]
if let Some(socket_path) = &self.unix_socket {
let dns_lookup = Instant::now();
let stream = tokio::time::timeout(
timeout_duration,
tokio::net::UnixStream::connect(socket_path),
)
.await;
return match stream {
Ok(Ok(stream)) => Ok((dns_lookup, Stream::Unix(stream))),
Ok(Err(err)) => Err(ClientError::Io(err)),
Err(_) => Err(ClientError::Timeout),
};
}
#[cfg(feature = "vsock")]
if let Some(addr) = self.vsock_addr {
let dns_lookup = Instant::now();
let stream =
tokio::time::timeout(timeout_duration, tokio_vsock::VsockStream::connect(addr))
.await;
return match stream {
Ok(Ok(stream)) => Ok((dns_lookup, Stream::Vsock(stream))),
Ok(Err(err)) => Err(ClientError::Io(err)),
Err(_) => Err(ClientError::Timeout),
};
}
// HTTP
let addr = self.dns.lookup(url, rng).await?;
let dns_lookup = Instant::now();
let stream =
tokio::time::timeout(timeout_duration, tokio::net::TcpStream::connect(addr)).await;
match stream {
Ok(Ok(stream)) => {
stream.set_nodelay(true)?;
Ok((dns_lookup, Stream::Tcp(stream)))
}
Ok(Err(err)) => Err(ClientError::Io(err)),
Err(_) => Err(ClientError::Timeout),
}
}
async fn tls_client(
&self,
addr: (std::net::IpAddr, u16),
url: &Url,
http_version: http::Version,
) -> Result<Stream, ClientError> {
let stream = tokio::net::TcpStream::connect(addr).await?;
stream.set_nodelay(true)?;
let stream = self.connect_tls(stream, url, http_version).await?;
Ok(Stream::Tls(stream))
}
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
async fn connect_tls<S>(
&self,
stream: S,
url: &Url,
http_version: http::Version,
) -> Result<tokio_native_tls::TlsStream<S>, ClientError>
where
S: AsyncRead + AsyncWrite + Unpin,
{
let connector = self
.native_tls_connectors
.connector(http_version >= http::Version::HTTP_2);
let stream = connector
.connect(url.host_str().ok_or(ClientError::HostNotFound)?, stream)
.await?;
Ok(stream)
}
#[cfg(feature = "rustls")]
async fn connect_tls<S>(
&self,
stream: S,
url: &Url,
http_version: http::Version,
) -> Result<Box<tokio_rustls::client::TlsStream<S>>, ClientError>
where
S: AsyncRead + AsyncWrite + Unpin,
{
let connector =
tokio_rustls::TlsConnector::from(self.rustls_configs.config(http_version).clone());
let domain = rustls_pki_types::ServerName::try_from(
url.host_str().ok_or(ClientError::HostNotFound)?,
)?;
let stream = connector.connect(domain.to_owned(), stream).await?;
Ok(Box::new(stream))
}
async fn client_http1<R: Rng>(
&self,
url: &Url,
rng: &mut R,
) -> Result<(Instant, SendRequestHttp1), ClientError> {
if let Some(proxy_url) = &self.proxy_url {
let http_proxy_version = if self.is_proxy_http2() {
http::Version::HTTP_2
} else {
http::Version::HTTP_11
};
let (dns_lookup, stream) = self.client(proxy_url, rng, http_proxy_version).await?;
if url.scheme() == "https" {
let requested_host = url.host_str().ok_or(ClientError::HostNotFound)?;
let requested_port = url
.port_or_known_default()
.ok_or(ClientError::PortNotFound)?;
let (connect_host, connect_port) = if let Some(entry) =
self.dns
.select_connect_to(requested_host, requested_port, rng)
{
(entry.target_host.as_str(), entry.target_port)
} else {
(requested_host, requested_port)
};
let connect_authority = format_host_port(connect_host, connect_port);
// Do CONNECT request to proxy
let req = {
let mut builder = http::Request::builder()
.method(Method::CONNECT)
.uri(connect_authority);
*builder
.headers_mut()
.ok_or(ClientError::GetHeaderFromBuilder)? = self.proxy_headers.clone();
builder.body(http_body_util::Full::default())?
};
let res = if self.proxy_http_version == http::Version::HTTP_2 {
let mut send_request = stream.handshake_http2().await?;
send_request.send_request(req).await?
} else {
let mut send_request = stream.handshake_http1(true).await?;
send_request.send_request(req).await?
};
let stream = hyper::upgrade::on(res).await?;
let stream = self
.connect_tls(TokioIo::new(stream), url, self.request_generator.version)
.await?;
let (send_request, conn) =
hyper::client::conn::http1::handshake(TokioIo::new(stream)).await?;
tokio::spawn(conn);
Ok((dns_lookup, send_request))
} else {
// Send full URL in request() for HTTP proxy
Ok((dns_lookup, stream.handshake_http1(false).await?))
}
} else {
let (dns_lookup, stream) = self.client(url, rng, http::Version::HTTP_11).await?;
Ok((dns_lookup, stream.handshake_http1(false).await?))
}
}
async fn work_http1(
&self,
client_state: &mut ClientStateHttp1,
) -> Result<RequestResult, ClientError> {
let do_req = async {
let (url, request, rng) = self.generate_request(&mut client_state.rng)?;
let mut start = std::time::Instant::now();
let mut first_byte: Option<std::time::Instant> = None;
let mut connection_time: Option<ConnectionTime> = None;
let mut send_request = if let Some(send_request) = client_state.send_request.take() {
send_request
} else {
let (dns_lookup, send_request) =
self.client_http1(&url, &mut client_state.rng).await?;
let dialup = std::time::Instant::now();
connection_time = Some(ConnectionTime {
dns_lookup: dns_lookup - start,
dialup: dialup - start,
});
send_request
};
while send_request.ready().await.is_err() {
// This gets hit when the connection for HTTP/1.1 faults
// This re-connects
start = std::time::Instant::now();
let (dns_lookup, send_request_) =
self.client_http1(&url, &mut client_state.rng).await?;
send_request = send_request_;
let dialup = std::time::Instant::now();
connection_time = Some(ConnectionTime {
dns_lookup: dns_lookup - start,
dialup: dialup - start,
});
}
match send_request.send_request(request).await {
Ok(res) => {
let (parts, mut stream) = res.into_parts();
let mut status = parts.status;
let mut len_bytes = 0;
while let Some(chunk) = stream.frame().await {
if first_byte.is_none() {
first_byte = Some(std::time::Instant::now())
}
len_bytes += chunk?.data_ref().map(|d| d.len()).unwrap_or_default();
}
if self.redirect_limit != 0 {
if let Some(location) = parts.headers.get("Location") {
let (send_request_redirect, new_status, len) = self
.redirect(
url,
rng,
send_request,
location,
self.redirect_limit,
&mut client_state.rng,
)
.await?;
send_request = send_request_redirect;
status = new_status;
len_bytes = len;
}
}
let end = std::time::Instant::now();
let result = RequestResult {
rng,
start_latency_correction: None,
start,
first_byte,
end,
status,
len_bytes,
connection_time,
};
if !self.disable_keepalive {
client_state.send_request = Some(send_request);
}
Ok::<_, ClientError>(result)
}
Err(e) => {
client_state.send_request = Some(send_request);
Err(e.into())
}
}
};
if let Some(timeout) = self.timeout {
tokio::select! {
res = do_req => {
res
}
_ = tokio::time::sleep(timeout) => {
Err(ClientError::Timeout)
}
}
} else {
do_req.await
}
}
async fn connect_http2<R: Rng>(
&self,
url: &Url,
rng: &mut R,
) -> Result<(ConnectionTime, SendRequestHttp2), ClientError> {
let start = std::time::Instant::now();
if let Some(proxy_url) = &self.proxy_url {
let http_proxy_version = if self.is_proxy_http2() {
http::Version::HTTP_2
} else {
http::Version::HTTP_11
};
let (dns_lookup, stream) = self.client(proxy_url, rng, http_proxy_version).await?;
if url.scheme() == "https" {
let requested_host = url.host_str().ok_or(ClientError::HostNotFound)?;
let requested_port = url
.port_or_known_default()
.ok_or(ClientError::PortNotFound)?;
let (connect_host, connect_port) = if let Some(entry) =
self.dns
.select_connect_to(requested_host, requested_port, rng)
{
(entry.target_host.as_str(), entry.target_port)
} else {
(requested_host, requested_port)
};
let connect_authority = format_host_port(connect_host, connect_port);
let req = {
let mut builder = http::Request::builder()
.method(Method::CONNECT)
.uri(connect_authority);
*builder
.headers_mut()
.ok_or(ClientError::GetHeaderFromBuilder)? = self.proxy_headers.clone();
builder.body(http_body_util::Full::default())?
};
let res = if self.proxy_http_version == http::Version::HTTP_2 {
let mut send_request = stream.handshake_http2().await?;
send_request.send_request(req).await?
} else {
let mut send_request = stream.handshake_http1(true).await?;
send_request.send_request(req).await?
};
let stream = hyper::upgrade::on(res).await?;
let stream = self
.connect_tls(TokioIo::new(stream), url, http::Version::HTTP_2)
.await?;
let (send_request, conn) =
hyper::client::conn::http2::Builder::new(TokioExecutor::new())
// from nghttp2's default
.initial_stream_window_size((1 << 30) - 1)
.initial_connection_window_size((1 << 30) - 1)
.handshake(TokioIo::new(stream))
.await?;
tokio::spawn(conn);
let dialup = std::time::Instant::now();
Ok((
ConnectionTime {
dns_lookup: dns_lookup - start,
dialup: dialup - start,
},
send_request,
))
} else {
let send_request = stream.handshake_http2().await?;
let dialup = std::time::Instant::now();
Ok((
ConnectionTime {
dns_lookup: dns_lookup - start,
dialup: dialup - start,
},
send_request,
))
}
} else {
let (dns_lookup, stream) = self
.client(url, rng, self.request_generator.version)
.await?;
let send_request = stream.handshake_http2().await?;
let dialup = std::time::Instant::now();
Ok((
ConnectionTime {
dns_lookup: dns_lookup - start,
dialup: dialup - start,
},
send_request,
))
}
}
async fn work_http2(
&self,
client_state: &mut ClientStateHttp2,
) -> Result<RequestResult, ClientError> {
let do_req = async {
let (_url, request, rng) = self.generate_request(&mut client_state.rng)?;
let start = std::time::Instant::now();
let mut first_byte: Option<std::time::Instant> = None;
let connection_time: Option<ConnectionTime> = None;
match client_state.send_request.send_request(request).await {
Ok(res) => {
let (parts, mut stream) = res.into_parts();
let status = parts.status;
let mut len_bytes = 0;
while let Some(chunk) = stream.frame().await {
if first_byte.is_none() {
first_byte = Some(std::time::Instant::now())
}
len_bytes += chunk?.data_ref().map(|d| d.len()).unwrap_or_default();
}
let end = std::time::Instant::now();
let result = RequestResult {
rng,
start_latency_correction: None,
start,
first_byte,
end,
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | true |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/client_h3.rs | src/client_h3.rs | use bytes::Buf;
use bytes::Bytes;
use core::sync::atomic::Ordering;
use http::Request;
use http_body_util::BodyExt;
use hyper::http;
use kanal::AsyncReceiver;
use quinn::default_runtime;
use std::net::SocketAddr;
use std::net::UdpSocket;
use std::sync::Arc;
use std::sync::atomic::AtomicBool;
use std::sync::atomic::AtomicIsize;
use std::time::Instant;
use tokio::sync::Semaphore;
use url::Url;
pub type SendRequestHttp3 = (
h3::client::Connection<h3_quinn::Connection, Bytes>,
h3::client::SendRequest<h3_quinn::OpenStreams, Bytes>,
);
// HTTP3-specific error types
#[derive(thiserror::Error, Debug)]
pub enum Http3Error {
#[error("QUIC Client: {0}")]
QuicClientConfig(#[from] quinn::crypto::rustls::NoInitialCipherSuite),
#[error("QUIC connect: {0}")]
QuicConnect(#[from] quinn::ConnectError),
#[error("QUIC connection: {0}")]
QuicConnection(#[from] quinn::ConnectionError),
#[error("Quic connection closed earlier than expected")]
QuicDriverClosedEarly(#[from] tokio::sync::oneshot::error::RecvError),
#[error("HTTP3 connection: {0}")]
H3Connection(#[from] h3::error::ConnectionError),
#[error("HTTP3 Stream: {0}")]
H3Stream(#[from] h3::error::StreamError),
}
use crate::client::QueryLimit;
use crate::client::{
Client, ClientError, ConnectionTime, RequestResult, Stream, is_cancel_error,
set_connection_time, set_start_latency_correction,
};
use crate::pcg64si::Pcg64Si;
use crate::result_data::ResultData;
use rand::SeedableRng;
use rand::prelude::Rng;
pub(crate) struct ClientStateHttp3 {
pub(crate) rng: Pcg64Si,
pub(crate) send_request: h3::client::SendRequest<h3_quinn::OpenStreams, Bytes>,
}
impl ClientStateHttp3 {
fn new(send_request: h3::client::SendRequest<h3_quinn::OpenStreams, Bytes>) -> Self {
Self {
rng: SeedableRng::from_os_rng(),
send_request,
}
}
}
impl Client {
pub(crate) async fn connect_http3<R: Rng>(
&self,
url: &Url,
rng: &mut R,
) -> Result<(ConnectionTime, SendRequestHttp3), ClientError> {
let start = std::time::Instant::now();
let (dns_lookup, stream) = self.client(url, rng, http::Version::HTTP_3).await?;
let send_request = stream.handshake_http3().await?;
let dialup = std::time::Instant::now();
Ok((
ConnectionTime {
dns_lookup: dns_lookup - start,
dialup: dialup - start,
},
send_request,
))
}
pub(crate) async fn quic_client(
&self,
addr: (std::net::IpAddr, u16),
url: &Url,
) -> Result<Stream, ClientError> {
let endpoint_config = h3_quinn::quinn::EndpointConfig::default();
let local_socket = UdpSocket::bind("0.0.0.0:0").expect("couldn't bind to address");
// If we can set the right build flags, we can use `h3_quinn::quinn::Endpoint::client` instead
let mut client_endpoint = h3_quinn::quinn::Endpoint::new(
endpoint_config,
None,
local_socket,
default_runtime().unwrap(),
)
.unwrap();
let tls_config = self.rustls_configs.config(http::Version::HTTP_3).clone();
let client_config = quinn::ClientConfig::new(Arc::new(
quinn::crypto::rustls::QuicClientConfig::try_from(tls_config)
.map_err(Http3Error::from)?,
));
client_endpoint.set_default_client_config(client_config);
let remote_socket_address = SocketAddr::new(addr.0, addr.1);
let server_name = url.host_str().ok_or(ClientError::HostNotFound)?;
let conn = client_endpoint
.connect(remote_socket_address, server_name)
.map_err(Http3Error::from)?
.await
.map_err(Http3Error::from)?;
Ok(Stream::Quic(conn))
}
pub(crate) async fn work_http3(
&self,
client_state: &mut ClientStateHttp3,
) -> Result<RequestResult, ClientError> {
let do_req = async {
let (_url, request, rng) = self.generate_request(&mut client_state.rng)?;
let start = std::time::Instant::now();
let connection_time: Option<ConnectionTime> = None;
let mut first_byte: Option<std::time::Instant> = None;
// if we implement http_body::Body on our H3 SendRequest, we can do some nice streaming stuff
// with the response here. However as we don't really use the response we can get away
// with not doing this for now
let (head, mut req_body) = request.into_parts();
let request = http::request::Request::from_parts(head, ());
let mut stream = client_state
.send_request
.send_request(request)
.await
.map_err(Http3Error::from)?;
// send the request body now
if let Some(Ok(frame)) = req_body.frame().await {
if let Ok(data) = frame.into_data() {
stream.send_data(data).await.map_err(Http3Error::from)?;
}
}
stream.finish().await.map_err(Http3Error::from)?;
// now read the response headers
let response = stream.recv_response().await.map_err(Http3Error::from)?;
let (parts, _) = response.into_parts();
let status = parts.status;
// now read the response body
let mut len_bytes = 0;
while let Some(chunk) = stream.recv_data().await.map_err(Http3Error::from)? {
if first_byte.is_none() {
first_byte = Some(std::time::Instant::now())
}
len_bytes += chunk.remaining();
}
let end = std::time::Instant::now();
let result = RequestResult {
rng,
start_latency_correction: None,
start,
first_byte,
end,
status,
len_bytes,
connection_time,
};
Ok::<_, ClientError>(result)
};
if let Some(timeout) = self.timeout {
tokio::select! {
res = do_req => {
res
}
_ = tokio::time::sleep(timeout) => {
Err(ClientError::Timeout)
}
}
} else {
do_req.await
}
}
}
impl Stream {
async fn handshake_http3(self) -> Result<SendRequestHttp3, Http3Error> {
let Stream::Quic(quic_conn) = self else {
panic!("You cannot call http3 handshake on a non-quic stream");
};
let h3_quinn_conn = h3_quinn::Connection::new(quic_conn);
// TODO add configuration settings to allow 'send_grease' etc.
Ok(h3::client::new(h3_quinn_conn).await?)
}
}
pub(crate) async fn send_debug_request_http3(
h3_connection: h3::client::Connection<h3_quinn::Connection, Bytes>,
mut client_state: h3::client::SendRequest<h3_quinn::OpenStreams, Bytes>,
request: Request<http_body_util::Full<Bytes>>,
) -> Result<http::Response<Bytes>, Http3Error> {
// Prepare a channel to stop the driver thread
let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel();
// Run the driver
let http3_driver = spawn_http3_driver(h3_connection, shutdown_rx).await;
let (head, mut req_body) = request.into_parts();
let request = http::request::Request::from_parts(head, ());
let mut stream = client_state.send_request(request).await?;
if let Some(Ok(frame)) = req_body.frame().await {
if let Ok(data) = frame.into_data() {
stream.send_data(data).await?;
}
}
stream.finish().await?;
let response = stream.recv_response().await.unwrap_or_else(|err| {
panic!("{}", err);
});
let mut body_bytes = bytes::BytesMut::new();
while let Some(mut chunk) = stream.recv_data().await? {
let bytes = chunk.copy_to_bytes(chunk.remaining());
body_bytes.extend_from_slice(&bytes);
}
let body = body_bytes.freeze();
let (parts, _) = response.into_parts();
let _ = shutdown_tx.send(0);
let _ = http3_driver.await.unwrap();
Ok(http::Response::from_parts(parts, body))
}
/**
* Create `n_connections` parallel HTTP3 connections (on independent QUIC connections).
* On each of those, run `n_http3_parallel` requests continuously until `deadline` is reached.
*/
pub(crate) async fn parallel_work_http3(
n_connections: usize,
n_http_parallel: usize,
rx: AsyncReceiver<Option<Instant>>,
report_tx: kanal::Sender<Result<RequestResult, ClientError>>,
client: Arc<Client>,
deadline: Option<std::time::Instant>,
) -> Vec<tokio::task::JoinHandle<()>> {
let s = Arc::new(tokio::sync::Semaphore::new(0));
let has_deadline = deadline.is_some();
let futures = (0..n_connections)
.map(|_| {
let report_tx = report_tx.clone();
let rx = rx.clone();
let client = client.clone();
let s = s.clone();
tokio::spawn(create_and_load_up_single_connection_http3(
n_http_parallel,
rx,
report_tx,
client,
s,
))
})
.collect::<Vec<_>>();
if has_deadline {
tokio::time::sleep_until(deadline.unwrap().into()).await;
s.close();
}
futures
}
/**
* For use in the 'slow' functions - send a report of every response in real time for display to the end-user.
* Semaphore is closed to shut down all the tasks.
* Very similar to how http2 loops work, just that we explicitly spawn the HTTP3 connection driver.
*/
async fn create_and_load_up_single_connection_http3(
n_http_parallel: usize,
rx: AsyncReceiver<Option<Instant>>,
report_tx: kanal::Sender<Result<RequestResult, ClientError>>,
client: Arc<Client>,
s: Arc<Semaphore>,
) {
let mut rng: Pcg64Si = SeedableRng::from_os_rng();
loop {
// create a HTTP3 connection
match setup_http3(&client, &mut rng).await {
Ok((connection_time, (h3_connection, send_request))) => {
let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel();
let http3_driver = spawn_http3_driver(h3_connection, shutdown_rx).await;
let futures = (0..n_http_parallel)
.map(|_| {
let report_tx = report_tx.clone();
let rx = rx.clone();
let client = client.clone();
let mut client_state = ClientStateHttp3::new(send_request.clone());
let s = s.clone();
tokio::spawn(async move {
// This is where HTTP3 loops to make all the requests for a given client and worker
while let Ok(start_time_option) = rx.recv().await {
let (is_cancel, is_reconnect) = work_http3_once(
&client,
&mut client_state,
&report_tx,
connection_time,
start_time_option,
)
.await;
let is_cancel = is_cancel || s.is_closed();
if is_cancel || is_reconnect {
return is_cancel;
}
}
true
})
})
.collect::<Vec<_>>();
// collect all the requests we have spawned, and end the process if/when the semaphore says
let mut connection_gone = false;
for f in futures {
tokio::select! {
r = f => {
match r {
Ok(true) => {
// All works done
connection_gone = true;
}
Err(_) => {
// Unexpected
connection_gone = true;
}
_ => {}
}
}
_ = s.acquire() => {
report_tx.send(Err(ClientError::Deadline)).unwrap();
connection_gone = true;
}
}
}
if connection_gone {
// Try and politely shut down the HTTP3 connection
let _ = shutdown_tx.send(0);
let _ = http3_driver.await;
return;
}
}
Err(err) => {
if s.is_closed() {
break;
// Consume a task
} else if rx.recv().await.is_ok() {
report_tx.send(Err(err)).unwrap();
} else {
return;
}
}
}
}
}
/**
* This is structured to work very similarly to the `setup_http2`
* function in `client.rs`
*/
pub(crate) async fn setup_http3<R: Rng>(
client: &Client,
rng: &mut R,
) -> Result<(ConnectionTime, SendRequestHttp3), ClientError> {
let url = client.request_generator.url_generator.generate(rng)?;
// Whatever rng state, all urls should have the same authority
let (connection_time, send_request) = client.connect_http3(&url, rng).await?;
Ok((connection_time, send_request))
}
pub(crate) async fn spawn_http3_driver(
mut h3_connection: h3::client::Connection<h3_quinn::Connection, Bytes>,
shutdown_rx: tokio::sync::oneshot::Receiver<usize>,
) -> tokio::task::JoinHandle<std::result::Result<(), Http3Error>> {
tokio::spawn(async move {
tokio::select! {
// Drive the connection
closed = std::future::poll_fn(|cx| h3_connection.poll_close(cx)) => {
if closed.is_h3_no_error() {
Ok(())
} else {
Err(Http3Error::H3Connection(closed))
}
},
// Listen for shutdown condition
_ = shutdown_rx => {
// Initiate shutdown
h3_connection.shutdown(0).await?;
// Wait for ongoing work to complete
let closed = std::future::poll_fn(|cx| h3_connection.poll_close(cx)).await;
if closed.is_h3_no_error() {
Ok(())
} else {
Err(Http3Error::H3Connection(closed))
}
}
}
})
}
pub(crate) async fn work_http3_once(
client: &Client,
client_state: &mut ClientStateHttp3,
report_tx: &kanal::Sender<Result<RequestResult, ClientError>>,
connection_time: ConnectionTime,
start_latency_correction: Option<Instant>,
) -> (bool, bool) {
let mut res = client.work_http3(client_state).await;
let is_cancel = is_cancel_error(&res);
let is_reconnect = is_h3_error(&res);
set_connection_time(&mut res, connection_time);
if let Some(start_latency_correction) = start_latency_correction {
set_start_latency_correction(&mut res, start_latency_correction);
}
report_tx.send(res).unwrap();
(is_cancel, is_reconnect)
}
fn is_h3_error(res: &Result<RequestResult, ClientError>) -> bool {
res.as_ref()
.err()
.map(|err| matches!(err, ClientError::Http3(_) | ClientError::Io(_)))
.unwrap_or(false)
}
/**
* 'Fast' implementation of HTTP3 load generation.
* If `n_tasks` is set, it will generate up to that many tasks.
* Othrwise it will terminate when `is_end` becomes set to true.
*/
#[allow(clippy::too_many_arguments)]
pub(crate) fn http3_connection_fast_work_until(
num_connections: usize,
n_http_parallel: usize,
report_tx: kanal::Sender<ResultData>,
client: Arc<Client>,
token: tokio_util::sync::CancellationToken,
counter: Option<Arc<AtomicIsize>>,
is_end: Arc<AtomicBool>,
rt: tokio::runtime::Runtime,
) {
let is_counting_tasks = counter.is_some();
let client = client.clone();
let local = tokio::task::LocalSet::new();
for _ in 0..num_connections {
let report_tx = report_tx.clone();
let client = client.clone();
let token = token.clone();
let is_end = is_end.clone();
let counter = counter.clone();
local.spawn_local(Box::pin(async move {
let mut has_err = false;
let mut result_data_err = ResultData::default();
let mut rng: Pcg64Si = SeedableRng::from_os_rng();
loop {
let client = client.clone();
match setup_http3(&client, &mut rng).await {
Ok((connection_time, (h3_connection, send_request))) => {
let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel();
let http3_driver = spawn_http3_driver(h3_connection, shutdown_rx).await;
let futures = (0..n_http_parallel)
.map(|_| {
let mut client_state = ClientStateHttp3::new(send_request.clone());
let client = client.clone();
let report_tx = report_tx.clone();
let token = token.clone();
let is_end = is_end.clone();
let counter = counter.clone();
tokio::task::spawn_local(async move {
let mut result_data = ResultData::default();
let work = async {
loop {
if is_counting_tasks
&& counter
.as_ref()
.unwrap()
.fetch_sub(1, Ordering::Relaxed)
<= 0
{
return true;
}
let mut res =
client.work_http3(&mut client_state).await;
let is_cancel = is_cancel_error(&res)
|| is_end.load(Ordering::Relaxed);
let is_reconnect = is_h3_error(&res);
set_connection_time(&mut res, connection_time);
result_data.push(res);
if is_cancel || is_reconnect {
return is_cancel;
}
}
};
let is_cancel = tokio::select! {
is_cancel = work => {
is_cancel
}
_ = token.cancelled() => {
result_data.push(Err(ClientError::Deadline));
true
}
};
report_tx.send(result_data).unwrap();
is_cancel
})
})
.collect::<Vec<_>>();
let mut connection_gone = false;
for f in futures {
match f.await {
Ok(true) => {
// All works done
connection_gone = true;
}
Err(_) => {
// Unexpected
connection_gone = true;
}
_ => {}
}
}
if connection_gone {
let _ = shutdown_tx.send(0);
let _ = http3_driver.await;
break;
}
}
Err(err) => {
has_err = true;
result_data_err.push(Err(err));
if is_end.load(Ordering::Relaxed)
|| (is_counting_tasks
&& counter.as_ref().unwrap().fetch_sub(1, Ordering::Relaxed) <= 0)
{
break;
}
}
}
}
if has_err {
report_tx.send(result_data_err).unwrap();
}
}));
}
rt.block_on(local);
}
/// Work function for HTTP3 client that generates `n_tasks` tasks.
pub async fn work(
client: Arc<Client>,
report_tx: kanal::Sender<Result<RequestResult, ClientError>>,
n_tasks: usize,
n_connections: usize,
n_http2_parallel: usize,
) {
let (tx, rx) = kanal::unbounded::<Option<Instant>>();
let rx = rx.to_async();
let n_tasks_emitter = async move {
for _ in 0..n_tasks {
tx.send(None)?
}
drop(tx);
Ok::<(), kanal::SendError>(())
};
let futures =
parallel_work_http3(n_connections, n_http2_parallel, rx, report_tx, client, None).await;
n_tasks_emitter.await.unwrap();
for f in futures {
let _ = f.await;
}
}
/// n tasks by m workers limit to qps works in a second
pub async fn work_with_qps(
client: Arc<Client>,
report_tx: kanal::Sender<Result<RequestResult, ClientError>>,
query_limit: QueryLimit,
n_tasks: usize,
n_connections: usize,
n_http_parallel: usize,
) {
let (tx, rx) = kanal::unbounded::<Option<Instant>>();
let work_queue = async move {
match query_limit {
QueryLimit::Qps(qps) => {
let start = std::time::Instant::now();
for i in 0..n_tasks {
tokio::time::sleep_until(
(start + std::time::Duration::from_secs_f64(i as f64 * 1f64 / qps)).into(),
)
.await;
tx.send(None)?;
}
}
QueryLimit::Burst(duration, rate) => {
let mut n = 0;
// Handle via rate till n_tasks out of bound
while n + rate < n_tasks {
tokio::time::sleep(duration).await;
for _ in 0..rate {
tx.send(None)?;
}
n += rate;
}
// Handle the remaining tasks
if n_tasks > n {
tokio::time::sleep(duration).await;
for _ in 0..n_tasks - n {
tx.send(None)?;
}
}
}
}
// tx gone
drop(tx);
Ok::<(), kanal::SendError>(())
};
let rx = rx.to_async();
let futures =
parallel_work_http3(n_connections, n_http_parallel, rx, report_tx, client, None).await;
work_queue.await.unwrap();
for f in futures {
let _ = f.await;
}
}
/// n tasks by m workers limit to qps works in a second with latency correction
pub async fn work_with_qps_latency_correction(
client: Arc<Client>,
report_tx: kanal::Sender<Result<RequestResult, ClientError>>,
query_limit: QueryLimit,
n_tasks: usize,
n_connections: usize,
n_http2_parallel: usize,
) {
let (tx, rx) = kanal::unbounded();
let _work_queue = async move {
match query_limit {
QueryLimit::Qps(qps) => {
let start = std::time::Instant::now();
for i in 0..n_tasks {
tokio::time::sleep_until(
(start + std::time::Duration::from_secs_f64(i as f64 * 1f64 / qps)).into(),
)
.await;
let now = std::time::Instant::now();
tx.send(Some(now))?;
}
}
QueryLimit::Burst(duration, rate) => {
let mut n = 0;
// Handle via rate till n_tasks out of bound
while n + rate < n_tasks {
tokio::time::sleep(duration).await;
let now = std::time::Instant::now();
for _ in 0..rate {
tx.send(Some(now))?;
}
n += rate;
}
// Handle the remaining tasks
if n_tasks > n {
tokio::time::sleep(duration).await;
let now = std::time::Instant::now();
for _ in 0..n_tasks - n {
tx.send(Some(now))?;
}
}
}
}
// tx gone
drop(tx);
Ok::<(), kanal::SendError>(())
};
let rx = rx.to_async();
let futures =
parallel_work_http3(n_connections, n_http2_parallel, rx, report_tx, client, None).await;
for f in futures {
let _ = f.await;
}
}
/// Run until dead_line by n workers
pub async fn work_until(
client: Arc<Client>,
report_tx: kanal::Sender<Result<RequestResult, ClientError>>,
dead_line: std::time::Instant,
n_connections: usize,
n_http_parallel: usize,
_wait_ongoing_requests_after_deadline: bool,
) {
let (tx, rx) = kanal::bounded_async::<Option<Instant>>(5000);
// This emitter is used for H3 to give it unlimited tokens to emit work.
let cancel_token = tokio_util::sync::CancellationToken::new();
let emitter_handle = endless_emitter(cancel_token.clone(), tx).await;
let futures = parallel_work_http3(
n_connections,
n_http_parallel,
rx,
report_tx.clone(),
client.clone(),
Some(dead_line),
)
.await;
for f in futures {
let _ = f.await;
}
// Cancel the emitter when we're done with the futures
cancel_token.cancel();
// Wait for the emitter to exit cleanly
let _ = emitter_handle.await;
}
/// Run until dead_line by n workers limit to qps works in a second
#[allow(clippy::too_many_arguments)]
pub async fn work_until_with_qps(
client: Arc<Client>,
report_tx: kanal::Sender<Result<RequestResult, ClientError>>,
query_limit: QueryLimit,
start: std::time::Instant,
dead_line: std::time::Instant,
n_connections: usize,
n_http2_parallel: usize,
_wait_ongoing_requests_after_deadline: bool,
) {
let rx = match query_limit {
QueryLimit::Qps(qps) => {
let (tx, rx) = kanal::unbounded::<Option<Instant>>();
tokio::spawn(async move {
for i in 0.. {
if std::time::Instant::now() > dead_line {
break;
}
tokio::time::sleep_until(
(start + std::time::Duration::from_secs_f64(i as f64 * 1f64 / qps)).into(),
)
.await;
let _ = tx.send(None);
}
// tx gone
});
rx
}
QueryLimit::Burst(duration, rate) => {
let (tx, rx) = kanal::unbounded();
tokio::spawn(async move {
// Handle via rate till deadline is reached
for _ in 0.. {
if std::time::Instant::now() > dead_line {
break;
}
tokio::time::sleep(duration).await;
for _ in 0..rate {
let _ = tx.send(None);
}
}
// tx gone
});
rx
}
};
let rx = rx.to_async();
let futures = parallel_work_http3(
n_connections,
n_http2_parallel,
rx,
report_tx,
client,
Some(dead_line),
)
.await;
for f in futures {
let _ = f.await;
}
}
/// Run until dead_line by n workers limit to qps works in a second with latency correction
#[allow(clippy::too_many_arguments)]
pub async fn work_until_with_qps_latency_correction(
client: Arc<Client>,
report_tx: kanal::Sender<Result<RequestResult, ClientError>>,
query_limit: QueryLimit,
start: std::time::Instant,
dead_line: std::time::Instant,
n_connections: usize,
n_http2_parallel: usize,
_wait_ongoing_requests_after_deadline: bool,
) {
let (tx, rx) = kanal::unbounded();
match query_limit {
QueryLimit::Qps(qps) => {
tokio::spawn(async move {
for i in 0.. {
tokio::time::sleep_until(
(start + std::time::Duration::from_secs_f64(i as f64 * 1f64 / qps)).into(),
)
.await;
let now = std::time::Instant::now();
if now > dead_line {
break;
}
let _ = tx.send(Some(now));
}
// tx gone
});
}
QueryLimit::Burst(duration, rate) => {
tokio::spawn(async move {
// Handle via rate till deadline is reached
loop {
tokio::time::sleep(duration).await;
let now = std::time::Instant::now();
if now > dead_line {
break;
}
for _ in 0..rate {
let _ = tx.send(Some(now));
}
}
// tx gone
});
}
};
let rx = rx.to_async();
let futures = parallel_work_http3(
n_connections,
n_http2_parallel,
rx,
report_tx,
client,
Some(dead_line),
)
.await;
for f in futures {
let _ = f.await;
}
}
#[cfg(feature = "http3")]
async fn endless_emitter(
cancellation_token: tokio_util::sync::CancellationToken,
tx: kanal::AsyncSender<Option<Instant>>,
) -> tokio::task::JoinHandle<()> {
tokio::spawn(async move {
loop {
tokio::select! {
_ = cancellation_token.cancelled() => {
break;
}
_ = async {
// As we our `work_http2_once` function is limited by the number of `tx` we send, but we only
// want to stop when our semaphore is closed, just dump unlimited `Nones` into the tx to un-constrain it
let _ = tx.send(None).await;
} => {}
}
}
})
}
pub mod fast {
use std::sync::{
Arc,
atomic::{AtomicBool, AtomicIsize, Ordering},
};
use crate::{
client::Client, client_h3::http3_connection_fast_work_until, result_data::ResultData,
};
/// Run n tasks by m workers
pub async fn work(
client: Arc<Client>,
report_tx: kanal::Sender<ResultData>,
n_tasks: usize,
n_connections: usize,
n_http_parallel: usize,
) {
let counter = Arc::new(AtomicIsize::new(n_tasks as isize));
let num_threads = num_cpus::get_physical();
let connections = (0..num_threads).filter_map(|i| {
let num_connection = n_connections / num_threads
+ (if (n_connections % num_threads) > i {
1
} else {
0
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | true |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/printer.rs | src/printer.rs | use crate::{result_data::ResultData, timescale::TimeScale};
use average::{Max, Min, Variance};
use byte_unit::Byte;
use crossterm::style::{StyledContent, Stylize};
use hyper::http::{self, StatusCode};
use ratatui::crossterm;
use std::{
collections::BTreeMap,
io::Write,
time::{Duration, Instant},
};
#[derive(Clone, Copy)]
struct StyleScheme {
style_enabled: bool,
}
impl StyleScheme {
fn no_style(self, text: &str) -> StyledContent<&str> {
StyledContent::new(crossterm::style::ContentStyle::new(), text)
}
fn heading(self, text: &str) -> StyledContent<&str> {
if self.style_enabled {
text.bold().underlined()
} else {
self.no_style(text)
}
}
fn success_rate(self, text: &str, success_rate: f64) -> StyledContent<&str> {
if self.style_enabled {
if success_rate >= 100.0 {
text.green().bold()
} else if success_rate >= 99.0 {
text.yellow().bold()
} else {
text.red().bold()
}
} else {
self.no_style(text)
}
}
fn fastest(self, text: &str) -> StyledContent<&str> {
if self.style_enabled {
text.green()
} else {
self.no_style(text)
}
}
fn slowest(self, text: &str) -> StyledContent<&str> {
if self.style_enabled {
text.yellow()
} else {
self.no_style(text)
}
}
fn average(self, text: &str) -> StyledContent<&str> {
if self.style_enabled {
text.cyan()
} else {
self.no_style(text)
}
}
fn latency_distribution(self, text: &str, label: f64) -> StyledContent<&str> {
// See #609 for justification of these thresholds
const LATENCY_YELLOW_THRESHOLD: f64 = 0.1;
const LATENCY_RED_THRESHOLD: f64 = 0.4;
if self.style_enabled {
if label <= LATENCY_YELLOW_THRESHOLD {
text.green()
} else if label <= LATENCY_RED_THRESHOLD {
text.yellow()
} else {
text.red()
}
} else {
self.no_style(text)
}
}
fn status_distribution(self, text: &str, status: StatusCode) -> StyledContent<&str> {
if self.style_enabled {
if status.is_success() {
text.green()
} else if status.is_client_error() {
text.yellow()
} else if status.is_server_error() {
text.red()
} else {
text.white()
}
} else {
self.no_style(text)
}
}
}
#[derive(Clone, Copy, Debug, Default, clap::ValueEnum)]
pub enum PrintMode {
#[default]
Text,
Json,
Csv,
Quiet,
}
pub struct PrintConfig {
pub output: Box<dyn Write + Send + 'static>,
pub mode: PrintMode,
pub disable_style: bool,
pub stats_success_breakdown: bool,
pub time_unit: Option<TimeScale>,
}
pub fn print_result(
mut config: PrintConfig,
start: Instant,
res: &ResultData,
total_duration: Duration,
) -> anyhow::Result<()> {
match config.mode {
PrintMode::Text => print_summary(
&mut config.output,
res,
total_duration,
config.disable_style,
config.stats_success_breakdown,
config.time_unit,
)?,
PrintMode::Json => print_json(
&mut config.output,
start,
res,
total_duration,
config.stats_success_breakdown,
)?,
PrintMode::Csv => print_csv(&mut config.output, start, res)?,
PrintMode::Quiet => { /* Do nothing */ }
}
Ok(())
}
/// Print all summary as JSON
fn print_json<W: Write>(
w: &mut W,
start: Instant,
res: &ResultData,
total_duration: Duration,
stats_success_breakdown: bool,
) -> serde_json::Result<()> {
use serde::Serialize;
#[derive(Serialize)]
struct Summary {
#[serde(rename = "successRate")]
success_rate: f64,
total: f64,
slowest: f64,
fastest: f64,
average: f64,
#[serde(rename = "requestsPerSec")]
requests_per_sec: f64,
#[serde(rename = "totalData")]
total_data: u64,
#[serde(rename = "sizePerRequest")]
size_per_request: Option<u64>,
#[serde(rename = "sizePerSec")]
size_per_sec: f64,
}
#[derive(Serialize)]
struct Triple {
average: f64,
fastest: f64,
slowest: f64,
}
#[derive(Serialize)]
struct Details {
#[serde(rename = "DNSDialup")]
dns_dialup: Triple,
#[serde(rename = "DNSLookup")]
dns_lookup: Triple,
}
#[derive(Serialize)]
struct Rps {
mean: f64,
stddev: f64,
max: f64,
min: f64,
percentiles: BTreeMap<String, f64>,
}
#[derive(Serialize)]
struct Result {
summary: Summary,
#[serde(rename = "responseTimeHistogram")]
response_time_histogram: BTreeMap<String, usize>,
#[serde(rename = "latencyPercentiles")]
latency_percentiles: BTreeMap<String, f64>,
#[serde(
rename = "responseTimeHistogramSuccessful",
skip_serializing_if = "Option::is_none"
)]
response_time_histogram_successful: Option<BTreeMap<String, usize>>,
#[serde(
rename = "latencyPercentilesSuccessful",
skip_serializing_if = "Option::is_none"
)]
latency_percentiles_successful: Option<BTreeMap<String, f64>>,
#[serde(
rename = "responseTimeHistogramNotSuccessful",
skip_serializing_if = "Option::is_none"
)]
response_time_histogram_not_successful: Option<BTreeMap<String, usize>>,
#[serde(
rename = "latencyPercentilesNotSuccessful",
skip_serializing_if = "Option::is_none"
)]
latency_percentiles_not_successful: Option<BTreeMap<String, f64>>,
#[serde(rename = "rps")]
rps: Rps,
details: Details,
#[serde(rename = "statusCodeDistribution")]
status_code_distribution: BTreeMap<String, usize>,
#[serde(rename = "errorDistribution")]
error_distribution: BTreeMap<String, usize>,
}
let latency_stat = res.latency_stat();
let summary = Summary {
success_rate: res.success_rate(),
total: total_duration.as_secs_f64(),
slowest: latency_stat.max(),
fastest: latency_stat.min(),
average: latency_stat.mean(),
requests_per_sec: res.len() as f64 / total_duration.as_secs_f64(),
total_data: res.total_data() as u64,
size_per_request: res.size_per_request(),
size_per_sec: res.total_data() as f64 / total_duration.as_secs_f64(),
};
let durations_statistics = res.duration_all_statistics();
let response_time_histogram = durations_statistics
.histogram
.into_iter()
.map(|(k, v)| (k.to_string(), v))
.collect();
let latency_percentiles = durations_statistics
.percentiles
.into_iter()
.map(|(p, v)| (format!("p{p}"), v))
.collect();
let mut response_time_histogram_successful: Option<BTreeMap<String, usize>> = None;
let mut latency_percentiles_successful: Option<BTreeMap<String, f64>> = None;
let mut response_time_histogram_not_successful: Option<BTreeMap<String, usize>> = None;
let mut latency_percentiles_not_successful: Option<BTreeMap<String, f64>> = None;
if stats_success_breakdown {
let durations_successful_statistics = res.duration_successful_statistics();
response_time_histogram_successful = Some(
durations_successful_statistics
.histogram
.into_iter()
.map(|(k, v)| (k.to_string(), v))
.collect(),
);
latency_percentiles_successful = Some(
durations_successful_statistics
.percentiles
.into_iter()
.map(|(p, v)| (format!("p{p}"), v))
.collect(),
);
let durations_not_successful_statistics = res.duration_not_successful_statistics();
response_time_histogram_not_successful = Some(
durations_not_successful_statistics
.histogram
.into_iter()
.map(|(k, v)| (k.to_string(), v))
.collect(),
);
latency_percentiles_not_successful = Some(
durations_not_successful_statistics
.percentiles
.into_iter()
.map(|(p, v)| (format!("p{p}"), v))
.collect(),
);
}
let mut ends = res
.end_times_from_start(start)
.map(|d| d.as_secs_f64())
.collect::<Vec<_>>();
ends.push(0.0);
float_ord::sort(&mut ends);
let mut rps: Vec<f64> = Vec::new();
// 10ms
const INTERVAL: f64 = 0.01;
let mut r = 0;
loop {
let prev_r = r;
// increment at least 1
if r + 1 < ends.len() {
r += 1;
}
while r + 1 < ends.len() && ends[prev_r] + INTERVAL > ends[r + 1] {
r += 1;
}
if r == prev_r {
break;
}
let n = r - prev_r;
let t = ends[r] - ends[prev_r];
rps.push(n as f64 / t);
}
let rps_percentiles = percentiles(&mut rps);
let variance = rps.iter().collect::<Variance>();
let rps = Rps {
mean: variance.mean(),
stddev: variance.sample_variance().sqrt(),
max: rps.iter().collect::<Max>().max(),
min: rps.iter().collect::<Min>().min(),
percentiles: rps_percentiles,
};
let status_code_distribution = res.status_code_distribution();
let dns_dialup_stat = res.dns_dialup_stat();
let dns_lookup_stat = res.dns_lookup_stat();
let details = Details {
dns_dialup: Triple {
average: dns_dialup_stat.mean(),
fastest: dns_dialup_stat.min(),
slowest: dns_dialup_stat.max(),
},
dns_lookup: Triple {
average: dns_lookup_stat.mean(),
fastest: dns_lookup_stat.min(),
slowest: dns_lookup_stat.max(),
},
};
serde_json::to_writer_pretty(
w,
&Result {
summary,
response_time_histogram,
latency_percentiles,
response_time_histogram_successful,
latency_percentiles_successful,
response_time_histogram_not_successful,
latency_percentiles_not_successful,
rps,
details,
status_code_distribution: status_code_distribution
.into_iter()
.map(|(k, v)| (k.as_u16().to_string(), v))
.collect(),
error_distribution: res.error_distribution().clone(),
},
)
}
fn print_csv<W: Write>(w: &mut W, start: Instant, res: &ResultData) -> std::io::Result<()> {
// csv header
writeln!(
w,
"request-start,DNS,DNS+dialup,Response-delay,request-duration,bytes,status"
)?;
let mut success_requests = res.success().to_vec();
success_requests.sort_by_key(|r| r.start);
for request in success_requests {
let dns_and_dialup = match request.connection_time {
Some(connection_time) => (connection_time.dns_lookup, connection_time.dialup),
None => (std::time::Duration::ZERO, std::time::Duration::ZERO),
};
let first_byte = match request.first_byte {
Some(first_byte) => first_byte - request.start,
None => std::time::Duration::ZERO,
};
writeln!(
w,
"{},{},{},{},{},{},{}",
(request.start - start).as_secs_f64(),
dns_and_dialup.0.as_secs_f64(),
dns_and_dialup.1.as_secs_f64(),
first_byte.as_secs_f64(),
request.duration().as_secs_f64(),
request.len_bytes,
request.status.as_u16(),
)?;
}
Ok(())
}
/// Print all summary as Text
fn print_summary<W: Write>(
w: &mut W,
res: &ResultData,
total_duration: Duration,
disable_style: bool,
stats_success_breakdown: bool,
time_unit: Option<TimeScale>,
) -> std::io::Result<()> {
let style = StyleScheme {
style_enabled: !disable_style,
};
writeln!(w, "{}", style.heading("Summary:"))?;
let success_rate = 100.0 * res.success_rate();
writeln!(
w,
"{}",
style.success_rate(
&format!(" Success rate:\t{success_rate:.2}%"),
success_rate
)
)?;
let latency_stat = res.latency_stat();
// Determine timescale automatically
let timescale = if let Some(timescale) = time_unit {
timescale
} else {
// Use max latency (slowest request)
TimeScale::from_f64(latency_stat.max())
};
writeln!(
w,
" Total:\t{:.4} {timescale}",
total_duration.as_secs_f64() / timescale.as_secs_f64()
)?;
writeln!(
w,
"{}",
style.slowest(&format!(
" Slowest:\t{:.4} {timescale}",
latency_stat.max() / timescale.as_secs_f64()
))
)?;
writeln!(
w,
"{}",
style.fastest(&format!(
" Fastest:\t{:.4} {timescale}",
latency_stat.min() / timescale.as_secs_f64()
))
)?;
writeln!(
w,
"{}",
style.average(&format!(
" Average:\t{:.4} {timescale}",
latency_stat.mean() / timescale.as_secs_f64()
))
)?;
writeln!(
w,
" Requests/sec:\t{:.4}",
res.len() as f64 / total_duration.as_secs_f64()
)?;
writeln!(w)?;
writeln!(
w,
" Total data:\t{:.2}",
Byte::from_u64(res.total_data() as u64).get_appropriate_unit(byte_unit::UnitType::Binary)
)?;
if let Some(size) = res
.size_per_request()
.map(|n| Byte::from_u64(n).get_appropriate_unit(byte_unit::UnitType::Binary))
{
writeln!(w, " Size/request:\t{size:.2}")?;
} else {
writeln!(w, " Size/request:\tNaN")?;
}
writeln!(
w,
" Size/sec:\t{:.2}",
Byte::from_u64((res.total_data() as f64 / total_duration.as_secs_f64()) as u64)
.get_appropriate_unit(byte_unit::UnitType::Binary)
)?;
writeln!(w)?;
let duration_all_statistics = res.duration_all_statistics();
writeln!(w, "{}", style.heading("Response time histogram:"))?;
print_histogram(w, &duration_all_statistics.histogram, style, timescale)?;
writeln!(w)?;
writeln!(w, "{}", style.heading("Response time distribution:"))?;
print_distribution(w, &duration_all_statistics.percentiles, style, timescale)?;
writeln!(w)?;
if stats_success_breakdown {
let durations_successful_statics = res.duration_successful_statistics();
writeln!(
w,
"{}",
style.heading("Response time histogram (2xx only):")
)?;
print_histogram(w, &durations_successful_statics.histogram, style, timescale)?;
writeln!(w)?;
writeln!(
w,
"{}",
style.heading("Response time distribution (2xx only):")
)?;
print_distribution(
w,
&durations_successful_statics.percentiles,
style,
timescale,
)?;
writeln!(w)?;
let durations_not_successful = res.duration_not_successful_statistics();
writeln!(
w,
"{}",
style.heading("Response time histogram (4xx + 5xx only):")
)?;
print_histogram(w, &durations_not_successful.histogram, style, timescale)?;
writeln!(w)?;
writeln!(
w,
"{}",
style.heading("Response time distribution (4xx + 5xx only):")
)?;
print_distribution(w, &durations_not_successful.percentiles, style, timescale)?;
writeln!(w)?;
}
writeln!(w)?;
let dns_dialup_stat = res.dns_dialup_stat();
let dns_lookup_stat = res.dns_lookup_stat();
writeln!(
w,
"{}",
style.heading("Details (average, fastest, slowest):")
)?;
writeln!(
w,
" DNS+dialup:\t{:.4} {timescale}, {:.4} {timescale}, {:.4} {timescale}",
dns_dialup_stat.mean() / timescale.as_secs_f64(),
dns_dialup_stat.min() / timescale.as_secs_f64(),
dns_dialup_stat.max() / timescale.as_secs_f64()
)?;
writeln!(
w,
" DNS-lookup:\t{:.4} {timescale}, {:.4} {timescale}, {:.4} {timescale}",
dns_lookup_stat.mean() / timescale.as_secs_f64(),
dns_lookup_stat.min() / timescale.as_secs_f64(),
dns_lookup_stat.max() / timescale.as_secs_f64()
)?;
writeln!(w)?;
let status_dist: BTreeMap<http::StatusCode, usize> = res.status_code_distribution();
let mut status_v: Vec<(http::StatusCode, usize)> = status_dist.into_iter().collect();
status_v.sort_by_key(|t| std::cmp::Reverse(t.1));
writeln!(w, "{}", style.heading("Status code distribution:"))?;
for (status, count) in status_v {
writeln!(
w,
"{}",
style.status_distribution(
&format!(" [{}] {} responses", status.as_str(), count),
status
)
)?;
}
let mut error_v: Vec<(String, usize)> = res
.error_distribution()
.iter()
.map(|(k, v)| (k.clone(), *v))
.collect();
error_v.sort_by_key(|t| std::cmp::Reverse(t.1));
if !error_v.is_empty() {
writeln!(w)?;
writeln!(w, "Error distribution:")?;
for (error, count) in error_v {
writeln!(w, " [{count}] {error}")?;
}
}
Ok(())
}
/// This is used to print histogram of response time.
fn print_histogram<W: Write>(
w: &mut W,
data: &[(f64, usize)],
style: StyleScheme,
timescale: TimeScale,
) -> std::io::Result<()> {
let max_bar = data.iter().map(|t| t.1).max().unwrap();
let str_len_max = max_bar.to_string().len();
let width = data
.iter()
.map(|t| ((t.0 / timescale.as_secs_f64()) as u64).to_string().len())
.max()
.unwrap()
+ 4;
for (label, b) in data.iter() {
let indent = str_len_max - b.to_string().len();
write!(
w,
"{}",
style.latency_distribution(
&format!(
" {:>width$.3} {timescale} [{}]{} |",
label / timescale.as_secs_f64(),
b,
" ".repeat(indent),
width = width
),
*label
)
)?;
bar(w, *b as f64 / max_bar as f64, style, *label)?;
writeln!(w)?;
}
Ok(())
}
// Print Bar like ■■■■■■■■■
fn bar<W: Write>(w: &mut W, ratio: f64, style: StyleScheme, label: f64) -> std::io::Result<()> {
// TODO: Use more block element code to show more precise bar
let width = 32;
for _ in 0..(width as f64 * ratio) as usize {
write!(w, "{}", style.latency_distribution("■", label))?;
}
Ok(())
}
fn percentile_iter(values: &mut [f64]) -> impl Iterator<Item = (f64, f64)> + '_ {
float_ord::sort(values);
[10.0, 25.0, 50.0, 75.0, 90.0, 95.0, 99.0, 99.9, 99.99]
.iter()
.map(move |&p| {
let i = (p / 100.0 * values.len() as f64) as usize;
(p, *values.get(i).unwrap_or(&f64::NAN))
})
}
/// Print distribution of collection of f64
fn print_distribution<W: Write>(
w: &mut W,
percentiles: &[(f64, f64)],
style: StyleScheme,
timescale: TimeScale,
) -> std::io::Result<()> {
for (p, v) in percentiles {
writeln!(
w,
"{}",
style.latency_distribution(
&format!(
" {p:.2}% in {:.4} {timescale}",
v / timescale.as_secs_f64()
),
*v
)
)?;
}
Ok(())
}
fn percentiles(values: &mut [f64]) -> BTreeMap<String, f64> {
percentile_iter(values)
.map(|(p, v)| (format!("p{p}"), v))
.collect()
}
#[cfg(test)]
mod tests {
use float_cmp::assert_approx_eq;
use super::*;
#[test]
fn test_percentile_iter() {
let mut values: [f64; 40] = [
5.0, 5.0, 5.0, 5.0, 5.0, 10.0, 10.0, 10.0, 10.0, 10.0, 11.0, 11.0, 11.0, 11.0, 11.0,
11.0, 11.0, 11.0, 11.0, 11.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0,
12.0, 15.0, 15.0, 15.0, 15.0, 15.0, 20.0, 20.0, 20.0, 25.0, 30.0,
];
let result: Vec<(f64, f64)> = percentile_iter(&mut values).collect();
assert_approx_eq!(&[f64], &[result[0].0, result[0].1], &[10.0, 5_f64]);
assert_approx_eq!(&[f64], &[result[1].0, result[1].1], &[25.0, 11_f64]);
assert_approx_eq!(&[f64], &[result[2].0, result[2].1], &[50.0, 12_f64]);
assert_approx_eq!(&[f64], &[result[3].0, result[3].1], &[75.0, 15_f64]);
assert_approx_eq!(&[f64], &[result[4].0, result[4].1], &[90.0, 20_f64]);
assert_approx_eq!(&[f64], &[result[5].0, result[5].1], &[95.0, 25_f64]);
assert_approx_eq!(&[f64], &[result[6].0, result[6].1], &[99.0, 30_f64]);
assert_approx_eq!(&[f64], &[result[7].0, result[7].1], &[99.9, 30_f64]);
assert_approx_eq!(&[f64], &[result[8].0, result[8].1], &[99.99, 30_f64]);
}
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/tls_config.rs | src/tls_config.rs | #[cfg(feature = "rustls")]
pub struct RuslsConfigs {
no_alpn: std::sync::Arc<rustls::ClientConfig>,
alpn_h2: std::sync::Arc<rustls::ClientConfig>,
alpn_h3: std::sync::Arc<rustls::ClientConfig>,
}
#[cfg(feature = "rustls")]
impl RuslsConfigs {
pub fn new(
insecure: bool,
cacert_pem: Option<&[u8]>,
client_auth: Option<(&[u8], &[u8])>,
) -> Self {
use rustls_pki_types::pem::PemObject;
use std::sync::Arc;
let mut root_cert_store = rustls::RootCertStore::empty();
for cert in rustls_native_certs::load_native_certs().expect("could not load platform certs")
{
root_cert_store.add(cert).unwrap();
}
if let Some(cacert_pem) = cacert_pem {
for der in rustls_pki_types::CertificateDer::pem_slice_iter(cacert_pem) {
root_cert_store.add(der.unwrap()).unwrap();
}
}
let _ = rustls::crypto::CryptoProvider::install_default(
rustls::crypto::aws_lc_rs::default_provider(),
);
let builder = rustls::ClientConfig::builder().with_root_certificates(root_cert_store);
let mut config = if let Some((cert, key)) = client_auth {
let certs = rustls_pki_types::CertificateDer::pem_slice_iter(cert)
.collect::<Result<Vec<_>, _>>()
.unwrap();
let key = rustls_pki_types::PrivateKeyDer::from_pem_slice(key).unwrap();
builder.with_client_auth_cert(certs, key).unwrap()
} else {
builder.with_no_client_auth()
};
if insecure {
config
.dangerous()
.set_certificate_verifier(Arc::new(AcceptAnyServerCert));
}
let mut no_alpn = config.clone();
no_alpn.alpn_protocols = vec![];
let mut alpn_h2 = config.clone();
alpn_h2.alpn_protocols = vec![b"h2".to_vec()];
let mut alpn_h3 = config;
alpn_h3.alpn_protocols = vec![b"h3".to_vec()];
alpn_h3.enable_early_data = true;
Self {
no_alpn: Arc::new(no_alpn),
alpn_h2: Arc::new(alpn_h2),
alpn_h3: Arc::new(alpn_h3),
}
}
pub fn config(&self, http: hyper::http::Version) -> &std::sync::Arc<rustls::ClientConfig> {
use hyper::http;
match http {
http::Version::HTTP_09 | http::Version::HTTP_10 | http::Version::HTTP_11 => {
&self.no_alpn
}
http::Version::HTTP_2 => &self.alpn_h2,
http::Version::HTTP_3 => &self.alpn_h3,
_ => panic!("nonsupported HTTP version"),
}
}
}
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
pub struct NativeTlsConnectors {
pub no_alpn: tokio_native_tls::TlsConnector,
pub alpn_h2: tokio_native_tls::TlsConnector,
}
#[cfg(all(feature = "native-tls", not(feature = "rustls")))]
impl NativeTlsConnectors {
pub fn new(
insecure: bool,
cacert_pem: Option<&[u8]>,
client_auth: Option<(&[u8], &[u8])>,
) -> Self {
let new = |is_http2: bool| {
let mut connector_builder = native_tls::TlsConnector::builder();
if let Some(cacert_pem) = cacert_pem {
let cert = native_tls::Certificate::from_pem(cacert_pem)
.expect("Failed to parse cacert_pem");
connector_builder.add_root_certificate(cert);
}
if insecure {
connector_builder
.danger_accept_invalid_certs(true)
.danger_accept_invalid_hostnames(true);
}
if let Some((cert, key)) = client_auth {
let cert = native_tls::Identity::from_pkcs8(cert, key)
.expect("Failed to parse client_auth cert/key");
connector_builder.identity(cert);
}
if is_http2 {
connector_builder.request_alpns(&["h2"]);
}
connector_builder
.build()
.expect("Failed to build native_tls::TlsConnector")
.into()
};
Self {
no_alpn: new(false),
alpn_h2: new(true),
}
}
pub fn connector(&self, is_http2: bool) -> &tokio_native_tls::TlsConnector {
if is_http2 {
&self.alpn_h2
} else {
&self.no_alpn
}
}
}
/// A server certificate verifier that accepts any certificate.
#[cfg(feature = "rustls")]
#[derive(Debug)]
pub struct AcceptAnyServerCert;
#[cfg(feature = "rustls")]
impl rustls::client::danger::ServerCertVerifier for AcceptAnyServerCert {
fn verify_server_cert(
&self,
_end_entity: &rustls_pki_types::CertificateDer<'_>,
_intermediates: &[rustls_pki_types::CertificateDer<'_>],
_server_name: &rustls_pki_types::ServerName<'_>,
_ocsp_response: &[u8],
_now: rustls_pki_types::UnixTime,
) -> Result<rustls::client::danger::ServerCertVerified, rustls::Error> {
Ok(rustls::client::danger::ServerCertVerified::assertion())
}
fn verify_tls12_signature(
&self,
_message: &[u8],
_cert: &rustls_pki_types::CertificateDer<'_>,
_dss: &rustls::DigitallySignedStruct,
) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
}
fn verify_tls13_signature(
&self,
_message: &[u8],
_cert: &rustls_pki_types::CertificateDer<'_>,
_dss: &rustls::DigitallySignedStruct,
) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> {
Ok(rustls::client::danger::HandshakeSignatureValid::assertion())
}
fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
rustls::crypto::CryptoProvider::get_default()
.unwrap()
.signature_verification_algorithms
.supported_schemes()
}
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/curl_compat.rs | src/curl_compat.rs | //! Curl compatibility utilities
use std::str::FromStr;
pub struct Form {
pub boundary: String,
pub parts: Vec<FormPart>,
}
pub struct FormPart {
pub name: String,
pub filename: Option<String>,
pub content_type: Option<String>,
pub data: Vec<u8>,
}
impl Form {
pub fn new() -> Self {
Self {
boundary: Self::generate_boundary(),
parts: Vec::new(),
}
}
pub fn add_part(&mut self, part: FormPart) {
self.parts.push(part);
}
pub fn content_type(&self) -> String {
format!("multipart/form-data; boundary={}", self.boundary)
}
pub fn body(&self) -> Vec<u8> {
let mut body = Vec::new();
for part in &self.parts {
// Add boundary separator
body.extend_from_slice(b"--");
body.extend_from_slice(self.boundary.as_bytes());
body.extend_from_slice(b"\r\n");
// Add Content-Disposition header
body.extend_from_slice(b"Content-Disposition: form-data; name=\"");
body.extend_from_slice(part.name.as_bytes());
body.extend_from_slice(b"\"");
// Add filename if present
if let Some(filename) = &part.filename {
body.extend_from_slice(b"; filename=\"");
body.extend_from_slice(filename.as_bytes());
body.extend_from_slice(b"\"");
}
body.extend_from_slice(b"\r\n");
// Add Content-Type header if present
if let Some(content_type) = &part.content_type {
body.extend_from_slice(b"Content-Type: ");
body.extend_from_slice(content_type.as_bytes());
body.extend_from_slice(b"\r\n");
}
// Empty line before data
body.extend_from_slice(b"\r\n");
// Add the actual data
body.extend_from_slice(&part.data);
body.extend_from_slice(b"\r\n");
}
// Add final boundary
body.extend_from_slice(b"--");
body.extend_from_slice(self.boundary.as_bytes());
body.extend_from_slice(b"--\r\n");
body
}
fn generate_boundary() -> String {
use rand::Rng;
let mut rng = rand::rng();
let random_bytes: [u8; 16] = rng.random();
// Convert to hex string manually to avoid external hex dependency
let hex_string = random_bytes
.iter()
.map(|b| format!("{b:02x}"))
.collect::<String>();
format!("----formdata-oha-{hex_string}")
}
}
impl FromStr for FormPart {
type Err = anyhow::Error;
/// Parse curl's -F format string
/// Supports formats like:
/// - `name=value`
/// - `name=@filename` (file upload with filename)
/// - `name=<filename` (file upload without filename)
/// - `name=@filename;type=content-type`
/// - `name=value;filename=name`
fn from_str(s: &str) -> Result<Self, Self::Err> {
// Split on first '=' to separate name from value/options
let (name, rest) = s
.split_once('=')
.ok_or_else(|| anyhow::anyhow!("Invalid form format: missing '=' in '{}'", s))?;
let name = name.to_string();
// Parse the value part which may contain semicolon-separated options
let parts: Vec<&str> = rest.split(';').collect();
let value_part = parts[0];
let mut filename = None;
let mut content_type = None;
let data;
// Check if this is a file upload (@filename or <filename)
if let Some(file_path) = value_part.strip_prefix('@') {
// Remove '@' prefix
// Read file content
data = std::fs::read(file_path)
.map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", file_path, e))?;
// Extract filename from path
filename = std::path::Path::new(file_path)
.file_name()
.and_then(|name| name.to_str())
.map(|s| s.to_string());
} else if let Some(file_path) = value_part.strip_prefix('<') {
// Remove '<' prefix
// Read file content
data = std::fs::read(file_path)
.map_err(|e| anyhow::anyhow!("Failed to read file '{}': {}", file_path, e))?;
// Do not set filename for '<' format (curl behavior)
} else {
// Regular form field with string value
data = value_part.as_bytes().to_vec();
}
// Parse additional options (filename, type, etc.)
for part in parts.iter().skip(1) {
if let Some((key, value)) = part.split_once('=') {
match key.trim() {
"filename" => {
filename = Some(value.trim().to_string());
}
"type" => {
content_type = Some(value.trim().to_string());
}
_ => {
// Ignore unknown options for compatibility
}
}
}
}
Ok(FormPart {
name,
filename,
content_type,
data,
})
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_parse_simple_field() {
let part: FormPart = "name=value".parse().unwrap();
assert_eq!(part.name, "name");
assert_eq!(part.data, b"value");
assert_eq!(part.filename, None);
assert_eq!(part.content_type, None);
}
#[test]
fn test_parse_field_with_filename() {
let part: FormPart = "upload=data;filename=test.txt".parse().unwrap();
assert_eq!(part.name, "upload");
assert_eq!(part.data, b"data");
assert_eq!(part.filename, Some("test.txt".to_string()));
assert_eq!(part.content_type, None);
}
#[test]
fn test_parse_field_with_type() {
let part: FormPart = "data=content;type=text/plain".parse().unwrap();
assert_eq!(part.name, "data");
assert_eq!(part.data, b"content");
assert_eq!(part.filename, None);
assert_eq!(part.content_type, Some("text/plain".to_string()));
}
#[test]
fn test_parse_field_with_filename_and_type() {
let part: FormPart = "file=content;filename=test.txt;type=text/plain"
.parse()
.unwrap();
assert_eq!(part.name, "file");
assert_eq!(part.data, b"content");
assert_eq!(part.filename, Some("test.txt".to_string()));
assert_eq!(part.content_type, Some("text/plain".to_string()));
}
#[test]
fn test_parse_invalid_format() {
let result: Result<FormPart, _> = "invalid".parse();
assert!(result.is_err());
}
#[test]
fn test_parse_file_upload() {
// Create a temporary file for testing
let temp_dir = std::env::temp_dir();
let test_file = temp_dir.join("test_form_upload.txt");
std::fs::write(&test_file, b"test file content").unwrap();
let form_str = format!("upload=@{}", test_file.display());
let part: FormPart = form_str.parse().unwrap();
assert_eq!(part.name, "upload");
assert_eq!(part.data, b"test file content");
assert_eq!(part.filename, Some("test_form_upload.txt".to_string()));
assert_eq!(part.content_type, None);
// Clean up
std::fs::remove_file(&test_file).ok();
}
#[test]
fn test_parse_file_upload_without_filename() {
// Create a temporary file for testing
let temp_dir = std::env::temp_dir();
let test_file = temp_dir.join("test_form_upload_no_filename.txt");
std::fs::write(&test_file, b"test file content without filename").unwrap();
let form_str = format!("upload=<{}", test_file.display());
let part: FormPart = form_str.parse().unwrap();
assert_eq!(part.name, "upload");
assert_eq!(part.data, b"test file content without filename");
assert_eq!(part.filename, None); // No filename set for '<' format
assert_eq!(part.content_type, None);
// Clean up
std::fs::remove_file(&test_file).ok();
}
#[test]
fn test_form_creation_and_body_generation() {
let mut form = Form::new();
// Add a simple text field
let text_part: FormPart = "name=John".parse().unwrap();
form.add_part(text_part);
// Add a field with filename
let file_part: FormPart = "file=content;filename=test.txt;type=text/plain"
.parse()
.unwrap();
form.add_part(file_part);
let body = form.body();
let body_str = String::from_utf8_lossy(&body);
// Check that boundary is present
assert!(body_str.contains(&format!("--{}", form.boundary)));
// Check Content-Disposition headers
assert!(body_str.contains("Content-Disposition: form-data; name=\"name\""));
assert!(
body_str
.contains("Content-Disposition: form-data; name=\"file\"; filename=\"test.txt\"")
);
// Check Content-Type header
assert!(body_str.contains("Content-Type: text/plain"));
// Check data content
assert!(body_str.contains("John"));
assert!(body_str.contains("content"));
// Check final boundary
assert!(body_str.ends_with(&format!("--{}--\r\n", form.boundary)));
}
#[test]
fn test_form_content_type() {
let form = Form::new();
let content_type = form.content_type();
assert!(content_type.starts_with("multipart/form-data; boundary="));
assert!(content_type.contains(&form.boundary));
}
#[test]
fn test_empty_form_body() {
let form = Form::new();
let body = form.body();
let body_str = String::from_utf8_lossy(&body);
// Should only contain final boundary for empty form
assert_eq!(body_str, format!("--{}--\r\n", form.boundary));
}
#[test]
fn test_form_with_file_upload() {
// Create a temporary file for testing
let temp_dir = std::env::temp_dir();
let test_file = temp_dir.join("form_test_upload.txt");
std::fs::write(&test_file, b"file content for form").unwrap();
let mut form = Form::new();
// Parse and add file upload part
let form_str = format!("upload=@{}", test_file.display());
let file_part: FormPart = form_str.parse().unwrap();
form.add_part(file_part);
let body = form.body();
let body_str = String::from_utf8_lossy(&body);
// Check file upload formatting
assert!(body_str.contains(
"Content-Disposition: form-data; name=\"upload\"; filename=\"form_test_upload.txt\""
));
assert!(body_str.contains("file content for form"));
// Clean up
std::fs::remove_file(&test_file).ok();
}
#[test]
fn test_boundary_generation_is_random() {
let form1 = Form::new();
let form2 = Form::new();
// Boundaries should be different for different forms
assert_ne!(form1.boundary, form2.boundary);
// Boundaries should follow the expected format
assert!(form1.boundary.starts_with("----formdata-oha-"));
assert!(form2.boundary.starts_with("----formdata-oha-"));
// Boundaries should have the expected length (prefix + 32 hex chars)
assert_eq!(form1.boundary.len(), "----formdata-oha-".len() + 32);
assert_eq!(form2.boundary.len(), "----formdata-oha-".len() + 32);
}
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/monitor.rs | src/monitor.rs | use byte_unit::Byte;
use crossterm::event::{Event, KeyCode, KeyEvent, KeyModifiers};
use hyper::http;
use ratatui::{DefaultTerminal, crossterm};
use ratatui::{
layout::{Constraint, Direction, Layout},
style::{Color, Style},
text::{Line, Span},
widgets::{BarChart, Block, Borders, Gauge, Paragraph},
};
use std::collections::BTreeMap;
use crate::{
client::{ClientError, RequestResult},
printer::PrintConfig,
result_data::{MinMaxMean, ResultData},
timescale::{TimeLabel, TimeScale},
};
/// When the monitor ends
pub enum EndLine {
/// After a duration
Duration(std::time::Duration),
/// After n query done
NumQuery(usize),
}
struct ColorScheme {
light_blue: Option<Color>,
green: Option<Color>,
yellow: Option<Color>,
}
impl ColorScheme {
fn new() -> ColorScheme {
ColorScheme {
light_blue: None,
green: None,
yellow: None,
}
}
fn set_colors(&mut self) {
self.light_blue = Some(Color::Cyan);
self.green = Some(Color::Green);
self.yellow = Some(Color::Yellow);
}
}
pub struct Monitor {
pub print_config: PrintConfig,
pub end_line: EndLine,
/// All workers sends each result to this channel
pub report_receiver: kanal::Receiver<Result<RequestResult, ClientError>>,
// When started
pub start: std::time::Instant,
// Frame per second of TUI
pub fps: usize,
pub disable_color: bool,
pub time_unit: Option<TimeScale>,
}
struct IntoRawMode;
impl IntoRawMode {
pub fn new() -> Result<(Self, DefaultTerminal), std::io::Error> {
let terminal = ratatui::try_init()?;
Ok((Self, terminal))
}
}
impl Drop for IntoRawMode {
fn drop(&mut self) {
ratatui::restore();
}
}
impl Monitor {
pub async fn monitor(self) -> Result<(ResultData, PrintConfig), std::io::Error> {
let (raw_mode, mut terminal) = IntoRawMode::new()?;
// Return this when ends to application print summary
// We must not read all data from this due to computational cost.
let mut all: ResultData = Default::default();
// stats for HTTP status
let mut status_dist: BTreeMap<http::StatusCode, usize> = Default::default();
#[cfg(unix)]
// Limit for number open files. eg. ulimit -n
let nofile_limit = rlimit::getrlimit(rlimit::Resource::NOFILE);
// None means auto timescale which depends on how long it takes
let mut timescale_auto = self.time_unit;
let mut colors = ColorScheme::new();
if !self.disable_color {
colors.set_colors();
}
let mut buf = Vec::new();
loop {
let frame_start = std::time::Instant::now();
let is_disconnected = self.report_receiver.is_disconnected();
let _ = self.report_receiver.drain_into(&mut buf);
for report in buf.drain(..) {
if let Ok(report) = report.as_ref() {
*status_dist.entry(report.status).or_default() += 1;
}
all.push(report);
}
if is_disconnected {
break;
}
let now = std::time::Instant::now();
let progress = match &self.end_line {
EndLine::Duration(d) => {
((now - self.start).as_secs_f64() / d.as_secs_f64()).clamp(0.0, 1.0)
}
EndLine::NumQuery(n) => (all.len() as f64 / *n as f64).clamp(0.0, 1.0),
};
let count = 32;
// Make ms smallest timescale viewable for TUI
let timescale = (if let Some(timescale) = timescale_auto {
timescale
} else {
TimeScale::from_elapsed(self.start.elapsed())
})
.max(TimeScale::Millisecond);
let bin = timescale.as_secs_f64();
let mut bar_num_req = vec![0u64; count];
let short_bin = (now - self.start).as_secs_f64() % bin;
for r in all.success().iter().rev() {
let past = (now - r.end).as_secs_f64();
let i = if past <= short_bin {
0
} else {
1 + ((past - short_bin) / bin) as usize
};
if i >= bar_num_req.len() {
break;
}
bar_num_req[i] += 1;
}
let cols = bar_num_req
.iter()
.map(|x| x.to_string().chars().count())
.max()
.unwrap_or(0);
let bar_num_req: Vec<(String, u64)> = bar_num_req
.into_iter()
.enumerate()
.map(|(i, n)| {
(
{
let mut s = TimeLabel { x: i, timescale }.to_string();
if cols > s.len() {
for _ in 0..cols - s.len() {
s.push(' ');
}
}
s
},
n,
)
})
.collect();
let bar_num_req_str: Vec<(&str, u64)> =
bar_num_req.iter().map(|(a, b)| (a.as_str(), *b)).collect();
#[cfg(unix)]
let nofile = std::fs::read_dir("/dev/fd").map(|dir| dir.count());
terminal.draw(|f| {
let row4 = Layout::default()
.direction(Direction::Vertical)
.constraints(
[
Constraint::Length(3),
Constraint::Length(8),
Constraint::Length(all.error_distribution().len() as u16 + 2),
Constraint::Fill(1),
]
.as_ref(),
)
.split(f.area());
let mid = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(row4[1]);
let bottom = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(50), Constraint::Percentage(50)].as_ref())
.split(row4[3]);
let gauge_label = match &self.end_line {
EndLine::Duration(d) => format!(
"{} / {}",
humantime::Duration::from(std::time::Duration::from_secs(
(now - self.start).as_secs_f64() as u64
)),
humantime::Duration::from(*d)
),
EndLine::NumQuery(n) => format!("{} / {}", all.len(), n),
};
let gauge = Gauge::default()
.block(Block::default().title("Progress").borders(Borders::ALL))
.gauge_style(Style::default().fg(colors.light_blue.unwrap_or(Color::White)))
.label(Span::raw(gauge_label))
.ratio(progress);
f.render_widget(gauge, row4[0]);
let last_1_timescale = {
let success = all.success();
let index = match success.binary_search_by(|probe| {
(now - probe.end)
.as_secs_f64()
.partial_cmp(×cale.as_secs_f64())
// Should be fine
.unwrap()
.reverse()
}) {
Ok(i) => i,
Err(i) => i,
};
&success[index..]
};
let last_1_minmaxmean: MinMaxMean = last_1_timescale
.iter()
.map(|r| r.duration().as_secs_f64())
.collect();
let stats_text = vec![
Line::from(format!("Requests : {}", last_1_timescale.len())),
Line::from(vec![Span::styled(
format!("Slowest: {:.4} secs", last_1_minmaxmean.max(),),
Style::default().fg(colors.yellow.unwrap_or(Color::Reset)),
)]),
Line::from(vec![Span::styled(
format!("Fastest: {:.4} secs", last_1_minmaxmean.min(),),
Style::default().fg(colors.green.unwrap_or(Color::Reset)),
)]),
Line::from(vec![Span::styled(
format!("Average: {:.4} secs", last_1_minmaxmean.mean(),),
Style::default().fg(colors.light_blue.unwrap_or(Color::Reset)),
)]),
Line::from(format!(
"Data: {:.2}",
Byte::from_u64(
last_1_timescale
.iter()
.map(|r| r.len_bytes as u64)
.sum::<u64>()
)
.get_appropriate_unit(byte_unit::UnitType::Binary)
)),
#[cfg(unix)]
// Note: Windows can open 255 * 255 * 255 files. So not showing on windows is OK.
Line::from(format!(
"Number of open files: {} / {}",
nofile
.map(|c| c.to_string())
.unwrap_or_else(|_| "Error".to_string()),
nofile_limit
.as_ref()
.map(|(s, _h)| s.to_string())
.unwrap_or_else(|_| "Unknown".to_string())
)),
];
let stats_title = format!("Stats for last {timescale}");
let stats = Paragraph::new(stats_text).block(
Block::default()
.title(Span::raw(stats_title))
.borders(Borders::ALL),
);
f.render_widget(stats, mid[0]);
let mut status_v: Vec<(http::StatusCode, usize)> =
status_dist.clone().into_iter().collect();
status_v.sort_by_key(|t| std::cmp::Reverse(t.1));
let stats2_text = status_v
.into_iter()
.map(|(status, count)| {
Line::from(format!("[{}] {} responses", status.as_str(), count))
})
.collect::<Vec<_>>();
let stats2 = Paragraph::new(stats2_text).block(
Block::default()
.title("Status code distribution")
.borders(Borders::ALL),
);
f.render_widget(stats2, mid[1]);
let mut error_v: Vec<(String, usize)> =
all.error_distribution().clone().into_iter().collect();
error_v.sort_by_key(|t| std::cmp::Reverse(t.1));
let errors_text = error_v
.into_iter()
.map(|(e, count)| Line::from(format!("[{count}] {e}")))
.collect::<Vec<_>>();
let errors = Paragraph::new(errors_text).block(
Block::default()
.title("Error distribution")
.borders(Borders::ALL),
);
f.render_widget(errors, row4[2]);
let title = format!(
"Requests / past {}{}. press -/+/a to change",
timescale,
if timescale_auto.is_none() {
" (auto)"
} else {
""
}
);
let barchart = BarChart::default()
.block(
Block::default()
.title(Span::raw(title))
.style(
Style::default()
.fg(colors.green.unwrap_or(Color::Reset))
.bg(Color::Reset),
)
.borders(Borders::ALL),
)
.data(bar_num_req_str.as_slice())
.bar_width(
bar_num_req
.iter()
.map(|(s, _)| s.chars().count())
.max()
.map(|w| w + 2)
.unwrap_or(1) as u16,
);
f.render_widget(barchart, bottom[0]);
let resp_histo_width = 7;
let resp_histo_data: Vec<(String, u64)> = {
let bins = if bottom[1].width < 2 {
0
} else {
(bottom[1].width as usize - 2) / (resp_histo_width + 1)
}
.max(2);
let values = last_1_timescale
.iter()
.map(|r| r.duration().as_secs_f64())
.collect::<Vec<_>>();
let histo = crate::histogram::histogram(&values, bins);
histo
.into_iter()
.map(|(label, v)| (format!("{label:.4}"), v as u64))
.collect()
};
let resp_histo_data_str: Vec<(&str, u64)> = resp_histo_data
.iter()
.map(|(l, v)| (l.as_str(), *v))
.collect();
let resp_histo = BarChart::default()
.block(
Block::default()
.title("Response time histogram")
.style(
Style::default()
.fg(colors.yellow.unwrap_or(Color::Reset))
.bg(Color::Reset),
)
.borders(Borders::ALL),
)
.data(resp_histo_data_str.as_slice())
.bar_width(resp_histo_width as u16);
f.render_widget(resp_histo, bottom[1]);
})?;
while crossterm::event::poll(std::time::Duration::from_secs(0))? {
match crossterm::event::read()? {
Event::Key(KeyEvent {
code: KeyCode::Char('+'),
..
}) => {
// Make ms the smallest timescale viewable in TUI
timescale_auto = Some(timescale.dec().max(TimeScale::Millisecond))
}
Event::Key(KeyEvent {
code: KeyCode::Char('-'),
..
}) => timescale_auto = Some(timescale.inc()),
Event::Key(KeyEvent {
code: KeyCode::Char('a'),
..
}) => {
if timescale_auto.is_some() {
timescale_auto = None;
} else {
timescale_auto = Some(timescale)
}
}
// User pressed q or ctrl-c
Event::Key(KeyEvent {
code: KeyCode::Char('q'),
..
})
| Event::Key(KeyEvent {
code: KeyCode::Char('c'),
modifiers: KeyModifiers::CONTROL,
..
}) => {
drop(terminal);
drop(raw_mode);
let _ = crate::printer::print_result(
self.print_config,
self.start,
&all,
now - self.start,
);
std::process::exit(libc::EXIT_SUCCESS);
}
_ => (),
}
}
let per_frame = std::time::Duration::from_secs(1) / self.fps as u32;
let elapsed = frame_start.elapsed();
if per_frame > elapsed {
tokio::time::sleep(per_frame - elapsed).await;
}
}
Ok((all, self.print_config))
}
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/result_data.rs | src/result_data.rs | use std::{
collections::BTreeMap,
time::{Duration, Instant},
};
use average::{Estimate, Max, Mean, Min, concatenate};
use hyper::StatusCode;
use crate::{
client::{ClientError, RequestResult},
histogram::histogram,
};
/// Data container for the results of the all requests
/// When a request is successful, the result is pushed to the `success` vector and the memory consumption will not be a problem because the number of successful requests is limited by network overhead.
/// When a request fails, the error message is pushed to the `error` map because the number of error messages may huge.
#[derive(Debug, Default)]
pub struct ResultData {
success: Vec<RequestResult>,
error_distribution: BTreeMap<String, usize>,
}
concatenate!(pub MinMaxMean, [Min, min], [Max, max], [Mean, mean]);
pub struct Statistics {
pub percentiles: Vec<(f64, f64)>,
pub histogram: Vec<(f64, usize)>,
}
impl Statistics {
/* private */
fn new(data: &mut [f64]) -> Self {
float_ord::sort(data);
Self {
percentiles: percentile_iter(data).collect(),
histogram: histogram(data, 11),
}
}
}
fn percentile_iter(values: &mut [f64]) -> impl Iterator<Item = (f64, f64)> + '_ {
float_ord::sort(values);
[10.0, 25.0, 50.0, 75.0, 90.0, 95.0, 99.0, 99.9, 99.99]
.iter()
.map(move |&p| {
let i = (p / 100.0 * values.len() as f64) as usize;
(p, *values.get(i).unwrap_or(&f64::NAN))
})
}
impl ResultData {
#[inline]
pub fn push(&mut self, result: Result<RequestResult, ClientError>) {
match result {
Ok(result) => self.success.push(result),
Err(err) => {
let count = self.error_distribution.entry(err.to_string()).or_insert(0);
*count += 1;
}
}
}
pub fn len(&self) -> usize {
self.success.len() + self.error_distribution.values().sum::<usize>()
}
pub fn merge(&mut self, other: ResultData) {
self.success.extend(other.success);
for (k, v) in other.error_distribution {
let count = self.error_distribution.entry(k).or_insert(0);
*count += v;
}
}
// An existence of this method doesn't prevent us to using hdrhistogram.
// Because this is only called from `monitor` and `monitor` can collect own data.
pub fn success(&self) -> &[RequestResult] {
&self.success
}
// It's very happy if you can provide all below methods without array (= non liner memory consumption) and fast `push` runtime.
pub fn success_rate(&self) -> f64 {
let dead_line = ClientError::Deadline.to_string();
// We ignore deadline errors which are because of `-z` option, not because of the server
let denominator = self.success.len()
+ self
.error_distribution
.iter()
.filter_map(|(k, v)| if k == &dead_line { None } else { Some(v) })
.sum::<usize>();
let numerator = self.success.len();
numerator as f64 / denominator as f64
}
pub fn latency_stat(&self) -> MinMaxMean {
self.success
.iter()
.map(|result| result.duration().as_secs_f64())
.collect()
}
pub fn error_distribution(&self) -> &BTreeMap<String, usize> {
&self.error_distribution
}
pub fn end_times_from_start(&self, start: Instant) -> impl Iterator<Item = Duration> + '_ {
self.success.iter().map(move |result| result.end - start)
}
pub fn status_code_distribution(&self) -> BTreeMap<StatusCode, usize> {
let mut dist = BTreeMap::new();
for result in &self.success {
let count = dist.entry(result.status).or_insert(0);
*count += 1;
}
dist
}
pub fn dns_dialup_stat(&self) -> MinMaxMean {
self.success
.iter()
.filter_map(|r| r.connection_time.map(|ct| ct.dialup.as_secs_f64()))
.collect()
}
pub fn dns_lookup_stat(&self) -> MinMaxMean {
self.success
.iter()
.filter_map(|r| r.connection_time.map(|ct| ct.dns_lookup.as_secs_f64()))
.collect()
}
pub fn total_data(&self) -> usize {
self.success.iter().map(|r| r.len_bytes).sum()
}
pub fn size_per_request(&self) -> Option<u64> {
self.success
.iter()
.map(|r| r.len_bytes as u64)
.sum::<u64>()
.checked_div(self.success.len() as u64)
}
pub fn duration_all_statistics(&self) -> Statistics {
let mut data = self
.success
.iter()
.map(|r| r.duration().as_secs_f64())
.collect::<Vec<_>>();
Statistics::new(&mut data)
}
pub fn duration_successful_statistics(&self) -> Statistics {
let mut data = self
.success
.iter()
.filter(|r| r.status.is_success())
.map(|r| r.duration().as_secs_f64())
.collect::<Vec<_>>();
Statistics::new(&mut data)
}
pub fn duration_not_successful_statistics(&self) -> Statistics {
let mut data = self
.success
.iter()
.filter(|r| !r.status.is_success())
.map(|r| r.duration().as_secs_f64())
.collect::<Vec<_>>();
Statistics::new(&mut data)
}
}
#[cfg(test)]
mod tests {
use float_cmp::assert_approx_eq;
use rand::SeedableRng;
use super::*;
use crate::client::{ClientError, ConnectionTime, RequestResult};
use std::time::{Duration, Instant};
fn build_mock_request_result(
status: StatusCode,
request_time: u64,
connection_time_dns_lookup: u64,
connection_time_dialup: u64,
first_byte: u64,
size: usize,
) -> Result<RequestResult, ClientError> {
let now = Instant::now();
Ok(RequestResult {
rng: SeedableRng::seed_from_u64(0),
start_latency_correction: None,
start: now,
connection_time: Some(ConnectionTime {
dns_lookup: Duration::from_millis(connection_time_dns_lookup),
dialup: Duration::from_millis(connection_time_dialup),
}),
first_byte: Some(now.checked_add(Duration::from_millis(first_byte)).unwrap()),
end: now
.checked_add(Duration::from_millis(request_time))
.unwrap(),
status,
len_bytes: size,
})
}
fn build_mock_request_results() -> ResultData {
let mut results = ResultData::default();
results.push(build_mock_request_result(
StatusCode::OK,
1000,
200,
50,
300,
100,
));
results.push(build_mock_request_result(
StatusCode::BAD_REQUEST,
100000,
250,
100,
400,
200,
));
results.push(build_mock_request_result(
StatusCode::INTERNAL_SERVER_ERROR,
1000000,
300,
150,
500,
300,
));
results
}
#[test]
fn test_calculate_success_rate() {
let res = build_mock_request_results();
assert_approx_eq!(f64, res.success_rate(), 1.0);
}
#[test]
fn test_calculate_slowest_request() {
let res = build_mock_request_results();
assert_approx_eq!(f64, res.latency_stat().max(), 1000.0);
}
#[test]
fn test_calculate_average_request() {
let res = build_mock_request_results();
assert_approx_eq!(f64, res.latency_stat().mean(), 367.0);
}
#[test]
fn test_calculate_total_data() {
let res = build_mock_request_results();
assert_eq!(res.total_data(), 600);
}
#[test]
fn test_calculate_size_per_request() {
let res = build_mock_request_results();
assert_eq!(res.size_per_request(), Some(200));
}
#[test]
fn test_calculate_connection_times_dns_dialup_average() {
let res = build_mock_request_results();
assert_approx_eq!(f64, res.dns_dialup_stat().mean(), 0.1);
}
#[test]
fn test_calculate_connection_times_dns_dialup_fastest() {
let res = build_mock_request_results();
assert_approx_eq!(f64, res.dns_dialup_stat().min(), 0.05);
}
#[test]
fn test_calculate_connection_times_dns_dialup_slowest() {
let res = build_mock_request_results();
assert_approx_eq!(f64, res.dns_dialup_stat().max(), 0.15);
}
#[test]
fn test_calculate_connection_times_dns_lookup_average() {
let res = build_mock_request_results();
assert_approx_eq!(f64, res.dns_lookup_stat().mean(), 0.25);
}
#[test]
fn test_calculate_connection_times_dns_lookup_fastest() {
let res = build_mock_request_results();
assert_approx_eq!(f64, res.dns_lookup_stat().min(), 0.2);
}
#[test]
fn test_calculate_connection_times_dns_lookup_slowest() {
let res = build_mock_request_results();
assert_approx_eq!(f64, res.dns_lookup_stat().max(), 0.3);
}
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/main.rs | src/main.rs | use clap::Parser;
use oha::{Opts, run};
fn main() {
let num_workers_threads = std::env::var("TOKIO_WORKER_THREADS")
.ok()
.and_then(|s| s.parse().ok())
// Prefer to use physical cores rather than logical one because it's more performant empirically.
.unwrap_or(num_cpus::get_physical());
let rt = tokio::runtime::Builder::new_multi_thread()
.worker_threads(num_workers_threads)
.enable_all()
.build()
.unwrap();
if let Err(e) = rt.block_on(run(Opts::parse())) {
eprintln!("Error: {e}");
std::process::exit(libc::EXIT_FAILURE);
}
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/request_generator.rs | src/request_generator.rs | use std::borrow::Cow;
use bytes::Bytes;
use http_body_util::Full;
use hyper::http;
use hyper::{HeaderMap, Method, Version};
use rand::Rng;
use rand::seq::IndexedRandom;
use thiserror::Error;
use url::Url;
use crate::aws_auth::{self, AwsSignatureConfig};
use crate::url_generator;
pub struct Proxy {
pub headers: HeaderMap,
pub version: Version,
}
pub enum BodyGenerator {
Static(Bytes),
Random(Vec<Bytes>),
}
pub struct RequestGenerator {
pub url_generator: url_generator::UrlGenerator,
pub https: bool,
// Only if http with proxy
pub http_proxy: Option<Proxy>,
pub method: Method,
pub version: Version,
pub headers: HeaderMap,
pub body_generator: BodyGenerator,
pub aws_config: Option<AwsSignatureConfig>,
}
#[derive(Error, Debug)]
pub enum RequestGenerationError {
#[error("URL generation error: {0}")]
UrlGeneration(#[from] url_generator::UrlGeneratorError),
#[error("Request building error: {0}")]
RequestBuild(#[from] http::Error),
#[error("AWS Signature error: {0}")]
AwsSignature(#[from] aws_auth::AwsSignatureError),
}
impl RequestGenerator {
#[inline]
fn is_http1(&self) -> bool {
self.version <= Version::HTTP_11
}
fn generate_body<R: Rng>(&self, rng: &mut R) -> Bytes {
match &self.body_generator {
BodyGenerator::Static(b) => b.clone(),
BodyGenerator::Random(choices) => choices.choose(rng).cloned().unwrap_or_default(),
}
}
pub fn generate<R: Rng>(
&self,
rng: &mut R,
) -> Result<(Cow<'_, Url>, hyper::Request<Full<Bytes>>), RequestGenerationError> {
let url = self.url_generator.generate(rng)?;
let body = self.generate_body(rng);
let mut builder = hyper::Request::builder()
.uri(if !self.is_http1() || self.http_proxy.is_some() {
&url[..]
} else {
&url[url::Position::BeforePath..]
})
.method(self.method.clone())
.version(
self.http_proxy
.as_ref()
.map(|p| p.version)
.unwrap_or(self.version),
);
let mut headers = self.headers.clone();
// Apply AWS SigV4 if configured
if let Some(aws_config) = &self.aws_config {
aws_config.sign_request(self.method.as_str(), &mut headers, &url, &body)?;
}
if let Some(proxy) = &self.http_proxy {
for (key, value) in proxy.headers.iter() {
headers.insert(key, value.clone());
}
}
if self.version < Version::HTTP_2 {
headers
.entry(http::header::HOST)
.or_insert_with(|| http::header::HeaderValue::from_str(url.authority()).unwrap());
}
*builder.headers_mut().unwrap() = headers;
let req = builder.body(Full::new(body))?;
Ok((url, req))
}
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/url_generator.rs | src/url_generator.rs | use std::{borrow::Cow, string::FromUtf8Error};
use rand::prelude::*;
use rand_regex::Regex;
use thiserror::Error;
use url::{ParseError, Url};
#[derive(Clone, Debug)]
pub enum UrlGenerator {
Static(Url),
MultiStatic(Vec<Url>),
Dynamic(Regex),
}
#[derive(Error, Debug)]
pub enum UrlGeneratorError {
#[error("{0}, generated url: {1}")]
Parse(ParseError, String),
#[error(transparent)]
FromUtf8(#[from] FromUtf8Error),
#[error("No valid URLs found")]
NoURLs(),
#[error(transparent)]
Io(#[from] std::io::Error),
}
impl UrlGenerator {
pub fn new_static(url: Url) -> Self {
Self::Static(url)
}
pub fn new_multi_static(urls: Vec<Url>) -> Self {
assert!(!urls.is_empty());
Self::MultiStatic(urls)
}
pub fn new_dynamic(regex: Regex) -> Self {
Self::Dynamic(regex)
}
pub fn generate<R: Rng>(&self, rng: &mut R) -> Result<Cow<'_, Url>, UrlGeneratorError> {
match self {
Self::Static(url) => Ok(Cow::Borrowed(url)),
Self::MultiStatic(urls) => {
if let Some(random_url) = urls.choose(rng) {
Ok(Cow::Borrowed(random_url))
} else {
Err(UrlGeneratorError::NoURLs())
}
}
Self::Dynamic(regex) => {
let generated = Distribution::<Result<String, FromUtf8Error>>::sample(regex, rng)?;
Ok(Cow::Owned(
Url::parse(generated.as_str())
.map_err(|e| UrlGeneratorError::Parse(e, generated))?,
))
}
}
}
}
#[cfg(test)]
mod tests {
use crate::pcg64si::Pcg64Si;
use super::*;
use rand_regex::Regex as RandRegex;
use regex::Regex;
use std::net::Ipv4Addr;
use url::{Host, Url};
#[test]
fn test_url_generator_static() {
let url_generator = UrlGenerator::new_static(Url::parse("http://127.0.0.1/test").unwrap());
let url = url_generator.generate(&mut rand::rng()).unwrap();
assert_eq!(url.host(), Some(Host::Ipv4(Ipv4Addr::new(127, 0, 0, 1))));
assert_eq!(url.path(), "/test");
}
#[test]
fn test_url_generator_multistatic() {
let urls = [
"http://127.0.0.1/a1",
"http://127.0.0.1/b2",
"http://127.0.0.1/c3",
];
let url_generator =
UrlGenerator::new_multi_static(urls.iter().map(|u| Url::parse(u).unwrap()).collect());
for _ in 0..10 {
let url = url_generator.generate(&mut rand::rng()).unwrap();
assert_eq!(url.host(), Some(Host::Ipv4(Ipv4Addr::new(127, 0, 0, 1))));
assert!(urls.contains(&url.as_str()));
}
}
#[test]
fn test_url_generator_dynamic() {
let path_regex = "/[a-z][a-z][0-9]";
let url_generator = UrlGenerator::new_dynamic(
RandRegex::compile(&format!(r"http://127\.0\.0\.1{path_regex}"), 4).unwrap(),
);
let url = url_generator.generate(&mut rand::rng()).unwrap();
assert_eq!(url.host(), Some(Host::Ipv4(Ipv4Addr::new(127, 0, 0, 1))));
assert!(
Regex::new(path_regex)
.unwrap()
.captures(url.path())
.is_some()
);
}
#[test]
fn test_url_generator_dynamic_consistency() {
let url_generator = UrlGenerator::new_dynamic(
RandRegex::compile(r"http://127\.0\.0\.1/[a-z][a-z][0-9]", 4).unwrap(),
);
for _ in 0..100 {
let rng: Pcg64Si = SeedableRng::from_os_rng();
assert_eq!(
url_generator.generate(&mut rng.clone()).unwrap(),
url_generator.generate(&mut rng.clone()).unwrap()
);
}
}
#[test]
fn test_url_generator_multi_consistency() {
let urls = [
"http://example.com/a1",
"http://example.com/a2",
"http://example.com/a3",
"http://example.com/a4",
"http://example.com/a5",
];
let url_generator =
UrlGenerator::new_multi_static(urls.iter().map(|u| Url::parse(u).unwrap()).collect());
for _ in 0..100 {
let rng: Pcg64Si = SeedableRng::from_os_rng();
assert_eq!(
url_generator.generate(&mut rng.clone()).unwrap(),
url_generator.generate(&mut rng.clone()).unwrap()
);
}
}
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/src/histogram.rs | src/histogram.rs | pub fn histogram(values: &[f64], bins: usize) -> Vec<(f64, usize)> {
assert!(bins >= 2);
let mut bucket: Vec<usize> = vec![0; bins];
let min = values.iter().collect::<average::Min>().min();
let max = values.iter().collect::<average::Max>().max();
let step = (max - min) / (bins - 1) as f64;
for &v in values {
let i = std::cmp::min(((v - min) / step).ceil() as usize, bins - 1);
bucket[i] += 1;
}
bucket
.into_iter()
.enumerate()
.map(|(i, v)| (min + step * i as f64, v))
.collect()
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_histogram() {
let values1: [f64; 10] = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0];
assert_eq!(
histogram(&values1, 10),
vec![
(1.0, 1),
(2.0, 1),
(3.0, 1),
(4.0, 1),
(5.0, 1),
(6.0, 1),
(7.0, 1),
(8.0, 1),
(9.0, 1),
(10.0, 1)
]
);
assert_eq!(
histogram(&values1, 4),
vec![(1.0, 1), (4.0, 3), (7.0, 3), (10.0, 3)]
);
assert_eq!(
histogram(&values1, 17),
vec![
(1.0, 1),
(1.5625, 0),
(2.125, 1),
(2.6875, 0),
(3.25, 1),
(3.8125, 0),
(4.375, 1),
(4.9375, 0),
(5.5, 1),
(6.0625, 1),
(6.625, 0),
(7.1875, 1),
(7.75, 0),
(8.3125, 1),
(8.875, 0),
(9.4375, 1),
(10.0, 1)
]
);
let values2: [f64; 10] = [1.0, 1.0, 1.0, 1.0, 1.0, 10.0, 10.0, 10.0, 10.0, 10.0];
assert_eq!(
histogram(&values2, 10),
vec![
(1.0, 5),
(2.0, 0),
(3.0, 0),
(4.0, 0),
(5.0, 0),
(6.0, 0),
(7.0, 0),
(8.0, 0),
(9.0, 0),
(10.0, 5)
]
);
assert_eq!(histogram(&values2, 2), vec![(1.0, 5), (10.0, 5)]);
}
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/tests/tests.rs | tests/tests.rs | use std::{
convert::Infallible,
error::Error as StdError,
fs::File,
future::Future,
io::Write,
net::{Ipv6Addr, SocketAddr},
str::FromStr,
sync::{Arc, OnceLock, atomic::AtomicU16},
};
use axum::{Router, extract::Path, response::Redirect, routing::get};
use bytes::Bytes;
use clap::Parser;
use http::{HeaderMap, Request, Response};
use http_body_util::BodyExt;
use http_mitm_proxy::MitmProxy;
use hyper::{
body::{Body, Incoming},
http,
service::{HttpService, service_fn},
};
use hyper_util::rt::{TokioExecutor, TokioIo};
use rstest::rstest;
use rstest_reuse::{self, *};
#[cfg(feature = "http3")]
mod common;
async fn run<'a>(args: impl Iterator<Item = &'a str>) {
let opts = oha::Opts::parse_from(
["oha", "--no-tui", "--output-format", "quiet"]
.into_iter()
.chain(args),
);
oha::run(opts).await.unwrap();
}
// Port 5111- is reserved for testing
static PORT: AtomicU16 = AtomicU16::new(5111);
fn next_port() -> u16 {
PORT.fetch_add(1, std::sync::atomic::Ordering::Relaxed)
}
#[ctor::ctor]
fn install_crypto_provider() {
static INSTALL: OnceLock<()> = OnceLock::new();
INSTALL.get_or_init(|| {
let _ = rustls::crypto::CryptoProvider::install_default(
rustls::crypto::aws_lc_rs::default_provider(),
);
});
}
async fn bind_port(port: u16) -> tokio::net::TcpListener {
let addr = SocketAddr::new("127.0.0.1".parse().unwrap(), port);
tokio::net::TcpListener::bind(addr).await.unwrap()
}
async fn bind_port_and_increment() -> (tokio::net::TcpListener, u16) {
let port = next_port();
let listener = bind_port(port).await;
(listener, port)
}
async fn bind_port_ipv6(port: u16) -> tokio::net::TcpListener {
let addr = SocketAddr::new(std::net::IpAddr::V6(Ipv6Addr::LOCALHOST), port);
tokio::net::TcpListener::bind(addr).await.unwrap()
}
#[derive(Clone, Copy, PartialEq)]
enum HttpWorkType {
H1,
H2,
#[cfg(feature = "http3")]
H3,
}
fn http_work_type(args: &[&str]) -> HttpWorkType {
// Check for HTTP/2
if args.contains(&"--http2") || args.windows(2).any(|w| w == ["--http-version", "2"]) {
return HttpWorkType::H2;
}
// Check for HTTP/3 when the feature is enabled
#[cfg(feature = "http3")]
if args.contains(&"--http3") || args.windows(2).any(|w| w == ["--http-version", "3"]) {
return HttpWorkType::H3;
}
// Default to HTTP/1.1
HttpWorkType::H1
}
#[cfg(feature = "http3")]
#[template]
#[rstest]
#[case("1.1")]
#[case("2")]
#[case("3")]
fn test_all_http_versions(#[case] http_version_param: &str) {}
#[cfg(not(feature = "http3"))]
#[template]
#[rstest]
#[case("1.1")]
#[case("2")]
fn test_all_http_versions(#[case] http_version_param: &str) {}
async fn get_req(path: &str, args: &[&str]) -> Request<Bytes> {
let (tx, rx) = kanal::unbounded();
let port = next_port();
let work_type = http_work_type(args);
let listener = bind_port(port).await;
tokio::spawn(async move {
match work_type {
HttpWorkType::H2 => loop {
let (tcp, _) = listener.accept().await.unwrap();
let tx = tx.clone();
let _ = hyper::server::conn::http2::Builder::new(TokioExecutor::new())
.serve_connection(
TokioIo::new(tcp),
service_fn(move |req: Request<Incoming>| {
let tx = tx.clone();
async move {
let (parts, body) = req.into_parts();
let body_bytes = body.collect().await.unwrap().to_bytes();
let req = Request::from_parts(parts, body_bytes);
tx.send(req).unwrap();
Ok::<_, Infallible>(Response::new("Hello World".to_string()))
}
}),
)
.await;
},
HttpWorkType::H1 => {
let (tcp, _) = listener.accept().await.unwrap();
hyper::server::conn::http1::Builder::new()
.serve_connection(
TokioIo::new(tcp),
service_fn(move |req: Request<Incoming>| {
let tx = tx.clone();
async move {
let (parts, body) = req.into_parts();
let body_bytes = body.collect().await.unwrap().to_bytes();
let req = Request::from_parts(parts, body_bytes);
tx.send(req).unwrap();
Ok::<_, Infallible>(Response::new("Hello World".to_string()))
}
}),
)
.await
.unwrap();
}
#[cfg(feature = "http3")]
HttpWorkType::H3 => {
drop(listener);
common::h3_server(tx, port).await.unwrap();
}
}
});
let mut args = args.iter().map(|s| s.to_string()).collect::<Vec<String>>();
args.push("-n".to_string());
args.push("1".to_string());
match work_type {
HttpWorkType::H1 | HttpWorkType::H2 => {
args.push(format!("http://127.0.0.1:{port}{path}"));
}
#[cfg(feature = "http3")]
HttpWorkType::H3 => {
args.push("--insecure".to_string());
args.push(format!("https://127.0.0.1:{port}{path}"));
}
}
run(args.iter().map(|s| s.as_str())).await;
rx.try_recv().unwrap().unwrap()
}
async fn redirect(n: usize, is_relative: bool, limit: usize) -> bool {
let (tx, rx) = kanal::unbounded();
let (listener, port) = bind_port_and_increment().await;
let app = Router::new().route(
"/{n}",
get(move |Path(x): Path<usize>| async move {
Ok::<_, Infallible>(if x == n {
tx.send(()).unwrap();
Redirect::permanent("/end")
} else if is_relative {
Redirect::permanent(&format!("/{}", x + 1))
} else {
Redirect::permanent(&format!("http://localhost:{}/{}", port, x + 1))
})
}),
);
tokio::spawn(async { axum::serve(listener, app).await });
let args = [
"-n".to_string(),
"1".to_string(),
"--redirect".to_string(),
limit.to_string(),
format!("http://127.0.0.1:{port}/0"),
];
run(args.iter().map(|s| s.as_str())).await;
rx.try_recv().unwrap().is_some()
}
async fn get_host_with_connect_to(host: &'static str) -> String {
let (tx, rx) = kanal::unbounded();
let app = Router::new().route(
"/",
get(|header: HeaderMap| async move {
tx.send(header.get("host").unwrap().to_str().unwrap().to_string())
.unwrap();
"Hello World"
}),
);
let (listener, port) = bind_port_and_increment().await;
tokio::spawn(async { axum::serve(listener, app).await });
let args = [
"-n".to_string(),
"1".to_string(),
format!("http://{host}/"),
"--connect-to".to_string(),
format!("{host}:80:localhost:{port}"),
];
run(args.iter().map(|s| s.as_str())).await;
rx.try_recv().unwrap().unwrap()
}
async fn get_host_with_connect_to_ipv6_target(host: &'static str) -> String {
let (tx, rx) = kanal::unbounded();
let app = Router::new().route(
"/",
get(|header: HeaderMap| async move {
tx.send(header.get("host").unwrap().to_str().unwrap().to_string())
.unwrap();
"Hello World"
}),
);
let port = next_port();
let listener = bind_port_ipv6(port).await;
tokio::spawn(async { axum::serve(listener, app).await });
let args = [
"-n".to_string(),
"1".to_string(),
format!("http://{host}/"),
"--connect-to".to_string(),
format!("{host}:80:[::1]:{port}"),
];
run(args.iter().map(|s| s.as_str())).await;
rx.try_recv().unwrap().unwrap()
}
async fn get_host_with_connect_to_ipv6_requested() -> String {
let (tx, rx) = kanal::unbounded();
let app = Router::new().route(
"/",
get(|header: HeaderMap| async move {
tx.send(header.get("host").unwrap().to_str().unwrap().to_string())
.unwrap();
"Hello World"
}),
);
let (listener, port) = bind_port_and_increment().await;
tokio::spawn(async { axum::serve(listener, app).await });
let args = [
"-n".to_string(),
"1".to_string(),
"http://[::1]/".to_string(),
"--connect-to".to_string(),
format!("[::1]:80:localhost:{port}"),
];
run(args.iter().map(|s| s.as_str())).await;
rx.try_recv().unwrap().unwrap()
}
async fn get_host_with_connect_to_redirect(host: &'static str) -> String {
let (tx, rx) = kanal::unbounded();
let app = Router::new()
.route(
"/source",
get(move || async move { Redirect::permanent(&format!("http://{host}/destination")) }),
)
.route(
"/destination",
get(move || async move {
tx.send(host.to_string()).unwrap();
"Hello World"
}),
);
let (listener, port) = bind_port_and_increment().await;
tokio::spawn(async { axum::serve(listener, app).await });
let args = [
"-n".to_string(),
"1".to_string(),
format!("http://{host}/source"),
"--connect-to".to_string(),
format!("{host}:80:localhost:{port}"),
];
run(args.iter().map(|s| s.as_str())).await;
rx.try_recv().unwrap().unwrap()
}
async fn test_request_count(args: &[&str]) -> usize {
let (tx, rx) = kanal::unbounded();
let app = Router::new().route(
"/",
get(|| async move {
tx.send(()).unwrap();
"Success"
}),
);
let (listener, port) = bind_port_and_increment().await;
tokio::spawn(async { axum::serve(listener, app).await });
let mut args: Vec<String> = args.iter().map(|s| s.to_string()).collect();
args.push(format!("http://127.0.0.1:{port}"));
run(args.iter().map(|s| s.as_str())).await;
let mut count = 0;
while let Ok(Some(())) = rx.try_recv() {
count += 1;
}
count
}
// Randomly spread 100 requests on two matching --connect-to targets, and return a count for each
async fn distribution_on_two_matching_connect_to(host: &'static str) -> (i32, i32) {
let (tx1, rx1) = kanal::unbounded();
let (tx2, rx2) = kanal::unbounded();
let app1 = Router::new().route(
"/",
get(move || async move {
tx1.send(()).unwrap();
"Success1"
}),
);
let app2 = Router::new().route(
"/",
get(move || async move {
tx2.send(()).unwrap();
"Success2"
}),
);
let (listener1, port1) = bind_port_and_increment().await;
tokio::spawn(async { axum::serve(listener1, app1).await });
let (listener2, port2) = bind_port_and_increment().await;
tokio::spawn(async { axum::serve(listener2, app2).await });
let args = [
"--disable-keepalive".to_string(),
"-n".to_string(),
"100".to_string(),
format!("http://{host}/"),
"--connect-to".to_string(),
format!("{host}:80:localhost:{port1}"),
"--connect-to".to_string(),
format!("{host}:80:localhost:{port2}"),
];
run(args.iter().map(|s| s.as_str())).await;
let mut count1 = 0;
let mut count2 = 0;
loop {
if rx1.try_recv().unwrap().is_some() {
count1 += 1;
} else if rx2.try_recv().unwrap().is_some() {
count2 += 1;
} else {
break;
}
}
(count1, count2)
}
#[apply(test_all_http_versions)]
#[tokio::test(flavor = "multi_thread")]
async fn test_enable_compression_default(http_version_param: &str) {
let req = get_req("/", &["--http-version", http_version_param]).await;
let accept_encoding: Vec<&str> = req
.headers()
.get("accept-encoding")
.unwrap()
.to_str()
.unwrap()
.split(", ")
.collect();
assert!(accept_encoding.contains(&"gzip"));
assert!(accept_encoding.contains(&"br"));
}
#[apply(test_all_http_versions)]
#[tokio::test(flavor = "multi_thread")]
async fn test_setting_custom_header(http_version_param: &str) {
let req = get_req(
"/",
&["--http-version", http_version_param, "-H", "foo: bar"],
)
.await;
assert_eq!(req.headers().get("foo").unwrap().to_str().unwrap(), "bar");
}
#[tokio::test(flavor = "multi_thread")]
#[apply(test_all_http_versions)]
async fn test_setting_accept_header(http_version_param: &str) {
let req = get_req(
"/",
&["-A", "text/html", "--http-version", http_version_param],
)
.await;
assert_eq!(
req.headers().get("accept").unwrap().to_str().unwrap(),
"text/html"
);
let req = get_req(
"/",
&[
"-H",
"accept:text/html",
"--http-version",
http_version_param,
],
)
.await;
assert_eq!(
req.headers().get("accept").unwrap().to_str().unwrap(),
"text/html"
);
}
#[tokio::test(flavor = "multi_thread")]
#[apply(test_all_http_versions)]
async fn test_setting_body(http_version_param: &str) {
let req = get_req(
"/",
&["-d", "hello body", "--http-version", http_version_param],
)
.await;
assert_eq!(
req.into_body(),
&b"hello body"[..] /* This looks dirty... Any suggestion? */
);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_setting_content_type_header() {
let req = get_req("/", &["-T", "text/html"]).await;
assert_eq!(
req.headers().get("content-type").unwrap().to_str().unwrap(),
"text/html"
);
let req = get_req("/", &["-H", "content-type:text/html"]).await;
assert_eq!(
req.headers().get("content-type").unwrap().to_str().unwrap(),
"text/html"
);
let req = get_req("/", &["--http2", "-T", "text/html"]).await;
assert_eq!(
req.headers().get("content-type").unwrap().to_str().unwrap(),
"text/html"
);
let req = get_req("/", &["--http2", "-H", "content-type:text/html"]).await;
assert_eq!(
req.headers().get("content-type").unwrap().to_str().unwrap(),
"text/html"
);
}
#[apply(test_all_http_versions)]
#[tokio::test(flavor = "multi_thread")]
async fn test_setting_basic_auth(http_version_param: &str) {
let req = get_req(
"/",
&["-a", "hatoo:pass", "--http-version", http_version_param],
)
.await;
assert_eq!(
req.headers()
.get("authorization")
.unwrap()
.to_str()
.unwrap(),
"Basic aGF0b286cGFzcw=="
);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_setting_host() {
let req = get_req("/", &["--host", "hatoo.io"]).await;
assert_eq!(
req.headers().get("host").unwrap().to_str().unwrap(),
"hatoo.io"
);
let req = get_req("/", &["-H", "host:hatoo.io"]).await;
assert_eq!(
req.headers().get("host").unwrap().to_str().unwrap(),
"hatoo.io"
);
// You shouldn't set host header when using HTTP/2
// Use --connect-to instead
}
#[tokio::test(flavor = "multi_thread")]
async fn test_setting_method() {
assert_eq!(get_req("/", &[]).await.method(), http::method::Method::GET);
assert_eq!(
get_req("/", &["-m", "GET"]).await.method(),
http::method::Method::GET
);
assert_eq!(
get_req("/", &["-m", "POST"]).await.method(),
http::method::Method::POST
);
assert_eq!(
get_req("/", &["-m", "CONNECT"]).await.method(),
http::method::Method::CONNECT
);
assert_eq!(
get_req("/", &["-m", "DELETE"]).await.method(),
http::method::Method::DELETE
);
assert_eq!(
get_req("/", &["-m", "HEAD"]).await.method(),
http::method::Method::HEAD
);
assert_eq!(
get_req("/", &["-m", "OPTIONS"]).await.method(),
http::method::Method::OPTIONS
);
assert_eq!(
get_req("/", &["-m", "PATCH"]).await.method(),
http::method::Method::PATCH
);
assert_eq!(
get_req("/", &["-m", "PUT"]).await.method(),
http::method::Method::PUT
);
assert_eq!(
get_req("/", &["-m", "TRACE"]).await.method(),
http::method::Method::TRACE
);
assert_eq!(
get_req("/", &["--http2"]).await.method(),
http::method::Method::GET
);
assert_eq!(
get_req("/", &["--http2", "-m", "GET"]).await.method(),
http::method::Method::GET
);
assert_eq!(
get_req("/", &["--http2", "-m", "POST"]).await.method(),
http::method::Method::POST
);
assert_eq!(
get_req("/", &["--http2", "-m", "DELETE"]).await.method(),
http::method::Method::DELETE
);
assert_eq!(
get_req("/", &["--http2", "-m", "HEAD"]).await.method(),
http::method::Method::HEAD
);
assert_eq!(
get_req("/", &["--http2", "-m", "OPTIONS"]).await.method(),
http::method::Method::OPTIONS
);
assert_eq!(
get_req("/", &["--http2", "-m", "PATCH"]).await.method(),
http::method::Method::PATCH
);
assert_eq!(
get_req("/", &["--http2", "-m", "PUT"]).await.method(),
http::method::Method::PUT
);
assert_eq!(
get_req("/", &["--http2", "-m", "TRACE"]).await.method(),
http::method::Method::TRACE
);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_query() {
assert_eq!(
get_req("/index?a=b&c=d", &[]).await.uri().to_string(),
"/index?a=b&c=d".to_string()
);
assert_eq!(
get_req("/index?a=b&c=d", &["--http2"])
.await
.uri()
.to_string()
.split('/')
.next_back()
.unwrap(),
"index?a=b&c=d".to_string()
);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_query_rand_regex() {
let req = get_req("/[a-z][0-9][a-z]", &["--rand-regex-url"]).await;
let chars = req
.uri()
.to_string()
.trim_start_matches('/')
.chars()
.collect::<Vec<char>>();
assert_eq!(chars.len(), 3);
assert!(chars[0].is_ascii_lowercase());
assert!(chars[1].is_ascii_digit());
assert!(chars[2].is_ascii_lowercase());
let req = get_req("/[a-z][0-9][a-z]", &["--http2", "--rand-regex-url"]).await;
let chars = req
.uri()
.to_string()
.split('/')
.next_back()
.unwrap()
.chars()
.collect::<Vec<char>>();
assert_eq!(chars.len(), 3);
assert!(chars[0].is_ascii_lowercase());
assert!(chars[1].is_ascii_digit());
assert!(chars[2].is_ascii_lowercase());
}
#[tokio::test(flavor = "multi_thread")]
async fn test_redirect() {
for n in 1..=5 {
assert!(redirect(n, true, 10).await);
assert!(redirect(n, false, 10).await);
}
for n in 11..=15 {
assert!(!redirect(n, true, 10).await);
assert!(!redirect(n, false, 10).await);
}
}
#[tokio::test(flavor = "multi_thread")]
async fn test_connect_to() {
assert_eq!(
get_host_with_connect_to("invalid.example.org").await,
"invalid.example.org"
)
}
#[tokio::test(flavor = "multi_thread")]
async fn test_connect_to_randomness() {
let (count1, count2) = distribution_on_two_matching_connect_to("invalid.example.org").await;
assert!(count1 + count2 == 100);
assert!(count1 >= 10 && count2 >= 10); // should not be too flaky with 100 coin tosses
}
#[tokio::test(flavor = "multi_thread")]
async fn test_connect_to_ipv6_target() {
assert_eq!(
get_host_with_connect_to_ipv6_target("invalid.example.org").await,
"invalid.example.org"
)
}
#[tokio::test(flavor = "multi_thread")]
async fn test_connect_to_ipv6_requested() {
assert_eq!(get_host_with_connect_to_ipv6_requested().await, "[::1]")
}
#[tokio::test(flavor = "multi_thread")]
async fn test_connect_to_redirect() {
assert_eq!(
get_host_with_connect_to_redirect("invalid.example.org").await,
"invalid.example.org"
)
}
#[tokio::test(flavor = "multi_thread")]
async fn test_connect_to_http_proxy_override() {
let (tx, rx) = kanal::unbounded();
let proxy_port = PORT.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
let listener = tokio::net::TcpListener::bind(("127.0.0.1", proxy_port))
.await
.unwrap();
tokio::spawn(async move {
let (stream, _) = listener.accept().await.unwrap();
let tx = tx.clone();
hyper::server::conn::http1::Builder::new()
.preserve_header_case(true)
.title_case_headers(true)
.serve_connection(
TokioIo::new(stream),
service_fn(move |req: Request<Incoming>| {
let tx = tx.clone();
async move {
let authority = req
.uri()
.authority()
.map(|a| a.to_string())
.expect("proxy received origin-form request");
let host = req
.headers()
.get("host")
.and_then(|v| v.to_str().ok())
.map(|s| s.to_string())
.unwrap_or_default();
tx.send((authority, host)).unwrap();
Ok::<_, Infallible>(Response::new("proxy".to_string()))
}
}),
)
.await
.unwrap();
});
let override_port = PORT.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
let args = [
"-n".to_string(),
"1".to_string(),
"-x".to_string(),
format!("http://127.0.0.1:{proxy_port}"),
"--connect-to".to_string(),
format!("example.test:80:127.0.0.1:{override_port}"),
"http://example.test/".to_string(),
];
run(args.iter().map(|s| s.as_str())).await;
let (authority, host) = rx.try_recv().unwrap().unwrap();
assert_eq!(authority, format!("127.0.0.1:{override_port}"));
assert_eq!(host, "example.test");
}
#[tokio::test(flavor = "multi_thread")]
async fn test_connect_to_https_proxy_connect_override() {
let (connect_tx, connect_rx) = kanal::unbounded();
let (host_tx, host_rx) = kanal::unbounded();
let service = service_fn(move |req: Request<Incoming>| {
let host_tx = host_tx.clone();
async move {
let host = req
.headers()
.get("host")
.and_then(|h| h.to_str().ok())
.map(|s| s.to_string())
.unwrap_or_default();
host_tx.send(host).unwrap();
Ok::<_, Infallible>(Response::new("Hello World".to_string()))
}
});
let (proxy_port, proxy_serve) =
bind_proxy_with_recorder(service, false, connect_tx.clone()).await;
tokio::spawn(proxy_serve);
let override_port = PORT.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
let args = vec![
"-n".to_string(),
"1".to_string(),
"--insecure".to_string(),
"-x".to_string(),
format!("http://127.0.0.1:{proxy_port}"),
"--proxy-header".to_string(),
"proxy-authorization: test".to_string(),
"--connect-to".to_string(),
format!("example.test:443:127.0.0.1:{override_port}"),
"https://example.test/".to_string(),
];
run(args.iter().map(|s| s.as_str())).await;
let connect_target = connect_rx.try_recv().unwrap().unwrap();
assert_eq!(connect_target, format!("127.0.0.1:{override_port}"));
let host_header = host_rx.try_recv().unwrap().unwrap();
assert_eq!(host_header, "example.test");
}
#[tokio::test(flavor = "multi_thread")]
async fn test_ipv6() {
let (tx, rx) = kanal::unbounded();
let app = Router::new().route(
"/",
get(|| async move {
tx.send(()).unwrap();
"Hello World"
}),
);
let port = next_port();
let listener = bind_port_ipv6(port).await;
tokio::spawn(async { axum::serve(listener, app).await });
let args = [
"-n".to_string(),
"1".to_string(),
format!("http://[::1]:{port}/"),
];
run(args.iter().map(|s| s.as_str())).await;
rx.try_recv().unwrap().unwrap();
}
#[tokio::test(flavor = "multi_thread")]
async fn test_query_limit() {
// burst 10 requests with delay of 2s and rate of 4
let mut args = vec!["-n", "10", "--burst-delay", "2s", "--burst-rate", "4"];
assert_eq!(test_request_count(args.as_slice()).await, 10);
args.push("--http2");
assert_eq!(test_request_count(args.as_slice()).await, 10);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_query_limit_with_time_limit() {
// 1.75 qps for 2sec = expect 4 requests at times 0, 0.571, 1.142, 1,714sec
assert_eq!(test_request_count(&["-z", "2s", "-q", "1.75"]).await, 4);
}
#[tokio::test(flavor = "multi_thread")]
async fn test_http_versions() {
assert_eq!(get_req("/", &[]).await.version(), http::Version::HTTP_11);
assert_eq!(
get_req("/", &["--http2"]).await.version(),
http::Version::HTTP_2
);
assert_eq!(
get_req("/", &["--http-version", "2"]).await.version(),
http::Version::HTTP_2
);
#[cfg(feature = "http3")]
assert_eq!(
get_req("/", &["--http-version", "3"]).await.version(),
http::Version::HTTP_3
);
}
#[cfg(unix)]
#[tokio::test(flavor = "multi_thread")]
async fn test_unix_socket() {
let (tx, rx) = kanal::unbounded();
let tmp = tempfile::tempdir().unwrap();
let path = tmp.path().join("socket");
let listener = std::os::unix::net::UnixListener::bind(&path).unwrap();
tokio::spawn(async move {
actix_web::HttpServer::new(move || {
let tx = actix_web::web::Data::new(tx.clone());
actix_web::App::new().service(actix_web::web::resource("/").to(move || {
let tx = tx.clone();
async move {
tx.send(()).unwrap();
"Hello World"
}
}))
})
.listen_uds(listener)
.unwrap()
.run()
.await
.unwrap();
});
let args = [
"-n".to_string(),
"1".to_string(),
"--unix-socket".to_string(),
path.to_str().unwrap().to_string(),
"http://unix-socket.invalid-tld/".to_string(),
];
run(args.iter().map(|s| s.as_str())).await;
rx.try_recv().unwrap().unwrap();
}
fn make_root_issuer() -> rcgen::Issuer<'static, rcgen::KeyPair> {
let mut params = rcgen::CertificateParams::default();
params.distinguished_name = rcgen::DistinguishedName::new();
params.distinguished_name.push(
rcgen::DnType::CommonName,
rcgen::DnValue::Utf8String("<HTTP-MITM-PROXY CA>".to_string()),
);
params.key_usages = vec![
rcgen::KeyUsagePurpose::KeyCertSign,
rcgen::KeyUsagePurpose::CrlSign,
];
params.is_ca = rcgen::IsCa::Ca(rcgen::BasicConstraints::Unconstrained);
let signing_key = rcgen::KeyPair::generate().unwrap();
rcgen::Issuer::new(params, signing_key)
}
async fn bind_proxy<S>(service: S, http2: bool) -> (u16, impl Future<Output = ()>)
where
S: HttpService<Incoming> + Clone + Send + 'static,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
S::ResBody: Send + Sync + 'static,
<S::ResBody as Body>::Data: Send,
<S::ResBody as Body>::Error: Into<Box<dyn StdError + Send + Sync>>,
S::Future: Send,
{
let port = PORT.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
let tcp_listener = tokio::net::TcpListener::bind(("127.0.0.1", port))
.await
.unwrap();
let issuer = make_root_issuer();
let proxy = Arc::new(http_mitm_proxy::MitmProxy::new(Some(issuer), None));
let serve = async move {
let (stream, _) = tcp_listener.accept().await.unwrap();
let proxy = proxy.clone();
let service = service.clone();
let outer = service_fn(move |req| {
// Test --proxy-header option
assert_eq!(
req.headers()
.get("proxy-authorization")
.unwrap()
.to_str()
.unwrap(),
"test"
);
MitmProxy::wrap_service(proxy.clone(), service.clone()).call(req)
});
tokio::spawn(async move {
if http2 {
let _ = hyper::server::conn::http2::Builder::new(TokioExecutor::new())
.serve_connection(TokioIo::new(stream), outer)
.await;
} else {
let _ = hyper::server::conn::http1::Builder::new()
.preserve_header_case(true)
.title_case_headers(true)
.serve_connection(TokioIo::new(stream), outer)
.with_upgrades()
.await;
}
});
};
(port, serve)
}
async fn bind_proxy_with_recorder<S>(
service: S,
http2: bool,
recorder: kanal::Sender<String>,
) -> (u16, impl Future<Output = ()>)
where
S: HttpService<Incoming> + Clone + Send + 'static,
S::Error: Into<Box<dyn StdError + Send + Sync>>,
S::ResBody: Send + Sync + 'static,
<S::ResBody as Body>::Data: Send,
<S::ResBody as Body>::Error: Into<Box<dyn StdError + Send + Sync>>,
S::Future: Send,
{
let port = PORT.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
let tcp_listener = tokio::net::TcpListener::bind(("127.0.0.1", port))
.await
.unwrap();
let issuer = make_root_issuer();
let proxy = Arc::new(http_mitm_proxy::MitmProxy::new(Some(issuer), None));
let serve = async move {
let (stream, _) = tcp_listener.accept().await.unwrap();
let proxy = proxy.clone();
let service = service.clone();
let recorder = recorder.clone();
let outer = service_fn(move |req| {
let recorder = recorder.clone();
if req.method() == hyper::Method::CONNECT {
recorder.send(req.uri().to_string()).unwrap();
}
assert_eq!(
req.headers()
.get("proxy-authorization")
.unwrap()
.to_str()
.unwrap(),
"test"
);
MitmProxy::wrap_service(proxy.clone(), service.clone()).call(req)
});
tokio::spawn(async move {
if http2 {
let _ = hyper::server::conn::http2::Builder::new(TokioExecutor::new())
.serve_connection(TokioIo::new(stream), outer)
.await;
} else {
let _ = hyper::server::conn::http1::Builder::new()
.preserve_header_case(true)
.title_case_headers(true)
.serve_connection(TokioIo::new(stream), outer)
.with_upgrades()
.await;
}
});
};
(port, serve)
}
async fn test_proxy_with_setting(https: bool, http2: bool, proxy_http2: bool) {
let (proxy_port, proxy_serve) = bind_proxy(
service_fn(|_req| async {
let res = Response::new("Hello World".to_string());
Ok::<_, Infallible>(res)
}),
proxy_http2,
)
.await;
tokio::spawn(proxy_serve);
let mut args = Vec::new();
let scheme = if https { "https" } else { "http" };
args.extend(
[
"--no-tui",
"-n",
"1",
"--no-tui",
"--output-format",
"quiet",
"--insecure",
"-x",
]
.into_iter()
.map(|s| s.to_string()),
);
args.push(format!("http://127.0.0.1:{proxy_port}/"));
args.extend(
["--proxy-header", "proxy-authorization: test"]
.into_iter()
.map(|s| s.to_string()),
);
args.push(format!("{scheme}://example.com/"));
if http2 {
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | true |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/tests/common/mod.rs | tests/common/mod.rs | use std::{net::SocketAddr, sync::Arc};
use bytes::{Buf, Bytes};
use http::{Request, Response};
use kanal::Sender;
use rustls::pki_types::{CertificateDer, PrivateKeyDer};
use h3::{quic::BidiStream, server::RequestStream};
use h3_quinn::quinn::{self, crypto::rustls::QuicServerConfig};
static ALPN: &[u8] = b"h3";
// This would be much cleaner if it took `process_request` as a callback, similar to the hyper service_fn.
pub async fn h3_server(
tx: Sender<Request<Bytes>>,
port: u16,
) -> Result<(), Box<dyn std::error::Error>> {
let listen = SocketAddr::new("127.0.0.1".parse().unwrap(), port);
// Get the directory of the current file
let current_file = file!();
let current_dir = std::path::Path::new(current_file)
.parent()
.unwrap_or_else(|| std::path::Path::new(""));
// Construct paths to cert and key files
let cert_path = current_dir.join("server.cert");
let key_path = current_dir.join("server.key");
// both cert and key must be DER-encoded
let cert = CertificateDer::from(std::fs::read(&cert_path)?);
let key = PrivateKeyDer::try_from(std::fs::read(&key_path)?)?;
let _ = rustls::crypto::CryptoProvider::install_default(
rustls::crypto::aws_lc_rs::default_provider(),
);
let mut tls_config = rustls::ServerConfig::builder()
.with_no_client_auth()
.with_single_cert(vec![cert], key)?;
tls_config.max_early_data_size = u32::MAX;
tls_config.alpn_protocols = vec![ALPN.into()];
let server_config =
quinn::ServerConfig::with_crypto(Arc::new(QuicServerConfig::try_from(tls_config)?));
let endpoint = quinn::Endpoint::server(server_config, listen)?;
// handle incoming connections and requests
while let Some(new_conn) = endpoint.accept().await {
let tx = tx.clone();
let _ = tokio::spawn(async move {
match new_conn.await {
Ok(conn) => {
let mut h3_conn = h3::server::Connection::new(h3_quinn::Connection::new(conn))
.await
.unwrap();
let tx = tx.clone();
match h3_conn.accept().await {
Ok(Some(request_resolver)) => {
let (req, stream) = request_resolver.resolve_request().await.unwrap();
process_request(req, stream, tx).await
}
// indicating no more streams to be received
Ok(None) => Ok(()),
Err(_err) => {
unimplemented!()
// error!("error on accept {}", err);
/*
match err.get_error_level() {
ErrorLevel::ConnectionError => break,
ErrorLevel::StreamError => continue,
}
*/
}
}
}
Err(_err) => Ok(()),
}
})
.await?;
}
// shut down gracefully
// wait for connections to be closed before exiting
endpoint.wait_idle().await;
Ok(())
}
async fn process_request<T>(
req: Request<()>,
mut stream: RequestStream<T, Bytes>,
tx: Sender<Request<Bytes>>,
) -> Result<(), h3::error::StreamError>
where
T: BidiStream<Bytes>,
{
let (parts, _) = req.into_parts();
let mut body_bytes = bytes::BytesMut::new();
while let Some(mut chunk) = stream.recv_data().await? {
let bytes = chunk.copy_to_bytes(chunk.remaining());
body_bytes.extend_from_slice(&bytes);
}
let body = body_bytes.freeze();
let req = Request::from_parts(parts, body);
tx.send(req).unwrap();
let resp = Response::new(());
stream.send_response(resp).await?;
stream.send_data("Hello world".into()).await?;
stream.finish().await
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
hatoo/oha | https://github.com/hatoo/oha/blob/dfe01a1e58bb1d7f4accb0f9b379d035f6856f89/pgo/server/src/main.rs | pgo/server/src/main.rs | use std::net::SocketAddr;
use tokio::net::TcpListener;
use axum::{routing::get, Router};
#[tokio::main]
async fn main() {
// build our application with a route
let app = Router::new()
// `GET /` goes to `root`
.route("/", get(root));
// run our app with hyper
// `axum::Server` is a re-export of `hyper::Server`
let addr = SocketAddr::from(([127, 0, 0, 1], 8888));
let listener = TcpListener::bind(&addr).await.unwrap();
axum::serve(listener, app).await.unwrap();
}
async fn root() -> &'static str {
"Hello, World!"
}
| rust | MIT | dfe01a1e58bb1d7f4accb0f9b379d035f6856f89 | 2026-01-04T15:44:25.967989Z | false |
johnthagen/min-sized-rust | https://github.com/johnthagen/min-sized-rust/blob/24233f98d9a8a484302408ed6b5431ed8f720321/src/main.rs | src/main.rs | fn main() {
println!("Hello, world!");
}
| rust | MIT | 24233f98d9a8a484302408ed6b5431ed8f720321 | 2026-01-04T15:44:57.818982Z | false |
johnthagen/min-sized-rust | https://github.com/johnthagen/min-sized-rust/blob/24233f98d9a8a484302408ed6b5431ed8f720321/no_std/nix/src/main.rs | no_std/nix/src/main.rs | #![no_std]
#![no_main]
extern crate libc;
#[no_mangle]
pub extern "C" fn main(_argc: isize, _argv: *const *const u8) -> isize {
// Since we are passing a C string the final null character is mandatory.
const HELLO: &'static str = "Hello, world!\n\0";
unsafe {
libc::printf(HELLO.as_ptr() as *const _);
}
0
}
#[panic_handler]
fn my_panic(_info: &core::panic::PanicInfo) -> ! {
loop {}
}
| rust | MIT | 24233f98d9a8a484302408ed6b5431ed8f720321 | 2026-01-04T15:44:57.818982Z | false |
johnthagen/min-sized-rust | https://github.com/johnthagen/min-sized-rust/blob/24233f98d9a8a484302408ed6b5431ed8f720321/no_std/win/src/main.rs | no_std/win/src/main.rs | #![no_main]
#![no_std]
#![windows_subsystem = "console"]
use core::ffi::c_void;
use core::panic::PanicInfo;
use windows_sys::Win32::System::Console::GetStdHandle;
use windows_sys::Win32::System::Console::WriteConsoleA;
use windows_sys::Win32::System::Console::STD_OUTPUT_HANDLE;
use windows_sys::Win32::System::Threading::ExitProcess;
#[panic_handler]
fn panic(_: &PanicInfo<'_>) -> ! {
unsafe {
ExitProcess(1);
}
}
#[allow(non_snake_case)]
#[no_mangle]
fn mainCRTStartup() -> ! {
let message = "Hello, world!\n";
unsafe {
let console = GetStdHandle(STD_OUTPUT_HANDLE);
WriteConsoleA(
console,
message.as_ptr().cast::<c_void>(),
message.len() as u32,
core::ptr::null_mut(),
core::ptr::null(),
);
ExitProcess(0)
}
}
| rust | MIT | 24233f98d9a8a484302408ed6b5431ed8f720321 | 2026-01-04T15:44:57.818982Z | false |
johnthagen/min-sized-rust | https://github.com/johnthagen/min-sized-rust/blob/24233f98d9a8a484302408ed6b5431ed8f720321/build_std/src/main.rs | build_std/src/main.rs | fn main() {
println!("Hello, world!");
}
| rust | MIT | 24233f98d9a8a484302408ed6b5431ed8f720321 | 2026-01-04T15:44:57.818982Z | false |
johnthagen/min-sized-rust | https://github.com/johnthagen/min-sized-rust/blob/24233f98d9a8a484302408ed6b5431ed8f720321/no_main/nix/src/main.rs | no_main/nix/src/main.rs | #![no_main]
use std::fs::File;
use std::io::Write;
use std::os::unix::io::FromRawFd;
fn stdout() -> File {
unsafe { File::from_raw_fd(1) }
}
#[no_mangle]
pub fn main(_argc: i32, _argv: *const *const u8) {
let mut stdout = stdout();
stdout.write(b"Hello, world!\n").unwrap();
}
| rust | MIT | 24233f98d9a8a484302408ed6b5431ed8f720321 | 2026-01-04T15:44:57.818982Z | false |
johnthagen/min-sized-rust | https://github.com/johnthagen/min-sized-rust/blob/24233f98d9a8a484302408ed6b5431ed8f720321/no_main/win/src/main.rs | no_main/win/src/main.rs | #![no_main]
use std::fs::File;
use std::io::Write as _;
use std::os::windows::{io::FromRawHandle as _, raw::HANDLE};
#[link(name = "kernel32")]
extern "system" {
pub fn GetStdHandle(nstdhandle: u32) -> HANDLE;
}
pub const STD_OUTPUT_HANDLE: u32 = 4294967285;
fn stdout() -> File {
unsafe { File::from_raw_handle(GetStdHandle(STD_OUTPUT_HANDLE)) }
}
#[no_mangle]
pub fn main(_argc: i32, _argv: *const *const u8) -> u32 {
let mut stdout = stdout();
stdout.write_all(b"Hello, world!\n").unwrap();
0
}
| rust | MIT | 24233f98d9a8a484302408ed6b5431ed8f720321 | 2026-01-04T15:44:57.818982Z | false |
linebender/druid | https://github.com/linebender/druid/blob/b27ea6a618c32f9ea0e8a56822f9487d23401c0d/druid-derive/src/attr.rs | druid-derive/src/attr.rs | // Copyright 2019 the Druid Authors
// SPDX-License-Identifier: Apache-2.0
//! parsing #[druid(attributes)]
use proc_macro2::{Ident, Literal, Span, TokenStream, TokenTree};
use syn::spanned::Spanned;
use syn::{Error, ExprPath, Meta, NestedMeta};
use quote::{quote, quote_spanned};
//show error to tell users of old API that it doesn't work anymore
const BASE_DRUID_DEPRECATED_ATTR_PATH: &str = "druid";
const BASE_DATA_ATTR_PATH: &str = "data";
const BASE_LENS_ATTR_PATH: &str = "lens";
const IGNORE_ATTR_PATH: &str = "ignore";
const DATA_SAME_FN_ATTR_PATH: &str = "same_fn";
const DATA_EQ_ATTR_PATH: &str = "eq";
const LENS_NAME_OVERRIDE_ATTR_PATH: &str = "name";
/// The fields for a struct or an enum variant.
#[derive(Debug)]
pub struct Fields<Attrs> {
pub kind: FieldKind,
fields: Vec<Field<Attrs>>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum FieldKind {
Named,
// this also covers Unit; we determine 'unit-ness' based on the number
// of fields.
Unnamed,
}
#[derive(Debug)]
pub enum FieldIdent {
Named(String),
Unnamed(usize),
}
impl FieldIdent {
pub fn unwrap_named(&self) -> syn::Ident {
if let FieldIdent::Named(s) = self {
syn::Ident::new(s, Span::call_site())
} else {
panic!("Unwrap named called on unnamed FieldIdent");
}
}
}
#[derive(Debug)]
pub struct Field<Attrs> {
pub ident: FieldIdent,
pub ty: syn::Type,
pub attrs: Attrs,
}
#[derive(Debug, PartialEq, Eq)]
pub enum DataAttr {
Empty,
Ignore,
SameFn(ExprPath),
Eq,
}
#[derive(Debug)]
pub struct LensAttrs {
/// `true` if this field should be ignored.
pub ignore: bool,
pub lens_name_override: Option<Ident>,
}
impl Fields<DataAttr> {
pub fn parse_ast(fields: &syn::Fields) -> Result<Self, Error> {
let kind = match fields {
syn::Fields::Named(_) => FieldKind::Named,
syn::Fields::Unnamed(_) | syn::Fields::Unit => FieldKind::Unnamed,
};
let fields = fields
.iter()
.enumerate()
.map(|(i, field)| Field::<DataAttr>::parse_ast(field, i))
.collect::<Result<Vec<_>, _>>()?;
Ok(Fields { kind, fields })
}
}
impl Fields<LensAttrs> {
pub fn parse_ast(fields: &syn::Fields) -> Result<Self, Error> {
let kind = match fields {
syn::Fields::Named(_) => FieldKind::Named,
syn::Fields::Unnamed(_) | syn::Fields::Unit => FieldKind::Unnamed,
};
let fields = fields
.iter()
.enumerate()
.map(|(i, field)| Field::<LensAttrs>::parse_ast(field, i))
.collect::<Result<Vec<_>, _>>()?;
Ok(Fields { kind, fields })
}
}
impl<Attrs> Fields<Attrs> {
pub fn len(&self) -> usize {
self.fields.len()
}
pub fn iter(&self) -> impl Iterator<Item = &Field<Attrs>> {
self.fields.iter()
}
}
impl Field<DataAttr> {
pub fn parse_ast(field: &syn::Field, index: usize) -> Result<Self, Error> {
let ident = match field.ident.as_ref() {
Some(ident) => FieldIdent::Named(ident.to_string().trim_start_matches("r#").to_owned()),
None => FieldIdent::Unnamed(index),
};
let ty = field.ty.clone();
let mut data_attr = DataAttr::Empty;
for attr in field.attrs.iter() {
if attr.path.is_ident(BASE_DRUID_DEPRECATED_ATTR_PATH) {
panic!(
"The 'druid' attribute has been replaced with separate \
'lens' and 'data' attributes.",
);
} else if attr.path.is_ident(BASE_DATA_ATTR_PATH) {
match attr.parse_meta()? {
Meta::List(meta) => {
assert!(
meta.nested.len() <= 1,
"only single data attribute is allowed"
);
if let Some(nested) = meta.nested.first() {
match nested {
NestedMeta::Meta(Meta::Path(path))
if path.is_ident(IGNORE_ATTR_PATH) =>
{
data_attr = DataAttr::Ignore;
}
NestedMeta::Meta(Meta::NameValue(meta))
if meta.path.is_ident(DATA_SAME_FN_ATTR_PATH) =>
{
let path = parse_lit_into_expr_path(&meta.lit)?;
data_attr = DataAttr::SameFn(path);
}
NestedMeta::Meta(Meta::Path(path))
if path.is_ident(DATA_EQ_ATTR_PATH) =>
{
data_attr = DataAttr::Eq;
}
other => return Err(Error::new(other.span(), "Unknown attribute")),
}
}
}
other => {
return Err(Error::new(
other.span(),
"Expected attribute list (the form #[data(one, two)])",
));
}
}
}
}
Ok(Field {
ident,
ty,
attrs: data_attr,
})
}
/// The tokens to be used as the function for 'same'.
pub fn same_fn_path_tokens(&self) -> TokenStream {
match &self.attrs {
DataAttr::SameFn(f) => quote!(#f),
DataAttr::Eq => quote!(::core::cmp::PartialEq::eq),
// this should not be called for DataAttr::Ignore
DataAttr::Ignore => quote!(compiler_error!),
DataAttr::Empty => {
let span = Span::call_site();
quote_spanned!(span=> druid::Data::same)
}
}
}
}
impl Field<LensAttrs> {
pub fn parse_ast(field: &syn::Field, index: usize) -> Result<Self, Error> {
let ident = match field.ident.as_ref() {
Some(ident) => FieldIdent::Named(ident.to_string().trim_start_matches("r#").to_owned()),
None => FieldIdent::Unnamed(index),
};
let ty = field.ty.clone();
let mut ignore = false;
let mut lens_name_override = None;
for attr in field.attrs.iter() {
if attr.path.is_ident(BASE_DRUID_DEPRECATED_ATTR_PATH) {
panic!(
"The 'druid' attribute has been replaced with separate \
'lens' and 'data' attributes.",
);
} else if attr.path.is_ident(BASE_LENS_ATTR_PATH) {
match attr.parse_meta()? {
Meta::List(meta) => {
for nested in meta.nested.iter() {
match nested {
NestedMeta::Meta(Meta::Path(path))
if path.is_ident(IGNORE_ATTR_PATH) =>
{
if ignore {
return Err(Error::new(
nested.span(),
"Duplicate attribute",
));
}
ignore = true;
}
NestedMeta::Meta(Meta::NameValue(meta))
if meta.path.is_ident(LENS_NAME_OVERRIDE_ATTR_PATH) =>
{
if lens_name_override.is_some() {
return Err(Error::new(meta.span(), "Duplicate attribute"));
}
let ident = parse_lit_into_ident(&meta.lit)?;
lens_name_override = Some(ident);
}
other => return Err(Error::new(other.span(), "Unknown attribute")),
}
}
}
other => {
return Err(Error::new(
other.span(),
"Expected attribute list (the form #[lens(one, two)])",
));
}
}
}
}
Ok(Field {
ident,
ty,
attrs: LensAttrs {
ignore,
lens_name_override,
},
})
}
}
impl<Attrs> Field<Attrs> {
pub fn ident_tokens(&self) -> TokenTree {
match self.ident {
FieldIdent::Named(ref s) => Ident::new(s, Span::call_site()).into(),
FieldIdent::Unnamed(num) => Literal::usize_unsuffixed(num).into(),
}
}
pub fn ident_string(&self) -> String {
match self.ident {
FieldIdent::Named(ref s) => s.clone(),
FieldIdent::Unnamed(num) => num.to_string(),
}
}
}
fn parse_lit_into_expr_path(lit: &syn::Lit) -> Result<ExprPath, Error> {
let string = if let syn::Lit::Str(lit) = lit {
lit
} else {
return Err(Error::new(
lit.span(),
"expected str, found... something else",
));
};
let tokens = syn::parse_str(&string.value())?;
syn::parse2(tokens)
}
fn parse_lit_into_ident(lit: &syn::Lit) -> Result<Ident, Error> {
let ident = if let syn::Lit::Str(lit) = lit {
Ident::new(&lit.value(), lit.span())
} else {
return Err(Error::new(
lit.span(),
"expected str, found... something else",
));
};
Ok(ident)
}
| rust | Apache-2.0 | b27ea6a618c32f9ea0e8a56822f9487d23401c0d | 2026-01-04T15:44:39.941670Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.