repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
sidit77/rusty-twinkle-tray | https://github.com/sidit77/rusty-twinkle-tray/blob/4c28fca78561979829fd2c88b2f02cc162a2ae41/lib/windows-ext/src/Windows/Win32/mod.rs | lib/windows-ext/src/Windows/Win32/mod.rs | pub mod System;
pub mod Foundation; | rust | MIT | 4c28fca78561979829fd2c88b2f02cc162a2ae41 | 2026-01-04T20:20:01.545651Z | false |
sidit77/rusty-twinkle-tray | https://github.com/sidit77/rusty-twinkle-tray/blob/4c28fca78561979829fd2c88b2f02cc162a2ae41/lib/windows-ext/src/Windows/Win32/Foundation/mod.rs | lib/windows-ext/src/Windows/Win32/Foundation/mod.rs | pub use windows::Win32::Foundation::*;
| rust | MIT | 4c28fca78561979829fd2c88b2f02cc162a2ae41 | 2026-01-04T20:20:01.545651Z | false |
sidit77/rusty-twinkle-tray | https://github.com/sidit77/rusty-twinkle-tray/blob/4c28fca78561979829fd2c88b2f02cc162a2ae41/lib/windows-ext/src/Windows/Win32/System/mod.rs | lib/windows-ext/src/Windows/Win32/System/mod.rs | pub mod WinRT;
| rust | MIT | 4c28fca78561979829fd2c88b2f02cc162a2ae41 | 2026-01-04T20:20:01.545651Z | false |
sidit77/rusty-twinkle-tray | https://github.com/sidit77/rusty-twinkle-tray/blob/4c28fca78561979829fd2c88b2f02cc162a2ae41/lib/windows-ext/src/Windows/Win32/System/WinRT/mod.rs | lib/windows-ext/src/Windows/Win32/System/WinRT/mod.rs | pub mod Xaml;
| rust | MIT | 4c28fca78561979829fd2c88b2f02cc162a2ae41 | 2026-01-04T20:20:01.545651Z | false |
sidit77/rusty-twinkle-tray | https://github.com/sidit77/rusty-twinkle-tray/blob/4c28fca78561979829fd2c88b2f02cc162a2ae41/lib/windows-ext/src/Windows/Win32/System/WinRT/Xaml/mod.rs | lib/windows-ext/src/Windows/Win32/System/WinRT/Xaml/mod.rs | #[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct IDesktopWindowXamlSourceNative(::windows_core::IUnknown);
impl IDesktopWindowXamlSourceNative {
/// Forwards to the vtable's `AttachToWindow` slot, passing the parent
/// window handle (presumably attaching the XAML island to that window —
/// see the interface's Windows documentation).
///
/// # Safety
/// Raw COM FFI call: `self` must wrap a live interface pointer and
/// `parentwnd` must convert to a usable `HWND`.
pub unsafe fn AttachToWindow<P0>(&self, parentwnd: P0) -> ::windows_core::Result<()>
where
P0: ::windows_core::IntoParam<super::super::super::Foundation::HWND>,
{
// Fetch the vtable slot and invoke it on the raw interface pointer;
// `.ok()` converts the returned HRESULT into `Result<()>`.
(::windows_core::Interface::vtable(self)
.AttachToWindow)(
::windows_core::Interface::as_raw(self),
parentwnd.into_param().abi(),
)
.ok()
}
/// Reads the `WindowHandle` property through the vtable, returning the
/// `HWND` written into the out-parameter.
///
/// # Safety
/// Raw COM FFI call: `self` must wrap a live interface pointer.
pub unsafe fn WindowHandle(
&self,
) -> ::windows_core::Result<super::super::super::Foundation::HWND> {
// Zero-initialized out-parameter; HWND is a plain handle value for
// which the all-zero bit pattern is valid.
let mut result__ = ::std::mem::zeroed();
// `.from_abi` combines the HRESULT with the out value into Result<HWND>.
(::windows_core::Interface::vtable(self)
.WindowHandle)(::windows_core::Interface::as_raw(self), &mut result__)
.from_abi(result__)
}
}
::windows_core::imp::interface_hierarchy!(
IDesktopWindowXamlSourceNative, ::windows_core::IUnknown
);
unsafe impl ::windows_core::Interface for IDesktopWindowXamlSourceNative {
type Vtable = IDesktopWindowXamlSourceNative_Vtbl;
}
unsafe impl ::windows_core::ComInterface for IDesktopWindowXamlSourceNative {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0x3cbcf1bf_2f76_4e9c_96ab_e84b37972554,
);
}
// Raw COM vtable layout for IDesktopWindowXamlSourceNative. Field order
// must match the binary interface exactly, hence #[repr(C)].
#[repr(C)]
#[doc(hidden)]
pub struct IDesktopWindowXamlSourceNative_Vtbl {
// IUnknown slots (QueryInterface / AddRef / Release) come first.
pub base__: ::windows_core::IUnknown_Vtbl,
pub AttachToWindow: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
parentwnd: super::super::super::Foundation::HWND,
) -> ::windows_core::HRESULT,
pub WindowHandle: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
hwnd: *mut super::super::super::Foundation::HWND,
) -> ::windows_core::HRESULT,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct IDesktopWindowXamlSourceNative2(::windows_core::IUnknown);
impl IDesktopWindowXamlSourceNative2 {}
::windows_core::imp::interface_hierarchy!(
IDesktopWindowXamlSourceNative2, ::windows_core::IUnknown,
IDesktopWindowXamlSourceNative
);
unsafe impl ::windows_core::Interface for IDesktopWindowXamlSourceNative2 {
type Vtable = IDesktopWindowXamlSourceNative2_Vtbl;
}
unsafe impl ::windows_core::ComInterface for IDesktopWindowXamlSourceNative2 {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0xe3dcd8c7_3057_4692_99c3_7b7720afda31,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct IDesktopWindowXamlSourceNative2_Vtbl {
pub base__: IDesktopWindowXamlSourceNative_Vtbl,
PreTranslateMessage: usize,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct IFindReferenceTargetsCallback(::windows_core::IUnknown);
impl IFindReferenceTargetsCallback {}
::windows_core::imp::interface_hierarchy!(
IFindReferenceTargetsCallback, ::windows_core::IUnknown
);
unsafe impl ::windows_core::Interface for IFindReferenceTargetsCallback {
type Vtable = IFindReferenceTargetsCallback_Vtbl;
}
unsafe impl ::windows_core::ComInterface for IFindReferenceTargetsCallback {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0x04b3486c_4687_4229_8d14_505ab584dd88,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct IFindReferenceTargetsCallback_Vtbl {
pub base__: ::windows_core::IUnknown_Vtbl,
pub FoundTrackerTarget: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
target: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct IReferenceTracker(::windows_core::IUnknown);
impl IReferenceTracker {}
::windows_core::imp::interface_hierarchy!(IReferenceTracker, ::windows_core::IUnknown);
unsafe impl ::windows_core::Interface for IReferenceTracker {
type Vtable = IReferenceTracker_Vtbl;
}
unsafe impl ::windows_core::ComInterface for IReferenceTracker {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0x11d3b13a_180e_4789_a8be_7712882893e6,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct IReferenceTracker_Vtbl {
pub base__: ::windows_core::IUnknown_Vtbl,
pub ConnectFromTrackerSource: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub DisconnectFromTrackerSource: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub FindTrackerTargets: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
callback: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub GetReferenceTrackerManager: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
value: *mut *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub AddRefFromTrackerSource: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub ReleaseFromTrackerSource: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub PegFromTrackerSource: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct IReferenceTrackerExtension(::windows_core::IUnknown);
impl IReferenceTrackerExtension {}
::windows_core::imp::interface_hierarchy!(
IReferenceTrackerExtension, ::windows_core::IUnknown
);
unsafe impl ::windows_core::Interface for IReferenceTrackerExtension {
type Vtable = IReferenceTrackerExtension_Vtbl;
}
unsafe impl ::windows_core::ComInterface for IReferenceTrackerExtension {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0x4e897caa_59d5_4613_8f8c_f7ebd1f399b0,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct IReferenceTrackerExtension_Vtbl {
pub base__: ::windows_core::IUnknown_Vtbl,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct IReferenceTrackerHost(::windows_core::IUnknown);
impl IReferenceTrackerHost {}
::windows_core::imp::interface_hierarchy!(
IReferenceTrackerHost, ::windows_core::IUnknown
);
unsafe impl ::windows_core::Interface for IReferenceTrackerHost {
type Vtable = IReferenceTrackerHost_Vtbl;
}
unsafe impl ::windows_core::ComInterface for IReferenceTrackerHost {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0x29a71c6a_3c42_4416_a39d_e2825a07a773,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct IReferenceTrackerHost_Vtbl {
pub base__: ::windows_core::IUnknown_Vtbl,
pub DisconnectUnusedReferenceSources: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
options: XAML_REFERENCETRACKER_DISCONNECT,
) -> ::windows_core::HRESULT,
pub ReleaseDisconnectedReferenceSources: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub NotifyEndOfReferenceTrackingOnThread: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub GetTrackerTarget: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
unknown: *mut ::core::ffi::c_void,
newreference: *mut *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub AddMemoryPressure: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
bytesallocated: u64,
) -> ::windows_core::HRESULT,
pub RemoveMemoryPressure: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
bytesallocated: u64,
) -> ::windows_core::HRESULT,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct IReferenceTrackerManager(::windows_core::IUnknown);
impl IReferenceTrackerManager {}
::windows_core::imp::interface_hierarchy!(
IReferenceTrackerManager, ::windows_core::IUnknown
);
unsafe impl ::windows_core::Interface for IReferenceTrackerManager {
type Vtable = IReferenceTrackerManager_Vtbl;
}
unsafe impl ::windows_core::ComInterface for IReferenceTrackerManager {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0x3cf184b4_7ccb_4dda_8455_7e6ce99a3298,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct IReferenceTrackerManager_Vtbl {
pub base__: ::windows_core::IUnknown_Vtbl,
pub ReferenceTrackingStarted: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub FindTrackerTargetsCompleted: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
findfailed: u8,
) -> ::windows_core::HRESULT,
pub ReferenceTrackingCompleted: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub SetReferenceTrackerHost: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
value: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct IReferenceTrackerTarget(::windows_core::IUnknown);
impl IReferenceTrackerTarget {}
::windows_core::imp::interface_hierarchy!(
IReferenceTrackerTarget, ::windows_core::IUnknown
);
unsafe impl ::windows_core::Interface for IReferenceTrackerTarget {
type Vtable = IReferenceTrackerTarget_Vtbl;
}
unsafe impl ::windows_core::ComInterface for IReferenceTrackerTarget {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0x64bd43f8_bfee_4ec4_b7eb_2935158dae21,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct IReferenceTrackerTarget_Vtbl {
pub base__: ::windows_core::IUnknown_Vtbl,
pub AddRefFromReferenceTracker: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> u32,
pub ReleaseFromReferenceTracker: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> u32,
pub Peg: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub Unpeg: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct ISurfaceImageSourceManagerNative(::windows_core::IUnknown);
impl ISurfaceImageSourceManagerNative {}
::windows_core::imp::interface_hierarchy!(
ISurfaceImageSourceManagerNative, ::windows_core::IUnknown
);
unsafe impl ::windows_core::Interface for ISurfaceImageSourceManagerNative {
type Vtable = ISurfaceImageSourceManagerNative_Vtbl;
}
unsafe impl ::windows_core::ComInterface for ISurfaceImageSourceManagerNative {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0x4c8798b7_1d88_4a0f_b59b_b93f600de8c8,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct ISurfaceImageSourceManagerNative_Vtbl {
pub base__: ::windows_core::IUnknown_Vtbl,
pub FlushAllSurfacesWithDevice: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
device: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct ISurfaceImageSourceNative(::windows_core::IUnknown);
impl ISurfaceImageSourceNative {}
::windows_core::imp::interface_hierarchy!(
ISurfaceImageSourceNative, ::windows_core::IUnknown
);
unsafe impl ::windows_core::Interface for ISurfaceImageSourceNative {
type Vtable = ISurfaceImageSourceNative_Vtbl;
}
unsafe impl ::windows_core::ComInterface for ISurfaceImageSourceNative {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0xf2e9edc1_d307_4525_9886_0fafaa44163c,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct ISurfaceImageSourceNative_Vtbl {
pub base__: ::windows_core::IUnknown_Vtbl,
SetDevice: usize,
BeginDraw: usize,
pub EndDraw: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct ISurfaceImageSourceNativeWithD2D(::windows_core::IUnknown);
impl ISurfaceImageSourceNativeWithD2D {}
::windows_core::imp::interface_hierarchy!(
ISurfaceImageSourceNativeWithD2D, ::windows_core::IUnknown
);
unsafe impl ::windows_core::Interface for ISurfaceImageSourceNativeWithD2D {
type Vtable = ISurfaceImageSourceNativeWithD2D_Vtbl;
}
unsafe impl ::windows_core::ComInterface for ISurfaceImageSourceNativeWithD2D {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0x54298223_41e1_4a41_9c08_02e8256864a1,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct ISurfaceImageSourceNativeWithD2D_Vtbl {
pub base__: ::windows_core::IUnknown_Vtbl,
pub SetDevice: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
device: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub BeginDraw: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
updaterect: *const super::super::super::Foundation::RECT,
iid: *const ::windows_core::GUID,
updateobject: *mut *mut ::core::ffi::c_void,
offset: *mut super::super::super::Foundation::POINT,
) -> ::windows_core::HRESULT,
pub EndDraw: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub SuspendDraw: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub ResumeDraw: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct ISwapChainBackgroundPanelNative(::windows_core::IUnknown);
impl ISwapChainBackgroundPanelNative {}
::windows_core::imp::interface_hierarchy!(
ISwapChainBackgroundPanelNative, ::windows_core::IUnknown
);
unsafe impl ::windows_core::Interface for ISwapChainBackgroundPanelNative {
type Vtable = ISwapChainBackgroundPanelNative_Vtbl;
}
unsafe impl ::windows_core::ComInterface for ISwapChainBackgroundPanelNative {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0x43bebd4e_add5_4035_8f85_5608d08e9dc9,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct ISwapChainBackgroundPanelNative_Vtbl {
pub base__: ::windows_core::IUnknown_Vtbl,
SetSwapChain: usize,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct ISwapChainPanelNative(::windows_core::IUnknown);
impl ISwapChainPanelNative {}
::windows_core::imp::interface_hierarchy!(
ISwapChainPanelNative, ::windows_core::IUnknown
);
unsafe impl ::windows_core::Interface for ISwapChainPanelNative {
type Vtable = ISwapChainPanelNative_Vtbl;
}
unsafe impl ::windows_core::ComInterface for ISwapChainPanelNative {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0xf92f19d2_3ade_45a6_a20c_f6f1ea90554b,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct ISwapChainPanelNative_Vtbl {
pub base__: ::windows_core::IUnknown_Vtbl,
SetSwapChain: usize,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct ISwapChainPanelNative2(::windows_core::IUnknown);
impl ISwapChainPanelNative2 {}
::windows_core::imp::interface_hierarchy!(
ISwapChainPanelNative2, ::windows_core::IUnknown, ISwapChainPanelNative
);
unsafe impl ::windows_core::Interface for ISwapChainPanelNative2 {
type Vtable = ISwapChainPanelNative2_Vtbl;
}
unsafe impl ::windows_core::ComInterface for ISwapChainPanelNative2 {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0xd5a2f60c_37b2_44a2_937b_8d8eb9726821,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct ISwapChainPanelNative2_Vtbl {
pub base__: ISwapChainPanelNative_Vtbl,
pub SetSwapChainHandle: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
swapchainhandle: super::super::super::Foundation::HANDLE,
) -> ::windows_core::HRESULT,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct ITrackerOwner(::windows_core::IUnknown);
impl ITrackerOwner {}
::windows_core::imp::interface_hierarchy!(ITrackerOwner, ::windows_core::IUnknown);
unsafe impl ::windows_core::Interface for ITrackerOwner {
type Vtable = ITrackerOwner_Vtbl;
}
unsafe impl ::windows_core::ComInterface for ITrackerOwner {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0xeb24c20b_9816_4ac7_8cff_36f67a118f4e,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct ITrackerOwner_Vtbl {
pub base__: ::windows_core::IUnknown_Vtbl,
pub CreateTrackerHandle: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
returnvalue: *mut TrackerHandle,
) -> ::windows_core::HRESULT,
pub DeleteTrackerHandle: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
handle: TrackerHandle,
) -> ::windows_core::HRESULT,
pub SetTrackerValue: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
handle: TrackerHandle,
value: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub TryGetSafeTrackerValue: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
handle: TrackerHandle,
returnvalue: *mut *mut ::core::ffi::c_void,
) -> u8,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct IVirtualSurfaceImageSourceNative(::windows_core::IUnknown);
impl IVirtualSurfaceImageSourceNative {}
::windows_core::imp::interface_hierarchy!(
IVirtualSurfaceImageSourceNative, ::windows_core::IUnknown, ISurfaceImageSourceNative
);
unsafe impl ::windows_core::Interface for IVirtualSurfaceImageSourceNative {
type Vtable = IVirtualSurfaceImageSourceNative_Vtbl;
}
unsafe impl ::windows_core::ComInterface for IVirtualSurfaceImageSourceNative {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0xe9550983_360b_4f53_b391_afd695078691,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct IVirtualSurfaceImageSourceNative_Vtbl {
pub base__: ISurfaceImageSourceNative_Vtbl,
pub Invalidate: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
updaterect: super::super::super::Foundation::RECT,
) -> ::windows_core::HRESULT,
pub GetUpdateRectCount: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
count: *mut u32,
) -> ::windows_core::HRESULT,
pub GetUpdateRects: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
updates: *mut super::super::super::Foundation::RECT,
count: u32,
) -> ::windows_core::HRESULT,
pub GetVisibleBounds: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
bounds: *mut super::super::super::Foundation::RECT,
) -> ::windows_core::HRESULT,
pub RegisterForUpdatesNeeded: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
callback: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
pub Resize: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
newwidth: i32,
newheight: i32,
) -> ::windows_core::HRESULT,
}
#[repr(transparent)]
#[derive(
::core::cmp::PartialEq,
::core::cmp::Eq,
::core::fmt::Debug,
::core::clone::Clone
)]
pub struct IVirtualSurfaceUpdatesCallbackNative(::windows_core::IUnknown);
impl IVirtualSurfaceUpdatesCallbackNative {}
::windows_core::imp::interface_hierarchy!(
IVirtualSurfaceUpdatesCallbackNative, ::windows_core::IUnknown
);
unsafe impl ::windows_core::Interface for IVirtualSurfaceUpdatesCallbackNative {
type Vtable = IVirtualSurfaceUpdatesCallbackNative_Vtbl;
}
unsafe impl ::windows_core::ComInterface for IVirtualSurfaceUpdatesCallbackNative {
const IID: ::windows_core::GUID = ::windows_core::GUID::from_u128(
0xdbf2e947_8e6c_4254_9eee_7738f71386c9,
);
}
#[repr(C)]
#[doc(hidden)]
pub struct IVirtualSurfaceUpdatesCallbackNative_Vtbl {
pub base__: ::windows_core::IUnknown_Vtbl,
pub UpdatesNeeded: unsafe extern "system" fn(
this: *mut ::core::ffi::c_void,
) -> ::windows_core::HRESULT,
}
// HRESULT-style error code stored as its raw u32 bit pattern; meaning is
// defined by the Windows XAML surface APIs (TODO confirm exact semantics
// against the Windows SDK headers).
pub const E_SURFACE_CONTENTS_LOST: u32 = 2150301728u32;
// The two defined values of XAML_REFERENCETRACKER_DISCONNECT (0 and 1).
pub const XAML_REFERENCETRACKER_DISCONNECT_DEFAULT: XAML_REFERENCETRACKER_DISCONNECT = XAML_REFERENCETRACKER_DISCONNECT(
0i32,
);
pub const XAML_REFERENCETRACKER_DISCONNECT_SUSPEND: XAML_REFERENCETRACKER_DISCONNECT = XAML_REFERENCETRACKER_DISCONNECT(
1i32,
);
// Newtype over the raw i32 enum value so it crosses the ABI unchanged.
#[repr(transparent)]
#[derive(::core::cmp::PartialEq, ::core::cmp::Eq)]
pub struct XAML_REFERENCETRACKER_DISCONNECT(pub i32);
impl ::core::marker::Copy for XAML_REFERENCETRACKER_DISCONNECT {}
impl ::core::clone::Clone for XAML_REFERENCETRACKER_DISCONNECT {
fn clone(&self) -> Self {
*self
}
}
impl ::core::default::Default for XAML_REFERENCETRACKER_DISCONNECT {
// Default is the DEFAULT constant's value (0).
fn default() -> Self {
Self(0)
}
}
// Marks the type as plain-copy for windows-core's generic machinery.
impl ::windows_core::TypeKind for XAML_REFERENCETRACKER_DISCONNECT {
type TypeKind = ::windows_core::CopyType;
}
impl ::core::fmt::Debug for XAML_REFERENCETRACKER_DISCONNECT {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
f.debug_tuple("XAML_REFERENCETRACKER_DISCONNECT").field(&self.0).finish()
}
}
// Opaque handle value used by the ITrackerOwner methods; transparent over
// isize so it is ABI-compatible with the raw handle.
#[repr(transparent)]
#[derive(::core::cmp::PartialEq, ::core::cmp::Eq)]
pub struct TrackerHandle(pub isize);
impl ::core::default::Default for TrackerHandle {
fn default() -> Self {
// SAFETY: the wrapped field is a plain isize, for which the all-zero
// bit pattern is a valid value.
unsafe { ::core::mem::zeroed() }
}
}
impl ::core::clone::Clone for TrackerHandle {
fn clone(&self) -> Self {
*self
}
}
impl ::core::marker::Copy for TrackerHandle {}
impl ::core::fmt::Debug for TrackerHandle {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
f.debug_tuple("TrackerHandle").field(&self.0).finish()
}
}
// Marks the type as plain-copy for windows-core's generic machinery.
impl ::windows_core::TypeKind for TrackerHandle {
type TypeKind = ::windows_core::CopyType;
}
| rust | MIT | 4c28fca78561979829fd2c88b2f02cc162a2ae41 | 2026-01-04T20:20:01.545651Z | false |
sidit77/rusty-twinkle-tray | https://github.com/sidit77/rusty-twinkle-tray/blob/4c28fca78561979829fd2c88b2f02cc162a2ae41/lib/windows-ext/src/Windows/System/mod.rs | lib/windows-ext/src/Windows/System/mod.rs | pub use windows::System::*;
| rust | MIT | 4c28fca78561979829fd2c88b2f02cc162a2ae41 | 2026-01-04T20:20:01.545651Z | false |
sidit77/rusty-twinkle-tray | https://github.com/sidit77/rusty-twinkle-tray/blob/4c28fca78561979829fd2c88b2f02cc162a2ae41/lib/codegen/src/constructors.rs | lib/codegen/src/constructors.rs | use std::collections::HashSet;
use quote::format_ident;
use syn::{parse_quote, Item};
use crate::utils::get_indent;
/// For every inherent `impl` block whose type name is listed in `classes`,
/// appends two generated methods: a `new()` constructor and a hidden
/// factory-cache accessor named `I<Class>Factory` that `new()` calls.
///
/// `items` is the parsed syntax tree of a generated bindings file; only
/// `Item::Impl` nodes without a trait (inherent impls) are touched.
pub fn generate_constructors(items: &mut Vec<Item>, classes: &HashSet<String>) {
items.iter_mut().for_each(|item| match item {
Item::Impl(item) if item.trait_.is_none() => {
// `get_indent` yields the bare type name of the impl target.
let ty = get_indent(&item.self_ty);
if classes.contains(&ty) {
let class = format_ident!("{}", ty);
let factory = format_ident!("I{}Factory", ty);
// Generated `new()`: routes through the cached activation factory
// and calls its CreateInstance slot (null outer + discarded inner
// IInspectable — presumably the non-composable activation path;
// TODO confirm against windows-rs's own generated constructors).
item.items.push(parse_quote! {
pub fn new() -> ::windows_core::Result<#class> {
Self::#factory(|this| unsafe {
let mut result__ = ::std::mem::zeroed();
(::windows_core::Interface::vtable(this).CreateInstance)(
::windows_core::Interface::as_raw(this),
::core::ptr::null_mut(),
&mut ::core::option::Option::<::windows::core::IInspectable>::None as *mut _ as _,
&mut result__,
)
.from_abi(result__)
})
}
});
// Generated factory accessor: lazily caches the activation factory
// in a static and runs `callback` against it.
item.items.push(parse_quote! {
#[doc(hidden)]
pub fn #factory <
R,
F: FnOnce(&#factory) -> ::windows_core::Result<R>,
>(
callback: F,
) -> ::windows_core::Result<R> {
static SHARED: ::windows_core::imp::FactoryCache<
#class,
#factory,
> = ::windows_core::imp::FactoryCache::new();
SHARED.call(callback)
}
})
}
}
_ => {}
});
}
| rust | MIT | 4c28fca78561979829fd2c88b2f02cc162a2ae41 | 2026-01-04T20:20:01.545651Z | false |
sidit77/rusty-twinkle-tray | https://github.com/sidit77/rusty-twinkle-tray/blob/4c28fca78561979829fd2c88b2f02cc162a2ae41/lib/codegen/src/exporter.rs | lib/codegen/src/exporter.rs | use std::fs::OpenOptions;
use std::io::Write;
use std::path::Path;
/// Appends a `pub use windows::<module path>::*;` re-export for `module`
/// (components separated by `_`) under `<base>/Windows/...`, creating any
/// missing directory and its empty `mod.rs` on the way, and declaring each
/// newly created directory as `pub mod <comp>;` in its parent's `mod.rs`.
///
/// Panics on any I/O failure; `<base>/Windows/mod.rs` must already exist.
pub fn generate_export<P: AsRef<Path>>(module: &str, base: P) {
    let mut dir = base.as_ref().join("Windows");
    let mut export = String::from("pub use windows::");
    for comp in module.split('_') {
        export.push_str(comp);
        export.push_str("::");
        dir.push(comp);
        // First time this module directory is seen: create it, give it an
        // empty mod.rs, and register it in the parent's mod.rs.
        if !dir.exists() {
            std::fs::create_dir(&dir).unwrap();
            std::fs::write(dir.join("mod.rs"), "").unwrap();
            let parent_mod = dir.parent().unwrap().join("mod.rs");
            append(parent_mod, format!("pub mod {comp};")).unwrap();
        }
    }
    export += "*;\n";
    // The innermost mod.rs receives the actual glob re-export.
    append(dir.join("mod.rs"), export).unwrap();
}
/// Appends `content` to the file at `path`. The file must already exist:
/// the open is append-only and does not create.
fn append<P: AsRef<Path>, C: AsRef<[u8]>>(path: P, content: C) -> std::io::Result<()> {
    let mut file = OpenOptions::new().append(true).open(path)?;
    file.write_all(content.as_ref())
}
| rust | MIT | 4c28fca78561979829fd2c88b2f02cc162a2ae41 | 2026-01-04T20:20:01.545651Z | false |
sidit77/rusty-twinkle-tray | https://github.com/sidit77/rusty-twinkle-tray/blob/4c28fca78561979829fd2c88b2f02cc162a2ae41/lib/codegen/src/filter.rs | lib/codegen/src/filter.rs | use std::collections::HashSet;
use std::path::Path;
/// Decides whether a generated source file should be post-processed:
/// `mod.rs` files are processed when their package name (derived from the
/// path below the `Windows` directory) is empty or enabled; `impl.rs`
/// files only when the "implement" feature is enabled. Any other file
/// name is a caller bug and panics.
pub fn should_process_file(file: &Path, enabled: &HashSet<String>) -> bool {
    let name = file.file_name().unwrap();
    if name == "mod.rs" {
        // The root `Windows/mod.rs` yields an empty package and is always
        // processed.
        let package = generate_package_name(file);
        package.is_empty() || enabled.contains(&package)
    } else if name == "impl.rs" {
        enabled.contains("implement")
    } else {
        unreachable!()
    }
}
/// Builds a `_`-joined package name from the path components between the
/// `Windows` directory and the file itself, e.g.
/// `.../Windows/System/WinRT/mod.rs` -> `"System_WinRT"`.
fn generate_package_name(file: &Path) -> String {
    // Walk the path backwards, drop the file name, stop at `Windows`.
    let mut parts: Vec<&str> = file
        .iter()
        .rev()
        .skip(1)
        .map(|c| c.to_str().unwrap())
        .take_while(|c| *c != "Windows")
        .collect();
    parts.reverse();
    parts.join("_")
}
| rust | MIT | 4c28fca78561979829fd2c88b2f02cc162a2ae41 | 2026-01-04T20:20:01.545651Z | false |
sidit77/rusty-twinkle-tray | https://github.com/sidit77/rusty-twinkle-tray/blob/4c28fca78561979829fd2c88b2f02cc162a2ae41/lib/codegen/src/utils.rs | lib/codegen/src/utils.rs | use proc_macro2::Ident;
use syn::{Path, Type};
/// Extension trait: consume the next element of an iterator and keep the
/// iterator only if that element satisfies a predicate.
pub trait IterExpect<T> {
/// Advances by one element; returns `Some(self)` when `func` accepts it,
/// `None` when the iterator was empty or the predicate rejected it.
fn continue_if(self, func: impl FnOnce(T) -> bool) -> Option<Self>
where
Self: Sized;
}
// Blanket impl for every iterator.
impl<T, I: Iterator<Item = T>> IterExpect<T> for I {
fn continue_if(mut self, func: impl FnOnce(T) -> bool) -> Option<Self>
where
Self: Sized
{
self.next().and_then(|i| func(i).then_some(self))
}
}
/// Extracts the bare identifier from a path type such as `Foo`.
/// (NOTE(review): the name looks like a typo for `get_ident`; kept as-is
/// because callers elsewhere in the crate use this spelling.)
///
/// # Panics
/// Panics when `ty` is not a path type, or when the path is qualified /
/// has more than one segment.
pub fn get_indent(ty: &Type) -> String {
match ty {
Type::Path(path) => simple_ident(&path.path)
.unwrap_or_else(|| panic!("Not an identifier: {:#?}", path))
.to_string(),
_ => panic!("Not a path")
}
}
/// Returns the single identifier of `path` when it is exactly one segment
/// with no leading `::`; `None` for rooted or multi-segment paths.
pub fn simple_ident(path: &Path) -> Option<&Ident> {
if path.leading_colon.is_none() && path.segments.len() == 1 {
Some(&path.segments[0].ident)
} else {
None
}
}
| rust | MIT | 4c28fca78561979829fd2c88b2f02cc162a2ae41 | 2026-01-04T20:20:01.545651Z | false |
sidit77/rusty-twinkle-tray | https://github.com/sidit77/rusty-twinkle-tray/blob/4c28fca78561979829fd2c88b2f02cc162a2ae41/lib/codegen/src/attributes.rs | lib/codegen/src/attributes.rs | use std::collections::HashSet;
use syn::punctuated::Punctuated;
use syn::{Attribute, Expr, ExprLit, Fields, ImplItem, Item, Lit, Meta, Token};
/// Removes doc attributes and evaluates `#[cfg(...)]` attributes across the
/// item list: items (and struct fields / impl members) whose cfg predicate
/// is not satisfied by `enabled` are dropped entirely; every feature name
/// seen while evaluating is recorded in `encountered`.
pub fn strip_attributes(items: &mut Vec<Item>, enabled: &HashSet<String>, encountered: &mut HashSet<String>) {
items.retain_mut(|item| match item {
// `process_attribute_list` returns false when the item is cfg'd out;
// `.then(...).is_some()` keeps the item only on true, running the
// nested cleanup (fields / impl members) as a side effect.
Item::Struct(item) => process_attribute_list(&mut item.attrs, enabled, encountered)
.then(|| process_fields(&mut item.fields, enabled, encountered))
.is_some(),
Item::Impl(item) => process_attribute_list(&mut item.attrs, enabled, encountered)
.then(|| {
item.items
.retain_mut(|item| strip_item_attribute(item, enabled, encountered))
})
.is_some(),
Item::Mod(item) => process_attribute_list(&mut item.attrs, enabled, encountered),
Item::Macro(item) => process_attribute_list(&mut item.attrs, enabled, encountered),
Item::Trait(item) => process_attribute_list(&mut item.attrs, enabled, encountered),
// Other item kinds are kept untouched.
_ => true
})
}
/// Applies the same attribute stripping to a single impl member; returns
/// whether the member should be retained.
fn strip_item_attribute(item: &mut ImplItem, enabled: &HashSet<String>, encountered: &mut HashSet<String>) -> bool {
match item {
ImplItem::Const(item) => process_attribute_list(&mut item.attrs, enabled, encountered),
ImplItem::Fn(item) => process_attribute_list(&mut item.attrs, enabled, encountered),
ImplItem::Type(item) => process_attribute_list(&mut item.attrs, enabled, encountered),
ImplItem::Macro(item) => process_attribute_list(&mut item.attrs, enabled, encountered),
_ => true
}
}
/// Strips attributes on every field of a struct, dropping fields whose
/// `#[cfg(...)]` predicate is not satisfied. Rebuilds the punctuated field
/// list because `Punctuated` has no retain-style API.
fn process_fields(field: &mut Fields, enabled: &HashSet<String>, encountered: &mut HashSet<String>) {
match field {
Fields::Named(field) => {
field.named = std::mem::take(&mut field.named)
.into_pairs()
.filter_map(|mut p| process_attribute_list(&mut p.value_mut().attrs, enabled, encountered).then_some(p))
.collect();
}
Fields::Unnamed(field) => {
field.unnamed = std::mem::take(&mut field.unnamed)
.into_pairs()
.filter_map(|mut p| process_attribute_list(&mut p.value_mut().attrs, enabled, encountered).then_some(p))
.collect();
}
Fields::Unit => {}
}
}
/// Removes doc and `#[cfg(...)]` attributes from the list, evaluating each
/// cfg against `enabled` as it goes. Returns `true` when the owning item
/// should be kept (no cfg evaluated to false), `false` when it is cfg'd out.
fn process_attribute_list(attributes: &mut Vec<Attribute>, enabled: &HashSet<String>, encountered: &mut HashSet<String>) -> bool {
let mut disabled = false;
attributes.retain_mut(|attrib| {
// Keep an attribute only if it is neither a doc attribute nor a cfg
// attribute (`is_enabled` returns Some for cfg). Evaluating a cfg sets
// `disabled` as a side effect when its predicate fails. Note the `&&`
// short-circuit: doc attributes never reach the cfg check.
!is_doc_attrib(attrib)
&& is_enabled(attrib, enabled, encountered)
.map(|i| disabled |= !i)
.is_none()
});
!disabled
}
/// True when the attribute is a `#[doc = "..."]` attribute (i.e. a `///`
/// comment in the original source).
fn is_doc_attrib(attrib: &Attribute) -> bool {
    matches!(&attrib.meta, Meta::NameValue(meta) if meta.path.is_ident("doc"))
}
/// Evaluates a `#[cfg(...)]` attribute against the enabled feature set.
/// Returns `None` when the attribute is not a cfg attribute at all, and
/// `Some(result)` with the predicate's truth value otherwise.
///
/// Panics (via `unwrap`) if the cfg argument list fails to parse.
fn is_enabled(attrib: &Attribute, enabled: &HashSet<String>, encountered: &mut HashSet<String>) -> Option<bool> {
attrib.path().is_ident("cfg").then(|| {
parse_cfg(
attrib
.parse_args_with(Punctuated::parse_terminated)
.unwrap(),
enabled,
encountered
)
})
}
/// Evaluates a parsed `#[cfg(...)]` predicate list against the enabled
/// feature set. Supports `all(...)`, `any(...)`, `not(...)` and
/// `feature = "..."`; every feature name visited is recorded in
/// `encountered` (evaluation short-circuits, so not every nested feature
/// is necessarily recorded).
///
/// # Panics
/// Panics on any other predicate form (e.g. `target_os = "..."`) or on a
/// malformed nested argument list.
pub fn parse_cfg(parsed: Punctuated<Meta, Token![,]>, enabled: &HashSet<String>, encountered: &mut HashSet<String>) -> bool {
    parsed.into_iter().all(|meta| match meta {
        // cfg(all(...)): every nested predicate must hold.
        Meta::List(list) if list.path.is_ident("all") => {
            parse_cfg(list.parse_args_with(Punctuated::parse_terminated).unwrap(), enabled, encountered)
        }
        // cfg(any(...)): at least one nested predicate must hold. Each
        // element is wrapped in a singleton list so it can be evaluated
        // through the same recursion.
        Meta::List(list) if list.path.is_ident("any") => list
            .parse_args_with(Punctuated::<Meta, Token![,]>::parse_terminated)
            .unwrap()
            .into_iter()
            .any(|inner| parse_cfg(::std::iter::once(inner).collect(), enabled, encountered)),
        // cfg(not(...)): invert the nested predicate.
        Meta::List(list) if list.path.is_ident("not") => {
            !parse_cfg(list.parse_args_with(Punctuated::parse_terminated).unwrap(), enabled, encountered)
        }
        // cfg(feature = "name"): membership in the enabled set.
        Meta::NameValue(value) if value.path.is_ident("feature") => match value.value {
            Expr::Lit(ExprLit { lit: Lit::Str(arg), .. }) => {
                let val = arg.value();
                let result = enabled.contains(&val);
                encountered.insert(val);
                result
            }
            e => panic!("unexpected expr: {:?}", e)
        },
        _ => panic!("unexpected meta: {:#?}", meta)
    })
}
| rust | MIT | 4c28fca78561979829fd2c88b2f02cc162a2ae41 | 2026-01-04T20:20:01.545651Z | false |
sidit77/rusty-twinkle-tray | https://github.com/sidit77/rusty-twinkle-tray/blob/4c28fca78561979829fd2c88b2f02cc162a2ae41/lib/codegen/src/main.rs | lib/codegen/src/main.rs | mod attributes;
mod constructors;
mod exporter;
mod filter;
mod utils;
mod whitelist;
use std::collections::{HashMap, HashSet};
use std::ops::Sub;
use std::path::{Path, PathBuf};
use rayon::iter::{ParallelBridge, ParallelIterator};
use serde::{Deserialize, Serialize};
use walkdir::WalkDir;
use windows_bindgen::bindgen;
use crate::attributes::strip_attributes;
use crate::constructors::generate_constructors;
use crate::exporter::generate_export;
use crate::filter::should_process_file;
use crate::whitelist::apply_whitelist;
/// Cached codegen inputs, persisted to `cache.toml` inside the temp dir so
/// the expensive windows-bindgen step can be skipped when nothing changed.
#[derive(Default, Serialize, Deserialize)]
struct Cache {
    // The class filters the sources currently in `temp_dir` were built from.
    classes: HashSet<String>
}
/// A per-type method whitelist: either an explicit set of allowed method
/// names, or a blanket allow/deny decision.
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum WhiteList {
    // Only the listed method names are kept.
    Subset(HashSet<String>),
    // `All(true)` keeps every method, `All(false)` keeps none.
    All(bool)
}
impl Default for WhiteList {
    // Types without an entry in the config get an empty whitelist:
    // nothing is allowed unless explicitly required (e.g. by a VTABLE).
    fn default() -> Self {
        Self::All(false)
    }
}
impl WhiteList {
pub fn is_enabled(&self, name: &str) -> bool {
match self {
WhiteList::Subset(enabled) => enabled.contains(name),
WhiteList::All(r) => *r
}
}
pub fn all_enabled(&self) -> bool {
matches!(self, WhiteList::All(true))
}
pub fn add(&mut self, items: HashSet<String>) {
match self {
WhiteList::Subset(set) => set.extend(items.into_iter()),
WhiteList::All(false) => *self = WhiteList::Subset(items),
WhiteList::All(true) => {}
}
}
}
/// Contents of `Codegen.toml`, driving the whole generation pipeline.
#[derive(Debug, Serialize, Deserialize)]
struct Config {
    // Scratch directory where windows-bindgen output and the cache live.
    temp_dir: PathBuf,
    // Class filters passed to windows-bindgen as `--filter` arguments.
    classes: HashSet<String>,
    // Cargo features considered enabled when evaluating `#[cfg(feature)]`.
    features: HashSet<String>,
    // Types for which constructor helpers are generated.
    constructors: HashSet<String>,
    // Per-type method whitelists applied to inherent impls.
    white_list: HashMap<String, WhiteList>,
    // Module paths for which re-export shims are generated.
    reexports: HashSet<String>
}
/// Entry point: reads `Codegen.toml` from the directory given as the first
/// CLI argument, (re)generates Windows bindings into a temp dir when the
/// configured class list changed, then rewrites every generated file into
/// the target crate's `src/` tree.
fn main() {
    let config = {
        let path = std::env::args()
            .skip(1)
            .next()
            .expect("Missing path to target dir");
        std::env::set_current_dir(path).expect("Failed to go to target dir");
        let content = std::fs::read_to_string("Codegen.toml").expect("Failed to read config file");
        toml::from_str::<Config>(&content)
            .expect("Failed to parse config file")
            .with_expanded_features()
    };
    {
        // Skip the slow windows-bindgen step when the cached class list
        // matches the configured one. Cache read failures are non-fatal and
        // simply force a regeneration.
        let mut cached: Cache = std::fs::read_to_string(config.temp_dir.join("cache.toml"))
            .map_err(|err| println!("Failed to read cache file: {err}"))
            .ok()
            .and_then(|f| {
                toml::from_str(&f)
                    .map_err(|err| println!("Failed to parse cache file: {err}"))
                    .ok()
            })
            .unwrap_or_default();
        if cached.classes != config.classes {
            println!("Regenerating source files...");
            if config.temp_dir.exists() {
                std::fs::remove_dir_all(&config.temp_dir).unwrap();
            }
            std::fs::create_dir_all(config.temp_dir.join("src")).unwrap();
            // An (empty) Cargo.toml is needed because bindgen's "package"
            // config expects a cargo-like directory layout.
            std::fs::write(config.temp_dir.join("Cargo.toml"), "").unwrap();
            let mut args = vec![
                String::from("--out"),
                config
                    .temp_dir
                    .join("src")
                    .join("lib.rs")
                    .to_str()
                    .unwrap()
                    .to_string(),
                String::from("--config"),
                String::from("package"),
            ];
            for class in &config.classes {
                args.push(String::from("--filter"));
                args.push(class.clone());
            }
            bindgen(args).unwrap();
            cached.classes = config.classes.clone();
            std::fs::write(config.temp_dir.join("cache.toml"), toml::to_string(&cached).unwrap()).unwrap();
        }
    }
    let src_dir = config.temp_dir.join("src");
    let target_dir = PathBuf::from("src");
    // Start from a clean output tree so stale files never survive.
    if target_dir.join("Windows").exists() {
        std::fs::remove_dir_all(target_dir.join("Windows")).unwrap();
    }
    // Rewrite every generated file in parallel, collecting all feature names
    // seen in cfg attributes along the way.
    let encountered = WalkDir::new(&src_dir)
        .into_iter()
        .map(Result::unwrap)
        .par_bridge()
        .filter(|e| e.file_type().is_file())
        .map(|e| e.path().strip_prefix(&src_dir).unwrap().to_path_buf())
        .flat_map(|e| transform(src_dir.join(&e), target_dir.join(&e), &config))
        .collect::<HashSet<String>>();
    // Features referenced by the generated code but not enabled in the config
    // are merely reported, not an error.
    let disabled_features = {
        let mut f = Vec::from_iter(encountered.sub(&config.features).into_iter());
        f.sort();
        f
    };
    println!("disabled features: {:#?}", disabled_features);
    config
        .reexports
        .iter()
        .for_each(|m| generate_export(m, &target_dir));
    //transform(
    //    PathBuf::from(base_path).join("Windows\\UI\\Xaml\\mod.rs"),
    //    PathBuf::from(target_path).join("Windows\\UI\\Xaml\\mod.rs"),
    //    &config,
    //    &mut encountered
    //);
}
/// Rewrites one generated source file: applies the method whitelist, strips
/// disabled `cfg` branches and doc attributes, and injects constructor
/// helpers. Returns every feature name seen in `cfg` attributes.
fn transform<I: AsRef<Path>, O: AsRef<Path>>(in_file: I, out_file: O, config: &Config) -> HashSet<String> {
    let (input, output) = (in_file.as_ref(), out_file.as_ref());
    if !should_process_file(input, &config.features) {
        println!("Skipped: {:?}", input);
        return HashSet::new();
    }
    let source = std::fs::read_to_string(input).unwrap();
    let mut ast = syn::parse_file(&source).unwrap();
    let mut encountered = HashSet::new();
    apply_whitelist(&mut ast.items, &config.white_list);
    strip_attributes(&mut ast.items, &config.features, &mut encountered);
    generate_constructors(&mut ast.items, &config.constructors);
    std::fs::create_dir_all(output.parent().unwrap()).unwrap();
    std::fs::write(output, prettyplease::unparse(&ast)).unwrap();
    println!("Processed: {:?}", input);
    encountered
}
impl Config {
fn with_expanded_features(mut self) -> Self {
let mut expanded = HashSet::new();
for feature in &self.features {
let mut current = String::new();
for comp in feature.split("_") {
if !current.is_empty() {
current.push('_');
}
current.push_str(comp);
expanded.insert(current.clone());
}
}
self.features = expanded;
self
}
}
| rust | MIT | 4c28fca78561979829fd2c88b2f02cc162a2ae41 | 2026-01-04T20:20:01.545651Z | false |
sidit77/rusty-twinkle-tray | https://github.com/sidit77/rusty-twinkle-tray/blob/4c28fca78561979829fd2c88b2f02cc162a2ae41/lib/codegen/src/whitelist.rs | lib/codegen/src/whitelist.rs | use std::collections::{HashMap, HashSet};
use syn::{parse_quote, Expr, ImplItem, Item};
use crate::utils::get_indent;
use crate::WhiteList;
/// Prunes inherent `impl` blocks down to their whitelisted methods.
///
/// Methods referenced from a `VTABLE` constant are always kept; since they
/// may never be called directly, the impl is tagged `#[allow(dead_code)]`.
pub fn apply_whitelist(items: &mut Vec<Item>, white_list: &HashMap<String, WhiteList>) {
    for item in items.iter_mut() {
        if let Item::Impl(imp) = item {
            // Trait impls are left alone; only inherent impls are pruned.
            if imp.trait_.is_some() {
                continue;
            }
            let type_name = get_indent(&imp.self_ty);
            let mut list = white_list.get(&type_name).cloned().unwrap_or_default();
            if !list.all_enabled() {
                let required = find_required_methods(&imp.items);
                if !required.is_empty() {
                    imp.attrs.push(parse_quote!(#[allow(dead_code)]));
                }
                list.add(required);
            }
            clean_impl(&mut imp.items, &list);
        }
    }
}
/// Collects the names of all methods referenced from a `VTABLE` const in the
/// given impl items; these must survive whitelisting.
fn find_required_methods(items: &Vec<ImplItem>) -> HashSet<String> {
    let mut required = HashSet::new();
    let vtables = items.iter().filter_map(|item| match item {
        ImplItem::Const(c) if c.ident == "VTABLE" => Some(&c.expr),
        _ => None
    });
    for expr in vtables {
        extract_required(expr, &mut required);
    }
    required
}
/// Walks a (possibly nested) struct-literal expression and records every
/// `Self::method` path it contains.
fn extract_required(expr: &Expr, result: &mut HashSet<String>) {
    match expr {
        Expr::Struct(lit) => {
            for field in lit.fields.iter() {
                extract_required(&field.expr, result);
            }
        }
        Expr::Path(p) if p.path.segments[0].ident == "Self" => {
            result.insert(p.path.segments[1].ident.to_string());
        }
        // VTABLE initializers only ever contain the two shapes above.
        _ => unreachable!()
    }
}
/// Drops every method whose name is not enabled by the whitelist; all other
/// impl items (consts, types, ...) are kept.
fn clean_impl(items: &mut Vec<ImplItem>, white_list: &WhiteList) {
    items.retain(|item| match item {
        ImplItem::Fn(method) => white_list.is_enabled(&method.sig.ident.to_string()),
        _ => true
    })
}
| rust | MIT | 4c28fca78561979829fd2c88b2f02cc162a2ae41 | 2026-01-04T20:20:01.545651Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/chunked.rs | src/chunked.rs | use httparse::{InvalidChunkSize, parse_chunk_size};
use tk_bufstream::Buf;
// TODO(tailhook) review usizes here, probably we may accept u64
/// Incremental parser state for a `Transfer-Encoding: chunked` body.
#[derive(Debug, Clone, PartialEq)]
pub struct State {
    // De-chunked payload bytes at the front of the buffer, ready for the
    // consumer.
    buffered: usize,
    // Payload bytes of the current chunk still expected from the network.
    pending: usize,
    // Set once the terminating zero-size chunk has been parsed.
    done: bool,
}
impl State {
    /// Creates a parser state for a fresh chunked body.
    pub fn new() -> State {
        State {
            buffered: 0,
            pending: 0,
            done: false,
        }
    }
    /// Consumes chunk framing from `buf` in place, leaving only payload.
    ///
    /// Chunk-size lines and chunk-terminating CRLFs are removed from the
    /// buffer; `buffered` grows to cover the de-chunked payload prefix.
    /// Returns an error when a chunk-size line is malformed.
    pub fn parse(&mut self, buf: &mut Buf) -> Result<(), InvalidChunkSize> {
        let State { ref mut buffered, ref mut pending, ref mut done } = *self;
        if *done {
            return Ok(());
        }
        while *buffered < buf.len() {
            if *pending == 0 {
                use httparse::Status::*;
                match parse_chunk_size(&buf[*buffered..])? {
                    Complete((bytes, 0)) => {
                        // Zero-size chunk terminates the body.
                        buf.remove_range(
                            *buffered..*buffered+bytes);
                        *done = true;
                    }
                    Complete((bytes, chunk_size)) => {
                        // TODO(tailhook) optimized multiple removes
                        buf.remove_range(
                            *buffered..*buffered+bytes);
                        // TODO(tailhook) check that chunk_size < u32
                        *pending = chunk_size as usize;
                    }
                    Partial => {
                        return Ok(());
                    }
                }
            } else {
                if *buffered + *pending + 2 <= buf.len() {
                    // Rest of the chunk and its trailing CRLF have fully
                    // arrived: expose the data and strip the CRLF.
                    *buffered += *pending;
                    *pending = 0;
                    // TODO(tailhook) optimize this
                    buf.remove_range(*buffered..*buffered+2);
                } else {
                    let available = buf.len() - *buffered;
                    if available < *pending {
                        // Only part of the chunk's payload is here; buffer
                        // it and wait for more data.
                        *pending -= available;
                        *buffered = buf.len();
                    } else {
                        // All payload bytes are present but the trailing
                        // CRLF is not complete yet. The previous code did
                        // `*pending -= available` here, which underflowed
                        // usize when a packet ended inside the CRLF
                        // (available == pending + 1), and when
                        // available == pending it zeroed `pending` without
                        // consuming the CRLF, so the next call misparsed
                        // "\r\n" as a zero-size chunk (premature EOF).
                        // Instead, wait for the full CRLF and let the
                        // branch above consume the whole chunk end.
                        return Ok(());
                    }
                }
            }
        }
        Ok(())
    }
    /// Number of payload bytes at the front of the buffer ready to be read.
    pub fn buffered(&self) -> usize {
        self.buffered
    }
    /// True once the final (zero-length) chunk has been parsed.
    pub fn is_done(&self) -> bool {
        self.done
    }
    /// Records that the caller consumed `n` of the buffered payload bytes.
    pub fn consume(&mut self, n: usize) {
        assert!(self.buffered >= n);
        self.buffered -= n;
    }
}
#[cfg(test)]
mod test {
    use super::State;
    use tk_bufstream::Buf;
    // End-to-end check: one 4-byte chunk, consumption by the caller, then
    // the terminating zero-size chunk.
    #[test]
    fn simple() {
        let mut state = State::new();
        let mut buf = Buf::new();
        buf.extend(b"4\r\nhell\r\n");
        assert_eq!(state.parse(&mut buf), Ok(()));
        assert_eq!(state, State { buffered: 4, pending: 0, done: false });
        state.consume(4);
        buf.consume(4);
        assert_eq!(state.buffered, 0);
        buf.extend(b"0\r\n");
        assert_eq!(state.parse(&mut buf), Ok(()));
        assert_eq!(state, State { buffered: 0, pending: 0, done: true });
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/headers.rs | src/headers.rs | #[allow(unused_imports)]
use std::ascii::AsciiExt;
// header value is byte sequence
// we need case insensitive comparison and strip out of the whitespace
/// Checks whether a `Connection` header value is exactly `close`
/// (case-insensitive), allowing surrounding whitespace only.
///
/// Fixes a bug in the previous hand-rolled scan: a value consisting of
/// nothing but whitespace (of length >= 5) fell through every loop without
/// a mismatch and was reported as `close`.
pub fn is_close(val: &[u8]) -> bool {
    // Bytes treated as ignorable padding around the token.
    fn is_ws(b: u8) -> bool {
        matches!(b, b'\r' | b'\n' | b' ' | b'\t')
    }
    let start = match val.iter().position(|&b| !is_ws(b)) {
        Some(idx) => idx,
        None => return false, // empty or all-whitespace value
    };
    // `rposition` must succeed: `position` above found a non-ws byte.
    let end = val.iter().rposition(|&b| !is_ws(b)).unwrap() + 1;
    val[start..end].eq_ignore_ascii_case(b"close")
}
// header value is byte sequence
// we need case insensitive comparison and strip out of the whitespace
/// Checks whether a `Transfer-Encoding` header value is exactly `chunked`
/// (case-insensitive), allowing surrounding whitespace only.
///
/// Fixes a bug in the previous hand-rolled scan: a value consisting of
/// nothing but whitespace (of length >= 7) was reported as `chunked`.
pub fn is_chunked(val: &[u8]) -> bool {
    // Bytes treated as ignorable padding around the token.
    fn is_ws(b: u8) -> bool {
        matches!(b, b'\r' | b'\n' | b' ' | b'\t')
    }
    let start = match val.iter().position(|&b| !is_ws(b)) {
        Some(idx) => idx,
        None => return false, // empty or all-whitespace value
    };
    // `rposition` must succeed: `position` above found a non-ws byte.
    let end = val.iter().rposition(|&b| !is_ws(b)).unwrap() + 1;
    val[start..end].eq_ignore_ascii_case(b"chunked")
}
// header value is byte sequence
// we need case insensitive comparison and strip out of the whitespace
/// Checks whether an `Expect` header value is exactly `100-continue`
/// (case-insensitive), allowing surrounding whitespace only.
///
/// Fixes a bug in the previous hand-rolled scan: a value consisting of
/// nothing but whitespace (of length >= 12) was reported as `100-continue`.
pub fn is_continue(val: &[u8]) -> bool {
    // Bytes treated as ignorable padding around the token.
    fn is_ws(b: u8) -> bool {
        matches!(b, b'\r' | b'\n' | b' ' | b'\t')
    }
    let start = match val.iter().position(|&b| !is_ws(b)) {
        Some(idx) => idx,
        None => return false, // empty or all-whitespace value
    };
    // `rposition` must succeed: `position` above found a non-ws byte.
    let end = val.iter().rposition(|&b| !is_ws(b)).unwrap() + 1;
    val[start..end].eq_ignore_ascii_case(b"100-continue")
}
#[cfg(test)]
mod test {
    use super::{is_chunked, is_close, is_continue};
    // Each predicate must accept the token in any case with surrounding
    // whitespace, and reject anything else.
    #[test]
    fn test_chunked() {
        assert!(is_chunked(b"chunked"));
        assert!(is_chunked(b"Chunked"));
        assert!(is_chunked(b"chuNKED"));
        assert!(is_chunked(b"CHUNKED"));
        assert!(is_chunked(b"  CHUNKED"));
        assert!(is_chunked(b"  CHUNKED  "));
        assert!(is_chunked(b"chunked "));
        assert!(is_chunked(b" CHUNKED"));
        assert!(!is_chunked(b"  CHUNKED 1 "));
    }
    #[test]
    fn test_close() {
        assert!(is_close(b"close"));
        assert!(is_close(b"Close"));
        assert!(is_close(b"clOSE"));
        assert!(is_close(b"CLOSE"));
        assert!(is_close(b" CLOSE"));
        assert!(is_close(b" close "));
        assert!(!is_close(b"Close 1 "));
        assert!(!is_close(b" xclose "));
    }
    #[test]
    fn test_continue() {
        assert!(is_continue(b"100-continue"));
        assert!(is_continue(b"100-Continue"));
        assert!(is_continue(b"100-conTINUE"));
        assert!(is_continue(b"100-CONTINUE"));
        assert!(is_continue(b" 100-CONTINUE"));
        assert!(is_continue(b" 100-continue  "));
        assert!(!is_continue(b"100-continue y "));
        assert!(!is_continue(b"100-coztinue "));
    }
}
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/body_parser.rs | src/body_parser.rs | use httparse::InvalidChunkSize;
use tk_bufstream::ReadBuf;
use chunked;
// TODO(tailhook) review usizes here, probably we may accept u64
/// Progress of reading a message body, one variant per HTTP framing mode.
#[derive(Debug, Clone)]
pub enum BodyProgress {
    /// Fixed-length body (`Content-Length`).
    Fixed(usize), // bytes left
    /// Body extends until the peer closes the connection.
    Eof, // only for client implementation
    /// `Transfer-Encoding: chunked`; framing state lives in the chunk parser.
    Chunked(chunked::State),
}
impl BodyProgress {
/// Returns useful number of bytes in buffer and "end" ("done") flag
pub fn check_buf<S>(&self, io: &ReadBuf<S>) -> (usize, bool) {
use self::BodyProgress::*;
match *self {
Fixed(x) if x <= io.in_buf.len() => (x, true),
Fixed(_) => (io.in_buf.len(), false),
Chunked(ref s) => (s.buffered(), s.is_done()),
Eof => (io.in_buf.len(), io.done()),
}
}
pub fn parse<S>(&mut self, io: &mut ReadBuf<S>)
-> Result<(), InvalidChunkSize>
{
use self::BodyProgress::*;
match *self {
Fixed(_) => {},
Chunked(ref mut s) => s.parse(&mut io.in_buf)?,
Eof => {}
}
Ok(())
}
pub fn consume<S>(&mut self, io: &mut ReadBuf<S>, n: usize) {
use self::BodyProgress::*;
io.in_buf.consume(n);
match *self {
Fixed(ref mut x) => {
assert!(*x >= n);
*x -= n;
}
Chunked(ref mut s) => s.consume(n),
Eof => {}
}
}
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/lib.rs | src/lib.rs | //! This crate contains implementation of HTTP/1.0 and HTTP/1.1 with
//! websockets support. (HTTP/2 support is planned)
//!
//! See [examples](https://github.com/swindon-rs/tk-http/tree/master/examples)
//! for usage examples.
//!
//! For client implementation it's recommended to use the library
//! together with [tk-pool](https://crates.io/crates/tk-pool).
//!
#![recursion_limit="200"]
#![warn(missing_docs)]
extern crate futures;
extern crate url;
extern crate sha1;
extern crate rand;
extern crate httparse;
extern crate tokio_core;
extern crate tokio_io;
extern crate netbuf;
extern crate tk_bufstream;
extern crate byteorder;
#[macro_use(quick_error)] extern crate quick_error;
#[macro_use] extern crate matches;
#[macro_use] extern crate log;
#[cfg(feature="date_header")]extern crate httpdate;
pub mod server;
pub mod client;
pub mod websocket;
mod enums;
mod headers;
mod base_serializer;
mod chunked;
mod body_parser;
pub use enums::{Version, Status};
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/base_serializer.rs | src/base_serializer.rs | //! This contains common part of serializer between client and server
//! implementation
use std::fmt::Display;
use std::io::Write;
#[allow(unused_imports)]
use std::ascii::AsciiExt;
use tk_bufstream::Buf;
use enums::Version;
// Errors that can occur while composing HTTP message headers. Generated via
// the quick_error macro; each variant's `description` is its message.
quick_error! {
    #[derive(Debug)]
    pub enum HeaderError {
        DuplicateContentLength {
            description("Content-Length is added twice")
        }
        DuplicateTransferEncoding {
            description("Transfer-Encoding is added twice")
        }
        InvalidHeaderName {
            description("Header name contains invalid characters")
        }
        InvalidHeaderValue {
            description("Header value contains invalid characters")
        }
        TransferEncodingAfterContentLength {
            description("Transfer encoding added when Content-Length is \
                already specified")
        }
        ContentLengthAfterTransferEncoding {
            description("Content-Length added after Transfer-Encoding")
        }
        CantDetermineBodySize {
            description("Neither Content-Length nor Transfer-Encoding \
                is present in the headers")
        }
        BodyLengthHeader {
            description("Content-Length and Transfer-Encoding must be set \
                using the specialized methods")
        }
        RequireBodyless {
            description("This message must not contain body length fields.")
        }
    }
}
/// This is a state of message that is fine both for requests and responses
///
/// Note: while we pass buffer to each method, we expect that the same buffer
/// is passed each time
/// This is a state of message that is fine both for requests and responses
///
/// Note: while we pass buffer to each method, we expect that the same buffer
/// is passed each time
///
/// A `close: true` flag causes `Connection: close` to be emitted when the
/// headers are finished.
#[derive(Debug)]
pub enum MessageState {
    /// Nothing has been sent.
    ResponseStart { version: Version, body: Body, close: bool },
    /// A continuation line has been sent.
    FinalResponseStart { version: Version, body: Body, close: bool },
    /// Nothing has been sent.
    #[allow(dead_code)] // until we implement client requests
    RequestStart,
    /// Status line is already in the buffer.
    Headers { body: Body, close: bool },
    /// The message contains a fixed size body.
    FixedHeaders { is_head: bool, close: bool, content_length: u64 },
    /// The message contains a chunked body.
    ChunkedHeaders { is_head: bool, close: bool },
    /// The message contains no body.
    ///
    /// A request without a `Content-Length` or `Transfer-Encoding`
    /// header field contains no body.
    ///
    /// All 1xx (Informational), 204 (No Content),
    /// and 304 (Not Modified) responses do not include a message body.
    Bodyless,
    /// The message contains a body with the given length.
    FixedBody { is_head: bool, content_length: u64 },
    /// The message contains a chunked body.
    ChunkedBody { is_head: bool },
    /// A message in final state.
    Done,
}
/// Whether and how a message's body is allowed to appear on the wire.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum Body {
    /// Message contains a body.
    Normal,
    /// Message body is ignored: responses to HEAD requests.
    Head,
    /// Message must not have a body: all 1xx (Informational),
    /// 204 (No Content), and 304 (Not Modified) responses
    Denied,
    /// The message is a request and always contains a body (maybe empty).
    #[allow(dead_code)] // until we implement client requests
    Request,
}
/// Returns true when the bytes contain a CR or LF, which would allow header
/// injection if written verbatim into the message.
fn invalid_header(value: &[u8]) -> bool {
    value.contains(&b'\r') || value.contains(&b'\n')
}
impl MessageState {
    /// Write status line.
    ///
    /// This puts status line into a buffer immediately. If you don't
    /// continue with request it will be sent to the network shortly.
    ///
    /// # Panics
    ///
    /// When status line is already written. It's expected that your request
    /// handler state machine will never call the method twice.
    ///
    /// When the status code is 100 (Continue). 100 is not allowed
    /// as a final status code.
    pub fn response_status(&mut self, buf: &mut Buf, code: u16, reason: &str) {
        use self::Body::*;
        use self::MessageState::*;
        match *self {
            ResponseStart { version, mut body, close } |
            FinalResponseStart { version, mut body, close } => {
                // 100 (Continue) interim status code is not allowed as
                // a final response status.
                assert!(code != 100);
                write!(buf, "{} {} {}\r\n",
                    version, code, reason).unwrap();
                // Responses without body:
                //
                // * 1xx (Informational)
                // * 204 (No Content)
                // * 304 (Not Modified)
                if (code >= 100 && code < 200) || code == 204 || code == 304 {
                    body = Denied
                }
                *self = Headers { body: body, close: close };
            }
            ref state => {
                panic!("Called response_status() method on response \
                    in state {:?}", state)
            }
        }
    }
    /// Write request line.
    ///
    /// This puts request line into a buffer immediately. If you don't
    /// continue with request it will be sent to the network shortly.
    ///
    /// # Panics
    ///
    /// When request line is already written. It's expected that your request
    /// handler state machine will never call the method twice.
    pub fn request_line(&mut self, buf: &mut Buf,
        method: &str, path: &str, version: Version)
    {
        use self::Body::*;
        use self::MessageState::*;
        match *self {
            RequestStart => {
                write!(buf, "{} {} {}\r\n",
                    method, path, version).unwrap();
                // All requests may contain a body although it is uncommon for
                // GET and HEAD requests to contain one.
                *self = Headers { body: Request, close: false };
            }
            ref state => {
                panic!("Called request_line() method on request in state {:?}",
                       state)
            }
        }
    }
    /// Write a 100 (Continue) response.
    ///
    /// A server should respond with the 100 status code if it receives a
    /// 100-continue expectation.
    ///
    /// # Panics
    ///
    /// When the response is already started. It's expected that your response
    /// handler state machine will never call the method twice.
    pub fn response_continue(&mut self, buf: &mut Buf) {
        use self::MessageState::*;
        match *self {
            ResponseStart { version, body, close } => {
                write!(buf, "{} 100 Continue\r\n\r\n", version).unwrap();
                // The final status line may still be written afterwards.
                *self = FinalResponseStart { version: version,
                                             body: body,
                                             close: close }
            }
            ref state => {
                panic!("Called continue_line() method on response in state {:?}",
                       state)
            }
        }
    }
    // Appends "name: value\r\n", rejecting CR/LF in either part (header
    // injection); the buffer is rolled back to `start` on a bad value.
    fn write_header(&mut self, buf: &mut Buf, name: &str, value: &[u8])
        -> Result<(), HeaderError>
    {
        if invalid_header(name.as_bytes()) {
            return Err(HeaderError::InvalidHeaderName);
        }
        let start = buf.len();
        buf.write_all(name.as_bytes()).unwrap();
        buf.write_all(b": ").unwrap();
        let value_start = buf.len();
        buf.write_all(value).unwrap();
        // Validate the value in place after writing, to avoid a second scan
        // buffer; undo the partial write if it is invalid.
        if invalid_header(&buf[value_start..]) {
            buf.remove_range(start..);
            return Err(HeaderError::InvalidHeaderValue);
        }
        buf.write_all(b"\r\n").unwrap();
        Ok(())
    }
    // Same as `write_header` but formats `value` via Display directly into
    // the buffer, avoiding an intermediate allocation.
    fn write_formatted<D: Display>(&mut self, buf: &mut Buf,
        name: &str, value: D)
        -> Result<(), HeaderError>
    {
        if invalid_header(name.as_bytes()) {
            return Err(HeaderError::InvalidHeaderName);
        }
        let start = buf.len();
        buf.write_all(name.as_bytes()).unwrap();
        buf.write_all(b": ").unwrap();
        let value_start = buf.len();
        write!(buf, "{}", value).unwrap();
        if invalid_header(&buf[value_start..]) {
            buf.remove_range(start..);
            return Err(HeaderError::InvalidHeaderValue);
        }
        buf.write_all(b"\r\n").unwrap();
        Ok(())
    }
    /// Add a header to the message.
    ///
    /// Header is written into the output buffer immediately. And is sent
    /// as soon as the next loop iteration
    ///
    /// `Content-Length` header must be send using the `add_length` method
    /// and `Transfer-Encoding: chunked` must be set with the `add_chunked`
    /// method. These two headers are important for the security of HTTP.
    ///
    /// Note that there is currently no way to use a transfer encoding other
    /// than chunked.
    ///
    /// We return Result here to make implementing proxies easier. In the
    /// application handler it's okay to unwrap the result and to get
    /// a meaningful panic (that is basically an assertion).
    ///
    /// # Panics
    ///
    /// Panics when `add_header` is called in the wrong state.
    pub fn add_header(&mut self, buf: &mut Buf, name: &str, value: &[u8])
        -> Result<(), HeaderError>
    {
        use self::MessageState::*;
        use self::HeaderError::*;
        if name.eq_ignore_ascii_case("Content-Length")
            || name.eq_ignore_ascii_case("Transfer-Encoding") {
            return Err(BodyLengthHeader)
        }
        match *self {
            Headers { .. } | FixedHeaders { .. } | ChunkedHeaders { .. } => {
                self.write_header(buf, name, value)?;
                Ok(())
            }
            ref state => {
                panic!("Called add_header() method on a message in state {:?}",
                       state)
            }
        }
    }
    /// Same as `add_header` but allows value to be formatted directly into
    /// the buffer
    ///
    /// Useful for dates and numeric headers, as well as some strongly typed
    /// wrappers
    pub fn format_header<D: Display>(&mut self, buf: &mut Buf,
        name: &str, value: D)
        -> Result<(), HeaderError>
    {
        use self::MessageState::*;
        use self::HeaderError::*;
        if name.eq_ignore_ascii_case("Content-Length")
            || name.eq_ignore_ascii_case("Transfer-Encoding") {
            return Err(BodyLengthHeader)
        }
        match *self {
            Headers { .. } | FixedHeaders { .. } | ChunkedHeaders { .. } => {
                self.write_formatted(buf, name, value)?;
                Ok(())
            }
            ref state => {
                panic!("Called add_header() method on a message in state {:?}",
                       state)
            }
        }
    }
    /// Add a content length to the message.
    ///
    /// The `Content-Length` header is written to the output buffer immediately.
    /// It is checked that there are no other body length headers present in the
    /// message. When the body is send the length is validated.
    ///
    /// # Panics
    ///
    /// Panics when `add_length` is called in the wrong state.
    pub fn add_length(&mut self, buf: &mut Buf, n: u64)
        -> Result<(), HeaderError> {
        use self::MessageState::*;
        use self::HeaderError::*;
        use self::Body::*;
        match *self {
            FixedHeaders { .. } => Err(DuplicateContentLength),
            ChunkedHeaders { .. } => Err(ContentLengthAfterTransferEncoding),
            Headers { body: Denied, .. } => Err(RequireBodyless),
            Headers { body, close } => {
                self.write_formatted(buf, "Content-Length", n)?;
                *self = FixedHeaders { is_head: body == Head,
                                       close: close,
                                       content_length: n };
                Ok(())
            }
            ref state => {
                panic!("Called add_length() method on message in state {:?}",
                       state)
            }
        }
    }
    /// Sets the transfer encoding to chunked.
    ///
    /// Writes `Transfer-Encoding: chunked` to the output buffer immediately.
    /// It is assured that there is only one body length header is present
    /// and the body is written in chunked encoding.
    ///
    /// # Panics
    ///
    /// Panics when `add_chunked` is called in the wrong state.
    pub fn add_chunked(&mut self, buf: &mut Buf)
        -> Result<(), HeaderError> {
        use self::MessageState::*;
        use self::HeaderError::*;
        use self::Body::*;
        match *self {
            FixedHeaders { .. } => Err(TransferEncodingAfterContentLength),
            ChunkedHeaders { .. } => Err(DuplicateTransferEncoding),
            Headers { body: Denied, .. } => Err(RequireBodyless),
            Headers { body, close } => {
                self.write_header(buf, "Transfer-Encoding", b"chunked")?;
                *self = ChunkedHeaders { is_head: body == Head,
                                         close: close };
                Ok(())
            }
            ref state => {
                panic!("Called add_chunked() method on message in state {:?}",
                       state)
            }
        }
    }
    /// Returns true if at least `status()` method has been called
    ///
    /// This is mostly useful to find out whether we can build an error page
    /// or it's already too late.
    pub fn is_started(&self) -> bool {
        !matches!(*self,
            MessageState::RequestStart |
            MessageState::ResponseStart { .. } |
            MessageState::FinalResponseStart { .. })
    }
    /// Closes the HTTP header and returns `true` if entity body is expected.
    ///
    /// Specifically `false` is returned when status is 1xx, 204, 304 or in
    /// the response to a `HEAD` request but not if the body has zero-length.
    ///
    /// Similarly to `add_header()` it's fine to `unwrap()` here, unless you're
    /// doing some proxying.
    ///
    /// # Panics
    ///
    /// Panics when the response is in a wrong state.
    pub fn done_headers(&mut self, buf: &mut Buf)
        -> Result<bool, HeaderError>
    {
        use self::Body::*;
        use self::MessageState::*;
        // Emit `Connection: close` lazily, right before the blank line,
        // so it appears once regardless of the body mode.
        if matches!(*self,
            Headers { close: true, .. } |
            FixedHeaders { close: true, .. } |
            ChunkedHeaders { close: true, .. }) {
            self.add_header(buf, "Connection", b"close").unwrap();
        }
        let expect_body = match *self {
            Headers { body: Denied, .. } => {
                *self = Bodyless;
                false
            }
            Headers { body: Request, .. } => {
                // A request without length headers has an empty body.
                *self = FixedBody { is_head: false, content_length: 0 };
                true
            }
            Headers { body: Normal, .. } => {
                return Err(HeaderError::CantDetermineBodySize);
            }
            FixedHeaders { is_head, content_length, .. } => {
                *self = FixedBody { is_head: is_head,
                                    content_length: content_length };
                !is_head
            }
            ChunkedHeaders { is_head, .. } => {
                *self = ChunkedBody { is_head: is_head };
                !is_head
            }
            ref state => {
                panic!("Called done_headers() method on in state {:?}",
                       state)
            }
        };
        buf.write(b"\r\n").unwrap();
        Ok(expect_body)
    }
    /// Write a chunk of the message body.
    ///
    /// Works both for fixed-size body and chunked body.
    ///
    /// For the chunked body each chunk is put into the buffer immediately
    /// prefixed by chunk size. Empty chunks are ignored.
    ///
    /// For both modes chunk is put into the buffer, but is only sent when
    /// rotor-stream state machine is reached. So you may put multiple chunks
    /// into the buffer quite efficiently.
    ///
    /// You may write a body in responses to HEAD requests just like in real
    /// requests but the data is not sent to the network. Of course it is
    /// more efficient to not construct the message body at all.
    ///
    /// # Panics
    ///
    /// When response is in wrong state. Or there is no headers which
    /// determine response body length (either Content-Length or
    /// Transfer-Encoding).
    pub fn write_body(&mut self, buf: &mut Buf, data: &[u8]) {
        use self::MessageState::*;
        match *self {
            Bodyless => panic!("Message must not contain body."),
            FixedBody { is_head, ref mut content_length } => {
                if data.len() as u64 > *content_length {
                    panic!("Fixed size response error. \
                        Bytes left {} but got additional {}",
                        content_length, data.len());
                }
                if !is_head {
                    buf.write(data).unwrap();
                }
                // Counted down even for HEAD so `done()` can validate it.
                *content_length -= data.len() as u64;
            }
            ChunkedBody { is_head } => if !is_head && data.len() > 0 {
                write!(buf, "{:x}\r\n", data.len()).unwrap();
                buf.write(data).unwrap();
                buf.write(b"\r\n").unwrap();
            },
            ref state => {
                panic!("Called write_body() method on message \
                    in state {:?}", state)
            }
        }
    }
    /// Returns true if headers are already sent (buffered)
    pub fn is_after_headers(&self) -> bool {
        use self::MessageState::*;
        matches!(*self, Bodyless | Done |
            FixedBody {..} | ChunkedBody {..})
    }
    /// Returns true if `done()` method is already called.
    pub fn is_complete(&self) -> bool {
        matches!(*self, MessageState::Done)
    }
    /// Writes needed finalization data into the buffer and asserts
    /// that response is in the appropriate state for that.
    ///
    /// The method may be called multiple times.
    ///
    /// # Panics
    ///
    /// When the message is in the wrong state or the body is not finished.
    pub fn done(&mut self, buf: &mut Buf) {
        use self::MessageState::*;
        match *self {
            Bodyless => *self = Done,
            // Don't check for responses to HEAD requests if body was actually sent.
            FixedBody { is_head: true, .. } |
            ChunkedBody { is_head: true } => *self = Done,
            FixedBody { is_head: false, content_length: 0 } => *self = Done,
            FixedBody { is_head: false, content_length } =>
                panic!("Tried to close message with {} bytes remaining.",
                       content_length),
            ChunkedBody { is_head: false } => {
                // The terminating zero-size chunk.
                buf.write(b"0\r\n\r\n").unwrap();
                *self = Done;
            }
            Done => {} // multiple invocations are okay.
            ref state => {
                panic!("Called done() method on response in state {:?}",
                       state);
            }
        }
    }
}
#[cfg(test)]
mod test {
    use tk_bufstream::{Buf};
    use super::{MessageState, Body};
    use enums::Version;
    #[test]
    fn message_size() {
        // Just to keep track of size of structure
        assert_eq!(::std::mem::size_of::<MessageState>(), 16);
    }
    // Helpers constructing an initial state plus buffer for each scenario.
    fn do_request<F>(fun: F) -> Buf
        where F: FnOnce(MessageState, &mut Buf)
    {
        let mut buf = Buf::new();
        fun(MessageState::RequestStart, &mut buf);
        buf
    }
    fn do_response10<F>(fun: F) -> Buf
        where F: FnOnce(MessageState, &mut Buf)
    {
        let mut buf = Buf::new();
        fun(MessageState::ResponseStart {
            version: Version::Http10,
            body: Body::Normal,
            close: false,
        }, &mut buf);
        buf
    }
    fn do_response11<F>(close: bool, fun: F) -> Buf
        where F: FnOnce(MessageState, &mut Buf)
    {
        let mut buf = Buf::new();
        fun(MessageState::ResponseStart {
            version: Version::Http11,
            body: Body::Normal,
            close: close,
        }, &mut buf);
        buf
    }
    fn do_head_response11<F>(close: bool, fun: F)
        -> Buf
        where F: FnOnce(MessageState, &mut Buf)
    {
        let mut buf = Buf::new();
        fun(MessageState::ResponseStart {
            version: Version::Http11,
            body: Body::Head,
            close: close,
        }, &mut buf);
        buf
    }
    #[test]
    fn minimal_request() {
        assert_eq!(&do_request(|mut msg, buf| {
            msg.request_line(buf, "GET", "/", Version::Http10);
            msg.done_headers(buf).unwrap();
        })[..], "GET / HTTP/1.0\r\n\r\n".as_bytes());
    }
    #[test]
    fn minimal_response() {
        assert_eq!(&do_response10(|mut msg, buf| {
            msg.response_status(buf, 200, "OK");
            msg.add_length(buf, 0).unwrap();
            msg.done_headers(buf).unwrap();
        })[..], "HTTP/1.0 200 OK\r\nContent-Length: 0\r\n\r\n".as_bytes());
    }
    #[test]
    fn minimal_response11() {
        assert_eq!(&do_response11(false, |mut msg, buf| {
            msg.response_status(buf, 200, "OK");
            msg.add_length(buf, 0).unwrap();
            msg.done_headers(buf, ).unwrap();
        })[..], "HTTP/1.1 200 OK\r\nContent-Length: 0\r\n\r\n".as_bytes());
    }
    #[test]
    fn close_response11() {
        // `close: true` must append `Connection: close` to the headers.
        assert_eq!(&do_response11(true, |mut msg, buf| {
            msg.response_status(buf, 200, "OK");
            msg.add_length(buf, 0).unwrap();
            msg.done_headers(buf).unwrap();
        })[..], concat!("HTTP/1.1 200 OK\r\nContent-Length: 0\r\n",
                        "Connection: close\r\n\r\n").as_bytes());
    }
    #[test]
    fn head_request() {
        assert_eq!(&do_request(|mut msg, buf| {
            msg.request_line(buf, "HEAD", "/", Version::Http11);
            msg.add_length(buf, 5).unwrap();
            msg.done_headers(buf, ).unwrap();
            msg.write_body(buf, b"Hello");
        })[..], "HEAD / HTTP/1.1\r\nContent-Length: 5\r\n\r\nHello".as_bytes());
    }
    #[test]
    fn head_response() {
        // The response to a HEAD request may contain the real body length.
        assert_eq!(&do_head_response11(false, |mut msg, buf| {
            msg.response_status(buf, 200, "OK");
            msg.add_length(buf, 500).unwrap();
            msg.done_headers(buf).unwrap();
        })[..], "HTTP/1.1 200 OK\r\nContent-Length: 500\r\n\r\n".as_bytes());
    }
    #[test]
    fn informational_response() {
        // No response with an 1xx status code may contain a body length.
        assert_eq!(&do_response11(false, |mut msg, buf| {
            msg.response_status(buf, 142, "Foo");
            msg.add_length(buf, 500).unwrap_err();
            msg.done_headers(buf).unwrap();
        })[..], "HTTP/1.1 142 Foo\r\n\r\n".as_bytes());
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/server/config.rs | src/server/config.rs | use std::time::Duration;
use std::sync::Arc;
use server::{Config};
impl Config {
/// Create a config with defaults
pub fn new() -> Config {
Config {
inflight_request_limit: 2,
inflight_request_prealloc: 0,
first_byte_timeout: Duration::new(5, 0),
keep_alive_timeout: Duration::new(90, 0),
headers_timeout: Duration::new(10, 0),
input_body_byte_timeout: Duration::new(15, 0),
input_body_whole_timeout: Duration::new(3600, 0),
output_body_byte_timeout: Duration::new(15, 0),
output_body_whole_timeout: Duration::new(3600, 0),
}
}
/// A number of inflight requests until we stop reading more requests
pub fn inflight_request_limit(&mut self, value: usize) -> &mut Self {
self.inflight_request_limit = value;
self
}
/// Size of the queue that is preallocated for holding requests
///
/// Should be smaller than `inflight_request_limit`.
pub fn inflight_request_prealoc(&mut self, value: usize) -> &mut Self {
self.inflight_request_prealloc = value;
self
}
/// Create a Arc'd config clone to pass to the constructor
///
/// This is just a convenience method.
pub fn done(&mut self) -> Arc<Config> {
Arc::new(self.clone())
}
/// Timeout receiving very first byte over connection
pub fn first_byte_timeout(&mut self, value: Duration) -> &mut Self {
self.first_byte_timeout = value;
self
}
/// Timeout of idle connection (when no request has been sent yet)
pub fn keep_alive_timeout(&mut self, value: Duration) -> &mut Self {
self.keep_alive_timeout = value;
self
}
/// Timeout of receiving whole request headers
///
/// This timeout starts when first byte of headers is received
pub fn headers_timeout(&mut self, value: Duration) -> &mut Self {
self.headers_timeout = value;
self
}
/// Maximum delay between any two bytes of input request received
pub fn input_body_byte_timeout(&mut self, value: Duration) -> &mut Self {
self.input_body_byte_timeout = value;
self
}
/// Timeout of whole request body received
///
/// This timeout might be adjusted on per-request basis in
/// `headers_received`.
pub fn input_body_whole_timeout(&mut self, value: Duration) -> &mut Self {
self.input_body_whole_timeout = value;
self
}
/// Maximum delay between any two bytes of the output request could be
/// sent
pub fn output_body_byte_timeout(&mut self, value: Duration) -> &mut Self {
self.output_body_byte_timeout = value;
self
}
/// Timeout for the whole response body to be send to the client
///
/// This timeout is taken literally for any response, so it must be as
/// large as needed for slowest client fetching slowest file. I.e. it
/// might be as big as a hour or day for some applications, but consider
/// short timeouts if you don't serve large files to prevent DoS attacks.
pub fn output_body_whole_timeout(&mut self, value: Duration) -> &mut Self {
self.output_body_whole_timeout = value;
self
}
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/server/headers.rs | src/server/headers.rs | use std::str::from_utf8;
use std::slice::Iter as SliceIter;
#[allow(unused_imports)]
use std::ascii::AsciiExt;
use std::borrow::Cow;
use httparse::{self, EMPTY_HEADER, Request, Header};
use tk_bufstream::Buf;
use server::error::{Error, ErrorEnum};
use super::{RequestTarget, Dispatcher};
use super::codec::BodyKind;
use super::encoder::ResponseConfig;
use super::websocket::{self, WebsocketHandshake};
use super::request_target;
use headers;
use {Version};
/// Number of headers to allocate on a stack
const MIN_HEADERS: usize = 16;
/// A hard limit on the number of headers
const MAX_HEADERS: usize = 1024;
struct RequestConfig<'a> {
body: BodyKind,
#[allow(dead_code)] // TODO(tailhook) implement Expect support
expect_continue: bool,
connection_close: bool,
connection: Option<Cow<'a, str>>,
host: Option<&'a str>,
target: RequestTarget<'a>,
/// If this is true, then Host header differs from host value in
/// request-target (first line). Note, specification allows throwing
/// the header value by proxy in this case. But you might consider
/// returning 400 Bad Request.
conflicting_host: bool,
}
/// A borrowed structure that represents request headers
///
/// It's passed to `Codec::headers_received` and you are free to store or
/// discard any needed fields and headers from it.
#[derive(Debug)]
pub struct Head<'a> {
method: &'a str,
raw_target: &'a str,
target: RequestTarget<'a>,
host: Option<&'a str>,
conflicting_host: bool,
version: Version,
headers: &'a [Header<'a>],
body_kind: BodyKind,
connection_close: bool,
connection_header: Option<Cow<'a, str>>,
}
/// Iterator over all meaningful headers for the request
///
/// This iterator is created by `Head::headers`. And iterates over all
/// headers except hop-by-hop ones and `Host`.
///
/// Note: duplicate headers are not glued together neither they are sorted
pub struct HeaderIter<'a> {
head: &'a Head<'a>,
iter: SliceIter<'a, Header<'a>>,
}
impl<'a> Head<'a> {
/// Returns a HTTP method
pub fn method(&self) -> &str {
self.method
}
/// Request-target (the middle part of the first line of request)
pub fn request_target(&self) -> &RequestTarget<'a> {
&self.target
}
/// Returns a raw request target as string
pub fn raw_request_target(&self) -> &str {
self.raw_target
}
/// Returns path portion of request uri
///
/// Note: this may return something not starting from a slash when
/// full uri is used as request-target
///
/// If the request target is in asterisk form this returns None
pub fn path(&self) -> Option<&str> {
use super::RequestTarget::*;
match self.target {
Origin(x) => Some(x),
Absolute { path, .. } => Some(path),
Authority(..) => None,
Asterisk => None,
}
}
/// Return host of a request
///
/// Note: this might be extracted from request-target portion of
/// request headers (first line).
///
/// If both `Host` header exists and doesn't match host in request-target
/// then this method returns host from request-target and
/// `has_conflicting_host()` method returns true.
pub fn host(&self) -> Option<&str> {
self.host
}
/// Returns true if `Host` header conflicts with host in request-uri
///
/// By spec this fact may be ignored in proxy, but better to reply
/// BadRequest in this case
pub fn has_conflicting_host(&self) -> bool {
self.conflicting_host
}
/// Version of HTTP request
pub fn version(&self) -> Version {
self.version
}
/// Iterator over the headers of HTTP request
///
/// This iterator strips the following kinds of headers:
///
/// 1. Hop-by-hop headers (`Connection` itself, and ones it enumerates)
/// 2. `Content-Length` and `Transfer-Encoding`
/// 3. `Host` header
/// 4. `Upgrade` header regardless of whether it's in `Connection`
///
/// You may use `all_headers()` if you really need to access to all of
/// them (mostly useful for debugging puproses). But you may want to
/// consider:
///
/// 1. Host of the target request can be received using `host()` method,
/// which is also parsed from `target` path of request if that is
/// in absolute form (so conforming to the spec)
/// 2. Payload size can be fetched using `body_length()` method. Note:
/// this also includes cases where length is implicitly set to zero.
/// 3. `Connection` header might be discovered with `connection_close()`
/// or `connection_header()`
/// 4. `Upgrade` might be discovered with `get_websocket_upgrade()` or
/// only looked in `all_headers()` if `upgrade` presents in
/// `connection_header()`
pub fn headers(&self) -> HeaderIter {
HeaderIter {
head: self,
iter: self.headers.iter(),
}
}
/// All headers of HTTP request
///
/// Unlike `self.headers()` this does include hop-by-hop headers. This
/// method is here just for completeness, you shouldn't need it.
pub fn all_headers(&self) -> &'a [Header<'a>] {
self.headers
}
/// Return `true` if `Connection: close` header exists
pub fn connection_close(&self) -> bool {
self.connection_close
}
/// Returns the value of the `Connection` header (all of them, if multiple)
pub fn connection_header(&'a self) -> Option<&'a str> {
self.connection_header.as_ref().map(|x| &x[..])
}
/// Returns true if there was transfer-encoding or content-length != 0
///
/// I.e. `false` may mean either `Content-Length: 0` or there were no
/// content length. This is mostly important to check for requests which
/// must not have body (`HEAD`, `CONNECT`, `Upgrade: websocket` ...)
pub fn has_body(&self) -> bool {
self.body_kind != BodyKind::Fixed(0)
}
/// Returns size of the request body if either `Content-Length` is set
/// or it is safe to assume that request body is zero-length
///
/// If request length can't be determined in advance (such as when there
/// is a `Transfer-Encoding`) `None` is returned
pub fn body_length(&self) -> Option<u64> {
match self.body_kind {
BodyKind::Fixed(x) => Some(x),
_ => None,
}
}
/// Check if connection is a websocket and return hanshake info
///
/// `Err(())` is returned when there was handshake but where was something
/// wrong with it (so you should return `BadRequest` even if you support
/// plain http on the resource).
///
/// `Ok(None)` is returned when it's a plain HTTP request (no upgrade).
///
/// Note: this method computes handshake again, so it's better not to
/// call it multiple times.
pub fn get_websocket_upgrade(&self)
-> Result<Option<WebsocketHandshake>, ()>
{
websocket::get_handshake(self)
}
}
fn scan_headers<'x>(raw_request: &'x Request)
-> Result<RequestConfig<'x>, ErrorEnum>
{
// Implements the body length algorithm for requests:
// http://httpwg.github.io/specs/rfc7230.html#message.body.length
//
// The length of a request body is determined by one of the following
// (in order of precedence):
//
// 1. If the request contains a valid `Transfer-Encoding` header
// with `chunked` as the last encoding the request is chunked
// (3rd option in RFC).
// 2. If the request contains a valid `Content-Length` header
// the request has the given length in octets
// (5th option in RFC).
// 3. If neither `Transfer-Encoding` nor `Content-Length` are
// present the request has an empty body
// (6th option in RFC).
// 4. In all other cases the request is a bad request.
use super::codec::BodyKind::*;
use server::error::ErrorEnum::*;
let mut has_content_length = false;
let mut close = raw_request.version.unwrap() == 0;
let mut expect_continue = false;
let mut body = Fixed(0);
let mut connection = None::<Cow<_>>;
let mut host_header = false;
let target = request_target::parse(raw_request.path.unwrap())
.ok_or(BadRequestTarget)?;
let mut conflicting_host = false;
let mut host = match target {
RequestTarget::Authority(x) => Some(x),
RequestTarget::Absolute { authority, .. } => Some(authority),
_ => None,
};
for header in raw_request.headers.iter() {
if header.name.eq_ignore_ascii_case("Transfer-Encoding") {
if let Some(enc) = header.value.split(|&x| x == b',').last() {
if headers::is_chunked(enc) {
if has_content_length {
// override but don't allow keep-alive
close = true;
}
body = Chunked;
}
}
} else if header.name.eq_ignore_ascii_case("Content-Length") {
if has_content_length {
// duplicate content_length
return Err(DuplicateContentLength);
}
has_content_length = true;
if body != Chunked {
let s = from_utf8(header.value)
.map_err(|_| ContentLengthInvalid)?;
let len = s.parse().map_err(|_| ContentLengthInvalid)?;
body = Fixed(len);
} else {
// transfer-encoding has preference and don't allow keep-alive
close = true;
}
} else if header.name.eq_ignore_ascii_case("Connection") {
let strconn = from_utf8(header.value)
.map_err(|_| ConnectionInvalid)?.trim();
connection = match connection {
Some(x) => Some(x + ", " + strconn),
None => Some(strconn.into()),
};
if header.value.split(|&x| x == b',').any(headers::is_close) {
close = true;
}
} else if header.name.eq_ignore_ascii_case("Host") {
if host_header {
return Err(DuplicateHost);
}
host_header = true;
let strhost = from_utf8(header.value)
.map_err(|_| HostInvalid)?.trim();
if host.is_none() { // if host is not in uri
// TODO(tailhook) additional validations for host
host = Some(strhost);
} else if host != Some(strhost) {
conflicting_host = true;
}
} else if header.name.eq_ignore_ascii_case("Expect") {
if headers::is_continue(header.value) {
expect_continue = true;
}
}
}
if raw_request.method.unwrap() == "CONNECT" {
body = Unsupported;
}
Ok(RequestConfig {
body: body,
expect_continue: expect_continue,
connection: connection,
host: host,
target: target,
connection_close: close,
conflicting_host: conflicting_host,
})
}
pub fn parse_headers<S, D>(buffer: &mut Buf, disp: &mut D)
-> Result<Option<(BodyKind, D::Codec, ResponseConfig)>, Error>
where D: Dispatcher<S>,
{
let (body_kind, codec, cfg, bytes) = {
let mut vec;
let mut headers = [EMPTY_HEADER; MIN_HEADERS];
let mut raw = Request::new(&mut headers);
let mut result = raw.parse(&buffer[..]);
if matches!(result, Err(httparse::Error::TooManyHeaders)) {
vec = vec![EMPTY_HEADER; MAX_HEADERS];
raw = Request::new(&mut vec);
result = raw.parse(&buffer[..]);
}
match result.map_err(ErrorEnum::ParseError)? {
httparse::Status::Complete(bytes) => {
let cfg = scan_headers(&raw)?;
let ver = raw.version.unwrap();
let head = Head {
method: raw.method.unwrap(),
raw_target: raw.path.unwrap(),
target: cfg.target,
version: if ver == 1
{ Version::Http11 } else { Version::Http10 },
host: cfg.host,
conflicting_host: cfg.conflicting_host,
headers: raw.headers,
body_kind: cfg.body,
// For HTTP/1.0 we could implement
// Connection: Keep-Alive but hopefully it's rare
// enough to ignore nowadays
connection_close: cfg.connection_close || ver == 0,
connection_header: cfg.connection,
};
let codec = disp.headers_received(&head)?;
// TODO(tailhook) send 100-expect response headers
let response_config = ResponseConfig::from(&head);
(cfg.body, codec, response_config, bytes)
}
_ => return Ok(None),
}
};
buffer.consume(bytes);
Ok(Some((body_kind, codec, cfg)))
}
impl<'a> Iterator for HeaderIter<'a> {
type Item = (&'a str, &'a [u8]);
fn next(&mut self) -> Option<(&'a str, &'a [u8])> {
while let Some(header) = self.iter.next() {
if header.name.eq_ignore_ascii_case("Connection") ||
header.name.eq_ignore_ascii_case("Transfer-Encoding") ||
header.name.eq_ignore_ascii_case("Content-Length") ||
header.name.eq_ignore_ascii_case("Upgrade") ||
header.name.eq_ignore_ascii_case("Host")
{
continue;
}
if let Some(ref conn) = self.head.connection_header {
let mut conn_headers = conn.split(',').map(|x| x.trim());
if conn_headers.any(|x| x.eq_ignore_ascii_case(header.name)) {
continue;
}
}
return Some((header.name, header.value));
}
return None;
}
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/server/encoder.rs | src/server/encoder.rs | use std::io;
use std::fmt::Display;
use futures::{Future, Poll, Async};
use tk_bufstream::{WriteBuf, WriteRaw, FutureWriteRaw};
use tokio_io::AsyncWrite;
use base_serializer::{MessageState, HeaderError};
use enums::{Version, Status};
use super::headers::Head;
/// This a response writer that you receive in `Codec`
///
/// Methods of this structure ensure that everything you write into a buffer
/// is consistent and valid protocol
pub struct Encoder<S> {
state: MessageState,
io: WriteBuf<S>,
}
/// This structure returned from `Encoder::done` and works as a continuation
/// that should be returned from the future that writes request.
pub struct EncoderDone<S> {
buf: WriteBuf<S>,
}
/// This structure contains all needed info to start response of the request
/// in a correct manner
///
/// This is ought to be used in serializer only
#[derive(Debug, Clone, Copy)]
pub struct ResponseConfig {
/// Whether request is a HEAD request
pub is_head: bool,
/// Is `Connection: close` in request or HTTP version == 1.0
pub do_close: bool,
/// Version of HTTP request
pub version: Version,
}
/// A future that yields `RawBody` after buffer is empty
///
/// This future is created by `Encoder::raw_body()``
pub struct FutureRawBody<S>(FutureWriteRaw<S>);
/// A future that yields `Encoder` again after buffer has less bytes
///
/// This future is created by `Encoder::wait_flush(x)``
pub struct WaitFlush<S>(Option<Encoder<S>>, usize);
/// The actual raw body
///
/// The object is used to write some data directly to the socket without any
/// buffering/copying. Note that chunked encoding must be handled manually
/// in this case.
///
/// This is a tiny wrapper around `WriteRaw` which is basically tiny wrapper
/// around TcpStream or whatever `S` represents. Wrappers are used to
/// reconstruct original object, `EncoderDone` in this case.
pub struct RawBody<S> {
io: WriteRaw<S>,
}
// TODO: Support responses to CONNECT and `Upgrade: websocket` requests.
impl<S> Encoder<S> {
/// Write a 100 (Continue) response.
///
/// A server should respond with the 100 status code if it receives a
/// 100-continue expectation.
///
/// # Panics
///
/// When the response is already started. It's expected that your response
/// handler state machine will never call the method twice.
pub fn response_continue(&mut self) {
self.state.response_continue(&mut self.io.out_buf)
}
/// Write status line using `Status` enum
///
/// This puts status line into a buffer immediately. If you don't
/// continue with request it will be sent to the network shortly.
///
/// # Panics
///
/// When status line is already written. It's expected that your request
/// handler state machine will never call the method twice.
///
/// When the status code is 100 (Continue). 100 is not allowed
/// as a final status code.
pub fn status(&mut self, status: Status) {
self.state.response_status(&mut self.io.out_buf,
status.code(), status.reason())
}
/// Write custom status line
///
/// # Panics
///
/// When status line is already written. It's expected that your request
/// handler state machine will never call the method twice.
///
/// When the status code is 100 (Continue). 100 is not allowed
/// as a final status code.
pub fn custom_status(&mut self, code: u16, reason: &str) {
self.state.response_status(&mut self.io.out_buf, code, reason)
}
/// Add a header to the message.
///
/// Header is written into the output buffer immediately. And is sent
/// as soon as the next loop iteration
///
/// `Content-Length` header must be send using the `add_length` method
/// and `Transfer-Encoding: chunked` must be set with the `add_chunked`
/// method. These two headers are important for the security of HTTP.
///
/// Note that there is currently no way to use a transfer encoding other
/// than chunked.
///
/// We return Result here to make implementing proxies easier. In the
/// application handler it's okay to unwrap the result and to get
/// a meaningful panic (that is basically an assertion).
///
/// # Panics
///
/// Panics when `add_header` is called in the wrong state.
pub fn add_header<V: AsRef<[u8]>>(&mut self, name: &str, value: V)
-> Result<(), HeaderError>
{
self.state.add_header(&mut self.io.out_buf, name, value.as_ref())
}
/// Same as `add_header` but allows value to be formatted directly into
/// the buffer
///
/// Useful for dates and numeric headers, as well as some strongly typed
/// wrappers
pub fn format_header<D: Display>(&mut self, name: &str, value: D)
-> Result<(), HeaderError>
{
self.state.format_header(&mut self.io.out_buf, name, value)
}
/// Add a content length to the message.
///
/// The `Content-Length` header is written to the output buffer immediately.
/// It is checked that there are no other body length headers present in the
/// message. When the body is send the length is validated.
///
/// # Panics
///
/// Panics when `add_length` is called in the wrong state.
pub fn add_length(&mut self, n: u64)
-> Result<(), HeaderError>
{
self.state.add_length(&mut self.io.out_buf, n)
}
/// Sets the transfer encoding to chunked.
///
/// Writes `Transfer-Encoding: chunked` to the output buffer immediately.
/// It is assured that there is only one body length header is present
/// and the body is written in chunked encoding.
///
/// # Panics
///
/// Panics when `add_chunked` is called in the wrong state.
pub fn add_chunked(&mut self)
-> Result<(), HeaderError>
{
self.state.add_chunked(&mut self.io.out_buf)
}
/// Add a date header with the current date
///
/// This is barely a shortcut for:
///
/// ```ignore
/// enc.format_header("Date", HttpDate::from(SystemTime::now()));
/// ```
#[cfg(feature="date_header")]
pub fn add_date(&mut self) {
use httpdate::HttpDate;
use std::time::SystemTime;
self.format_header("Date", HttpDate::from(SystemTime::now()))
.expect("always valid to add a date")
}
/// Returns true if at least `status()` method has been called
///
/// This is mostly useful to find out whether we can build an error page
/// or it's already too late.
pub fn is_started(&self) -> bool {
self.state.is_started()
}
/// Closes the HTTP header and returns `true` if entity body is expected.
///
/// Specifically `false` is returned when status is 1xx, 204, 304 or in
/// the response to a `HEAD` request but not if the body has zero-length.
///
/// Similarly to `add_header()` it's fine to `unwrap()` here, unless you're
/// doing some proxying.
///
/// # Panics
///
/// Panics when the response is in a wrong state.
pub fn done_headers(&mut self) -> Result<bool, HeaderError> {
self.state.done_headers(&mut self.io.out_buf)
}
/// Write a chunk of the message body.
///
/// Works both for fixed-size body and chunked body.
///
/// For the chunked body each chunk is put into the buffer immediately
/// prefixed by chunk size. Empty chunks are ignored.
///
/// For both modes chunk is put into the buffer, but is only sent when
/// rotor-stream state machine is reached. So you may put multiple chunks
/// into the buffer quite efficiently.
///
/// You may write a body in responses to HEAD requests just like in real
/// requests but the data is not sent to the network. Of course it is
/// more efficient to not construct the message body at all.
///
/// # Panics
///
/// When response is in wrong state. Or there is no headers which
/// determine response body length (either Content-Length or
/// Transfer-Encoding).
pub fn write_body(&mut self, data: &[u8]) {
self.state.write_body(&mut self.io.out_buf, data)
}
/// Returns true if `done()` method is already called and everything
/// was okay.
pub fn is_complete(&self) -> bool {
self.state.is_complete()
}
/// Writes needed finalization data into the buffer and asserts
/// that response is in the appropriate state for that.
///
/// The method may be called multiple times.
///
/// # Panics
///
/// When the response is in the wrong state.
pub fn done(mut self) -> EncoderDone<S> {
self.state.done(&mut self.io.out_buf);
EncoderDone { buf: self.io }
}
/// Returns a raw body for zero-copy writing techniques
///
/// Note: we don't assert on the format of the body if you're using this
/// interface.
///
/// Note 2: RawBody (returned by this future) locks the underlying BiLock,
/// which basically means reading from this socket is not possible while
/// you're writing to the raw body.
///
/// Good idea is to use interface like this:
///
/// 1. Set appropriate content-length
/// 2. Write exactly this number of bytes or exit with error
///
/// This is specifically designed for using with `sendfile`
///
/// # Panics
///
/// This method panics if it's called when headers are not written yet.
pub fn raw_body(self) -> FutureRawBody<S> {
assert!(self.state.is_after_headers());
FutureRawBody(self.io.borrow_raw())
}
/// Flush the data to underlying socket
///
/// If the whole buffer could not be flushed it schedules a wakeup of
/// the current task when the the socket is writable.
///
/// You can find out how many bytes are left using `bytes_buffered()`
/// method
pub fn flush(&mut self) -> Result<(), io::Error>
where S: AsyncWrite
{
self.io.flush()
}
/// Returns bytes currently lying in the buffer
///
/// It's possible that these bytes are left from the previous request if
/// pipelining is enabled.
pub fn bytes_buffered(&mut self) -> usize {
self.io.out_buf.len()
}
/// Returns future which yield encoder back when buffer is flushed
///
/// More specifically when `butes_buffered()` < `watermark`
pub fn wait_flush(self, watermark: usize) -> WaitFlush<S> {
WaitFlush(Some(self), watermark)
}
}
impl<S> RawBody<S> {
/// Returns `EncoderDone` object that might be passed back to the HTTP
/// protocol
pub fn done(self) -> EncoderDone<S> {
EncoderDone { buf: self.io.into_buf() }
}
}
impl<S> io::Write for Encoder<S> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
// TODO(tailhook) we might want to propatage error correctly
// rather than panic
self.write_body(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
impl<S: AsyncWrite> AsyncWrite for Encoder<S> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
panic!("Can't shutdown request encoder");
}
}
impl<S: AsyncWrite> io::Write for RawBody<S> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.io.get_mut().write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.io.get_mut().flush()
}
}
impl<S: AsyncWrite> AsyncWrite for RawBody<S> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
panic!("Can't shutdown request body");
}
}
pub fn get_inner<S>(e: EncoderDone<S>) -> WriteBuf<S> {
e.buf
}
pub fn new<S>(io: WriteBuf<S>, cfg: ResponseConfig) -> Encoder<S> {
use base_serializer::Body::*;
// TODO(tailhook) implement Connection: Close,
// (including explicit one in HTTP/1.0) and maybe others
Encoder {
state: MessageState::ResponseStart {
body: if cfg.is_head { Head } else { Normal },
version: cfg.version,
close: cfg.do_close || cfg.version == Version::Http10,
},
io: io,
}
}
impl ResponseConfig {
pub fn from(req: &Head) -> ResponseConfig {
ResponseConfig {
version: req.version(),
is_head: req.method() == "HEAD",
do_close: req.connection_close(),
}
}
}
impl<S: AsyncWrite> Future for FutureRawBody<S> {
type Item = RawBody<S>;
type Error = io::Error;
fn poll(&mut self) -> Poll<RawBody<S>, io::Error> {
self.0.poll().map(|x| x.map(|y| RawBody { io: y }))
}
}
impl<S: AsyncWrite> Future for WaitFlush<S> {
type Item = Encoder<S>;
type Error = io::Error;
fn poll(&mut self) -> Result<Async<Encoder<S>>, io::Error> {
let bytes_left = {
let enc = self.0.as_mut().expect("future is polled twice");
enc.flush()?;
enc.io.out_buf.len()
};
if bytes_left < self.1 {
Ok(Async::Ready(self.0.take().unwrap()))
} else {
Ok(Async::NotReady)
}
}
}
#[cfg(feature="sendfile")]
mod sendfile {
extern crate tk_sendfile;
use std::io;
use futures::{Async};
use self::tk_sendfile::{Destination, FileOpener, Sendfile};
use super::RawBody;
impl<T: Destination> Destination for RawBody<T> {
fn write_file<O: FileOpener>(&mut self, file: &mut Sendfile<O>)
-> Result<usize, io::Error>
{
// TODO(tailhook) check the data written
self.io.get_mut().write_file(file)
}
fn poll_write(&self) -> Async<()> {
self.io.get_ref().poll_write()
}
}
}
#[cfg(test)]
mod test {
use tk_bufstream::{MockData, IoBuf};
use {Status};
use base_serializer::{MessageState, Body};
use super::{Encoder, EncoderDone};
use enums::Version;
fn do_response11_str<F>(fun: F) -> String
where F: FnOnce(Encoder<MockData>) -> EncoderDone<MockData>
{
let mock = MockData::new();
let done = fun(Encoder {
state: MessageState::ResponseStart {
body: Body::Normal,
version: Version::Http11,
close: false,
},
io: IoBuf::new(mock.clone()).split().0,
});
{done}.buf.flush().unwrap();
String::from_utf8_lossy(&mock.output(..)).to_string()
}
#[test]
fn date_header() {
assert!(do_response11_str(|mut enc| {
enc.status(Status::Ok);
enc.add_date();
enc.add_length(0).unwrap();
enc.done_headers().unwrap();
enc.done()
}).starts_with("HTTP/1.1 200 OK\r\nDate: "));
}
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/server/websocket.rs | src/server/websocket.rs | #[allow(unused_imports)]
use std::ascii::AsciiExt;
use std::str::{from_utf8};
use super::{Head};
use websocket::Accept;
/// Contains all the imporant parts of a websocket handshake
#[derive(Debug)]
pub struct WebsocketHandshake {
/// The destination value of `Sec-WebSocket-Accept`
pub accept: Accept,
/// List of `Sec-WebSocket-Protocol` tokens
pub protocols: Vec<String>,
/// List of `Sec-WebSocket-Extensions` tokens
pub extensions: Vec<String>,
}
fn bytes_trim(mut x: &[u8]) -> &[u8] {
while x.len() > 0 && matches!(x[0], b'\r' | b'\n' | b' ' | b'\t') {
x = &x[1..];
}
while x.len() > 0 && matches!(x[x.len()-1], b'\r' | b'\n' | b' ' | b'\t')
{
x = &x[..x.len()-1];
}
return x;
}
pub fn get_handshake(req: &Head) -> Result<Option<WebsocketHandshake>, ()> {
let conn_upgrade = req.connection_header().map(|x| {
x.split(',').any(|tok| tok.trim().eq_ignore_ascii_case("upgrade"))
});
if !conn_upgrade.unwrap_or(false) {
return Ok(None);
}
if req.path().is_none() {
debug!("Invalid request-target for websocket request");
return Err(());
}
let mut upgrade = false;
let mut version = false;
let mut accept = None;
let mut protocols = Vec::new();
let mut extensions = Vec::new();
for h in req.all_headers() {
if h.name.eq_ignore_ascii_case("Sec-WebSocket-Key") {
if accept.is_some() {
debug!("Duplicate Sec-WebSocket-Key");
return Err(());
}
accept = Some(Accept::from_key_bytes(bytes_trim(h.value)));
} else if h.name.eq_ignore_ascii_case("Sec-WebSocket-Version") {
// Only version 13 is supported
if bytes_trim(h.value) != b"13" {
debug!("Bad websocket version {:?}",
String::from_utf8_lossy(h.value));
return Err(());
} else {
version = true;
}
} else if h.name.eq_ignore_ascii_case("Sec-WebSocket-Protocol") {
let tokens = from_utf8(h.value)
.map_err(|_| debug!("Bad utf-8 in Sec-Websocket-Protocol"))?;
protocols.extend(tokens.split(",")
.map(|x| x.trim())
.filter(|x| x.len() > 0)
.map(|x| x.to_string()));
} else if h.name.eq_ignore_ascii_case("Sec-WebSocket-Extensions") {
let tokens = from_utf8(h.value)
.map_err(|_| debug!("Bad utf-8 in Sec-Websocket-Extensions"))?;
extensions.extend(tokens.split(",")
.map(|x| x.trim())
.filter(|x| x.len() > 0)
.map(|x| x.to_string()));
} else if h.name.eq_ignore_ascii_case("Upgrade") {
if !h.value.eq_ignore_ascii_case(b"websocket") {
return Ok(None); // Consider this not a websocket
} else {
upgrade = true;
}
}
}
if req.has_body() {
debug!("Websocket handshake has payload");
return Err(());
}
if !upgrade {
debug!("No upgrade header for a websocket");
return Err(());
}
if !version || accept.is_none() {
debug!("No required headers for a websocket");
return Err(());
}
Ok(Some(WebsocketHandshake {
accept: accept.take().unwrap(),
protocols: protocols,
extensions: extensions,
}))
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/server/codec.rs | src/server/codec.rs | use futures::{Async, Future};
use tk_bufstream::{ReadBuf, WriteBuf};
use super::{Error, Encoder, EncoderDone, Head};
use super::RecvMode;
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum BodyKind {
Fixed(u64),
Chunked,
Unsupported,
}
/// This is a low-level interface to the http server
pub trait Dispatcher<S> {
    /// The codec type for this dispatcher
    ///
    /// In many cases the type is just `Box<Codec<S>>`, but leaving it as an
    /// associated type makes different kinds of middleware cheaper.
    type Codec: Codec<S>;
    /// Received headers of a request
    ///
    /// At this point we already extracted all the headers and other data
    /// that we need to ensure correctness of the protocol. If you need
    /// to handle some data from the headers you need to store them somewhere
    /// (for example on `self`) for further processing.
    fn headers_received(&mut self, headers: &Head)
        -> Result<Self::Codec, Error>;
}
/// The type represents a consumer of a single request and yields a writer of
/// a response (the latter is a ``ResponseFuture``).
pub trait Codec<S> {
    /// This is a future returned by `start_response`
    ///
    /// It's fine if it's just `Box<Future<Item=EncoderDone<S>, Error>>` in
    /// most cases.
    type ResponseFuture: Future<Item=EncoderDone<S>, Error=Error>;
    /// Return a mode which will be used to receive request body
    ///
    ///
    /// Note: this mode not only influences the size of chunks that
    /// `data_received` receives and amount of buffering, but also it
    /// constrains the sequence between calls of `start_response()`
    /// and `data_received()`.
    ///
    /// Called once, right after `headers_received`
    fn recv_mode(&mut self) -> RecvMode;
    /// Chunk of the request body received
    ///
    /// `end` equals to `true` for the last chunk of the data.
    ///
    /// Method returns `Async::Ready(x)` to denote that it has consumed `x`
    /// bytes. If there are some bytes left in the buffer they will be passed
    /// again on the call.
    ///
    /// If the request body is empty, or last chunk arrives later and it's
    /// empty we call `c.data_received(b"", true)` on every wakeup,
    /// until `Async::Ready(0)` is returned (this helps to drive future that
    /// might complete on request completion without spawning another ones,
    /// but note that next response can't start writing in the meantime).
    ///
    /// Protocol panics if returned number of bytes larger than `data.len()`.
    ///
    fn data_received(&mut self, data: &[u8], end: bool)
        -> Result<Async<usize>, Error>;
    /// Start writing a response
    ///
    /// This method is called when all preceding requests are either
    /// sent to the network or already buffered. It can be called before
    /// `data_received()` but not before `headers_received()` (that would not
    /// make sense).
    ///
    /// Everything you write into a buffer might be flushed to the network
    /// immediately (or as fast as you yield to main loop). On the other
    /// hand we might buffer/pipeline multiple responses at once.
    fn start_response(&mut self, e: Encoder<S>) -> Self::ResponseFuture;
    /// Called after future returned by `start_response` done if recv mode
    /// is `Hijack`
    ///
    /// Note: both input and output buffers can contain some data.
    fn hijack(&mut self, _output: WriteBuf<S>, _input: ReadBuf<S>) {
        panic!("`Codec::recv_mode` returned `Hijack` but \
            no hijack() method implemented");
    }
}
/// Forwarding implementation so a boxed codec trait object can be used
/// wherever a concrete `Codec` is expected (e.g. as `Dispatcher::Codec`).
/// Every method simply delegates through the box.
impl<S, F> Codec<S> for Box<Codec<S, ResponseFuture=F>>
    where F: Future<Item=EncoderDone<S>, Error=Error>,
{
    type ResponseFuture = F;
    fn recv_mode(&mut self) -> RecvMode {
        (**self).recv_mode()
    }
    fn data_received(&mut self, data: &[u8], end: bool)
        -> Result<Async<usize>, Error>
    {
        (**self).data_received(data, end)
    }
    fn start_response(&mut self, e: Encoder<S>) -> Self::ResponseFuture {
        (**self).start_response(e)
    }
    fn hijack(&mut self, output: WriteBuf<S>, input: ReadBuf<S>) {
        (**self).hijack(output, input)
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/server/request_target.rs | src/server/request_target.rs | /// A middle part of the request line
///
/// Most people get used to having path there or maybe asterisk. But in the
/// real protocol there are all four options.
///
/// You don't have to implement all of them though. Using `Head::path()`
/// provides good shortcut for handling `Origin` and `Absolute` in uniform
/// way and `Authority` is useful only for (non-reverse, ) proxies.
// NOTE(review): these four variants mirror the four request-target forms of
// the HTTP/1.1 grammar (origin / absolute / authority / asterisk).
#[derive(Debug)]
pub enum RequestTarget<'a> {
    /// Usual form of `/hello?name=world`
    Origin(&'a str),
    /// Full url: `http://example.com:8080/hello`
    ///
    /// Note in this case (unlike in Origin) path may not start with a slash
    Absolute {
        /// Scheme, might be `http` or `https` (latter is useless for now)
        scheme: &'a str,
        /// Authority is basically `host[:port]`
        authority: &'a str,
        /// Path relative to root, it's not normalized, so might be empty
        path: &'a str
    },
    /// Only hostname `example.com:8080`, only useful for `CONNECT` method
    Authority(&'a str),
    /// Asterisk `*`
    Asterisk,
}
// An authority component never contains `/`, `?` or `#`, and HTTP does not
// support userinfo either (so `@` also terminates it); any other byte is
// accepted since the full grammar is quite complex.
fn authority_end_char(&x: &u8) -> bool {
    match x {
        b'/' | b'?' | b'#' | b'@' => true,
        _ => false,
    }
}
/// Parse the request-target (the middle part of the request line) into one
/// of the four supported forms.
///
/// Returns `None` for an empty string or anything matching no form.
pub fn parse(s: &str) -> Option<RequestTarget> {
    use self::RequestTarget::*;
    if s.len() == 0 {
        return None;
    }
    if s.starts_with("/") {
        return Some(Origin(s));
    }
    // Both absolute-form branches used to be copy-pasted; they differ only
    // in the scheme prefix, so handle them data-driven.
    for &(prefix, scheme) in &[("http://", "http"), ("https://", "https")] {
        if s.starts_with(prefix) {
            let rest = &s[prefix.len()..];
            // Authority runs until the first `/`, `?`, `#` or `@`
            // (or to the end of the string if none occurs).
            let auth_end = rest.as_bytes().iter()
                .position(authority_end_char)
                .unwrap_or(rest.len());
            return Some(Absolute {
                scheme: scheme,
                authority: &rest[..auth_end],
                path: &rest[auth_end..],
            });
        }
    }
    if s == "*" {
        return Some(Asterisk);
    }
    // Authority form: valid only if no authority-terminating byte occurs
    if s.as_bytes().iter().position(authority_end_char).is_none() {
        return Some(Authority(s));
    }
    return None;
}
#[cfg(test)]
mod test {
    use super::RequestTarget::*;
    use super::parse;
    // The cases below cover all four request-target forms plus rejection
    // of the empty string.
    #[test]
    fn test_empty() {
        assert_matches!(parse(""), None);
    }
    #[test]
    fn test_path() {
        assert_matches!(parse("/hello"),
            Some(Origin("/hello")));
    }
    #[test]
    fn test_path_query() {
        assert_matches!(parse("/hello?xxx"),
            Some(Origin("/hello?xxx")));
    }
    #[test]
    fn test_star() {
        assert_matches!(parse("*"), Some(Asterisk));
    }
    // A path that merely *contains* a scheme is still origin-form
    #[test]
    fn test_strange_path() {
        assert_matches!(parse("/http://x"),
            Some(Origin("/http://x")));
    }
    // Absolute form with an empty path is allowed
    #[test]
    fn test_plain_authority_uri() {
        assert_matches!(parse("http://x"),
            Some(Absolute { scheme: "http", authority: "x",
                            path: "" }));
    }
    #[test]
    fn test_uri() {
        assert_matches!(parse("http://x/"),
            Some(Absolute { scheme: "http", authority: "x",
                            path: "/" }));
    }
    #[test]
    fn test_bigger_uri() {
        assert_matches!(parse("http://x:932/hello?world"),
            Some(Absolute { scheme: "http", authority: "x:932",
                            path: "/hello?world" }));
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/server/buffered.rs | src/server/buffered.rs | //! Higher-level interface for serving fully buffered requests
//!
use std::net::SocketAddr;
use std::sync::Arc;
use std::marker::PhantomData;
use futures::{Async, Future, IntoFuture};
use futures::future::FutureResult;
use tokio_core::reactor::Handle;
use tk_bufstream::{ReadBuf, WriteBuf, ReadFramed, WriteFramed};
use websocket::{ServerCodec as WebsocketCodec};
use super::{Error, Encoder, EncoderDone, Dispatcher, Codec, Head, RecvMode};
use super::{WebsocketHandshake};
use {Version};
/// Buffered request struct
///
/// some known headers may be moved to upper structure (ie, Host)
// TODO(tailhook) hide internal structure?
#[derive(Debug)]
pub struct Request {
    // Address of the client that opened the connection
    peer_addr: SocketAddr,
    // Request method, e.g. "GET"
    method: String,
    // Request path as received
    path: String,
    // Value of the Host header, if any
    host: Option<String>,
    // HTTP version of the request
    version: Version,
    // All headers as (name, raw value) pairs
    headers: Vec<(String, Vec<u8>)>,
    // Fully buffered request body
    body: Vec<u8>,
    // Present when the request is a websocket upgrade
    websocket_handshake: Option<WebsocketHandshake>,
}
/// A dispatcher that allows to process request and return response using
/// a one single function
pub struct BufferedDispatcher<S, N: NewService<S>> {
    // Peer address stored into every Request
    addr: SocketAddr,
    // Upper bound on the buffered request body, in bytes
    max_request_length: usize,
    // Factory creating a service instance per request
    service: N,
    // Reactor handle used to spawn websocket futures
    handle: Handle,
    phantom: PhantomData<S>,
}
/// A codec counterpart of the BufferedDispatcher, might be used with your
/// own dispatcher too
pub struct BufferedCodec<R> {
    // Upper bound on the buffered request body, in bytes
    max_request_length: usize,
    // The per-request service instance
    service: R,
    // Request being assembled; taken (set to None) when response starts
    request: Option<Request>,
    // Reactor handle used to spawn websocket futures
    handle: Handle,
}
/// A helper to create a simple websocket (and HTTP) service
///
/// It's internally created by `BufferedDispatcher::new_with_websockets()`
pub struct WebsocketFactory<H, I> {
    // HTTP request handler, shared across all created instances
    service: Arc<H>,
    // Websocket connection handler, shared across all created instances
    websockets: Arc<I>,
}
/// An instance of websocket factory, created by WebsocketFactory itself
pub struct WebsocketService<H, I, T, U> {
    // Shared HTTP request handler
    service: Arc<H>,
    // Shared websocket connection handler
    websockets: Arc<I>,
    // Pins the future types T and U without storing them
    phantom: PhantomData<(T, U)>,
}
/// A trait that you must implement to reply on requests, usually a function
pub trait NewService<S> {
    /// Future returned by the service (an actual function serving request)
    type Future: Future<Item=EncoderDone<S>, Error=Error>;
    /// Instance of the service, created for each request
    type Instance: Service<S, Future=Self::Future>;
    /// Constructor of the instance of the service, created for each request
    fn new(&self) -> Self::Instance;
}
/// An instance of a NewService for a single request, usually just a function
pub trait Service<S> {
    /// A future returned by `call()`
    type Future: Future<Item=EncoderDone<S>, Error=Error>;
    /// A future returned by `start_websocket`, it's spawned on the main loop
    /// hence needed to be static.
    type WebsocketFuture: Future<Item=(), Error=()> + 'static;
    /// A method which is called when request arrives, including the websocket
    /// negotiation request.
    ///
    /// See examples for a way to negotiate both websockets and services
    fn call(&mut self, request: Request, encoder: Encoder<S>) -> Self::Future;
    /// A method which is called when websocket connection established
    fn start_websocket(&mut self, output: WriteFramed<S, WebsocketCodec>,
                       input: ReadFramed<S, WebsocketCodec>)
        -> Self::WebsocketFuture;
}
/// A `WebsocketFactory` is a `NewService`: each request gets a fresh
/// `WebsocketService` sharing the same (`Arc`-counted) handler closures.
impl<H, I, T, U, S> NewService<S> for WebsocketFactory<H, I>
    where H: Fn(Request, Encoder<S>) -> T,
          I: Fn(WriteFramed<S, WebsocketCodec>,
                ReadFramed<S, WebsocketCodec>) -> U,
          T: Future<Item=EncoderDone<S>, Error=Error>,
          U: Future<Item=(), Error=()> + 'static,
{
    type Future = T;
    type Instance = WebsocketService<H, I, T, U>;
    fn new(&self) -> Self::Instance {
        // Cloning Arcs only bumps refcounts; the closures are shared
        WebsocketService {
            service: self.service.clone(),
            websockets: self.websockets.clone(),
            phantom: PhantomData,
        }
    }
}
/// Delegates HTTP calls to the `service` closure and established websocket
/// connections to the `websockets` closure.
impl<S, H, I, T, U> Service<S> for WebsocketService<H, I, T, U>
    where H: Fn(Request, Encoder<S>) -> T,
          I: Fn(WriteFramed<S, WebsocketCodec>,
                ReadFramed<S, WebsocketCodec>) -> U,
          T: Future<Item=EncoderDone<S>, Error=Error>,
          U: Future<Item=(), Error=()> + 'static,
{
    type Future = T;
    type WebsocketFuture = U;
    fn call(&mut self, request: Request, encoder: Encoder<S>) -> T {
        (self.service)(request, encoder)
    }
    fn start_websocket(&mut self, output: WriteFramed<S, WebsocketCodec>,
                       input: ReadFramed<S, WebsocketCodec>)
        -> U
    {
        (self.websockets)(output, input)
    }
}
impl Request {
    /// Address of the peer that initiated the HTTP connection
    pub fn peer_addr(&self) -> SocketAddr {
        self.peer_addr
    }
    /// Method of the request (e.g. `GET`)
    pub fn method(&self) -> &str {
        self.method.as_str()
    }
    /// Path of the request
    pub fn path(&self) -> &str {
        self.path.as_str()
    }
    /// Value of the host header of the request, if one was present
    pub fn host(&self) -> Option<&str> {
        self.host.as_ref().map(String::as_str)
    }
    /// HTTP version used in the request
    pub fn version(&self) -> Version {
        self.version
    }
    /// All request headers as (name, raw value) pairs
    pub fn headers(&self) -> &[(String, Vec<u8>)] {
        self.headers.as_slice()
    }
    /// Raw request body
    pub fn body(&self) -> &[u8] {
        self.body.as_slice()
    }
    /// Websocket handshake data, if the request was an upgrade request
    pub fn websocket_handshake(&self) -> Option<&WebsocketHandshake> {
        self.websocket_handshake.as_ref()
    }
}
/// Any zero-argument closure producing a `Service` is itself a
/// `NewService` factory.
impl<S, T, R> NewService<S> for T
    where T: Fn() -> R,
          R: Service<S>,
{
    type Future = R::Future;
    type Instance = R;
    fn new(&self) -> R {
        (self)()
    }
}
/// A plain `Fn(Request, Encoder) -> Future` closure is a `Service`
/// whose websocket handler is a no-op (completes immediately).
impl<S, T, F> Service<S> for T
    where T: Fn(Request, Encoder<S>) -> F,
          F: Future<Item=EncoderDone<S>, Error=Error>,
{
    type Future = F;
    type WebsocketFuture = FutureResult<(), ()>;
    fn call(&mut self, request: Request, encoder: Encoder<S>) -> F
    {
        (self)(request, encoder)
    }
    fn start_websocket(&mut self, _output: WriteFramed<S, WebsocketCodec>,
                       _input: ReadFramed<S, WebsocketCodec>)
        -> Self::WebsocketFuture
    {
        // Basically no websockets
        Ok(()).into_future()
    }
}
impl<S, N: NewService<S>> BufferedDispatcher<S, N> {
    /// Create an instance of buffered dispatcher
    ///
    /// The maximum buffered request length defaults to 10 MiB
    /// (10_485_760 bytes); see `max_request_length` to change it.
    pub fn new(addr: SocketAddr, handle: &Handle, service: N)
        -> BufferedDispatcher<S, N>
    {
        BufferedDispatcher {
            addr: addr,
            max_request_length: 10_485_760,
            service: service,
            handle: handle.clone(),
            phantom: PhantomData,
        }
    }
    /// Sets max request length
    pub fn max_request_length(&mut self, value: usize) {
        self.max_request_length = value;
    }
}
impl<S, H, I, T, U> BufferedDispatcher<S, WebsocketFactory<H, I>>
    where H: Fn(Request, Encoder<S>) -> T,
          I: Fn(WriteFramed<S, WebsocketCodec>,
                ReadFramed<S, WebsocketCodec>) -> U,
          T: Future<Item=EncoderDone<S>, Error=Error>,
          U: Future<Item=(), Error=()> + 'static,
{
    /// Creates a dispatcher with two functions: one serving http requests and
    /// websockets.
    ///
    /// Both closures are stored behind `Arc`s so that each per-request
    /// service instance can share them. Maximum buffered request length
    /// defaults to 10 MiB, same as `new()`.
    pub fn new_with_websockets(addr: SocketAddr, handle: &Handle,
                               http: H, websockets: I)
        -> BufferedDispatcher<S, WebsocketFactory<H, I>>
    {
        BufferedDispatcher {
            addr: addr,
            max_request_length: 10_485_760,
            service: WebsocketFactory {
                service: Arc::new(http),
                websockets: Arc::new(websockets),
            },
            handle: handle.clone(),
            phantom: PhantomData,
        }
    }
}
/// Builds a `BufferedCodec` per request, copying all headers into an owned
/// `Request` so the handler does not borrow the connection buffers.
impl<S, N: NewService<S>> Dispatcher<S> for BufferedDispatcher<S, N> {
    type Codec = BufferedCodec<N::Instance>;
    fn headers_received(&mut self, headers: &Head)
        -> Result<Self::Codec, Error>
    {
        // TODO(tailhook) strip hop-by-hop headers
        let up = headers.get_websocket_upgrade();
        Ok(BufferedCodec {
            max_request_length: self.max_request_length,
            service: self.service.new(),
            request: Some(Request {
                peer_addr: self.addr,
                method: headers.method().to_string(),
                // TODO(tailhook) process other forms of path
                // NOTE(review): this unwrap panics if `path()` returns None
                // (presumably for authority-form/asterisk targets) —
                // confirm and consider returning an error instead.
                path: headers.path().unwrap().to_string(),
                host: headers.host().map(|x| x.to_string()),
                version: headers.version(),
                headers: headers.headers().map(|(name, value)| {
                    (name.to_string(), value.to_vec())
                }).collect(),
                // Body is filled in later by `data_received`
                body: Vec::new(),
                // `Err` from upgrade parsing is flattened to "no websocket"
                websocket_handshake: up.unwrap_or(None),
            }),
            handle: self.handle.clone(),
        })
    }
}
impl<S, R: Service<S>> Codec<S> for BufferedCodec<R> {
    type ResponseFuture = R::Future;
    fn recv_mode(&mut self) -> RecvMode {
        // Websocket upgrades skip the body and take over the connection;
        // everything else is fully buffered up to the configured limit.
        if self.request.as_ref().unwrap().websocket_handshake.is_some() {
            RecvMode::hijack()
        } else {
            RecvMode::buffered_upfront(self.max_request_length)
        }
    }
    fn data_received(&mut self, data: &[u8], end: bool)
        -> Result<Async<usize>, Error>
    {
        // In buffered-upfront mode the whole body arrives in one call
        assert!(end);
        self.request.as_mut().unwrap().body = data.to_vec();
        Ok(Async::Ready(data.len()))
    }
    fn start_response(&mut self, e: Encoder<S>) -> R::Future {
        // `request` is consumed here; panics if called twice
        self.service.call(self.request.take().unwrap(), e)
    }
    fn hijack(&mut self, write_buf: WriteBuf<S>, read_buf: ReadBuf<S>){
        // Wrap raw buffers into websocket frame codecs and hand the
        // connection over to a spawned websocket future
        let inp = read_buf.framed(WebsocketCodec);
        let out = write_buf.framed(WebsocketCodec);
        self.handle.spawn(self.service.start_websocket(out, inp));
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/server/error.rs | src/server/error.rs | use std::io;
use std::convert::From;
use httparse;
quick_error! {
    /// HTTP server error
    #[derive(Debug)]
    pub enum Error wraps pub ErrorEnum {
        /// Socket IO error
        Io(err: io::Error) {
            description("I/O error")
            display("I/O error: {}", err)
            from()
        }
        /// Error parsing http headers
        ParseError(err: httparse::Error) {
            description("parse error")
            display("parse error: {:?}", err)
            from()
        }
        /// Error parsing http chunk
        ChunkParseError(err: httparse::InvalidChunkSize) {
            description("chunk size parse error")
            from()
        }
        /// Connection reset
        ConnectionReset {
            description("connection reset")
        }
        /// Bad request target (middle part of the request line)
        BadRequestTarget {
            description("error parsing request target")
        }
        /// Host header is invalid (non-utf-8 for example)
        HostInvalid {
            description("invalid host header")
        }
        /// Duplicate host header in request
        DuplicateHost {
            description("duplicate host header")
        }
        /// Connection header is invalid (non-utf-8 for example)
        ConnectionInvalid {
            description("invalid connection header")
        }
        /// Content length header is invalid (non-integer, or > 64bit)
        ContentLengthInvalid {
            description("invalid content-length header")
        }
        /// Duplicate content-length header, this is prohibited due to security
        DuplicateContentLength {
            description("duplicate content length header")
        }
        /// Unsupported kind of request body
        ///
        /// We allow CONNECT requests in the library but drop them if you
        /// don't `Hijack` connection right after headers.
        UnsupportedBody {
            description("this kind of request body is not supported (CONNECT)")
        }
        /// Request body is larger than x in `RecvMode::Buffered(x)` or >64bit
        RequestTooLong {
            description("request body is too big")
        }
        /// Timeout while reading the request or writing the response
        Timeout {
            description("timeout while reading or writing request")
        }
        /// Arbitrary error wrapped via `Error::custom`
        Custom(err: Box<::std::error::Error + Send + Sync>) {
            description("custom error")
            display("custom error: {}", err)
            cause(&**err)
        }
    }
}
impl Error {
    /// Wrap an arbitrary error value into the server `Error` type
    pub fn custom<E: Into<Box<::std::error::Error + Send + Sync>>>(err: E)
        -> Error
    {
        // `wraps` generates `From<ErrorEnum> for Error`, so `.into()` works
        ErrorEnum::Custom(err.into()).into()
    }
}
impl From<io::Error> for Error {
fn from(v: io::Error) -> Error {
ErrorEnum::from(v).into()
}
}
// Compile-time guarantee that `Error` stays `Send + Sync`
#[test]
fn send_sync() {
    fn require_send_sync<T: Send + Sync>(_: T) {}
    require_send_sync(Error::from(ErrorEnum::Timeout));
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/server/mod.rs | src/server/mod.rs | //! HTTP server protocol implementation
//!
mod config;
mod error;
mod codec;
mod proto;
mod encoder;
mod request_target;
mod headers;
mod websocket;
mod recv_mode;
pub mod buffered;
pub use self::error::Error;
pub use self::encoder::{Encoder, EncoderDone};
pub use self::encoder::{WaitFlush, FutureRawBody, RawBody};
pub use self::codec::{Codec, Dispatcher};
pub use self::proto::Proto;
pub use self::headers::{Head, HeaderIter};
pub use self::request_target::RequestTarget;
pub use self::websocket::{WebsocketHandshake};
use std::time::Duration;
/// Fine-grained configuration of the HTTP server
#[derive(Debug, Clone)]
pub struct Config {
    // Max number of requests that may be queued awaiting responses
    inflight_request_limit: usize,
    // Initial capacity preallocated for the in-flight request queue
    inflight_request_prealloc: usize,
    // Deadline for receiving the first byte on a fresh connection
    first_byte_timeout: Duration,
    // Deadline for the next request on an idle persistent connection
    keep_alive_timeout: Duration,
    // Deadline for receiving the full header section
    headers_timeout: Duration,
    // NOTE(review): the two *_byte_timeout fields are not referenced in the
    // visible code — presumably per-byte stall timeouts; confirm.
    input_body_byte_timeout: Duration,
    // Deadline for receiving the whole request body
    input_body_whole_timeout: Duration,
    output_body_byte_timeout: Duration,
    // Deadline for writing the whole response
    output_body_whole_timeout: Duration,
}
/// This type is returned from `headers_received` handler of either
/// client or server protocol handler
///
/// The marker is used to denote whether you want to have the whole response
/// buffered for you or read chunk by chunk.
///
/// The `Progressive` (chunk by chunk) mode is mostly useful for proxy servers.
/// Or it may be useful if your handler is able to parse data without holding
/// everything in the memory.
///
/// Otherwise, it's best to use `Buffered` mode (for example, comparing with
/// using your own buffering). We do our best to optimize it for you.
#[derive(Debug, Clone)]
pub struct RecvMode {
    // The actual receive strategy; read internally via recv_mode::get_mode
    mode: recv_mode::Mode,
    // Per-request override of the whole-body read timeout
    // (set by body_read_timeout); None means use the configured default
    timeout: Option<Duration>,
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/server/recv_mode.rs | src/server/recv_mode.rs | use std::time::Duration;
use server::RecvMode;
/// Crate-internal representation of the receive strategy chosen by the
/// codec (the public wrapper `RecvMode` adds an optional timeout override).
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Mode {
    /// Buffer the whole body (up to the given limit, in bytes) upfront
    BufferedUpfront(usize),
    /// Deliver the body chunk by chunk; value is a min-chunk-size hint
    Progressive(usize),
    /// Don't read the body; hand the raw connection to the codec
    Hijack,
}
impl RecvMode {
    /// Receive the whole message body (request or response) into memory
    /// before the handler starts producing a response.
    ///
    /// `max_body_size` is an upper bound on the body size — not a buffer
    /// size and not a minimum. It works the same for chunked encoding and
    /// for HTTP/1.0 read-until-end-of-stream bodies, where the actual
    /// length can't be known in advance.
    pub fn buffered_upfront(max_body_size: usize) -> RecvMode {
        RecvMode {
            timeout: None,
            mode: Mode::BufferedUpfront(max_body_size),
        }
    }
    /// Fetch data chunk-by-chunk.
    ///
    /// The response handler runs independently of body reading: it may
    /// start before or after the progressive body starts or finishes —
    /// the actual sequence of events depends on other incoming requests
    /// and on client performance.
    ///
    /// `min_chunk_size_hint` is the smallest number of bytes that will be
    /// passed to the protocol handler at once. It's a performance knob
    /// (fewer parser wake-ups), not an input buffer size; `progressive(1)`
    /// is perfectly okay (e.g. when the request body serves as a
    /// persistent connection delivering messages on demand).
    pub fn progressive(min_chunk_size_hint: usize) -> RecvMode {
        RecvMode {
            timeout: None,
            mode: Mode::Progressive(min_chunk_size_hint),
        }
    }
    /// Skip the request body and hijack the connection once response
    /// headers are sent. Useful for connection upgrades, including
    /// websockets, and for the CONNECT method.
    ///
    /// Note: `data_received` of the Codec is never called for a hijacked
    /// connection.
    pub fn hijack() -> RecvMode {
        RecvMode { timeout: None, mode: Mode::Hijack }
    }
    /// Use this timeout for reading the whole request body instead of the
    /// configured default.
    ///
    /// Handy for specific slow routes whose requests you can vouch for,
    /// or for streaming large bodies — or, conversely, to shorten the
    /// timeout for suspicious hosts.
    pub fn body_read_timeout(mut self, duration: Duration) -> RecvMode {
        self.timeout = Some(duration);
        self
    }
}
/// Crate-internal accessor for the private `mode` field of `RecvMode`
/// (used by the protocol implementation in `proto.rs`).
pub fn get_mode(mode: &RecvMode) -> Mode {
    mode.mode
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/server/proto.rs | src/server/proto.rs | use std::mem;
use std::sync::Arc;
use std::collections::VecDeque;
use std::time::Instant;
use futures::{Future, Poll, Async};
use tk_bufstream::{IoBuf, WriteBuf, ReadBuf};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_core::reactor::{Handle, Timeout};
use super::encoder::{self, get_inner, ResponseConfig};
use super::{Dispatcher, Codec, Config};
use super::headers::parse_headers;
use super::codec::BodyKind;
use server::error::{ErrorEnum, Error};
use server::recv_mode::{Mode, get_mode};
use chunked;
use body_parser::BodyProgress;
/// Output (write) side of the connection state machine.
enum OutState<S, F, C> {
    /// Nothing being written; we own the output buffer.
    Idle(WriteBuf<S>),
    /// A response future is running and owns the output buffer.
    Write(F),
    /// Like `Write`, but when the future completes the connection is
    /// hijacked and handed to the codec (see `do_writes`).
    Switch(F, C),
    /// Transient placeholder used during `mem::replace`; never observed.
    Void,
}
/// Per-request state held while a request body is being read.
struct BodyState<C> {
    // Receive mode the codec picked (buffered/progressive)
    mode: Mode,
    // Body parser state tracking how much of the body has arrived
    progress: BodyProgress,
    // Data needed to construct the response encoder later
    response_config: ResponseConfig,
    // Codec handling this request
    codec: C,
}
/// Input (read) side of the connection state machine.
enum InState<C> {
    /// Connection opened, no request seen yet.
    Connected,
    /// Between requests on a persistent connection.
    KeepAlive,
    /// Reading request headers.
    Headers,
    /// Reading a request body.
    Body(BodyState<C>),
    /// Input buffer is reserved for (or already handed to) the codec.
    Hijack,
    /// Placeholder during state transitions; also treated as terminal
    /// in `do_reads`.
    Closed,
}
/// Timer-free core of the protocol state machine; `Proto` wraps it and
/// adds timeout scheduling on a tokio reactor (this split also makes the
/// state machine testable with mock streams).
pub struct PureProto<S, D: Dispatcher<S>> {
    // User-supplied factory producing a codec per request
    dispatcher: D,
    inbuf: Option<ReadBuf<S>>, // it's optional only for hijacking
    // Current input-side state
    reading: InState<D::Codec>,
    // Requests fully received whose responses haven't started yet
    waiting: VecDeque<(ResponseConfig, D::Codec)>,
    // Current output-side state
    writing: OutState<S, <D::Codec as Codec<S>>::ResponseFuture, D::Codec>,
    config: Arc<Config>,
    // NOTE(review): the two fields below are updated but never read in the
    // visible code — presumably reserved for per-byte timeouts; confirm.
    last_byte_read: Instant,
    last_byte_written: Instant,
    /// Long-term deadline for reading (headers- or input body_whole- timeout)
    read_deadline: Instant,
    // Deadline for finishing the current response write
    response_deadline: Instant,
}
/// A low-level HTTP/1.x server protocol handler
pub struct Proto<S, D: Dispatcher<S>> {
    // The actual state machine
    proto: PureProto<S, D>,
    // Reactor handle used to (re)create timeouts
    handle: Handle,
    // Currently scheduled timeout, replaced on every poll
    timeout: Timeout,
}
/// Translate the body framing parsed from headers plus the codec's receive
/// mode into a `BodyProgress` state machine, validating size limits.
///
/// Returns `UnsupportedBody` for framings we can't read, and
/// `RequestTooLong` when a fixed-size body exceeds either the buffering
/// limit or what fits into `usize` on this platform.
fn new_body(mode: BodyKind, recv_mode: Mode)
    -> Result<BodyProgress, ErrorEnum>
{
    use super::codec::BodyKind as B;
    use super::recv_mode::Mode as M;
    use body_parser::BodyProgress as P;
    match (mode, recv_mode) {
        (B::Unsupported, _) => Err(ErrorEnum::UnsupportedBody),
        (B::Fixed(x), M::BufferedUpfront(b)) if x > b as u64 => {
            Err(ErrorEnum::RequestTooLong)
        }
        // A declared length that doesn't fit into usize (possible on
        // 32-bit targets) would be silently truncated by `as usize`
        // below; reject it instead.
        (B::Fixed(x), _) if x > ::std::usize::MAX as u64 => {
            Err(ErrorEnum::RequestTooLong)
        }
        (B::Fixed(x), _) => Ok(P::Fixed(x as usize)),
        (B::Chunked, _) => Ok(P::Chunked(chunked::State::new())),
    }
}
impl<S: AsyncRead+AsyncWrite, D: Dispatcher<S>> Proto<S, D> {
    /// Create a new protocol implementation from a TCP connection and a config
    ///
    /// You should use this protocol as a `Sink`
    pub fn new(conn: S, cfg: &Arc<Config>, dispatcher: D,
        handle: &Handle)
        -> Proto<S, D>
    {
        let timeout = Timeout::new(cfg.first_byte_timeout, handle)
            .expect("can always add a timeout");
        Proto {
            proto: PureProto::new(conn, cfg, dispatcher),
            handle: handle.clone(),
            timeout: timeout,
        }
    }
}
impl<S, D: Dispatcher<S>> PureProto<S, D> {
    /// Build the state machine around a fresh connection: split the stream
    /// into read/write halves and start in `Connected`/`Idle`.
    pub fn new(conn: S, cfg: &Arc<Config>, dispatcher: D)
        -> PureProto<S, D>
        where S: AsyncRead + AsyncWrite
    {
        let (cout, cin) = IoBuf::new(conn).split();
        PureProto {
            dispatcher: dispatcher,
            inbuf: Some(cin),
            reading: InState::Connected,
            waiting: VecDeque::with_capacity(
                cfg.inflight_request_prealloc),
            writing: OutState::Idle(cout),
            config: cfg.clone(),
            last_byte_read: Instant::now(),
            last_byte_written: Instant::now(),
            read_deadline: Instant::now() + cfg.first_byte_timeout,
            response_deadline: Instant::now(), // irrelevant at start
        }
    }
    /// Returns Ok(true) if new data has been read
    fn do_reads(&mut self) -> Result<bool, Error>
        where S: AsyncRead
    {
        use self::InState::*;
        let mut changed = false;
        let mut inbuf = self.inbuf.as_mut();
        let inbuf = if let Some(ref mut inbuf) = inbuf {
            inbuf
        } else {
            // Buffer has been stolen (connection hijacked)
            return Ok(false);
        };
        loop {
            // Backpressure: stop reading once too many requests are queued
            // awaiting responses (one slot is reserved while mid-body).
            let limit = match self.reading {
                Headers | Connected | KeepAlive
                    => self.config.inflight_request_limit,
                Body(..) => self.config.inflight_request_limit-1,
                Closed | Hijack => return Ok(changed),
            };
            if self.waiting.len() <= limit {
                // TODO(tailhook) Do reads after parse_headers() [optimization]
                if inbuf.read().map_err(ErrorEnum::Io)? > 0 {
                    self.last_byte_read = Instant::now();
                }
            }
            // Take the state out (leaving `Closed` as placeholder), compute
            // the next state and whether to run another loop iteration.
            let (next, cont) = match mem::replace(&mut self.reading, Closed) {
                // First bytes of a (new) request arrived: expect headers
                KeepAlive | Connected if inbuf.in_buf.len() > 0 => {
                    self.read_deadline = Instant::now()
                        + self.config.headers_timeout;
                    (Headers, true)
                }
                Connected => (Connected, false),
                KeepAlive => (KeepAlive, false),
                Headers => {
                    match parse_headers(&mut inbuf.in_buf,
                        &mut self.dispatcher)?
                    {
                        Some((body, mut codec, cfg)) => {
                            changed = true;
                            let mode = codec.recv_mode();
                            if get_mode(&mode) == Mode::Hijack {
                                // No body to read: queue the response and
                                // stop consuming input for this connection
                                self.waiting.push_back((cfg, codec));
                                (Hijack, true)
                            } else {
                                // Body read deadline: per-request override
                                // or the configured whole-body timeout
                                let timeo = mode.timeout.unwrap_or(
                                    self.config.input_body_whole_timeout);
                                self.read_deadline = Instant::now() + timeo;
                                (Body(BodyState {
                                    mode: get_mode(&mode),
                                    response_config: cfg,
                                    progress: new_body(body, get_mode(&mode))?,
                                    codec: codec }),
                                    true)
                            }
                        }
                        // Headers incomplete; wait for more bytes
                        None => (Headers, false),
                    }
                }
                Body(mut body) => {
                    body.progress.parse(inbuf)
                        .map_err(ErrorEnum::ChunkParseError)?;
                    let (bytes, done) = body.progress.check_buf(inbuf);
                    // Decide whether to push data to the codec now:
                    // always on completion; in progressive mode once the
                    // min-chunk hint is reached; never otherwise.
                    let operation = if done {
                        Some(body.codec.data_received(
                            &inbuf.in_buf[..bytes], true)?)
                    } else if inbuf.done() {
                        // EOF mid-body
                        return Err(ErrorEnum::ConnectionReset.into());
                    } else if matches!(body.mode, Mode::Progressive(x) if x <= bytes) {
                        Some(body.codec.data_received(
                            &inbuf.in_buf[..bytes], false)?)
                    } else {
                        None
                    };
                    match operation {
                        Some(Async::Ready(consumed)) => {
                            body.progress.consume(inbuf, consumed);
                            if done && consumed == bytes {
                                // Body fully consumed: queue the response
                                // and go back to keep-alive
                                changed = true;
                                self.waiting.push_back(
                                    (body.response_config, body.codec));
                                self.read_deadline = Instant::now()
                                    + self.config.keep_alive_timeout;
                                (KeepAlive, true)
                            } else {
                                (Body(body), true) // TODO(tailhook) check
                            }
                        }
                        Some(Async::NotReady) => {
                            if matches!(body.mode, Mode::Progressive(x) if x > bytes) {
                                (Body(body), false)
                            } else {
                                (Body(body), true) // TODO(tailhook) check
                            }
                        }
                        None => (Body(body), false),
                    }
                }
                Hijack => (Hijack, false),
                // `Closed` is only the mem::replace placeholder here
                Closed => unreachable!(),
            };
            self.reading = next;
            if !cont {
                break;
            }
        }
        Ok(changed)
    }
    /// Flush buffered output and drive response futures; on a hijack,
    /// hands both buffers to the codec and returns.
    fn do_writes(&mut self) -> Result<(), Error>
        where S: AsyncWrite
    {
        use self::OutState::*;
        use self::InState::*;
        use server::recv_mode::Mode::{BufferedUpfront, Progressive};
        loop {
            let (next, cont) = match mem::replace(&mut self.writing, Void) {
                Idle(mut io) => {
                    // Flush whatever is still buffered from before
                    let old_len = io.out_buf.len();
                    if old_len > 0 {
                        io.flush().map_err(ErrorEnum::Io)?;
                        if io.out_buf.len() < old_len {
                            self.last_byte_written = Instant::now();
                        }
                    }
                    if let Some((rc, mut codec)) = self.waiting.pop_front() {
                        // Start the next queued response
                        self.response_deadline = Instant::now()
                            + self.config.output_body_whole_timeout;
                        let e = encoder::new(io, rc);
                        if matches!(self.reading, Hijack) {
                            // Keep the codec around: the connection is
                            // handed over after the response completes
                            (Switch(codec.start_response(e), codec), true)
                        } else {
                            (Write(codec.start_response(e)), true)
                        }
                    } else {
                        match self.reading {
                            Body(BodyState { mode: BufferedUpfront(..), ..})
                            | Closed | Headers | Connected | KeepAlive
                            => {
                                (Idle(io), false)
                            }
                            // Hijack mode never reaches Body state
                            Body(BodyState { mode: Mode::Hijack, ..}) => {
                                unreachable!();
                            }
                            Body(BodyState {
                                mode: Progressive(_),
                                codec: ref mut _codec, ..})
                            => {
                                self.response_deadline = Instant::now()
                                    + self.config.output_body_whole_timeout;
                                // TODO(tailhook) start writing now
                                unimplemented!();
                            }
                            // Hijacked requests always queue a response
                            // first, so `waiting` can't be empty here
                            Hijack => unreachable!(),
                        }
                    }
                }
                Write(mut f) => {
                    match f.poll()? {
                        Async::Ready(x) => {
                            self.read_deadline = Instant::now()
                                + self.config.keep_alive_timeout;
                            (Idle(get_inner(x)), true)
                        }
                        Async::NotReady => {
                            (Write(f), false)
                        }
                    }
                }
                Switch(mut f, mut codec) => {
                    match f.poll()? {
                        Async::Ready(x) => {
                            // Response done: give both halves of the
                            // connection to the codec and stop the protocol
                            let wr = get_inner(x);
                            let rd = self.inbuf.take()
                                .expect("can hijack only once");
                            codec.hijack(wr, rd);
                            return Ok(());
                        }
                        Async::NotReady => {
                            (Switch(f, codec), false)
                        }
                    }
                }
                // `Void` is only the mem::replace placeholder
                Void => unreachable!(),
            };
            self.writing = next;
            if !cont {
                return Ok(());
            }
        }
    }
}
impl<S: AsyncRead+AsyncWrite, D: Dispatcher<S>> PureProto<S, D> {
    /// Does all needed processing and returns Ok(true) if connection is fine
    /// and Ok(false) if it needs to be closed
    fn process(&mut self) -> Result<bool, Error> {
        // Alternate writes and reads until neither makes progress
        self.do_writes()?;
        while self.do_reads()? {
            self.do_writes()?;
        }
        // A stolen buffer (hijack) or EOF means we're done
        if self.inbuf.as_ref().map(|x| x.done()).unwrap_or(true) {
            Ok(false)
        } else {
            Ok(true)
        }
    }
    /// Deadline the wrapping `Proto` should arm a timer for, or `None`
    /// when progress depends solely on the request handler.
    fn timeout(&mut self) -> Option<Instant> {
        use self::OutState::*;
        match self.writing {
            Idle(..) => {}
            Write(..) => return Some(self.response_deadline),
            Switch(..) => return None, // TODO(tailhook) is it right?
            Void => return None, // TODO(tailhook) is it reachable?
        }
        if self.waiting.len() > 0 { // if there are requests processing now
            // we don't have a read timeout
            return None;
        }
        return Some(self.read_deadline);
    }
}
/// Drives the connection as a future: resolves when the connection should
/// be closed, errs on protocol/IO errors or timeouts.
impl<S: AsyncRead+AsyncWrite, D: Dispatcher<S>> Future for Proto<S, D> {
    type Item = ();
    type Error = Error;
    fn poll(&mut self) -> Poll<(), Error> {
        match self.proto.process() {
            Ok(false) => Ok(Async::Ready(())),
            Ok(true) => {
                // TODO(tailhook) schedule notification with timeout
                match self.proto.timeout() {
                    Some(new_timeout) => {
                        let now = Instant::now();
                        if now > new_timeout {
                            Err(ErrorEnum::Timeout.into())
                        } else {
                            // Re-arm the timer for the new deadline and
                            // poll it so this task gets woken up
                            self.timeout = Timeout::new(new_timeout - now,
                                &self.handle)
                                .expect("can always add a timeout");
                            let timeo = self.timeout.poll()
                                .expect("timeout can't fail on poll");
                            match timeo {
                                Async::Ready(()) => {
                                    Err(ErrorEnum::Timeout.into())
                                }
                                Async::NotReady => Ok(Async::NotReady),
                            }
                        }
                    }
                    None => {
                        // No timeout. This means we are waiting for request
                        // handler to do its work. Request handler should have
                        // some timeout handler itself.
                        Ok(Async::NotReady)
                    }
                }
            }
            Err(e) => Err(e),
        }
    }
}
#[cfg(test)]
mod test {
    // Unit tests for the pure (I/O-free) server protocol state machine,
    // driven through `MockData` instead of a real socket.
    use std::sync::Arc;
    use std::sync::atomic::{AtomicUsize, Ordering};
    use futures::{Empty, Async, empty};
    use tk_bufstream::{MockData, ReadBuf, WriteBuf};
    use super::PureProto;
    use server::{Config, Dispatcher, Codec};
    use server::{Head, RecvMode, Error, Encoder, EncoderDone};
    // Dispatcher that counts how many requests reached `start_response`.
    struct MockDisp<'a> {
        counter: &'a AtomicUsize,
    }
    // Dispatcher that additionally counts websocket upgrade requests.
    struct MockWs<'a> {
        websockets: &'a AtomicUsize,
    }
    // Codec that expects an empty buffered body and bumps `counter`
    // when a response is started.
    struct MockCodec<'a> {
        counter: &'a AtomicUsize,
    }
    impl<'a> Dispatcher<MockData> for MockDisp<'a> {
        type Codec = MockCodec<'a>;
        fn headers_received(&mut self, _headers: &Head)
            -> Result<Self::Codec, Error>
        {
            Ok(MockCodec { counter: self.counter })
        }
    }
    impl<'a> Dispatcher<MockData> for MockWs<'a> {
        type Codec = MockCodec<'a>;
        fn headers_received(&mut self, headers: &Head)
            -> Result<Self::Codec, Error>
        {
            // Count the request as a websocket only when upgrade headers
            // are present (both branches hand out the same codec).
            if headers.get_websocket_upgrade().unwrap().is_some() {
                self.websockets.fetch_add(1, Ordering::SeqCst);
                Ok(MockCodec { counter: self.websockets })
            } else {
                Ok(MockCodec { counter: self.websockets })
            }
        }
    }
    impl<'a> Codec<MockData> for MockCodec<'a> {
        type ResponseFuture = Empty<EncoderDone<MockData>, Error>;
        fn recv_mode(&mut self) -> RecvMode {
            RecvMode::buffered_upfront(1024)
        }
        fn data_received(&mut self, data: &[u8], end: bool)
            -> Result<Async<usize>, Error>
        {
            // All test requests carry no body.
            assert!(end);
            assert_eq!(data.len(), 0);
            Ok(Async::Ready(0))
        }
        fn start_response(&mut self, _e: Encoder<MockData>)
            -> Self::ResponseFuture
        {
            self.counter.fetch_add(1, Ordering::SeqCst);
            // Never completes: tests only check that a response started.
            empty()
        }
        fn hijack(&mut self, _write_buf: WriteBuf<MockData>,
            _read_buf: ReadBuf<MockData>){
            unimplemented!();
        }
    }
    #[test]
    fn simple_get_request() {
        let counter = AtomicUsize::new(0);
        let mock = MockData::new();
        let mut proto = PureProto::new(mock.clone(),
            &Arc::new(Config::new()), MockDisp { counter: &counter });
        proto.process().unwrap();
        mock.add_input("GET / HTTP/1.0\r\n\r\n");
        proto.process().unwrap();
        assert_eq!(counter.load(Ordering::SeqCst), 1);
    }
    #[test]
    #[should_panic(expected="Version")]
    fn failing_get_request() {
        let counter = AtomicUsize::new(0);
        let mock = MockData::new();
        let mut proto = PureProto::new(mock.clone(),
            &Arc::new(Config::new()),
            MockDisp { counter: &counter });
        proto.process().unwrap();
        // Invalid protocol name must be rejected by the parser.
        mock.add_input("GET / TTMP/2.0\r\n\r\n");
        proto.process().unwrap();
    }
    #[test]
    fn simple_get_request_with_limit_one() {
        let counter = AtomicUsize::new(0);
        let mock = MockData::new();
        let mut proto = PureProto::new(mock.clone(),
            &Config::new().inflight_request_limit(1).done(),
            MockDisp { counter: &counter });
        proto.process().unwrap();
        mock.add_input("GET / HTTP/1.0\r\n\r\n");
        proto.process().unwrap();
        assert_eq!(counter.load(Ordering::SeqCst), 1);
    }
    #[test]
    fn websocket() {
        let counter = AtomicUsize::new(0);
        let mock = MockData::new();
        let mut proto = PureProto::new(mock.clone(),
            &Config::new().inflight_request_limit(1).done(),
            MockWs { websockets: &counter });
        proto.process().unwrap();
        // Trailing `\` skips the next line's leading whitespace, so the
        // literal value is unaffected by indentation.
        mock.add_input("GET /chat HTTP/1.1\r\n\
            Host: server.example.com\r\n\
            Upgrade: websocket\r\n\
            Connection: Upgrade\r\n\
            Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\
            Sec-WebSocket-Protocol: chat, superchat\r\n\
            Sec-WebSocket-Version: 13\r\n\
            Origin: http://example.com\r\n\r\n");
        proto.process().unwrap();
        // counts as a request and as a websocket
        assert_eq!(counter.load(Ordering::SeqCst), 2);
    }
    #[test]
    fn websocket_with_keepalive() {
        let counter = AtomicUsize::new(0);
        let mock = MockData::new();
        let mut proto = PureProto::new(mock.clone(),
            &Config::new().inflight_request_limit(1).done(),
            MockWs { websockets: &counter });
        proto.process().unwrap();
        mock.add_input("GET /chat HTTP/1.1\r\n\
            Host: server.example.com\r\n\
            Upgrade: websocket\r\n\
            Connection: keep-alive, Upgrade\r\n\
            Keep-Alive: some thing\r\n\
            Sec-WebSocket-Key: x3JJHMbDL1EzLkh9GBhXDw==\r\n\
            Sec-WebSocket-Protocol: chat, superchat\r\n\
            Sec-WebSocket-Version: 13\r\n\
            Origin: http://example.com\r\n\r\n");
        proto.process().unwrap();
        // counts as a request and as a websocket
        assert_eq!(counter.load(Ordering::SeqCst), 2);
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/client/config.rs | src/client/config.rs | use std::sync::Arc;
use std::time::Duration;
use client::{Config};
impl Config {
    /// Create a config with defaults
    pub fn new() -> Config {
        Config {
            inflight_request_limit: 1,
            inflight_request_prealloc: 1,
            keep_alive_timeout: Duration::new(4, 0),
            safe_pipeline_timeout: Duration::from_millis(300),
            max_request_timeout: Duration::new(15, 0),
        }
    }
    /// A number of inflight requests until we start returning
    /// `NotReady` from `start_send`
    ///
    /// Note we always return `NotReady` if some *request* is streaming
    /// currently. Use `Sink::buffered` to prevent that.
    ///
    /// Note 2: you might also need to tweak `safe_pipeline_timeout` to
    /// make pipelining work.
    pub fn inflight_request_limit(&mut self, value: usize) -> &mut Self {
        self.inflight_request_limit = value;
        self
    }
    /// Size of the queue that is preallocated for holding requests
    ///
    /// Should be smaller than `inflight_request_limit`.
    // NOTE(review): method name is missing an "l" ("prealoc"); kept as-is
    // because renaming would break the public API.
    pub fn inflight_request_prealoc(&mut self, value: usize) -> &mut Self {
        self.inflight_request_prealloc = value;
        self
    }
    /// Keep-alive timeout
    ///
    /// This is maximum time connection is kept alive when idle. We can't
    /// reliably detect when server closed connection at the remote end, in
    /// some cases (e.g. when remote server crashed).
    ///
    /// Also, there is a race condition between server closing the connection
    /// and client sending new request. So this timeout should usually be less
    /// than keep-alive timeout at server side.
    ///
    /// Note: default is very much conservative (currently 4 seconds, but we
    /// might change it).
    pub fn keep_alive_timeout(&mut self, dur: Duration) -> &mut Self {
        self.keep_alive_timeout = dur;
        self
    }
    /// Maximum time peer doesn't answer request before we consider this
    /// connection can't be used for pipelining more requests.
    ///
    /// Note: when this timeout is reached more requests can already be
    /// sent using this connection. This number only prevents further ones.
    /// You must disable pipelining at all if losing or retrying requests is
    /// destructive for your application.
    ///
    /// Note: we have a very conservative default (currently 300 ms, but we
    /// might change it in future). There are two reasons for this, both
    /// might not apply to your specific setup:
    ///
    /// 1. We think that pipelining only makes sense in very high performance
    ///    situation where latency is comparable to network latency
    /// 2. In rust world everybody thinks performance is fabulous
    pub fn safe_pipeline_timeout(&mut self, dur: Duration) -> &mut Self {
        self.safe_pipeline_timeout = dur;
        self
    }
    /// Absolute maximum time of whole request can work
    ///
    /// The rule of thumb: this must be the maximum time any your request
    /// can take from first byte sent to the last byte received.
    ///
    /// # Details
    ///
    /// This timeout is a subject of two contradictory goals:
    ///
    /// 1. Limit number of open (and hanging) connections
    /// 2. Tolerate peak load on the server (and don't let requests repeat,
    ///    when unnecessary)
    ///
    /// Note: while you can limit time you're waiting for each individual
    /// request by dropping a future, we don't have cancellation of already
    /// sent requests. So while your business logic will not hang for too
    /// long, connection hangs for `max_request_timeout` time and occupies
    /// connection pool's slot for this time.
    ///
    /// Default timeout is 15 seconds (which is both too large for many
    /// applications and too small for some ones)
    pub fn max_request_timeout(&mut self, dur: Duration) -> &mut Self {
        self.max_request_timeout = dur;
        self
    }
    /// Create a Arc'd config clone to pass to the constructor
    ///
    /// This is just a convenience method.
    pub fn done(&mut self) -> Arc<Config> {
        Arc::new(self.clone())
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/client/errors.rs | src/client/errors.rs | use std::io;
use std::convert::From;
use futures::sync::mpsc::SendError;
use httparse::Error as HttpError;
use httparse::InvalidChunkSize;
quick_error! {
    #[derive(Debug)]
    /// HTTP client error
    pub enum Error wraps pub ErrorEnum {
        /// I/O (basically networking) error occurred during request
        Io(err: io::Error) {
            description("IO error")
            display("IO error: {}", err)
            from()
        }
        /// Bad response headers received
        Header(err: HttpError) {
            description("bad headers")
            display("bad headers: {}", err)
            from()
        }
        /// Bad chunk size received
        ChunkSize(err: InvalidChunkSize) {
            description("invalid chunk size")
            display("invalid chunk size: {}", err)
            from()
        }
        /// Bad `Content-Length` header
        BadContentLength {
            description("bad content length")
        }
        /// Duplicate `Content-Length` header
        DuplicateContentLength {
            description("duplicate content length")
        }
        /// Connection reset by peer when reading response headers
        ResetOnResponseHeaders {
            description("connection closed prematurely while reading headers")
        }
        /// Connection reset by peer when response body
        ResetOnResponseBody {
            description("connection closed prematurely while reading body")
        }
        /// Response headers are received while we had no request sent yet
        PrematureResponseHeaders {
            description("response headers received \
                before request has been written")
        }
        /// This means connection is busy (over the limit or not yet
        /// established when trying to send request
        Busy {
            description("request can't be sent because connection is busy")
        }
        /// The channel for receiving response is canceled. This probably means
        /// that connection to server was closed before being able to fulfil
        /// the request. But it's unlikely that this error is related to this
        /// request itself.
        Canceled {
            description("request canceled")
        }
        /// Connection closed normally
        ///
        /// This error should be caught by the connection pool and not shown
        /// to the end users
        Closed {
            description("connection closed normally")
        }
        /// Invalid URL specified
        InvalidUrl {
            description("requesting an invalid url")
        }
        /// Error sending a request to a connection pool
        PoolError {
            description("error sending a request to a connection pool")
        }
        /// Request body is too big (happens only in buffered mode)
        ResponseBodyTooLong {
            description("response body too long")
        }
        /// Connection header is invalid
        ConnectionInvalid {
            description("invalid connection header in response")
        }
        /// Unsupported status returned by server
        ///
        /// You have to write your own Codec to handle unsupported status codes
        InvalidStatus {
            description("unsupported status")
        }
        /// Request timed out
        RequestTimeout {
            description("request timed out")
        }
        /// Connection timed out on keep alive
        KeepAliveTimeout {
            description("connection timed out being on keep-alive")
        }
        /// User-supplied error, see `Error::custom`
        Custom(err: Box<::std::error::Error + Send + Sync>) {
            description("custom error")
            display("custom error: {}", err)
            cause(&**err)
        }
    }
}
// A failed channel send means the pool side hung up; the payload itself
// carries no extra information, so it maps to a plain `PoolError`.
impl<T> From<SendError<T>> for ErrorEnum {
    fn from(_: SendError<T>) -> ErrorEnum {
        ErrorEnum::PoolError
    }
}
impl Error {
/// Create an error instance wrapping custom error
pub fn custom<E: Into<Box<::std::error::Error + Send + Sync>>>(err: E)
-> Error
{
Error(ErrorEnum::Custom(err.into()))
}
/// Tries to catch all the conditions where this isn't error
///
/// Currently catches these conditions:
///
/// 1. Connection timed out while being on keep-alive (no inprogress
/// requests)
/// 2. Connection is closed after `Connection: close` header
///
/// More conditions may be added in future. This should be commonly used
/// to skip logging of useless errors.
pub fn is_graceful(&self) -> bool {
match self.0 {
ErrorEnum::Closed => true,
ErrorEnum::KeepAliveTimeout => true,
_ => false,
}
}
}
#[test]
fn send_sync() {
    // Compile-time check: `Error` must be Send + Sync so it can cross
    // thread boundaries (e.g. inside futures executed on a thread pool).
    fn send_sync<T: Send+Sync>(_: T) {}
    send_sync(Error::from(ErrorEnum::Canceled));
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/client/encoder.rs | src/client/encoder.rs | use std::io;
use std::fmt::Display;
#[allow(unused_imports)]
use std::ascii::AsciiExt;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering};
use tk_bufstream::WriteBuf;
use futures::{Future, Async};
use tokio_io::AsyncWrite;
use enums::Version;
use headers::is_close;
use base_serializer::{MessageState, HeaderError};
// Write-side state of the current request, shared with the response parser
// through an `AtomicUsize` (hence the explicit stable discriminants).
pub enum RequestState {
    Empty = 0,          // no request line written yet
    StartedHead = 1,    // request line written, method was HEAD
    StartedNormal = 2,  // request line written, any other method
}
/// This a request writer that you receive in `Codec`
///
/// Methods of this structure ensure that everything you write into a buffer
/// is consistent and valid protocol
pub struct Encoder<S> {
    message: MessageState,           // serializer state machine
    buf: WriteBuf<S>,                // output buffer for the connection
    // TODO(tailhook) we could use smaller atomic, but they are unstable
    state: Arc<AtomicUsize>,         // shared RequestState (see above)
    close_signal: Arc<AtomicBool>,   // set when `Connection: close` is sent
}
/// This structure returned from `Encoder::done` and works as a continuation
/// that should be returned from the future that writes request.
pub struct EncoderDone<S> {
    buf: WriteBuf<S>,  // buffer handed back to the connection machinery
}
/// A future that yields `Encoder` again after buffer has less bytes
///
/// This future is created by `Encoder::wait_flush(x)`
// Fields: the encoder being flushed (taken on completion) and the watermark.
pub struct WaitFlush<S>(Option<Encoder<S>>, usize);
// Crate-internal accessor: unwraps the write buffer out of a finished
// encoder so the connection can reuse it for the next request.
pub fn get_inner<S>(e: EncoderDone<S>) -> WriteBuf<S> {
    e.buf
}
impl<S> Encoder<S> {
    /// Write request line.
    ///
    /// This puts request line into a buffer immediately. If you don't
    /// continue with request it will be sent to the network shortly.
    ///
    /// # Panics
    ///
    /// When request line is already written. It's expected that your request
    /// handler state machine will never call the method twice.
    pub fn request_line(&mut self, method: &str, path: &str, version: Version)
    {
        self.message.request_line(&mut self.buf.out_buf,
            method, path, version);
        // Record whether this is a HEAD request: the response parser needs
        // that to know the response must not carry a body.
        let nstatus = if method.eq_ignore_ascii_case("HEAD") {
            RequestState::StartedHead as usize
        } else {
            RequestState::StartedNormal as usize
        };
        if self.state.swap(nstatus, Ordering::SeqCst) != 0 {
            // Actually this panic happens in `message.request_line` we just
            // duplicate it here for clarity
            panic!("Request line in wrong state");
        }
    }
    /// Add a header to the message.
    ///
    /// Header is written into the output buffer immediately. And is sent
    /// as soon as the next loop iteration
    ///
    /// `Content-Length` header must be sent using the `add_length` method
    /// and `Transfer-Encoding: chunked` must be set with the `add_chunked`
    /// method. These two headers are important for the security of HTTP.
    ///
    /// Note that there is currently no way to use a transfer encoding other
    /// than chunked.
    ///
    /// We return Result here to make implementing proxies easier. In the
    /// application handler it's okay to unwrap the result and to get
    /// a meaningful panic (that is basically an assertion).
    ///
    /// # Panics
    ///
    /// Panics when `add_header` is called in the wrong state.
    pub fn add_header<V: AsRef<[u8]>>(&mut self, name: &str, value: V)
        -> Result<(), HeaderError>
    {
        // A user-supplied `Connection: close` means this connection can't
        // be reused afterwards; signal the connection-level code.
        if name.eq_ignore_ascii_case("Connection") && is_close(value.as_ref())
        {
            self.close_signal.store(true, Ordering::SeqCst);
        }
        self.message.add_header(&mut self.buf.out_buf, name, value.as_ref())
    }
    /// Same as `add_header` but allows value to be formatted directly into
    /// the buffer
    ///
    /// Useful for dates and numeric headers, as well as some strongly typed
    /// wrappers
    pub fn format_header<D: Display>(&mut self, name: &str, value: D)
        -> Result<(), HeaderError>
    {
        // A formatted `Connection` value can't be cheaply inspected for
        // "close", so it is not supported through this method yet.
        if name.eq_ignore_ascii_case("Connection") {
            unimplemented!();
        }
        self.message.format_header(&mut self.buf.out_buf, name, value)
    }
    /// Add a content length to the message.
    ///
    /// The `Content-Length` header is written to the output buffer
    /// immediately. It is checked that there are no other body length
    /// headers present in the message. When the body is sent the length is
    /// validated.
    ///
    /// # Panics
    ///
    /// Panics when `add_length` is called in the wrong state.
    pub fn add_length(&mut self, n: u64)
        -> Result<(), HeaderError>
    {
        self.message.add_length(&mut self.buf.out_buf, n)
    }
    /// Sets the transfer encoding to chunked.
    ///
    /// Writes `Transfer-Encoding: chunked` to the output buffer immediately.
    /// It is assured that there is only one body length header is present
    /// and the body is written in chunked encoding.
    ///
    /// # Panics
    ///
    /// Panics when `add_chunked` is called in the wrong state.
    pub fn add_chunked(&mut self)
        -> Result<(), HeaderError>
    {
        self.message.add_chunked(&mut self.buf.out_buf)
    }
    /// Closes the HTTP header
    ///
    /// Similarly to `add_header()` it's fine to `unwrap()` here, unless you're
    /// doing some proxying.
    ///
    /// # Panics
    ///
    /// Panics when the request is in a wrong state.
    pub fn done_headers(&mut self) -> Result<(), HeaderError> {
        // Requests always support a body, so the serializer must agree.
        self.message.done_headers(&mut self.buf.out_buf)
            .map(|always_support_body| assert!(always_support_body))
    }
    /// Write a chunk of body
    ///
    /// If `add_chunked` was specified before the data will be written as
    /// a chunk (prefixed with length). Otherwise encoder will ensure that
    /// data fits content-length
    ///
    /// # Panics
    ///
    /// Panics when data is larger than what was specified in `add_length` or
    /// when no body is allowed in this kind of request.
    pub fn write_body(&mut self, data: &[u8]) {
        self.message.write_body(&mut self.buf.out_buf, data)
    }
    /// Finish writing request and return `EncoderDone` which should be
    /// returned from the future that writes the request
    ///
    /// # Panics
    ///
    /// Panics when the request is in a wrong state.
    pub fn done(mut self) -> EncoderDone<S> {
        self.message.done(&mut self.buf.out_buf);
        EncoderDone { buf: self.buf }
    }
    /// Flush the data to underlying socket
    ///
    /// If the whole buffer could not be flushed it schedules a wakeup of
    /// the current task when the socket is writable.
    ///
    /// You can find out how many bytes are left using `bytes_buffered()`
    /// method
    pub fn flush(&mut self) -> Result<(), io::Error>
        where S: AsyncWrite
    {
        self.buf.flush()
    }
    /// Returns bytes currently lying in the buffer
    ///
    /// It's possible that these bytes are left from the previous request if
    /// pipelining is enabled.
    pub fn bytes_buffered(&mut self) -> usize {
        self.buf.out_buf.len()
    }
    /// Returns future which yield encoder back when buffer is flushed
    ///
    /// More specifically when `bytes_buffered()` < `watermark`
    pub fn wait_flush(self, watermark: usize) -> WaitFlush<S> {
        WaitFlush(Some(self), watermark)
    }
}
impl<S: AsyncWrite> Future for WaitFlush<S> {
    type Item = Encoder<S>;
    type Error = io::Error;
    /// Flushes the underlying buffer and yields the encoder back once the
    /// amount of unwritten bytes drops below the watermark (`self.1`).
    fn poll(&mut self) -> Result<Async<Encoder<S>>, io::Error> {
        let remaining = {
            let enc = self.0.as_mut().expect("future is polled twice");
            enc.flush()?;
            enc.buf.out_buf.len()
        };
        if remaining >= self.1 {
            // Still too much buffered; flush() has already registered
            // a wakeup for when the socket becomes writable.
            return Ok(Async::NotReady);
        }
        Ok(Async::Ready(self.0.take().unwrap()))
    }
}
// Crate-internal constructor: builds an `Encoder` in its initial state
// over the connection's write buffer, sharing the request-state word and
// close flag with the response parser.
pub fn new<S>(io: WriteBuf<S>,
    state: Arc<AtomicUsize>, close_signal: Arc<AtomicBool>)
    -> Encoder<S>
{
    Encoder {
        message: MessageState::RequestStart,
        buf: io,
        state: state,
        close_signal: close_signal,
    }
}
// Convenience: lets callers use `write!`/`io::copy` to produce the body.
impl<S> io::Write for Encoder<S> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // TODO(tailhook) we might want to propagate error correctly
        // rather than panic
        self.write_body(buf);
        Ok(buf.len())
    }
    fn flush(&mut self) -> io::Result<()> {
        // Data is buffered in memory; actual socket flushing is driven
        // by the connection, so this is a no-op.
        Ok(())
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/client/parser.rs | src/client/parser.rs | use std::sync::Arc;
use std::borrow::Cow;
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering};
use std::str::from_utf8;
#[allow(unused_imports)]
use std::ascii::AsciiExt;
use futures::{Future, Async, Poll};
use httparse;
use tk_bufstream::{ReadBuf, Buf};
use tokio_io::AsyncRead;
use enums::Version;
use client::client::{BodyKind};
use client::errors::ErrorEnum;
use client::recv_mode::Mode;
use headers;
use chunked;
use body_parser::BodyProgress;
use client::encoder::RequestState;
use client::{Codec, Error, Head};
/// Number of headers to allocate on a stack
/// (parsing falls back to a heap allocation when this is exceeded)
const MIN_HEADERS: usize = 16;
/// A hard limit on the number of headers
const MAX_HEADERS: usize = 1024;
// Parser phase: first the response head, then the body.
#[derive(Debug, Clone)]
enum State {
    // Waiting for the full header section; shares the request state and
    // close flag with the request encoder.
    Headers {
        request_state: Arc<AtomicUsize>,
        close_signal: Arc<AtomicBool>,
    },
    // Reading the body according to the negotiated receive mode.
    Body {
        mode: Mode,
        progress: BodyProgress,
    },
}
// Future that parses one HTTP response off the connection's read buffer.
pub struct Parser<S, C: Codec<S>> {
    io: Option<ReadBuf<S>>,  // taken out when the response completes
    codec: C,                // user codec receiving headers and body
    close: bool,             // response asked to close the connection
    state: State,            // current parsing phase
}
// Scans response headers and returns the body kind, the accumulated
// `Connection` header value (if any), and whether the connection must be
// closed after this response.
fn scan_headers<'x>(is_head: bool, code: u16, headers: &'x [httparse::Header])
    -> Result<(BodyKind, Option<Cow<'x, str>>, bool), ErrorEnum>
{
    // Implements the body length algorithm for responses:
    // http://httpwg.github.io/specs/rfc7230.html#message.body.length
    //
    // Algorithm:
    //
    // 1. For HEAD, 1xx, 204, 304 -- no body
    // 2. If last transfer encoding is chunked -> Chunked
    // 3. If Content-Length -> Fixed
    // 4. Else Eof
    use client::client::BodyKind::*;
    use client::errors::ErrorEnum::ConnectionInvalid;
    let mut has_content_length = false;
    let mut connection = None::<Cow<_>>;
    let mut close = false;
    if is_head || (code > 100 && code < 200) || code == 204 || code == 304 {
        // Bodyless responses: only the `Connection` header matters.
        for header in headers.iter() {
            // TODO(tailhook) check for transfer encoding and content-length
            if header.name.eq_ignore_ascii_case("Connection") {
                let strconn = from_utf8(header.value)
                    .map_err(|_| ConnectionInvalid)?.trim();
                // Fold multiple `Connection` headers into one
                // comma-separated value.
                connection = match connection {
                    Some(x) => Some(x + ", " + strconn),
                    None => Some(strconn.into()),
                };
                if header.value.split(|&x| x == b',').any(headers::is_close) {
                    close = true;
                }
            }
        }
        return Ok((Fixed(0), connection, close))
    }
    let mut result = BodyKind::Eof;
    for header in headers.iter() {
        if header.name.eq_ignore_ascii_case("Transfer-Encoding") {
            if let Some(enc) = header.value.split(|&x| x == b',').last() {
                if headers::is_chunked(enc) {
                    if has_content_length {
                        // override but don't allow keep-alive
                        close = true;
                    }
                    result = Chunked;
                }
            }
        } else if header.name.eq_ignore_ascii_case("Content-Length") {
            if has_content_length {
                // duplicate content_length
                return Err(ErrorEnum::DuplicateContentLength);
            }
            has_content_length = true;
            if result != Chunked {
                let s = from_utf8(header.value)
                    .map_err(|_| ErrorEnum::BadContentLength)?;
                let len = s.parse()
                    .map_err(|_| ErrorEnum::BadContentLength)?;
                result = Fixed(len);
            } else {
                // transfer-encoding has preference and don't allow keep-alive
                close = true;
            }
        } else if header.name.eq_ignore_ascii_case("Connection") {
            let strconn = from_utf8(header.value)
                .map_err(|_| ConnectionInvalid)?.trim();
            connection = match connection {
                Some(x) => Some(x + ", " + strconn),
                None => Some(strconn.into()),
            };
            if header.value.split(|&x| x == b',').any(headers::is_close) {
                close = true;
            }
        }
    }
    Ok((result, connection, close))
}
/// Builds the body-parsing progress tracker for a response, rejecting
/// fixed-size bodies that exceed the buffered-mode limit.
fn new_body(mode: BodyKind, recv_mode: Mode)
    -> Result<BodyProgress, ErrorEnum>
{
    use super::client::BodyKind as B;
    use super::recv_mode::Mode as M;
    use client::errors::ErrorEnum::*;
    use body_parser::BodyProgress as P;
    // TODO(tailhook) check size < usize
    // Reject oversized fixed bodies up front when buffering everything.
    if let (&B::Fixed(x), &M::Buffered(b)) = (&mode, &recv_mode) {
        if x > b as u64 {
            return Err(ResponseBodyTooLong);
        }
    }
    match mode {
        B::Fixed(x) => Ok(P::Fixed(x as usize)),
        B::Chunked => Ok(P::Chunked(chunked::State::new())),
        B::Eof => Ok(P::Eof),
    }
}
// Attempts to parse one complete response head from `buffer`.
// Returns Ok(None) when more bytes are needed; otherwise consumes the
// header bytes and returns the body-reading state plus the close flag.
fn parse_headers<S, C: Codec<S>>(
    buffer: &mut Buf, codec: &mut C, is_head: bool)
    -> Result<Option<(State, bool)>, Error>
{
    let (mode, body, close, bytes) = {
        let mut vec;
        let mut headers = [httparse::EMPTY_HEADER; MIN_HEADERS];
        let (ver, code, reason, headers, bytes) = {
            let mut raw = httparse::Response::new(&mut headers);
            let mut result = raw.parse(&buffer[..]);
            // Retry with a larger heap-allocated header array when the
            // stack-allocated one is too small.
            if matches!(result, Err(httparse::Error::TooManyHeaders)) {
                vec = vec![httparse::EMPTY_HEADER; MAX_HEADERS];
                raw = httparse::Response::new(&mut vec);
                result = raw.parse(&buffer[..]);
            }
            match result.map_err(ErrorEnum::Header)? {
                httparse::Status::Complete(bytes) => {
                    // parse() returned Complete, so these are all set
                    let ver = raw.version.unwrap();
                    let code = raw.code.unwrap();
                    (ver, code, raw.reason.unwrap(), raw.headers, bytes)
                }
                _ => return Ok(None),
            }
        };
        let (body, conn, close) = try!(scan_headers(is_head, code, &headers));
        let head = Head {
            version: if ver == 1
                { Version::Http11 } else { Version::Http10 },
            code: code,
            reason: reason,
            headers: headers,
            body_kind: body,
            connection_header: conn,
            // For HTTP/1.0 we could implement Connection: Keep-Alive
            // but hopefully it's rare enough to ignore nowadays
            connection_close: close || ver == 0,
        };
        let mode = codec.headers_received(&head)?;
        (mode, body, close, bytes)
    };
    // Drop the header bytes; only body bytes remain in the buffer.
    buffer.consume(bytes);
    Ok(Some((
        State::Body {
            mode: mode.mode,
            progress: new_body(body, mode.mode)?,
        },
        close,
    )))
}
impl<S, C: Codec<S>> Parser<S, C> {
    /// Creates a parser for the next response on this connection, sharing
    /// the request-state word and close flag with the request encoder.
    pub fn new(io: ReadBuf<S>, codec: C,
        request_state: Arc<AtomicUsize>, close_signal: Arc<AtomicBool>)
        -> Parser<S, C>
    {
        Parser {
            io: Some(io),
            codec: codec,
            close: false,
            state: State::Headers {
                request_state: request_state,
                close_signal: close_signal,
            },
        }
    }
    /// Core state machine: reads from the socket and parses headers, then
    /// the body. Returns Ready(()) when the whole response is consumed.
    fn read_and_parse(&mut self) -> Poll<(), Error>
        where S: AsyncRead
    {
        use self::State::*;
        use client::recv_mode::Mode::*;
        let mut io = self.io.as_mut().expect("buffer is still here");
        // Phase 1: while in `Headers`, read until a full header section is
        // parsed and switch `self.state` to `Body`.
        self.state = if let Headers {
            ref request_state,
            ref close_signal,
        } = self.state
        {
            let state;
            loop {
                if io.read().map_err(ErrorEnum::Io)? == 0 {
                    if io.done() {
                        // EOF before the header section was complete
                        return Err(ErrorEnum::ResetOnResponseHeaders.into());
                    } else {
                        return Ok(Async::NotReady);
                    }
                }
                let reqs = request_state.load(Ordering::SeqCst);
                if reqs == RequestState::Empty as usize {
                    // Server responded before we wrote any request line
                    return Err(ErrorEnum::PrematureResponseHeaders.into());
                }
                let is_head = reqs == RequestState::StartedHead as usize;
                match parse_headers(&mut io.in_buf, &mut self.codec, is_head)? {
                    None => continue,
                    Some((body, close)) => {
                        if close {
                            // Propagate `Connection: close` to the encoder
                            // side so no further requests are pipelined.
                            close_signal.store(true, Ordering::SeqCst);
                            self.close = true;
                        }
                        state = body;
                        break
                    },
                }
            };
            state
        } else {
            // TODO(tailhook) optimize this
            self.state.clone()
        };
        // Phase 2: feed body bytes to the codec until the body is complete
        // and the codec has consumed everything.
        loop {
            match self.state {
                Headers {..} => unreachable!(),
                Body { ref mode, ref mut progress } => {
                    progress.parse(&mut io).map_err(ErrorEnum::ChunkSize)?;
                    let (bytes, done) = progress.check_buf(&io);
                    let operation = if done {
                        Some(self.codec.data_received(
                            &io.in_buf[..bytes], true)?)
                    } else if io.done() {
                        // If it's ReadUntilEof it will be detected in
                        // check_buf so we can safely put error here
                        return Err(ErrorEnum::ResetOnResponseBody.into());
                    } else if matches!(*mode, Progressive(x) if x <= bytes) {
                        // Enough bytes buffered to deliver a progressive chunk
                        Some(self.codec.data_received(
                            &io.in_buf[..bytes], false)?)
                    } else {
                        None
                    };
                    match operation {
                        Some(Async::Ready(consumed)) => {
                            progress.consume(&mut io, consumed);
                            if done && consumed == bytes {
                                return Ok(Async::Ready(()));
                            }
                        }
                        Some(Async::NotReady) => {
                            if matches!(*mode, Progressive(x) if x > bytes) {
                                return Ok(Async::NotReady);
                            }
                        }
                        None => {} // Read more
                    }
                }
            }
            if io.read().map_err(ErrorEnum::Io)? == 0 {
                if io.done() {
                    continue;
                } else {
                    return Ok(Async::NotReady);
                }
            }
        }
    }
}
impl<S: AsyncRead, C: Codec<S>> Future for Parser<S, C> {
    type Item = Option<ReadBuf<S>>;
    type Error = Error;
    /// Resolves to `None` if the response carried `Connection: close`
    /// (the connection must not be reused); otherwise yields the read
    /// buffer back so the caller can issue the next request.
    fn poll(&mut self) -> Poll<Option<ReadBuf<S>>, Error> {
        if let Async::Ready(()) = self.read_and_parse()? {
            let io = self.io.take().expect("buffer still here");
            let reusable = !self.close;
            Ok(Async::Ready(if reusable { Some(io) } else { None }))
        } else {
            Ok(Async::NotReady)
        }
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/client/client.rs | src/client/client.rs | use futures::sink::Sink;
use futures::future::FutureResult;
use futures::{Async, AsyncSink, Future, IntoFuture};
use client::{Error, Encoder, EncoderDone, Head, RecvMode};
use client::errors::ErrorEnum;
use client::buffered;
// How the length of a response body is determined.
#[derive(Debug, Copy, Clone, PartialEq, Eq)]
pub enum BodyKind {
    Fixed(u64),  // Content-Length bytes
    Chunked,     // Transfer-Encoding: chunked
    Eof,         // body runs until the connection closes
}
/// This is a low-level interface to the http client
///
/// Your requests starts by sending a codec into a connection Sink or a
/// connection pool. And then it's driven by a callbacks here.
///
/// If you don't have any special needs you might want to use
/// `client::buffered::Buffered` codec implementation instead of implementing
/// this trait manually.
pub trait Codec<S> {
    /// Future that `start_write()` returns
    type Future: Future<Item=EncoderDone<S>, Error=Error>;
    /// Start writing a request
    ///
    /// This method is called when there is an open connection and there
    /// is some space in the output buffer.
    ///
    /// Everything you write into a buffer might be flushed to the network
    /// immediately (or as fast as you yield to main loop). On the other
    /// hand we might buffer/pipeline multiple requests at once.
    fn start_write(&mut self, e: Encoder<S>)
        -> Self::Future;
    /// Received headers of a response
    ///
    /// At this point we already extracted all the headers and other data
    /// that we need to ensure correctness of the protocol. If you need
    /// to handle some data from the headers you need to store them somewhere
    /// (for example on `self`) for further processing.
    ///
    /// Note: headers might be received after `request_line` is written, but
    /// we don't ensure that request is fully written. You should write the
    /// state machine as if request and response might be streamed at the
    /// same time (including request headers (!) if your `start_write` future
    /// writes them incrementally)
    fn headers_received(&mut self, headers: &Head) -> Result<RecvMode, Error>;
    /// Chunk of the response body received
    ///
    /// `end` equals to `true` for the last chunk of the data.
    ///
    /// Method returns `Async::Ready(x)` to denote that it has consumed `x`
    /// bytes. If there are some bytes left in the buffer they will be passed
    /// again on the call.
    ///
    /// If the response is empty, or last chunk arrives later and it's empty
    /// we call `c.data_received(b"", true)` on every wakeup,
    /// until `Async::Ready(0)` is returned (this helps to drive future that
    /// might complete on request completion without spawning another ones,
    /// but note that next request can't start reading in the meantime).
    ///
    /// Protocol panics if returned number of bytes larger than `data.len()`.
    ///
    fn data_received(&mut self, data: &[u8], end: bool)
        -> Result<Async<usize>, Error>;
}
// Forwarding impl so boxed codecs (as used by connection pools) satisfy
// the `Codec` trait; every method delegates to the boxed value.
impl<S, F> Codec<S> for Box<Codec<S, Future=F>>
    where F: Future<Item=EncoderDone<S>, Error=Error>
{
    type Future = F;
    fn start_write(&mut self, e: Encoder<S>) -> F {
        (**self).start_write(e)
    }
    fn headers_received(&mut self, headers: &Head) -> Result<RecvMode, Error> {
        (**self).headers_received(headers)
    }
    fn data_received(&mut self, data: &[u8], end: bool)
        -> Result<Async<usize>, Error>
    {
        (**self).data_received(data, end)
    }
}
// Same forwarding impl for `Send`-able boxed codecs (needed when codecs
// cross thread boundaries).
impl<S, F> Codec<S> for Box<Codec<S, Future=F>+Send>
    where F: Future<Item=EncoderDone<S>, Error=Error>
{
    type Future = F;
    fn start_write(&mut self, e: Encoder<S>) -> F {
        (**self).start_write(e)
    }
    fn headers_received(&mut self, headers: &Head) -> Result<RecvMode, Error> {
        (**self).headers_received(headers)
    }
    fn data_received(&mut self, data: &[u8], end: bool)
        -> Result<Async<usize>, Error>
    {
        (**self).data_received(data, end)
    }
}
/// A marker trait that applies to a Sink that is essentially a HTTP client
///
/// It may apply to a single connection or a connection pool. For a single
/// connection the `client::Proto` implements this interface.
///
/// We expect a boxed codec here because we assume that different kinds of
/// requests may be executed through the same connection pool. If you want to
/// avoid boxing or have fine grained control, use `Proto` (which is a `Sink`)
/// directly.
///
pub trait Client<S, F>: Sink<SinkItem=Box<Codec<S, Future=F>>>
    where F: Future<Item=EncoderDone<S>, Error=Error>,
{
    /// Simple fetch helper
    fn fetch_url(&mut self, url: &str)
        -> Box<Future<Item=buffered::Response, Error=Error>>
        where <Self as Sink>::SinkError: Into<Error>;
}
// Blanket impl: anything that accepts boxed buffered codecs as sink items
// gets the `fetch_url` convenience method.
impl<T, S> Client<S, FutureResult<EncoderDone<S>, Error>> for T
    where T: Sink<SinkItem=Box<
        Codec<S, Future=FutureResult<EncoderDone<S>, Error>>
    >>,
{
    fn fetch_url(&mut self, url: &str)
        -> Box<Future<Item=buffered::Response, Error=Error>>
        where <Self as Sink>::SinkError: Into<Error>
    {
        let url = match url.parse() {
            Ok(u) => u,
            Err(_) => {
                return Box::new(Err(ErrorEnum::InvalidUrl.into())
                    .into_future());
            }
        };
        // `Buffered` performs the request; `receiver` resolves with the
        // fully buffered response (or the error) when it's finished.
        let (codec, receiver) = buffered::Buffered::get(url);
        match self.start_send(Box::new(codec)) {
            Ok(AsyncSink::NotReady(_)) => {
                // Connection (or pool) has no free slot right now
                Box::new(Err(ErrorEnum::Busy.into()).into_future())
            }
            Ok(AsyncSink::Ready) => {
                // A dropped sender means the connection died before the
                // request could be fulfilled.
                Box::new(receiver
                    .map_err(|_| ErrorEnum::Canceled.into())
                    .and_then(|res| res))
            }
            Err(e) => {
                Box::new(Err(e.into()).into_future())
            }
        }
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/client/buffered.rs | src/client/buffered.rs | //! Simple to use wrappers for dealing with fully buffered requests
//!
//! By "fully buffered" I mean two things:
//!
//! * No request or response streaming
//! * All headers and body are allocated on the heap
//!
//! Raw interface allows more granular control to make things more efficient,
//! but requires more boilerplate. You can mix and match different
//! styles on single HTTP connection.
//!
use url::Url;
use futures::Async;
use futures::future::{FutureResult, ok};
use futures::sync::oneshot::{channel, Sender, Receiver};
use enums::Status;
use enums::Version;
use client::{Error, Codec, Encoder, EncoderDone, Head, RecvMode};
use client::errors::ErrorEnum;
/// Fully buffered (in-memory) writing request and reading response
///
/// This codec should be used when you don't have any special needs
pub struct Buffered {
    // HTTP method serialized into the request line ("GET" for `get()`).
    method: &'static str,
    // Target url; path/query and host are taken from here when serializing.
    url: Url,
    // Oneshot used to hand the finished response (or error) to the caller;
    // consumed (`take()`) exactly once when the body arrives.
    sender: Option<Sender<Result<Response, Error>>>,
    // Partially built response, filled in `headers_received`, completed
    // and sent in `data_received`.
    response: Option<Response>,
    // Upper bound on body size passed to `RecvMode::buffered`.
    max_response_length: usize,
}
#[derive(Debug)]
/// A buffered response that holds the body as a contiguous chunk of data
pub struct Response {
    // Parsed status code (only known/supported statuses reach here).
    status: Status,
    // Owned copies of the response headers, in received order.
    headers: Vec<(String, Vec<u8>)>,
    // Complete response body.
    body: Vec<u8>,
}
impl Response {
    /// Get response status
    pub fn status(&self) -> Status {
        self.status
    }
    /// Get response headers
    ///
    /// Headers are returned in the order they were received; duplicates
    /// are not merged.
    pub fn headers(&self) -> &[(String, Vec<u8>)] {
        &self.headers
    }
    /// Get response body
    pub fn body(&self) -> &[u8] {
        &self.body
    }
}
impl<S> Codec<S> for Buffered {
    type Future = FutureResult<EncoderDone<S>, Error>;
    /// Serialize the request line and headers into the output buffer.
    fn start_write(&mut self, mut e: Encoder<S>) -> Self::Future {
        // Include the query string in the request-target: the previous code
        // sent only `self.url.path()`, silently dropping `?key=value` from
        // any url passed to `fetch_url` / `Buffered::get`.
        let target = match self.url.query() {
            Some(query) => format!("{}?{}", self.url.path(), query),
            None => self.url.path().to_string(),
        };
        e.request_line(self.method, &target, Version::Http11);
        self.url.host_str().map(|x| {
            // NOTE(review): only the host name is sent; a non-default port
            // in the url is not reflected in the `Host` header — confirm
            // whether callers only use default-port urls.
            e.add_header("Host", x).unwrap();
        });
        e.done_headers().unwrap();
        ok(e.done())
    }
    /// Store status and headers and ask for a fully buffered body.
    fn headers_received(&mut self, headers: &Head) -> Result<RecvMode, Error> {
        let status = headers.status()
            .ok_or(ErrorEnum::InvalidStatus)?;
        self.response = Some(Response {
            status: status,
            // Make owned copies: `Head` only borrows from the input buffer.
            headers: headers.headers().map(|(k, v)| {
                (k.to_string(), v.to_vec())
            }).collect(),
            body: Vec::new(),
        });
        Ok(RecvMode::buffered(self.max_response_length))
    }
    /// Receive the complete body and resolve the caller's oneshot.
    fn data_received(&mut self, data: &[u8], end: bool)
        -> Result<Async<usize>, Error>
    {
        // In buffered mode the whole body is delivered as one final chunk.
        assert!(end);
        let mut response = self.response.take().unwrap();
        response.body = data.to_vec();
        // If the receiver was dropped nobody wants the response; that is
        // not an error, just log it.
        self.sender.take().unwrap().send(Ok(response))
            .map_err(|_| debug!("Unused HTTP response")).ok();
        Ok(Async::Ready(data.len()))
    }
}
impl Buffered {
    /// Fetch data from url using GET method, fully buffered
    ///
    /// Returns the codec (to be sent into a `Client`/`Proto` sink) and a
    /// oneshot receiver that resolves to the buffered response.
    pub fn get(url: Url) -> (Buffered, Receiver<Result<Response, Error>>) {
        let (tx, rx) = channel();
        (Buffered {
            method: "GET",
            url: url,
            sender: Some(tx),
            // Default body cap: 10 MiB.
            max_response_length: 10_485_760,
            response: None,
        },
        rx)
    }
    /// Set max response length for this buffered reader
    pub fn max_response_length(&mut self, value: usize) {
        self.max_response_length = value;
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/client/mod.rs | src/client/mod.rs | //! The HTTP/1.x client protocol implementation
//!
mod client;
mod config;
mod encoder;
mod errors;
mod head;
mod parser;
mod proto;
mod recv_mode;
pub mod buffered;
pub use self::errors::Error;
pub use self::client::{Client, Codec};
pub use self::encoder::{Encoder, EncoderDone, WaitFlush};
pub use self::proto::{Proto};
use std::borrow::Cow;
use std::time::Duration;
use httparse::Header;
use self::client::BodyKind;
use {Version};
/// Fine-grained configuration of the HTTP connection
#[derive(Debug, Clone)]
pub struct Config {
    // Maximum number of requests queued/pipelined on one connection.
    inflight_request_limit: usize,
    // Initial capacity preallocated for the in-flight request queue.
    inflight_request_prealloc: usize,
    // An idle connection is considered dead after this much inactivity.
    keep_alive_timeout: Duration,
    // New requests are not pipelined once the oldest in-flight request
    // has been waiting longer than this.
    safe_pipeline_timeout: Duration,
    // Hard deadline for a single request/response exchange.
    max_request_timeout: Duration,
}
/// A borrowed structure that represents response headers
///
/// It's passed to `Codec::headers_received` and you are free to store or
/// discard any needed fields and headers from it.
///
#[derive(Debug)]
pub struct Head<'a> {
    version: Version,
    // Raw numeric status code; may be a code `Status` doesn't know about.
    code: u16,
    // Reason phrase exactly as received (may be empty or mismatched).
    reason: &'a str,
    // All headers as parsed, including hop-by-hop ones.
    headers: &'a [Header<'a>],
    // How the body is framed; presumably derived from Content-Length /
    // Transfer-Encoding during parsing — defined elsewhere.
    body_kind: BodyKind,
    // Value of the `Connection` header, if present; used to strip
    // hop-by-hop headers in `Head::headers()`.
    connection_header: Option<Cow<'a, str>>,
    connection_close: bool,
}
/// This type is returned from `headers_received` handler of either
/// client or server protocol handler
///
/// The marker is used to denote whether you want to have the whole request
/// buffered for you or read chunk by chunk.
///
/// The `Progressive` (chunk by chunk) mode is mostly useful for proxy servers.
/// Or it may be useful if your handler is able to parse data without holding
/// everything in the memory.
///
/// Otherwise, it's best to use `Buffered` mode (for example, comparing with
/// using your own buffering). We do our best to optimize it for you.
#[derive(Debug, Clone)]
pub struct RecvMode {
    // Private: constructed only through `buffered()` / `progressive()`.
    mode: recv_mode::Mode,
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/client/head.rs | src/client/head.rs | #[allow(unused_imports)]
use std::ascii::AsciiExt;
use std::slice::Iter as SliceIter;
use httparse::Header;
use enums::{Status};
use client::Head;
/// Iterator over all meaningful headers for the response
///
/// This iterator is created by `Head::headers`. And iterates over all
/// headers except hop-by-hop ones.
///
/// Note: duplicate headers are not glued together neither they are sorted
pub struct HeaderIter<'a> {
    // Needed to consult the `Connection` header for hop-by-hop names.
    head: &'a Head<'a>,
    // Underlying iterator over the raw parsed headers.
    iter: SliceIter<'a, Header<'a>>,
}
impl<'a> Head<'a> {
    /// Returns status if it is one of the supported statuses otherwise None
    ///
    /// Note: this method does not consider "reason" string at all just
    /// status code. Which is fine as specification states.
    pub fn status(&self) -> Option<Status> {
        Status::from(self.code)
    }
    /// Returns raw status code and reason as received
    ///
    /// This returns something even if `status()` returned `None`.
    ///
    /// Note: the reason string may not match the status code or may even be
    /// an empty string.
    pub fn raw_status(&self) -> (u16, &'a str) {
        (self.code, self.reason)
    }
    /// Iterator over the headers of HTTP request
    ///
    /// This iterator strips the following kinds of headers:
    ///
    /// 1. Hop-by-hop headers (`Connection` itself, and ones it enumerates)
    /// 2. `Content-Length` and `Transfer-Encoding`
    ///
    /// You may use `all_headers()` if you really need to access to all of
    /// them (mostly useful for debugging purposes). But you may want to
    /// consider:
    ///
    /// 1. Payload size can be fetched using `body_length()` method. Note:
    ///    this also includes cases where length is implicitly set to zero.
    /// 2. `Connection` header might be discovered with `connection_close()`
    ///    or `connection_header()`
    pub fn headers(&self) -> HeaderIter {
        HeaderIter {
            head: self,
            iter: self.headers.iter(),
        }
    }
    /// All headers of HTTP request
    ///
    /// Unlike `self.headers()` this does include hop-by-hop headers. This
    /// method is here just for completeness, you shouldn't need it.
    pub fn all_headers(&self) -> &'a [Header<'a>] {
        self.headers
    }
}
impl<'a> Iterator for HeaderIter<'a> {
type Item = (&'a str, &'a [u8]);
fn next(&mut self) -> Option<(&'a str, &'a [u8])> {
while let Some(header) = self.iter.next() {
if header.name.eq_ignore_ascii_case("Connection") ||
header.name.eq_ignore_ascii_case("Transfer-Encoding") ||
header.name.eq_ignore_ascii_case("Content-Length")
{
continue;
}
if let Some(ref conn) = self.head.connection_header {
let mut conn_headers = conn.split(',').map(|x| x.trim());
if conn_headers.any(|x| x.eq_ignore_ascii_case(header.name)) {
continue;
}
}
return Some((header.name, header.value));
}
return None;
}
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/client/recv_mode.rs | src/client/recv_mode.rs | use client::RecvMode;
// Internal representation behind the public `RecvMode` wrapper; constructed
// only via `RecvMode::buffered` / `RecvMode::progressive`.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Mode {
    // Collect the whole body in memory; payload is the maximum body size.
    Buffered(usize),
    // Deliver data chunk by chunk; payload is the minimum-bytes hint.
    Progressive(usize),
}
impl RecvMode {
    /// Download whole message body (request or response) into the memory.
    ///
    /// The argument is maximum size of the body. The Buffered variant
    /// works equally well for Chunked encoding and for read-util-end-of-stream
    /// mode of HTTP/1.0, so sometimes you can't know the size of the request
    /// in advance. Note this is just an upper limit it's neither buffer size
    /// nor *minimum* size of the body.
    ///
    /// Note the buffer size is asserted on if it's bigger than max buffer size
    pub fn buffered(maximum_size_of_body: usize) -> RecvMode {
        RecvMode {
            mode: Mode::Buffered(maximum_size_of_body),
        }
    }
    /// Fetch data chunk-by-chunk.
    ///
    /// The parameter denotes minimum number of bytes that may be passed
    /// to the protocol handler. This is for performance tuning (i.e. less
    /// wake-ups of protocol parser). But it's not an input buffer size. The
    /// use of `Progressive(1)` is perfectly okay (for example if you use http
    /// request body as a persistent connection for sending multiple messages
    /// on-demand)
    pub fn progressive(min_bytes_hint: usize) -> RecvMode {
        RecvMode {
            mode: Mode::Progressive(min_bytes_hint),
        }
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/client/proto.rs | src/client/proto.rs | use std::collections::VecDeque;
use std::cmp::max;
use std::mem;
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::atomic::{AtomicUsize, AtomicBool, Ordering};
use std::time::Instant;
use tk_bufstream::{IoBuf, WriteBuf, ReadBuf};
use tokio_core::net::TcpStream;
use tokio_core::reactor::{Handle, Timeout};
use tokio_io::{AsyncRead, AsyncWrite};
use futures::{Future, AsyncSink, Async, Sink, StartSend, Poll};
use client::parser::Parser;
use client::encoder::{self, get_inner};
use client::errors::ErrorEnum;
use client::{Codec, Error, Config};
// State of the write half of the connection.
enum OutState<S, F> {
    // Nothing being serialized; the Instant is when we became idle
    // (used for the keep-alive timeout).
    Idle(WriteBuf<S>, Instant),
    // A codec's serialization future is in flight; the Instant is when
    // the write started (used for the request timeout).
    Write(F, Instant),
    // Transient placeholder used during `mem::replace`; never observed.
    Void,
}
// State of the read half of the connection.
enum InState<S, C: Codec<S>> {
    // No response expected; the Instant is when we became idle.
    Idle(ReadBuf<S>, Instant),
    // A response is being parsed; the Instant is when the request was
    // originally queued.
    Read(Parser<S, C>, Instant),
    // Transient placeholder used during `mem::replace`; never observed.
    Void,
}
// A request whose bytes were (or are being) written, awaiting its response.
struct Waiting<C> {
    codec: C,
    state: Arc<AtomicUsize>, // TODO(tailhook) AtomicU8
    // When the request was accepted; drives pipeline/request timeouts.
    queued_at: Instant,
}
// Protocol core without the timer plumbing; `Proto` wraps it and adds
// timeout handling.
pub struct PureProto<S, C: Codec<S>> {
    writing: OutState<S, C::Future>,
    // FIFO of requests written (or being written) whose responses are
    // still pending; front is the oldest.
    waiting: VecDeque<Waiting<C>>,
    reading: InState<S, C>,
    // Shared flag set when the connection must not be reused.
    close: Arc<AtomicBool>,
    config: Arc<Config>,
}
/// A low-level HTTP/1.x client protocol handler
///
/// Note, most of the time you need some reconnection facility and/or
/// connection pooling on top of this interface
pub struct Proto<S, C: Codec<S>> {
    // The actual protocol state machine.
    proto: PureProto<S, C>,
    // Reactor handle used to (re)create the timeout below.
    handle: Handle,
    // Timer armed to the nearest deadline reported by `get_timeout()`.
    timeout: Timeout,
}
impl<S, C: Codec<S>> Proto<S, C> {
    /// Create a new protocol implementation from a TCP connection and a config
    ///
    /// You should use this protocol as a `Sink`
    pub fn new(conn: S, handle: &Handle, cfg: &Arc<Config>) -> Proto<S, C>
        where S: AsyncRead + AsyncWrite
    {
        // Split the stream into independently owned read/write halves.
        let (cout, cin) = IoBuf::new(conn).split();
        Proto {
            proto: PureProto {
                // Both halves start idle "now" so keep-alive accounting
                // begins at connection creation.
                writing: OutState::Idle(cout, Instant::now()),
                waiting: VecDeque::with_capacity(
                    cfg.inflight_request_prealloc),
                reading: InState::Idle(cin, Instant::now()),
                close: Arc::new(AtomicBool::new(false)),
                config: cfg.clone(),
            },
            handle: handle.clone(),
            // Initial timer: plain keep-alive; rearmed on every poll.
            timeout: Timeout::new(cfg.keep_alive_timeout, &handle)
                .expect("can always create a timeout"),
        }
    }
}
impl<C: Codec<TcpStream>> Proto<TcpStream, C> {
    /// A convenience method to establish connection and create a protocol
    /// instance
    pub fn connect_tcp(addr: SocketAddr, cfg: &Arc<Config>, handle: &Handle)
        -> Box<Future<Item=Self, Error=Error>>
    {
        // Clone so the returned future is 'static (owns its captures).
        let cfg = cfg.clone();
        let handle = handle.clone();
        Box::new(
            TcpStream::connect(&addr, &handle)
            .map(move |c| Proto::new(c, &handle, &cfg))
            .map_err(ErrorEnum::Io).map_err(Error::from))
        as Box<Future<Item=_, Error=_>>
    }
}
impl<S: AsyncRead + AsyncWrite, C: Codec<S>> PureProto<S, C> {
    /// Drive the write half one step; returns `Ok(true)` if progress was
    /// made (so the caller should loop again).
    fn poll_writing(&mut self) -> Result<bool, Error> {
        let mut progress = false;
        // Take the state out (leaving `Void`) so we can move `io`/`fut`.
        self.writing = match mem::replace(&mut self.writing, OutState::Void) {
            OutState::Idle(mut io, time) => {
                io.flush().map_err(ErrorEnum::Io)?;
                // Fully idle connection past keep-alive: report it dead.
                if time.elapsed() > self.config.keep_alive_timeout &&
                    self.waiting.len() == 0 &&
                    matches!(self.reading, InState::Idle(..))
                {
                    return Err(ErrorEnum::KeepAliveTimeout.into());
                }
                OutState::Idle(io, time)
            }
            // Note we break connection if serializer errored, because
            // we don't actually know if connection can be reused
            // safely in this case
            OutState::Write(mut fut, start) => match fut.poll()? {
                Async::Ready(done) => {
                    let mut io = get_inner(done);
                    io.flush().map_err(ErrorEnum::Io)?;
                    progress = true;
                    OutState::Idle(io, Instant::now())
                }
                Async::NotReady => OutState::Write(fut, start),
            },
            OutState::Void => unreachable!(),
        };
        return Ok(progress);
    }
    /// Drive the read half one step; returns `Ok(true)` if progress was
    /// made (so the caller should loop again).
    fn poll_reading(&mut self) -> Result<bool, Error> {
        let (state, progress) =
            match mem::replace(&mut self.reading, InState::Void) {
                InState::Idle(mut io, time) => {
                    // Oldest queued request gets the parser next.
                    if let Some(w) = self.waiting.pop_front() {
                        let Waiting { codec: nr, state, queued_at } = w;
                        let parser = Parser::new(io, nr,
                            state, self.close.clone());
                        (InState::Read(parser, queued_at), true)
                    } else {
                        // This serves for two purposes:
                        // 1. Detect connection has been closed (i.e.
                        //    we need to call `poll_read()` every time)
                        // 2. Detect premature bytes (we haven't sent
                        //    a request yet, but there is a response)
                        if io.read().map_err(ErrorEnum::Io)? != 0 {
                            return Err(
                                ErrorEnum::PrematureResponseHeaders.into());
                        }
                        if io.done() {
                            return Err(ErrorEnum::Closed.into());
                        }
                        (InState::Idle(io, time), false)
                    }
                }
                InState::Read(mut parser, time) => {
                    match parser.poll()? {
                        Async::NotReady => {
                            (InState::Read(parser, time), false)
                        }
                        Async::Ready(Some(io)) => {
                            // after request is done, rearm keep-alive
                            // timeout
                            match self.writing {
                                OutState::Idle(_, ref mut time) => {
                                    *time = Instant::now();
                                }
                                _ => {}
                            }
                            (InState::Idle(io, Instant::now()), true)
                        }
                        // Parser finished without yielding the stream back:
                        // the connection is gone.
                        Async::Ready(None) => {
                            return Err(ErrorEnum::Closed.into());
                        }
                    }
                }
                InState::Void => unreachable!(),
            };
        self.reading = state;
        Ok(progress)
    }
}
impl<S: AsyncRead + AsyncWrite, C: Codec<S>> Sink for Proto<S, C> {
    type SinkItem = C;
    type SinkError = Error;
    /// Try to queue a request; drives the inner protocol until it stalls
    /// and rearms the deadline timer if it moved.
    fn start_send(&mut self, mut item: Self::SinkItem)
        -> StartSend<Self::SinkItem, Self::SinkError>
    {
        let old_timeout = self.proto.get_timeout();
        // Keep pumping the protocol until the item is accepted or no
        // further progress is possible.
        let res = loop {
            item = match self.proto.start_send(item)? {
                AsyncSink::Ready => break AsyncSink::Ready,
                AsyncSink::NotReady(item) => item,
            };
            let wr = self.proto.poll_writing()?;
            let rd = self.proto.poll_reading()?;
            if !wr && !rd {
                break AsyncSink::NotReady(item);
            }
        };
        let new_timeout = self.proto.get_timeout();
        let now = Instant::now();
        // Deadline already passed: the in-flight request timed out.
        if new_timeout < now {
            return Err(ErrorEnum::RequestTimeout.into());
        }
        if old_timeout != new_timeout {
            // Re-create and immediately poll the timer so the task is
            // registered for wakeup at the new deadline.
            self.timeout = Timeout::new(new_timeout - now, &self.handle)
                .expect("can always add a timeout");
            let timeo = self.timeout.poll()
                .expect("timeout can't fail on poll");
            match timeo {
                // it shouldn't be keep-alive timeout, but have to check
                Async::Ready(()) => {
                    match res {
                        // don't discard request
                        AsyncSink::NotReady(..) => {}
                        // can return error (can it happen?)
                        // TODO(tailhook) it's strange that this can happen
                        AsyncSink::Ready => {
                            return Err(ErrorEnum::RequestTimeout.into());
                        }
                    }
                }
                Async::NotReady => {}
            }
        }
        Ok(res)
    }
    /// Flush pending work and rearm the deadline timer if it moved.
    fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
        let old_timeout = self.proto.get_timeout();
        let res = self.proto.poll_complete()?;
        let new_timeout = self.proto.get_timeout();
        let now = Instant::now();
        if new_timeout < now {
            return Err(ErrorEnum::RequestTimeout.into());
        }
        if old_timeout != new_timeout {
            self.timeout = Timeout::new(new_timeout - now, &self.handle)
                .expect("can always add a timeout");
            let timeo = self.timeout.poll()
                .expect("timeout can't fail on poll");
            match timeo {
                // it shouldn't be keep-alive timeout, but have to check
                Async::Ready(()) => {
                    return Err(ErrorEnum::RequestTimeout.into());
                }
                Async::NotReady => {},
            }
        }
        Ok(res)
    }
}
impl<S, C: Codec<S>> PureProto<S, C> {
    /// Compute the next deadline for this connection:
    /// keep-alive when fully idle, otherwise the request timeout measured
    /// from the relevant request's start/queue time.
    fn get_timeout(&self) -> Instant {
        match self.writing {
            OutState::Idle(_, time) => {
                if self.waiting.len() == 0 {
                    match self.reading {
                        InState::Idle(.., rtime) => {
                            // Fully idle: whichever half went idle last
                            // anchors the keep-alive deadline.
                            return max(time, rtime) +
                                self.config.keep_alive_timeout;
                        }
                        InState::Read(_, time) => {
                            return time + self.config.max_request_timeout;
                        }
                        InState::Void => unreachable!(),
                    }
                } else {
                    // Oldest queued request bounds the deadline.
                    let req = self.waiting.get(0).unwrap();
                    return req.queued_at + self.config.max_request_timeout;
                }
            }
            OutState::Write(_, time) => {
                return time + self.config.max_request_timeout;
            }
            OutState::Void => unreachable!(),
        }
    }
}
impl<S: AsyncRead + AsyncWrite, C: Codec<S>> Sink for PureProto<S, C> {
    type SinkItem = C;
    type SinkError = Error;
    /// Accept a codec for sending if pipelining limits and connection
    /// state allow it; otherwise return it as `NotReady`.
    fn start_send(&mut self, mut item: Self::SinkItem)
        -> StartSend<Self::SinkItem, Self::SinkError>
    {
        if self.waiting.len() > 0 {
            if self.waiting.len() > self.config.inflight_request_limit {
                // Return right away if limit reached
                // (but limit is checked later for inflight request again)
                return Ok(AsyncSink::NotReady(item));
            }
            // NOTE: despite the name, `get(0)` is the *oldest* queued
            // request (front of the FIFO).
            let last = self.waiting.get(0).unwrap();
            if last.queued_at.elapsed() > self.config.safe_pipeline_timeout {
                // Return right away if request is being waited for too long
                // (but limit is checked later for inflight request again)
                return Ok(AsyncSink::NotReady(item));
            }
        }
        if matches!(self.reading, InState::Read(_, time)
            if time.elapsed() > self.config.safe_pipeline_timeout)
        {
            // Return right away if request is being waited for too long
            return Ok(AsyncSink::NotReady(item));
        }
        let (r, st) = match mem::replace(&mut self.writing, OutState::Void) {
            OutState::Idle(mut io, time) => {
                if time.elapsed() > self.config.keep_alive_timeout &&
                    self.waiting.len() == 0 &&
                    matches!(self.reading, InState::Idle(..))
                {
                    // Too dangerous to send request now
                    (AsyncSink::NotReady(item), OutState::Idle(io, time))
                } else if self.close.load(Ordering::SeqCst) {
                    // TODO(tailhook) maybe shutdown?
                    io.flush().map_err(ErrorEnum::Io)?;
                    (AsyncSink::NotReady(item), OutState::Idle(io, time))
                } else {
                    // An in-flight (being-parsed) response also counts
                    // against the pipelining limit.
                    let mut limit = self.config.inflight_request_limit;
                    if matches!(self.reading, InState::Read(..)) {
                        limit -= 1;
                    }
                    if self.waiting.len() >= limit {
                        // Note: we recheck limit here, because inflight
                        // request influences the limit
                        (AsyncSink::NotReady(item), OutState::Idle(io, time))
                    } else {
                        // Accept: start serializing now and queue the
                        // codec to receive its response later.
                        let state = Arc::new(AtomicUsize::new(0));
                        let e = encoder::new(io,
                            state.clone(), self.close.clone());
                        let fut = item.start_write(e);
                        self.waiting.push_back(Waiting {
                            codec: item,
                            state: state,
                            queued_at: Instant::now(),
                        });
                        (AsyncSink::Ready,
                            OutState::Write(fut, Instant::now()))
                    }
                }
            }
            OutState::Write(fut, start) => {
                // TODO(tailhook) should we check "close"?
                // Points:
                // * Performance
                // * Dropping future
                (AsyncSink::NotReady(item), OutState::Write(fut, start))
            }
            OutState::Void => unreachable!(),
        };
        self.writing = st;
        return Ok(r);
    }
    /// Pump both halves until they stall; `Ready` only when the connection
    /// is completely quiescent.
    fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
        loop {
            let wr = self.poll_writing()?;
            let rd = self.poll_reading()?;
            if !wr && !rd {
                break;
            }
        }
        // Basically we return Ready when there are no in-flight requests,
        // which means we can shutdown connection safely.
        if self.waiting.len() == 0 &&
            matches!(self.writing, OutState::Idle(..)) &&
            matches!(self.reading, InState::Idle(..))
        {
            return Ok(Async::Ready(()));
        } else {
            return Ok(Async::NotReady);
        }
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/enums/version.rs | src/enums/version.rs | use std::fmt;
/// Enum representing the HTTP protocol version.
#[derive(Debug, Clone, PartialEq, Eq, Copy)]
pub enum Version {
    /// Version 1.0 of the HTTP protocol
    Http10,
    /// Version 1.1 of the HTTP protocol
    Http11,
}
impl fmt::Display for Version {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // Emit the canonical on-the-wire protocol string.
        let text = match *self {
            Version::Http10 => "HTTP/1.0",
            Version::Http11 => "HTTP/1.1",
        };
        f.write_str(text)
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/enums/status.rs | src/enums/status.rs | //! Http status codes helpers
//!
/// Enum with some HTTP Status codes.
#[derive(Debug, PartialEq, Copy, Clone)]
#[allow(missing_docs)]
pub enum Status {
    // 1xx status codes
    Continue,  // 100
    SwitchingProtocol,  // 101
    // 2xx status codes
    Ok,  // 200
    Created,  // 201
    Accepted,  // 202
    NonAuthoritativeInformation,  // 203
    NoContent,  // 204
    ResetContent,  // 205
    PartialContent,  // 206
    // 3xx status codes
    MultipleChoices,  // 300
    MovedPermanently,  // 301
    Found,  // 302
    SeeOther,  // 303
    NotModified,  // 304
    UseProxy,  // 305
    TemporaryRedirect,  // 307
    PermanentRedirect,  // 308
    // 4xx status codes
    BadRequest,  // 400
    Unauthorized,  // 401
    PaymentRequired,  // 402
    Forbidden,  // 403
    NotFound,  // 404
    MethodNotAllowed,  // 405
    NotAcceptable,  // 406
    ProxyAuthenticationRequired,  // 407
    RequestTimeout,  // 408
    Conflict,  // 409
    Gone,  // 410
    LengthRequired,  // 411
    PreconditionFailed,  // 412
    RequestEntityTooLarge,  // 413
    RequestURITooLong,  // 414
    UnsupportedMediaType,  // 415
    RequestRangeNotSatisfiable,  // 416
    ExpectationFailed,  // 417
    UpgradeRequired,  // 426
    TooManyRequests,  // 429
    // 5xx status codes
    InternalServerError,  // 500
    NotImplemented,  // 501
    BadGateway,  // 502
    ServiceUnavailable,  // 503
    GatewayTimeout,  // 504
    VersionNotSupported,  // 505
}
// Accessors and conversions for `Status`.
impl Status {
    /// Returns 3 digit numeric code
    pub fn code(&self) -> u16 {
        match *self {
            // 1xx Status codes
            Status::Continue => 100,
            Status::SwitchingProtocol => 101,
            // 2xx status codes
            Status::Ok => 200,
            Status::Created => 201,
            Status::Accepted => 202,
            Status::NonAuthoritativeInformation => 203,
            Status::NoContent => 204,
            Status::ResetContent => 205,
            Status::PartialContent => 206,
            // 3xx status codes
            Status::MultipleChoices => 300,
            Status::MovedPermanently => 301,
            Status::Found => 302,
            Status::SeeOther => 303,
            Status::NotModified => 304,
            Status::UseProxy => 305,
            Status::TemporaryRedirect => 307,
            Status::PermanentRedirect => 308,
            // 4xx status codes
            Status::BadRequest => 400,
            Status::Unauthorized => 401,
            Status::PaymentRequired => 402,
            Status::Forbidden => 403,
            Status::NotFound => 404,
            Status::MethodNotAllowed => 405,
            Status::NotAcceptable => 406,
            Status::ProxyAuthenticationRequired => 407,
            Status::RequestTimeout => 408,
            Status::Conflict => 409,
            Status::Gone => 410,
            Status::LengthRequired => 411,
            Status::PreconditionFailed => 412,
            Status::RequestEntityTooLarge => 413,
            Status::RequestURITooLong => 414,
            Status::UnsupportedMediaType => 415,
            Status::RequestRangeNotSatisfiable => 416,
            Status::ExpectationFailed => 417,
            Status::UpgradeRequired => 426,
            Status::TooManyRequests => 429,
            // 5xx status codes
            Status::InternalServerError => 500,
            Status::NotImplemented => 501,
            Status::BadGateway => 502,
            Status::ServiceUnavailable => 503,
            Status::GatewayTimeout => 504,
            Status::VersionNotSupported => 505,
        }
    }
    /// Returns title for the status code
    ///
    /// Reason phrases follow RFC 7231 section 6.1. (Fixed here:
    /// "Switching Protocols", "Non-Authoritative Information" and
    /// "Multiple Choices" were previously misspelled/truncated.)
    pub fn reason(&self) -> &'static str {
        match self.code() {
            // 1xx codes;
            100 => "Continue",
            101 => "Switching Protocols",
            // 2xx codes
            200 => "OK",
            201 => "Created",
            202 => "Accepted",
            203 => "Non-Authoritative Information",
            204 => "No Content",
            205 => "Reset Content",
            206 => "Partial Content",
            // 3xx codes
            300 => "Multiple Choices",
            301 => "Moved Permanently",
            302 => "Found",
            303 => "See Other",
            304 => "Not Modified",
            305 => "Use Proxy",
            307 => "Temporary Redirect",
            308 => "Permanent Redirect",
            // 4xx codes
            400 => "Bad Request",
            401 => "Unauthorized",
            402 => "Payment Required",
            403 => "Forbidden",
            404 => "Not Found",
            405 => "Method Not Allowed",
            406 => "Not Acceptable",
            407 => "Proxy Authentication Required",
            408 => "Request Timeout",
            409 => "Conflict",
            410 => "Gone",
            411 => "Length Required",
            412 => "Precondition Failed",
            413 => "Request Entity Too Large",
            414 => "Request-URI Too Long",
            415 => "Unsupported Media Type",
            416 => "Request Range Not Satisfiable",
            417 => "Expectation Failed",
            426 => "Upgrade Required",
            429 => "Too Many Requests",
            // 5xx codes
            500 => "Internal Server Error",
            501 => "Not Implemented",
            502 => "Bad Gateway",
            503 => "Service Unavailable",
            504 => "Gateway Timeout",
            505 => "HTTP Version Not Supported",
            // Custom code
            _ => "Unknown",
        }
    }
    /// Returns true if sending body is expected for such status code
    pub fn response_has_body(&self) -> bool {
        // 1xx, 204 No Content and 304 Not Modified never carry a body.
        match self.code() {
            100...199 | 204 | 304 => false,
            _ => true,
        }
    }
    /// Make Status from u16 if known code is passed.
    pub fn from(code: u16) -> Option<Status> {
        use self::Status::*;
        let s = match code {
            // 1xx
            100 => Continue,
            101 => SwitchingProtocol,
            // 2xx
            200 => Ok,
            201 => Created,
            202 => Accepted,
            203 => NonAuthoritativeInformation,
            204 => NoContent,
            205 => ResetContent,
            206 => PartialContent,
            // 3xx
            300 => MultipleChoices,
            301 => MovedPermanently,
            302 => Found,
            303 => SeeOther,
            304 => NotModified,
            305 => UseProxy,
            307 => TemporaryRedirect,
            308 => PermanentRedirect,
            // 4xx
            400 => BadRequest,
            401 => Unauthorized,
            402 => PaymentRequired,
            403 => Forbidden,
            404 => NotFound,
            405 => MethodNotAllowed,
            406 => NotAcceptable,
            407 => ProxyAuthenticationRequired,
            408 => RequestTimeout,
            409 => Conflict,
            410 => Gone,
            411 => LengthRequired,
            412 => PreconditionFailed,
            413 => RequestEntityTooLarge,
            414 => RequestURITooLong,
            415 => UnsupportedMediaType,
            416 => RequestRangeNotSatisfiable,
            417 => ExpectationFailed,
            426 => UpgradeRequired,
            429 => TooManyRequests,
            // 5xx
            500 => InternalServerError,
            501 => NotImplemented,
            502 => BadGateway,
            503 => ServiceUnavailable,
            504 => GatewayTimeout,
            505 => VersionNotSupported,
            _ => return None,
        };
        Some(s)
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/enums/mod.rs | src/enums/mod.rs | mod status;
mod version;
pub use self::status::*;
pub use self::version::*;
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/websocket/config.rs | src/websocket/config.rs | use std::time::Duration;
use std::sync::Arc;
use websocket::{Config};
impl Config {
    /// Create a config with defaults
    pub fn new() -> Config {
        Config {
            ping_interval: Duration::new(10, 0),
            message_timeout: Duration::new(30, 0),
            byte_timeout: Duration::new(30, 0),
            // 10 MiB
            max_packet_size: 10 << 20,
        }
    }
    /// Set ping interval
    ///
    /// Default is 10 seconds.
    ///
    /// If no messages have been received within this interval, we send
    /// a ping message. Only full messages are accounted. If some large
    /// frame is being received for this long, we still send ping.
    ///
    /// Note: you can't remove the interval, but you can set it to
    /// a sufficiently large value.
    ///
    /// Note 2: you may also need to tune inactivity timeout if you change
    /// this value.
    pub fn ping_interval(&mut self, dur: Duration) -> &mut Self {
        self.ping_interval = dur;
        self
    }
    /// Set inactivity timeout
    ///
    /// Default is 30 seconds (matching the value set in `new()`).
    ///
    /// A connection is shut down if no messages were received during this
    /// interval.
    ///
    /// Note: only full frames are accounted. If some very large frame is
    /// being sent too long, we drop the connection. So be sure to set this
    /// value large enough so that slowest client can send largest frame and
    /// another ping.
    ///
    /// There are two use cases for this interval:
    ///
    /// 1. Make it 2.5x the ping_interval to detect connections which
    ///    don't have destination host alive
    ///
    /// 2. Inactivity interval that is smaller than `ping_interval` will
    ///    detect connections which are alive but do not send any messages.
    ///    This is similar to how HTTP servers shutdown inactive connections.
    ///
    /// Note: you may also need to tune ping interval if you change
    /// this value.
    pub fn message_timeout(&mut self, dur: Duration) -> &mut Self {
        self.message_timeout = dur;
        self
    }
    /// Sets both message timeout and byte timeout to the same value
    pub fn inactivity_timeout(&mut self, dur: Duration) -> &mut Self {
        self.message_timeout = dur;
        self.byte_timeout = dur;
        self
    }
    /// Similar to message timeout but works at byte level
    ///
    /// Being less strict timeout this value is two-way: any byte sent or
    /// received resets the timer (Also, we do our best to ignore outgoing
    /// pings)
    ///
    /// There are two points to consider for tweaking timeout:
    ///
    /// 1. To prevent resource exhaustion by a peer: sending a byte at a time,
    ///    you might make it higher, up to message timeout
    /// 2. To be able to receive larger messages (say 1Mb or 10 Mb) you can
    ///    make message timeout much larger for largest message to fit, but
    ///    make byte timeout smaller so that if nothing it being received you
    ///    can close connection earlier.
    ///
    /// Note: there is no sense to make this value larger than message_timeout
    pub fn byte_timeout(&mut self, dur: Duration) -> &mut Self {
        self.byte_timeout = dur;
        self
    }
    /// Maximum packet size
    ///
    /// If some frame declares size larger than this, we immediately abort
    /// the connection
    pub fn max_packet_size(&mut self, size: usize) -> &mut Self {
        self.max_packet_size = size;
        self
    }
    /// Create a Arc'd config clone to pass to the constructor
    ///
    /// This is just a convenience method.
    pub fn done(&mut self) -> Arc<Config> {
        Arc::new(self.clone())
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/websocket/codec.rs | src/websocket/codec.rs | use std::io;
use tk_bufstream::{Buf, Encode, Decode};
use websocket::{Packet, Frame};
use websocket::error::Error;
// Hard cap (10 MiB) on a single frame accepted by these codecs.
// NOTE(review): this duplicates the default of `Config::max_packet_size` —
// presumably these codecs predate per-connection configuration; confirm.
const MAX_PACKET_SIZE: usize = 10 << 20;
/// Websocket codec for use with tk-bufstream in `Codec::hijack()`
///
/// This codec is used out of the box in
/// `BufferedDispatcher::new_with_websockets`
pub struct ServerCodec;
/// Websocket codec for use with tk-bufstream
///
/// This codec is used out of the box in `HandshakeProto`
pub struct ClientCodec;
impl Encode for ServerCodec {
    type Item = Packet;
    /// Serialize one packet into the output buffer.
    fn encode(&mut self, data: Packet, buf: &mut Buf) {
        // TODO(tailhook) should we also change state on close somehow?
        // NOTE(review): the bool presumably toggles client-side frame
        // masking (client codec passes `true`) — confirm against
        // `Frame::write`.
        Frame::from(&data).write(buf, false)
    }
}
impl Decode for ServerCodec {
    type Item = Packet;
    /// Parse one complete frame from `buf`, if available.
    fn decode(&mut self, buf: &mut Buf) -> Result<Option<Packet>, io::Error> {
        let parsed = Frame::parse(buf, MAX_PACKET_SIZE, true)
            // TODO(tailhook) fix me when error type in bufstream
            // is associated type
            .map_err(|e| io::Error::new(io::ErrorKind::Other, Error::from(e)))?
            // Convert to an owned packet before touching the buffer.
            .map(|(frame, consumed)| (frame.into(), consumed));
        match parsed {
            Some((packet, consumed)) => {
                buf.consume(consumed);
                Ok(Some(packet))
            }
            None => Ok(None),
        }
    }
}
impl Encode for ClientCodec {
    type Item = Packet;
    /// Serialize one packet into the output buffer.
    fn encode(&mut self, data: Packet, buf: &mut Buf) {
        // TODO(tailhook) should we also change state on close somehow?
        // NOTE(review): the bool presumably toggles client-side frame
        // masking (server codec passes `false`) — confirm against
        // `Frame::write`.
        Frame::from(&data).write(buf, true)
    }
}
impl Decode for ClientCodec {
type Item = Packet;
fn decode(&mut self, buf: &mut Buf) -> Result<Option<Packet>, io::Error> {
let parse_result = Frame::parse(buf, MAX_PACKET_SIZE, false)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?
.map(|(p, b)| (p.into(), b));
if let Some((p, b)) = parse_result {
buf.consume(b);
Ok(Some(p))
} else {
Ok(None)
}
}
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/websocket/alloc.rs | src/websocket/alloc.rs | use websocket::zero_copy::Frame;
/// A websocket packet
///
/// Note: unlike `Frame` this has data allocated on the heap so has static
/// lifetime
#[derive(Debug, Clone)]
pub enum Packet {
    /// Ping packet (with data)
    Ping(Vec<u8>),
    /// Pong packet (with data)
    Pong(Vec<u8>),
    /// Text (utf-8) message
    Text(String),
    /// Binary message
    Binary(Vec<u8>),
    /// Close message (numeric close code and reason text)
    Close(u16, String),
}
// Borrow an owned `Packet` as a zero-copy `Frame` (each variant maps
// one-to-one; no data is copied).
impl<'a> From<&'a Packet> for Frame<'a> {
    fn from(pkt: &'a Packet) -> Frame<'a> {
        use websocket::zero_copy::Frame as F;
        use self::Packet as P;
        match *pkt {
            P::Ping(ref x) => F::Ping(x),
            P::Pong(ref x) => F::Pong(x),
            P::Text(ref x) => F::Text(x),
            P::Binary(ref x) => F::Binary(x),
            P::Close(c, ref t) => F::Close(c, t),
        }
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/websocket/dispatcher.rs | src/websocket/dispatcher.rs | use std::cmp::min;
use std::fmt;
use std::sync::Arc;
use std::time::Instant;
use futures::{Future, Async, Stream};
use futures::future::{FutureResult, ok};
use futures::stream;
use tk_bufstream::{ReadFramed, WriteFramed, ReadBuf, WriteBuf};
use tk_bufstream::{Encode};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_core::reactor::{Handle, Timeout};
use websocket::{Frame, Config, Packet, Error, ServerCodec, ClientCodec};
use websocket::error::ErrorEnum;
use websocket::zero_copy::{write_packet, write_close};
/// Dispatches messages received from websocket
pub trait Dispatcher {
    /// Future returned from `frame()`
    type Future: Future<Item=(), Error=Error>;
    /// A frame received
    ///
    /// If backpressure is desired, method may return a future other than
    /// `futures::FutureResult`.  While the returned future is not ready,
    /// `Loop` stops reading further frames from the connection.
    fn frame(&mut self, frame: &Frame) -> Self::Future;
}
/// This is a helper for running websockets
///
/// The Loop object is a future which polls both: (1) input stream,
/// calling dispatcher on each message and a (2) channel where you can send
/// output messages to from external futures.
///
/// Also Loop object answers pings by itself and pings idle connections.
pub struct Loop<S, T, D: Dispatcher> {
    config: Arc<Config>,
    // raw (unframed) input buffer; frames are parsed in `read_messages`
    input: ReadBuf<S>,
    // raw (unframed) output buffer; frames are serialized straight into it
    output: WriteBuf<S>,
    // stream of outgoing packets; set to `None` once it is exhausted
    stream: Option<T>,
    dispatcher: D,
    // dispatcher future that was not ready yet; while it is `Some`,
    // no further incoming frames are processed (backpressure)
    backpressure: Option<D::Future>,
    state: LoopState,
    // true for a server-side loop (selects the frame masking direction)
    server: bool,
    handle: Handle,
    // timestamps used to decide when to ping and when to time out
    last_message_received: Instant,
    last_ping: Instant,
    last_byte: Instant,
    timeout: Timeout,
}
/// A special kind of dispatcher that consumes all messages and does nothing
///
/// This is used with `Loop::closing()`.
pub struct BlackHole;
/// A displayable stream error that never happens
///
/// This is used with `Loop::closing()`; the packet stream there is empty,
/// so no error value is ever produced.
pub struct VoidError;
// State of the websocket closing handshake
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum LoopState {
    // normal operation, no close frame seen in either direction
    Open,
    // we wrote a close frame and wait for the peer's reply
    CloseSent,
    // peer sent a close frame first
    CloseReceived,
    // connection is finished; the `Loop` future resolves
    Done,
}
// TODO(tailhook) Stream::Error should be Void here
impl<S, T, D, E> Loop<S, T, D>
where T: Stream<Item=Packet, Error=E>,
D: Dispatcher,
{
/// Create a new websocket Loop (server-side)
///
/// This method should be called in `hijack` method of `server::Codec`
pub fn server(
outp: WriteFramed<S, ServerCodec>,
inp: ReadFramed<S, ServerCodec>,
stream: T, dispatcher: D, config: &Arc<Config>,
handle: &Handle)
-> Loop<S, T, D>
{
Loop {
config: config.clone(),
input: inp.into_inner(),
output: outp.into_inner(),
stream: Some(stream),
dispatcher: dispatcher,
backpressure: None,
state: LoopState::Open,
server: true,
handle: handle.clone(),
last_message_received: Instant::now(),
last_ping: Instant::now(),
last_byte: Instant::now(),
// Note: we expect that loop is polled immediately, so timeout
// is polled too
timeout: Timeout::new(
min(config.byte_timeout,
min(config.ping_interval, config.message_timeout)),
handle)
.expect("Can always set timeout"),
}
}
/// Create a new websocket Loop (client-side)
///
/// This method should be called after `HandshakeProto` finishes
pub fn client(
outp: WriteFramed<S, ClientCodec>,
inp: ReadFramed<S, ClientCodec>,
stream: T, dispatcher: D, config: &Arc<Config>, handle: &Handle)
-> Loop<S, T, D>
{
Loop {
config: config.clone(),
input: inp.into_inner(),
output: outp.into_inner(),
stream: Some(stream),
dispatcher: dispatcher,
backpressure: None,
state: LoopState::Open,
server: false,
handle: handle.clone(),
last_message_received: Instant::now(),
last_ping: Instant::now(),
last_byte: Instant::now(),
// Note: we expect that loop is polled immediately, so timeout
// is polled too
timeout: Timeout::new(
min(config.byte_timeout,
min(config.ping_interval, config.message_timeout)),
handle)
.expect("Can always set timeout"),
}
}
}
impl<S> Loop<S, stream::Empty<Packet, VoidError>, BlackHole>
{
    /// A websocket loop that sends failure and waits for closing handshake
    ///
    /// This method should be called instead of `new` if something wrong
    /// happened with handshake.
    ///
    /// The motivation of such constructor is: browsers do not propagate
    /// http error codes when websocket is established. This is presumed as
    /// a security feature (so you can't attack server that doesn't support
    /// websockets).
    ///
    /// So to show useful failure to websocket code we return `101 Switching
    /// Protocol` response code (which is success). I.e. establish a websocket
    /// connection, then immediately close it with a reason code and text.
    /// Javascript client can fetch the failure reason from `onclose` callback.
    pub fn closing(
        outp: WriteFramed<S, ServerCodec>,
        inp: ReadFramed<S, ServerCodec>,
        reason: u16, text: &str,
        config: &Arc<Config>,
        handle: &Handle)
        -> Loop<S, stream::Empty<Packet, VoidError>, BlackHole>
    {
        // Queue the close frame immediately; the loop starts in
        // `CloseSent` state and merely waits for the peer's close reply.
        let mut output = outp.into_inner();
        write_close(&mut output.out_buf, reason, text, false);
        let now = Instant::now();
        // Note: we expect that loop is polled immediately, so the
        // timeout is polled too
        let initial_timeout = Timeout::new(
            min(config.byte_timeout,
                min(config.ping_interval, config.message_timeout)),
            handle)
            .expect("Can always set timeout");
        Loop {
            config: config.clone(),
            input: inp.into_inner(),
            output: output,
            stream: None,
            dispatcher: BlackHole,
            backpressure: None,
            state: LoopState::CloseSent,
            // TODO(tailhook) should we provide client-size thing?
            server: true,
            handle: handle.clone(),
            last_message_received: now,
            last_ping: now,
            last_byte: now,
            timeout: initial_timeout,
        }
    }
}
impl<S, T, D, E> Loop<S, T, D>
    where T: Stream<Item=Packet, Error=E>,
          D: Dispatcher,
          S: AsyncRead + AsyncWrite,
{
    /// Drain the outgoing packet stream into the output buffer.
    ///
    /// Encodes every ready packet with the codec matching our side of the
    /// connection.  When the stream finishes, starts (or completes) the
    /// closing handshake and drops the stream.
    fn read_stream(&mut self) -> Result<(), E> {
        if self.state == LoopState::CloseSent {
            return Ok(());
        }
        // For now we assume that there is no useful backpressure can
        // be applied to a stream, so we read everything from the stream
        // and put it into a buffer
        if let Some(ref mut stream) = self.stream {
            loop {
                match stream.poll()? {
                    Async::Ready(value) => match value {
                        Some(pkt) => {
                            // masking direction differs between server
                            // and client, hence the two codecs
                            if self.server {
                                ServerCodec.encode(pkt,
                                    &mut self.output.out_buf);
                            } else {
                                ClientCodec.encode(pkt,
                                    &mut self.output.out_buf);
                            }
                        }
                        None => {
                            match self.state {
                                LoopState::Open => {
                                    // send close
                                    write_close(&mut self.output.out_buf,
                                        1000, "", !self.server);
                                    self.state = LoopState::CloseSent;
                                }
                                LoopState::CloseReceived => {
                                    // peer closed first; now we're done
                                    self.state = LoopState::Done;
                                }
                                _ => {}
                            }
                            break;
                        }
                    },
                    Async::NotReady => {
                        return Ok(());
                    }
                }
            }
        }
        // only reached when the stream is exhausted (or already gone)
        self.stream = None;
        Ok(())
    }
    /// Returns number of messages read
    ///
    /// Parses as many complete frames as possible from the input buffer,
    /// answering pings inline and dispatching the rest.  Stops early when
    /// the dispatcher applies backpressure or the connection is done.
    fn read_messages(&mut self) -> Result<usize, Error> {
        // finish a previously unfinished dispatcher future first
        if let Some(mut back) = self.backpressure.take() {
            match back.poll()? {
                Async::Ready(()) => {}
                Async::NotReady => {
                    self.backpressure = Some(back);
                    return Ok(0);
                }
            }
        }
        let mut nmessages = 0;
        loop {
            while self.input.in_buf.len() > 0 {
                let (fut, nbytes) = match
                    Frame::parse(&mut self.input.in_buf,
                        self.config.max_packet_size, self.server)?
                {
                    Some((frame, nbytes)) => {
                        nmessages += 1;
                        let fut = match frame {
                            Frame::Ping(data) => {
                                trace!("Received ping {:?}", data);
                                // answer pings ourselves with a pong (0xA)
                                write_packet(&mut self.output.out_buf,
                                    0xA, data, !self.server);
                                None
                            }
                            Frame::Pong(data) => {
                                trace!("Received pong {:?}", data);
                                None
                            }
                            Frame::Close(code, reply) => {
                                debug!("Websocket closed by peer [{}]{:?}",
                                    code, reply);
                                self.state = LoopState::CloseReceived;
                                Some(self.dispatcher.frame(
                                    &Frame::Close(code, reply)))
                            }
                            pkt @ Frame::Text(_) | pkt @ Frame::Binary(_) => {
                                Some(self.dispatcher.frame(&pkt))
                            }
                        };
                        (fut, nbytes)
                    }
                    None => break,
                };
                self.input.in_buf.consume(nbytes);
                if self.state == LoopState::Done {
                    return Ok(nmessages);
                }
                if let Some(mut fut) = fut {
                    match fut.poll()? {
                        Async::Ready(()) => {},
                        Async::NotReady => {
                            // stash the unfinished future and stop reading
                            // until it completes (backpressure)
                            self.backpressure = Some(fut);
                            return Ok(nmessages);
                        }
                    }
                }
            }
            match self.input.read().map_err(ErrorEnum::Io)? {
                0 => {
                    if self.input.done() {
                        self.state = LoopState::Done;
                    }
                    return Ok(nmessages);
                }
                _ => {
                    self.last_byte = Instant::now();
                    continue;
                }
            }
        }
    }
}
impl<S, T, D, E> Future for Loop<S, T, D>
    where T: Stream<Item=Packet, Error=E>,
          D: Dispatcher,
          E: fmt::Display,
          S: AsyncRead + AsyncWrite,
{
    type Item = (); // TODO(tailhook) void?
    type Error = Error;
    /// Drive the whole websocket: flush output, dispatch input, and
    /// handle pings and the three configured timeouts.
    fn poll(&mut self) -> Result<Async<()>, Error> {
        self.read_stream()
            .map_err(|e| error!("Can't read from stream: {}", e)).ok();
        let old_val = self.output.out_buf.len();
        self.output.flush().map_err(ErrorEnum::Io)?;
        // buffer shrank => some bytes actually reached the socket
        if self.output.out_buf.len() < old_val {
            self.last_byte = Instant::now();
        }
        if self.state == LoopState::Done {
            return Ok(Async::Ready(()));
        }
        if self.read_messages()? > 0 {
            self.last_message_received = Instant::now();
            // re-arm the timer for whichever deadline comes first
            self.timeout = Timeout::new_at(
                min(self.last_message_received +
                        self.config.message_timeout,
                    min(self.last_ping + self.config.ping_interval,
                        self.last_byte + self.config.byte_timeout)),
                &self.handle,
            ).expect("can always set timeout");
        }
        loop {
            match self.timeout.poll().map_err(|_| ErrorEnum::Timeout)? {
                Async::Ready(()) => {
                    let deadline = min(
                        self.last_message_received +
                            self.config.message_timeout,
                        self.last_byte + self.config.byte_timeout);
                    if Instant::now() > deadline {
                        // connection idle for too long: give up
                        self.state = LoopState::Done;
                        return Ok(Async::Ready(()));
                    } else if Instant::now() >
                        self.last_ping + self.config.ping_interval
                    {
                        debug!("Sending ping");
                        let old_val = self.output.out_buf.len();
                        write_packet(&mut self.output.out_buf,
                            0x9, b"tk-http-ping", !self.server);
                        self.output.flush().map_err(ErrorEnum::Io)?;
                        // only update time if more than ping has been flushed
                        if old_val > 0 && self.output.out_buf.len() < old_val {
                            self.last_byte = Instant::now();
                        }
                        self.last_ping = Instant::now();
                    }
                    self.timeout = Timeout::new_at(
                        min(self.last_message_received +
                                self.config.message_timeout,
                            min(self.last_ping + self.config.ping_interval,
                                self.last_byte + self.config.byte_timeout)),
                        &self.handle)
                        .expect("can always set timeout");
                    // poll the fresh timer so this task gets woken up;
                    // if it somehow already elapsed, evaluate it again
                    match self.timeout.poll()
                        .map_err(|_| ErrorEnum::Timeout)?
                    {
                        Async::NotReady => break,
                        Async::Ready(()) => continue,
                    }
                }
                Async::NotReady => break,
            }
        }
        if self.state == LoopState::Done {
            return Ok(Async::Ready(()));
        }
        return Ok(Async::NotReady);
    }
}
impl Dispatcher for BlackHole {
    type Future = FutureResult<(), Error>;
    /// Drop the frame on the floor; always immediately ready.
    fn frame(&mut self, _: &Frame) -> Self::Future {
        ok(())
    }
}
impl fmt::Display for VoidError {
    // `VoidError` is only used as the error type of an empty stream
    // (see `Loop::closing`), so a value is never actually formatted.
    fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result {
        unreachable!();
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/websocket/client.rs | src/websocket/client.rs | //! Websocket client implementation
//!
#[allow(unused_imports)]
use std::ascii::AsciiExt;
use std::fmt::Display;
use futures::{Future, Async};
use httparse::{self, Header};
use tk_bufstream::{IoBuf, ReadBuf, WriteBuf, WriteFramed, ReadFramed};
use tokio_io::{AsyncRead, AsyncWrite};
use base_serializer::{MessageState, HeaderError};
// TODO(tailhook) change the error
use websocket::{Error};
use websocket::error::ErrorEnum;
use enums::{Version, Status};
use websocket::{ClientCodec, Key};
/// Number of headers to allocate on a stack
///
/// `parse_headers` tries a stack array of this size first and only falls
/// back to a heap allocation when the response has more headers.
const MIN_HEADERS: usize = 16;
/// A hard limit on the number of headers
const MAX_HEADERS: usize = 1024;
/// This a request writer that you receive in `Codec`
///
/// Methods of this structure ensure that everything you write into a buffer
/// is consistent and valid protocol
pub struct Encoder<S> {
    // tracks which part of the request has been written so far
    message: MessageState,
    buf: WriteBuf<S>,
}
/// This structure returned from `Encoder::done` and works as a continuation
/// that should be returned from the future that writes request.
pub struct EncoderDone<S> {
    buf: WriteBuf<S>,
}
/// Authorizer sends all the necessary headers and checks response headers
/// to establish websocket connection
///
/// The `SimpleAuthorizer` implementation is good enough for most cases, but
/// custom authorizer may be helpful for `Cookie` or `Authorization` header.
pub trait Authorizer<S> {
    /// The type that may be returned from a `header_received`. It should
    /// encompass everything parsed from input headers.
    type Result: Sized;
    /// Write request headers
    ///
    /// Websocket-specific headers like `Connection`, `Upgrade`, and
    /// `Sec-Websocket-Key` are written automatically. But other important
    /// things like `Host`, `Origin`, `User-Agent` must be written by
    /// this method, as well as path encoded in request-line.
    fn write_headers(&mut self, e: Encoder<S>) -> EncoderDone<S>;
    /// A handler of response headers
    ///
    /// It's called when websocket has been successfully connected or when
    /// server returned error, check that response code equals 101 to make
    /// sure response is established.
    ///
    /// Anyway, handler may be skipped in case of invalid response headers.
    fn headers_received(&mut self, headers: &Head)
        -> Result<Self::Result, Error>;
}
/// A borrowed structure that represents response headers
///
/// It's passed to `Authorizer::headers_received` and you are
/// free to store or discard any needed fields and headers from it.
///
#[derive(Debug)]
pub struct Head<'a> {
    version: Version,
    // raw status code (101 for a successful websocket upgrade)
    code: u16,
    reason: &'a str,
    headers: &'a [Header<'a>],
}
/// A future that resolves to framed streams when websocket handshake is done
pub struct HandshakeProto<S, A> {
    // both halves are `Option` so they can be `take()`n out
    // when the handshake completes
    input: Option<ReadBuf<S>>,
    output: Option<WriteBuf<S>>,
    authorizer: A,
}
/// Default handshake handler, if you just want to get websocket connected
pub struct SimpleAuthorizer {
    host: String,
    path: String,
}
impl SimpleAuthorizer {
    /// Create a new authorizer that sends specified host and path
    pub fn new<A, B>(host: A, path: B) -> SimpleAuthorizer
        where A: Into<String>,
              B: Into<String>,
    {
        let host = host.into();
        let path = path.into();
        SimpleAuthorizer {
            host: host,
            path: path,
        }
    }
}
impl<S> Authorizer<S> for SimpleAuthorizer {
    type Result = ();
    /// Write a minimal header set: request line, `Host`, `Origin` and
    /// `User-Agent` (the websocket-specific headers are added by `done`).
    fn write_headers(&mut self, mut e: Encoder<S>) -> EncoderDone<S> {
        e.request_line(&self.path);
        e.add_header("Host", &self.host).unwrap();
        e.format_header("Origin",
            format_args!("http://{}{}", self.host, self.path))
            .unwrap();
        e.add_header("User-Agent", concat!("tk-http/",
            env!("CARGO_PKG_VERSION"))).unwrap();
        e.done()
    }
    /// Accept any response unconditionally.
    fn headers_received(&mut self, _headers: &Head)
        -> Result<Self::Result, Error>
    {
        Ok(())
    }
}
/// Panic if `name` is a header that the websocket encoder manages itself.
///
/// `Connection`, `Upgrade`, `Sec-WebSocket-Key` and `Sec-WebSocket-Version`
/// are all emitted by `Encoder::done`, so setting any of them manually
/// would produce a duplicate (and possibly conflicting) header.  Other
/// `Sec-WebSocket-*` headers (e.g. `Sec-WebSocket-Protocol`) have no
/// special handling and may be set normally.
fn check_header(name: &str) {
    if name.eq_ignore_ascii_case("Connection") ||
        name.eq_ignore_ascii_case("Upgrade") ||
        name.eq_ignore_ascii_case("Sec-Websocket-Key") ||
        // previously missing: `done()` emits this header itself, too
        name.eq_ignore_ascii_case("Sec-Websocket-Version")
    {
        panic!("You shouldn't set websocket specific headers yourself");
    }
}
impl<S> Encoder<S> {
    /// Write request line.
    ///
    /// This puts request line into a buffer immediately. If you don't
    /// continue with request it will be sent to the network shortly.
    ///
    /// # Panics
    ///
    /// When request line is already written. It's expected that your request
    /// handler state machine will never call the method twice.
    pub fn request_line(&mut self, path: &str) {
        self.message.request_line(&mut self.buf.out_buf,
            "GET", path, Version::Http11);
    }
    /// Add a header to the websocket authentication data
    ///
    /// Header is written into the output buffer immediately. And is sent
    /// as soon as the next loop iteration
    ///
    /// `Content-Length` header must be send using the `add_length` method
    /// and `Transfer-Encoding: chunked` must be set with the `add_chunked`
    /// method. These two headers are important for the security of HTTP.
    ///
    /// Note that there is currently no way to use a transfer encoding other
    /// than chunked.
    ///
    /// We return Result here to make implementing proxies easier. In the
    /// application handler it's okay to unwrap the result and to get
    /// a meaningful panic (that is basically an assertion).
    ///
    /// # Panics
    ///
    /// Panics when `add_header` is called in the wrong state.
    ///
    /// When you add a special header `Connection`, `Upgrade`,
    /// `Sec-Websocket-*`, because they must be set with special methods
    pub fn add_header<V: AsRef<[u8]>>(&mut self, name: &str, value: V)
        -> Result<(), HeaderError>
    {
        check_header(name);
        self.message.add_header(&mut self.buf.out_buf, name, value.as_ref())
    }
    /// Same as `add_header` but allows value to be formatted directly into
    /// the buffer
    ///
    /// Useful for dates and numeric headers, as well as some strongly typed
    /// wrappers
    pub fn format_header<D: Display>(&mut self, name: &str, value: D)
        -> Result<(), HeaderError>
    {
        check_header(name);
        self.message.format_header(&mut self.buf.out_buf, name, value)
    }
    /// Finish writing headers and return `EncoderDone` which can be moved to
    ///
    /// # Panics
    ///
    /// Panics when the request is in a wrong state.
    pub fn done(mut self) -> EncoderDone<S> {
        // the websocket-specific headers are always emitted here so the
        // handshake request is well-formed regardless of user code
        self.message.add_header(&mut self.buf.out_buf,
            "Connection", b"upgrade").unwrap();
        self.message.add_header(&mut self.buf.out_buf,
            "Upgrade", b"websocket").unwrap();
        // TODO(tailhook) generate real random key
        self.message.format_header(&mut self.buf.out_buf,
            "Sec-WebSocket-Key", Key::new()).unwrap();
        self.message.add_header(&mut self.buf.out_buf,
            "Sec-WebSocket-Version", b"13").unwrap();
        // a websocket handshake request carries no body
        self.message.done_headers(&mut self.buf.out_buf)
            .map(|ignore_body| assert!(ignore_body)).unwrap();
        self.message.done(&mut self.buf.out_buf);
        EncoderDone { buf: self.buf }
    }
}
// A fresh encoder starts in the state before the request line is written.
fn encoder<S>(io: WriteBuf<S>) -> Encoder<S> {
    Encoder {
        buf: io,
        message: MessageState::RequestStart,
    }
}
impl<S, A: Authorizer<S>> HandshakeProto<S, A> {
    /// Create an instance of future from already connected socket
    pub fn new(transport: S, mut authorizer: A) -> HandshakeProto<S, A>
        where S: AsyncRead + AsyncWrite
    {
        let (tx, rx) = IoBuf::new(transport).split();
        // request headers are serialized into the buffer right away;
        // they reach the network once the future is polled (flushed)
        let out = authorizer.write_headers(encoder(tx)).buf;
        HandshakeProto {
            authorizer: authorizer,
            input: Some(rx),
            output: Some(out),
        }
    }
    /// Try to parse a complete response head out of the input buffer.
    ///
    /// Returns `Ok(None)` when more bytes are needed; on success the
    /// parsed bytes are consumed and the authorizer's result returned.
    fn parse_headers(&mut self) -> Result<Option<A::Result>, Error> {
        let ref mut buf = self.input.as_mut()
            .expect("buffer still exists")
            .in_buf;
        let (res, bytes) = {
            let mut vec;
            let mut headers = [httparse::EMPTY_HEADER; MIN_HEADERS];
            let (code, reason, headers, bytes) = {
                let mut raw = httparse::Response::new(&mut headers);
                let mut result = raw.parse(&buf[..]);
                // retry with a larger, heap-allocated header array when
                // the stack-allocated one turned out too small
                if matches!(result, Err(httparse::Error::TooManyHeaders)) {
                    vec = vec![httparse::EMPTY_HEADER; MAX_HEADERS];
                    raw = httparse::Response::new(&mut vec);
                    result = raw.parse(&buf[..]);
                }
                match result.map_err(ErrorEnum::HeaderError)? {
                    httparse::Status::Complete(bytes) => {
                        let ver = raw.version.unwrap();
                        if ver != 1 {
                            //return Error::VersionTooOld;
                            unimplemented!();
                        }
                        let code = raw.code.unwrap();
                        (code, raw.reason.unwrap(), raw.headers, bytes)
                    }
                    _ => return Ok(None),
                }
            };
            let head = Head {
                version: Version::Http11,
                code: code,
                reason: reason,
                headers: headers,
            };
            let data = self.authorizer.headers_received(&head)?;
            (data, bytes)
        };
        buf.consume(bytes);
        return Ok(Some(res));
    }
}
impl<S, A> Future for HandshakeProto<S, A>
    where A: Authorizer<S>,
          S: AsyncRead + AsyncWrite
{
    type Item = (WriteFramed<S, ClientCodec>, ReadFramed<S, ClientCodec>,
        A::Result);
    type Error = Error;
    /// Flush the request, read the response, and once a complete response
    /// head is parsed, yield the websocket-framed halves of the socket.
    fn poll(&mut self) -> Result<Async<Self::Item>, Error> {
        self.output.as_mut().expect("poll after complete")
            .flush().map_err(ErrorEnum::Io)?;
        self.input.as_mut().expect("poll after complete")
            .read().map_err(ErrorEnum::Io)?;
        // connection closed before a complete response head arrived
        if self.input.as_mut().expect("poll after complete").done() {
            return Err(ErrorEnum::PrematureResponseHeaders.into());
        }
        match self.parse_headers()? {
            Some(x) => {
                let inp = self.input.take()
                    .expect("input still here")
                    .framed(ClientCodec);
                let out = self.output.take()
                    .expect("input still here")
                    .framed(ClientCodec);
                Ok(Async::Ready((out, inp, x)))
            }
            None => Ok(Async::NotReady),
        }
    }
}
impl<'a> Head<'a> {
    /// Returns status if it is one of the supported statuses otherwise None
    ///
    /// Note: this method does not consider "reason" string at all just
    /// status code. Which is fine as specification states.
    pub fn status(&self) -> Option<Status> {
        Status::from(self.code)
    }
    /// Returns raw status code and reason as received, even if `status()`
    /// returned `None`.
    ///
    /// Note: the reason string may not match the status code or may even be
    /// an empty string.
    pub fn raw_status(&self) -> (u16, &'a str) {
        (self.code, self.reason)
    }
    /// All headers of HTTP request
    ///
    /// Unlike `self.headers()` this does include hop-by-hop headers. This
    /// method is here just for completeness, you shouldn't need it.
    pub fn all_headers(&self) -> &'a [Header<'a>] {
        self.headers
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/websocket/error.rs | src/websocket/error.rs | use std::io;
use std::str::{Utf8Error};
use httparse;
quick_error! {
    /// Websocket Error works both for client and server connections
    #[derive(Debug)]
    pub enum Error wraps pub ErrorEnum {
        /// Socket IO error
        Io(err: io::Error) {
            description("IO error")
            display("IO error: {}", err)
            from()
        }
        /// Error when polling timeout future (unreachable)
        Timeout {
            description("Timeout error (unreachable)")
            display("Timeout error (unreachable)")
        }
        /// Text frame can't be decoded
        InvalidUtf8(err: Utf8Error) {
            description("Error decoding text frame")
            display("Error decoding text frame: {}", err)
            from()
        }
        /// Got websocket message with wrong opcode
        InvalidOpcode(code: u8) {
            description("Opcode of the frame is invalid")
            display("Opcode of the frame is invalid: {}", code)
            from()
        }
        /// Got unmasked frame
        Unmasked {
            description("Received unmasked frame")
        }
        /// Got fragmented frame (fragmented frames are not supported yet)
        Fragmented {
            description("Received fragmented frame")
        }
        /// Received frame that is longer than configured limit
        TooLong {
            description("Received frame that is too long")
        }
        /// Currently this error means that channel to/from websocket closed
        ///
        /// In future we expect this condition (processor dropping channel) to
        /// happen when we forced killing connection by backend, so processor
        /// got rid of all object that refer to the connection.
        ///
        /// Another case: we are trying to use RemoteReplier for connection
        /// that already closed
        Closed {
            description("Forced connection close")
        }
        /// Error parsing http headers
        HeaderError(err: httparse::Error) {
            description("parse error")
            display("parse error: {:?}", err)
            from()
        }
        /// Returned by `HandshakeProto` when the connection is closed
        /// before a complete response head could be read
        PrematureResponseHeaders {
            description("response headers before request are sent")
        }
        /// Arbitrary user-supplied error (see `Error::custom`)
        Custom(err: Box<::std::error::Error + Send + Sync>) {
            description("custom error")
            display("custom error: {}", err)
            cause(&**err)
        }
    }
}
impl Error {
/// Create an error instance wrapping custom error
pub fn custom<E: Into<Box<::std::error::Error + Send + Sync>>>(err: E)
-> Error
{
Error(ErrorEnum::Custom(err.into()))
}
}
#[test]
fn send_sync() {
    // Compile-time assertion: `Error` must be Send + Sync (it is boxed
    // into `io::Error` by the codecs, which requires both bounds).
    fn send_sync<T: Send+Sync>(_: T) {}
    send_sync(Error::from(ErrorEnum::TooLong));
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/websocket/mod.rs | src/websocket/mod.rs | //! Websocket support stuff
//!
//! Websockets are initiated by server implementation, this module only
//! contains websocket message types and similar stuff.
use std::time::Duration;
mod alloc;
mod codec;
mod config;
mod dispatcher;
mod error;
mod keys;
mod zero_copy;
pub mod client;
pub use self::alloc::Packet;
pub use self::codec::{ServerCodec, ClientCodec};
pub use self::dispatcher::{Loop, Dispatcher};
pub use self::error::Error;
pub use self::keys::{GUID, Accept, Key};
pub use self::zero_copy::Frame;
/// Configuration of a `websocket::Loop` object (used by both the
/// server-side and the client-side websocket connection loops).
#[derive(Debug, Clone)]
pub struct Config {
    // how often an otherwise idle connection is pinged
    ping_interval: Duration,
    // the loop finishes when no message arrives for this long
    message_timeout: Duration,
    // the loop finishes when no single byte arrives for this long
    byte_timeout: Duration,
    // frames longer than this are rejected with `TooLong`
    max_packet_size: usize,
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/websocket/zero_copy.rs | src/websocket/zero_copy.rs | use std::str::from_utf8;
use rand::{thread_rng, Rng};
use tk_bufstream::Buf;
use byteorder::{BigEndian, ByteOrder};
use super::{Packet};
use websocket::error::ErrorEnum;
/// A borrowed frame of websocket data
#[derive(Debug, Clone, PartialEq)]
pub enum Frame<'a> {
    /// Ping message
    Ping(&'a [u8]),
    /// Pong message
    Pong(&'a [u8]),
    /// Text (utf-8) message
    Text(&'a str),
    /// Binary message
    Binary(&'a [u8]),
    /// Close message (status code, reason text)
    Close(u16, &'a str),
}
impl<'a> From<Frame<'a>> for Packet {
    /// Copy the borrowed frame data onto the heap.
    ///
    /// Implemented as `From` (rather than a manual `Into`) so the standard
    /// blanket impl derives `Into<Packet>` automatically and existing
    /// `frame.into()` call sites keep working unchanged.
    fn from(frame: Frame<'a>) -> Packet {
        use self::Frame as F;
        use super::Packet as P;
        match frame {
            F::Ping(x) => P::Ping(x.to_owned()),
            F::Pong(x) => P::Pong(x.to_owned()),
            F::Text(x) => P::Text(x.to_owned()),
            F::Binary(x) => P::Binary(x.to_owned()),
            F::Close(c, t) => P::Close(c, t.to_owned()),
        }
    }
}
impl<'a> From<&'a Frame<'a>> for Packet {
    /// Copy the borrowed frame data onto the heap (by-reference variant).
    ///
    /// Implemented as `From` (rather than a manual `Into`) so the standard
    /// blanket impl derives `Into<Packet>` automatically and existing
    /// `(&frame).into()` call sites keep working unchanged.
    fn from(frame: &'a Frame<'a>) -> Packet {
        use self::Frame as F;
        use super::Packet as P;
        match *frame {
            F::Ping(x) => P::Ping(x.to_owned()),
            F::Pong(x) => P::Pong(x.to_owned()),
            F::Text(x) => P::Text(x.to_owned()),
            F::Binary(x) => P::Binary(x.to_owned()),
            F::Close(c, t) => P::Close(c, t.to_owned()),
        }
    }
}
impl<'a> Frame<'a> {
    /// Parse a frame for the specified buffer
    ///
    /// Returns a frame and a number of bytes or None if no full frame was
    /// in the buffer. After frame is processed you should use
    /// `buf.consume(nbytes)`.
    pub fn parse<'x>(buf: &'x mut Buf, limit: usize, masked: bool)
        -> Result<Option<(Frame<'x>, usize)>, ErrorEnum>
    {
        use self::Frame::*;
        // need at least the two fixed header bytes
        if buf.len() < 2 {
            return Ok(None);
        }
        // payload length encoding: inline (<126), 126 => u16, 127 => u64;
        // `fsize` is the number of header bytes before the mask/payload
        let (size, fsize) = {
            match buf[1] & 0x7F {
                126 => {
                    if buf.len() < 4 {
                        return Ok(None);
                    }
                    (BigEndian::read_u16(&buf[2..4]) as u64, 4)
                }
                127 => {
                    if buf.len() < 10 {
                        return Ok(None);
                    }
                    (BigEndian::read_u64(&buf[2..10]), 10)
                }
                size => (size as u64, 2),
            }
        };
        if size > limit as u64 {
            return Err(ErrorEnum::TooLong);
        }
        let size = size as usize;
        let start = fsize + if masked { 4 } else { 0 } /* mask size */;
        if buf.len() < start + size {
            return Ok(None);
        }
        let fin = buf[0] & 0x80 != 0;
        let opcode = buf[0] & 0x0F;
        // TODO(tailhook) should we assert that reserved bits are zero?
        let mask = buf[1] & 0x80 != 0;
        if !fin {
            return Err(ErrorEnum::Fragmented);
        }
        // the frame's mask bit must match what this side expects
        if mask != masked {
            return Err(ErrorEnum::Unmasked);
        }
        if mask {
            // unmask the payload in place
            let mask = [buf[start-4], buf[start-3], buf[start-2], buf[start-1]];
            for idx in 0..size { // hopefully llvm is smart enough to optimize it
                buf[start + idx] ^= mask[idx % 4];
            }
        }
        let data = &buf[start..(start + size)];
        let frame = match opcode {
            0x9 => Ping(data),
            0xA => Pong(data),
            0x1 => Text(from_utf8(data)?),
            0x2 => Binary(data),
            // TODO(tailhook) implement shutdown packets
            0x8 => {
                if data.len() < 2 {
                    // close frame without a status code
                    Close(1006, "")
                } else {
                    Close(BigEndian::read_u16(&data[..2]), from_utf8(&data[2..])?)
                }
            }
            x => return Err(ErrorEnum::InvalidOpcode(x)),
        };
        return Ok(Some((frame, start + size)));
    }
    /// Write a frame into specified buffer
    ///
    /// `masked` should be true for client socket and false for servers socket
    /// according to the spec
    pub fn write(&self, buf: &mut Buf, masked: bool) {
        use self::Frame::*;
        match *self {
            Ping(data) => write_packet(buf, 0x9, &data, masked),
            Pong(data) => write_packet(buf, 0xA, &data, masked),
            Text(data) => write_packet(buf, 0x1, data.as_bytes(), masked),
            Binary(data) => write_packet(buf, 0x2, &data, masked),
            Close(c, t) => write_close(buf, c, &t, masked),
        }
    }
}
/// Serialize one complete (fin=1) frame with the given opcode and payload
/// into `buf`, masking the payload with random bytes when `mask` is true.
pub(crate) fn write_packet(buf: &mut Buf, opcode: u8, data: &[u8], mask: bool)
{
    debug_assert!(opcode & 0xF0 == 0);
    let first_byte = opcode | 0x80; // always fin
    let mask_bit = if mask { 0x80 } else { 0 };
    // payload length: inline (<=125), 16-bit or 64-bit extended form
    match data.len() {
        len @ 0...125 => {
            buf.extend(&[first_byte, (len as u8) | mask_bit]);
        }
        len @ 126...65535 => {
            buf.extend(&[first_byte, 126 | mask_bit,
                (len >> 8) as u8, (len & 0xFF) as u8]);
        }
        len => {
            buf.extend(&[first_byte, 127 | mask_bit,
                ((len >> 56) & 0xFF) as u8,
                ((len >> 48) & 0xFF) as u8,
                ((len >> 40) & 0xFF) as u8,
                ((len >> 32) & 0xFF) as u8,
                ((len >> 24) & 0xFF) as u8,
                ((len >> 16) & 0xFF) as u8,
                ((len >> 8) & 0xFF) as u8,
                (len & 0xFF) as u8]);
        }
    }
    // when masking, remember where the payload starts so it can be
    // XOR-ed in place after it has been appended
    let mask_data = if mask {
        let mut bytes = [0u8; 4];
        thread_rng().fill_bytes(&mut bytes[..]);
        buf.extend(&bytes[..]);
        Some((buf.len(), bytes))
    } else {
        None
    };
    buf.extend(data);
    if let Some((start, bytes)) = mask_data {
        for idx in 0..(buf.len() - start) { // hopefully llvm will optimize it
            buf[start + idx] ^= bytes[idx % 4];
        }
    };
}
/// Write close message to websocket
///
/// The close payload is a 2-byte status code followed by the reason text;
/// it must fit the inline length form, hence reason is capped at 123 bytes.
pub(crate) fn write_close(buf: &mut Buf, code: u16, reason: &str, mask: bool) {
    let data = reason.as_bytes();
    let mask_bit = if mask { 0x80 } else { 0 };
    assert!(data.len() <= 123);
    // 0x88 = fin bit | close opcode (0x8)
    buf.extend(&[0x88, ((data.len() + 2) as u8) | mask_bit]);
    // when masking, remember where the payload starts so it can be
    // XOR-ed in place after code and reason have been appended
    let mask_data = if mask {
        let mut bytes = [0u8; 4];
        thread_rng().fill_bytes(&mut bytes[..]);
        buf.extend(&bytes[..]);
        Some((buf.len(), bytes))
    } else {
        None
    };
    buf.extend(&[(code >> 8) as u8, (code & 0xFF) as u8]);
    buf.extend(data);
    if let Some((start, bytes)) = mask_data {
        for idx in 0..(buf.len() - start) { // hopefully llvm will optimize it
            buf[start + idx] ^= bytes[idx % 4];
        }
    };
}
#[cfg(test)]
mod test {
    use netbuf::Buf;
    use std::iter::repeat;
    use super::Frame;
    use super::Frame::*;
    // No bytes at all: parse must report "need more data", not an error.
    #[test]
    fn empty_frame() {
        let mut buf = Buf::new();
        assert_eq!(Frame::parse(&mut buf, 1000, false).unwrap(), None);
        assert_eq!(Frame::parse(&mut buf, 1000, true).unwrap(), None);
    }
    // A masked close frame with an empty payload maps to code 1006.
    #[test]
    fn invalid_close_frame() {
        let mut buf = Buf::new();
        let data = b"\x88\x80\x00\x00\x00\x00";
        buf.extend(data);
        assert_eq!(Frame::parse(&mut buf, 1000, true).unwrap(),
            Some((Close(1006, ""), 6)));
    }
    // Masked 5-byte text frame; every truncated prefix yields None.
    #[test]
    fn parse_small_masked() {
        let data = b"\x81\x85\x00\x00\x00\x00hello";
        for i in 0..data.len()-1 {
            let mut buf = Buf::new();
            buf.extend(&data[..i]);
            assert_eq!(Frame::parse(&mut buf, 1000, true).unwrap(), None);
        }
        let mut buf = Buf::new();
        buf.extend(data);
        assert_eq!(Frame::parse(&mut buf, 1000, true).unwrap(),
            Some((Text("hello"), 11)));
    }
    // Masked frame at the maximum inline payload length (125 bytes).
    #[test]
    fn parse_125m() {
        let data = b"\x81\xFD\x00\x00\x00\x00";
        for i in 0..124 {
            let mut buf = Buf::new();
            buf.extend(data);
            for _ in 0..i {
                buf.extend(&[b'x']);
            }
            assert_eq!(Frame::parse(&mut buf, 1000, true).unwrap(), None);
        }
        let mut buf = Buf::new();
        buf.extend(data);
        for _ in 0..125 {
            buf.extend(&[b'x']);
        }
        assert_eq!(Frame::parse(&mut buf, 1000, true).unwrap(),
            Some((Text(&repeat('x').take(125).collect::<String>()), 131)));
    }
    // Masked frame using the 16-bit extended length form (0x1000 bytes).
    #[test]
    fn parse_4k_masked() {
        let data = b"\x81\xFE\x10\x00\x00\x00\x00\x00";
        for i in 0..4095 {
            let mut buf = Buf::new();
            buf.extend(data);
            for _ in 0..i {
                buf.extend(&[b'x']);
            }
            assert_eq!(Frame::parse(&mut buf, 4096, true).unwrap(), None);
        }
        let mut buf = Buf::new();
        buf.extend(data);
        for _ in 0..4096 {
            buf.extend(&[b'x']);
        }
        assert_eq!(Frame::parse(&mut buf, 4096, true).unwrap(),
            Some((Text(&repeat('x').take(4096).collect::<String>()), 4104)));
    }
    // Unmasked (server-to-client) 5-byte text frame.
    #[test]
    fn parse_small() {
        let data = b"\x81\x05hello";
        for i in 0..data.len()-1 {
            let mut buf = Buf::new();
            buf.extend(&data[..i]);
            assert_eq!(Frame::parse(&mut buf, 1000, false).unwrap(), None);
        }
        let mut buf = Buf::new();
        buf.extend(data);
        assert_eq!(Frame::parse(&mut buf, 1000, false).unwrap(),
            Some((Text("hello"), 7)));
    }
    // Unmasked frame at the maximum inline payload length (125 bytes).
    #[test]
    fn parse_125() {
        let data = b"\x81\x7D";
        for i in 0..124 {
            let mut buf = Buf::new();
            buf.extend(data);
            for _ in 0..i {
                buf.extend(&[b'x']);
            }
            assert_eq!(Frame::parse(&mut buf, 1000, false).unwrap(), None);
        }
        let mut buf = Buf::new();
        buf.extend(data);
        for _ in 0..125 {
            buf.extend(&[b'x']);
        }
        assert_eq!(Frame::parse(&mut buf, 1000, false).unwrap(),
            Some((Text(&repeat('x').take(125).collect::<String>()), 127)));
    }
    // Unmasked frame using the 16-bit extended length form (0x1000 bytes).
    #[test]
    fn parse_4k() {
        let data = b"\x81\x7E\x10\x00";
        for i in 0..4095 {
            let mut buf = Buf::new();
            buf.extend(data);
            for _ in 0..i {
                buf.extend(&[b'x']);
            }
            assert_eq!(Frame::parse(&mut buf, 4096, false).unwrap(), None);
        }
        let mut buf = Buf::new();
        buf.extend(data);
        for _ in 0..4096 {
            buf.extend(&[b'x']);
        }
        assert_eq!(Frame::parse(&mut buf, 4096, false).unwrap(),
            Some((Text(&repeat('x').take(4096).collect::<String>()), 4100)));
    }
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/src/websocket/keys.rs | src/websocket/keys.rs | use rand::{Rng, thread_rng};
use std::fmt;
use std::str::{from_utf8_unchecked};
use sha1::Sha1;
/// WebSocket GUID constant (provided by spec)
pub const GUID: &'static str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
/// The `Sec-WebSocket-Accept` header value
///
/// You can add it using `enc.format_header("Sec-WebSocket-Accept", accept)`.
/// Or use any other thing that supports `Display`.
pub struct Accept([u8; 20]);
/// The `Sec-WebSocket-Key` header value
///
/// You can add it using `enc.format_header("Sec-WebSocket-Key", key)`.
/// Or use any other thing that supports `Display`.
pub struct Key([u8; 16]);
impl Key {
/// Create a new (random) key, eligible to use for client connection
pub fn new() -> Key {
let mut key = [0u8; 16];
thread_rng().fill_bytes(&mut key);
return Key(key);
}
}
impl Accept {
/// Create an Accept header value from a key received in header
///
/// Note: key here is a key as passed in header value (base64-encoded)
/// despite that it's accepted as bytes (not as 16 bytes stored in Key)
///
/// Note 2: this does not validate a key (which is not required by spec)
pub fn from_key_bytes(key: &[u8]) -> Accept {
let mut sha1 = Sha1::new();
sha1.update(key);
sha1.update(GUID.as_bytes());
Accept(sha1.digest().bytes())
}
}
impl fmt::Display for Accept {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
const CHARS: &'static[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
abcdefghijklmnopqrstuvwxyz\
0123456789+/";
let mut buf = [0u8; 28];
for i in 0..6 {
let n = ((self.0[i*3+0] as usize) << 16) |
((self.0[i*3+1] as usize) << 8) |
(self.0[i*3+2] as usize) ;
buf[i*4+0] = CHARS[(n >> 18) & 63];
buf[i*4+1] = CHARS[(n >> 12) & 63];
buf[i*4+2] = CHARS[(n >> 6) & 63];
buf[i*4+3] = CHARS[(n >> 0) & 63];
}
let n = ((self.0[18] as usize) << 16) |
((self.0[19] as usize) << 8);
buf[24] = CHARS[(n >> 18) & 63];
buf[25] = CHARS[(n >> 12) & 63];
buf[26] = CHARS[(n >> 6) & 63];
buf[27] = b'=';
fmt::Write::write_str(f, unsafe {
from_utf8_unchecked(&buf)
})
}
}
impl fmt::Debug for Accept {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "websocket::Accept({})", self)
}
}
impl fmt::Display for Key {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
const CHARS: &'static[u8] = b"ABCDEFGHIJKLMNOPQRSTUVWXYZ\
abcdefghijklmnopqrstuvwxyz\
0123456789+/";
let mut buf = [0u8; 24];
for i in 0..5 {
let n = ((self.0[i*3+0] as usize) << 16) |
((self.0[i*3+1] as usize) << 8) |
(self.0[i*3+2] as usize) ;
buf[i*4+0] = CHARS[(n >> 18) & 63];
buf[i*4+1] = CHARS[(n >> 12) & 63];
buf[i*4+2] = CHARS[(n >> 6) & 63];
buf[i*4+3] = CHARS[(n >> 0) & 63];
}
let n = (self.0[15] as usize) << 16;
buf[20] = CHARS[(n >> 18) & 63];
buf[21] = CHARS[(n >> 12) & 63];
buf[22] = b'=';
buf[23] = b'=';
fmt::Write::write_str(f, unsafe {
from_utf8_unchecked(&buf)
})
}
}
impl fmt::Debug for Key {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "websocket::Key({})", self)
}
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/tests/server_simple.rs | tests/server_simple.rs | rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false | |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/examples/chunked.rs | examples/chunked.rs | extern crate tokio_core;
extern crate futures;
extern crate tk_bufstream;
extern crate netbuf;
extern crate tk_http;
extern crate tk_listen;
extern crate env_logger;
use std::env;
use std::time::Duration;
use tokio_core::reactor::Core;
use tokio_core::net::{TcpListener};
use futures::{Stream, Future};
use futures::future::{FutureResult, ok};
use tk_http::Status;
use tk_http::server::buffered::{Request, BufferedDispatcher};
use tk_http::server::{Encoder, EncoderDone, Config, Proto, Error};
use tk_listen::ListenExt;
fn service<S>(req: Request, mut e: Encoder<S>)
-> FutureResult<EncoderDone<S>, Error>
{
println!("{:?} {}", req.method(), req.path());
e.status(Status::Ok);
e.add_chunked().unwrap();
if e.done_headers().unwrap() {
e.write_body(b"Hello world!");
}
ok(e.done())
}
fn main() {
if env::var("RUST_LOG").is_err() {
env::set_var("RUST_LOG", "info");
}
env_logger::init().expect("init logging");
let mut lp = Core::new().unwrap();
let addr = "0.0.0.0:8080".parse().unwrap();
let listener = TcpListener::bind(&addr, &lp.handle()).unwrap();
let cfg = Config::new().done();
let h1 = lp.handle();
let done = listener.incoming()
.sleep_on_error(Duration::from_millis(100), &lp.handle())
.map(|(socket, addr)| {
Proto::new(socket, &cfg,
BufferedDispatcher::new(addr, &h1, || service),
&h1)
.map_err(|e| { println!("Connection error: {}", e); })
})
.listen(1000);
lp.run(done).unwrap();
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/examples/websockets.rs | examples/websockets.rs | extern crate time;
extern crate tokio_core;
extern crate futures;
extern crate tk_bufstream;
extern crate netbuf;
extern crate tk_http;
extern crate tk_listen;
#[macro_use] extern crate log;
extern crate env_logger;
use std::env;
use std::time::Duration;
use tokio_core::reactor::Core;
use tokio_core::net::{TcpListener};
use futures::{Stream, Future};
use futures::future::{FutureResult, ok};
use tk_http::{Status};
use tk_http::server::buffered::{Request, BufferedDispatcher};
use tk_http::server::{Encoder, EncoderDone, Config, Proto, Error};
use tk_listen::ListenExt;
const INDEX: &'static str = include_str!("ws.html");
const JS: &'static str = include_str!("ws.js");
fn service<S>(req: Request, mut e: Encoder<S>)
-> FutureResult<EncoderDone<S>, Error>
{
if let Some(ws) = req.websocket_handshake() {
e.status(Status::SwitchingProtocol);
e.format_header("Date", time::now_utc().rfc822()).unwrap();
e.add_header("Server",
concat!("tk_http/", env!("CARGO_PKG_VERSION"))
).unwrap();
e.add_header("Connection", "upgrade").unwrap();
e.add_header("Upgrade", "websocket").unwrap();
e.format_header("Sec-Websocket-Accept", &ws.accept).unwrap();
e.done_headers().unwrap();
ok(e.done())
} else {
let (data, ctype) = match req.path() {
"/ws.js" => (JS, "text/javascript; charset=utf-8"),
_ => (INDEX, "text/html; charset=utf-8"),
};
e.status(Status::Ok);
e.add_length(data.as_bytes().len() as u64).unwrap();
e.format_header("Date", time::now_utc().rfc822()).unwrap();
e.add_header("Content-Type", ctype).unwrap();
e.add_header("Server",
concat!("tk_http/", env!("CARGO_PKG_VERSION"))
).unwrap();
if e.done_headers().unwrap() {
e.write_body(data.as_bytes());
}
ok(e.done())
}
}
fn main() {
if env::var("RUST_LOG").is_err() {
env::set_var("RUST_LOG", "info");
}
env_logger::init().expect("init logging");
let mut lp = Core::new().unwrap();
let h1 = lp.handle();
let addr = "0.0.0.0:8080".parse().unwrap();
let listener = TcpListener::bind(&addr, &lp.handle()).unwrap();
let cfg = Config::new().done();
let done = listener.incoming()
.sleep_on_error(Duration::from_millis(100), &lp.handle())
.map(move |(socket, addr)| {
Proto::new(socket, &cfg,
BufferedDispatcher::new_with_websockets(addr, &h1,
service,
|out, inp| {
inp.forward(out)
.map(|_| ())
.map_err(|e| error!("Websock err: {}", e))
}),
&h1)
.map_err(|e| { println!("Connection error: {}", e); })
.then(|_| Ok(())) // don't fail, please
})
.listen(1000);
lp.run(done).unwrap();
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/examples/hello_world.rs | examples/hello_world.rs | extern crate time;
extern crate tokio_core;
extern crate futures;
extern crate tk_bufstream;
extern crate netbuf;
extern crate tk_http;
extern crate tk_listen;
extern crate env_logger;
use std::env;
use std::time::Duration;
use tokio_core::reactor::Core;
use tokio_core::net::{TcpListener};
use futures::{Stream, Future};
use futures::future::{FutureResult, ok};
use tk_http::{Status};
use tk_http::server::buffered::{Request, BufferedDispatcher};
use tk_http::server::{Encoder, EncoderDone, Config, Proto, Error};
use tk_listen::ListenExt;
const BODY: &'static str = "Hello World!";
fn service<S>(_: Request, mut e: Encoder<S>)
-> FutureResult<EncoderDone<S>, Error>
{
e.status(Status::Ok);
e.add_length(BODY.as_bytes().len() as u64).unwrap();
e.format_header("Date", time::now_utc().rfc822()).unwrap();
e.add_header("Server",
concat!("tk_http/", env!("CARGO_PKG_VERSION"))
).unwrap();
if e.done_headers().unwrap() {
e.write_body(BODY.as_bytes());
}
ok(e.done())
}
fn main() {
if env::var("RUST_LOG").is_err() {
env::set_var("RUST_LOG", "info");
}
env_logger::init().expect("init logging");
let mut lp = Core::new().unwrap();
let addr = "0.0.0.0:8080".parse().unwrap();
let listener = TcpListener::bind(&addr, &lp.handle()).unwrap();
let cfg = Config::new().done();
let h1 = lp.handle();
let done = listener.incoming()
.sleep_on_error(Duration::from_millis(100), &lp.handle())
.map(move |(socket, addr)| {
Proto::new(socket, &cfg,
BufferedDispatcher::new(addr, &h1, || service),
&h1)
.map_err(|e| { println!("Connection error: {}", e); })
})
.listen(1000);
lp.run(done).unwrap();
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/examples/native_tls_client.rs | examples/native_tls_client.rs | extern crate argparse;
extern crate env_logger;
extern crate futures;
extern crate tk_http;
extern crate tokio_core;
extern crate url;
extern crate native_tls;
extern crate tokio_tls;
#[macro_use] extern crate log;
use std::io::{self, Write};
use std::env;
use std::net::ToSocketAddrs;
use std::sync::Arc;
use futures::{Future, Sink};
use native_tls::TlsConnector;
use tokio_core::net::TcpStream;
use tokio_tls::TlsConnectorExt;
use tk_http::client::buffered::{Buffered};
use tk_http::client::{Proto, Config, Error};
pub fn main() {
if env::var("RUST_LOG").is_err() {
env::set_var("RUST_LOG", "warn");
}
env_logger::init().unwrap();
let host = "www.rust-lang.org";
let uri = ["https://", host, "/documentation.html"].join("");
let mut lp = tokio_core::reactor::Core::new().expect("loop created");
let handle = lp.handle();
let h2 = lp.handle();
let addr = (host, 443).to_socket_addrs()
.expect("resolve address").next().expect("at least one IP");
let cx = TlsConnector::builder().unwrap().build().unwrap();
let response = lp.run(futures::lazy(move || {
TcpStream::connect(&addr, &handle)
.and_then(move |sock| {
cx.connect_async(host, sock).map_err(|e| {
io::Error::new(io::ErrorKind::Other, e)
})
})
.map_err(|e| error!("{}", e))
.and_then(move |sock| {
let (codec, receiver) = Buffered::get(
uri.parse().unwrap());
let proto = Proto::new(sock, &h2, &Arc::new(Config::new()));
proto.send(codec)
.join(receiver.map_err(|_| -> Error { unimplemented!() }))
.map_err(|e| e)
.and_then(|(_proto, result)| {
result
})
.map_err(|e| error!("{}", e))
})
})).expect("request failed");
io::stdout().write_all(response.body()).unwrap();
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/examples/tls_client.rs | examples/tls_client.rs | extern crate argparse;
extern crate env_logger;
extern crate futures;
extern crate tk_http;
extern crate tokio_core;
extern crate url;
extern crate rustls;
extern crate tokio_rustls;
extern crate webpki;
#[macro_use] extern crate log;
use std::io::{self, Write, BufReader};
use std::env;
use std::fs::File;
use std::net::ToSocketAddrs;
use std::sync::Arc;
use futures::{Future, Sink};
use rustls::ClientConfig;
use tokio_core::net::TcpStream;
use tokio_rustls::ClientConfigExt;
use tk_http::client::buffered::{Buffered};
use tk_http::client::{Proto, Config, Error};
use webpki::DNSNameRef;
pub fn main() {
if env::var("RUST_LOG").is_err() {
env::set_var("RUST_LOG", "warn");
}
env_logger::init().unwrap();
let host = "www.rust-lang.org";
let uri = ["https://", host, "/documentation.html"].join("");
let mut lp = tokio_core::reactor::Core::new().expect("loop created");
let handle = lp.handle();
let h2 = lp.handle();
let addr = (host, 443).to_socket_addrs()
.expect("resolve address").next().expect("at least one IP");
let config = Arc::new({
let mut cfg = ClientConfig::new();
let mut pem = BufReader::new(
File::open("/etc/ssl/certs/ca-certificates.crt")
.expect("certificates exist"));
cfg.root_store.add_pem_file(&mut pem).unwrap();
cfg
});
let host = DNSNameRef::try_from_ascii_str(host).expect("host is valid");
let response = lp.run(futures::lazy(move || {
TcpStream::connect(&addr, &handle)
.and_then(move |sock| config.connect_async(host, sock))
.map_err(|e| error!("{}", e))
.and_then(move |sock| {
let (codec, receiver) = Buffered::get(
uri.parse().unwrap());
let proto = Proto::new(sock, &h2, &Arc::new(Config::new()));
proto.send(codec)
.join(receiver.map_err(|_| -> Error { unimplemented!() }))
.map_err(|e| e)
.and_then(|(_proto, result)| {
result
})
.map_err(|e| error!("{}", e))
})
})).expect("request failed");
io::stdout().write_all(response.body()).unwrap();
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/examples/counter.rs | examples/counter.rs | extern crate time;
extern crate tokio_core;
extern crate futures;
extern crate tk_bufstream;
extern crate netbuf;
extern crate tk_http;
extern crate tk_listen;
extern crate env_logger;
use std::env;
use std::sync::Arc;
use std::sync::atomic::{Ordering, AtomicUsize};
use std::time::Duration;
use tokio_core::reactor::Core;
use tokio_core::net::{TcpListener};
use futures::{Stream, Future};
use futures::future::{FutureResult, ok};
use tk_http::{Status};
use tk_http::server::buffered::{Request, BufferedDispatcher};
use tk_http::server::{Encoder, EncoderDone, Config, Proto, Error};
use tk_listen::ListenExt;
fn service<S>(counter: usize, _: Request, mut e: Encoder<S>)
-> FutureResult<EncoderDone<S>, Error>
{
let formatted = format!("Visit #{}", counter);
e.status(Status::Ok);
e.add_length(formatted.as_bytes().len() as u64).unwrap();
e.format_header("Date", time::now_utc().rfc822()).unwrap();
e.add_header("Server",
concat!("tk_http/", env!("CARGO_PKG_VERSION"))
).unwrap();
if e.done_headers().unwrap() {
e.write_body(formatted.as_bytes());
}
ok(e.done())
}
fn main() {
if env::var("RUST_LOG").is_err() {
env::set_var("RUST_LOG", "info");
}
env_logger::init().expect("init logging");
let mut lp = Core::new().unwrap();
let addr = "0.0.0.0:8080".parse().unwrap();
let listener = TcpListener::bind(&addr, &lp.handle()).unwrap();
let cfg = Config::new().done();
let h1 = lp.handle();
let counter = Arc::new(AtomicUsize::new(0));
let done = listener.incoming()
.sleep_on_error(Duration::from_millis(100), &lp.handle())
.map(move |(socket, addr)| {
let counter = counter.clone();
Proto::new(socket, &cfg,
BufferedDispatcher::new(addr, &h1, move || {
let counter = counter.clone();
move |r, e| {
let val = counter.fetch_add(1, Ordering::SeqCst);
service(val, r, e)
}
}),
&h1)
.map_err(|e| { println!("Connection error: {}", e); })
})
.listen(1000);
lp.run(done).unwrap();
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/examples/ws_cli.rs | examples/ws_cli.rs | extern crate futures;
extern crate tk_http;
extern crate argparse;
extern crate env_logger;
extern crate tokio_core;
#[macro_use] extern crate log;
use std::env;
use std::time::Duration;
use std::net::ToSocketAddrs;
use futures::{Future, Stream};
use futures::future::{FutureResult, ok};
use futures::sync::mpsc::unbounded;
use tokio_core::net::TcpStream;
use tokio_core::reactor::{Timeout};
use tk_http::websocket::{Loop, Frame, Error, Dispatcher, Config};
use tk_http::websocket::client::{HandshakeProto, SimpleAuthorizer};
use tk_http::websocket::Packet::{Text};
struct Echo;
impl Dispatcher for Echo {
type Future = FutureResult<(), Error>;
fn frame(&mut self, frame: &Frame) -> FutureResult<(), Error> {
println!("Frame arrived: {:?}", frame);
ok(())
}
}
pub fn main() {
if env::var("RUST_LOG").is_err() {
env::set_var("RUST_LOG", "warn");
}
env_logger::init().unwrap();
let mut lp = tokio_core::reactor::Core::new().expect("loop created");
let handle = lp.handle();
let h2 = lp.handle();
let addr = ("echo.websocket.org", 80).to_socket_addrs()
.expect("resolve address").next().expect("at least one IP");
let wcfg = Config::new().done();
lp.run(futures::lazy(move || {
TcpStream::connect(&addr, &handle)
.map_err(|e| error!("Error {}", e))
.and_then(|sock| {
HandshakeProto::new(sock, SimpleAuthorizer::new(
"echo.websocket.org", "/"))
.map_err(|e| error!("Error {}", e))
})
.and_then(move |(out, inp, ())| {
println!("Connected");
let (tx, rx) = unbounded();
println!("Preparing to send packet in 5 seconds");
let tx2 = tx.clone();
h2.spawn(
Timeout::new(Duration::new(5, 0), &h2).unwrap()
.map_err(|_| unreachable!())
.and_then(move |_| {
println!("Sending 'hello'");
tx2.unbounded_send(Text("hello".to_string()))
.map_err(|_| ())
})
.then(|_| Ok(())));
let rx = rx.map_err(|_| format!("stream closed"));
Loop::client(out, inp, rx, Echo, &wcfg, &h2)
.map_err(|e| println!("websocket closed: {}", e))
})
.then(|_| -> Result<(), &'static str> { Ok(()) })
})).expect("request failed");
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/examples/generate_on_the_fly.rs | examples/generate_on_the_fly.rs | extern crate env_logger;
extern crate futures;
extern crate netbuf;
extern crate tk_bufstream;
extern crate tk_http;
extern crate tk_listen;
extern crate tokio_core;
extern crate tokio_io;
use std::env;
use std::time::Duration;
use tokio_core::reactor::Core;
use tokio_core::net::{TcpListener};
use tokio_io::AsyncWrite;
use futures::{Stream, Future, Async};
use futures::future::{FutureResult, ok, Either};
use tk_http::Status;
use tk_http::server::buffered::{Request, BufferedDispatcher};
use tk_http::server::{Encoder, EncoderDone, Config, Proto, Error};
use tk_listen::ListenExt;
struct Fibonacci<S> {
encoder: Encoder<S>,
current: u64,
}
impl<S: AsyncWrite> Future for Fibonacci<S> {
type Item = EncoderDone<S>;
type Error = Error;
fn poll(&mut self) -> Result<Async<EncoderDone<S>>, Error> {
use std::io::Write;
while self.encoder.bytes_buffered() < 4096 {
for _ in 0..1000 {
self.current += 1;
writeln!(self.encoder, "{}", self.current).unwrap();
}
if self.current % 1000000 == 0 {
println!("Reached {}M", self.current / 1000000);
}
self.encoder.flush()?;
}
Ok(Async::NotReady)
}
}
fn service<S>(req: Request, mut e: Encoder<S>)
-> Either<Fibonacci<S>, FutureResult<EncoderDone<S>, Error>>
{
println!("{:?} {}", req.method(), req.path());
e.status(Status::Ok);
e.add_chunked().unwrap();
if e.done_headers().unwrap() {
Either::A(Fibonacci {
encoder: e,
current: 1,
})
} else {
Either::B(ok(e.done()))
}
}
fn main() {
if env::var("RUST_LOG").is_err() {
env::set_var("RUST_LOG", "info");
}
env_logger::init().expect("init logging");
let mut lp = Core::new().unwrap();
let addr = "0.0.0.0:8080".parse().unwrap();
let listener = TcpListener::bind(&addr, &lp.handle()).unwrap();
let cfg = Config::new().done();
let h1 = lp.handle();
let done = listener.incoming()
.sleep_on_error(Duration::from_millis(100), &lp.handle())
.map(|(socket, addr)| {
Proto::new(socket, &cfg,
BufferedDispatcher::new(addr, &h1, || service),
&h1)
.map_err(|e| { println!("Connection error: {}", e); })
})
.listen(1000);
lp.run(done).unwrap();
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/examples/sendfile.rs | examples/sendfile.rs | extern crate tokio_core;
extern crate futures;
extern crate futures_cpupool;
extern crate netbuf;
extern crate argparse;
extern crate tk_http;
extern crate tk_sendfile;
extern crate tk_bufstream;
extern crate tk_listen;
extern crate log;
extern crate env_logger;
use std::env;
use std::net::SocketAddr;
use std::path::PathBuf;
use std::time::Duration;
use argparse::{ArgumentParser, Parse};
use tokio_core::reactor::Core;
use tokio_core::net::{TcpListener};
use futures::{Stream, Future};
use futures_cpupool::CpuPool;
use tk_sendfile::DiskPool;
use futures::future::{ok};
use tk_http::Status;
use tk_http::server::buffered::{BufferedDispatcher};
use tk_http::server::{Encoder, Config, Proto, Error};
use tk_listen::ListenExt;
fn main() {
let mut filename = PathBuf::from("examples/sendfile.rs");
let mut addr = "127.0.0.1:8080".parse::<SocketAddr>().unwrap();
{
let mut ap = ArgumentParser::new();
ap.set_description("Serve a file via HTTP on any open connection");
ap.refer(&mut addr)
.add_option(&["-l", "--listen"], Parse,
"Listening address");
ap.refer(&mut filename)
.add_option(&["-f", "--filename"], Parse,
"File to serve");
ap.parse_args_or_exit();
}
if env::var("RUST_LOG").is_err() {
env::set_var("RUST_LOG", "info");
}
env_logger::init().expect("init logging");
let mut lp = Core::new().unwrap();
let listener = TcpListener::bind(&addr, &lp.handle()).unwrap();
let disk_pool = DiskPool::new(CpuPool::new(40));
let cfg = Config::new().done();
let h1 = lp.handle();
let done = listener.incoming()
.sleep_on_error(Duration::from_millis(100), &lp.handle())
.map(move |(socket, addr)| {
let filename = filename.clone();
let disk_pool = disk_pool.clone();
Proto::new(socket, &cfg,
BufferedDispatcher::new(addr, &h1, move || {
let filename = filename.clone();
let disk_pool = disk_pool.clone();
move |_, mut e: Encoder<_>| {
disk_pool.open(filename.clone())
.and_then(move |file| {
e.status(Status::Ok);
e.add_length(file.size()).unwrap();
if e.done_headers().unwrap() {
Box::new(e.raw_body()
.and_then(|raw_body| file.write_into(raw_body))
.map(|raw_body| raw_body.done()))
as Box<Future<Item=_, Error=_>>
} else {
Box::new(ok(e.done()))
as Box<Future<Item=_, Error=_>>
}
})
.map_err(|_| -> Error { unimplemented!(); })
}
}),
&h1)
.map_err(|e| { println!("Connection error: {}", e); })
})
.listen(1000);
lp.run(done).unwrap();
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
swindon-rs/tk-http | https://github.com/swindon-rs/tk-http/blob/ffd17e670b448c09e11d295261bc58832961e51a/examples/websocket2.rs | examples/websocket2.rs | extern crate time;
extern crate tokio_core;
extern crate futures;
extern crate tk_bufstream;
extern crate netbuf;
extern crate tk_http;
extern crate tk_listen;
extern crate env_logger;
use std::env;
use std::time::Duration;
use tokio_core::reactor::{Core, Timeout};
use tokio_core::net::{TcpListener};
use futures::{Stream, Future, Sink};
use futures::future::{FutureResult, ok};
use futures::sync::mpsc::{unbounded, UnboundedSender};
use tk_http::{Status};
use tk_http::server::buffered::{Request, BufferedDispatcher};
use tk_http::server::{Encoder, EncoderDone, Config, Proto, Error};
use tk_http::websocket::{Loop, Config as WebsockConfig, Dispatcher, Frame};
use tk_http::websocket::{Error as WsErr};
use tk_http::websocket::Packet::{self, Text};
use tk_listen::ListenExt;
const INDEX: &'static str = include_str!("ws.html");
const JS: &'static str = include_str!("ws.js");
fn service<S>(req: Request, mut e: Encoder<S>)
-> FutureResult<EncoderDone<S>, Error>
{
if let Some(ws) = req.websocket_handshake() {
e.status(Status::SwitchingProtocol);
e.format_header("Date", time::now_utc().rfc822()).unwrap();
e.add_header("Server",
concat!("tk_http/", env!("CARGO_PKG_VERSION"))
).unwrap();
e.add_header("Connection", "upgrade").unwrap();
e.add_header("Upgrade", "websocket").unwrap();
e.format_header("Sec-Websocket-Accept", &ws.accept).unwrap();
e.done_headers().unwrap();
ok(e.done())
} else {
let (data, ctype) = match req.path() {
"/ws.js" => (JS, "text/javascript; charset=utf-8"),
_ => (INDEX, "text/html; charset=utf-8"),
};
e.status(Status::Ok);
e.add_length(data.as_bytes().len() as u64).unwrap();
e.format_header("Date", time::now_utc().rfc822()).unwrap();
e.add_header("Content-Type", ctype).unwrap();
e.add_header("Server",
concat!("tk_http/", env!("CARGO_PKG_VERSION"))
).unwrap();
if e.done_headers().unwrap() {
e.write_body(data.as_bytes());
}
ok(e.done())
}
}
struct Echo(UnboundedSender<Packet>);
impl Dispatcher for Echo {
type Future = FutureResult<(), WsErr>;
fn frame(&mut self, frame: &Frame) -> FutureResult<(), WsErr> {
println!("Received frame: {:?}. Echoing...", frame);
self.0.start_send(frame.into()).unwrap();
ok(())
}
}
fn main() {
if env::var("RUST_LOG").is_err() {
env::set_var("RUST_LOG", "info");
}
env_logger::init().expect("init logging");
let mut lp = Core::new().unwrap();
let h1 = lp.handle();
let addr = "0.0.0.0:8080".parse().unwrap();
let listener = TcpListener::bind(&addr, &lp.handle()).unwrap();
let cfg = Config::new().done();
let wcfg = WebsockConfig::new().done();
let done = listener.incoming()
.sleep_on_error(Duration::from_millis(100), &lp.handle())
.map(move |(socket, addr)| {
let wcfg = wcfg.clone();
let h2 = h1.clone();
Proto::new(socket, &cfg,
BufferedDispatcher::new_with_websockets(addr, &h1,
service,
move |out, inp| {
let (tx, rx) = unbounded();
let tx2 = tx.clone();
h2.spawn(
Timeout::new(Duration::new(10, 0), &h2).unwrap()
.map_err(|_| unreachable!())
.and_then(move |_| {
tx2.send(Text("hello".to_string()))
.map_err(|_| ())
})
.then(|_| Ok(())));
let rx = rx.map_err(|_| format!("stream closed"));
Loop::server(out, inp, rx, Echo(tx), &wcfg, &h2)
.map_err(|e| println!("websocket closed: {}", e))
}),
&h1)
.map_err(|e| { println!("Connection error: {}", e); })
.then(|_| Ok(())) // don't fail, please
})
.listen(1000);
lp.run(done).unwrap();
}
| rust | Apache-2.0 | ffd17e670b448c09e11d295261bc58832961e51a | 2026-01-04T20:20:02.650005Z | false |
Rahix/shared-bus | https://github.com/Rahix/shared-bus/blob/7ee581be3ea32fcc8eff7af97e15d993683c549b/src/lib.rs | src/lib.rs | //! **shared-bus** is a crate to allow sharing bus peripherals safely between multiple devices.
//!
//! In the `embedded-hal` ecosystem, it is convention for drivers to "own" the bus peripheral they
//! are operating on. This implies that only _one_ driver can have access to a certain bus. That,
//! of course, poses an issue when multiple devices are connected to a single bus.
//!
//! _shared-bus_ solves this by giving each driver a bus-proxy to own which internally manages
//! access to the actual bus in a safe manner. For a more in-depth introduction of the problem
//! this crate is trying to solve, take a look at the [blog post][blog-post].
//!
//! There are different 'bus managers' for different use-cases:
//!
//! # Sharing within a single task/thread
//! As long as all users of a bus are contained in a single task/thread, bus sharing is very
//! simple. With no concurrency possible, no special synchronization is needed. This is where
//! a [`BusManagerSimple`] should be used:
//!
//! ```
//! # use embedded_hal::blocking::i2c;
//! # use embedded_hal::blocking::i2c::Write as _;
//! # struct MyDevice<T>(T);
//! # impl<T: i2c::Write> MyDevice<T> {
//! # pub fn new(t: T) -> Self { MyDevice(t) }
//! # pub fn do_something_on_the_bus(&mut self) {
//! # self.0.write(0xab, &[0x00]);
//! # }
//! # }
//! #
//! # fn _example(i2c: impl i2c::Write) {
//! // For example:
//! // let i2c = I2c::i2c1(dp.I2C1, (scl, sda), 90.khz(), clocks, &mut rcc.apb1);
//!
//! let bus = shared_bus::BusManagerSimple::new(i2c);
//!
//! let mut proxy1 = bus.acquire_i2c();
//! let mut my_device = MyDevice::new(bus.acquire_i2c());
//!
//! proxy1.write(0x39, &[0xc0, 0xff, 0xee]);
//! my_device.do_something_on_the_bus();
//! # }
//! ```
//!
//! The `BusManager::acquire_*()` methods can be called as often as needed; each call will yield
//! a new bus-proxy of the requested type.
//!
//! # Sharing across multiple tasks/threads
//! For sharing across multiple tasks/threads, synchronization is needed to ensure all bus-accesses
//! are strictly serialized and can't race against each other. The synchronization is handled by
//! a platform-specific [`BusMutex`] implementation. _shared-bus_ already contains some
//! implementations for common targets. For each one, there is also a macro for easily creating
//! a bus-manager with `'static` lifetime, which is almost always a requirement when sharing across
//! task/thread boundaries. As an example:
//!
//! ```
//! # struct MyDevice<T>(T);
//! # impl<T> MyDevice<T> {
//! # pub fn new(t: T) -> Self { MyDevice(t) }
//! # pub fn do_something_on_the_bus(&mut self) { }
//! # }
//! #
//! # struct SomeI2cBus;
//! # let i2c = SomeI2cBus;
//! // For example:
//! // let i2c = I2c::i2c1(dp.I2C1, (scl, sda), 90.khz(), clocks, &mut rcc.apb1);
//!
//! // The bus is a 'static reference -> it lives forever and references can be
//! // shared with other threads.
//! let bus: &'static _ = shared_bus::new_std!(SomeI2cBus = i2c).unwrap();
//!
//! let mut proxy1 = bus.acquire_i2c();
//! let mut my_device = MyDevice::new(bus.acquire_i2c());
//!
//! // We can easily move a proxy to another thread:
//! # let t =
//! std::thread::spawn(move || {
//! my_device.do_something_on_the_bus();
//! });
//! # t.join().unwrap();
//! ```
//!
//! Those platform-specific bits are guarded by a feature that needs to be enabled. Here is an
//! overview of what's already available:
//!
//! | Mutex | Bus Manager | `'static` Bus Macro | Feature Name |
//! | --- | --- | --- | --- |
//! | `std::sync::Mutex` | [`BusManagerStd`] | [`new_std!()`] | `std` |
//! | `cortex_m::interrupt::Mutex` | [`BusManagerCortexM`] | [`new_cortexm!()`] | `cortex-m` |
//! | `shared_bus::XtensaMutex` (`spin::Mutex` in critical section) | [`BusManagerXtensa`] | [`new_xtensa!()`] | `xtensa` |
//! | None (Automatically Managed) | [`BusManagerAtomicCheck`] | [`new_atomic_check!()`] | `cortex-m` |
//!
//! # Supported buses and hardware blocks
//! Currently, the following buses/blocks can be shared with _shared-bus_:
//!
//! | Bus/Block | Proxy Type | Acquire Method | Comments |
//! | --- | --- | --- | --- |
//! | I2C | [`I2cProxy`] | [`.acquire_i2c()`] | |
//! | SPI | [`SpiProxy`] | [`.acquire_spi()`] | SPI can only be shared within a single task (See [`SpiProxy`] for details). |
//! | ADC | [`AdcProxy`] | [`.acquire_adc()`] | |
//!
//!
//! [`.acquire_i2c()`]: ./struct.BusManager.html#method.acquire_i2c
//! [`.acquire_spi()`]: ./struct.BusManager.html#method.acquire_spi
//! [`.acquire_adc()`]: ./struct.BusManager.html#method.acquire_adc
//! [`BusManagerCortexM`]: ./type.BusManagerCortexM.html
//! [`BusManagerXtensa`]: ./type.BusManagerXtensa.html
//! [`BusManagerAtomicCheck`]: ./type.BusManagerAtomicCheck.html
//! [`BusManagerSimple`]: ./type.BusManagerSimple.html
//! [`BusManagerStd`]: ./type.BusManagerStd.html
//! [`BusMutex`]: ./trait.BusMutex.html
//! [`I2cProxy`]: ./struct.I2cProxy.html
//! [`SpiProxy`]: ./struct.SpiProxy.html
//! [`AdcProxy`]: ./struct.AdcProxy.html
//! [`new_cortexm!()`]: ./macro.new_cortexm.html
//! [`new_xtensa!()`]: ./macro.new_xtensa.html
//! [`new_std!()`]: ./macro.new_std.html
//! [`new_atomic_check!()`]: ./macro.new_atomic_check.html
//! [blog-post]: https://blog.rahix.de/001-shared-bus
#![doc(html_root_url = "https://docs.rs/shared-bus")]
#![cfg_attr(not(feature = "std"), no_std)]
#![warn(missing_docs)]
mod macros;
mod manager;
mod mutex;
mod proxies;
#[doc(hidden)]
#[cfg(feature = "std")]
pub use once_cell;
#[doc(hidden)]
#[cfg(feature = "cortex-m")]
pub use cortex_m;
#[doc(hidden)]
#[cfg(feature = "xtensa")]
pub use xtensa_lx;
pub use manager::BusManager;
pub use mutex::BusMutex;
#[cfg(feature = "cortex-m")]
pub use mutex::CortexMMutex;
pub use mutex::NullMutex;
#[cfg(feature = "xtensa")]
pub use mutex::XtensaMutex;
pub use proxies::AdcProxy;
pub use proxies::I2cProxy;
pub use proxies::SpiProxy;
#[cfg(feature = "cortex-m")]
pub use mutex::AtomicCheckMutex;
/// A bus manager for sharing within a single task/thread.
///
/// This is the bus manager with the least overhead; it should always be used when all bus users
/// are confined to a single task/thread as it has no side-effects (like blocking or turning off
/// interrupts).
///
/// # Example
/// ```
/// # use embedded_hal::blocking::i2c;
/// # use embedded_hal::blocking::i2c::Write as _;
/// # struct MyDevice<T>(T);
/// # impl<T: i2c::Write> MyDevice<T> {
/// # pub fn new(t: T) -> Self { MyDevice(t) }
/// # pub fn do_something_on_the_bus(&mut self) {
/// # self.0.write(0xab, &[0x00]);
/// # }
/// # }
/// #
/// # fn _example(i2c: impl i2c::Write) {
/// // For example:
/// // let i2c = I2c::i2c1(dp.I2C1, (scl, sda), 90.khz(), clocks, &mut rcc.apb1);
///
/// let bus = shared_bus::BusManagerSimple::new(i2c);
///
/// let mut proxy1 = bus.acquire_i2c();
/// let mut my_device = MyDevice::new(bus.acquire_i2c());
///
/// proxy1.write(0x39, &[0xc0, 0xff, 0xee]);
/// my_device.do_something_on_the_bus();
/// # }
/// ```
// `NullMutex` is a plain `RefCell` wrapper with no real locking; see its docs
// for why that confines the manager to a single execution context.
pub type BusManagerSimple<BUS> = BusManager<NullMutex<BUS>>;
/// A bus manager for safely sharing between threads on a platform with `std` support.
///
/// This manager internally uses a `std::sync::Mutex` for synchronizing bus accesses. As sharing
/// across threads will in most cases require a manager with `'static` lifetime, the
/// [`shared_bus::new_std!()`][new_std] macro exists to create such a bus manager.
///
/// [new_std]: ./macro.new_std.html
///
/// This type is only available with the `std` feature.
// Backed by `std::sync::Mutex`, so proxies may be used from multiple threads.
#[cfg(feature = "std")]
pub type BusManagerStd<BUS> = BusManager<::std::sync::Mutex<BUS>>;
/// A bus manager for safely sharing between tasks on Cortex-M.
///
/// This manager works by turning off interrupts for each bus transaction which prevents racy
/// accesses from different tasks/execution contexts (e.g. interrupts). Usually, for sharing
/// between tasks, a manager with `'static` lifetime is needed which can be created using the
/// [`shared_bus::new_cortexm!()`][new_cortexm] macro.
///
/// [new_cortexm]: ./macro.new_cortexm.html
///
/// This type is only available with the `cortex-m` feature.
// Backed by `CortexMMutex`: each bus transaction runs in a critical section.
#[cfg(feature = "cortex-m")]
pub type BusManagerCortexM<BUS> = BusManager<CortexMMutex<BUS>>;
/// A bus manager for safely sharing between tasks on Xtensa-lx6.
///
/// This manager works by turning off interrupts for each bus transaction which prevents racy
/// accesses from different tasks/execution contexts (e.g. interrupts). Usually, for sharing
/// between tasks, a manager with `'static` lifetime is needed which can be created using the
/// [`shared_bus::new_xtensa!()`][new_xtensa] macro.
///
/// [new_xtensa]: ./macro.new_xtensa.html
///
/// This type is only available with the `xtensa` feature.
// Backed by `XtensaMutex` (interrupt-free spin lock) per bus transaction.
#[cfg(feature = "xtensa")]
pub type BusManagerXtensa<BUS> = BusManager<XtensaMutex<BUS>>;
/// A bus manager for safely sharing the bus when using concurrency frameworks (such as RTIC).
///
/// This manager relies on RTIC or some other concurrency framework to manage resource
/// contention automatically. As a redundancy, this manager uses an atomic boolean to check
/// whether or not a resource is currently in use. This is purely used as a fail-safe against
/// misuse.
///
/// ## Warning
/// If devices on the same shared bus are not treated as a singular resource, it is possible that
/// pre-emption may occur. In this case, the manager will panic to prevent the race condition.
///
/// ## Usage
/// In order to use this manager with a concurrency framework such as RTIC, all devices on the
/// shared bus must be stored in the same logic resource. The concurrency framework will require a
/// resource lock if pre-emption is possible.
///
/// In order to use this with RTIC (as an example), all devices on the shared bus must be stored in
/// a singular resource. Additionally, a manager with `'static` lifetime is needed which can be
/// created using the [`shared_bus::new_atomic_check!()`][new_atomic_check] macro. It should
/// roughly look like this (there is also a [full example][shared-bus-rtic-example] available):
///
/// ```rust
/// struct Device<T> { bus: T };
/// struct OtherDevice<T> { bus: T };
///
/// // the HAL I2C driver type
/// type I2cType = ();
/// type Proxy = shared_bus::I2cProxy<'static, shared_bus::AtomicCheckMutex<I2cType>>;
///
/// struct SharedBusDevices {
/// device: Device<Proxy>,
/// other_device: OtherDevice<Proxy>,
/// }
///
/// struct Resources {
/// shared_bus_devices: SharedBusDevices,
/// }
///
///
/// // in the RTIC init function
/// fn init() -> Resources {
/// // init the bus like usual
/// let i2c: I2cType = ();
///
/// let bus_manager: &'static _ = shared_bus::new_atomic_check!(I2cType = i2c).unwrap();
///
/// let devices = SharedBusDevices {
/// device: Device { bus: bus_manager.acquire_i2c() },
/// other_device: OtherDevice { bus: bus_manager.acquire_i2c() },
/// };
///
/// Resources {
/// shared_bus_devices: devices,
/// }
/// }
/// ```
///
/// [new_atomic_check]: ./macro.new_atomic_check.html
/// [shared-bus-rtic-example]: https://github.com/ryan-summers/shared-bus-example/blob/master/src/main.rs
///
/// This type is only available with the `cortex-m` feature (but this may change in the future!).
// Backed by `AtomicCheckMutex`: no blocking, just a runtime collision check.
#[cfg(feature = "cortex-m")]
pub type BusManagerAtomicCheck<T> = BusManager<AtomicCheckMutex<T>>;
| rust | Apache-2.0 | 7ee581be3ea32fcc8eff7af97e15d993683c549b | 2026-01-04T20:20:06.411723Z | false |
Rahix/shared-bus | https://github.com/Rahix/shared-bus/blob/7ee581be3ea32fcc8eff7af97e15d993683c549b/src/mutex.rs | src/mutex.rs | use core::cell;
/// Common interface for mutex implementations.
///
/// `shared-bus` needs a mutex to ensure only a single device can access the bus at the same time
/// in concurrent situations. `shared-bus` already implements this trait for a number of existing
/// mutex types. Most of them are guarded by a feature that needs to be enabled. Here is an
/// overview:
///
/// | Mutex | Feature Name | Notes |
/// | --- | --- | --- |
/// | [`shared_bus::NullMutex`][null-mutex] | always available | For sharing within a single execution context. |
/// | [`std::sync::Mutex`][std-mutex] | `std` | For platforms where `std` is available. |
/// | [`cortex_m::interrupt::Mutex`][cortexm-mutex] | `cortex-m` | For Cortex-M platforms; Uses a critical section (i.e. turns off interrupts during bus transactions). |
///
/// [null-mutex]: ./struct.NullMutex.html
/// [std-mutex]: https://doc.rust-lang.org/std/sync/struct.Mutex.html
/// [cortexm-mutex]: https://docs.rs/cortex-m/0.6.3/cortex_m/interrupt/struct.Mutex.html
///
/// For other mutex types, a custom implementation is needed. Due to the orphan rule, it might be
/// necessary to wrap it in a newtype. As an example, this is what such a custom implementation
/// might look like:
///
/// ```
/// struct MyMutex<T>(std::sync::Mutex<T>);
///
/// impl<T> shared_bus::BusMutex for MyMutex<T> {
/// type Bus = T;
///
/// fn create(v: T) -> Self {
/// Self(std::sync::Mutex::new(v))
/// }
///
/// fn lock<R, F: FnOnce(&mut Self::Bus) -> R>(&self, f: F) -> R {
/// let mut v = self.0.lock().unwrap();
/// f(&mut v)
/// }
/// }
///
/// // It is also beneficial to define a type alias for the BusManager
/// type BusManagerCustom<BUS> = shared_bus::BusManager<MyMutex<BUS>>;
/// ```
pub trait BusMutex {
    /// The actual bus that is wrapped inside this mutex.
    type Bus;
    /// Create a new mutex of this type, taking ownership of the bus.
    fn create(v: Self::Bus) -> Self;
    /// Lock the mutex and give a closure access to the bus inside.
    ///
    /// The closure's return value is passed through to the caller.
    fn lock<R, F: FnOnce(&mut Self::Bus) -> R>(&self, f: F) -> R;
}
/// "Dummy" mutex for sharing in a single task/thread.
///
/// This mutex type can be used when all bus users are contained in a single execution context. In
/// such a situation, no actual mutex is needed, because a RefCell alone is sufficient to ensure
/// only a single peripheral can access the bus at the same time.
///
/// This mutex type is used with the [`BusManagerSimple`] type.
///
/// To uphold safety, this type is `!Send` and `!Sync`.
///
/// [`BusManagerSimple`]: ./type.BusManagerSimple.html
#[derive(Debug)]
pub struct NullMutex<T> {
    // RefCell gives runtime-checked exclusive access without any real locking.
    bus: cell::RefCell<T>,
}
impl<T> BusMutex for NullMutex<T> {
    type Bus = T;

    fn create(bus: Self::Bus) -> Self {
        Self {
            bus: cell::RefCell::new(bus),
        }
    }

    fn lock<R, F: FnOnce(&mut Self::Bus) -> R>(&self, f: F) -> R {
        // A RefCell is enough here: all proxies sharing this "mutex" live in
        // the same execution context, so the runtime borrow check alone
        // guards exclusive access to the bus.
        f(&mut *self.bus.borrow_mut())
    }
}
#[cfg(feature = "std")]
impl<T> BusMutex for ::std::sync::Mutex<T> {
    type Bus = T;

    fn create(bus: Self::Bus) -> Self {
        Self::new(bus)
    }

    fn lock<R, F: FnOnce(&mut Self::Bus) -> R>(&self, f: F) -> R {
        // The inherent `Mutex::lock` takes precedence over this trait method;
        // a poisoned mutex means another bus user panicked, so unwrap.
        let mut guard = self.lock().unwrap();
        f(&mut guard)
    }
}
/// Alias for a Cortex-M mutex.
///
/// Based on [`cortex_m::interrupt::Mutex`][cortexm-mutex]. This mutex works by disabling
/// interrupts while the mutex is locked.
///
/// [cortexm-mutex]: https://docs.rs/cortex-m/0.6.3/cortex_m/interrupt/struct.Mutex.html
///
/// This type is only available with the `cortex-m` feature.
// A critical-section mutex around a `RefCell`; locked by disabling interrupts.
#[cfg(feature = "cortex-m")]
pub type CortexMMutex<T> = cortex_m::interrupt::Mutex<cell::RefCell<T>>;
#[cfg(feature = "cortex-m")]
impl<T> BusMutex for CortexMMutex<T> {
    type Bus = T;

    fn create(v: T) -> Self {
        Self::new(cell::RefCell::new(v))
    }

    fn lock<R, F: FnOnce(&mut Self::Bus) -> R>(&self, f: F) -> R {
        // Run the closure inside a critical section: interrupts stay disabled
        // for the whole bus transaction, so no other context can preempt it.
        cortex_m::interrupt::free(|cs| f(&mut *self.borrow(cs).borrow_mut()))
    }
}
/// Wrapper for an interrupt free spin mutex.
///
/// Based on [`spin::Mutex`][spin-mutex]. This mutex works by disabling
/// interrupts while the mutex is locked.
///
/// [spin-mutex]: https://docs.rs/spin/0.9.2/spin/type.Mutex.html
///
/// This type is only available with the `xtensa` feature.
// Newtype around `spin::Mutex` so that `BusMutex` can be implemented for it.
#[cfg(feature = "xtensa")]
pub struct XtensaMutex<T>(spin::Mutex<T>);
#[cfg(feature = "xtensa")]
impl<T> BusMutex for XtensaMutex<T> {
    type Bus = T;

    fn create(v: T) -> Self {
        Self(spin::Mutex::new(v))
    }

    fn lock<R, F: FnOnce(&mut Self::Bus) -> R>(&self, f: F) -> R {
        // Disable interrupts first, then take the spin lock for the duration
        // of the bus transaction.
        xtensa_lx::interrupt::free(|_| {
            let mut guard = self.0.lock();
            f(&mut *guard)
        })
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Exercise the `BusMutex` implementation for `std::sync::Mutex` through
    /// the trait interface (fully qualified, to avoid the inherent `lock`).
    #[test]
    fn std_mutex_api_test() {
        let text = "hello ".to_string();
        let mutex: std::sync::Mutex<_> = BusMutex::create(text);

        BusMutex::lock(&mutex, |s| s.push_str("world"));
        BusMutex::lock(&mutex, |s| assert_eq!("hello world", s));
    }
}
/// A simple coherency checker for sharing across multiple tasks/threads.
///
/// This mutex type can be used when all bus users are contained in a single structure, and is
/// intended for use with RTIC. When all bus users are contained within a structure managed by RTIC,
/// RTIC will guarantee that bus collisions do not occur. To protect against accidental misuse,
/// this mutex uses an atomic bool to determine when the bus is in use. If a bus collision is
/// detected, the code will panic.
///
/// This mutex type is used with the [`BusManagerAtomicCheck`] type.
///
/// This manager type is explicitly safe to share across threads because it checks to ensure that
/// collisions due to bus sharing do not occur.
///
/// [`BusManagerAtomicCheck`]: ./type.BusManagerAtomicCheck.html
#[cfg(feature = "cortex-m")]
#[derive(Debug)]
pub struct AtomicCheckMutex<BUS> {
    // The shared bus; `UnsafeCell` because access is arbitrated manually.
    bus: core::cell::UnsafeCell<BUS>,
    // Set while a transaction is in flight; used to detect collisions.
    busy: atomic_polyfill::AtomicBool,
}
// It is explicitly safe to share this across threads because there is a coherency check using an
// atomic bool comparison.
// SAFETY: `lock()` panics instead of allowing two concurrent accesses to `bus`.
#[cfg(feature = "cortex-m")]
unsafe impl<BUS> Sync for AtomicCheckMutex<BUS> {}
#[cfg(feature = "cortex-m")]
impl<BUS> BusMutex for AtomicCheckMutex<BUS> {
    type Bus = BUS;

    fn create(v: BUS) -> Self {
        Self {
            bus: core::cell::UnsafeCell::new(v),
            busy: atomic_polyfill::AtomicBool::new(false),
        }
    }

    fn lock<R, F: FnOnce(&mut Self::Bus) -> R>(&self, f: F) -> R {
        use core::sync::atomic::Ordering;

        // Claim the bus. A failed exchange means another context is in the
        // middle of a transaction — a collision the concurrency framework
        // should have prevented — so we panic rather than race.
        self.busy
            .compare_exchange(false, true, Ordering::SeqCst, Ordering::SeqCst)
            .expect("Bus conflict");

        let result = f(unsafe { &mut *self.bus.get() });

        // Release the claim only after the transaction completed.
        self.busy.store(false, Ordering::SeqCst);
        result
    }
}
| rust | Apache-2.0 | 7ee581be3ea32fcc8eff7af97e15d993683c549b | 2026-01-04T20:20:06.411723Z | false |
Rahix/shared-bus | https://github.com/Rahix/shared-bus/blob/7ee581be3ea32fcc8eff7af97e15d993683c549b/src/manager.rs | src/manager.rs | /// "Manager" for a shared bus.
///
/// The manager owns the original bus peripheral (wrapped inside a mutex) and hands out proxies
/// which can be used by device drivers for accessing the bus. Certain bus proxies can only be
/// created with restrictions (see the individual methods for details).
///
/// Usually the type-aliases defined in this crate should be used instead of `BusManager` directly.
/// Otherwise, the mutex type needs to be specified explicitly. Here is an overview of aliases
/// (some are only available if a certain feature is enabled):
///
/// | Bus Manager | Mutex Type | Feature Name | Notes |
/// | --- | --- | --- | --- |
/// | [`BusManagerSimple`] | `shared_bus::NullMutex` | always available | For sharing within a single execution context. |
/// | [`BusManagerStd`] | `std::sync::Mutex` | `std` | For platforms where `std` is available. |
/// | [`BusManagerCortexM`] | `cortex_m::interrupt::Mutex` | `cortex-m` | For Cortex-M platforms; Uses a critical section (i.e. turns off interrupts during bus transactions). |
///
/// [`BusManagerSimple`]: ./type.BusManagerSimple.html
/// [`BusManagerStd`]: ./type.BusManagerStd.html
/// [`BusManagerCortexM`]: ./type.BusManagerCortexM.html
///
/// # Constructing a `BusManager`
/// There are two ways to instantiate a bus manager. Which one to use depends on the kind of
/// sharing that is intended.
///
/// 1. When all bus users live in the same task/thread, a `BusManagerSimple` can be used:
///
/// ```
/// # use embedded_hal::blocking::i2c;
/// # use embedded_hal::blocking::i2c::Write as _;
/// # struct MyDevice<T>(T);
/// # impl<T: i2c::Write> MyDevice<T> {
/// # pub fn new(t: T) -> Self { MyDevice(t) }
/// # pub fn do_something_on_the_bus(&mut self) {
/// # self.0.write(0xab, &[0x00]);
/// # }
/// # }
/// #
/// # fn _example(i2c: impl i2c::Write) {
/// // For example:
/// // let i2c = I2c::i2c1(dp.I2C1, (scl, sda), 90.khz(), clocks, &mut rcc.apb1);
///
/// let bus = shared_bus::BusManagerSimple::new(i2c);
///
/// let mut proxy1 = bus.acquire_i2c();
/// let mut my_device = MyDevice::new(bus.acquire_i2c());
///
/// proxy1.write(0x39, &[0xc0, 0xff, 0xee]);
/// my_device.do_something_on_the_bus();
/// # }
/// ```
///
/// 2. When users are in different execution contexts, a proper mutex type is needed and the
/// manager must be made `static` to ensure it lives long enough. For this, `shared-bus`
/// provides a number of macros creating such a `static` instance:
///
/// ```
/// # struct MyDevice<T>(T);
/// # impl<T> MyDevice<T> {
/// # pub fn new(t: T) -> Self { MyDevice(t) }
/// # pub fn do_something_on_the_bus(&mut self) { }
/// # }
/// #
/// # struct SomeI2cBus;
/// # let i2c = SomeI2cBus;
/// // For example:
/// // let i2c = I2c::i2c1(dp.I2C1, (scl, sda), 90.khz(), clocks, &mut rcc.apb1);
///
/// // The bus is a 'static reference -> it lives forever and references can be
/// // shared with other threads.
/// let bus: &'static _ = shared_bus::new_std!(SomeI2cBus = i2c).unwrap();
///
/// let mut proxy1 = bus.acquire_i2c();
/// let mut my_device = MyDevice::new(bus.acquire_i2c());
///
/// // We can easily move a proxy to another thread:
/// # let t =
/// std::thread::spawn(move || {
/// my_device.do_something_on_the_bus();
/// });
/// # t.join().unwrap();
/// ```
///
/// For other platforms, similar macros exist (e.g. [`new_cortexm!()`]).
///
/// [`new_cortexm!()`]: ./macro.new_cortexm.html
#[derive(Debug)]
pub struct BusManager<M> {
    // The mutex wrapping the owned bus; proxies borrow it to serialize access.
    mutex: M,
}
impl<M: crate::BusMutex> BusManager<M> {
    /// Create a new bus manager, taking ownership of the given bus.
    ///
    /// See the documentation for `BusManager` for more details.
    pub fn new(bus: M::Bus) -> Self {
        BusManager {
            mutex: M::create(bus),
        }
    }
}
impl<M: crate::BusMutex> BusManager<M> {
    /// Acquire an [`I2cProxy`] for this bus.
    ///
    /// [`I2cProxy`]: ./struct.I2cProxy.html
    ///
    /// Any number of I2C proxies may coexist; each transaction made through a
    /// proxy locks the manager's mutex for its duration, so a driver can use
    /// the proxy exactly like an exclusive bus handle.
    pub fn acquire_i2c<'a>(&'a self) -> crate::I2cProxy<'a, M> {
        let mutex = &self.mutex;
        crate::I2cProxy { mutex }
    }

    /// Acquire an [`AdcProxy`] for this hardware block.
    ///
    /// [`AdcProxy`]: ./struct.AdcProxy.html
    ///
    /// Any number of ADC proxies may coexist; each read made through a proxy
    /// locks the manager's mutex until a sample is returned (see the
    /// [`AdcProxy`] docs for the blocking caveat).
    pub fn acquire_adc<'a>(&'a self) -> crate::AdcProxy<'a, M> {
        let mutex = &self.mutex;
        crate::AdcProxy { mutex }
    }
}
impl<T> BusManager<crate::NullMutex<T>> {
    /// Acquire an [`SpiProxy`] for this bus.
    ///
    /// **Note**: SPI proxies can only be created from a [`BusManagerSimple`]
    /// (i.e. a manager built on the [`NullMutex`]). Drivers usually toggle
    /// the chip-select pin themselves, outside of the mutex-protected
    /// transaction, which is only safe when every bus user lives in the same
    /// task/thread — see [`SpiProxy`] for the full explanation. Chip-select
    /// remains the caller's responsibility.
    ///
    /// [`BusManagerSimple`]: ./type.BusManagerSimple.html
    /// [`NullMutex`]: ./struct.NullMutex.html
    /// [`SpiProxy`]: ./struct.SpiProxy.html
    pub fn acquire_spi<'a>(&'a self) -> crate::SpiProxy<'a, crate::NullMutex<T>> {
        let mutex = &self.mutex;
        crate::SpiProxy {
            mutex,
            // Marker making the proxy `!Send`, pinning it to this thread.
            _u: core::marker::PhantomData,
        }
    }
}
| rust | Apache-2.0 | 7ee581be3ea32fcc8eff7af97e15d993683c549b | 2026-01-04T20:20:06.411723Z | false |
Rahix/shared-bus | https://github.com/Rahix/shared-bus/blob/7ee581be3ea32fcc8eff7af97e15d993683c549b/src/proxies.rs | src/proxies.rs | #[cfg(feature = "eh-alpha")]
use embedded_hal_alpha::i2c as i2c_alpha;
use embedded_hal::adc;
use embedded_hal::blocking::i2c;
use embedded_hal::blocking::spi;
/// Proxy type for I2C bus sharing.
///
/// The `I2cProxy` implements all (blocking) I2C traits so it can be passed to drivers instead of
/// the bus instance. Internally, it holds reference to the bus via a mutex, ensuring that all
/// accesses are strictly synchronized.
///
/// An `I2cProxy` is created by calling [`BusManager::acquire_i2c()`][acquire_i2c].
///
/// [acquire_i2c]: ./struct.BusManager.html#method.acquire_i2c
#[derive(Debug)]
pub struct I2cProxy<'a, M> {
    // Reference to the manager's mutex which wraps the actual bus.
    pub(crate) mutex: &'a M,
}
impl<'a, M: crate::BusMutex> Clone for I2cProxy<'a, M> {
    fn clone(&self) -> Self {
        // A proxy is just a shared reference to the mutex; cloning copies it.
        Self { mutex: self.mutex }
    }
}
impl<'a, M: crate::BusMutex> i2c::Write for I2cProxy<'a, M>
where
    M::Bus: i2c::Write,
{
    type Error = <M::Bus as i2c::Write>::Error;

    fn write(&mut self, address: u8, bytes: &[u8]) -> Result<(), Self::Error> {
        // Hold the bus lock for exactly one transaction.
        self.mutex.lock(|bus| bus.write(address, bytes))
    }
}

impl<'a, M: crate::BusMutex> i2c::Read for I2cProxy<'a, M>
where
    M::Bus: i2c::Read,
{
    type Error = <M::Bus as i2c::Read>::Error;

    fn read(&mut self, address: u8, bytes: &mut [u8]) -> Result<(), Self::Error> {
        self.mutex.lock(|bus| bus.read(address, bytes))
    }
}

impl<'a, M: crate::BusMutex> i2c::WriteRead for I2cProxy<'a, M>
where
    M::Bus: i2c::WriteRead,
{
    type Error = <M::Bus as i2c::WriteRead>::Error;

    fn write_read(
        &mut self,
        address: u8,
        tx_bytes: &[u8],
        rx_bytes: &mut [u8],
    ) -> Result<(), Self::Error> {
        // Write-then-read under a single lock so no other proxy can
        // interleave between the two halves.
        self.mutex
            .lock(|bus| bus.write_read(address, tx_bytes, rx_bytes))
    }
}

impl<'a, M: crate::BusMutex> i2c::WriteIterRead for I2cProxy<'a, M>
where
    M::Bus: i2c::WriteIterRead,
{
    type Error = <M::Bus as i2c::WriteIterRead>::Error;

    fn write_iter_read<B>(
        &mut self,
        address: u8,
        bytes: B,
        buffer: &mut [u8],
    ) -> Result<(), Self::Error>
    where
        B: IntoIterator<Item = u8>,
    {
        self.mutex
            .lock(|bus| bus.write_iter_read(address, bytes, buffer))
    }
}

impl<'a, M: crate::BusMutex> i2c::WriteIter for I2cProxy<'a, M>
where
    M::Bus: i2c::WriteIter,
{
    type Error = <M::Bus as i2c::WriteIter>::Error;

    fn write<B>(&mut self, address: u8, bytes: B) -> Result<(), Self::Error>
    where
        B: IntoIterator<Item = u8>,
    {
        self.mutex.lock(|bus| bus.write(address, bytes))
    }
}
// Implementations for the embedded_hal alpha
// The proxy surfaces the wrapped bus's error type unchanged.
#[cfg(feature = "eh-alpha")]
impl<'a, M: crate::BusMutex> i2c_alpha::ErrorType for I2cProxy<'a, M>
where
    M::Bus: i2c_alpha::ErrorType,
{
    type Error = <M::Bus as i2c_alpha::ErrorType>::Error;
}
// Each method locks the shared bus and forwards the call, so one
// embedded-hal-alpha transaction maps to exactly one lock acquisition.
#[cfg(feature = "eh-alpha")]
impl<'a, M: crate::BusMutex> i2c_alpha::I2c for I2cProxy<'a, M>
where
    M::Bus: i2c_alpha::I2c,
{
    fn read(&mut self, address: u8, buffer: &mut [u8]) -> Result<(), Self::Error> {
        self.mutex.lock(|bus| bus.read(address, buffer))
    }
    fn write(&mut self, address: u8, bytes: &[u8]) -> Result<(), Self::Error> {
        self.mutex.lock(|bus| bus.write(address, bytes))
    }
    fn write_iter<B>(&mut self, address: u8, bytes: B) -> Result<(), Self::Error>
    where
        B: IntoIterator<Item = u8>,
    {
        self.mutex.lock(|bus| bus.write_iter(address, bytes))
    }
    fn write_read(
        &mut self,
        address: u8,
        bytes: &[u8],
        buffer: &mut [u8],
    ) -> Result<(), Self::Error> {
        self.mutex
            .lock(|bus| bus.write_read(address, bytes, buffer))
    }
    fn write_iter_read<B>(
        &mut self,
        address: u8,
        bytes: B,
        buffer: &mut [u8],
    ) -> Result<(), Self::Error>
    where
        B: IntoIterator<Item = u8>,
    {
        self.mutex
            .lock(|bus| bus.write_iter_read(address, bytes, buffer))
    }
    fn transaction<'b>(
        &mut self,
        address: u8,
        operations: &mut [i2c_alpha::Operation<'b>],
    ) -> Result<(), Self::Error> {
        self.mutex.lock(|bus| bus.transaction(address, operations))
    }
    fn transaction_iter<'b, O>(&mut self, address: u8, operations: O) -> Result<(), Self::Error>
    where
        O: IntoIterator<Item = i2c_alpha::Operation<'b>>,
    {
        self.mutex
            .lock(|bus| bus.transaction_iter(address, operations))
    }
}
/// Proxy type for SPI bus sharing.
///
/// The `SpiProxy` implements all (blocking) SPI traits so it can be passed to drivers instead of
/// the bus instance. An `SpiProxy` is created by calling [`BusManager::acquire_spi()`][acquire_spi].
///
/// **Note**: The `SpiProxy` can only be used for sharing **within a single task/thread**. This
/// is due to drivers usually managing the chip-select pin manually which would be inherently racy
/// in a concurrent environment (because the mutex is locked only after asserting CS). To ensure
/// safe usage, a `SpiProxy` can only be created when using [`BusManagerSimple`] and is `!Send`.
///
/// [acquire_spi]: ./struct.BusManager.html#method.acquire_spi
/// [`BusManagerSimple`]: ./type.BusManagerSimple.html
#[derive(Debug)]
pub struct SpiProxy<'a, M> {
    // Reference to the manager's mutex which wraps the actual bus.
    pub(crate) mutex: &'a M,
    // `*mut ()` marker makes the proxy `!Send`, keeping it on one thread.
    pub(crate) _u: core::marker::PhantomData<*mut ()>,
}
impl<'a, M: crate::BusMutex> Clone for SpiProxy<'a, M> {
    fn clone(&self) -> Self {
        // Copy the mutex reference; the marker is zero-sized.
        Self {
            mutex: self.mutex,
            _u: core::marker::PhantomData,
        }
    }
}
impl<'a, M: crate::BusMutex> spi::Transfer<u8> for SpiProxy<'a, M>
where
    M::Bus: spi::Transfer<u8>,
{
    type Error = <M::Bus as spi::Transfer<u8>>::Error;

    fn transfer<'w>(&mut self, words: &'w mut [u8]) -> Result<&'w [u8], Self::Error> {
        // Full-duplex transfer, serialized through the bus mutex.
        self.mutex.lock(move |drv| drv.transfer(words))
    }
}

impl<'a, M: crate::BusMutex> spi::Write<u8> for SpiProxy<'a, M>
where
    M::Bus: spi::Write<u8>,
{
    type Error = <M::Bus as spi::Write<u8>>::Error;

    fn write(&mut self, words: &[u8]) -> Result<(), Self::Error> {
        // Write-only transaction, serialized through the bus mutex.
        self.mutex.lock(|drv| drv.write(words))
    }
}
/// Proxy type for ADC sharing.
///
/// The `AdcProxy` implements OneShot trait so it can be passed to drivers instead of
/// ADC instance. Internally, it holds reference to the bus via a mutex, ensuring
/// that all accesses are strictly synchronized.
///
/// An `AdcProxy` is created by calling [`BusManager::acquire_adc()`][acquire_adc].
///
/// **Note**: The [`adc::OneShot`] trait proxied by this type describes a
/// non-blocking contract for ADC read operation. However access to a shared ADC
/// unit can not be arbitrated in a completely non-blocking and concurrency safe way.
/// Any reading from a channel shall be completed before `shared-bus` can allow the
/// next read from the same or another channel. So the current implementation breaks
/// the non-blocking contract of the trait and just busy-spins until a sample is
/// returned.
///
/// [acquire_adc]: ./struct.BusManager.html#method.acquire_adc
#[derive(Debug)]
pub struct AdcProxy<'a, M> {
    // Reference to the manager's mutex which wraps the actual ADC peripheral.
    pub(crate) mutex: &'a M,
}
impl<'a, M: crate::BusMutex> Clone for AdcProxy<'a, M> {
    fn clone(&self) -> Self {
        // Cheap copy of the shared mutex reference.
        Self { mutex: self.mutex }
    }
}
impl<'a, M: crate::BusMutex, ADC, Word, Pin> adc::OneShot<ADC, Word, Pin> for AdcProxy<'a, M>
where
Pin: adc::Channel<ADC>,
M::Bus: adc::OneShot<ADC, Word, Pin>,
{
type Error = <M::Bus as adc::OneShot<ADC, Word, Pin>>::Error;
fn read(&mut self, pin: &mut Pin) -> nb::Result<Word, Self::Error> {
self.mutex
.lock(|bus| nb::block!(bus.read(pin)).map_err(nb::Error::Other))
}
}
| rust | Apache-2.0 | 7ee581be3ea32fcc8eff7af97e15d993683c549b | 2026-01-04T20:20:06.411723Z | false |
Rahix/shared-bus | https://github.com/Rahix/shared-bus/blob/7ee581be3ea32fcc8eff7af97e15d993683c549b/src/macros.rs | src/macros.rs | /// Macro for creating a `std`-based bus manager with `'static` lifetime.
///
/// This macro is a convenience helper for creating a bus manager that lives for the `'static`
/// lifetime and thus can be safely shared across threads.
///
/// This macro is only available with the `std` feature.
///
/// # Syntax
/// ```ignore
/// let bus = shared_bus::new_std!(<Full Bus Type Signature> = <bus>).unwrap();
/// ```
///
/// The macro returns an Option which will be `Some(&'static bus_manager)` on the first run and
/// `None` afterwards. This is necessary to uphold safety around the inner `static` variable.
///
/// # Example
/// ```
/// # struct MyDevice<T>(T);
/// # impl<T> MyDevice<T> {
/// # pub fn new(t: T) -> Self { MyDevice(t) }
/// # pub fn do_something_on_the_bus(&mut self) { }
/// # }
/// #
/// # struct SomeI2cBus;
/// # let i2c = SomeI2cBus;
/// // For example:
/// // let i2c = I2c::i2c1(dp.I2C1, (scl, sda), 90.khz(), clocks, &mut rcc.apb1);
///
/// // The bus is a 'static reference -> it lives forever and references can be
/// // shared with other threads.
/// let bus: &'static _ = shared_bus::new_std!(SomeI2cBus = i2c).unwrap();
///
/// let mut proxy1 = bus.acquire_i2c();
/// let mut my_device = MyDevice::new(bus.acquire_i2c());
///
/// // We can easily move a proxy to another thread:
/// # let t =
/// std::thread::spawn(move || {
/// my_device.do_something_on_the_bus();
/// });
/// # t.join().unwrap();
/// ```
#[cfg(feature = "std")]
#[macro_export]
macro_rules! new_std {
    ($bus_type:ty = $bus:expr) => {{
        use $crate::once_cell::sync::OnceCell;
        // One hidden static per invocation site; `set` succeeds exactly once,
        // so at most one `'static` reference is ever handed out.
        static MANAGER: OnceCell<$crate::BusManagerStd<$bus_type>> = OnceCell::new();
        let manager = $crate::BusManagerStd::new($bus);
        if MANAGER.set(manager).is_ok() {
            MANAGER.get()
        } else {
            None
        }
    }};
}
/// Macro for creating a Cortex-M bus manager with `'static` lifetime.
///
/// This macro is a convenience helper for creating a bus manager that lives for the `'static`
/// lifetime and thus can be safely shared across tasks/execution contexts (like interrupts).
///
/// This macro is only available with the `cortex-m` feature.
///
/// # Syntax
/// ```ignore
/// let bus = shared_bus::new_cortexm!(<Full Bus Type Signature> = <bus>).unwrap();
/// ```
///
/// The macro returns an Option which will be `Some(&'static bus_manager)` on the first run and
/// `None` afterwards. This is necessary to uphold safety around the inner `static` variable.
///
/// # Example
/// ```no_run
/// # use embedded_hal::blocking::i2c::Write;
/// # struct MyDevice<T>(T);
/// # impl<T> MyDevice<T> {
/// # pub fn new(t: T) -> Self { MyDevice(t) }
/// # pub fn do_something_on_the_bus(&mut self) { }
/// # }
/// #
/// # struct SomeI2cBus;
/// # impl Write for SomeI2cBus {
/// # type Error = ();
/// # fn write(&mut self, addr: u8, buffer: &[u8]) -> Result<(), Self::Error> { Ok(()) }
/// # }
/// static mut SHARED_DEVICE:
/// Option<MyDevice<shared_bus::I2cProxy<shared_bus::CortexMMutex<SomeI2cBus>>>>
/// = None;
///
/// fn main() -> ! {
/// # let i2c = SomeI2cBus;
/// // For example:
/// // let i2c = I2c::i2c1(dp.I2C1, (scl, sda), 90.khz(), clocks, &mut rcc.apb1);
///
/// // The bus is a 'static reference -> it lives forever and references can be
/// // shared with other tasks.
/// let bus: &'static _ = shared_bus::new_cortexm!(SomeI2cBus = i2c).unwrap();
///
/// let mut proxy1 = bus.acquire_i2c();
/// let my_device = MyDevice::new(bus.acquire_i2c());
///
/// unsafe {
/// SHARED_DEVICE = Some(my_device);
/// }
///
/// cortex_m::asm::dmb();
///
/// // enable the interrupt
///
/// loop {
/// proxy1.write(0x39, &[0xaa]);
/// }
/// }
///
/// fn INTERRUPT() {
/// let dev = unsafe {SHARED_DEVICE.as_mut().unwrap()};
///
/// dev.do_something_on_the_bus();
/// }
/// ```
#[cfg(feature = "cortex-m")]
#[macro_export]
macro_rules! new_cortexm {
    ($bus_type:ty = $bus:expr) => {{
        // `cortex_m::singleton!` yields `Some` only on its first execution,
        // guaranteeing a unique `'static` manager instance.
        let manager: Option<&'static mut _> = $crate::cortex_m::singleton!(
            : $crate::BusManagerCortexM<$bus_type> =
                $crate::BusManagerCortexM::new($bus)
        );
        manager
    }};
}
/// Macro for creating a Xtensa-lx6 bus manager with `'static` lifetime.
///
/// This macro is a convenience helper for creating a bus manager that lives for the `'static`
/// lifetime and thus can be safely shared across tasks/execution contexts (like interrupts).
///
/// This macro is only available with the `xtensa` feature.
///
/// # Syntax
/// ```ignore
/// let bus = shared_bus::new_xtensa!(<Full Bus Type Signature> = <bus>).unwrap();
/// ```
///
/// The macro returns an Option which will be `Some(&'static bus_manager)` on the first run and
/// `None` afterwards. This is necessary to uphold safety around the inner `static` variable.
#[cfg(feature = "xtensa")]
#[macro_export]
macro_rules! new_xtensa {
    ($bus_type:ty = $bus:expr) => {{
        // `xtensa_lx::singleton!` yields `Some` only on its first execution,
        // guaranteeing a unique `'static` manager instance.
        let manager: Option<&'static mut _> = $crate::xtensa_lx::singleton!(
            : $crate::BusManagerXtensa<$bus_type> =
                $crate::BusManagerXtensa::new($bus)
        );
        manager
    }};
}
/// Construct a statically allocated bus manager.
///
/// This macro is a convenience helper for creating a [`BusManagerAtomicCheck`] that lives for
/// the `'static` lifetime and thus can be shared between tasks managed by a concurrency
/// framework such as RTIC.
///
/// # Syntax
/// ```ignore
/// let bus = shared_bus::new_atomic_check!(<Full Bus Type Signature> = <bus>).unwrap();
/// ```
///
/// The macro returns an Option which will be `Some(&'static bus_manager)` on the first run and
/// `None` afterwards. This is necessary to uphold safety around the inner `static` variable.
///
/// [`BusManagerAtomicCheck`]: ./type.BusManagerAtomicCheck.html
///
/// This macro is only available with the `cortex-m` feature.
#[cfg(feature = "cortex-m")]
#[macro_export]
macro_rules! new_atomic_check {
    ($bus_type:ty = $bus:expr) => {{
        let m: Option<&'static mut _> = $crate::cortex_m::singleton!(
            : $crate::BusManagerAtomicCheck<$bus_type> =
                $crate::BusManagerAtomicCheck::new($bus)
        );
        m
    }};
}
| rust | Apache-2.0 | 7ee581be3ea32fcc8eff7af97e15d993683c549b | 2026-01-04T20:20:06.411723Z | false |
Rahix/shared-bus | https://github.com/Rahix/shared-bus/blob/7ee581be3ea32fcc8eff7af97e15d993683c549b/tests/i2c.rs | tests/i2c.rs | use embedded_hal::prelude::*;
use embedded_hal_mock::i2c;
use std::thread;
#[test]
fn fake_i2c_device() {
    // Sanity-check the mock on its own, before involving shared-bus at all.
    let expectations = vec![i2c::Transaction::write(0xc0, vec![0xff, 0xee])];
    let mut device = i2c::Mock::new(&expectations);

    device.write(0xc0, &[0xff, 0xee]).unwrap();
    device.done();
}
#[test]
fn i2c_manager_manual() {
    // Manually constructed manager (no macro); proxy traffic must reach the mock.
    let transactions = [i2c::Transaction::write(0xde, vec![0xad, 0xbe, 0xef])];
    let mut mock = i2c::Mock::new(&transactions);
    let manager = shared_bus::BusManagerSimple::new(mock.clone());
    let mut proxy = manager.acquire_i2c();
    proxy.write(0xde, &[0xad, 0xbe, 0xef]).unwrap();
    mock.done();
}
#[test]
fn i2c_manager_macro() {
    // Manager built via the `new_std!` macro; the type annotation is inferred.
    let transactions = [i2c::Transaction::write(0xde, vec![0xad, 0xbe, 0xef])];
    let mut mock = i2c::Mock::new(&transactions);
    let manager = shared_bus::new_std!(i2c::Mock = mock.clone()).unwrap();
    let mut proxy = manager.acquire_i2c();
    proxy.write(0xde, &[0xad, 0xbe, 0xef]).unwrap();
    mock.done();
}
#[test]
fn i2c_proxy() {
    // A single proxy performing all three transaction kinds, in order.
    let transactions = [
        i2c::Transaction::write(0xde, vec![0xad, 0xbe, 0xef]),
        i2c::Transaction::read(0xef, vec![0xbe, 0xad, 0xde]),
        i2c::Transaction::write_read(0x44, vec![0x01, 0x02], vec![0x03, 0x04]),
    ];
    let mut mock = i2c::Mock::new(&transactions);
    let manager = shared_bus::BusManagerSimple::new(mock.clone());
    let mut proxy = manager.acquire_i2c();

    proxy.write(0xde, &[0xad, 0xbe, 0xef]).unwrap();

    let mut read_buf = [0u8; 3];
    proxy.read(0xef, &mut read_buf).unwrap();
    assert_eq!(read_buf, [0xbe, 0xad, 0xde]);

    let mut wr_buf = [0u8; 2];
    proxy.write_read(0x44, &[0x01, 0x02], &mut wr_buf).unwrap();
    assert_eq!(wr_buf, [0x03, 0x04]);

    mock.done();
}
#[test]
fn i2c_multi() {
    // Three distinct proxies on one manager must observe the shared bus in order.
    let transactions = [
        i2c::Transaction::write(0xde, vec![0xad, 0xbe, 0xef]),
        i2c::Transaction::read(0xef, vec![0xbe, 0xad, 0xde]),
        i2c::Transaction::write_read(0x44, vec![0x01, 0x02], vec![0x03, 0x04]),
    ];
    let mut mock = i2c::Mock::new(&transactions);
    let manager = shared_bus::BusManagerSimple::new(mock.clone());
    let mut writer = manager.acquire_i2c();
    let mut reader = manager.acquire_i2c();
    let mut writer_reader = manager.acquire_i2c();

    writer.write(0xde, &[0xad, 0xbe, 0xef]).unwrap();

    let mut read_buf = [0u8; 3];
    reader.read(0xef, &mut read_buf).unwrap();
    assert_eq!(read_buf, [0xbe, 0xad, 0xde]);

    let mut wr_buf = [0u8; 2];
    writer_reader.write_read(0x44, &[0x01, 0x02], &mut wr_buf).unwrap();
    assert_eq!(wr_buf, [0x03, 0x04]);

    mock.done();
}
#[test]
fn i2c_concurrent() {
    let transactions = [
        i2c::Transaction::write(0xde, vec![0xad, 0xbe, 0xef]),
        i2c::Transaction::read(0xef, vec![0xbe, 0xad, 0xde]),
    ];
    let mut mock = i2c::Mock::new(&transactions);
    let manager = shared_bus::new_std!(i2c::Mock = mock.clone()).unwrap();
    let mut writer = manager.acquire_i2c();
    let mut reader = manager.acquire_i2c();

    // The threads are joined sequentially so the mock sees the transactions
    // in the expected order; this only checks Send-ability of the proxies.
    let write_thread = thread::spawn(move || {
        writer.write(0xde, &[0xad, 0xbe, 0xef]).unwrap();
    });
    write_thread.join().unwrap();

    let read_thread = thread::spawn(move || {
        let mut buf = [0u8; 3];
        reader.read(0xef, &mut buf).unwrap();
        assert_eq!(buf, [0xbe, 0xad, 0xde]);
    });
    read_thread.join().unwrap();

    mock.done();
}
| rust | Apache-2.0 | 7ee581be3ea32fcc8eff7af97e15d993683c549b | 2026-01-04T20:20:06.411723Z | false |
Rahix/shared-bus | https://github.com/Rahix/shared-bus/blob/7ee581be3ea32fcc8eff7af97e15d993683c549b/tests/spi.rs | tests/spi.rs | use embedded_hal::prelude::*;
use embedded_hal_mock::spi;
#[test]
fn fake_spi_device() {
    // Sanity check of the SPI mock on its own.
    let transactions = [spi::Transaction::write(vec![0xff, 0xee])];
    let mut mock = spi::Mock::new(&transactions);
    mock.write(&[0xff, 0xee]).unwrap();
    mock.done();
}
#[test]
fn spi_manager_manual() {
    let transactions = [spi::Transaction::write(vec![0xab, 0xcd, 0xef])];
    let mut mock = spi::Mock::new(&transactions);
    let manager = shared_bus::BusManagerSimple::new(mock.clone());
    let mut proxy = manager.acquire_spi();
    proxy.write(&[0xab, 0xcd, 0xef]).unwrap();
    mock.done();
}
#[test]
fn spi_proxy() {
    let transactions = [
        spi::Transaction::write(vec![0xab, 0xcd, 0xef]),
        spi::Transaction::transfer(vec![0x01, 0x02], vec![0x03, 0x04]),
    ];
    let mut mock = spi::Mock::new(&transactions);
    let manager = shared_bus::BusManagerSimple::new(mock.clone());
    let mut proxy = manager.acquire_spi();

    proxy.write(&[0xab, 0xcd, 0xef]).unwrap();

    // `transfer` overwrites the buffer in place with the response bytes.
    let mut words = [0x01, 0x02];
    proxy.transfer(&mut words).unwrap();
    assert_eq!(words, [0x03, 0x04]);

    mock.done();
}
#[test]
fn spi_multi() {
    // Two proxies sharing one SPI bus; transactions must arrive in order.
    let transactions = [
        spi::Transaction::write(vec![0xab, 0xcd, 0xef]),
        spi::Transaction::transfer(vec![0x01, 0x02], vec![0x03, 0x04]),
    ];
    let mut mock = spi::Mock::new(&transactions);
    let manager = shared_bus::BusManagerSimple::new(mock.clone());
    let mut writer = manager.acquire_spi();
    let mut transferrer = manager.acquire_spi();

    writer.write(&[0xab, 0xcd, 0xef]).unwrap();

    let mut words = [0x01, 0x02];
    transferrer.transfer(&mut words).unwrap();
    assert_eq!(words, [0x03, 0x04]);

    mock.done();
}
| rust | Apache-2.0 | 7ee581be3ea32fcc8eff7af97e15d993683c549b | 2026-01-04T20:20:06.411723Z | false |
Rahix/shared-bus | https://github.com/Rahix/shared-bus/blob/7ee581be3ea32fcc8eff7af97e15d993683c549b/tests/adc.rs | tests/adc.rs | use embedded_hal::prelude::*;
use embedded_hal_mock::adc;
use std::thread;
#[test]
fn adc_mock_device() {
    // Sanity check of the ADC mock: one expected read per channel.
    let transactions = [
        adc::Transaction::read(0, 0xabcd),
        adc::Transaction::read(1, 0xabba),
        adc::Transaction::read(2, 0xbaab),
    ];
    let mut mock = adc::Mock::new(&transactions);
    assert_eq!(mock.read(&mut adc::MockChan0).unwrap(), 0xabcd);
    assert_eq!(mock.read(&mut adc::MockChan1).unwrap(), 0xabba);
    assert_eq!(mock.read(&mut adc::MockChan2).unwrap(), 0xbaab);
    mock.done()
}
#[test]
fn adc_manager_simple() {
    let transactions = [
        adc::Transaction::read(0, 0xabcd),
        adc::Transaction::read(1, 0xabba),
        adc::Transaction::read(2, 0xbaab),
    ];
    let mut mock = adc::Mock::new(&transactions);
    let manager = shared_bus::BusManagerSimple::new(mock.clone());
    let mut proxy = manager.acquire_adc();
    assert_eq!(proxy.read(&mut adc::MockChan0).unwrap(), 0xabcd);
    assert_eq!(proxy.read(&mut adc::MockChan1).unwrap(), 0xabba);
    assert_eq!(proxy.read(&mut adc::MockChan2).unwrap(), 0xbaab);
    mock.done()
}
#[test]
fn adc_manager_std() {
    let transactions = [
        adc::Transaction::read(0, 0xabcd),
        adc::Transaction::read(1, 0xabba),
        adc::Transaction::read(2, 0xbaab),
    ];
    let mut mock = adc::Mock::new(&transactions);
    // `new_std!` needs the fully-spelled bus type; the manager type is inferred.
    let manager = shared_bus::new_std!(adc::Mock<u16> = mock.clone()).unwrap();
    let mut proxy = manager.acquire_adc();
    assert_eq!(proxy.read(&mut adc::MockChan0).unwrap(), 0xabcd);
    assert_eq!(proxy.read(&mut adc::MockChan1).unwrap(), 0xabba);
    assert_eq!(proxy.read(&mut adc::MockChan2).unwrap(), 0xbaab);
    mock.done()
}
#[test]
fn adc_proxy_multi() {
    // One proxy per channel, all backed by the same manager.
    let transactions = [
        adc::Transaction::read(0, 0xabcd),
        adc::Transaction::read(1, 0xabba),
        adc::Transaction::read(2, 0xbaab),
    ];
    let mut mock = adc::Mock::new(&transactions);
    let manager = shared_bus::BusManagerSimple::new(mock.clone());
    let mut chan0 = manager.acquire_adc();
    let mut chan1 = manager.acquire_adc();
    let mut chan2 = manager.acquire_adc();
    assert_eq!(chan0.read(&mut adc::MockChan0).unwrap(), 0xabcd);
    assert_eq!(chan1.read(&mut adc::MockChan1).unwrap(), 0xabba);
    assert_eq!(chan2.read(&mut adc::MockChan2).unwrap(), 0xbaab);
    mock.done()
}
#[test]
fn adc_proxy_concurrent() {
    let transactions = [
        adc::Transaction::read(0, 0xabcd),
        adc::Transaction::read(1, 0xabba),
        adc::Transaction::read(2, 0xbaab),
    ];
    let mut mock = adc::Mock::new(&transactions);
    let manager = shared_bus::new_std!(adc::Mock<u32> = mock.clone()).unwrap();
    let mut proxy1 = manager.acquire_adc();
    let mut proxy2 = manager.acquire_adc();
    let mut proxy3 = manager.acquire_adc();

    // Threads are joined sequentially so the mock sees reads in order.
    let worker = thread::spawn(move || {
        assert_eq!(proxy1.read(&mut adc::MockChan0).unwrap(), 0xabcd);
    });
    worker.join().unwrap();

    let worker = thread::spawn(move || {
        assert_eq!(proxy2.read(&mut adc::MockChan1).unwrap(), 0xabba);
    });
    worker.join().unwrap();

    assert_eq!(proxy3.read(&mut adc::MockChan2).unwrap(), 0xbaab);
    mock.done()
}
| rust | Apache-2.0 | 7ee581be3ea32fcc8eff7af97e15d993683c549b | 2026-01-04T20:20:06.411723Z | false |
sean3z/rocket-diesel-rest-api-example | https://github.com/sean3z/rocket-diesel-rest-api-example/blob/9cc36e44a4829a1f9324e2b134c0ec621148f8de/src/db.rs | src/db.rs | use std::ops::Deref;
use rocket::http::Status;
use rocket::request::{self, FromRequest};
use rocket::{Request, State, Outcome};
use diesel::mysql::MysqlConnection;
use diesel::r2d2::{ConnectionManager, Pool, PooledConnection};
// An alias to the type for a pool of Diesel Mysql Connection
pub type MysqlPool = Pool<ConnectionManager<MysqlConnection>>;
// The URL to the database, set via the `DATABASE_URL` environment variable.
// NOTE: `env!` reads the variable at *compile* time, so the binary must be
// rebuilt to point it at a different database.
static DATABASE_URL: &str = env!("DATABASE_URL");
/// Initialize the database pool.
///
/// # Panics
/// Panics if the r2d2 pool cannot be created (e.g. the database is unreachable
/// at startup).
pub fn connect() -> MysqlPool {
    let manager = ConnectionManager::<MysqlConnection>::new(DATABASE_URL);
    Pool::new(manager).expect("Failed to create pool")
}
// Connection request guard type: a wrapper around an r2d2 pooled connection.
pub struct Connection(pub PooledConnection<ConnectionManager<MysqlConnection>>);
/// Attempts to retrieve a single connection from the managed database pool. If
/// no pool is currently managed, fails with an `InternalServerError` status. If
/// no connections are available, fails with a `ServiceUnavailable` status.
impl<'a, 'r> FromRequest<'a, 'r> for Connection {
    type Error = ();
    fn from_request(request: &'a Request<'r>) -> request::Outcome<Self, Self::Error> {
        // A missing managed pool short-circuits here via `?` (500).
        let pool = request.guard::<State<MysqlPool>>()?;
        // An exhausted pool maps to 503 Service Unavailable.
        if let Ok(conn) = pool.get() {
            Outcome::Success(Connection(conn))
        } else {
            Outcome::Failure((Status::ServiceUnavailable, ()))
        }
    }
}
// For the convenience of using an &Connection as an &MysqlConnection.
impl Deref for Connection {
type Target = MysqlConnection;
fn deref(&self) -> &Self::Target {
&self.0
}
}
| rust | MIT | 9cc36e44a4829a1f9324e2b134c0ec621148f8de | 2026-01-04T20:20:05.712825Z | false |
sean3z/rocket-diesel-rest-api-example | https://github.com/sean3z/rocket-diesel-rest-api-example/blob/9cc36e44a4829a1f9324e2b134c0ec621148f8de/src/hero.rs | src/hero.rs | use diesel;
use diesel::prelude::*;
use diesel::mysql::MysqlConnection;
use schema::heroes;
/// A hero record mapped to the `heroes` table.
#[table_name = "heroes"]
#[derive(AsChangeset, Serialize, Deserialize, Queryable, Insertable)]
pub struct Hero {
    // Auto-increment primary key; `None` lets MySQL assign it on insert.
    pub id: Option<i32>,
    pub name: String,
    pub identity: String,
    pub hometown: String,
    pub age: i32
}
impl Hero {
    /// Inserts `hero` and returns the freshly stored row.
    ///
    /// # Panics
    /// Panics if the insert or the follow-up select fails.
    //
    // NOTE(review): the inserted row is re-read as "highest id"; under
    // concurrent inserts this can return another session's row (MySQL has no
    // `RETURNING`) — confirm whether `last_insert_id()` should be used instead.
    pub fn create(hero: Hero, connection: &MysqlConnection) -> Hero {
        diesel::insert_into(heroes::table)
            .values(&hero)
            .execute(connection)
            .expect("Error creating new hero");
        heroes::table.order(heroes::id.desc()).first(connection).unwrap()
    }
    /// Returns all heroes ordered by id.
    ///
    /// # Panics
    /// Panics if the query fails.
    pub fn read(connection: &MysqlConnection) -> Vec<Hero> {
        heroes::table.order(heroes::id).load::<Hero>(connection).unwrap()
    }
    /// Applies `hero` as a changeset to the row with `id`; `true` on success.
    pub fn update(id: i32, hero: Hero, connection: &MysqlConnection) -> bool {
        diesel::update(heroes::table.find(id)).set(&hero).execute(connection).is_ok()
    }
    /// Deletes the row with `id`; `true` on success.
    pub fn delete(id: i32, connection: &MysqlConnection) -> bool {
        diesel::delete(heroes::table.find(id)).execute(connection).is_ok()
    }
}
| rust | MIT | 9cc36e44a4829a1f9324e2b134c0ec621148f8de | 2026-01-04T20:20:05.712825Z | false |
sean3z/rocket-diesel-rest-api-example | https://github.com/sean3z/rocket-diesel-rest-api-example/blob/9cc36e44a4829a1f9324e2b134c0ec621148f8de/src/schema.rs | src/schema.rs | table! {
    heroes {
        // Nullable so inserts can omit the auto-increment primary key.
        id -> Nullable<Integer>,
        name -> Varchar,
        identity -> Varchar,
        hometown -> Varchar,
        age -> Integer,
    }
}
sean3z/rocket-diesel-rest-api-example | https://github.com/sean3z/rocket-diesel-rest-api-example/blob/9cc36e44a4829a1f9324e2b134c0ec621148f8de/src/main.rs | src/main.rs | #![feature(plugin)]
#![plugin(rocket_codegen)]
extern crate rocket;
#[macro_use] extern crate rocket_contrib;
#[macro_use] extern crate serde_derive;
#[macro_use] extern crate diesel;
use rocket_contrib::{Json, Value};
mod db;
mod schema;
mod hero;
use hero::Hero;
#[post("/", data = "<hero>")]
fn create(hero: Json<Hero>, connection: db::Connection) -> Json<Hero> {
    // Discard any client-supplied id so MySQL assigns the next one.
    let mut new_hero = hero.into_inner();
    new_hero.id = None;
    Json(Hero::create(new_hero, &connection))
}
#[get("/")]
fn read(connection: db::Connection) -> Json<Value> {
Json(json!(Hero::read(&connection)))
}
#[put("/<id>", data = "<hero>")]
fn update(id: i32, hero: Json<Hero>, connection: db::Connection) -> Json<Value> {
let update = Hero { id: Some(id), ..hero.into_inner() };
Json(json!({
"success": Hero::update(id, update, &connection)
}))
}
#[delete("/<id>")]
fn delete(id: i32, connection: db::Connection) -> Json<Value> {
Json(json!({
"success": Hero::delete(id, &connection)
}))
}
fn main() {
    // Singular routes under /hero, collection route under /heroes.
    let server = rocket::ignite()
        .manage(db::connect())
        .mount("/hero", routes![create, update, delete])
        .mount("/heroes", routes![read]);
    server.launch();
}
| rust | MIT | 9cc36e44a4829a1f9324e2b134c0ec621148f8de | 2026-01-04T20:20:05.712825Z | false |
tauri-apps/tauri-plugin-window-state | https://github.com/tauri-apps/tauri-plugin-window-state/blob/dd4c37f73ffd4b497da32c5c58333b5e896d35bc/src/lib.rs | src/lib.rs | // Copyright 2021 Tauri Programme within The Commons Conservancy
// SPDX-License-Identifier: Apache-2.0
// SPDX-License-Identifier: MIT
use bitflags::bitflags;
use serde::{Deserialize, Serialize};
use tauri::{
plugin::{Builder as PluginBuilder, TauriPlugin},
LogicalSize, Manager, Monitor, PhysicalPosition, PhysicalSize, RunEvent, Runtime, Window,
WindowEvent,
};
use std::{
collections::{HashMap, HashSet},
fs::{create_dir_all, File},
io::Write,
sync::{Arc, Mutex},
};
mod cmd;
/// File name used to persist the window state inside the app's config directory.
pub const STATE_FILENAME: &str = ".window-state";
/// Errors that can occur while loading or saving window state.
#[derive(Debug, thiserror::Error)]
pub enum Error {
    #[error(transparent)]
    Io(#[from] std::io::Error),
    #[error(transparent)]
    Tauri(#[from] tauri::Error),
    #[error(transparent)]
    TauriApi(#[from] tauri::api::Error),
    #[error(transparent)]
    Bincode(#[from] Box<bincode::ErrorKind>),
}
/// Convenience alias for results produced by this plugin.
pub type Result<T> = std::result::Result<T, Error>;
bitflags! {
    /// Flags selecting which aspects of the window state get saved/restored.
    #[derive(Clone, Copy, Debug)]
    pub struct StateFlags: u32 {
        const SIZE = 1 << 0;
        const POSITION = 1 << 1;
        const MAXIMIZED = 1 << 2;
        const VISIBLE = 1 << 3;
        const DECORATIONS = 1 << 4;
        const FULLSCREEN = 1 << 5;
    }
}
impl Default for StateFlags {
    // By default every aspect of the window state is tracked.
    fn default() -> Self {
        Self::all()
    }
}
/// Per-window state as persisted to disk (bincode-encoded).
#[derive(Debug, Deserialize, Serialize, PartialEq)]
struct WindowState {
    // Logical size; applied via `LogicalSize` in `restore_state`.
    width: f64,
    height: f64,
    // Physical outer position of the window.
    x: i32,
    y: i32,
    // prev_x and prev_y are used to store position
    // before maximization happened, because maximization
    // will set x and y to the top-left corner of the monitor
    prev_x: i32,
    prev_y: i32,
    maximized: bool,
    visible: bool,
    decorated: bool,
    fullscreen: bool,
}
impl Default for WindowState {
    /// Zeroed geometry, but windows are considered visible and decorated
    /// unless recorded otherwise.
    fn default() -> Self {
        Self {
            width: 0.0,
            height: 0.0,
            x: 0,
            y: 0,
            prev_x: 0,
            prev_y: 0,
            maximized: false,
            visible: true,
            decorated: true,
            fullscreen: false,
        }
    }
}
struct WindowStateCache(Arc<Mutex<HashMap<String, WindowState>>>);
pub trait AppHandleExt {
    /// Saves all open windows state to disk
    fn save_window_state(&self, flags: StateFlags) -> Result<()>;
}
impl<R: Runtime> AppHandleExt for tauri::AppHandle<R> {
    fn save_window_state(&self, flags: StateFlags) -> Result<()> {
        if let Some(app_dir) = self.path_resolver().app_config_dir() {
            let state_path = app_dir.join(STATE_FILENAME);
            let cache = self.state::<WindowStateCache>();
            let mut state = cache.0.lock().unwrap();
            // Refresh every cached entry from the live window before writing;
            // entries for windows that no longer exist keep their last value.
            for (label, s) in state.iter_mut() {
                if let Some(window) = self.get_window(label) {
                    window.update_state(s, flags)?;
                }
            }
            // Create the config dir if needed, then serialize the whole map.
            create_dir_all(&app_dir)
                .map_err(Error::Io)
                .and_then(|_| File::create(state_path).map_err(Into::into))
                .and_then(|mut f| {
                    f.write_all(&bincode::serialize(&*state).map_err(Error::Bincode)?)
                        .map_err(Into::into)
                })
        } else {
            // No config dir available: silently skip persisting.
            Ok(())
        }
    }
}
pub trait WindowExt {
    /// Restores this window state from disk
    ///
    /// If no state was saved yet, the window's current properties are
    /// recorded as the initial cache entry instead. When
    /// [`StateFlags::VISIBLE`] is requested and the saved state is visible,
    /// the window is also shown and focused.
    fn restore_state(&self, flags: StateFlags) -> tauri::Result<()>;
}
impl<R: Runtime> WindowExt for Window<R> {
    fn restore_state(&self, flags: StateFlags) -> tauri::Result<()> {
        let cache = self.state::<WindowStateCache>();
        let mut c = cache.0.lock().unwrap();
        let mut should_show = true;
        if let Some(state) = c.get(self.label()) {
            // avoid restoring the default zeroed state
            if *state == WindowState::default() {
                return Ok(());
            }
            if flags.contains(StateFlags::DECORATIONS) {
                self.set_decorations(state.decorated)?;
            }
            if flags.contains(StateFlags::SIZE) {
                self.set_size(LogicalSize {
                    width: state.width,
                    height: state.height,
                })?;
            }
            if flags.contains(StateFlags::POSITION) {
                let position = (state.x, state.y).into();
                let size = (state.width, state.height).into();
                // restore position to saved value if saved monitor exists
                // otherwise, let the OS decide where to place the window
                for m in self.available_monitors()? {
                    if m.intersects(position, size) {
                        self.set_position(PhysicalPosition {
                            // While maximized, x/y hold the monitor origin, so
                            // restore the pre-maximization coordinates instead.
                            x: if state.maximized {
                                state.prev_x
                            } else {
                                state.x
                            },
                            y: if state.maximized {
                                state.prev_y
                            } else {
                                state.y
                            },
                        })?;
                        // The restored position doesn't depend on which monitor
                        // matched; stop after the first hit to avoid redundant
                        // `set_position` calls when monitors overlap.
                        break;
                    }
                }
            }
            if flags.contains(StateFlags::MAXIMIZED) && state.maximized {
                self.maximize()?;
            }
            if flags.contains(StateFlags::FULLSCREEN) {
                self.set_fullscreen(state.fullscreen)?;
            }
            should_show = state.visible;
        } else {
            // No saved state yet: snapshot the window's current properties so
            // subsequent saves start from a sensible baseline.
            let mut metadata = WindowState::default();
            if flags.contains(StateFlags::SIZE) {
                let scale_factor = self
                    .current_monitor()?
                    .map(|m| m.scale_factor())
                    .unwrap_or(1.);
                let size = self.inner_size()?.to_logical(scale_factor);
                metadata.width = size.width;
                metadata.height = size.height;
            }
            if flags.contains(StateFlags::POSITION) {
                let pos = self.outer_position()?;
                metadata.x = pos.x;
                metadata.y = pos.y;
            }
            if flags.contains(StateFlags::MAXIMIZED) {
                metadata.maximized = self.is_maximized()?;
            }
            if flags.contains(StateFlags::VISIBLE) {
                metadata.visible = self.is_visible()?;
            }
            if flags.contains(StateFlags::DECORATIONS) {
                metadata.decorated = self.is_decorated()?;
            }
            if flags.contains(StateFlags::FULLSCREEN) {
                metadata.fullscreen = self.is_fullscreen()?;
            }
            c.insert(self.label().into(), metadata);
        }
        if flags.contains(StateFlags::VISIBLE) && should_show {
            self.show()?;
            self.set_focus()?;
        }
        Ok(())
    }
}
trait WindowExtInternal {
    /// Refreshes `state` from this window's live properties, honoring `flags`.
    fn update_state(&self, state: &mut WindowState, flags: StateFlags) -> tauri::Result<()>;
}
impl<R: Runtime> WindowExtInternal for Window<R> {
    fn update_state(&self, state: &mut WindowState, flags: StateFlags) -> tauri::Result<()> {
        // Maximization is queried whenever SIZE or POSITION handling needs it,
        // not only when MAXIMIZED itself is tracked.
        let is_maximized = match flags.intersects(StateFlags::MAXIMIZED | StateFlags::SIZE) {
            true => self.is_maximized()?,
            false => false,
        };
        if flags.contains(StateFlags::MAXIMIZED) {
            state.maximized = is_maximized;
        }
        if flags.contains(StateFlags::FULLSCREEN) {
            state.fullscreen = self.is_fullscreen()?;
        }
        if flags.contains(StateFlags::DECORATIONS) {
            state.decorated = self.is_decorated()?;
        }
        if flags.contains(StateFlags::VISIBLE) {
            state.visible = self.is_visible()?;
        }
        if flags.contains(StateFlags::SIZE) {
            let scale_factor = self
                .current_monitor()?
                .map(|m| m.scale_factor())
                .unwrap_or(1.);
            let size = self.inner_size()?.to_logical(scale_factor);
            // It doesn't make sense to save a window with 0 height or width
            // (and a maximized window's size is the monitor's, not its own).
            if size.width > 0. && size.height > 0. && !is_maximized {
                state.width = size.width;
                state.height = size.height;
            }
        }
        // Skip position while maximized: x/y would be the monitor origin.
        if flags.contains(StateFlags::POSITION) && !is_maximized {
            let position = self.outer_position()?;
            state.x = position.x;
            state.y = position.y;
        }
        Ok(())
    }
}
/// Builder for the window-state plugin.
#[derive(Default)]
pub struct Builder {
    // Labels of windows the plugin must not track at all.
    denylist: HashSet<String>,
    // Labels of windows that skip the initial restore (but are still tracked).
    skip_initial_state: HashSet<String>,
    state_flags: StateFlags,
}
impl Builder {
    /// Creates a builder with default settings (all flags, no denylist).
    pub fn new() -> Self {
        Self::default()
    }
    /// Sets the state flags to control what state gets restored and saved.
    pub fn with_state_flags(mut self, flags: StateFlags) -> Self {
        self.state_flags = flags;
        self
    }
    /// Sets a list of windows that shouldn't be tracked and managed by this plugin
    /// for example splash screen windows.
    pub fn with_denylist(mut self, denylist: &[&str]) -> Self {
        self.denylist = denylist.iter().map(|l| l.to_string()).collect();
        self
    }
    /// Adds the given window label to a list of windows to skip initial state restore.
    pub fn skip_initial_state(mut self, label: &str) -> Self {
        self.skip_initial_state.insert(label.into());
        self
    }
    /// Builds the Tauri plugin: loads the on-disk cache at setup, restores and
    /// tracks each webview as it becomes ready, and persists on app exit.
    pub fn build<R: Runtime>(self) -> TauriPlugin<R> {
        let flags = self.state_flags;
        PluginBuilder::new("window-state")
            .invoke_handler(tauri::generate_handler![
                cmd::save_window_state,
                cmd::restore_state
            ])
            .setup(|app| {
                // Load the persisted map if present; any read/deserialize
                // failure silently falls back to an empty cache.
                let cache: Arc<Mutex<HashMap<String, WindowState>>> = if let Some(app_dir) =
                    app.path_resolver().app_config_dir()
                {
                    let state_path = app_dir.join(STATE_FILENAME);
                    if state_path.exists() {
                        Arc::new(Mutex::new(
                            tauri::api::file::read_binary(state_path)
                                .map_err(Error::TauriApi)
                                .and_then(|state| bincode::deserialize(&state).map_err(Into::into))
                                .unwrap_or_default(),
                        ))
                    } else {
                        Default::default()
                    }
                } else {
                    Default::default()
                };
                app.manage(WindowStateCache(cache));
                Ok(())
            })
            .on_webview_ready(move |window| {
                if self.denylist.contains(window.label()) {
                    return;
                }
                if !self.skip_initial_state.contains(window.label()) {
                    let _ = window.restore_state(self.state_flags);
                }
                let cache = window.state::<WindowStateCache>();
                let cache = cache.0.clone();
                let label = window.label().to_string();
                let window_clone = window.clone();
                let flags = self.state_flags;
                // insert a default state if this window should be tracked and
                // the disk cache doesn't have a state for it
                {
                    cache
                        .lock()
                        .unwrap()
                        .entry(label.clone())
                        .or_insert_with(WindowState::default);
                }
                window.on_window_event(move |e| match e {
                    WindowEvent::CloseRequested { .. } => {
                        // Capture the final state before the window goes away.
                        let mut c = cache.lock().unwrap();
                        if let Some(state) = c.get_mut(&label) {
                            let _ = window_clone.update_state(state, flags);
                        }
                    }
                    WindowEvent::Moved(position) if flags.contains(StateFlags::POSITION) => {
                        // Shift the current position into prev_* so the
                        // pre-maximization position survives a maximize move.
                        let mut c = cache.lock().unwrap();
                        if let Some(state) = c.get_mut(&label) {
                            state.prev_x = state.x;
                            state.prev_y = state.y;
                            state.x = position.x;
                            state.y = position.y;
                        }
                    }
                    _ => {}
                });
            })
            .on_event(move |app, event| {
                // Persist everything once, when the app is shutting down.
                if let RunEvent::Exit = event {
                    let _ = app.save_window_state(flags);
                }
            })
            .build()
    }
}
trait MonitorExt {
    /// Returns `true` if any corner of the rectangle `(position, size)` lies
    /// on this monitor.
    fn intersects(&self, position: PhysicalPosition<i32>, size: LogicalSize<u32>) -> bool;
}
impl MonitorExt for Monitor {
    fn intersects(&self, position: PhysicalPosition<i32>, size: LogicalSize<u32>) -> bool {
        // Convert the saved logical size into this monitor's physical scale.
        let size = size.to_physical::<u32>(self.scale_factor());
        let PhysicalPosition { x, y } = *self.position();
        let PhysicalSize { width, height } = *self.size();
        let left = x;
        let right = x + width as i32;
        let top = y;
        let bottom = y + height as i32;
        // Test the window's four corners against the monitor bounds.
        // NOTE(review): a window strictly larger than the monitor can overlap
        // it with all four corners outside, which this test misses; a full
        // AABB-overlap test would also catch that case — confirm whether
        // corner sampling is intentional before changing it.
        [
            (position.x, position.y),
            (position.x + size.width as i32, position.y),
            (position.x, position.y + size.height as i32),
            (
                position.x + size.width as i32,
                position.y + size.height as i32,
            ),
        ]
        .into_iter()
        .any(|(x, y)| x >= left && x < right && y >= top && y < bottom)
    }
}
| rust | Apache-2.0 | dd4c37f73ffd4b497da32c5c58333b5e896d35bc | 2026-01-04T20:20:08.111445Z | false |
tauri-apps/tauri-plugin-window-state | https://github.com/tauri-apps/tauri-plugin-window-state/blob/dd4c37f73ffd4b497da32c5c58333b5e896d35bc/src/cmd.rs | src/cmd.rs | use crate::{AppHandleExt, StateFlags, WindowExt};
use tauri::{command, AppHandle, Manager, Runtime};
#[command]
pub async fn save_window_state<R: Runtime>(
    app: AppHandle<R>,
    flags: u32,
) -> std::result::Result<(), String> {
    // Reject bit patterns that don't correspond to known state flags.
    let state_flags = match StateFlags::from_bits(flags) {
        Some(f) => f,
        None => return Err(format!("Invalid state flags bits: {}", flags)),
    };
    app.save_window_state(state_flags).map_err(|e| e.to_string())
}
#[command]
pub async fn restore_state<R: Runtime>(
    app: AppHandle<R>,
    label: String,
    flags: u32,
) -> std::result::Result<(), String> {
    // Reject bit patterns that don't correspond to known state flags.
    let state_flags = match StateFlags::from_bits(flags) {
        Some(f) => f,
        None => return Err(format!("Invalid state flags bits: {}", flags)),
    };
    let window = app
        .get_window(&label)
        .ok_or_else(|| format!("Couldn't find window with label: {}", label))?;
    window.restore_state(state_flags).map_err(|e| e.to_string())
}
| rust | Apache-2.0 | dd4c37f73ffd4b497da32c5c58333b5e896d35bc | 2026-01-04T20:20:08.111445Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/lib.rs | crates/wgparry/src/lib.rs | #![doc = include_str!("../README.md")]
#![warn(missing_docs)]
#![allow(clippy::result_large_err)]
#![allow(clippy::too_many_arguments)]
pub extern crate nalgebra as na;
#[cfg(feature = "dim2")]
pub extern crate parry2d as parry;
#[cfg(feature = "dim3")]
pub extern crate parry3d as parry;
use naga_oil::compose::ShaderDefValue;
use std::collections::HashMap;
/// Bounding volume data structures and GPU shaders for collision detection acceleration.
pub mod bounding_volumes;
/// Broad-phase collision detection algorithms implemented on the GPU.
///
/// Includes brute-force and LBVH (Linear Bounding Volume Hierarchy) implementations.
pub mod broad_phase;
/// Geometric query operations like ray-casting, point projection, and contact generation.
pub mod queries;
/// Geometric shape definitions and their GPU shader implementations.
pub mod shapes;
/// Utility functions and data structures, including GPU radix sort.
pub mod utils;
/// Returns shader definitions that depend on whether we are building the 2D or 3D version of this crate.
///
/// The returned map is meant to be fed to the WGSL preprocessor so shaders can
/// adapt to the build configuration:
/// - `"DIM"`: `2` when the `dim2` feature is enabled, `3` otherwise.
/// - `"NATIVE"`: `1` on native targets, `0` when targeting `wasm32`.
///
/// # Example
///
/// ```rust,ignore
/// let shader_defs = dim_shader_defs();
/// // On a 3D native build: {"DIM": 3, "NATIVE": 1}
/// // On a 2D WASM build: {"DIM": 2, "NATIVE": 0}
/// ```
pub fn dim_shader_defs() -> HashMap<String, ShaderDefValue> {
    let dim = if cfg!(feature = "dim2") { 2 } else { 3 };
    let native = if cfg!(target_arch = "wasm32") { 0 } else { 1 };
    HashMap::from([
        ("DIM".to_string(), ShaderDefValue::UInt(dim)),
        ("NATIVE".to_string(), ShaderDefValue::UInt(native)),
    ])
}
/// Substitutes type aliases in WGSL shader source code with their concrete dimension-specific types.
///
/// Since naga-oil doesn't support type aliases very well, this function performs textual
/// substitution to replace generic type names with their 2D or 3D equivalents before shader
/// compilation. This enables writing dimension-agnostic shader code that works for both
/// 2D and 3D builds.
///
/// # Parameters
///
/// - `src`: The WGSL shader source code containing generic type aliases
///
/// # Returns
///
/// The modified shader source with all aliases replaced by concrete types
///
/// # Substituted Aliases
///
/// For 2D builds (feature = "dim2"):
/// - `Transform` → `Pose::Sim2` (2D similarity transformation)
/// - `AngVector` → `f32` (scalar angle)
/// - `Vector` → `vec2<f32>` (2D vector)
///
/// For 3D builds (feature = "dim3"):
/// - `Transform` → `Pose::Sim3` (3D similarity transformation)
/// - `AngVector` → `vec3<f32>` (3D angular vector)
/// - `Vector` → `vec3<f32>` (3D vector)
///
/// # Example
///
/// ```rust,ignore
/// let shader = "fn distance(a: Vector, b: Vector) -> f32 { ... }";
/// let result = substitute_aliases(shader);
/// // In 3D: "fn distance(a: vec3<f32>, b: vec3<f32>) -> f32 { ... }"
/// ```
pub fn substitute_aliases(src: &str) -> String {
    // NOTE: plain textual substitution — replacement order matters. The
    // parenthesized constructor form (`AngVector(`, `Vector(`) is rewritten
    // before the bare type name so the two can map to different targets
    // (e.g. in 3D, `AngVector(` → `vec3(` but a bare `AngVector` → `vec3<f32>`).
    #[cfg(feature = "dim2")]
    return src
        .replace("Transform", "Pose::Sim2")
        .replace("AngVector(", "f32(")
        .replace("AngVector", "f32")
        .replace("Vector(", "vec2<f32>(")
        .replace("Vector", "vec2<f32>");
    #[cfg(feature = "dim3")]
    return src
        .replace("Transform", "Pose::Sim3")
        .replace("AngVector(", "vec3(")
        .replace("AngVector", "vec3<f32>")
        .replace("Vector(", "vec3<f32>(")
        .replace("Vector", "vec3<f32>");
}
// NOTE: the modules below were copied from parry. Should we just add a dependency to parry?
/// Compilation flags dependent aliases for mathematical types.
#[cfg(feature = "dim3")]
pub mod math {
    use na::{
        Isometry3, Matrix3, Point3, Translation3, UnitQuaternion, UnitVector3, Vector3, Vector6,
        U3, U6,
    };
    use wgebra::GpuSim3;
    /// The default tolerance used for geometric operations.
    pub const DEFAULT_EPSILON: f32 = f32::EPSILON;
    /// The dimension of the space.
    pub const DIM: usize = 3;
    /// The dimension of the space multiplied by two.
    pub const TWO_DIM: usize = DIM * 2;
    /// The dimension of the ambient space.
    pub type Dim = U3;
    /// The dimension of a spatial vector.
    pub type SpatialDim = U6;
    /// The dimension of the rotations.
    pub type AngDim = U3;
    /// The point type.
    pub type Point<N> = Point3<N>;
    /// The angular vector type.
    pub type AngVector<N> = Vector3<N>;
    /// The vector type.
    pub type Vector<N> = Vector3<N>;
    /// The unit vector type.
    pub type UnitVector<N> = UnitVector3<N>;
    /// The matrix type.
    pub type Matrix<N> = Matrix3<N>;
    /// The vector type with dimension `SpatialDim × 1`.
    pub type SpatialVector<N> = Vector6<N>;
    /// The orientation type.
    pub type Orientation<N> = Vector3<N>;
    /// The transformation matrix type.
    pub type Isometry<N> = Isometry3<N>;
    /// The rotation matrix type.
    pub type Rotation<N> = UnitQuaternion<N>;
    /// The translation type.
    pub type Translation<N> = Translation3<N>;
    /// The angular inertia of a rigid body.
    pub type AngularInertia<N> = Matrix3<N>;
    /// The principal angular inertia of a rigid body.
    pub type PrincipalAngularInertia<N> = Vector3<N>;
    /// A matrix that represent the cross product with a given vector.
    pub type CrossMatrix<N> = Matrix3<N>;
    /// A vector with a dimension equal to the maximum number of degrees of freedom of a rigid body.
    // NOTE(review): duplicates `SpatialVector`; the "Spacial" spelling is kept
    // as-is since renaming it would break the public API.
    pub type SpacialVector<N> = Vector6<N>;
    // /// A 3D symmetric-definite-positive matrix.
    // pub type SdpMatrix<N> = crate::utils::SdpMatrix3<N>;
    /// A 3D similarity with layout compatible with the corresponding wgsl struct.
    pub type GpuSim = GpuSim3;
}
/// Compilation flags dependent aliases for mathematical types.
#[cfg(feature = "dim2")]
pub mod math {
    use na::{
        Isometry2, Matrix2, Point2, Translation2, UnitComplex, UnitVector2, Vector1, Vector2,
        Vector3, U1, U2,
    };
    use wgebra::GpuSim2;
    /// The default tolerance used for geometric operations.
    pub const DEFAULT_EPSILON: f32 = f32::EPSILON;
    /// The dimension of the space.
    pub const DIM: usize = 2;
    /// The dimension of the space multiplied by two.
    pub const TWO_DIM: usize = DIM * 2;
    /// The dimension of the ambient space.
    pub type Dim = U2;
    /// The dimension of the rotations.
    pub type AngDim = U1;
    /// The point type.
    pub type Point<N> = Point2<N>;
    /// The angular vector type.
    // In 2D the angular part is a single scalar (rotation angle).
    pub type AngVector<N> = N;
    /// The vector type.
    pub type Vector<N> = Vector2<N>;
    /// The unit vector type.
    pub type UnitVector<N> = UnitVector2<N>;
    /// The matrix type.
    pub type Matrix<N> = Matrix2<N>;
    /// The orientation type.
    pub type Orientation<N> = Vector1<N>;
    /// The transformation matrix type.
    pub type Isometry<N> = Isometry2<N>;
    /// The rotation matrix type.
    pub type Rotation<N> = UnitComplex<N>;
    /// The translation type.
    pub type Translation<N> = Translation2<N>;
    /// The angular inertia of a rigid body.
    pub type AngularInertia<N> = N;
    /// The principal angular inertia of a rigid body.
    pub type PrincipalAngularInertia<N> = N;
    /// A matrix that represent the cross product with a given vector.
    pub type CrossMatrix<N> = Vector2<N>;
    /// A vector with a dimension equal to the maximum number of degrees of freedom of a rigid body.
    // NOTE(review): "Spacial" spelling kept for parity with the 3D module and
    // API stability.
    pub type SpacialVector<N> = Vector3<N>;
    // /// A 2D symmetric-definite-positive matrix.
    // pub type SdpMatrix<N> = crate::utils::SdpMatrix2<N>;
    /// A 2D similarity with layout compatible with the corresponding wgsl struct.
    pub type GpuSim = GpuSim2;
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/broad_phase/brute_force_broad_phase.rs | crates/wgparry/src/broad_phase/brute_force_broad_phase.rs | //! Brute-force broad-phase collision detection.
//!
//! Tests all pairs of colliders for AABB overlap (O(n²) complexity). While not scalable
//! for large scenes, the algorithm is highly parallelizable on GPU and can outperform more
//! sophisticated algorithms for small to medium-sized simulations (< 1000 objects).
//!
//! The GPU implementation processes pairs in parallel, making effective use of GPU compute
//! resources even though the algorithmic complexity is quadratic.
use crate::bounding_volumes::WgAabb;
use crate::math::GpuSim;
use crate::shapes::{GpuShape, WgShape};
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::indirect::{DispatchIndirectArgs, WgIndirect};
#[cfg(feature = "dim2")]
use nalgebra::Vector2;
#[cfg(feature = "dim3")]
use nalgebra::Vector4;
use wgcore::kernel::KernelDispatch;
use wgcore::tensor::{GpuScalar, GpuVector};
use wgcore::{test_shader_compilation, Shader};
use wgebra::{WgSim2, WgSim3};
use wgpu::{ComputePass, ComputePipeline, Device};
#[derive(Shader)]
#[shader(
derive(WgSim3, WgSim2, WgShape, WgAabb, WgIndirect),
src = "./brute_force_broad_phase.wgsl",
shader_defs = "dim_shader_defs",
src_fn = "substitute_aliases",
composable = false
)]
/// GPU shader for brute-force broad-phase collision detection.
///
/// This shader tests all pairs of colliders for AABB overlap in parallel. While O(n²),
/// it can be convenient for testing and debugging more sophisticated algorithms.
pub struct WgBruteForceBroadPhase {
main: ComputePipeline,
reset: ComputePipeline,
init_indirect_args: ComputePipeline,
debug_compute_aabb: ComputePipeline, // TODO: remove this. For debugging only.
}
impl WgBruteForceBroadPhase {
const WORKGROUP_SIZE: u32 = 64;
/// Dispatches the brute-force broad-phase collision detection.
pub fn dispatch(
&self,
device: &Device,
pass: &mut ComputePass,
num_colliders: u32,
poses: &GpuVector<GpuSim>,
shapes: &GpuVector<GpuShape>,
num_shapes: &GpuScalar<u32>,
collision_pairs: &GpuVector<[u32; 2]>,
collision_pairs_len: &GpuScalar<u32>,
collision_pairs_indirect: &GpuScalar<DispatchIndirectArgs>,
#[cfg(feature = "dim2")] debug_aabb_mins: &GpuVector<Vector2<f32>>,
#[cfg(feature = "dim2")] debug_aabb_maxs: &GpuVector<Vector2<f32>>,
#[cfg(feature = "dim3")] debug_aabb_mins: &GpuVector<Vector4<f32>>,
#[cfg(feature = "dim3")] debug_aabb_maxs: &GpuVector<Vector4<f32>>,
) {
KernelDispatch::new(device, pass, &self.reset)
.bind_at(0, [(collision_pairs_len.buffer(), 4)])
.dispatch(1);
KernelDispatch::new(device, pass, &self.main)
.bind0([
num_shapes.buffer(),
poses.buffer(),
shapes.buffer(),
collision_pairs.buffer(),
collision_pairs_len.buffer(),
])
.dispatch(num_colliders.div_ceil(Self::WORKGROUP_SIZE));
KernelDispatch::new(device, pass, &self.init_indirect_args)
.bind_at(
0,
[
(collision_pairs_len.buffer(), 4),
(collision_pairs_indirect.buffer(), 5),
],
)
.dispatch(1);
KernelDispatch::new(device, pass, &self.debug_compute_aabb)
.bind_at(0, [(poses.buffer(), 1), (shapes.buffer(), 2)])
.bind(1, [debug_aabb_mins.buffer(), debug_aabb_maxs.buffer()])
.dispatch(num_colliders.div_ceil(Self::WORKGROUP_SIZE));
}
}
test_shader_compilation!(WgBruteForceBroadPhase, wgcore, crate::dim_shader_defs());
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/broad_phase/lbvh.rs | crates/wgparry/src/broad_phase/lbvh.rs | use crate::bounding_volumes::WgAabb;
use crate::math::{GpuSim, Point};
use crate::shapes::{GpuShape, WgShape};
use crate::utils::{RadixSort, RadixSortWorkspace};
use crate::{dim_shader_defs, substitute_aliases};
use naga_oil::compose::ComposerError;
use parry::bounding_volume::Aabb;
use wgcore::indirect::{DispatchIndirectArgs, WgIndirect};
#[cfg(feature = "dim2")]
use nalgebra::Vector2;
#[cfg(feature = "dim3")]
use nalgebra::Vector4;
use wgcore::kernel::KernelDispatch;
use wgcore::tensor::{GpuScalar, GpuVector};
use wgcore::Shader;
use wgebra::WgSim3;
use wgpu::{BufferUsages, ComputePass, ComputePipeline, Device};
#[derive(Shader)]
#[shader(
derive(WgSim3, WgShape, WgAabb, WgIndirect),
src = "./lbvh.wgsl",
src_fn = "substitute_aliases",
shader_defs = "dim_shader_defs",
composable = false
)]
/// GPU shader for Linear Bounding Volume Hierarchy (LBVH) construction and traversal.
///
/// Implements the Karras 2012 parallel LBVH construction algorithm on the GPU, providing
/// O(n log n) collision detection suitable for large dynamic scenes.
pub struct WgLbvh {
reset_collision_pairs: ComputePipeline,
compute_domain: ComputePipeline,
compute_morton: ComputePipeline,
build: ComputePipeline,
refit_leaves: ComputePipeline,
refit_internal: ComputePipeline,
#[allow(dead_code)]
refit: ComputePipeline,
find_collision_pairs: ComputePipeline,
init_indirect_args: ComputePipeline,
}
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable, Default)]
#[repr(C)]
/// A node in the LBVH binary tree structure.
///
/// Each node represents either:
/// - **Leaf node**: Contains a single collider (when `left == right`)
/// - **Internal node**: Contains references to two child nodes
///
/// The tree is built using Morton codes for spatial sorting, enabling cache-friendly
/// traversal and efficient parallel construction.
pub struct LbvhNode {
#[cfg(feature = "dim3")]
aabb_mins: Vector4<f32>,
#[cfg(feature = "dim3")]
aabb_maxs: Vector4<f32>,
#[cfg(feature = "dim2")]
aabb_mins: Vector2<f32>,
#[cfg(feature = "dim2")]
aabb_maxs: Vector2<f32>,
left: u32,
right: u32,
parent: u32,
refit_count: u32,
}
impl LbvhNode {
/// Extracts the AABB (axis-aligned bounding box) from this node.
///
/// Returns a [`parry::bounding_volume::Aabb`] constructed from the node's min/max bounds.
#[cfg(feature = "dim3")]
pub fn aabb(&self) -> Aabb {
Aabb::new(self.aabb_mins.xyz().into(), self.aabb_maxs.xyz().into())
}
/// Extracts the AABB (axis-aligned bounding box) from this node.
///
/// Returns a [`parry::bounding_volume::Aabb`] constructed from the node's min/max bounds.
#[cfg(feature = "dim2")]
pub fn aabb(&self) -> Aabb {
Aabb::new(self.aabb_mins.into(), self.aabb_maxs.into())
}
}
/// GPU-resident state for LBVH construction and queries.
///
/// Maintains all GPU buffers needed for building and querying the LBVH:
/// - Morton codes and their sorted versions
/// - Collider indices (sorted by Morton code)
/// - The BVH tree structure itself
/// - Radix sort workspace for Morton code sorting
///
/// Buffers automatically resize when the number of colliders changes.
pub struct LbvhState {
buffer_usages: BufferUsages, // Just for debugging if we want COPY_SRC
#[cfg(feature = "dim3")]
domain_aabb: GpuScalar<[Vector4<f32>; 2]>,
#[cfg(feature = "dim2")]
domain_aabb: GpuScalar<[Vector2<f32>; 2]>,
n_sort: GpuScalar<u32>,
unsorted_morton_keys: GpuVector<u32>,
sorted_morton_keys: GpuVector<u32>,
unsorted_colliders: GpuVector<u32>,
sorted_colliders: GpuVector<u32>,
tree: GpuVector<LbvhNode>,
sort_workspace: RadixSortWorkspace,
}
/// High-level LBVH broad-phase interface (shaders only).
///
/// Provides the complete LBVH pipeline:
/// 1. Compute AABBs and domain bounds
/// 2. Generate Morton codes for spatial sorting
/// 3. Sort colliders by Morton code
/// 4. Build binary tree structure
/// 5. Traverse tree to find collision pairs
pub struct Lbvh {
shaders: WgLbvh,
sort: RadixSort,
}
impl LbvhState {
/// Creates a new LBVH state with default buffer usage flags.
///
/// Initializes all buffers with `BufferUsages::STORAGE` flag for compute shader access.
pub fn new(device: &Device) -> Result<Self, ComposerError> {
Self::with_usages(device, BufferUsages::STORAGE)
}
/// Creates a new LBVH state with custom buffer usage flags.
///
/// Allows specifying custom usage flags for debugging or special use cases
/// (e.g., adding `COPY_SRC` for buffer readback).
pub fn with_usages(device: &Device, usages: BufferUsages) -> Result<Self, ComposerError> {
Ok(Self {
n_sort: GpuScalar::init(device, 0, usages),
domain_aabb: GpuScalar::uninit(device, usages),
unsorted_morton_keys: GpuVector::uninit(device, 0, usages),
sorted_morton_keys: GpuVector::uninit(device, 0, usages),
unsorted_colliders: GpuVector::uninit(device, 0, usages),
sorted_colliders: GpuVector::uninit(device, 0, usages),
tree: GpuVector::uninit(device, 0, usages),
sort_workspace: RadixSortWorkspace::new(device),
buffer_usages: usages,
})
}
fn resize_buffers(&mut self, device: &Device, colliders_len: u32) {
if self.tree.len() < 2 * colliders_len as u64 {
self.unsorted_morton_keys =
GpuVector::uninit(device, colliders_len, self.buffer_usages);
self.sorted_morton_keys = GpuVector::uninit(device, colliders_len, self.buffer_usages);
let unsorted_colliders: Vec<_> = (0..colliders_len).collect();
self.unsorted_colliders =
GpuVector::init(device, &unsorted_colliders, self.buffer_usages);
self.sorted_colliders = GpuVector::uninit(device, colliders_len, self.buffer_usages);
self.tree = GpuVector::uninit(device, 2 * colliders_len, self.buffer_usages);
// FIXME: we should instead write the len into the existing buffer at each frame
// to handle dynamic body/collider insertion/removal.
self.n_sort = GpuScalar::init(device, colliders_len, self.buffer_usages);
}
}
}
impl Lbvh {
const WORKGROUP_SIZE: u32 = 64;
/// Creates a new LBVH instance by compiling shaders on the given device.
///
/// # Errors
///
/// Returns an error if shader compilation fails.
pub fn from_device(device: &Device) -> Result<Self, ComposerError> {
Ok(Self {
shaders: WgLbvh::from_device(device)?,
sort: RadixSort::from_device(device)?,
})
}
/// Rebuilds the LBVH tree from current collider poses and shapes.
///
/// This method:
/// 1. Computes AABBs for all colliders
/// 2. Calculates the bounding domain
/// 3. Generates Morton codes for spatial sorting
/// 4. Sorts colliders by Morton code using radix sort
/// 5. Builds the binary BVH tree structure
///
/// Should be called each frame before [`find_pairs`](Self::find_pairs) if colliders have moved.
///
/// # Parameters
///
/// - `device`: The GPU device
/// - `pass`: Active compute pass to record commands into
/// - `state`: Mutable LBVH state (buffers may be resized if needed)
/// - `colliders_len`: Number of colliders to process
/// - `poses`: Collider world-space poses
/// - `shapes`: Collider shapes
/// - `num_shapes`: Scalar buffer containing the collider count
pub fn update_tree(
&self,
device: &Device,
pass: &mut ComputePass<'_>,
state: &mut LbvhState,
colliders_len: u32,
poses: &GpuVector<GpuSim>,
vertex_buffers: &GpuVector<Point<f32>>,
shapes: &GpuVector<GpuShape>,
num_shapes: &GpuScalar<u32>,
) {
state.resize_buffers(device, colliders_len);
// Bind group 0.
let num_colliders = (num_shapes.buffer(), 0);
let poses = (poses.buffer(), 1);
let shapes = (shapes.buffer(), 2);
let domain_aabb = (state.domain_aabb.buffer(), 6);
let unsorted_morton_keys = (state.unsorted_morton_keys.buffer(), 7);
let sorted_morton_keys = (state.sorted_morton_keys.buffer(), 7);
let sorted_colliders = (state.sorted_colliders.buffer(), 8);
let tree = (state.tree.buffer(), 9);
let vertices = (vertex_buffers.buffer(), 0);
// Dispatch everything.
KernelDispatch::new(device, pass, &self.shaders.compute_domain)
.bind_at(0, [num_colliders, poses, domain_aabb])
.dispatch(1);
KernelDispatch::new(device, pass, &self.shaders.compute_morton)
.bind_at(0, [num_colliders, poses, domain_aabb, unsorted_morton_keys])
.dispatch(colliders_len.div_ceil(Self::WORKGROUP_SIZE));
self.sort.dispatch(
device,
pass,
&mut state.sort_workspace,
&state.unsorted_morton_keys,
&state.unsorted_colliders,
&state.n_sort,
32,
&state.sorted_morton_keys,
&state.sorted_colliders,
);
KernelDispatch::new(device, pass, &self.shaders.build)
.bind_at(0, [num_colliders, sorted_morton_keys, tree])
.dispatch((colliders_len - 1).div_ceil(Self::WORKGROUP_SIZE));
KernelDispatch::new(device, pass, &self.shaders.refit_leaves)
.bind_at(0, [num_colliders, tree, poses, shapes, sorted_colliders])
.bind_at(1, [])
.bind_at(2, [vertices])
.dispatch(colliders_len.div_ceil(Self::WORKGROUP_SIZE));
KernelDispatch::new(device, pass, &self.shaders.refit_internal)
.bind_at(0, [num_colliders, tree])
.dispatch(1);
// .dispatch(colliders_len.div_ceil(Self::WORKGROUP_SIZE));
// KernelDispatch::new(device, pass, &self.shaders.refit)
// .bind_at(0, [num_colliders, tree, poses, shapes, sorted_colliders])
// .dispatch(colliders_len.div_ceil(Self::WORKGROUP_SIZE));
}
/// Traverses the LBVH tree to find potentially colliding pairs.
///
/// After the tree has been built with [`update_tree`](Self::update_tree), this method
/// traverses it to identify pairs of colliders whose AABBs overlap.
///
/// # Parameters
///
/// - `device`: The GPU device
/// - `pass`: Active compute pass to record commands into
/// - `state`: LBVH state containing the built tree
/// - `colliders_len`: Number of colliders in the scene
/// - `num_shapes`: Scalar buffer containing the collider count
/// - `collision_pairs`: Output buffer for potentially colliding pairs
/// - `collision_pairs_len`: Output count of collision pairs found
/// - `collision_pairs_indirect`: Indirect dispatch args for subsequent kernels
pub fn find_pairs(
&self,
device: &Device,
pass: &mut ComputePass<'_>,
state: &mut LbvhState,
colliders_len: u32,
num_shapes: &GpuScalar<u32>,
collision_pairs: &GpuVector<[u32; 2]>,
collision_pairs_len: &GpuScalar<u32>,
collision_pairs_indirect: &GpuScalar<DispatchIndirectArgs>,
) {
// Bind group 0.
let num_colliders = (num_shapes.buffer(), 0);
let collision_pairs = (collision_pairs.buffer(), 3);
let collision_pairs_len = (collision_pairs_len.buffer(), 4);
let collision_pairs_indirect = (collision_pairs_indirect.buffer(), 5);
let tree = (state.tree.buffer(), 9);
KernelDispatch::new(device, pass, &self.shaders.reset_collision_pairs)
.bind_at(0, [collision_pairs_len])
.dispatch(1);
KernelDispatch::new(device, pass, &self.shaders.find_collision_pairs)
.bind_at(
0,
[num_colliders, collision_pairs_len, collision_pairs, tree],
)
.dispatch(colliders_len.div_ceil(Self::WORKGROUP_SIZE));
KernelDispatch::new(device, pass, &self.shaders.init_indirect_args)
.bind_at(0, [collision_pairs_len, collision_pairs_indirect])
.dispatch(1);
}
}
#[cfg(test)]
mod test {
use super::*;
use parry::bounding_volume::BoundingVolume;
use wgcore::gpu::GpuInstance;
use wgcore::kernel::CommandEncoderExt;
#[cfg(feature = "dim2")]
use na::{Similarity2, Vector2};
#[cfg(feature = "dim3")]
use na::{Similarity3, Vector3};
#[futures_test::test]
#[serial_test::serial]
async fn tree_construction() {
let storage = BufferUsages::STORAGE | BufferUsages::COPY_SRC;
let gpu = GpuInstance::new().await.unwrap();
let lbvh = Lbvh::from_device(gpu.device()).unwrap();
let mut state = LbvhState::with_usages(gpu.device(), storage).unwrap();
const LEN: u32 = 1000;
let poses: Vec<_> = (0..LEN)
.map(|i| {
#[cfg(feature = "dim3")]
{
Similarity3::new(
-Vector3::new(i as f32, (i as f32).sin(), (i as f32).cos()),
na::zero(),
1.0,
)
}
#[cfg(feature = "dim2")]
{
Similarity2::new(-Vector2::new(i as f32, (i as f32).sin()), 0.0, 1.0)
}
})
.collect();
#[cfg(feature = "dim3")]
let gpu_poses_data: Vec<GpuSim> = poses.clone();
#[cfg(feature = "dim2")]
let gpu_poses_data: Vec<GpuSim> = poses.iter().map(|p| (*p).into()).collect();
let shapes: Vec<_> = vec![GpuShape::ball(0.5); LEN as usize];
let gpu_vertices = GpuVector::encase(gpu.device(), [], storage);
let gpu_poses = GpuVector::init(gpu.device(), &gpu_poses_data, storage);
let gpu_shapes = GpuVector::init(gpu.device(), &shapes, storage);
let gpu_num_shapes = GpuScalar::init(
gpu.device(),
LEN,
BufferUsages::STORAGE | BufferUsages::UNIFORM,
);
let gpu_collision_pairs = GpuVector::uninit(gpu.device(), 10000, storage);
let gpu_collision_pairs_len = GpuScalar::init(gpu.device(), 0, storage);
let gpu_collision_pairs_indirect = GpuScalar::uninit(gpu.device(), storage);
let mut encoder = gpu
.device()
.create_command_encoder(&wgpu::CommandEncoderDescriptor::default());
let mut pass = encoder.compute_pass("", None);
lbvh.update_tree(
gpu.device(),
&mut pass,
&mut state,
LEN,
&gpu_poses,
&gpu_vertices,
&gpu_shapes,
&gpu_num_shapes,
);
lbvh.find_pairs(
gpu.device(),
&mut pass,
&mut state,
LEN,
&gpu_num_shapes,
&gpu_collision_pairs,
&gpu_collision_pairs_len,
&gpu_collision_pairs_indirect,
);
drop(pass);
gpu.queue().submit(Some(encoder.finish()));
// Check result of `compute_domain`.
let domain = state.domain_aabb.slow_read(&gpu).await[0];
let pts: Vec<_> = poses
.iter()
.map(|p| p.isometry.translation.vector.into())
.collect();
let domain_cpu = Aabb::from_points(pts.iter().copied());
#[cfg(feature = "dim3")]
{
assert_eq!(domain_cpu.mins.coords, domain[0].xyz());
assert_eq!(domain_cpu.maxs.coords, domain[1].xyz());
}
#[cfg(feature = "dim2")]
{
assert_eq!(domain_cpu.mins.coords, domain[0].xy());
assert_eq!(domain_cpu.maxs.coords, domain[1].xy());
}
// Check result of `compute_morton`.
let mortons = state.unsorted_morton_keys.slow_read(&gpu).await;
let morton_cpu: Vec<_> = pts
.iter()
.map(|pt| {
let normalized = (pt - domain_cpu.mins).component_div(&domain_cpu.extents());
#[cfg(feature = "dim3")]
{
morton(normalized.x, normalized.y, normalized.z)
}
#[cfg(feature = "dim2")]
{
morton(normalized.x, normalized.y)
}
})
.collect();
// Check morton codes match (allow small differences due to floating point precision).
for (i, (&cpu, &gpu)) in morton_cpu.iter().zip(mortons.iter()).enumerate() {
let diff = cpu.abs_diff(gpu);
assert!(
diff <= 2,
"Morton code mismatch at index {}: CPU={}, GPU={}, diff={}",
i,
cpu,
gpu,
diff
);
}
// Check result of `sort`.
// Use GPU morton values for sorting to ensure exact match.
let mut sorted_colliders_cpu: Vec<_> = (0..LEN).collect();
sorted_colliders_cpu.sort_by_key(|i| mortons[*i as usize]);
let mut morton_sorted = mortons.to_vec();
morton_sorted.sort();
let sorted_mortons = state.sorted_morton_keys.slow_read(&gpu).await;
let sorted_colliders = state.sorted_colliders.slow_read(&gpu).await;
assert_eq!(sorted_mortons, morton_sorted);
assert_eq!(sorted_colliders, sorted_colliders_cpu);
// Check result of `build`.
let tree = state.tree.slow_read(&gpu).await;
{
// Check that a traversal covers all the nodes and that there is no loop.
let mut visited = vec![false; tree.len()];
let mut stack = vec![0];
while let Some(curr) = stack.pop() {
let node = &tree[curr];
assert!(!visited[curr]);
visited[curr] = true;
if curr < LEN as usize - 1 {
// This is an internal node
stack.push(node.left as usize);
stack.push(node.right as usize);
}
}
assert_eq!(visited.iter().filter(|e| **e).count(), LEN as usize * 2 - 1);
// Check parent pointers.
for (i, node) in tree[..LEN as usize - 1].iter().enumerate() {
assert_eq!(tree[node.right as usize].parent, i as u32);
assert_eq!(tree[node.left as usize].parent, i as u32);
}
}
// Check result of `refit`.
{
// Check that the leaf AABBs are correct.
let first_leaf_id = LEN - 1;
for i in 0..LEN {
let node = &tree[(first_leaf_id + i) as usize];
let collider = sorted_colliders[i as usize];
assert_eq!(
node.aabb(),
Aabb::from_half_extents(
poses[collider as usize].isometry.translation.vector.into(),
parry::math::Vector::repeat(0.5)
)
);
}
// Check that each AABB encloses the AABB of its children.
for i in 0..LEN - 1 {
let node = &tree[i as usize];
let left = &tree[node.left as usize];
let right = &tree[node.right as usize];
println!("Testing: {} -> ({},{})", i, node.left, node.right);
println!("Node: {:?}", node.aabb());
println!("Left: {:?}", left.aabb());
println!("Right: {:?}", right.aabb());
assert_eq!(node.aabb(), left.aabb().merged(&right.aabb()));
}
}
}
#[cfg(feature = "dim3")]
// Expands a 10-bit integer into 30 bits
// by inserting 2 zeros after each bit.
fn expand_bits(v: u32) -> u32 {
let mut vv = v.wrapping_mul(0x00010001) & 0xFF0000FF;
vv = vv.wrapping_mul(0x00000101) & 0x0F00F00F;
vv = vv.wrapping_mul(0x00000011) & 0xC30C30C3;
vv = vv.wrapping_mul(0x00000005) & 0x49249249;
vv
}
#[cfg(feature = "dim3")]
// Calculates a 30-bit Morton code for the
// given 3D point located within the unit cube [0,1].
fn morton(x: f32, y: f32, z: f32) -> u32 {
let scaled_x = (x * 1024.0).clamp(0.0, 1023.0);
let scaled_y = (y * 1024.0).clamp(0.0, 1023.0);
let scaled_z = (z * 1024.0).clamp(0.0, 1023.0);
let xx = expand_bits(scaled_x as u32);
let yy = expand_bits(scaled_y as u32);
let zz = expand_bits(scaled_z as u32);
xx * 4 + yy * 2 + zz
}
#[cfg(feature = "dim2")]
// Expands a 16-bit integer into 32 bits
// by inserting 1 zero after each bit.
fn expand_bits(v: u32) -> u32 {
let mut x = v & 0x0000ffff;
x = (x | (x << 8)) & 0x00ff00ff;
x = (x | (x << 4)) & 0x0f0f0f0f;
x = (x | (x << 2)) & 0x33333333;
x = (x | (x << 1)) & 0x55555555;
x
}
#[cfg(feature = "dim2")]
// Calculates a 32-bit Morton code for the
// given 2D point located within the unit square [0,1].
fn morton(x: f32, y: f32) -> u32 {
let scaled_x = (x * 65536.0).clamp(0.0, 65535.0);
let scaled_y = (y * 65536.0).clamp(0.0, 65535.0);
let xx = expand_bits(scaled_x as u32);
let yy = expand_bits(scaled_y as u32);
xx | (yy << 1)
}
//
// struct PrefixLen<'a> {
// morton_keys: &'a [u32],
// num_colliders: usize,
// }
//
// impl<'a> PrefixLen<'a> {
// fn morton_at(&self, i: i32) -> i32 {
// // TODO PERF: would it be meaningful to add sentinels at the begining
// // and end of the morton_keys array so we don’t have to check
// // bounds?
// if i < 0 || i > self.num_colliders as i32 - 1 {
// return -1;
// } else {
// return self.morton_keys[i as usize] as i32;
// }
// }
//
// fn prefix_len(&self, curr_key: u32, other_index: i32) -> i32 {
// let other_key = self.morton_at(other_index);
// (curr_key as i32 ^ other_key).leading_zeros() as i32
// }
// }
//
// /// Builds each node of the tree in parallel.
// ///
// /// This only computes the tree topology (children and parent pointers).
// /// This doesn’t update the bounding boxes. Call `refit` for updating bounding boxes!
// fn build(tree: &mut [LbvhNode], num_colliders: usize, morton_keys: &[u32]) {
// let num_internal_nodes = num_colliders - 1;
// let first_leaf_id = num_internal_nodes;
// let pl = PrefixLen {
// morton_keys,
// num_colliders,
// };
//
// for i in 0..num_internal_nodes {
// // Determine the direction of the range (+1 or -1).
// let ii = i as i32;
// let curr_key = morton_keys[i];
// let d = (pl.prefix_len(curr_key, ii + 1) - pl.prefix_len(curr_key, ii - 1)).signum();
//
// // Compute upper bound for the length of the range.
// let delta_min = pl.prefix_len(curr_key, ii - d);
// let mut l = 0;
// while pl.prefix_len(curr_key, ii + (l + 1) * d) > delta_min {
// l += 1;
// }
//
// let mut lmax = 2; // TODO PERF: start at 128 ?
// while pl.prefix_len(curr_key, ii + lmax * d) > delta_min {
// lmax *= 2; // TODO PERF: multiply by 4 instead of 2 ?
// }
//
// // Find the other end using binary search.
// let mut l = 0;
// let mut t = lmax / 2;
// while t >= 1 {
// if pl.prefix_len(curr_key, ii + (l + t) * d) > delta_min {
// l += t;
// }
//
// t /= 2;
// }
//
// let j = ii + l * d;
//
// // Find the split position using binary search.
// let delta_node = pl.prefix_len(curr_key, j);
//
// let mut s = 0;
// while pl.prefix_len(curr_key, ii + (s + 1) * d) > delta_node {
// s += 1;
// }
// let seq_s = s;
//
// let mut s = 0;
// let mut t = (l as u32).div_ceil(2) as i32;
// loop {
// if pl.prefix_len(curr_key, ii + (s + t) * d) > delta_node {
// s += t;
// }
//
// if t == 1 {
// break;
// } else {
// t = (t as u32).div_ceil(2) as i32;
// }
// }
//
// println!(
// "base t: {}, delta: {delta_node}, plen seq: {}, plen: {}",
// l / 2,
// pl.prefix_len(curr_key, ii + seq_s * d),
// pl.prefix_len(curr_key, ii + s * d)
// );
// assert_eq!(seq_s, s);
//
// let gamma = ii + s * d + d.min(0);
//
// // Output child and parent pointers.
// let left = if ii.min(j) == gamma {
// first_leaf_id as i32 + gamma
// } else {
// gamma
// };
// let right = if ii.max(j) == gamma + 1 {
// first_leaf_id as i32 + gamma + 1
// } else {
// gamma + 1
// };
// tree[i].left = left as u32;
// tree[i].right = right as u32;
// tree[i].refit_count = 0; // This is a good opportunity to reset the `refit_count` too.
// tree[left as usize].parent = i as u32;
// tree[right as usize].parent = i as u32;
// }
// }
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/broad_phase/mod.rs | crates/wgparry/src/broad_phase/mod.rs | //! Broad-phase collision detection algorithms for identifying potentially colliding pairs.
//!
//! Broad-phase collision detection quickly filters out non-colliding object pairs before
//! running expensive narrow-phase tests. This module provides GPU-accelerated implementations
//! of various broad-phase algorithms.
//!
//! # Available Algorithms
//!
//! ## Brute Force
//!
//! Tests all pairs of objects (O(n²) complexity). Simple but only practical for small scenes
//! (typically < 100 objects). It is mostly relevant for testing and debugging.
//!
//! **Pros**: Simple, no preprocessing, deterministic.
//! **Cons**: O(n²) scaling, impractical for large scenes.
//!
//! ## LBVH - Linear Bounding Volume Hierarchy
//!
//! Builds a binary tree of bounding volumes using Morton codes for spatial sorting.
//! Near-linear construction time (O(n log n)) and efficient traversal make it suitable
//! for large dynamic scenes.
//!
//! **Pros**: O(n log n) construction and O(log n) traversal (average), good for dynamic scenes.
//! **Cons**: Requires tree rebuild for moving objects, more complex than brute force.
mod brute_force_broad_phase;
mod lbvh;
mod narrow_phase;
pub use brute_force_broad_phase::*;
pub use lbvh::*;
pub use narrow_phase::*;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/broad_phase/narrow_phase.rs | crates/wgparry/src/broad_phase/narrow_phase.rs | //! Narrow-phase collision detection for generating contact manifolds.
//!
//! After the broad-phase identifies potentially colliding pairs using AABBs, the narrow-phase
//! performs detailed collision tests to generate contact manifolds. These manifolds contain
//! precise contact point information needed for physics simulation.
//!
//! The narrow-phase:
//! 1. Takes collision pairs from the broad-phase.
//! 2. Performs shape-specific collision tests (ball-ball, cuboid-cuboid, etc.)
//! 3. Generates contact manifolds with points, normals, and penetration depths.
//! 4. Outputs indexed contacts for the physics solver.
use crate::bounding_volumes::WgAabb;
use crate::math::{GpuSim, Point};
use crate::queries::{GpuIndexedContact, WgContact};
use crate::shapes::{GpuShape, WgCapsule, WgPolyline, WgShape, WgTriMesh};
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::indirect::{DispatchIndirectArgs, WgIndirect};
use wgcore::kernel::KernelDispatch;
use wgcore::tensor::{GpuScalar, GpuVector};
use wgcore::{test_shader_compilation, Shader};
use wgebra::{WgSim2, WgSim3};
use wgpu::{ComputePass, ComputePipeline, Device};
#[derive(Shader)]
#[shader(
derive(
WgSim3, WgSim2, WgShape, WgAabb, WgContact, WgIndirect, WgTriMesh, WgPolyline, WgCapsule
),
src = "./narrow_phase.wgsl",
src_fn = "substitute_aliases",
shader_defs = "dim_shader_defs",
composable = false
)]
/// GPU shader for narrow-phase collision detection.
///
/// This shader performs detailed collision tests on potentially colliding pairs identified
/// by the broad-phase. It generates contact manifolds containing:
/// - Contact points (up to 2 in 2D, 4 in 3D)
/// - Contact normals
/// - Penetration depths
///
/// # Pipeline Stages
///
/// The narrow-phase executes in three stages:
/// 1. **Reset**: Clears the contact count from the previous frame.
/// 2. **Main**: Processes collision pairs and generates contacts.
/// 3. **Init indirect args**: Prepares dispatch arguments for subsequent kernels.
pub struct WgNarrowPhase {
main: ComputePipeline,
reset: ComputePipeline,
init_indirect_args: ComputePipeline,
}
impl WgNarrowPhase {
#[allow(dead_code)]
const WORKGROUP_SIZE: u32 = 64;
/// Dispatches the narrow-phase collision detection pipeline.
///
/// # Parameters
///
/// - `device`: The GPU device
/// - `pass`: The compute pass to record commands into
/// - `_num_colliders`: Total number of colliders (unused currently)
/// - `poses`: Collider poses (positions and rotations)
/// - `shapes`: Collider shapes
/// - `collision_pairs`: Potentially colliding pairs from broad-phase
/// - `collision_pairs_len`: Number of collision pairs
/// - `collision_pairs_indirect`: Indirect dispatch arguments for collision pairs
/// - `contacts`: Output buffer for contact manifolds
/// - `contacts_len`: Output count of generated contacts
/// - `contacts_indirect`: Indirect dispatch arguments for contacts
pub fn dispatch(
&self,
device: &Device,
pass: &mut ComputePass,
_num_colliders: u32,
poses: &GpuVector<GpuSim>,
shapes: &GpuVector<GpuShape>,
vertices: &GpuVector<Point<f32>>,
indices: &GpuVector<u32>,
collision_pairs: &GpuVector<[u32; 2]>,
collision_pairs_len: &GpuScalar<u32>,
collision_pairs_indirect: &GpuScalar<DispatchIndirectArgs>,
contacts: &GpuVector<GpuIndexedContact>,
contacts_len: &GpuScalar<u32>,
contacts_indirect: &GpuScalar<DispatchIndirectArgs>,
) {
KernelDispatch::new(device, pass, &self.reset)
.bind_at(0, [(contacts_len.buffer(), 5)])
.dispatch(1);
KernelDispatch::new(device, pass, &self.main)
.bind0([
collision_pairs.buffer(),
collision_pairs_len.buffer(),
poses.buffer(),
shapes.buffer(),
contacts.buffer(),
contacts_len.buffer(),
])
.bind_at(1, [])
.bind_at(2, [(vertices.buffer(), 0), (indices.buffer(), 1)])
.dispatch_indirect(collision_pairs_indirect.buffer());
KernelDispatch::new(device, pass, &self.init_indirect_args)
.bind_at(
0,
[(contacts_len.buffer(), 5), (contacts_indirect.buffer(), 6)],
)
.dispatch(1);
}
}
test_shader_compilation!(WgNarrowPhase, wgcore, crate::dim_shader_defs());
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/utils/mod.rs | crates/wgparry/src/utils/mod.rs | //! Utility functions and data structures for GPU-accelerated algorithms.
//!
//! This module provides general-purpose GPU algorithms that support the collision
//! detection and physics simulation pipelines.
pub use radix_sort::{RadixSort, RadixSortWorkspace};
mod radix_sort;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/utils/radix_sort/mod.rs | crates/wgparry/src/utils/radix_sort/mod.rs | //! Radix sort implementation, ported from `brush-sort`: <https://github.com/ArthurBrussee/brush/tree/main/crates/brush-sort>
use naga_oil::compose::ComposerError;
use wgcore::kernel::KernelDispatch;
use wgcore::tensor::{GpuScalar, GpuVector};
use wgcore::Shader;
use wgpu::{BufferUsages, ComputePass, ComputePipeline, Device};
// NOTE: must match the values from `sorting.wgsl`.
const WG: u32 = 256;
const ELEMENTS_PER_THREAD: u32 = 4;
const BLOCK_SIZE: u32 = WG * ELEMENTS_PER_THREAD;
#[allow(dead_code)]
const BITS_PER_PASS: u32 = 4;
#[allow(dead_code)]
const BIN_COUNT: u32 = 1 << BITS_PER_PASS;
#[derive(Shader)]
#[shader(
derive(Sorting),
src = "./init_indirect_dispatches.wgsl",
composable = false
)]
struct InitIndirectDispatches {
main: ComputePipeline,
}
#[derive(Shader)]
#[shader(derive(Sorting), src = "./sort_count.wgsl", composable = false)]
struct SortCount {
main: ComputePipeline,
}
#[derive(Shader)]
#[shader(derive(Sorting), src = "./sort_reduce.wgsl", composable = false)]
struct SortReduce {
main: ComputePipeline,
}
#[derive(Shader)]
#[shader(derive(Sorting), src = "./sort_scan.wgsl", composable = false)]
struct SortScan {
main: ComputePipeline,
}
#[derive(Shader)]
#[shader(derive(Sorting), src = "./sort_scan_add.wgsl", composable = false)]
struct SortScanAdd {
main: ComputePipeline,
}
#[derive(Shader)]
#[shader(derive(Sorting), src = "./sort_scatter.wgsl", composable = false)]
struct SortScatter {
main: ComputePipeline,
}
#[derive(Shader)]
#[shader(src = "./sorting.wgsl")]
struct Sorting;
/// GPU-accelerated radix sort for sorting large arrays of u32 keys with associated values.
///
/// This implementation uses a 4-bit radix (16 bins per pass) and processes multiple passes
/// to sort up to 32-bit integers. The algorithm is highly optimized for GPU execution with:
/// - Workgroup-local histograms for reduced memory bandwidth
/// - Prefix sum (scan) operations for determining output positions
/// - Scatter phase that writes sorted elements to output buffers
///
/// # Algorithm Overview
///
/// For each 4-bit pass (up to 8 passes for 32-bit keys):
/// 1. **Count**: Histogram computation per workgroup
/// 2. **Reduce**: Aggregate histograms across workgroups
/// 3. **Scan**: Prefix sum on aggregated histograms
/// 4. **Scan Add**: Distribute prefix sums back to workgroup histograms
/// 5. **Scatter**: Write elements to sorted positions based on histograms
///
/// # Performance
///
/// - Processes ~10-100M elements/second on modern GPUs
/// - Near-linear scaling with input size
/// - Memory bandwidth bound (optimal for GPU)
pub struct RadixSort {
    /// Kernel initializing the indirect-dispatch workgroup counts from the element count.
    init: InitIndirectDispatches,
    /// Count phase: per-workgroup digit histograms.
    count: SortCount,
    /// Reduce phase: aggregates histograms across workgroups.
    reduce: SortReduce,
    /// Scan phase: prefix sum over the aggregated histograms.
    scan: SortScan,
    /// Scan-add phase: distributes prefix sums back to the per-workgroup histograms.
    scan_add: SortScanAdd,
    /// Scatter phase: writes each element to its sorted position.
    scatter: SortScatter,
}
/// Workspace buffers for radix sort operations.
///
/// Maintains intermediate buffers needed by the radix sort algorithm:
/// - Histogram buffers for bin counts
/// - Reduction buffers for prefix sums
/// - Ping-pong buffers for multi-pass sorting
///
/// The workspace is reusable across multiple sort operations and automatically
/// resizes buffers as needed.
pub struct RadixSortWorkspace {
    /// One uniform per 4-bit pass, holding that pass's bit shift (`pass_id * 4`).
    /// Grown lazily by [`RadixSort::dispatch`].
    pass_uniforms: Vec<GpuScalar<u32>>,
    reduced_buf: GpuVector<u32>, // Tensor of size BLOCK_SIZE
    /// Per-workgroup histogram storage (16 bins per workgroup); resized on demand.
    count_buf: GpuVector<u32>,
    /// Indirect-dispatch args for the count/scatter kernels, filled on the GPU by `init`.
    num_wgs: GpuScalar<[u32; 3]>,
    /// Indirect-dispatch args for the reduce/scan-add kernels, filled on the GPU by `init`.
    num_reduce_wgs: GpuScalar<[u32; 3]>,
    output_keys_pong: GpuVector<u32>, // dual-buffering for output keys.
    output_values_pong: GpuVector<u32>, // dual-buffering for output values.
}
impl RadixSortWorkspace {
    /// Allocates a fresh radix sort workspace with default (mostly empty) buffer sizes.
    ///
    /// All element buffers start empty (or at their minimal fixed size) and are grown
    /// automatically on first use to match the input data size.
    ///
    /// # Parameters
    ///
    /// - `device`: The GPU device to allocate buffers on
    pub fn new(device: &Device) -> Self {
        // The reduced buffer has a fixed size of one block; the indirect-dispatch
        // scalars need the INDIRECT usage so kernels can be dispatched from them.
        let reduced_init = vec![0u32; BLOCK_SIZE as usize];
        Self {
            pass_uniforms: Vec::new(),
            reduced_buf: GpuVector::init(device, &reduced_init, BufferUsages::STORAGE),
            count_buf: GpuVector::uninit(device, 0, BufferUsages::STORAGE),
            num_wgs: GpuScalar::init(
                device,
                [1; 3],
                BufferUsages::STORAGE | BufferUsages::INDIRECT,
            ),
            num_reduce_wgs: GpuScalar::init(
                device,
                [1; 3],
                BufferUsages::STORAGE | BufferUsages::INDIRECT,
            ),
            output_keys_pong: GpuVector::uninit(device, 0, BufferUsages::STORAGE),
            output_values_pong: GpuVector::uninit(device, 0, BufferUsages::STORAGE),
        }
    }
}
impl RadixSort {
    /// Creates a new radix sort instance by compiling shaders on the given device.
    ///
    /// # Parameters
    ///
    /// - `device`: The GPU device to compile shaders for
    ///
    /// # Returns
    ///
    /// - `Ok(RadixSort)` on successful shader compilation
    /// - `Err(ComposerError)` if shader compilation fails
    ///
    /// # Errors
    ///
    /// Returns an error if any of the radix sort shader stages fail to compile.
    pub fn from_device(device: &Device) -> Result<Self, ComposerError> {
        Ok(Self {
            init: InitIndirectDispatches::from_device(device)?,
            count: SortCount::from_device(device)?,
            reduce: SortReduce::from_device(device)?,
            scan: SortScan::from_device(device)?,
            scan_add: SortScanAdd::from_device(device)?,
            scatter: SortScatter::from_device(device)?,
        })
    }
    /// Dispatches the radix sort operation to sort keys with associated values.
    ///
    /// The sort is stable: elements with equal keys maintain their relative order.
    /// Both keys and values are sorted together, making this useful for indirect sorting
    /// (where values are indices into another array).
    ///
    /// # Parameters
    ///
    /// - `device`: The GPU device
    /// - `pass`: The compute pass to record commands into
    /// - `workspace`: Workspace buffers (automatically resized if needed)
    /// - `input_keys`: The u32 keys to sort
    /// - `input_values`: Associated values to sort alongside keys
    /// - `n_sort`: Number of elements to sort (must be <= input buffer size)
    /// - `sorting_bits`: Number of bits to sort (1-32). Use 32 for full sorting,
    ///   or fewer bits if your keys have a limited range (e.g., 24 for Morton codes)
    /// - `output_keys`: Buffer to write sorted keys to
    /// - `output_values`: Buffer to write sorted values to
    ///
    /// # Panics
    ///
    /// - Panics if `input_keys` and `input_values` have different lengths
    /// - Panics if `sorting_bits > 32`
    ///
    /// # Performance Tips
    ///
    /// - Use the minimum `sorting_bits` needed for your data range
    /// - Reuse the same `workspace` across multiple sort operations
    /// - Ensure input buffers are properly aligned for GPU access
    pub fn dispatch(
        &self,
        device: &Device,
        pass: &mut ComputePass,
        workspace: &mut RadixSortWorkspace,
        input_keys: &GpuVector<u32>,
        input_values: &GpuVector<u32>,
        n_sort: &GpuScalar<u32>,
        sorting_bits: u32,
        output_keys: &GpuVector<u32>,
        output_values: &GpuVector<u32>,
    ) {
        assert_eq!(
            input_keys.len(),
            input_values.len(),
            "Input keys and values must have the same number of elements"
        );
        assert!(sorting_bits <= 32, "Can only sort up to 32 bits");
        let max_n = input_keys.len() as u32;
        // compute buffer and dispatch sizes
        let max_needed_wgs = max_n.div_ceil(BLOCK_SIZE);
        // 16 histogram bins per workgroup (same value as `BIN_COUNT`).
        if workspace.count_buf.len() < max_needed_wgs as u64 * 16 {
            workspace.count_buf =
                GpuVector::uninit(device, max_needed_wgs * 16, BufferUsages::STORAGE);
        }
        // Fill the indirect-dispatch buffers on the GPU from the runtime element count.
        KernelDispatch::new(device, pass, &self.init.main)
            .bind0([
                n_sort.buffer(),
                workspace.num_wgs.buffer(),
                workspace.num_reduce_wgs.buffer(),
            ])
            .dispatch(1);
        let mut cur_keys = input_keys;
        let mut cur_vals = input_values;
        if workspace.output_keys_pong.len() < input_keys.len() {
            // TODO: is this OK even in the case where we call the radix sort multiple times
            //       successively but with increasing input buffer sizes? Wondering if that could
            //       free the previous buffer and then crash the previous invocation.
            workspace.output_keys_pong =
                GpuVector::uninit(device, input_keys.len() as u32, BufferUsages::STORAGE);
            workspace.output_values_pong =
                GpuVector::uninit(device, input_values.len() as u32, BufferUsages::STORAGE);
        }
        // One pass per 4 bits of key (same value as `BITS_PER_PASS`).
        let num_passes = sorting_bits.div_ceil(4);
        let mut output_keys = output_keys;
        let mut output_values = output_values;
        let mut output_keys_pong = &workspace.output_keys_pong;
        let mut output_values_pong = &workspace.output_values_pong;
        if num_passes.is_multiple_of(2) {
            // Make sure the last pass has the user provided `output_keys`
            // set as the output buffer so that the final result doesn’t end
            // up stored in the workspace’s pong buffers instead.
            std::mem::swap(&mut output_keys, &mut output_keys_pong);
            std::mem::swap(&mut output_values, &mut output_values_pong);
        }
        for pass_id in 0..num_passes {
            // Lazily grow the per-pass uniforms; each stores this pass’s bit shift
            // (`pass_id * 4`), so they can be allocated once and reused forever.
            if pass_id as usize >= workspace.pass_uniforms.len() {
                workspace.pass_uniforms.push(GpuScalar::init(
                    device,
                    pass_id * 4,
                    BufferUsages::STORAGE | BufferUsages::UNIFORM,
                ));
            }
            let uniforms_buffer = &workspace.pass_uniforms[pass_id as usize];
            // 1. Count: per-workgroup digit histograms.
            KernelDispatch::new(device, pass, &self.count.main)
                .bind0([
                    uniforms_buffer.buffer(),
                    n_sort.buffer(),
                    cur_keys.buffer(),
                    workspace.count_buf.buffer(),
                ])
                .dispatch_indirect(workspace.num_wgs.buffer());
            // 2. Reduce: aggregate histograms across workgroups.
            KernelDispatch::new(device, pass, &self.reduce.main)
                .bind0([
                    n_sort.buffer(),
                    workspace.count_buf.buffer(),
                    workspace.reduced_buf.buffer(),
                ])
                .dispatch_indirect(workspace.num_reduce_wgs.buffer());
            // 3. Scan: prefix sum over the aggregated histograms (single workgroup).
            KernelDispatch::new(device, pass, &self.scan.main)
                .bind0([n_sort.buffer(), workspace.reduced_buf.buffer()])
                .dispatch(1);
            // 4. Scan-add: distribute prefix sums back to the per-workgroup histograms.
            KernelDispatch::new(device, pass, &self.scan_add.main)
                .bind0([
                    n_sort.buffer(),
                    workspace.reduced_buf.buffer(),
                    workspace.count_buf.buffer(),
                ])
                .dispatch_indirect(workspace.num_reduce_wgs.buffer());
            // 5. Scatter: write each (key, value) pair to its sorted position.
            KernelDispatch::new(device, pass, &self.scatter.main)
                .bind0([
                    uniforms_buffer.buffer(),
                    n_sort.buffer(),
                    cur_keys.buffer(),
                    cur_vals.buffer(),
                    workspace.count_buf.buffer(),
                    output_keys.buffer(),
                    output_values.buffer(),
                ])
                .dispatch_indirect(workspace.num_wgs.buffer());
            if pass_id == 0 {
                // The input buffers are immutable borrows, so after the first pass we
                // start ping-ponging between the (possibly pre-swapped) user output
                // buffers and the workspace’s pong buffers.
                cur_keys = output_keys;
                cur_vals = output_values;
                output_keys = output_keys_pong;
                output_values = output_values_pong;
            } else {
                std::mem::swap(&mut cur_keys, &mut output_keys);
                std::mem::swap(&mut cur_vals, &mut output_values);
            }
        }
    }
}
#[cfg(all(test, not(target_family = "wasm")))]
mod tests {
    use crate::utils::radix_sort::RadixSortWorkspace;
    use crate::utils::RadixSort;
    use na::DVector;
    use wgcore::gpu::GpuInstance;
    use wgcore::kernel::CommandEncoderExt;
    use wgcore::tensor::{GpuScalar, GpuVector};
    use wgpu::BufferUsages;
    /// Reference CPU implementation: returns the indices that sort `data`.
    ///
    /// `sort_by_key` is stable, matching the stability guarantee of the GPU radix sort,
    /// so values paired with equal keys can be compared exactly.
    pub fn cpu_argsort<T: Ord>(data: &[T]) -> Vec<usize> {
        let mut indices = (0..data.len()).collect::<Vec<_>>();
        indices.sort_by_key(|&i| &data[i]);
        indices
    }
    /// Sorts a small array of (key, value) pairs on the GPU and checks both outputs
    /// against the CPU argsort reference, reusing the workspace across iterations.
    #[futures_test::test]
    #[serial_test::serial]
    async fn test_sorting() {
        let gpu = GpuInstance::new().await.unwrap();
        let sort = RadixSort::from_device(gpu.device()).unwrap();
        let mut workspace = RadixSortWorkspace::new(gpu.device());
        // Re-run with varying inputs (including duplicates and a > 24-bit key) to
        // exercise workspace reuse across successive dispatches.
        for i in 0u32..128 {
            let keys_inp = [
                5 + i * 4,
                i,
                6,
                123,
                74657,
                123,
                999,
                2u32.pow(24) + 123,
                6,
                7,
                8,
                0,
                i * 2,
                16 + i,
                128 * i,
            ];
            // Values derived deterministically from keys so the (key, value) pairing
            // after sorting is checkable.
            let values_inp: Vec<_> = keys_inp.iter().copied().map(|x| x * 2 + 5).collect();
            let input_usages = BufferUsages::STORAGE;
            let output_usages = BufferUsages::STORAGE | BufferUsages::COPY_SRC;
            let staging_usage = BufferUsages::MAP_READ | BufferUsages::COPY_DST;
            let keys = GpuVector::init(gpu.device(), keys_inp, input_usages);
            let values = GpuVector::init(gpu.device(), &values_inp, input_usages);
            // Output/staging buffers are initialized with the inputs only to get the
            // right sizes; their contents are overwritten by the sort / copies.
            let out_keys = GpuVector::init(gpu.device(), keys_inp, output_usages);
            let out_values = GpuVector::init(gpu.device(), &values_inp, output_usages);
            let staging_keys = GpuVector::init(gpu.device(), keys_inp, staging_usage);
            let staging_values = GpuVector::init(gpu.device(), &values_inp, staging_usage);
            let num_points =
                GpuScalar::init(gpu.device(), keys_inp.len() as u32, BufferUsages::STORAGE);
            let mut encoder = gpu.device().create_command_encoder(&Default::default());
            let mut pass = encoder.compute_pass("test", None);
            sort.dispatch(
                gpu.device(),
                &mut pass,
                &mut workspace,
                &keys,
                &values,
                &num_points,
                32,
                &out_keys,
                &out_values,
            );
            drop(pass);
            staging_keys.copy_from(&mut encoder, &out_keys);
            staging_values.copy_from(&mut encoder, &out_values);
            gpu.queue().submit(Some(encoder.finish()));
            let result_keys = staging_keys.read(gpu.device()).await.unwrap();
            let result_values = staging_values.read(gpu.device()).await.unwrap();
            // Compare against the stable CPU argsort reference.
            let inds = cpu_argsort(&keys_inp);
            let ref_keys: Vec<u32> = inds.iter().map(|&i| keys_inp[i]).collect();
            let ref_values: Vec<u32> = inds.iter().map(|&i| values_inp[i]).collect();
            assert_eq!(DVector::from(ref_keys), DVector::from(result_keys));
            assert_eq!(DVector::from(ref_values), DVector::from(result_values));
        }
    }
    /// Same check as `test_sorting` but with a large randomized input (on the order of
    /// a million elements) to exercise the multi-workgroup paths.
    #[futures_test::test]
    #[serial_test::serial]
    async fn test_sorting_big() {
        use rand::Rng;
        let gpu = GpuInstance::new().await.unwrap();
        let sort = RadixSort::from_device(gpu.device()).unwrap();
        let mut workspace = RadixSortWorkspace::new(gpu.device());
        // Simulate some data as one might find for a bunch of gaussians.
        let mut rng = rand::rng();
        let mut keys_inp = Vec::new();
        for i in 0..10000 {
            let start = rng.random_range(i..i + 150);
            let end = rng.random_range(start..start + 250);
            for j in start..end {
                if rng.random::<f32>() < 0.5 {
                    keys_inp.push(j);
                }
            }
        }
        let values_inp: Vec<_> = keys_inp.iter().map(|&x| x * 2 + 5).collect();
        let input_usages = BufferUsages::STORAGE;
        let output_usages = BufferUsages::STORAGE | BufferUsages::COPY_SRC;
        let staging_usage = BufferUsages::MAP_READ | BufferUsages::COPY_DST;
        let keys = GpuVector::init(gpu.device(), &keys_inp, input_usages);
        let values = GpuVector::init(gpu.device(), &values_inp, input_usages);
        let out_keys = GpuVector::init(gpu.device(), &keys_inp, output_usages);
        let out_values = GpuVector::init(gpu.device(), &values_inp, output_usages);
        let staging_keys = GpuVector::init(gpu.device(), &keys_inp, staging_usage);
        let staging_values = GpuVector::init(gpu.device(), &values_inp, staging_usage);
        let num_points =
            GpuScalar::init(gpu.device(), keys_inp.len() as u32, BufferUsages::STORAGE);
        let mut encoder = gpu.device().create_command_encoder(&Default::default());
        let mut pass = encoder.compute_pass("test", None);
        sort.dispatch(
            gpu.device(),
            &mut pass,
            &mut workspace,
            &keys,
            &values,
            &num_points,
            32,
            &out_keys,
            &out_values,
        );
        drop(pass);
        staging_keys.copy_from(&mut encoder, &out_keys);
        staging_values.copy_from(&mut encoder, &out_values);
        gpu.queue().submit(Some(encoder.finish()));
        let result_keys = staging_keys.read(gpu.device()).await.unwrap();
        let result_values = staging_values.read(gpu.device()).await.unwrap();
        let inds = cpu_argsort(&keys_inp);
        let ref_keys: Vec<u32> = inds.iter().map(|&i| keys_inp[i]).collect();
        let ref_values: Vec<u32> = inds.iter().map(|&i| values_inp[i]).collect();
        assert_eq!(DVector::from(ref_keys), DVector::from(result_keys));
        assert_eq!(DVector::from(ref_values), DVector::from(result_values));
    }
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/queries/sat.rs | crates/wgparry/src/queries/sat.rs | //! Separating Axis Theorem (SAT) collision detection.
//!
//! SAT is an efficient algorithm for detecting collisions between convex polyhedra
//! (like cuboids). It works by testing potential separating axes: if any axis exists
//! along which the shapes' projections don't overlap, the shapes don't collide.
use crate::shapes::WgCuboid;
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::Shader;
use wgebra::{WgSim2, WgSim3};
#[derive(Shader)]
#[shader(
    derive(WgCuboid, WgSim3, WgSim2),
    src = "sat.wgsl",
    src_fn = "substitute_aliases",
    shader_defs = "dim_shader_defs"
)]
/// GPU shader implementing the Separating Axis Theorem for collision detection.
///
/// This shader provides SAT-based collision detection, particularly efficient for
/// detecting collisions between convex polyhedra like cuboids. SAT is a fundamental
/// algorithm in computational geometry that determines whether two convex shapes
/// overlap by testing separating planes.
pub struct WgSat;
// Compile-check the composed shader with the crate's dimension-specific shader defs.
wgcore::test_shader_compilation!(WgSat, wgcore, crate::dim_shader_defs());
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/queries/contact.rs | crates/wgparry/src/queries/contact.rs | //! Contact generation for collision response.
//!
//! This module implements contact manifold generation between pairs of colliding shapes.
//! Contact manifolds contain multiple contact points with normals and penetration depths,
//! which are essential for physics simulation and collision response.
//!
//! # Contact Manifolds
//!
//! A contact manifold represents a collision between two shapes and contains:
//! - **Contact points**: Up to 2 points in 2D, 4 points in 3D.
//! - **Contact normal**: The direction to separate the shapes.
//! - **Penetration depths**: How deep each contact point penetrates.
use super::{WgPolygonalFeature, WgSat};
use crate::math::Vector;
use crate::queries::gjk::{WgCsoPoint, WgEpa, WgGjk, WgVoronoiSimplex};
use crate::shapes::{WgBall, WgCuboid, WgShape};
use crate::{dim_shader_defs, substitute_aliases};
use encase::ShaderType;
use na::Vector2;
use wgcore::Shader;
use wgebra::{WgSim2, WgSim3};
#[cfg(feature = "dim3")]
use crate::shapes::{WgCone, WgCylinder};
/// Maximum number of contact points in a 2D contact manifold.
#[cfg(feature = "dim2")]
const MAX_MANIFOLD_POINTS: usize = 2;
/// Maximum number of contact points in a 3D contact manifold.
#[cfg(feature = "dim3")]
const MAX_MANIFOLD_POINTS: usize = 4;
/// A single contact point within a contact manifold.
///
/// Each contact point represents a location where two shapes are touching or penetrating,
/// with an associated penetration distance (negative for separation, positive for overlap).
#[derive(Copy, Clone, PartialEq, Debug, ShaderType)]
pub struct GpuContactPoint {
    /// The contact point position in world space.
    point: Vector<f32>,
    /// Signed distance (penetration depth).
    ///
    /// - Positive values indicate penetration (shapes overlap)
    /// - Negative values indicate separation (shapes don't touch)
    /// - Zero indicates contact at the surface
    // NOTE(review): parry's CPU-side contacts use the opposite convention
    // (dist < 0 means penetration) — confirm this matches the WGSL side.
    dist: f32,
}
/// A contact manifold containing multiple contact points.
///
/// The manifold describes a collision between two shapes with:
/// - Up to 2 contact points in 2D or 4 contact points in 3D.
/// - A shared contact normal.
/// - The actual number of valid contact points.
///
/// # Stability
///
/// Multiple contact points provide rotational stability in physics simulation.
/// For example, a box resting on a plane will have 2-4 contact points depending
/// on orientation, preventing unrealistic tipping.
#[derive(Copy, Clone, PartialEq, Debug, ShaderType)]
pub struct GpuContactManifold {
    /// Array of contact points.
    ///
    /// Only the first `len` points are valid; remaining slots may contain garbage data.
    points: [GpuContactPoint; MAX_MANIFOLD_POINTS],
    /// The contact normal pointing from shape A toward shape B.
    ///
    /// This is the direction along which the shapes should be separated to resolve
    /// the collision.
    normal: Vector<f32>,
    /// Number of valid contact points in the `points` array.
    ///
    /// Valid range: 0 to [`MAX_MANIFOLD_POINTS`]. A value of 0 means no contact.
    len: u32,
}
/// An indexed contact associating a manifold with two collider indices.
///
/// Used in broad-phase collision detection to store contacts along with the
/// identifiers of the colliding objects. The layout is host-shareable
/// (`ShaderType`) so these can be read back or produced directly on the GPU.
#[derive(Copy, Clone, PartialEq, Debug, ShaderType)]
pub struct GpuIndexedContact {
    /// The contact manifold describing the collision.
    contact: GpuContactManifold,
    /// Indices of the two colliding objects `[collider_a, collider_b]`.
    colliders: Vector2<u32>,
}
#[derive(Shader)]
// The 2D and 3D builds compose different shape shaders: the 3D variant additionally
// derives the cylinder and cone shaders, which have no 2D counterpart.
#[cfg_attr(
    feature = "dim2",
    shader(
        derive(
            WgBall,
            WgCuboid,
            WgShape,
            WgSim2,
            WgSim3,
            WgSat,
            WgPolygonalFeature,
            WgContactManifold,
            WgContactPfmPfm,
        ),
        src = "contact.wgsl",
        src_fn = "substitute_aliases",
        shader_defs = "dim_shader_defs"
    )
)]
#[cfg_attr(
    feature = "dim3",
    shader(
        derive(
            WgBall,
            WgCuboid,
            WgShape,
            WgSim2,
            WgSim3,
            WgSat,
            WgPolygonalFeature,
            WgContactManifold,
            WgContactPfmPfm,
            WgCylinder,
            WgCone
        ),
        src = "contact.wgsl",
        src_fn = "substitute_aliases",
        shader_defs = "dim_shader_defs"
    )
)]
/// GPU shader for contact generation between shapes.
///
/// This shader implements collision detection algorithms that generate contact
/// manifolds for various shape type pairs. It composes specialized algorithms:
/// - Ball-ball contacts (simple distance checks)
/// - Cuboid-cuboid contacts (SAT + polygonal feature clipping)
/// - Mixed shape contacts (ball-cuboid, etc.)
pub struct WgContact;
#[derive(Shader)]
#[shader(
    derive(WgSim2, WgSim3),
    src = "contact_manifold.wgsl",
    src_fn = "substitute_aliases",
    shader_defs = "dim_shader_defs"
)]
/// GPU shader defining contact manifold data structures.
///
/// Provides the WGSL definitions for [`GpuContactPoint`], [`GpuContactManifold`],
/// and [`GpuIndexedContact`], along with utility functions for manipulating
/// contact manifolds.
///
/// This shader is a dependency for [`WgContact`] and other contact-related shaders.
pub struct WgContactManifold;
#[derive(Shader)]
#[shader(
    derive(
        WgCsoPoint,
        WgVoronoiSimplex,
        WgShape,
        WgGjk,
        WgEpa,
        WgPolygonalFeature,
    ),
    src = "contact_pfm_pfm.wgsl",
    src_fn = "substitute_aliases",
    shader_defs = "dim_shader_defs"
)]
/// Kernel for contact manifold calculation between two "Polygonal Feature Maps".
///
/// A Polygonal Feature Map is similar to the concept of Support Mappings, except that
/// instead of associating an extremal point to a vector, it associates an extremal
/// polygonal face to the direction.
pub struct WgContactPfmPfm;
// Compile-check the composed shaders with the crate's dimension-specific shader defs.
wgcore::test_shader_compilation!(WgContact, wgcore, crate::dim_shader_defs());
wgcore::test_shader_compilation!(WgContactPfmPfm, wgcore, crate::dim_shader_defs());
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/queries/projection.rs | crates/wgparry/src/queries/projection.rs | //! Point projection queries for distance and closest point calculations.
//!
//! Point projection finds the closest point on a shape's surface to a given query point.
//! This is fundamental for:
//! - Distance queries between shapes
//! - Penetration depth calculations
//! - Closest point computations
//! - Contact point generation
use crate::math::Point;
use crate::substitute_aliases;
use wgcore::Shader;
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
/// Result of a point projection query, GPU-compatible layout.
///
/// Contains the closest point on the shape's surface and whether the query point
/// was inside the shape.
///
/// # Fields
///
/// - `point`: The projected point (closest point on the shape's surface)
/// - `is_inside`: Whether the query point was inside the shape (non-zero = inside, 0 = outside)
/// - `padding` (2D only): Alignment padding for GPU buffer compatibility
///
/// # Memory Layout
///
/// This struct is `#[repr(C)]` and implements `bytemuck::Pod`, making it suitable for
/// direct GPU buffer uploads and downloads. Its layout must match the host-shareable
/// projection-result structs on the WGSL side (see `projection.wgsl`).
pub struct GpuProjectionResult {
    /// The projected point on the shape's surface.
    ///
    /// This is the point on the shape's boundary that is closest to the query point.
    pub point: Point<f32>,
    /// Whether the query point was inside the shape.
    ///
    /// - `0`: Query point is outside the shape
    /// - Non-zero: Query point is inside the shape
    pub is_inside: u32,
    #[cfg(feature = "dim2")]
    /// Padding for 2D builds to maintain alignment (unused).
    pub padding: u32,
}
#[derive(Shader)]
#[shader(src = "projection.wgsl", src_fn = "substitute_aliases")]
/// GPU shader for point projection operations.
///
/// This shader provides WGSL types and utility functions for projecting points onto
/// shape surfaces. It defines the [`GpuProjectionResult`] type and related functions
/// used by shape-specific projection implementations.
pub struct WgProjection;
// Compile-check the composed shader with the crate's dimension-specific shader defs.
wgcore::test_shader_compilation!(WgProjection, wgcore, crate::dim_shader_defs());
#[cfg(test)]
pub(crate) mod test_utils {
    use crate::queries::GpuProjectionResult;
    use crate::{dim_shader_defs, substitute_aliases};
    use nalgebra::vector;
    #[cfg(feature = "dim2")]
    use parry2d::shape::Shape;
    #[cfg(feature = "dim3")]
    use parry3d::shape::Shape;
    use wgcore::kernel::{CommandEncoderExt, KernelDispatch};
    use wgcore::tensor::GpuVector;
    use wgcore::{gpu::GpuInstance, Shader};
    use wgpu::{Buffer, BufferUsages, Device};

    /// Builds a compute pipeline that runs `projectLocalPoint` and
    /// `projectLocalPointOnBoundary` from the shader `Sh` on an array of test points,
    /// one invocation per point, against shapes of WGSL type `shader_shape_type`.
    fn test_pipeline<Sh: Shader>(
        device: &Device,
        shader_shape_type: &str,
    ) -> wgpu::ComputePipeline {
        let test_kernel = substitute_aliases(&format!(
            r#"
struct ProjectionResultHostShareable {{
    point: Vector,
    is_inside: u32,
}}

@group(0) @binding(0)
var<storage, read> test_shapes: array<{shader_shape_type}>;
@group(0) @binding(1)
var<storage, read> test_points: array<Vector>;
@group(0) @binding(2)
var<storage, read_write> projs: array<Vector>;
@group(0) @binding(3)
var<storage, read_write> projs_on_boundary: array<ProjectionResultHostShareable>;

@compute @workgroup_size(1, 1, 1)
fn test(@builtin(global_invocation_id) invocation_id: vec3<u32>) {{
    let i = invocation_id.x;
    let point = test_points[i];
    projs[i] = projectLocalPoint(test_shapes[i], point);
    let proj = projectLocalPointOnBoundary(test_shapes[i], point);
    projs_on_boundary[i] = ProjectionResultHostShareable(proj.point, u32(proj.is_inside));
}}
"#
        ));
        // Append the test kernel to the shader's own source and compile the result.
        let src = format!("{}\n{}", Sh::src(), test_kernel);
        let module = Sh::composer()
            .unwrap()
            .make_naga_module(naga_oil::compose::NagaModuleDescriptor {
                source: &src,
                file_path: Sh::FILE_PATH,
                shader_defs: dim_shader_defs(),
                ..Default::default()
            })
            .unwrap();
        wgcore::utils::load_module(device, "test", module)
    }

    /// Runs the GPU point-projection shader `Sh` for `shape` on a grid of points
    /// covering twice the shape's local AABB, then compares both the solid projection
    /// and the boundary projection against parry's CPU implementation.
    ///
    /// `shape_buffer` uploads the per-invocation shape array to the GPU (shape types
    /// have type-specific GPU encodings, hence the caller-provided closure).
    pub async fn test_point_projection<Sh: Shader, S: Shape + Copy>(
        shader_shape_type: &str,
        shape: S,
        shape_buffer: impl FnOnce(&Device, &[S], BufferUsages) -> Buffer,
    ) {
        let gpu = GpuInstance::new().await.unwrap();
        let pipeline = test_pipeline::<Sh>(gpu.device(), shader_shape_type);
        let mut encoder = gpu.device().create_command_encoder(&Default::default());
        const LEN: u32 = 30;
        let mut points = vec![];
        let aabb = shape.compute_local_aabb();
        let step = aabb.half_extents() * 4.0 / (LEN as f32);
        // In 2D the third grid dimension collapses to a single layer.
        let nk = if cfg!(feature = "dim2") { 1 } else { LEN };
        // Grid origin; loop-invariant, so computed once.
        let origin = aabb.mins.coords * 2.0;
        for i in 0..LEN {
            for j in 0..LEN {
                for _k in 0..nk {
                    #[cfg(feature = "dim2")]
                    let pt = vector![i as f32, j as f32].component_mul(&step) + origin;
                    // In 3D, points are padded to vec4 for GPU-compatible alignment.
                    #[cfg(feature = "dim3")]
                    let pt = (vector![i as f32, j as f32, _k as f32].component_mul(&step) + origin)
                        .push(0.0);
                    points.push(pt);
                }
            }
        }
        #[cfg(feature = "dim2")]
        type GpuPoint = na::Point2<f32>;
        #[cfg(feature = "dim3")]
        type GpuPoint = na::Point4<f32>;
        let usages = BufferUsages::STORAGE | BufferUsages::COPY_SRC;
        // One shape per invocation; only `points.len()` entries are ever indexed by the
        // kernel, so size the array accordingly (avoids a LEN³ allocation in 2D).
        let shapes = vec![shape; points.len()];
        let gpu_shapes = shape_buffer(gpu.device(), &shapes, usages);
        let gpu_points = GpuVector::init(gpu.device(), &points, usages);
        let gpu_projs: GpuVector<GpuPoint> =
            GpuVector::uninit(gpu.device(), points.len() as u32, usages);
        let gpu_projs_on_boundary: GpuVector<GpuProjectionResult> =
            GpuVector::uninit(gpu.device(), points.len() as u32, usages);
        let usages = BufferUsages::MAP_READ | BufferUsages::COPY_DST;
        let staging_projs: GpuVector<GpuPoint> =
            GpuVector::uninit(gpu.device(), points.len() as u32, usages);
        let staging_projs_on_boundary: GpuVector<GpuProjectionResult> =
            GpuVector::uninit(gpu.device(), points.len() as u32, usages);
        let mut pass = encoder.compute_pass("test", None);
        KernelDispatch::new(gpu.device(), &mut pass, &pipeline)
            .bind0([
                &gpu_shapes,
                gpu_points.buffer(),
                gpu_projs.buffer(),
                gpu_projs_on_boundary.buffer(),
            ])
            .dispatch(points.len() as u32);
        drop(pass); // Ensure the pass is ended before the encoder is borrowed again.
        staging_projs.copy_from(&mut encoder, &gpu_projs);
        staging_projs_on_boundary.copy_from(&mut encoder, &gpu_projs_on_boundary);
        gpu.queue().submit(Some(encoder.finish()));
        let result_projs = staging_projs.read(gpu.device()).await.unwrap();
        let gpu_result_projs_on_boundary =
            staging_projs_on_boundary.read(gpu.device()).await.unwrap();
        #[cfg(feature = "dim2")]
        for (i, pt) in points.iter().enumerate() {
            let proj = shape.project_local_point(&(*pt).into(), true);
            approx::assert_relative_eq!(proj.point, result_projs[i], epsilon = 1.0e-6);
            let proj = shape.project_local_point(&(*pt).into(), false);
            // Skip points where the CPU reference itself is degenerate.
            if !proj.point.x.is_finite() {
                continue;
            }
            assert_eq!(
                proj.is_inside,
                gpu_result_projs_on_boundary[i].is_inside != 0
            );
            approx::assert_relative_eq!(
                proj.point,
                gpu_result_projs_on_boundary[i].point,
                epsilon = 1.0e-6
            );
        }
        #[cfg(feature = "dim3")]
        for (i, pt) in points.iter().enumerate() {
            let proj = shape.project_local_point(&pt.xyz().into(), true);
            approx::assert_relative_eq!(proj.point, result_projs[i].xyz(), epsilon = 1.0e-6);
            let proj = shape.project_local_point(&pt.xyz().into(), false);
            // Skip points where the CPU reference itself is degenerate.
            if !proj.point.x.is_finite() {
                continue;
            }
            assert_eq!(
                proj.is_inside,
                gpu_result_projs_on_boundary[i].is_inside != 0
            );
            approx::assert_relative_eq!(
                proj.point,
                gpu_result_projs_on_boundary[i].point,
                epsilon = 1.0e-6
            );
        }
    }
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/queries/ray.rs | crates/wgparry/src/queries/ray.rs | //! Ray data structure and shader for ray-casting queries.
use wgcore::Shader;
#[derive(Shader)]
#[shader(src = "ray.wgsl")]
/// GPU shader defining the ray data structure for ray-casting queries.
///
/// This shader provides the WGSL definition of a ray, which consists of:
/// - **Origin**: The starting point of the ray.
/// - **Direction**: The direction vector (typically normalized).
pub struct WgRay;
// Compile-check the shader (no dimension-specific defs needed here).
wgcore::test_shader_compilation!(WgRay);
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/queries/mod.rs | crates/wgparry/src/queries/mod.rs | //! Geometric query operations for collision detection and physics simulation.
//!
//! This module provides GPU-accelerated geometric algorithms for:
//! - **Ray-casting**: Finding intersections between rays and shapes
//! - **Point projection**: Finding the closest point on a shape's surface
//! - **Contact generation**: Computing contact points and normals for collision response
//! - **Separating axis tests**: Efficient collision detection between convex shapes
mod contact;
mod gjk;
mod polygonal_feature;
mod projection;
mod ray;
mod sat;
pub use contact::*;
pub use polygonal_feature::*;
pub use projection::*;
pub use ray::*;
pub use sat::*;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/queries/polygonal_feature.rs | crates/wgparry/src/queries/polygonal_feature.rs | //! Polygonal feature extraction and contact manifold generation.
//!
//! Polygonal features (faces, edges, vertices) are geometric primitives extracted from
//! shapes like cuboids for contact manifold generation. This module implements the
//! feature-clipping algorithm for generating multi-point contact manifolds in one shot.
//!
//! # Feature Clipping
//!
//! When two polyhedra collide, the algorithm:
//! 1. Identifies the colliding features (faces, edges, or vertices).
//! 2. Clips the incident feature against the reference feature's boundaries.
//! 3. Generates contact points where the features overlap.
use super::WgContactManifold;
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::Shader;
use wgebra::{WgSim2, WgSim3};
#[derive(Shader)]
#[shader(
    derive(WgSim3, WgSim2, WgContactManifold),
    src = "polygonal_feature.wgsl",
    src_fn = "substitute_aliases",
    shader_defs = "dim_shader_defs"
)]
/// GPU shader for polygonal feature extraction and contact manifold generation.
///
/// This shader implements algorithms for:
/// - Extracting geometric features (faces, edges, vertices) from polygonal shapes.
/// - Clipping features against each other to find contact points.
/// - Generating contact manifolds with multiple points for stability.
///
/// # Feature Types
///
/// - **Face**: A flat polygonal surface (most common collision feature).
/// - **Edge**: A line segment (edge-edge contacts in 3D).
/// - **Vertex**: A point (vertex-face or vertex-edge contacts).
///
/// # Contact Generation Process
///
/// 1. Identify which features are colliding (face-face, edge-edge, etc.).
/// 2. Choose a reference feature and an incident feature.
/// 3. Clip the incident feature against the reference feature's boundaries.
/// 4. Keep points that penetrate the reference feature as contact points.
pub struct WgPolygonalFeature;
// Compile-check the composed shader with the crate's dimension-specific shader defs.
wgcore::test_shader_compilation!(WgPolygonalFeature, wgcore, crate::dim_shader_defs());
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/queries/gjk/mod.rs | crates/wgparry/src/queries/gjk/mod.rs | use crate::queries::WgProjection;
use crate::shapes::{WgSegment, WgShape, WgTriangle};
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::Shader;
#[cfg(feature = "dim3")]
use crate::shapes::WgTetrahedron;
#[derive(Shader)]
#[shader(
src = "cso_point.wgsl",
shader_defs = "dim_shader_defs",
src_fn = "substitute_aliases"
)]
pub struct WgCsoPoint;
#[derive(Shader)]
#[shader(
derive(WgCsoPoint, WgVoronoiSimplex, WgShape),
src = "gjk.wgsl",
shader_defs = "dim_shader_defs",
src_fn = "substitute_aliases"
)]
pub struct WgGjk;
#[derive(Shader)]
#[cfg_attr(
feature = "dim2",
shader(
derive(WgCsoPoint, WgSegment, WgTriangle, WgProjection),
src = "voronoi_simplex2.wgsl",
shader_defs = "dim_shader_defs",
src_fn = "substitute_aliases"
)
)]
#[cfg_attr(
feature = "dim3",
shader(
derive(WgCsoPoint, WgSegment, WgTriangle, WgTetrahedron, WgProjection),
src = "voronoi_simplex3.wgsl",
shader_defs = "dim_shader_defs",
src_fn = "substitute_aliases"
)
)]
pub struct WgVoronoiSimplex;
#[derive(Shader)]
#[cfg_attr(
feature = "dim2",
shader(
derive(WgCsoPoint, WgVoronoiSimplex, WgShape, WgGjk),
src = "epa2.wgsl",
shader_defs = "dim_shader_defs",
src_fn = "substitute_aliases"
)
)]
#[cfg_attr(
feature = "dim3",
shader(
derive(WgCsoPoint, WgVoronoiSimplex, WgShape, WgGjk),
src = "epa3.wgsl",
shader_defs = "dim_shader_defs",
src_fn = "substitute_aliases"
)
)]
pub struct WgEpa;
wgcore::test_shader_compilation!(WgVoronoiSimplex);
wgcore::test_shader_compilation!(WgGjk, wgcore, crate::dim_shader_defs());
wgcore::test_shader_compilation!(WgCsoPoint);
wgcore::test_shader_compilation!(WgEpa, wgcore, crate::dim_shader_defs());
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/bounding_volumes/aabb.rs | crates/wgparry/src/bounding_volumes/aabb.rs | //! Axis-Aligned Bounding Box (AABB) implementation.
//!
//! An AABB is the simplest and most widely used bounding volume, defined by its
//! minimum and maximum corner points along each coordinate axis. AABBs are not
//! rotated with the objects they bound; instead, they expand to accommodate rotation.
//!
//! # Properties
//!
//! - **Axis-aligned**: Edges are always parallel to coordinate axes.
//! - **Conservative**: Always contains the entire object (may have empty space).
//! - **Fast overlap test**: Just compare min/max coordinates (6 comparisons in 3D).
//! - **Fast to compute**: For most shapes, AABB computation is very efficient.
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::{test_shader_compilation, Shader};
use wgebra::{WgSim2, WgSim3};
#[derive(Shader)]
#[shader(
derive(WgSim3, WgSim2),
src = "./aabb.wgsl",
src_fn = "substitute_aliases",
shader_defs = "dim_shader_defs"
)]
/// GPU shader for computing and manipulating Axis-Aligned Bounding Boxes.
///
/// This shader provides functions for:
/// - AABB overlap testing.
/// - AABB merging and manipulation.
pub struct WgAabb;
test_shader_compilation!(WgAabb, wgcore, crate::dim_shader_defs());
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/bounding_volumes/mod.rs | crates/wgparry/src/bounding_volumes/mod.rs | //! Bounding volume data structures for collision detection acceleration.
//!
//! Bounding volumes are simple geometric shapes that enclose more complex objects.
//! They enable fast broad-phase collision detection by providing cheap overlap tests
//! before performing expensive narrow-phase collision detection.
//!
//! Only Axis-Aligned Bounding Boxes (AABB) are provided at the moment.
mod aabb;
pub use aabb::*;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/shapes/convex_polyhedron.rs | crates/wgparry/src/shapes/convex_polyhedron.rs | //! Convex polyhedron shape.
use crate::bounding_volumes::WgAabb;
use crate::queries::{WgPolygonalFeature, WgProjection, WgRay};
use crate::shapes::WgVtxIdx;
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::Shader;
use wgebra::{WgSim2, WgSim3};
#[derive(Shader)]
#[shader(
derive(
WgSim3,
WgSim2,
WgRay,
WgProjection,
WgPolygonalFeature,
WgAabb,
WgVtxIdx
),
src = "convex_polyhedron.wgsl",
src_fn = "substitute_aliases",
shader_defs = "dim_shader_defs"
)]
/// GPU shader for the convex polyhedron shape.
///
/// The convex polyhedron is defined by a vertex buffer and a triangle index buffer.
/// Both convex polyhedrons and triangle meshes share the same GPU buffer for storing their
/// vertices and indices.
pub struct WgConvexPolyhedron;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/shapes/shape.rs | crates/wgparry/src/shapes/shape.rs | //! Unified shape representation for GPU-accelerated collision detection.
//!
//! This module provides [`GpuShape`], a compact, GPU-friendly representation that can
//! encode any supported geometric primitive. It uses a tagged union approach where
//! shape data is packed into two `Vector4<f32>` values, with the shape type encoded
//! in the tag field.
//!
//! # Shape Encoding
//!
//! Different shapes pack their parameters differently:
//! - **Ball**: `a = [radius, _, _, tag]`
//! - **Cuboid**: `a = [hx, hy, hz, tag]` (half-extents)
//! - **Capsule**: `a = [ax, ay, az, tag]`, `b = [bx, by, bz, radius]` (segment endpoints + radius)
//! - **Cone**: `a = [half_height, radius, _, tag]`
//! - **Cylinder**: `a = [half_height, radius, _, tag]`
//! - **Polyline/TriMesh**: `a = [range_start, range_end, _, tag]` (vertex buffer indices)
//!
//! The tag is stored as the `w` component of the first vector using bit-casting to preserve
//! the `f32` representation while encoding a `u32` shape type identifier.
use crate::bounding_volumes::WgAabb;
use crate::queries::{WgPolygonalFeature, WgProjection, WgRay};
use crate::shapes::{
WgBall, WgCapsule, WgConvexPolyhedron, WgCuboid, WgPolyline, WgTriMesh, WgTriangle,
};
use crate::{dim_shader_defs, substitute_aliases};
use na::{vector, Vector4};
use parry::bounding_volume::Aabb;
use parry::shape::{Ball, Cuboid, Shape, ShapeType, TypedShape};
use wgcore::{test_shader_compilation, Shader};
use wgebra::{WgSim2, WgSim3};
use crate::math::{Point, Vector, DIM};
#[cfg(feature = "dim3")]
use crate::shapes::cone::WgCone;
#[cfg(feature = "dim3")]
use crate::shapes::cylinder::WgCylinder;
/// Shape type identifiers for GPU representation.
///
/// These numeric values are encoded in the tag field of [`GpuShape`] and must match
/// the corresponding values in `shape.wgsl`. The values are bit-cast as `f32` for
/// storage in GPU buffers.
pub enum GpuShapeType {
/// Ball/sphere shape (type ID = 0)
Ball = 0,
/// Cuboid/box shape (type ID = 1)
Cuboid = 1,
/// Capsule shape (type ID = 2)
Capsule = 2,
#[cfg(feature = "dim3")]
/// Cone shape, 3D only (type ID = 3)
Cone = 3,
#[cfg(feature = "dim3")]
/// Cylinder shape, 3D only (type ID = 4)
Cylinder = 4,
/// Polyline - sequence of connected line segments (type ID = 5)
// TODO: not sure we want to keep the Polyline in the shape type.
Polyline = 5,
/// Triangle mesh (type ID = 6)
TriMesh = 6,
/// A convex polygon or polyhedron (type ID = 7)
ConvexPoly = 7,
}
/// Auxiliary buffers for complex shape types like polylines and triangle meshes.
///
/// Some shapes (polylines and triangle meshes) reference external vertex data
/// rather than storing all data inline. This struct holds those vertex buffers.
#[derive(Default, Clone, Debug)]
pub struct ShapeBuffers {
/// Vertex buffer for polylines and triangle meshes.
///
/// Polyline and TriMesh shapes reference ranges within this buffer.
/// The shape stores the start and end indices of its vertices in this buffer.
pub vertices: Vec<Point<f32>>,
/// Index buffers for polylines, triangle meshes, and convex polyhedrons.
pub indices: Vec<u32>,
}
/// GPU-compatible shape representation using a tagged union encoded in two `Vector4<f32>` values.
///
/// This struct provides a compact, cache-friendly representation of various geometric
/// primitives suitable for GPU buffer storage. The shape type is encoded in the `w`
/// component of the first vector as a bit-cast `u32` value.
///
/// # Memory Layout
///
/// The struct is `#[repr(C)]` with `bytemuck::Pod` for direct GPU buffer uploads.
/// Total size: 32 bytes (2 × `Vector4<f32>`).
///
/// # Supported Operations
///
/// - Construct shapes from primitive parameters (e.g., [`ball`](Self::ball), [`cuboid`](Self::cuboid))
/// - Convert from parry shape types via [`from_parry`](Self::from_parry)
/// - Query shape type with [`shape_type`](Self::shape_type)
/// - Extract typed shapes with [`to_ball`](Self::to_ball), [`to_cuboid`](Self::to_cuboid), etc.
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
#[repr(C)]
pub struct GpuShape {
a: Vector4<f32>,
b: Vector4<f32>,
// Needed for triangles. While we don’t construct explicitly triangle shapes on the rust side,
// this is useful on the GPU so that collision-detection involving triangle meshes can then
// call the pfm-pfm collision-detection (which relies on dynamic dispatch) by storing the
// triangle as a Shape.
c: Vector4<f32>,
}
impl GpuShape {
/// Creates a ball (sphere/circle) shape.
///
/// # Parameters
///
/// - `radius`: The radius of the ball
///
/// # Returns
///
/// A [`GpuShape`] encoding a ball with the specified radius
pub fn ball(radius: f32) -> Self {
let tag = f32::from_bits(GpuShapeType::Ball as u32);
Self {
a: vector![radius, 0.0, 0.0, tag],
b: vector![0.0, 0.0, 0.0, 0.0],
c: vector![0.0, 0.0, 0.0, 0.0],
}
}
/// Creates a cuboid (box/rectangle) shape.
///
/// # Parameters
///
/// - `half_extents`: Half-widths along each axis (vec2 for 2D, vec3 for 3D)
///
/// # Returns
///
/// A [`GpuShape`] encoding a cuboid with the specified dimensions
pub fn cuboid(half_extents: Vector<f32>) -> Self {
let tag = f32::from_bits(GpuShapeType::Cuboid as u32);
Self {
#[cfg(feature = "dim2")]
a: vector![half_extents.x, half_extents.y, 0.0, tag],
#[cfg(feature = "dim3")]
a: vector![half_extents.x, half_extents.y, half_extents.z, tag],
b: vector![0.0, 0.0, 0.0, 0.0],
c: vector![0.0, 0.0, 0.0, 0.0],
}
}
/// Creates a capsule shape.
///
/// # Parameters
///
/// - `a`: First endpoint of the capsule's central segment
/// - `b`: Second endpoint of the capsule's central segment
/// - `radius`: Radius of the capsule (distance from segment to surface)
///
/// # Returns
///
/// A [`GpuShape`] encoding a capsule with the specified parameters
pub fn capsule(a: Point<f32>, b: Point<f32>, radius: f32) -> Self {
let tag = f32::from_bits(GpuShapeType::Capsule as u32);
#[cfg(feature = "dim2")]
return Self {
a: vector![a.x, a.y, 0.0, tag],
b: vector![b.x, b.y, 0.0, radius],
c: vector![0.0, 0.0, 0.0, 0.0],
};
#[cfg(feature = "dim3")]
return Self {
a: vector![a.x, a.y, a.z, tag],
b: vector![b.x, b.y, b.z, radius],
c: vector![0.0, 0.0, 0.0, 0.0],
};
}
/// Creates a polyline shape from a range of vertices.
///
/// A polyline is a connected sequence of line segments defined by vertices.
///
/// # Parameters
///
/// - `vertex_range`: Start and end indices into the vertex buffer
pub fn polyline(
bvh_vtx_root_id: u32,
bvh_idx_root_id: u32,
bvh_node_len: u32,
aabb: Aabb,
) -> Self {
let tag = f32::from_bits(GpuShapeType::Polyline as u32);
let a0 = f32::from_bits(bvh_vtx_root_id);
let a1 = f32::from_bits(bvh_idx_root_id);
let a2 = f32::from_bits(bvh_node_len);
#[cfg(feature = "dim2")]
return Self {
a: vector![a0, a1, a2, tag],
b: vector![aabb.mins.x, aabb.mins.y, 0.0, 0.0],
c: vector![aabb.maxs.x, aabb.maxs.y, 0.0, 0.0],
};
#[cfg(feature = "dim3")]
return Self {
a: vector![a0, a1, a2, tag],
b: aabb.mins.coords.push(0.0),
c: aabb.maxs.coords.push(0.0),
};
}
/// Creates a triangle mesh shape from a range of vertices.
///
/// A trimesh is a collection of triangles sharing vertices.
pub fn trimesh(
bvh_vtx_root_id: u32,
bvh_idx_root_id: u32,
bvh_node_len: u32,
aabb: Aabb,
) -> Self {
let tag = f32::from_bits(GpuShapeType::TriMesh as u32);
let a0 = f32::from_bits(bvh_vtx_root_id);
let a1 = f32::from_bits(bvh_idx_root_id);
let a2 = f32::from_bits(bvh_node_len);
#[cfg(feature = "dim2")]
return Self {
a: vector![a0, a1, a2, tag],
b: vector![aabb.mins.x, aabb.mins.y, 0.0, 0.0],
c: vector![aabb.maxs.x, aabb.maxs.y, 0.0, 0.0],
};
#[cfg(feature = "dim3")]
return Self {
a: vector![a0, a1, a2, tag],
b: aabb.mins.coords.push(0.0),
c: aabb.maxs.coords.push(0.0),
};
}
/// Creates a convex polyhedron definition from its vertex and index buffer anges.
pub fn convex_poly(
first_vtx_id: u32,
end_vtx_id: u32,
first_face_id: u32,
end_face_id: u32,
) -> Self {
let tag = f32::from_bits(GpuShapeType::ConvexPoly as u32);
let a0 = f32::from_bits(first_vtx_id);
let a1 = f32::from_bits(end_vtx_id);
let b0 = f32::from_bits(first_face_id);
let b1 = f32::from_bits(end_face_id);
Self {
a: vector![a0, a1, 0.0, tag],
b: vector![b0, b1, 0.0, 0.0],
c: vector![0.0, 0.0, 0.0, 0.0],
}
}
/// Creates a cone shape (3D only).
///
/// # Parameters
///
/// - `half_height`: Half the height of the cone (from base to apex)
/// - `radius`: Radius of the cone's base
#[cfg(feature = "dim3")]
pub fn cone(half_height: f32, radius: f32) -> Self {
let tag = f32::from_bits(GpuShapeType::Cone as u32);
Self {
a: vector![half_height, radius, 0.0, tag],
b: vector![0.0, 0.0, 0.0, 0.0],
c: vector![0.0, 0.0, 0.0, 0.0],
}
}
/// Creates a cylinder shape (3D only).
///
/// # Parameters
///
/// - `half_height`: Half the height of the cylinder (distance from center to end caps)
/// - `radius`: Radius of the cylinder
#[cfg(feature = "dim3")]
pub fn cylinder(half_height: f32, radius: f32) -> Self {
let tag = f32::from_bits(GpuShapeType::Cylinder as u32);
Self {
a: vector![half_height, radius, 0.0, tag],
b: vector![0.0, 0.0, 0.0, 0.0],
c: vector![0.0, 0.0, 0.0, 0.0],
}
}
/// Converts a parry shape to a [`GpuShape`].
///
/// This method handles conversion from parry's CPU-side shape types to the GPU-compatible
/// representation. For complex shapes like polylines and triangle meshes, vertex data is
/// appended to the provided buffers.
///
/// # Parameters
///
/// - `shape`: The parry shape to convert
/// - `buffers`: Vertex buffers for storing polyline/mesh vertex data
///
/// # Returns
///
/// - `Some(GpuShape)` if the shape type is supported
/// - `None` if the shape type is not yet supported on GPU
///
/// # Supported Shape Types
///
/// - Ball, Cuboid, Capsule (primitives)
/// - Cone, Cylinder (3D only)
/// - Polyline, TriMesh, HeightField (complex shapes stored as vertex ranges)
pub fn from_parry(shape: &(impl Shape + ?Sized), buffers: &mut ShapeBuffers) -> Option<Self> {
match shape.as_typed_shape() {
TypedShape::Ball(shape) => Some(Self::ball(shape.radius)),
TypedShape::Cuboid(shape) => Some(Self::cuboid(shape.half_extents)),
TypedShape::Capsule(shape) => Some(Self::capsule(
shape.segment.a,
shape.segment.b,
shape.radius,
)),
TypedShape::Polyline(shape) => {
let bvh_vtx_root_id = buffers.vertices.len();
let bvh_idx_root_id = buffers.indices.len();
// Append the BVH data to the vertex/index buffers.
// TODO: we are constructing a BVH using the `bvh` crate.
// While the Polyline shape technically already has a BVH, parry’s BVH
// doesn’t provide explicit access to the BVH topology. So, for now,
// let’s just build a new BVH that exposes its internal.
struct BvhObject {
aabb: bvh::aabb::Aabb<f32, DIM>,
node_index: usize,
}
impl bvh::aabb::Bounded<f32, DIM> for BvhObject {
fn aabb(&self) -> bvh::aabb::Aabb<f32, DIM> {
self.aabb
}
}
impl bvh::bounding_hierarchy::BHShape<f32, DIM> for BvhObject {
fn set_bh_node_index(&mut self, index: usize) {
self.node_index = index;
}
fn bh_node_index(&self) -> usize {
self.node_index
}
}
let mut objects: Vec<_> = shape
.segments()
.map(|tri| {
let aabb = tri.local_aabb();
BvhObject {
aabb: bvh::aabb::Aabb::with_bounds(aabb.mins, aabb.maxs),
node_index: 0,
}
})
.collect();
let bvh = bvh::bvh::Bvh::build(&mut objects);
let flat_bvh = bvh.flatten();
buffers
.vertices
.extend(flat_bvh.iter().flat_map(|n| [n.aabb.min, n.aabb.max]));
let bvh_node_len = flat_bvh.len();
buffers.indices.extend(
flat_bvh
.iter()
.flat_map(|n| [n.entry_index, n.exit_index, n.shape_index]),
);
// Append the actual mesh vertex/index buffers.
buffers.vertices.extend_from_slice(shape.vertices());
buffers
.indices
.extend(shape.indices().iter().flat_map(|seg| seg.iter().copied()));
Some(Self::polyline(
bvh_vtx_root_id as u32,
bvh_idx_root_id as u32,
bvh_node_len as u32,
shape.local_aabb(),
))
}
TypedShape::TriMesh(shape) => {
let bvh_vtx_root_id = buffers.vertices.len();
let bvh_idx_root_id = buffers.indices.len();
// Append the BVH data to the vertex/index buffers.
// TODO: we are constructing a BVH using the `bvh` crate.
// While the TriMesh shape technically already has a BVH, parry’s BVH
// doesn’t provide explicit access to the BVH topology. So, for now,
// let’s just build a new BVH that exposes its internal.
struct BvhObject {
aabb: bvh::aabb::Aabb<f32, DIM>,
node_index: usize,
}
impl bvh::aabb::Bounded<f32, DIM> for BvhObject {
fn aabb(&self) -> bvh::aabb::Aabb<f32, DIM> {
self.aabb
}
}
impl bvh::bounding_hierarchy::BHShape<f32, DIM> for BvhObject {
fn set_bh_node_index(&mut self, index: usize) {
self.node_index = index;
}
fn bh_node_index(&self) -> usize {
self.node_index
}
}
let mut objects: Vec<_> = shape
.triangles()
.map(|tri| {
let aabb = tri.local_aabb();
BvhObject {
aabb: bvh::aabb::Aabb::with_bounds(aabb.mins, aabb.maxs),
node_index: 0,
}
})
.collect();
let bvh = bvh::bvh::Bvh::build(&mut objects);
let flat_bvh = bvh.flatten();
buffers
.vertices
.extend(flat_bvh.iter().flat_map(|n| [n.aabb.min, n.aabb.max]));
let bvh_node_len = flat_bvh.len();
buffers.indices.extend(
flat_bvh
.iter()
.flat_map(|n| [n.entry_index, n.exit_index, n.shape_index]),
);
// Append the actual mesh vertex/index buffers.
buffers.vertices.extend_from_slice(shape.vertices());
buffers
.indices
.extend(shape.indices().iter().flat_map(|tri| tri.iter().copied()));
Some(Self::trimesh(
bvh_vtx_root_id as u32,
bvh_idx_root_id as u32,
bvh_node_len as u32,
shape.local_aabb(),
))
}
#[cfg(feature = "dim2")]
TypedShape::ConvexPolygon(poly) => {
let first_vtx_id = buffers.vertices.len() as u32;
buffers.vertices.extend_from_slice(poly.points());
let end_vtx_id = buffers.vertices.len() as u32;
// NOTE: face ids are not relevant for 2D convex polyhedron since it dosn’t have
// an index buffer.
Some(Self::convex_poly(first_vtx_id, end_vtx_id, 0, 0))
}
#[cfg(feature = "dim3")]
TypedShape::ConvexPolyhedron(poly) => {
let first_vtx_id = buffers.vertices.len();
let first_face_id = buffers.indices.len();
let all_idx = poly.vertices_adj_to_face();
buffers.vertices.extend_from_slice(poly.points());
for face in poly.faces() {
let id = face.first_vertex_or_edge as usize;
if face.num_vertices_or_edges < 3 {
println!("found convex poly with degenerate faces?")
} else {
buffers.indices.push(all_idx[id]);
buffers.indices.push(all_idx[id + 1]);
buffers.indices.push(all_idx[id + 2]);
}
}
let end_vtx_id = buffers.vertices.len();
let end_face_id = buffers.indices.len();
Some(Self::convex_poly(
first_vtx_id as u32,
end_vtx_id as u32,
first_face_id as u32,
end_face_id as u32,
))
}
// HACK: we currently emulate heightfields as trimeshes or polylines
#[cfg(feature = "dim2")]
TypedShape::HeightField(_shape) => {
todo!()
}
#[cfg(feature = "dim3")]
TypedShape::HeightField(_shape) => {
todo!()
}
#[cfg(feature = "dim3")]
TypedShape::Cone(shape) => Some(Self::cone(shape.half_height, shape.radius)),
#[cfg(feature = "dim3")]
TypedShape::Cylinder(shape) => Some(Self::cylinder(shape.half_height, shape.radius)),
_ => None,
}
}
/// Returns the shape type of this [`GpuShape`].
///
/// Extracts the shape type tag from the `w` component of the first vector.
///
/// # Returns
///
/// The [`ShapeType`] corresponding to the encoded shape
pub fn shape_type(&self) -> ShapeType {
let tag = self.a.w.to_bits();
match tag {
0 => ShapeType::Ball,
1 => ShapeType::Cuboid,
2 => ShapeType::Capsule,
#[cfg(feature = "dim3")]
3 => ShapeType::Cone,
#[cfg(feature = "dim3")]
4 => ShapeType::Cylinder,
5 => ShapeType::Polyline,
6 => ShapeType::TriMesh,
_ => panic!("Unknown shape type: {}", tag),
}
}
/// Extracts a [`Ball`] if this shape is a ball.
///
/// # Returns
///
/// - `Some(Ball)` if the shape type is Ball
/// - `None` otherwise
pub fn to_ball(&self) -> Option<Ball> {
(self.shape_type() == ShapeType::Ball).then_some(Ball::new(self.a.x))
}
/// Extracts a [`Cuboid`] if this shape is a cuboid.
///
/// # Returns
///
/// - `Some(Cuboid)` if the shape type is Cuboid
/// - `None` otherwise
pub fn to_cuboid(&self) -> Option<Cuboid> {
#[cfg(feature = "dim2")]
return (self.shape_type() == ShapeType::Cuboid).then_some(Cuboid::new(self.a.xy()));
#[cfg(feature = "dim3")]
return (self.shape_type() == ShapeType::Cuboid).then_some(Cuboid::new(self.a.xyz()));
}
/// Returns the vertex buffer range for a polyline shape.
///
/// # Returns
///
/// A `[start, end)` index range into the vertex buffer
///
/// # Panics
///
/// Panics if this shape is not a polyline
pub fn polyline_rngs(&self) -> [u32; 2] {
assert!(self.shape_type() == ShapeType::Polyline);
[self.a.x.to_bits(), self.a.y.to_bits()]
}
/// Returns the vertex buffer range for a triangle mesh shape.
///
/// # Returns
///
/// A `[start, end)` index range into the vertex buffer
///
/// # Panics
///
/// Panics if this shape is not a triangle mesh
pub fn trimesh_rngs(&self) -> [u32; 2] {
assert!(self.shape_type() == ShapeType::TriMesh);
[self.a.x.to_bits(), self.a.y.to_bits()]
}
}
#[cfg(feature = "dim2")]
#[derive(Shader)]
#[shader(src = "shape_fake_cone.wgsl")]
/// Fake cone shader for 2D builds to satisfy shader composition dependencies.
///
/// In 2D builds, cones don't exist but are still referenced by the unified shape shader.
/// This provides a stub implementation to prevent compilation errors.
struct WgCone;
#[cfg(feature = "dim2")]
#[derive(Shader)]
#[shader(src = "shape_fake_cylinder.wgsl")]
/// Fake cylinder shader for 2D builds to satisfy shader composition dependencies.
///
/// In 2D builds, cylinders don't exist but are still referenced by the unified shape shader.
/// This provides a stub implementation to prevent compilation errors.
struct WgCylinder;
#[derive(Shader)]
#[shader(
derive(
WgSim3,
WgSim2,
WgRay,
WgProjection,
WgBall,
WgCapsule,
WgCone,
WgCuboid,
WgCylinder,
WgPolygonalFeature,
WgConvexPolyhedron,
WgTriangle,
WgTriMesh,
WgPolyline,
WgAabb,
),
src = "shape.wgsl",
src_fn = "substitute_aliases",
shader_defs = "dim_shader_defs"
)]
/// Unified GPU shader for all shape types.
///
/// This shader provides a unified interface for geometric queries on any supported
/// shape type. It uses dynamic dispatch based on the shape's type tag to call the
/// appropriate specialized shader implementation.
pub struct WgShape;
test_shader_compilation!(WgShape, wgcore, crate::dim_shader_defs());
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/shapes/cone.rs | crates/wgparry/src/shapes/cone.rs | //! Cone shape (3D only).
//!
//! A cone is defined by a half-height (distance from base to apex) and a base radius.
//! The cone is centered at the origin with its axis aligned along the Y-axis, with the
//! apex pointing in the positive Y direction.
//!
//! **Note:** This shape is only available with the `dim3` feature.
use crate::queries::{WgPolygonalFeature, WgProjection, WgRay};
use crate::shapes::segment::WgSegment;
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::Shader;
use wgebra::{WgSim2, WgSim3};
#[derive(Shader)]
#[shader(
derive(WgSim3, WgSim2, WgRay, WgProjection, WgSegment, WgPolygonalFeature),
src = "cone.wgsl",
src_fn = "substitute_aliases",
shader_defs = "dim_shader_defs"
)]
/// GPU shader for the cone shape (3D only).
///
/// This shader provides WGSL implementations for:
/// - Ray-casting against cones
/// - Point projection onto cone surfaces
///
/// A cone is parameterized by:
/// - Half-height: Distance from the center to the apex (and to the base)
/// - Radius: Radius of the circular base
///
/// The cone is aligned along the Y-axis with the apex at `+half_height` and
/// the base centered at `-half_height`.
pub struct WgCone;
#[cfg(test)]
mod test {
use super::WgCone;
use parry::shape::Cone;
use wgcore::tensor::GpuVector;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_cone() {
crate::queries::test_utils::test_point_projection::<WgCone, _>(
"Cone",
Cone::new(1.0, 0.5),
|device, shapes, usages| GpuVector::init(device, shapes, usages).into_inner(),
)
.await;
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/shapes/polyline.rs | crates/wgparry/src/shapes/polyline.rs | //! Triangle mesh shape.
use crate::bounding_volumes::WgAabb;
use crate::queries::{WgPolygonalFeature, WgProjection, WgRay};
use crate::shapes::{WgConvexPolyhedron, WgSegment, WgVtxIdx};
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::Shader;
use wgebra::{WgSim2, WgSim3};
#[derive(Shader)]
#[shader(
derive(
WgSim3,
WgSim2,
WgRay,
WgProjection,
WgPolygonalFeature,
WgAabb,
WgSegment,
WgConvexPolyhedron,
WgVtxIdx,
),
src = "polyline.wgsl",
src_fn = "substitute_aliases",
shader_defs = "dim_shader_defs"
)]
/// GPU shader for the polyline shape.
///
/// The polyline is defined by a BVH, vertex buffer and a triangle index buffer.
/// The BVH is stored as part of the vertex and index buffer, before the actual vertices and indices.
/// It stores two vectors and one index per AABB.
pub struct WgPolyline;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/shapes/cuboid.rs | crates/wgparry/src/shapes/cuboid.rs | //! Cuboid shape - box (3D) or rectangle (2D).
//!
//! A cuboid is an axis-aligned box defined by its half-extents (half-widths along each axis).
//! In 2D, this represents a rectangle; in 3D, a rectangular prism. The cuboid is centered
//! at the origin in local space.
use crate::queries::{WgPolygonalFeature, WgProjection, WgRay};
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::Shader;
use wgebra::{WgSim2, WgSim3};
#[derive(Shader)]
#[shader(
derive(WgSim3, WgSim2, WgRay, WgProjection, WgPolygonalFeature),
src = "cuboid.wgsl",
src_fn = "substitute_aliases",
shader_defs = "dim_shader_defs"
)]
/// GPU shader for the cuboid (box/rectangle) shape.
///
/// The cuboid is defined by half-extents (a vector of half-widths along each axis) and is
/// axis-aligned in local space. Use transformations to rotate and position cuboids.
pub struct WgCuboid;
#[cfg(test)]
mod test {
use super::WgCuboid;
use na::vector;
#[cfg(feature = "dim2")]
use parry2d::shape::Cuboid;
#[cfg(feature = "dim3")]
use parry3d::shape::Cuboid;
use wgcore::tensor::GpuVector;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_cuboid() {
crate::queries::test_utils::test_point_projection::<WgCuboid, _>(
"Cuboid",
#[cfg(feature = "dim2")]
Cuboid::new(vector![1.0, 2.0]),
#[cfg(feature = "dim3")]
Cuboid::new(vector![1.0, 2.0, 3.0]),
|device, shapes, usages| GpuVector::encase(device, shapes, usages).into_inner(),
)
.await;
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/shapes/tetrahedron.rs | crates/wgparry/src/shapes/tetrahedron.rs | //! Tetrahedron shape.
//!
//! A tetrahedron is defined by three vertices (A, B, C).
use crate::queries::WgProjection;
use crate::substitute_aliases;
use wgcore::Shader;
#[derive(Shader)]
#[shader(
derive(WgProjection),
src = "tetrahedron.wgsl",
src_fn = "substitute_aliases"
)]
/// GPU shader for the tetrahedron shape.
pub struct WgTetrahedron;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/shapes/vtx_idx.rs | crates/wgparry/src/shapes/vtx_idx.rs | //! Buffer bindings for all the complex shapes requiring an index and vertex buffer
//! (trimesh, convex polyhedrons, etc.)
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::Shader;
#[derive(Shader)]
#[shader(
src = "vtx_idx.wgsl",
src_fn = "substitute_aliases",
shader_defs = "dim_shader_defs"
)]
/// Buffer bindings for index-buffers and vertex-buffers.
pub struct WgVtxIdx;
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
wgmath/wgmath | https://github.com/wgmath/wgmath/blob/ac89dceb8a33778e895303cd606da38afb015192/crates/wgparry/src/shapes/cylinder.rs | crates/wgparry/src/shapes/cylinder.rs | //! Cylinder shape (3D only).
//!
//! A cylinder is defined by a half-height and a radius. The cylinder is centered at
//! the origin with its axis aligned along the Y-axis.
//!
//! **Note:** This shape is only available with the `dim3` feature.
use crate::queries::{WgPolygonalFeature, WgProjection, WgRay};
use crate::{dim_shader_defs, substitute_aliases};
use wgcore::Shader;
use wgebra::{WgSim2, WgSim3};
#[derive(Shader)]
#[shader(
derive(WgSim3, WgSim2, WgRay, WgProjection, WgPolygonalFeature),
src = "cylinder.wgsl",
src_fn = "substitute_aliases",
shader_defs = "dim_shader_defs"
)]
/// GPU shader for the cylinder shape (3D only).
///
/// This shader provides WGSL implementations for:
/// - Ray-casting against cylinders.
/// - Point projection onto cylinder surfaces.
///
/// A cylinder is parameterized by:
/// - Half-height: Distance from the center to each flat circular end.
/// - Radius: Radius of the circular cross-section.
///
/// The cylinder is aligned along the Y-axis, extending from `-half_height` to
/// `+half_height`, with circular caps at both ends.
pub struct WgCylinder;
#[cfg(test)]
mod test {
use super::WgCylinder;
use parry::shape::Cylinder;
use wgcore::tensor::GpuVector;
#[futures_test::test]
#[serial_test::serial]
async fn gpu_cylinder() {
crate::queries::test_utils::test_point_projection::<WgCylinder, _>(
"Cylinder",
Cylinder::new(1.0, 0.5),
|device, shapes, usages| GpuVector::init(device, shapes, usages).into_inner(),
)
.await;
}
}
| rust | Apache-2.0 | ac89dceb8a33778e895303cd606da38afb015192 | 2026-01-04T20:20:10.684006Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.