/*
Stockfish, a UCI chess playing engine derived from Glaurung 2.1
Copyright (C) 2004-2026 The Stockfish developers (see AUTHORS file)
Stockfish is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Stockfish is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
*/
#ifndef MEMORY_H_INCLUDED
#define MEMORY_H_INCLUDED
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>

#include "types.h"
#if defined(_WIN64)

    #if _WIN32_WINNT < 0x0601
        #undef _WIN32_WINNT
        #define _WIN32_WINNT 0x0601  // Force to include needed API prototypes
    #endif

    #if !defined(NOMINMAX)
        #define NOMINMAX  // Keep min()/max() macros out of the way
    #endif

    #include <windows.h>

    // Some Windows headers (RPC/old headers) define short macros such
    // as 'small' expanding to 'char', which breaks identifiers in the code.
    // Undefine those macros immediately after including <windows.h>.
    #ifdef small
        #undef small
    #endif

extern "C" {
// Function-pointer signatures for the advapi32.dll entry points that are
// resolved dynamically with GetProcAddress() instead of being linked directly.
using OpenProcessToken_t      = bool (*)(HANDLE, DWORD, PHANDLE);
using LookupPrivilegeValueA_t = bool (*)(LPCSTR, LPCSTR, PLUID);
using AdjustTokenPrivileges_t =
  bool (*)(HANDLE, BOOL, PTOKEN_PRIVILEGES, DWORD, PTOKEN_PRIVILEGES, PDWORD);
}
#endif
namespace Stockfish {
// Allocates `size` bytes aligned to `alignment`; release with
// std_aligned_free(). (Implementation not visible in this header.)
void* std_aligned_alloc(size_t alignment, size_t size);
// Frees memory obtained from std_aligned_alloc().
void std_aligned_free(void* ptr);
// Memory aligned by page size, min alignment: 4096 bytes
void* aligned_large_pages_alloc(size_t size);
// Frees memory obtained from aligned_large_pages_alloc().
void aligned_large_pages_free(void* mem);
// Reports whether large-page allocation is available to this process —
// presumably queried by the hash-table setup code; confirm against callers.
bool has_large_pages();
// Destroys a single object that was constructed with placement new and then
// releases its storage through `free_func`. A null `ptr` is a no-op.
template<typename T, typename FREE_FUNC>
void memory_deleter(T* ptr, FREE_FUNC free_func) {
    if (!ptr)
        return;

    // Placement new means nobody else will run the destructor: do it here.
    // Skipped entirely for trivially destructible types.
    if constexpr (!std::is_trivially_destructible_v<T>)
        ptr->~T();

    free_func(ptr);
}
// Destroys an array of unknown bound that was created by the array overload of
// memory_allocator() and releases its storage through `free_func`. The element
// count is stored in a header just before the first element, at an offset that
// preserves the element alignment. A null `ptr` is a no-op.
template<typename T, typename FREE_FUNC>
void memory_deleter_array(T* ptr, FREE_FUNC free_func) {
    if (!ptr)
        return;

    // Move back on the pointer to where the size is allocated
    const size_t array_offset = std::max(sizeof(size_t), alignof(T));
    char*        raw_memory   = reinterpret_cast<char*>(ptr) - array_offset;

    if constexpr (!std::is_trivially_destructible_v<T>)
    {
        const size_t size = *reinterpret_cast<size_t*>(raw_memory);

        // Explicitly call the destructor for each element in reverse order
        for (size_t i = size; i-- > 0;)
            ptr[i].~T();
    }

    // Free from the header, i.e. the pointer the allocator originally returned
    free_func(raw_memory);
}
// Allocates memory for a single object and places it there with placement new
template
inline std::enable_if_t, T*> memory_allocator(ALLOC_FUNC alloc_func,
Args&&... args) {
void* raw_memory = alloc_func(sizeof(T));
ASSERT_ALIGNED(raw_memory, alignof(T));
return new (raw_memory) T(std::forward(args)...);
}
// Allocates memory for an array of unknown bound and places it there with placement new
template
inline std::enable_if_t, std::remove_extent_t*>
memory_allocator(ALLOC_FUNC alloc_func, size_t num) {
using ElementType = std::remove_extent_t;
const size_t array_offset = std::max(sizeof(size_t), alignof(ElementType));
// Save the array size in the memory location
char* raw_memory =
reinterpret_cast(alloc_func(array_offset + num * sizeof(ElementType)));
ASSERT_ALIGNED(raw_memory, alignof(T));
new (raw_memory) size_t(num);
for (size_t i = 0; i < num; ++i)
new (raw_memory + array_offset + i * sizeof(ElementType)) ElementType();
// Need to return the pointer at the start of the array so that
// the indexing in unique_ptr works.
return reinterpret_cast(raw_memory + array_offset);
}
//
//
// aligned large page unique ptr
//
//
template
struct LargePageDeleter {
void operator()(T* ptr) const { return memory_deleter(ptr, aligned_large_pages_free); }
};
template
struct LargePageArrayDeleter {
void operator()(T* ptr) const { return memory_deleter_array(ptr, aligned_large_pages_free); }
};
template
using LargePagePtr =
std::conditional_t,
std::unique_ptr>>,
std::unique_ptr>>;
// make_unique_large_page for single objects
template
std::enable_if_t, LargePagePtr> make_unique_large_page(Args&&... args) {
static_assert(alignof(T) <= 4096,
"aligned_large_pages_alloc() may fail for such a big alignment requirement of T");
T* obj = memory_allocator(aligned_large_pages_alloc, std::forward(args)...);
return LargePagePtr(obj);
}
// make_unique_large_page for arrays of unknown bound
template
std::enable_if_t, LargePagePtr> make_unique_large_page(size_t num) {
using ElementType = std::remove_extent_t;
static_assert(alignof(ElementType) <= 4096,
"aligned_large_pages_alloc() may fail for such a big alignment requirement of T");
ElementType* memory = memory_allocator(aligned_large_pages_alloc, num);
return LargePagePtr(memory);
}
//
//
// aligned unique ptr
//
//
template
struct AlignedDeleter {
void operator()(T* ptr) const { return memory_deleter(ptr, std_aligned_free); }
};
template
struct AlignedArrayDeleter {
void operator()(T* ptr) const { return memory_deleter_array(ptr, std_aligned_free); }
};
template
using AlignedPtr =
std::conditional_t,
std::unique_ptr>>,
std::unique_ptr>>;
// make_unique_aligned for single objects
template
std::enable_if_t, AlignedPtr> make_unique_aligned(Args&&... args) {
const auto func = [](size_t size) { return std_aligned_alloc(alignof(T), size); };
T* obj = memory_allocator(func, std::forward(args)...);
return AlignedPtr(obj);
}
// make_unique_aligned for arrays of unknown bound
template
std::enable_if_t, AlignedPtr> make_unique_aligned(size_t num) {
using ElementType = std::remove_extent_t;
const auto func = [](size_t size) { return std_aligned_alloc(alignof(ElementType), size); };
ElementType* memory = memory_allocator(func, num);
return AlignedPtr(memory);
}
// Get the first aligned element of an array.
// ptr must point to an array of size at least `sizeof(T) * N + alignment` bytes,
// where N is the number of elements in the array.
template<uintptr_t Alignment, typename T>
T* align_ptr_up(T* ptr) {
    static_assert(alignof(T) < Alignment);

    // Round the address up to the next multiple of Alignment
    const uintptr_t ptrint = reinterpret_cast<uintptr_t>(reinterpret_cast<char*>(ptr));
    return reinterpret_cast<T*>(
      reinterpret_cast<char*>((ptrint + (Alignment - 1)) / Alignment * Alignment));
}
#if defined(_WIN32)

// Tries to enable the SeLockMemoryPrivilege for this process and, on success,
// calls `fyes(largePageSize)`; on any failure falls back to `fno()`. The
// privilege is restored (and the token handle closed) before returning.
// On 32-bit Windows large pages are not attempted at all.
template<typename FuncYesT, typename FuncNoT>
auto windows_try_with_large_page_priviliges([[maybe_unused]] FuncYesT&& fyes, FuncNoT&& fno) {
    #if !defined(_WIN64)
    return fno();
    #else
    HANDLE hProcessToken{};
    LUID   luid{};

    const size_t largePageSize = GetLargePageMinimum();
    if (!largePageSize)
        return fno();

    // Dynamically link OpenProcessToken, LookupPrivilegeValue and AdjustTokenPrivileges
    HMODULE hAdvapi32 = GetModuleHandle(TEXT("advapi32.dll"));
    if (!hAdvapi32)
        hAdvapi32 = LoadLibrary(TEXT("advapi32.dll"));

    auto OpenProcessToken_f =
      OpenProcessToken_t((void (*)()) GetProcAddress(hAdvapi32, "OpenProcessToken"));
    if (!OpenProcessToken_f)
        return fno();
    auto LookupPrivilegeValueA_f =
      LookupPrivilegeValueA_t((void (*)()) GetProcAddress(hAdvapi32, "LookupPrivilegeValueA"));
    if (!LookupPrivilegeValueA_f)
        return fno();
    auto AdjustTokenPrivileges_f =
      AdjustTokenPrivileges_t((void (*)()) GetProcAddress(hAdvapi32, "AdjustTokenPrivileges"));
    if (!AdjustTokenPrivileges_f)
        return fno();

    // We need SeLockMemoryPrivilege, so try to enable it for the process
    if (!OpenProcessToken_f(  // OpenProcessToken()
          GetCurrentProcess(), TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY, &hProcessToken))
        return fno();

    if (!LookupPrivilegeValueA_f(nullptr, "SeLockMemoryPrivilege", &luid))
    {
        // Fix: the token handle was leaked on this failure path
        CloseHandle(hProcessToken);
        return fno();
    }

    TOKEN_PRIVILEGES tp{};
    TOKEN_PRIVILEGES prevTp{};
    DWORD            prevTpLen = 0;

    tp.PrivilegeCount           = 1;
    tp.Privileges[0].Luid       = luid;
    tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED;

    // Try to enable SeLockMemoryPrivilege. Note that even if AdjustTokenPrivileges()
    // succeeds, we still need to query GetLastError() to ensure that the privileges
    // were actually obtained.
    if (!AdjustTokenPrivileges_f(hProcessToken, FALSE, &tp, sizeof(TOKEN_PRIVILEGES), &prevTp,
                                 &prevTpLen)
        || GetLastError() != ERROR_SUCCESS)
    {
        // Fix: the token handle was leaked on this failure path
        CloseHandle(hProcessToken);
        return fno();
    }

    auto&& ret = fyes(largePageSize);

    // Privilege no longer needed, restore previous state
    AdjustTokenPrivileges_f(hProcessToken, FALSE, &prevTp, 0, nullptr, nullptr);
    CloseHandle(hProcessToken);

    return std::forward<decltype(ret)>(ret);
    #endif
}

#endif
// Reads a T from a (possibly unaligned) byte buffer. ByteT must be a
// byte-sized type (char, unsigned char, std::byte, ...); std::memcpy is used
// so the read is well-defined regardless of the buffer's alignment.
template<typename T, typename ByteT>
T load_as(const ByteT* buffer) {
    static_assert(std::is_trivially_copyable<T>::value, "Type must be trivially copyable");
    static_assert(sizeof(ByteT) == 1);

    T value;
    std::memcpy(&value, buffer, sizeof(T));
    return value;
}
} // namespace Stockfish
#endif // #ifndef MEMORY_H_INCLUDED