Dataset schema (column: type, observed range or distinct classes):

| column | type | range / classes |
|---|---|---|
| id | int64 | 0 to 755k |
| file_name | string | lengths 3 to 109 |
| file_path | string | lengths 13 to 185 |
| content | string | lengths 31 to 9.38M |
| size | int64 | 31 to 9.38M |
| language | string | 1 class |
| extension | string | 11 classes |
| total_lines | int64 | 1 to 340k |
| avg_line_length | float64 | 2.18 to 149k |
| max_line_length | int64 | 7 to 2.22M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | string | lengths 6 to 65 |
| repo_stars | int64 | 100 to 47.3k |
| repo_forks | int64 | 0 to 12k |
| repo_open_issues | int64 | 0 to 3.4k |
| repo_license | string | 9 classes |
| repo_extraction_date | string | 92 classes |
| exact_duplicates_redpajama | bool | 2 classes |
| near_duplicates_redpajama | bool | 2 classes |
| exact_duplicates_githubcode | bool | 2 classes |
| exact_duplicates_stackv2 | bool | 1 class |
| exact_duplicates_stackv1 | bool | 2 classes |
| near_duplicates_githubcode | bool | 2 classes |
| near_duplicates_stackv1 | bool | 2 classes |
| near_duplicates_stackv2 | bool | 1 class |

Each row below is shown as a header line (id, file_name, file_path), the raw file content, and a metadata line.

| id: 8,771 | file_name: kern_k_target_system.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_target_system.hpp | content: |

/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_system_control.hpp>

namespace ams::kern {

    class KTargetSystem {
        private:
            friend class KSystemControlBase;
            friend class KSystemControl;
        private:
            struct KTargetSystemData {
                bool is_debug_mode;
                bool enable_debug_logging;
                bool enable_user_exception_handlers;
                bool enable_debug_memory_fill;
                bool enable_user_pmu_access;
                bool enable_kernel_debugging;
                bool enable_dynamic_resource_limits;
            };
        private:
            static inline constinit bool s_is_initialized = false;
            static inline constinit const volatile KTargetSystemData s_data = {
                .is_debug_mode                  = true,
                .enable_debug_logging           = true,
                .enable_user_exception_handlers = true,
                .enable_debug_memory_fill       = true,
                .enable_user_pmu_access         = true,
                .enable_kernel_debugging        = true,
                .enable_dynamic_resource_limits = false,
            };
        private:
            static ALWAYS_INLINE void SetInitialized() { s_is_initialized = true; }
        public:
            static ALWAYS_INLINE bool IsDebugMode()                      { return s_is_initialized && s_data.is_debug_mode; }
            static ALWAYS_INLINE bool IsDebugLoggingEnabled()            { return s_is_initialized && s_data.enable_debug_logging; }
            static ALWAYS_INLINE bool IsUserExceptionHandlersEnabled()   { return s_is_initialized && s_data.enable_user_exception_handlers; }
            static ALWAYS_INLINE bool IsDebugMemoryFillEnabled()         { return s_is_initialized && s_data.enable_debug_memory_fill; }
            static ALWAYS_INLINE bool IsUserPmuAccessEnabled()           { return s_is_initialized && s_data.enable_user_pmu_access; }
            static ALWAYS_INLINE bool IsKernelDebuggingEnabled()         { return s_is_initialized && s_data.enable_kernel_debugging; }
            static ALWAYS_INLINE bool IsDynamicResourceLimitsEnabled()   { return s_is_initialized && s_data.enable_dynamic_resource_limits; }
    };

}

| size: 2,875 | language: C++ | extension: .h | total_lines: 56 | avg_line_length: 41.982143 | max_line_length: 140 | alphanum_fraction: 0.649982 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | all eight duplicate flags: false |
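
An illustrative sketch (editor-added, not part of the dataset row above) of how these accessors are typically consumed; the caller, the fill pattern, and the libmesosphere build context are assumptions:

/* Hypothetical caller; assumes a libmesosphere build context. */
#include <mesosphere.hpp>
#include <cstring>

namespace ams::kern {

    void FillFreedMemoryIfEnabled(void *ptr, size_t size) {
        /* Every accessor ANDs with s_is_initialized, so before SetInitialized() */
        /* runs, this reports false and the fill is skipped. */
        if (KTargetSystem::IsDebugMemoryFillEnabled()) {
            std::memset(ptr, 0xDD, size); /* 0xDD is an arbitrary example pattern. */
        }
    }

}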

| id: 8,772 | file_name: kern_k_readable_event.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_readable_event.hpp | content: |

/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_synchronization_object.hpp>

namespace ams::kern {

    class KEvent;

    class KReadableEvent : public KSynchronizationObject {
        MESOSPHERE_AUTOOBJECT_TRAITS(KReadableEvent, KSynchronizationObject);
        private:
            bool m_is_signaled;
            KEvent *m_parent;
        public:
            constexpr explicit KReadableEvent(util::ConstantInitializeTag) : KSynchronizationObject(util::ConstantInitialize), m_is_signaled(), m_parent() { MESOSPHERE_ASSERT_THIS(); }

            explicit KReadableEvent() { /* ... */ }

            void Initialize(KEvent *parent);

            constexpr KEvent *GetParent() const { return m_parent; }

            Result Signal();
            Result Reset();

            Result Clear() {
                MESOSPHERE_ASSERT_THIS();

                /* Try to perform a reset, succeeding unconditionally. */
                this->Reset();

                R_SUCCEED();
            }

            virtual bool IsSignaled() const override;
            virtual void Destroy() override;

            /* NOTE: This is a virtual function in Nintendo's kernel. */
            /* virtual Result Reset(); */
    };

}
| 1,877
|
C++
|
.h
| 44
| 34.954545
| 184
| 0.656593
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
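
A hedged lifecycle sketch for the class above (editor-added); the function name and the use of MESOSPHERE_R_ABORT_UNLESS to consume the Result values are assumptions, and a kernel context with a live event object is required:

/* Hypothetical sketch; 'event' must be a live KReadableEvent. */
void SignalThenRearm(ams::kern::KReadableEvent *event) {
    /* Producer side: mark the event signaled, waking any waiters. */
    MESOSPHERE_R_ABORT_UNLESS(event->Signal());

    /* Consumer side: Clear() resets the signaled state; the header */
    /* documents the underlying Reset() as succeeding unconditionally. */
    MESOSPHERE_R_ABORT_UNLESS(event->Clear());
}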

| id: 8,773 | file_name: kern_k_light_session.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_light_session.hpp | content: |

/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_synchronization_object.hpp>
#include <mesosphere/kern_k_light_server_session.hpp>
#include <mesosphere/kern_k_light_client_session.hpp>
#include <mesosphere/kern_slab_helpers.hpp>

namespace ams::kern {

    class KClientPort;
    class KProcess;

    class KLightSession final : public KAutoObjectWithSlabHeapAndContainer<KLightSession, KAutoObjectWithList, true> {
        MESOSPHERE_AUTOOBJECT_TRAITS(KLightSession, KAutoObject);
        private:
            enum class State : u8 {
                Invalid      = 0,
                Normal       = 1,
                ClientClosed = 2,
                ServerClosed = 3,
            };
        public:
            static constexpr size_t DataSize = sizeof(u32) * 7;
            static constexpr u32 ReplyFlag   = (1u << (BITSIZEOF(u32) - 1));
        private:
            KLightServerSession m_server;
            KLightClientSession m_client;
            State m_state;
            KClientPort *m_port;
            uintptr_t m_name;
            KProcess *m_process;
            bool m_initialized;
        public:
            explicit KLightSession() : m_state(State::Invalid), m_process(), m_initialized() { /* ... */ }

            void Initialize(KClientPort *client_port, uintptr_t name);
            void Finalize();

            bool IsInitialized() const { return m_initialized; }
            uintptr_t GetPostDestroyArgument() const { return reinterpret_cast<uintptr_t>(m_process); }

            static void PostDestroy(uintptr_t arg);

            void OnServerClosed();
            void OnClientClosed();

            bool IsServerClosed() const { return m_state != State::Normal; }
            bool IsClientClosed() const { return m_state != State::Normal; }

            Result OnRequest(KThread *request_thread) { R_RETURN(m_server.OnRequest(request_thread)); }

            KLightClientSession &GetClientSession() { return m_client; }
            KLightServerSession &GetServerSession() { return m_server; }
            const KLightClientSession &GetClientSession() const { return m_client; }
            const KLightServerSession &GetServerSession() const { return m_server; }
    };

}

| size: 2,869 | language: C++ | extension: .h | total_lines: 62 | avg_line_length: 37.903226 | max_line_length: 118 | alphanum_fraction: 0.654864 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | all eight duplicate flags: false |
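
A worked check of the two constants above (editor-added); this is standalone C++ that compiles anywhere, independent of mesosphere's u32/BITSIZEOF helpers:

/* Standalone verification of DataSize and ReplyFlag from the header above. */
#include <cstdint>

static_assert(sizeof(std::uint32_t) * 7 == 28);  /* DataSize: light IPC carries a 7-word, 28-byte message. */
static_assert((1u << (32 - 1)) == 0x80000000u);  /* ReplyFlag: the top bit of a u32 (BITSIZEOF(u32) == 32). */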

| id: 8,774 | file_name: kern_k_page_table_slab_heap.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_page_table_slab_heap.hpp | content: |

/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
#include <mesosphere/kern_k_dynamic_slab_heap.hpp>

namespace ams::kern {

    namespace impl {

        class PageTablePage {
            private:
                u8 m_buffer[PageSize];
            public:
                ALWAYS_INLINE PageTablePage() { /* Do not initialize anything. */ }
        };
        static_assert(sizeof(PageTablePage) == PageSize);

    }

    class KPageTableSlabHeap : public KDynamicSlabHeap<impl::PageTablePage, true> {
        public:
            using RefCount = u16;
            static constexpr size_t PageTableSize = sizeof(impl::PageTablePage);
            static_assert(PageTableSize == PageSize);
        private:
            using BaseHeap = KDynamicSlabHeap<impl::PageTablePage, true>;
        private:
            RefCount *m_ref_counts{};
        public:
            static constexpr ALWAYS_INLINE size_t CalculateReferenceCountSize(size_t size) {
                return (size / PageSize) * sizeof(RefCount);
            }
        public:
            constexpr KPageTableSlabHeap() = default;
        private:
            ALWAYS_INLINE void Initialize(RefCount *rc) {
                m_ref_counts = rc;
                for (size_t i = 0; i < this->GetSize() / PageSize; i++) {
                    m_ref_counts[i] = 0;
                }
            }

            constexpr ALWAYS_INLINE RefCount *GetRefCountPointer(KVirtualAddress addr) const {
                return m_ref_counts + ((addr - this->GetAddress()) / PageSize);
            }
        public:
            ALWAYS_INLINE void Initialize(KDynamicPageManager *page_allocator, size_t object_count, RefCount *rc) {
                BaseHeap::Initialize(page_allocator, object_count);
                this->Initialize(rc);
            }

            ALWAYS_INLINE RefCount GetRefCount(KVirtualAddress addr) const {
                MESOSPHERE_ASSERT(this->IsInRange(addr));
                return *this->GetRefCountPointer(addr);
            }

            ALWAYS_INLINE void Open(KVirtualAddress addr, int count) {
                MESOSPHERE_ASSERT(this->IsInRange(addr));

                *this->GetRefCountPointer(addr) += count;
                MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) > 0);
            }

            ALWAYS_INLINE bool Close(KVirtualAddress addr, int count) {
                MESOSPHERE_ASSERT(this->IsInRange(addr));
                MESOSPHERE_ABORT_UNLESS(this->GetRefCount(addr) >= count);

                *this->GetRefCountPointer(addr) -= count;
                return this->GetRefCount(addr) == 0;
            }

            constexpr ALWAYS_INLINE bool IsInPageTableHeap(KVirtualAddress addr) const {
                return this->IsInRange(addr);
            }
    };

}

| size: 3,438 | language: C++ | extension: .h | total_lines: 79 | avg_line_length: 33.202532 | max_line_length: 115 | alphanum_fraction: 0.614649 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | all eight duplicate flags: false |
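
A hedged sketch of the Open()/Close() reference-counting contract above (editor-added); the function name is hypothetical and a kernel context with an address inside the heap is assumed:

/* Hypothetical sketch; 'page' must lie within the slab heap's range. */
void AddAndDropReference(ams::kern::KPageTableSlabHeap &heap, ams::kern::KVirtualAddress page) {
    heap.Open(page, 1);        /* Take one reference; aborts if the count would not end up positive. */
    if (heap.Close(page, 1)) { /* Drop it; Close() returns true when the count reaches zero... */
        /* ...at which point the caller is responsible for actually freeing the page. */
    }
}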

| id: 8,775 | file_name: kern_k_page_buffer.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_page_buffer.hpp | content: |

/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_slab_helpers.hpp>
#include <mesosphere/kern_k_memory_layout.hpp>
#include <mesosphere/kern_k_dynamic_page_manager.hpp>

namespace ams::kern {

    class KDynamicPageManager;
    class KPageBuffer;

    class KPageBufferSlabHeap : protected impl::KSlabHeapImpl {
        public:
            static constexpr size_t BufferSize = PageSize;
            static constinit inline size_t s_buffer_count = 0;
        private:
            size_t m_obj_size{};
        public:
            constexpr KPageBufferSlabHeap() = default;

            /* See kern_init_slab_setup.cpp for definition. */
            void Initialize(KDynamicPageManager &allocator);

            KPageBuffer *Allocate();
            void Free(KPageBuffer *pb);
    };

    class KPageBuffer {
        private:
            u8 m_buffer[KPageBufferSlabHeap::BufferSize];
        public:
            KPageBuffer() {
                std::memset(m_buffer, 0, sizeof(m_buffer));
            }

            ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() const {
                return KMemoryLayout::GetLinearPhysicalAddress(KVirtualAddress(this));
            }

            static ALWAYS_INLINE KPageBuffer *FromPhysicalAddress(KPhysicalAddress phys_addr) {
                const KVirtualAddress virt_addr = KMemoryLayout::GetLinearVirtualAddress(phys_addr);

                MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
                MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));

                return GetPointer<KPageBuffer>(virt_addr);
            }
        private:
            static constinit inline KPageBufferSlabHeap s_slab_heap;
        public:
            static void InitializeSlabHeap(KDynamicPageManager &allocator) {
                s_slab_heap.Initialize(allocator);
            }

            static KPageBuffer *Allocate() {
                return s_slab_heap.Allocate();
            }

            static void Free(KPageBuffer *obj) {
                s_slab_heap.Free(obj);
            }

            template<size_t ExpectedSize>
            static ALWAYS_INLINE KPageBuffer *AllocateChecked() {
                /* Check that the allocation is valid. */
                MESOSPHERE_ABORT_UNLESS(sizeof(KPageBuffer) == ExpectedSize);

                return Allocate();
            }

            template<size_t ExpectedSize>
            static ALWAYS_INLINE void FreeChecked(KPageBuffer *obj) {
                /* Check that the free is valid. */
                MESOSPHERE_ABORT_UNLESS(sizeof(KPageBuffer) == ExpectedSize);

                return Free(obj);
            }
    };
    static_assert(sizeof(KPageBuffer) == KPageBufferSlabHeap::BufferSize);

    ALWAYS_INLINE KPageBuffer *KPageBufferSlabHeap::Allocate() {
        KPageBuffer *pb = static_cast<KPageBuffer *>(KSlabHeapImpl::Allocate());
        if (AMS_LIKELY(pb != nullptr)) {
            std::construct_at(pb);
        }
        return pb;
    }

    ALWAYS_INLINE void KPageBufferSlabHeap::Free(KPageBuffer *pb) {
        KSlabHeapImpl::Free(pb);
    }

}

| size: 3,744 | language: C++ | extension: .h | total_lines: 88 | avg_line_length: 32.886364 | max_line_length: 100 | alphanum_fraction: 0.634763 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | all eight duplicate flags: false |
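
A hedged sketch of the Checked allocation variants above (editor-added); the wrapper function is hypothetical and a kernel context is assumed:

/* Hypothetical wrapper; ams::kern::PageSize is the expected object size. */
ams::kern::KPageBuffer *AllocateOnePage() {
    /* Aborts at runtime if sizeof(KPageBuffer) ever diverges from the size the caller assumed. */
    return ams::kern::KPageBuffer::AllocateChecked<ams::kern::PageSize>();
}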

| id: 8,776 | file_name: kern_k_memory_region_type.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_memory_region_type.hpp | content: |

/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>

namespace ams::kern {

    enum KMemoryRegionType : u32 {};

    enum KMemoryRegionAttr : typename std::underlying_type<KMemoryRegionType>::type {
        KMemoryRegionAttr_CarveoutProtected = 0x02000000,
        KMemoryRegionAttr_Uncached          = 0x04000000,
        KMemoryRegionAttr_DidKernelMap      = 0x08000000,
        KMemoryRegionAttr_ShouldKernelMap   = 0x10000000,
        KMemoryRegionAttr_UserReadOnly      = 0x20000000,
        KMemoryRegionAttr_NoUserMap         = 0x40000000,
        KMemoryRegionAttr_LinearMapped      = 0x80000000,
    };

    namespace impl {

        consteval size_t BitsForDeriveSparse(size_t n) {
            return n + 1;
        }

        consteval size_t BitsForDeriveDense(size_t n) {
            AMS_ASSUME(n > 0);

            size_t low = 0, high = 1;
            for (size_t i = 0; i < n - 1; ++i) {
                if ((++low) == high) {
                    ++high;
                    low = 0;
                }
            }
            return high + 1;
        }

        class KMemoryRegionTypeValue {
            private:
                using ValueType = typename std::underlying_type<KMemoryRegionType>::type;
            private:
                ValueType m_value;
                size_t m_next_bit;
                bool m_finalized;
                bool m_sparse_only;
                bool m_dense_only;
            private:
                consteval KMemoryRegionTypeValue(ValueType v) : m_value(v), m_next_bit(0), m_finalized(false), m_sparse_only(false), m_dense_only(false) { /* ... */ }
            public:
                consteval KMemoryRegionTypeValue() : KMemoryRegionTypeValue(0) { /* ... */ }

                consteval operator KMemoryRegionType() const { return static_cast<KMemoryRegionType>(m_value); }

                consteval ValueType GetValue() const { return m_value; }

                consteval const KMemoryRegionTypeValue Finalize() {
                    AMS_ASSUME(!m_finalized);

                    KMemoryRegionTypeValue new_type = *this;
                    new_type.m_finalized = true;
                    return new_type;
                }

                consteval const KMemoryRegionTypeValue SetSparseOnly() {
                    AMS_ASSUME(!m_finalized);
                    AMS_ASSUME(!m_sparse_only);
                    AMS_ASSUME(!m_dense_only);

                    KMemoryRegionTypeValue new_type = *this;
                    new_type.m_sparse_only = true;
                    return new_type;
                }

                consteval const KMemoryRegionTypeValue SetDenseOnly() {
                    AMS_ASSUME(!m_finalized);
                    AMS_ASSUME(!m_sparse_only);
                    AMS_ASSUME(!m_dense_only);

                    KMemoryRegionTypeValue new_type = *this;
                    new_type.m_dense_only = true;
                    return new_type;
                }

                consteval KMemoryRegionTypeValue SetAttribute(KMemoryRegionAttr attr) {
                    AMS_ASSUME(!m_finalized);

                    KMemoryRegionTypeValue new_type = *this;
                    new_type.m_value |= attr;
                    return new_type;
                }

                consteval KMemoryRegionTypeValue DeriveInitial(size_t i, size_t next = BITSIZEOF(ValueType)) const {
                    AMS_ASSUME(!m_finalized);
                    AMS_ASSUME(!m_value);
                    AMS_ASSUME(!m_next_bit);
                    AMS_ASSUME(next > i);

                    KMemoryRegionTypeValue new_type = *this;
                    new_type.m_value    = (ValueType{1} << i);
                    new_type.m_next_bit = next;
                    return new_type;
                }

                consteval KMemoryRegionTypeValue DeriveAttribute(KMemoryRegionAttr attr) const {
                    AMS_ASSUME(!m_finalized);

                    KMemoryRegionTypeValue new_type = *this;
                    new_type.m_value |= attr;
                    return new_type;
                }

                consteval KMemoryRegionTypeValue DeriveTransition(size_t ofs = 0, size_t adv = 1) const {
                    AMS_ASSUME(!m_finalized);
                    AMS_ASSUME(ofs < adv);
                    AMS_ASSUME(m_next_bit + adv <= BITSIZEOF(ValueType));

                    KMemoryRegionTypeValue new_type = *this;
                    new_type.m_value    |= (ValueType{1} << (m_next_bit + ofs));
                    new_type.m_next_bit += adv;
                    return new_type;
                }

                consteval KMemoryRegionTypeValue DeriveSparse(size_t ofs, size_t n, size_t i) const {
                    AMS_ASSUME(!m_finalized);
                    AMS_ASSUME(!m_dense_only);
                    AMS_ASSUME(m_next_bit + ofs + n + 1 <= BITSIZEOF(ValueType));
                    AMS_ASSUME(i < n);

                    KMemoryRegionTypeValue new_type = *this;
                    new_type.m_value    |= (ValueType{1} << (m_next_bit + ofs));
                    new_type.m_value    |= (ValueType{1} << (m_next_bit + ofs + 1 + i));
                    new_type.m_next_bit += ofs + n + 1;
                    return new_type;
                }

                consteval KMemoryRegionTypeValue Derive(size_t n, size_t i) const {
                    AMS_ASSUME(!m_finalized);
                    AMS_ASSUME(!m_sparse_only);
                    AMS_ASSUME(m_next_bit + BitsForDeriveDense(n) <= BITSIZEOF(ValueType));
                    AMS_ASSUME(i < n);

                    size_t low = 0, high = 1;
                    for (size_t j = 0; j < i; ++j) {
                        if ((++low) == high) {
                            ++high;
                            low = 0;
                        }
                    }
                    AMS_ASSUME(high < BitsForDeriveDense(n));

                    KMemoryRegionTypeValue new_type = *this;
                    new_type.m_value    |= (ValueType{1} << (m_next_bit + low));
                    new_type.m_value    |= (ValueType{1} << (m_next_bit + high));
                    new_type.m_next_bit += BitsForDeriveDense(n);
                    return new_type;
                }

                consteval KMemoryRegionTypeValue Advance(size_t n) const {
                    AMS_ASSUME(!m_finalized);
                    AMS_ASSUME(m_next_bit + n <= BITSIZEOF(ValueType));

                    KMemoryRegionTypeValue new_type = *this;
                    new_type.m_next_bit += n;
                    return new_type;
                }

                constexpr ALWAYS_INLINE bool IsAncestorOf(ValueType v) const {
                    return (m_value | v) == v;
                }
        };

    }

    constexpr inline const auto KMemoryRegionType_None   = impl::KMemoryRegionTypeValue();
    constexpr inline const auto KMemoryRegionType_Kernel = KMemoryRegionType_None.DeriveInitial(0, 2);
    constexpr inline const auto KMemoryRegionType_Dram   = KMemoryRegionType_None.DeriveInitial(1, 2);
    static_assert(KMemoryRegionType_Kernel.GetValue() == 0x1);
    static_assert(KMemoryRegionType_Dram  .GetValue() == 0x2);

    /* constexpr inline const auto KMemoryRegionType_CoreLocalRegion = KMemoryRegionType_None.DeriveInitial(2).Finalize(); */
    /* static_assert(KMemoryRegionType_CoreLocalRegion.GetValue() == 0x4); */

    constexpr inline const auto KMemoryRegionType_DramKernelBase   = KMemoryRegionType_Dram.DeriveSparse(0, 3, 0).SetAttribute(KMemoryRegionAttr_NoUserMap).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
    constexpr inline const auto KMemoryRegionType_DramReservedBase = KMemoryRegionType_Dram.DeriveSparse(0, 3, 1);
    constexpr inline const auto KMemoryRegionType_DramHeapBase     = KMemoryRegionType_Dram.DeriveSparse(0, 3, 2).SetAttribute(KMemoryRegionAttr_LinearMapped);
    static_assert(KMemoryRegionType_DramKernelBase  .GetValue() == (0xE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap));
    static_assert(KMemoryRegionType_DramReservedBase.GetValue() == (0x16));
    static_assert(KMemoryRegionType_DramHeapBase    .GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped));

    constexpr inline const auto KMemoryRegionType_DramKernelCode   = KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 0);
    constexpr inline const auto KMemoryRegionType_DramKernelSlab   = KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 1);
    constexpr inline const auto KMemoryRegionType_DramKernelPtHeap = KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 2).SetAttribute(KMemoryRegionAttr_LinearMapped);
    constexpr inline const auto KMemoryRegionType_DramKernelInitPt = KMemoryRegionType_DramKernelBase.DeriveSparse(0, 4, 3).SetAttribute(KMemoryRegionAttr_LinearMapped);
    static_assert(KMemoryRegionType_DramKernelCode  .GetValue() == (0xCE | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap));
    static_assert(KMemoryRegionType_DramKernelSlab  .GetValue() == (0x14E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap));
    static_assert(KMemoryRegionType_DramKernelPtHeap.GetValue() == (0x24E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
    static_assert(KMemoryRegionType_DramKernelInitPt.GetValue() == (0x44E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));

    constexpr inline const auto KMemoryRegionType_DramKernelSecureAppletMemory = KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 0).SetAttribute(KMemoryRegionAttr_LinearMapped);
    constexpr inline const auto KMemoryRegionType_DramKernelSecureUnknown      = KMemoryRegionType_DramKernelBase.DeriveSparse(1, 3, 1).SetAttribute(KMemoryRegionAttr_LinearMapped);
    static_assert(KMemoryRegionType_DramKernelSecureAppletMemory.GetValue() == (0x18E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));
    static_assert(KMemoryRegionType_DramKernelSecureUnknown.GetValue() == (0x28E | KMemoryRegionAttr_CarveoutProtected | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_LinearMapped));

    constexpr inline const auto KMemoryRegionType_DramReservedEarly = KMemoryRegionType_DramReservedBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
    static_assert(KMemoryRegionType_DramReservedEarly.GetValue() == (0x16 | KMemoryRegionAttr_NoUserMap));

    constexpr inline const auto KMemoryRegionType_KernelTraceBuffer = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 0).SetAttribute(KMemoryRegionAttr_LinearMapped).SetAttribute(KMemoryRegionAttr_UserReadOnly);
    constexpr inline const auto KMemoryRegionType_OnMemoryBootImage = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 1);
    constexpr inline const auto KMemoryRegionType_DTB               = KMemoryRegionType_DramReservedBase.DeriveSparse(0, 3, 2);
    static_assert(KMemoryRegionType_KernelTraceBuffer.GetValue() == (0xD6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_UserReadOnly));
    static_assert(KMemoryRegionType_OnMemoryBootImage.GetValue() == 0x156);
    static_assert(KMemoryRegionType_DTB.GetValue() == 0x256);

    constexpr inline const auto KMemoryRegionType_DramPoolPartition = KMemoryRegionType_DramHeapBase.DeriveAttribute(KMemoryRegionAttr_NoUserMap);
    static_assert(KMemoryRegionType_DramPoolPartition.GetValue() == (0x26 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));

    constexpr inline const auto KMemoryRegionType_DramPoolManagement = KMemoryRegionType_DramPoolPartition.Derive(4, 0).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
    /* UNUSED: .Derive(4, 1); */
    /* UNUSED: .Derive(4, 2); */
    constexpr inline const auto KMemoryRegionType_DramUserPool = KMemoryRegionType_DramPoolPartition.Derive(4, 3);
    static_assert(KMemoryRegionType_DramPoolManagement.GetValue() == (0xE6 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected));
    static_assert(KMemoryRegionType_DramUserPool      .GetValue() == (0x266 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));

    constexpr inline const auto KMemoryRegionType_DramApplicationPool     = KMemoryRegionType_DramUserPool.Derive(4, 0);
    constexpr inline const auto KMemoryRegionType_DramAppletPool          = KMemoryRegionType_DramUserPool.Derive(4, 1);
    constexpr inline const auto KMemoryRegionType_DramSystemNonSecurePool = KMemoryRegionType_DramUserPool.Derive(4, 2);
    constexpr inline const auto KMemoryRegionType_DramSystemPool          = KMemoryRegionType_DramUserPool.Derive(4, 3).SetAttribute(KMemoryRegionAttr_CarveoutProtected);
    static_assert(KMemoryRegionType_DramApplicationPool    .GetValue() == (0xE66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
    static_assert(KMemoryRegionType_DramAppletPool         .GetValue() == (0x1666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
    static_assert(KMemoryRegionType_DramSystemNonSecurePool.GetValue() == (0x1A66 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap));
    static_assert(KMemoryRegionType_DramSystemPool         .GetValue() == (0x2666 | KMemoryRegionAttr_LinearMapped | KMemoryRegionAttr_NoUserMap | KMemoryRegionAttr_CarveoutProtected));

    constexpr inline const auto KMemoryRegionType_VirtualDramHeapBase          = KMemoryRegionType_Dram.DeriveSparse(1, 4, 0);
    constexpr inline const auto KMemoryRegionType_VirtualDramKernelPtHeap      = KMemoryRegionType_Dram.DeriveSparse(1, 4, 1);
    constexpr inline const auto KMemoryRegionType_VirtualDramKernelTraceBuffer = KMemoryRegionType_Dram.DeriveSparse(1, 4, 2);
    static_assert(KMemoryRegionType_VirtualDramHeapBase         .GetValue() == 0x1A);
    static_assert(KMemoryRegionType_VirtualDramKernelPtHeap     .GetValue() == 0x2A);
    static_assert(KMemoryRegionType_VirtualDramKernelTraceBuffer.GetValue() == 0x4A);

    /* UNUSED: .DeriveSparse(2, 2, 0); */
    constexpr inline const auto KMemoryRegionType_VirtualDramUnknownDebug             = KMemoryRegionType_Dram.Advance(2).Derive(4, 0);
    constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureAppletMemory = KMemoryRegionType_Dram.Advance(2).Derive(4, 1);
    /* UNUSED: .Derive(4, 2); */
    constexpr inline const auto KMemoryRegionType_VirtualDramKernelSecureUnknown      = KMemoryRegionType_Dram.Advance(2).Derive(4, 3);
    static_assert(KMemoryRegionType_VirtualDramUnknownDebug            .GetValue() == (0x32));
    static_assert(KMemoryRegionType_VirtualDramKernelSecureAppletMemory.GetValue() == (0x52));
    static_assert(KMemoryRegionType_VirtualDramKernelSecureUnknown     .GetValue() == (0x92));

    constexpr inline const auto KMemoryRegionType_VirtualDramKernelInitPt  = KMemoryRegionType_VirtualDramHeapBase.Derive(4, 0);
    constexpr inline const auto KMemoryRegionType_VirtualDramPoolManagement = KMemoryRegionType_VirtualDramHeapBase.Derive(4, 1);
    constexpr inline const auto KMemoryRegionType_VirtualDramUserPool       = KMemoryRegionType_VirtualDramHeapBase.Derive(4, 2);
    /* UNUSED: .Derive(4, 3); */
    static_assert(KMemoryRegionType_VirtualDramKernelInitPt  .GetValue() == 0x31A);
    static_assert(KMemoryRegionType_VirtualDramPoolManagement.GetValue() == 0x51A);
    static_assert(KMemoryRegionType_VirtualDramUserPool      .GetValue() == 0x61A);

    constexpr inline const auto KMemoryRegionType_VirtualDramApplicationPool     = KMemoryRegionType_VirtualDramUserPool.Derive(4, 0);
    constexpr inline const auto KMemoryRegionType_VirtualDramAppletPool          = KMemoryRegionType_VirtualDramUserPool.Derive(4, 1);
    constexpr inline const auto KMemoryRegionType_VirtualDramSystemNonSecurePool = KMemoryRegionType_VirtualDramUserPool.Derive(4, 2);
    constexpr inline const auto KMemoryRegionType_VirtualDramSystemPool          = KMemoryRegionType_VirtualDramUserPool.Derive(4, 3);
    static_assert(KMemoryRegionType_VirtualDramApplicationPool    .GetValue() == 0x361A);
    static_assert(KMemoryRegionType_VirtualDramAppletPool         .GetValue() == 0x561A);
    static_assert(KMemoryRegionType_VirtualDramSystemNonSecurePool.GetValue() == 0x661A);
    static_assert(KMemoryRegionType_VirtualDramSystemPool         .GetValue() == 0x961A);

    constexpr inline const auto KMemoryRegionType_ArchDeviceBase  = KMemoryRegionType_Kernel.DeriveTransition(0, 1).SetSparseOnly();
    constexpr inline const auto KMemoryRegionType_BoardDeviceBase = KMemoryRegionType_Kernel.DeriveTransition(0, 2).SetDenseOnly();
    static_assert(KMemoryRegionType_ArchDeviceBase .GetValue() == 0x5);
    static_assert(KMemoryRegionType_BoardDeviceBase.GetValue() == 0x5);

    #if defined(ATMOSPHERE_ARCH_ARM64)
        #include <mesosphere/arch/arm64/kern_k_memory_region_device_types.inc>
    #elif defined(ATMOSPHERE_ARCH_ARM)
        #include <mesosphere/arch/arm/kern_k_memory_region_device_types.inc>
    #else
        /* Default to no architecture devices. */
        constexpr inline const auto NumArchitectureDeviceRegions = 0;
    #endif
    static_assert(NumArchitectureDeviceRegions >= 0);

    #if defined(ATMOSPHERE_BOARD_NINTENDO_NX)
        #include <mesosphere/board/nintendo/nx/kern_k_memory_region_device_types.inc>
    #else
        /* Default to no board devices. */
        constexpr inline const auto NumBoardDeviceRegions = 0;
    #endif
    static_assert(NumBoardDeviceRegions >= 0);

    constexpr inline const auto KMemoryRegionType_KernelCode  = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 0);
    constexpr inline const auto KMemoryRegionType_KernelStack = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 1);
    constexpr inline const auto KMemoryRegionType_KernelMisc  = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 2);
    constexpr inline const auto KMemoryRegionType_KernelSlab  = KMemoryRegionType_Kernel.DeriveSparse(1, 4, 3);
    static_assert(KMemoryRegionType_KernelCode .GetValue() == 0x19);
    static_assert(KMemoryRegionType_KernelStack.GetValue() == 0x29);
    static_assert(KMemoryRegionType_KernelMisc .GetValue() == 0x49);
    static_assert(KMemoryRegionType_KernelSlab .GetValue() == 0x89);

    constexpr inline const auto KMemoryRegionType_KernelMiscDerivedBase = KMemoryRegionType_KernelMisc.DeriveTransition();
    static_assert(KMemoryRegionType_KernelMiscDerivedBase.GetValue() == 0x149);

    /* UNUSED: .Derive(7, 0); */
    constexpr inline const auto KMemoryRegionType_KernelMiscMainStack      = KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 1);
    constexpr inline const auto KMemoryRegionType_KernelMiscMappedDevice   = KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 2);
    constexpr inline const auto KMemoryRegionType_KernelMiscExceptionStack = KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 3);
    constexpr inline const auto KMemoryRegionType_KernelMiscUnknownDebug   = KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 4);
    /* UNUSED: .Derive(7, 5); */
    constexpr inline const auto KMemoryRegionType_KernelMiscIdleStack      = KMemoryRegionType_KernelMiscDerivedBase.Derive(7, 6);
    static_assert(KMemoryRegionType_KernelMiscMainStack     .GetValue() == 0xB49);
    static_assert(KMemoryRegionType_KernelMiscMappedDevice  .GetValue() == 0xD49);
    static_assert(KMemoryRegionType_KernelMiscExceptionStack.GetValue() == 0x1349);
    static_assert(KMemoryRegionType_KernelMiscUnknownDebug  .GetValue() == 0x1549);
    static_assert(KMemoryRegionType_KernelMiscIdleStack     .GetValue() == 0x2349);

    constexpr inline const auto KMemoryRegionType_KernelTemp = KMemoryRegionType_Kernel.Advance(2).Derive(2, 0);
    static_assert(KMemoryRegionType_KernelTemp.GetValue() == 0x31);

    constexpr ALWAYS_INLINE KMemoryRegionType GetTypeForVirtualLinearMapping(u32 type_id) {
        if (KMemoryRegionType_DramKernelPtHeap.IsAncestorOf(type_id)) {
            return KMemoryRegionType_VirtualDramKernelPtHeap;
        } else if (KMemoryRegionType_DramKernelSecureAppletMemory.IsAncestorOf(type_id)) {
            return KMemoryRegionType_VirtualDramKernelSecureAppletMemory;
        } else if (KMemoryRegionType_DramKernelSecureUnknown.IsAncestorOf(type_id)) {
            return KMemoryRegionType_VirtualDramKernelSecureUnknown;
        } else if (KMemoryRegionType_KernelTraceBuffer.IsAncestorOf(type_id)) {
            return KMemoryRegionType_VirtualDramKernelTraceBuffer;
        } else if ((type_id | KMemoryRegionAttr_ShouldKernelMap) == type_id) {
            return KMemoryRegionType_VirtualDramUnknownDebug;
        } else {
            return KMemoryRegionType_Dram;
        }
    }

}

| size: 21,943 | language: C++ | extension: .h | total_lines: 291 | avg_line_length: 61.793814 | max_line_length: 217 | alphanum_fraction: 0.671931 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | all eight duplicate flags: false |
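
A standalone re-derivation (editor-added) of the bit math behind one static_assert above: the low bits of KMemoryRegionType_DramKernelSlab, with the attribute bits (CarveoutProtected, NoUserMap) ORed in separately. The helper name Bit is an assumption; the arithmetic mirrors DeriveInitial/DeriveSparse:

/* Compiles anywhere; re-traces the consteval derivation by hand. */
#include <cstdint>

constexpr std::uint32_t Bit(unsigned n) { return std::uint32_t{1} << n; }

/* Dram = None.DeriveInitial(1, 2): value 0x2, next free bit 2.                             */
/* DramKernelBase = Dram.DeriveSparse(0, 3, 0): sets bit 2 (the transition bit) and bit 3   */
/* (2 + 0 + 1 + i with i = 0); the next free bit advances to 2 + 0 + 3 + 1 = 6.             */
constexpr std::uint32_t DramKernelBase = 0x2 | Bit(2) | Bit(3);            /* == 0xE   */

/* DramKernelSlab = DramKernelBase.DeriveSparse(0, 4, 1): sets bit 6 and bit 6 + 1 + 1 = 8. */
constexpr std::uint32_t DramKernelSlab = DramKernelBase | Bit(6) | Bit(8); /* == 0x14E */

static_assert(DramKernelBase == 0xE);
static_assert(DramKernelSlab == 0x14E);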

| id: 8,777 | file_name: kern_k_spin_lock.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_spin_lock.hpp | content: |

/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_scoped_lock.hpp>

#if defined(ATMOSPHERE_ARCH_ARM64)

    #include <mesosphere/arch/arm64/kern_k_spin_lock.hpp>

    namespace ams::kern {
        using ams::kern::arch::arm64::KAlignedSpinLock;
        using ams::kern::arch::arm64::KNotAlignedSpinLock;
        using ams::kern::arch::arm64::KSpinLock;
    }

#else
    #error "Unknown architecture for KSpinLock"
#endif

namespace ams::kern {

    using KScopedSpinLock           = KScopedLock<KSpinLock>;
    using KScopedAlignedSpinLock    = KScopedLock<KAlignedSpinLock>;
    using KScopedNotAlignedSpinLock = KScopedLock<KNotAlignedSpinLock>;

}

| size: 1,325 | language: C++ | extension: .h | total_lines: 33 | avg_line_length: 36.878788 | max_line_length: 76 | alphanum_fraction: 0.748246 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | all eight duplicate flags: false |
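
A hedged RAII sketch for the scoped aliases above (editor-added); the function is hypothetical and a kernel context is assumed:

/* Hypothetical critical section using the scoped wrapper. */
void WithSpinLock(ams::kern::KSpinLock &lock) {
    ams::kern::KScopedSpinLock sl(lock); /* Acquires the lock on construction. */
    /* ...critical section... */
}                                        /* Releases when sl goes out of scope. */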

| id: 8,778 | file_name: kern_k_session_request.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_session_request.hpp | content: |

/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_auto_object.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
#include <mesosphere/kern_k_event.hpp>
#include <mesosphere/kern_k_thread.hpp>
#include <mesosphere/kern_k_process.hpp>
#include <mesosphere/kern_k_memory_block.hpp>

namespace ams::kern {

    class KSessionRequest final : public KSlabAllocated<KSessionRequest, true>, public KAutoObject, public util::IntrusiveListBaseNode<KSessionRequest> {
        MESOSPHERE_AUTOOBJECT_TRAITS(KSessionRequest, KAutoObject);
        public:
            class SessionMappings {
                private:
                    /* At most 15 buffers of each type (4-bit descriptor counts), for 45 total. */
                    static constexpr size_t NumMappings        = ((1ul << 4) - 1) * 3;
                    static constexpr size_t NumStaticMappings  = 8;
                    static constexpr size_t NumDynamicMappings = NumMappings - NumStaticMappings;

                    class Mapping {
                        private:
                            uintptr_t m_client_address;
                            uintptr_t m_server_address;
                            size_t m_size;
                            KMemoryState m_state;
                        public:
                            constexpr void Set(KProcessAddress c, KProcessAddress s, size_t sz, KMemoryState st) {
                                m_client_address = GetInteger(c);
                                m_server_address = GetInteger(s);
                                m_size           = sz;
                                m_state          = st;
                            }

                            constexpr ALWAYS_INLINE KProcessAddress GetClientAddress() const { return m_client_address; }
                            constexpr ALWAYS_INLINE KProcessAddress GetServerAddress() const { return m_server_address; }
                            constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; }
                            constexpr ALWAYS_INLINE KMemoryState GetMemoryState() const { return m_state; }
                    };
                public:
                    class DynamicMappings : public KSlabAllocated<DynamicMappings, true> {
                        private:
                            Mapping m_mappings[NumDynamicMappings];
                        public:
                            constexpr explicit DynamicMappings() : m_mappings() { /* ... */ }

                            constexpr ALWAYS_INLINE Mapping &Get(size_t idx) { return m_mappings[idx]; }
                            constexpr ALWAYS_INLINE const Mapping &Get(size_t idx) const { return m_mappings[idx]; }
                    };
                    static_assert(sizeof(DynamicMappings) == sizeof(Mapping) * NumDynamicMappings);
                private:
                    Mapping m_static_mappings[NumStaticMappings];
                    DynamicMappings *m_dynamic_mappings;
                    u8 m_num_send;
                    u8 m_num_recv;
                    u8 m_num_exch;
                public:
                    constexpr explicit SessionMappings(util::ConstantInitializeTag) : m_static_mappings(), m_dynamic_mappings(), m_num_send(), m_num_recv(), m_num_exch() { /* ... */ }
                    explicit SessionMappings() : m_dynamic_mappings(nullptr), m_num_send(), m_num_recv(), m_num_exch() { /* ... */ }

                    void Initialize() { /* ... */ }
                    void Finalize();

                    constexpr ALWAYS_INLINE size_t GetSendCount() const { return m_num_send; }
                    constexpr ALWAYS_INLINE size_t GetReceiveCount() const { return m_num_recv; }
                    constexpr ALWAYS_INLINE size_t GetExchangeCount() const { return m_num_exch; }

                    Result PushSend(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state);
                    Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state);
                    Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state);

                    constexpr ALWAYS_INLINE KProcessAddress GetSendClientAddress(size_t i) const { return GetSendMapping(i).GetClientAddress(); }
                    constexpr ALWAYS_INLINE KProcessAddress GetSendServerAddress(size_t i) const { return GetSendMapping(i).GetServerAddress(); }
                    constexpr ALWAYS_INLINE size_t GetSendSize(size_t i) const { return GetSendMapping(i).GetSize(); }
                    constexpr ALWAYS_INLINE KMemoryState GetSendMemoryState(size_t i) const { return GetSendMapping(i).GetMemoryState(); }

                    constexpr ALWAYS_INLINE KProcessAddress GetReceiveClientAddress(size_t i) const { return GetReceiveMapping(i).GetClientAddress(); }
                    constexpr ALWAYS_INLINE KProcessAddress GetReceiveServerAddress(size_t i) const { return GetReceiveMapping(i).GetServerAddress(); }
                    constexpr ALWAYS_INLINE size_t GetReceiveSize(size_t i) const { return GetReceiveMapping(i).GetSize(); }
                    constexpr ALWAYS_INLINE KMemoryState GetReceiveMemoryState(size_t i) const { return GetReceiveMapping(i).GetMemoryState(); }

                    constexpr ALWAYS_INLINE KProcessAddress GetExchangeClientAddress(size_t i) const { return GetExchangeMapping(i).GetClientAddress(); }
                    constexpr ALWAYS_INLINE KProcessAddress GetExchangeServerAddress(size_t i) const { return GetExchangeMapping(i).GetServerAddress(); }
                    constexpr ALWAYS_INLINE size_t GetExchangeSize(size_t i) const { return GetExchangeMapping(i).GetSize(); }
                    constexpr ALWAYS_INLINE KMemoryState GetExchangeMemoryState(size_t i) const { return GetExchangeMapping(i).GetMemoryState(); }
                private:
                    Result PushMap(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state, size_t index);

                    constexpr ALWAYS_INLINE const Mapping &GetSendMapping(size_t i) const {
                        MESOSPHERE_ASSERT(i < m_num_send);

                        const size_t index = i;
                        if (index < NumStaticMappings) {
                            return m_static_mappings[index];
                        } else {
                            return m_dynamic_mappings->Get(index - NumStaticMappings);
                        }
                    }

                    constexpr ALWAYS_INLINE const Mapping &GetReceiveMapping(size_t i) const {
                        MESOSPHERE_ASSERT(i < m_num_recv);

                        const size_t index = m_num_send + i;
                        if (index < NumStaticMappings) {
                            return m_static_mappings[index];
                        } else {
                            return m_dynamic_mappings->Get(index - NumStaticMappings);
                        }
                    }

                    constexpr ALWAYS_INLINE const Mapping &GetExchangeMapping(size_t i) const {
                        MESOSPHERE_ASSERT(i < m_num_exch);

                        const size_t index = m_num_send + m_num_recv + i;
                        if (index < NumStaticMappings) {
                            return m_static_mappings[index];
                        } else {
                            return m_dynamic_mappings->Get(index - NumStaticMappings);
                        }
                    }
            };
        private:
            SessionMappings m_mappings;
            KThread *m_thread;
            KProcess *m_server;
            KEvent *m_event;
            uintptr_t m_address;
            size_t m_size;
        public:
            constexpr explicit KSessionRequest(util::ConstantInitializeTag) : KAutoObject(util::ConstantInitialize), m_mappings(util::ConstantInitialize), m_thread(), m_server(), m_event(), m_address(), m_size() { /* ... */ }
            explicit KSessionRequest() : m_thread(nullptr), m_server(nullptr), m_event(nullptr) { /* ... */ }

            static KSessionRequest *Create() {
                KSessionRequest *req = KSessionRequest::Allocate();
                if (AMS_LIKELY(req != nullptr)) {
                    KAutoObject::Create<KSessionRequest>(req);
                }
                return req;
            }

            static KSessionRequest *CreateFromUnusedSlabMemory() {
                KSessionRequest *req = KSessionRequest::AllocateFromUnusedSlabMemory();
                if (AMS_LIKELY(req != nullptr)) {
                    KAutoObject::Create<KSessionRequest>(req);
                }
                return req;
            }

            virtual void Destroy() override {
                this->Finalize();
                KSessionRequest::Free(this);
            }

            void Initialize(KEvent *event, uintptr_t address, size_t size) {
                m_mappings.Initialize();

                m_thread  = std::addressof(GetCurrentThread());
                m_event   = event;
                m_address = address;
                m_size    = size;

                m_thread->Open();
                if (m_event != nullptr) {
                    m_event->Open();
                }
            }

            static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }

            constexpr ALWAYS_INLINE KThread *GetThread() const { return m_thread; }
            constexpr ALWAYS_INLINE KEvent *GetEvent() const { return m_event; }
            constexpr ALWAYS_INLINE uintptr_t GetAddress() const { return m_address; }
            constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; }
            constexpr ALWAYS_INLINE KProcess *GetServerProcess() const { return m_server; }

            void ALWAYS_INLINE SetServerProcess(KProcess *process) {
                m_server = process;
                m_server->Open();
            }

            constexpr ALWAYS_INLINE void ClearThread() { m_thread = nullptr; }
            constexpr ALWAYS_INLINE void ClearEvent() { m_event = nullptr; }

            constexpr ALWAYS_INLINE size_t GetSendCount() const { return m_mappings.GetSendCount(); }
            constexpr ALWAYS_INLINE size_t GetReceiveCount() const { return m_mappings.GetReceiveCount(); }
            constexpr ALWAYS_INLINE size_t GetExchangeCount() const { return m_mappings.GetExchangeCount(); }

            ALWAYS_INLINE Result PushSend(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
                R_RETURN(m_mappings.PushSend(client, server, size, state));
            }

            ALWAYS_INLINE Result PushReceive(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
                R_RETURN(m_mappings.PushReceive(client, server, size, state));
            }

            ALWAYS_INLINE Result PushExchange(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
                R_RETURN(m_mappings.PushExchange(client, server, size, state));
            }

            constexpr ALWAYS_INLINE KProcessAddress GetSendClientAddress(size_t i) const { return m_mappings.GetSendClientAddress(i); }
            constexpr ALWAYS_INLINE KProcessAddress GetSendServerAddress(size_t i) const { return m_mappings.GetSendServerAddress(i); }
            constexpr ALWAYS_INLINE size_t GetSendSize(size_t i) const { return m_mappings.GetSendSize(i); }
            constexpr ALWAYS_INLINE KMemoryState GetSendMemoryState(size_t i) const { return m_mappings.GetSendMemoryState(i); }

            constexpr ALWAYS_INLINE KProcessAddress GetReceiveClientAddress(size_t i) const { return m_mappings.GetReceiveClientAddress(i); }
            constexpr ALWAYS_INLINE KProcessAddress GetReceiveServerAddress(size_t i) const { return m_mappings.GetReceiveServerAddress(i); }
            constexpr ALWAYS_INLINE size_t GetReceiveSize(size_t i) const { return m_mappings.GetReceiveSize(i); }
            constexpr ALWAYS_INLINE KMemoryState GetReceiveMemoryState(size_t i) const { return m_mappings.GetReceiveMemoryState(i); }

            constexpr ALWAYS_INLINE KProcessAddress GetExchangeClientAddress(size_t i) const { return m_mappings.GetExchangeClientAddress(i); }
            constexpr ALWAYS_INLINE KProcessAddress GetExchangeServerAddress(size_t i) const { return m_mappings.GetExchangeServerAddress(i); }
            constexpr ALWAYS_INLINE size_t GetExchangeSize(size_t i) const { return m_mappings.GetExchangeSize(i); }
            constexpr ALWAYS_INLINE KMemoryState GetExchangeMemoryState(size_t i) const { return m_mappings.GetExchangeMemoryState(i); }
        private:
            /* NOTE: This is public and virtual in Nintendo's kernel. */
            void Finalize() {
                m_mappings.Finalize();

                if (m_thread) {
                    m_thread->Close();
                }
                if (m_event) {
                    m_event->Close();
                }
                if (m_server) {
                    m_server->Close();
                }
            }
    };

}

| size: 13,985 | language: C++ | extension: .h | total_lines: 211 | avg_line_length: 49.127962 | max_line_length: 225 | alphanum_fraction: 0.581974 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | all eight duplicate flags: false |
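
The mapping getters above offset receive indices by m_num_send and exchange indices by m_num_send + m_num_recv because all three kinds share one contiguous sequence: 8 static slots first, then the dynamic overflow block. A standalone check (editor-added) of the count arithmetic:

/* Compiles anywhere; mirrors the constants in SessionMappings. */
static_assert(((1ul << 4) - 1) * 3 == 45); /* NumMappings: up to 15 send + 15 receive + 15 exchange. */
static_assert(45 - 8 == 37);               /* NumDynamicMappings: what remains after 8 static slots. */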

| id: 8,779 | file_name: kern_k_memory_region.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_memory_region.hpp | content: |

/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_memory_region_type.hpp>

namespace ams::kern {

    class KMemoryRegionTree;

    class KMemoryRegion : public util::IntrusiveRedBlackTreeBaseNode<KMemoryRegion> {
        NON_COPYABLE(KMemoryRegion);
        NON_MOVEABLE(KMemoryRegion);
        private:
            friend class KMemoryRegionTree;
        private:
            uintptr_t m_address;
            uintptr_t m_pair_address;
            uintptr_t m_last_address;
            u32 m_attributes;
            u32 m_type_id;
        public:
            static constexpr ALWAYS_INLINE int Compare(const KMemoryRegion &lhs, const KMemoryRegion &rhs) {
                if (lhs.GetAddress() < rhs.GetAddress()) {
                    return -1;
                } else if (lhs.GetAddress() <= rhs.GetLastAddress()) {
                    return 0;
                } else {
                    return 1;
                }
            }
        public:
            constexpr ALWAYS_INLINE KMemoryRegion() : util::IntrusiveRedBlackTreeBaseNode<KMemoryRegion>(util::ConstantInitialize), m_address(0), m_pair_address(0), m_last_address(0), m_attributes(0), m_type_id(0) { /* ... */ }

            ALWAYS_INLINE KMemoryRegion(uintptr_t a, size_t la, uintptr_t p, u32 r, u32 t) :
                m_address(a), m_pair_address(p), m_last_address(la), m_attributes(r), m_type_id(t)
            {
                /* ... */
            }

            ALWAYS_INLINE KMemoryRegion(uintptr_t a, size_t la, u32 r, u32 t) : KMemoryRegion(a, la, std::numeric_limits<uintptr_t>::max(), r, t) { /* ... */ }
        private:
            constexpr ALWAYS_INLINE void Reset(uintptr_t a, uintptr_t la, uintptr_t p, u32 r, u32 t) {
                m_address      = a;
                m_pair_address = p;
                m_last_address = la;
                m_attributes   = r;
                m_type_id      = t;
            }
        public:
            constexpr ALWAYS_INLINE uintptr_t GetAddress() const {
                return m_address;
            }

            constexpr ALWAYS_INLINE uintptr_t GetPairAddress() const {
                return m_pair_address;
            }

            constexpr ALWAYS_INLINE uintptr_t GetLastAddress() const {
                return m_last_address;
            }

            constexpr ALWAYS_INLINE uintptr_t GetEndAddress() const {
                return this->GetLastAddress() + 1;
            }

            constexpr ALWAYS_INLINE size_t GetSize() const {
                return this->GetEndAddress() - this->GetAddress();
            }

            constexpr ALWAYS_INLINE u32 GetAttributes() const {
                return m_attributes;
            }

            constexpr ALWAYS_INLINE u32 GetType() const {
                return m_type_id;
            }

            constexpr ALWAYS_INLINE void SetType(u32 type) {
                MESOSPHERE_INIT_ABORT_UNLESS(this->CanDerive(type));
                m_type_id = type;
            }

            constexpr ALWAYS_INLINE bool Contains(uintptr_t address) const {
                MESOSPHERE_INIT_ABORT_UNLESS(this->GetEndAddress() != 0);
                return this->GetAddress() <= address && address <= this->GetLastAddress();
            }

            constexpr ALWAYS_INLINE bool IsDerivedFrom(u32 type) const {
                return (this->GetType() | type) == this->GetType();
            }

            constexpr ALWAYS_INLINE bool HasTypeAttribute(KMemoryRegionAttr attr) const {
                return (this->GetType() | attr) == this->GetType();
            }

            constexpr ALWAYS_INLINE bool CanDerive(u32 type) const {
                return (this->GetType() | type) == type;
            }

            constexpr ALWAYS_INLINE void SetPairAddress(uintptr_t a) {
                m_pair_address = a;
            }

            constexpr ALWAYS_INLINE void SetTypeAttribute(KMemoryRegionAttr attr) {
                m_type_id |= attr;
            }
    };
    static_assert(std::is_trivially_destructible<KMemoryRegion>::value);

    class KMemoryRegionTree {
        public:
            struct DerivedRegionExtents {
                const KMemoryRegion *first_region;
                const KMemoryRegion *last_region;

                constexpr DerivedRegionExtents() : first_region(nullptr), last_region(nullptr) { /* ... */ }

                constexpr ALWAYS_INLINE uintptr_t GetAddress() const {
                    return this->first_region->GetAddress();
                }

                constexpr ALWAYS_INLINE uintptr_t GetLastAddress() const {
                    return this->last_region->GetLastAddress();
                }

                constexpr ALWAYS_INLINE uintptr_t GetEndAddress() const {
                    return this->GetLastAddress() + 1;
                }

                constexpr ALWAYS_INLINE size_t GetSize() const {
                    return this->GetEndAddress() - this->GetAddress();
                }
            };
        private:
            using TreeType = util::IntrusiveRedBlackTreeBaseTraits<KMemoryRegion>::TreeType<KMemoryRegion>;
        public:
            using value_type      = TreeType::value_type;
            using size_type       = TreeType::size_type;
            using difference_type = TreeType::difference_type;
            using pointer         = TreeType::pointer;
            using const_pointer   = TreeType::const_pointer;
            using reference       = TreeType::reference;
            using const_reference = TreeType::const_reference;
            using iterator        = TreeType::iterator;
            using const_iterator  = TreeType::const_iterator;
        private:
            TreeType m_tree;
        public:
            constexpr ALWAYS_INLINE KMemoryRegionTree() : m_tree() { /* ... */ }
        public:
            KMemoryRegion *FindModifiable(uintptr_t address) {
                if (auto it = this->find(KMemoryRegion(address, address, 0, 0)); it != this->end()) {
                    return std::addressof(*it);
                } else {
                    return nullptr;
                }
            }

            const KMemoryRegion *Find(uintptr_t address) const {
                if (auto it = this->find(KMemoryRegion(address, address, 0, 0)); it != this->cend()) {
                    return std::addressof(*it);
                } else {
                    return nullptr;
                }
            }

            const KMemoryRegion *FindByType(u32 type_id) const {
                for (auto it = this->cbegin(); it != this->cend(); ++it) {
                    if (it->GetType() == type_id) {
                        return std::addressof(*it);
                    }
                }
                return nullptr;
            }

            const KMemoryRegion *FindByTypeAndAttribute(u32 type_id, u32 attr) const {
                for (auto it = this->cbegin(); it != this->cend(); ++it) {
                    if (it->GetType() == type_id && it->GetAttributes() == attr) {
                        return std::addressof(*it);
                    }
                }
                return nullptr;
            }

            const KMemoryRegion *FindFirstDerived(u32 type_id) const {
                for (auto it = this->cbegin(); it != this->cend(); it++) {
                    if (it->IsDerivedFrom(type_id)) {
                        return std::addressof(*it);
                    }
                }
                return nullptr;
            }

            const KMemoryRegion *FindLastDerived(u32 type_id) const {
                const KMemoryRegion *region = nullptr;
                for (auto it = this->begin(); it != this->end(); it++) {
                    if (it->IsDerivedFrom(type_id)) {
                        region = std::addressof(*it);
                    }
                }
                return region;
            }

            DerivedRegionExtents GetDerivedRegionExtents(u32 type_id) const {
                DerivedRegionExtents extents;

                MESOSPHERE_INIT_ABORT_UNLESS(extents.first_region == nullptr);
                MESOSPHERE_INIT_ABORT_UNLESS(extents.last_region  == nullptr);

                for (auto it = this->cbegin(); it != this->cend(); it++) {
                    if (it->IsDerivedFrom(type_id)) {
                        if (extents.first_region == nullptr) {
                            extents.first_region = std::addressof(*it);
                        }
                        extents.last_region = std::addressof(*it);
                    }
                }

                MESOSPHERE_INIT_ABORT_UNLESS(extents.first_region != nullptr);
                MESOSPHERE_INIT_ABORT_UNLESS(extents.last_region  != nullptr);

                return extents;
            }
        public:
            NOINLINE void InsertDirectly(uintptr_t address, uintptr_t last_address, u32 attr = 0, u32 type_id = 0);
            NOINLINE bool Insert(uintptr_t address, size_t size, u32 type_id, u32 new_attr = 0, u32 old_attr = 0);
        public:
            /* Iterator accessors. */
            iterator begin() {
                return m_tree.begin();
            }

            const_iterator begin() const {
                return m_tree.begin();
            }

            iterator end() {
                return m_tree.end();
            }

            const_iterator end() const {
                return m_tree.end();
            }

            const_iterator cbegin() const {
                return this->begin();
            }

            const_iterator cend() const {
                return this->end();
            }

            iterator iterator_to(reference ref) {
                return m_tree.iterator_to(ref);
            }

            const_iterator iterator_to(const_reference ref) const {
                return m_tree.iterator_to(ref);
            }

            /* Content management. */
            bool empty() const {
                return m_tree.empty();
            }

            reference back() {
                return m_tree.back();
            }

            const_reference back() const {
                return m_tree.back();
            }

            reference front() {
                return m_tree.front();
            }

            const_reference front() const {
                return m_tree.front();
            }

            /* GCC over-eagerly inlines this operation. */
            NOINLINE iterator insert(reference ref) {
                return m_tree.insert(ref);
            }

            NOINLINE iterator erase(iterator it) {
                return m_tree.erase(it);
            }

            iterator find(const_reference ref) const {
                return m_tree.find(ref);
            }

            iterator nfind(const_reference ref) const {
                return m_tree.nfind(ref);
            }
    };

}

| size: 11,510 | language: C++ | extension: .h | total_lines: 263 | avg_line_length: 29.589354 | max_line_length: 227 | alphanum_fraction: 0.518667 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | all eight duplicate flags: false |
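
A hedged lookup sketch against the tree API above (editor-added); the function name and the LinearMapped filter are assumptions, and a kernel context is required:

/* Hypothetical lookup: find the region containing 'address' and require an attribute. */
const ams::kern::KMemoryRegion *FindLinearMappedRegion(const ams::kern::KMemoryRegionTree &tree, uintptr_t address) {
    /* Find() probes with a single-address dummy region; Compare() treats any overlap as a match. */
    const auto *region = tree.Find(address);
    if (region != nullptr && region->HasTypeAttribute(ams::kern::KMemoryRegionAttr_LinearMapped)) {
        return region;
    }
    return nullptr;
}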

| id: 8,780 | file_name: kern_select_cpu.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_select_cpu.hpp | content: |

/*
 * Copyright (c) Atmosphère-NX
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#pragma once
#include <mesosphere/kern_common.hpp>

#ifdef ATMOSPHERE_ARCH_ARM64

    #include <mesosphere/arch/arm64/kern_cpu.hpp>

    namespace ams::kern::cpu {
        using namespace ams::kern::arch::arm64::cpu;
    }

#else
    #error "Unknown architecture for CPU"
#endif

#ifdef ATMOSPHERE_BOARD_NINTENDO_NX

    #include <mesosphere/board/nintendo/nx/kern_cpu_map.hpp>

    namespace ams::kern::cpu {
        using namespace ams::kern::board::nintendo::nx::impl::cpu;
    }

#elif defined(ATMOSPHERE_BOARD_QEMU_VIRT)

    #include <mesosphere/board/qemu/virt/kern_cpu_map.hpp>

    namespace ams::kern::cpu {
        using namespace ams::kern::board::qemu::virt::impl::cpu;
    }

#else
    #error "Unknown board for CPU Map"
#endif

namespace ams::kern {

    namespace cpu {

        static constexpr inline size_t NumVirtualCores = BITSIZEOF(u64);

        static constexpr inline u64 VirtualCoreMask = [] {
            u64 mask = 0;
            for (size_t i = 0; i < NumVirtualCores; ++i) {
                mask |= (UINT64_C(1) << i);
            }
            return mask;
        }();

        static constexpr inline u64 ConvertVirtualCoreMaskToPhysical(u64 v_core_mask) {
            u64 p_core_mask = 0;
            while (v_core_mask != 0) {
                const u64 next = util::CountTrailingZeros(v_core_mask);
                v_core_mask &= ~(static_cast<u64>(1) << next);
                p_core_mask |= (static_cast<u64>(1) << cpu::VirtualToPhysicalCoreMap[next]);
            }
            return p_core_mask;
        }

    }

    static_assert(cpu::NumCores <= cpu::NumVirtualCores);
    static_assert(util::size(cpu::VirtualToPhysicalCoreMap) == cpu::NumVirtualCores);

}

| size: 2,345 | language: C++ | extension: .h | total_lines: 61 | avg_line_length: 32.163934 | max_line_length: 93 | alphanum_fraction: 0.658407 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | all eight duplicate flags: false |
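
A standalone re-implementation (editor-added) of the virtual-to-physical mask conversion above, using std::countr_zero in place of util::CountTrailingZeros and a hypothetical identity core map (the real map is board-specific):

/* Compiles with C++20; the core map below is an assumed identity mapping. */
#include <bit>
#include <cstdint>

constexpr int kVirtualToPhysicalCoreMap[4] = { 0, 1, 2, 3 };

constexpr std::uint64_t ConvertMask(std::uint64_t v_mask) {
    std::uint64_t p_mask = 0;
    while (v_mask != 0) {
        const int next = std::countr_zero(v_mask);                       /* Lowest set virtual core.    */
        v_mask &= ~(std::uint64_t{1} << next);                           /* Clear it...                 */
        p_mask |= (std::uint64_t{1} << kVirtualToPhysicalCoreMap[next]); /* ...set its physical twin.   */
    }
    return p_mask;
}
static_assert(ConvertMask(0b1010) == 0b1010); /* Identity map leaves the mask unchanged. */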

| id: 8,781 | file_name: kern_k_client_port.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_client_port.hpp | content: |

/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_synchronization_object.hpp>
namespace ams::kern {
class KPort;
class KSession;
class KClientSession;
class KLightSession;
class KLightClientSession;
class KClientPort final : public KSynchronizationObject {
MESOSPHERE_AUTOOBJECT_TRAITS(KClientPort, KSynchronizationObject);
private:
util::Atomic<s32> m_num_sessions;
util::Atomic<s32> m_peak_sessions;
s32 m_max_sessions;
KPort *m_parent;
public:
constexpr explicit KClientPort(util::ConstantInitializeTag) : KSynchronizationObject(util::ConstantInitialize), m_num_sessions(0), m_peak_sessions(0), m_max_sessions(), m_parent() { /* ... */ }
explicit KClientPort() { /* ... */ }
void Initialize(KPort *parent, s32 max_sessions);
void OnSessionFinalized();
void OnServerClosed();
constexpr const KPort *GetParent() const { return m_parent; }
ALWAYS_INLINE s32 GetNumSessions() const { return m_num_sessions.Load(); }
ALWAYS_INLINE s32 GetPeakSessions() const { return m_peak_sessions.Load(); }
ALWAYS_INLINE s32 GetMaxSessions() const { return m_max_sessions; }
bool IsLight() const;
bool IsServerClosed() const;
/* Overridden virtual functions. */
virtual void Destroy() override;
virtual bool IsSignaled() const override;
Result CreateSession(KClientSession **out);
Result CreateLightSession(KLightClientSession **out);
};
}
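/* Illustrative sketch (not part of the original header): KClientPort tracks current */
/* and peak session counts against a maximum. This standalone class is a hypothetical */
/* model of that accounting using std::atomic; the kernel's real session-creation     */
/* path does considerably more. Names and the retry loops are assumptions.            */
#include <atomic>
#include <cstdio>

class SessionCounter {
    private:
        std::atomic<int> m_num_sessions{0};
        std::atomic<int> m_peak_sessions{0};
        int m_max_sessions;
    public:
        explicit SessionCounter(int max) : m_max_sessions(max) { /* ... */ }
        bool TryOpen() {
            /* Atomically take a slot, refusing once the configured maximum is reached. */
            int cur = m_num_sessions.load();
            do {
                if (cur >= m_max_sessions) {
                    return false;
                }
            } while (!m_num_sessions.compare_exchange_weak(cur, cur + 1));
            /* Record the highest session count ever observed. */
            int peak = m_peak_sessions.load();
            while (peak < cur + 1 && !m_peak_sessions.compare_exchange_weak(peak, cur + 1)) { /* retry */ }
            return true;
        }
        void Close() { m_num_sessions.fetch_sub(1); }
};

int main() {
    SessionCounter counter(2);
    /* Third open fails because the maximum of two sessions is already reached. */
    std::printf("%d %d %d\n", counter.TryOpen(), counter.TryOpen(), counter.TryOpen());
    return 0;
}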
size: 2,294 | language: C++ | extension: .h | total_lines: 50 | avg_line_length: 38.54 | max_line_length: 205 | alphanum_fraction: 0.678012 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near, redpajama/githubcode/stackv1/stackv2): all false

id: 8,782 | file_name: kern_k_process.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_process.hpp
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
#include <mesosphere/kern_k_synchronization_object.hpp>
#include <mesosphere/kern_k_handle_table.hpp>
#include <mesosphere/kern_k_thread.hpp>
#include <mesosphere/kern_k_thread_local_page.hpp>
#include <mesosphere/kern_k_shared_memory_info.hpp>
#include <mesosphere/kern_k_io_region.hpp>
#include <mesosphere/kern_k_worker_task.hpp>
#include <mesosphere/kern_select_page_table.hpp>
#include <mesosphere/kern_k_condition_variable.hpp>
#include <mesosphere/kern_k_address_arbiter.hpp>
#include <mesosphere/kern_k_capabilities.hpp>
#include <mesosphere/kern_k_wait_object.hpp>
#include <mesosphere/kern_k_dynamic_resource_manager.hpp>
#include <mesosphere/kern_k_page_table_manager.hpp>
#include <mesosphere/kern_k_system_resource.hpp>
namespace ams::kern {
class KProcess final : public KAutoObjectWithSlabHeapAndContainer<KProcess, KWorkerTask> {
MESOSPHERE_AUTOOBJECT_TRAITS(KProcess, KSynchronizationObject);
public:
enum State {
State_Created = ams::svc::ProcessState_Created,
State_CreatedAttached = ams::svc::ProcessState_CreatedAttached,
State_Running = ams::svc::ProcessState_Running,
State_Crashed = ams::svc::ProcessState_Crashed,
State_RunningAttached = ams::svc::ProcessState_RunningAttached,
State_Terminating = ams::svc::ProcessState_Terminating,
State_Terminated = ams::svc::ProcessState_Terminated,
State_DebugBreak = ams::svc::ProcessState_DebugBreak,
};
using ThreadList = util::IntrusiveListMemberTraits<&KThread::m_process_list_node>::ListType;
static constexpr size_t AslrAlignment = KernelAslrAlignment;
private:
using SharedMemoryInfoList = util::IntrusiveListBaseTraits<KSharedMemoryInfo>::ListType;
using IoRegionList = util::IntrusiveListMemberTraits<&KIoRegion::m_process_list_node>::ListType;
using TLPTree = util::IntrusiveRedBlackTreeBaseTraits<KThreadLocalPage>::TreeType<KThreadLocalPage>;
using TLPIterator = TLPTree::iterator;
private:
KProcessPageTable m_page_table;
util::Atomic<size_t> m_used_kernel_memory_size;
TLPTree m_fully_used_tlp_tree;
TLPTree m_partially_used_tlp_tree;
s32 m_ideal_core_id;
void *m_attached_object;
KResourceLimit *m_resource_limit;
KSystemResource *m_system_resource;
size_t m_memory_release_hint;
State m_state;
KLightLock m_state_lock;
KLightLock m_list_lock;
KConditionVariable m_cond_var;
KAddressArbiter m_address_arbiter;
u64 m_entropy[4];
bool m_is_signaled;
bool m_is_initialized;
bool m_is_application;
bool m_is_default_application_system_resource;
char m_name[13];
util::Atomic<u16> m_num_running_threads;
u32 m_flags;
KMemoryManager::Pool m_memory_pool;
s64 m_schedule_count;
KCapabilities m_capabilities;
ams::svc::ProgramId m_program_id;
u64 m_process_id;
#if defined(MESOSPHERE_ENABLE_PROCESS_CREATION_TIME)
s64 m_creation_time;
#endif
KProcessAddress m_code_address;
size_t m_code_size;
size_t m_main_thread_stack_size;
size_t m_max_process_memory;
u32 m_version;
KHandleTable m_handle_table;
KProcessAddress m_plr_address;
void *m_plr_heap_address;
KThread *m_exception_thread;
ThreadList m_thread_list;
SharedMemoryInfoList m_shared_memory_list;
IoRegionList m_io_region_list;
bool m_is_suspended;
bool m_is_immortal;
bool m_is_jit_debug;
bool m_is_handle_table_initialized;
ams::svc::DebugEvent m_jit_debug_event_type;
ams::svc::DebugException m_jit_debug_exception_type;
uintptr_t m_jit_debug_params[4];
u64 m_jit_debug_thread_id;
KWaitObject m_wait_object;
KThread *m_running_threads[cpu::NumCores];
u64 m_running_thread_idle_counts[cpu::NumCores];
u64 m_running_thread_switch_counts[cpu::NumCores];
KThread *m_pinned_threads[cpu::NumCores];
util::Atomic<s64> m_cpu_time;
util::Atomic<s64> m_num_process_switches;
util::Atomic<s64> m_num_thread_switches;
util::Atomic<s64> m_num_fpu_switches;
util::Atomic<s64> m_num_supervisor_calls;
util::Atomic<s64> m_num_ipc_messages;
util::Atomic<s64> m_num_ipc_replies;
util::Atomic<s64> m_num_ipc_receives;
private:
Result Initialize(const ams::svc::CreateProcessParameter ¶ms);
Result StartTermination();
void FinishTermination();
ALWAYS_INLINE void PinThread(s32 core_id, KThread *thread) {
MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
MESOSPHERE_ASSERT(thread != nullptr);
MESOSPHERE_ASSERT(m_pinned_threads[core_id] == nullptr);
m_pinned_threads[core_id] = thread;
}
ALWAYS_INLINE void UnpinThread(s32 core_id, KThread *thread) {
MESOSPHERE_UNUSED(thread);
MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
MESOSPHERE_ASSERT(thread != nullptr);
MESOSPHERE_ASSERT(m_pinned_threads[core_id] == thread);
m_pinned_threads[core_id] = nullptr;
}
public:
explicit KProcess() : m_is_initialized(false) { /* ... */ }
Result Initialize(const ams::svc::CreateProcessParameter ¶ms, const KPageGroup &pg, const u32 *caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool, bool immortal);
Result Initialize(const ams::svc::CreateProcessParameter ¶ms, svc::KUserPointer<const u32 *> caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool);
void Exit();
constexpr const char *GetName() const { return m_name; }
constexpr ams::svc::ProgramId GetProgramId() const { return m_program_id; }
constexpr u64 GetProcessId() const { return m_process_id; }
constexpr State GetState() const { return m_state; }
constexpr u64 GetCoreMask() const { return m_capabilities.GetCoreMask(); }
constexpr u64 GetPhysicalCoreMask() const { return m_capabilities.GetPhysicalCoreMask(); }
constexpr u64 GetPriorityMask() const { return m_capabilities.GetPriorityMask(); }
constexpr s32 GetIdealCoreId() const { return m_ideal_core_id; }
constexpr void SetIdealCoreId(s32 core_id) { m_ideal_core_id = core_id; }
constexpr bool CheckThreadPriority(s32 prio) const { return ((1ul << prio) & this->GetPriorityMask()) != 0; }
constexpr u32 GetCreateProcessFlags() const { return m_flags; }
constexpr bool Is64Bit() const { return m_flags & ams::svc::CreateProcessFlag_Is64Bit; }
constexpr KProcessAddress GetEntryPoint() const { return m_code_address; }
constexpr size_t GetMainStackSize() const { return m_main_thread_stack_size; }
constexpr KMemoryManager::Pool GetMemoryPool() const { return m_memory_pool; }
constexpr u64 GetRandomEntropy(size_t i) const { return m_entropy[i]; }
constexpr bool IsApplication() const { return m_is_application; }
constexpr bool IsDefaultApplicationSystemResource() const { return m_is_default_application_system_resource; }
constexpr bool IsSuspended() const { return m_is_suspended; }
constexpr void SetSuspended(bool suspended) { m_is_suspended = suspended; }
Result Terminate();
constexpr bool IsTerminated() const {
return m_state == State_Terminated;
}
constexpr bool IsAttachedToDebugger() const {
return m_attached_object != nullptr;
}
constexpr bool IsPermittedSvc(svc::SvcId svc_id) const {
return m_capabilities.IsPermittedSvc(svc_id);
}
constexpr bool IsPermittedInterrupt(int32_t interrupt_id) const {
return m_capabilities.IsPermittedInterrupt(interrupt_id);
}
constexpr bool IsPermittedDebug() const {
return m_capabilities.IsPermittedDebug();
}
constexpr bool CanForceDebugProd() const {
return m_capabilities.CanForceDebugProd();
}
constexpr bool CanForceDebug() const {
return m_capabilities.CanForceDebug();
}
u32 GetAllocateOption() const { return m_page_table.GetAllocateOption(); }
ThreadList &GetThreadList() { return m_thread_list; }
const ThreadList &GetThreadList() const { return m_thread_list; }
constexpr void *GetDebugObject() const { return m_attached_object; }
KProcess::State SetDebugObject(void *debug_object);
void ClearDebugObject(KProcess::State state);
bool EnterJitDebug(ams::svc::DebugEvent event, ams::svc::DebugException exception, uintptr_t param1 = 0, uintptr_t param2 = 0, uintptr_t param3 = 0, uintptr_t param4 = 0);
KEventInfo *GetJitDebugInfo();
void ClearJitDebugInfo();
bool EnterUserException();
bool LeaveUserException();
bool ReleaseUserException(KThread *thread);
KThread *GetPinnedThread(s32 core_id) const {
MESOSPHERE_ASSERT(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
return m_pinned_threads[core_id];
}
const svc::SvcAccessFlagSet &GetSvcPermissions() const { return m_capabilities.GetSvcPermissions(); }
constexpr KResourceLimit *GetResourceLimit() const { return m_resource_limit; }
bool ReserveResource(ams::svc::LimitableResource which, s64 value);
bool ReserveResource(ams::svc::LimitableResource which, s64 value, s64 timeout);
void ReleaseResource(ams::svc::LimitableResource which, s64 value);
void ReleaseResource(ams::svc::LimitableResource which, s64 value, s64 hint);
constexpr KLightLock &GetStateLock() { return m_state_lock; }
constexpr KLightLock &GetListLock() { return m_list_lock; }
constexpr KProcessPageTable &GetPageTable() { return m_page_table; }
constexpr const KProcessPageTable &GetPageTable() const { return m_page_table; }
constexpr KHandleTable &GetHandleTable() { return m_handle_table; }
constexpr const KHandleTable &GetHandleTable() const { return m_handle_table; }
KWaitObject *GetWaitObjectPointer() { return std::addressof(m_wait_object); }
size_t GetUsedUserPhysicalMemorySize() const;
size_t GetTotalUserPhysicalMemorySize() const;
size_t GetUsedNonSystemUserPhysicalMemorySize() const;
size_t GetTotalNonSystemUserPhysicalMemorySize() const;
Result AddSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size);
void RemoveSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size);
void AddIoRegion(KIoRegion *io_region);
void RemoveIoRegion(KIoRegion *io_region);
Result CreateThreadLocalRegion(KProcessAddress *out);
Result DeleteThreadLocalRegion(KProcessAddress addr);
void *GetThreadLocalRegionPointer(KProcessAddress addr);
constexpr KProcessAddress GetProcessLocalRegionAddress() const { return m_plr_address; }
constexpr void *GetProcessLocalRegionHeapAddress() const { return m_plr_heap_address; }
KThread *GetExceptionThread() const { return m_exception_thread; }
void AddCpuTime(s64 diff) { m_cpu_time += diff; }
s64 GetCpuTime() { return m_cpu_time.Load(); }
constexpr s64 GetScheduledCount() const { return m_schedule_count; }
void IncrementScheduledCount() { ++m_schedule_count; }
void IncrementRunningThreadCount();
void DecrementRunningThreadCount();
size_t GetRequiredSecureMemorySizeNonDefault() const {
return (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) ? static_cast<KSecureSystemResource *>(m_system_resource)->CalculateRequiredSecureMemorySize() : 0;
}
size_t GetRequiredSecureMemorySize() const {
return m_system_resource->IsSecureResource() ? static_cast<KSecureSystemResource *>(m_system_resource)->CalculateRequiredSecureMemorySize() : 0;
}
size_t GetTotalSystemResourceSize() const {
return (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) ? static_cast<KSecureSystemResource *>(m_system_resource)->GetSize() : 0;
}
size_t GetUsedSystemResourceSize() const {
return (!this->IsDefaultApplicationSystemResource() && m_system_resource->IsSecureResource()) ? static_cast<KSecureSystemResource *>(m_system_resource)->GetUsedSize() : 0;
}
void SetRunningThread(s32 core, KThread *thread, u64 idle_count, u64 switch_count) {
m_running_threads[core] = thread;
m_running_thread_idle_counts[core] = idle_count;
m_running_thread_switch_counts[core] = switch_count;
}
void ClearRunningThread(KThread *thread) {
for (size_t i = 0; i < util::size(m_running_threads); ++i) {
if (m_running_threads[i] == thread) {
m_running_threads[i] = nullptr;
}
}
}
const KSystemResource &GetSystemResource() const { return *m_system_resource; }
const KMemoryBlockSlabManager &GetMemoryBlockSlabManager() const { return m_system_resource->GetMemoryBlockSlabManager(); }
const KBlockInfoManager &GetBlockInfoManager() const { return m_system_resource->GetBlockInfoManager(); }
const KPageTableManager &GetPageTableManager() const { return m_system_resource->GetPageTableManager(); }
constexpr KThread *GetRunningThread(s32 core) const { return m_running_threads[core]; }
constexpr u64 GetRunningThreadIdleCount(s32 core) const { return m_running_thread_idle_counts[core]; }
constexpr u64 GetRunningThreadSwitchCount(s32 core) const { return m_running_thread_switch_counts[core]; }
void RegisterThread(KThread *thread);
void UnregisterThread(KThread *thread);
Result Run(s32 priority, size_t stack_size);
Result Reset();
void SetDebugBreak() {
if (m_state == State_RunningAttached) {
this->ChangeState(State_DebugBreak);
}
}
void SetAttached() {
if (m_state == State_DebugBreak) {
this->ChangeState(State_RunningAttached);
}
}
Result SetActivity(ams::svc::ProcessActivity activity);
void PinCurrentThread();
void UnpinCurrentThread();
void UnpinThread(KThread *thread);
void SignalConditionVariable(uintptr_t cv_key, int32_t count) {
return m_cond_var.Signal(cv_key, count);
}
Result WaitConditionVariable(KProcessAddress address, uintptr_t cv_key, u32 tag, s64 ns) {
R_RETURN(m_cond_var.Wait(address, cv_key, tag, ns));
}
Result SignalAddressArbiter(uintptr_t address, ams::svc::SignalType signal_type, s32 value, s32 count) {
R_RETURN(m_address_arbiter.SignalToAddress(address, signal_type, value, count));
}
Result WaitAddressArbiter(uintptr_t address, ams::svc::ArbitrationType arb_type, s64 value, s64 timeout) {
R_RETURN(m_address_arbiter.WaitForAddress(address, arb_type, value, timeout));
}
Result GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer<u64 *> out_thread_ids, s32 max_out_count);
static KProcess *GetProcessFromId(u64 process_id);
static Result GetProcessList(s32 *out_num_processes, ams::kern::svc::KUserPointer<u64 *> out_process_ids, s32 max_out_count);
static void Switch(KProcess *cur_process, KProcess *next_process) {
MESOSPHERE_UNUSED(cur_process);
/* Update the current page table. */
if (next_process) {
next_process->GetPageTable().Activate(next_process->GetSlabIndex(), next_process->GetProcessId());
} else {
Kernel::GetKernelPageTable().Activate();
}
}
public:
/* Overridden parent functions. */
bool IsInitialized() const { return m_is_initialized; }
static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
void Finalize();
ALWAYS_INLINE u64 GetIdImpl() const { return this->GetProcessId(); }
ALWAYS_INLINE u64 GetId() const { return this->GetIdImpl(); }
virtual bool IsSignaled() const override {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
return m_is_signaled;
}
void DoWorkerTaskImpl();
private:
void ChangeState(State new_state) {
if (m_state != new_state) {
m_state = new_state;
m_is_signaled = true;
this->NotifyAvailable();
}
}
ALWAYS_INLINE Result InitializeHandleTable(s32 size) {
/* Try to initialize the handle table. */
R_TRY(m_handle_table.Initialize(size));
/* We succeeded, so note that we did. */
m_is_handle_table_initialized = true;
R_SUCCEED();
}
ALWAYS_INLINE void FinalizeHandleTable() {
/* Finalize the table. */
m_handle_table.Finalize();
/* Note that the table is finalized. */
m_is_handle_table_initialized = false;
}
};
}
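/* Illustrative sketch (not part of the original header): how a bitmask check like */
/* CheckThreadPriority above behaves. The mask value is a made-up example; real    */
/* priority masks come from the process's parsed capabilities.                     */
#include <cstdint>
#include <cstdio>

int main() {
    /* Hypothetical capability mask permitting priorities 24 through 27. */
    const std::uint64_t priority_mask = (UINT64_C(1) << 24) | (UINT64_C(1) << 25) |
                                        (UINT64_C(1) << 26) | (UINT64_C(1) << 27);
    const int prios[] = { 16, 24, 27, 44 };
    for (int prio : prios) {
        /* Same test as CheckThreadPriority: is this priority's bit set in the mask? */
        const bool allowed = ((UINT64_C(1) << prio) & priority_mask) != 0;
        std::printf("priority %2d: %s\n", prio, allowed ? "permitted" : "rejected");
    }
    return 0;
}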
size: 20,806 | language: C++ | extension: .h | total_lines: 341 | avg_line_length: 48.143695 | max_line_length: 209 | alphanum_fraction: 0.597252 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near, redpajama/githubcode/stackv1/stackv2): all false

id: 8,783 | file_name: kern_k_thread_local_page.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_thread_local_page.hpp
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
#include <mesosphere/kern_k_page_buffer.hpp>
namespace ams::kern {
class KThread;
class KProcess;
class KThreadLocalPage : public util::IntrusiveRedBlackTreeBaseNode<KThreadLocalPage>, public KSlabAllocated<KThreadLocalPage> {
public:
static constexpr size_t RegionsPerPage = PageSize / ams::svc::ThreadLocalRegionSize;
static_assert(RegionsPerPage > 0);
private:
KProcessAddress m_virt_addr;
KProcess *m_owner;
bool m_is_region_free[RegionsPerPage];
public:
explicit KThreadLocalPage(KProcessAddress addr) : m_virt_addr(addr), m_owner(nullptr) {
for (size_t i = 0; i < util::size(m_is_region_free); i++) {
m_is_region_free[i] = true;
}
}
explicit KThreadLocalPage() : KThreadLocalPage(Null<KProcessAddress>) { /* ... */ }
constexpr ALWAYS_INLINE KProcessAddress GetAddress() const { return m_virt_addr; }
public:
using RedBlackKeyType = KProcessAddress;
static constexpr ALWAYS_INLINE RedBlackKeyType GetRedBlackKey(const RedBlackKeyType &v) { return v; }
static constexpr ALWAYS_INLINE RedBlackKeyType GetRedBlackKey(const KThreadLocalPage &v) { return v.GetAddress(); }
template<typename T> requires (std::same_as<T, KThreadLocalPage> || std::same_as<T, RedBlackKeyType>)
static constexpr ALWAYS_INLINE int Compare(const T &lhs, const KThreadLocalPage &rhs) {
const KProcessAddress lval = GetRedBlackKey(lhs);
const KProcessAddress rval = GetRedBlackKey(rhs);
if (lval < rval) {
return -1;
} else if (lval == rval) {
return 0;
} else {
return 1;
}
}
private:
constexpr ALWAYS_INLINE KProcessAddress GetRegionAddress(size_t i) {
return this->GetAddress() + i * ams::svc::ThreadLocalRegionSize;
}
constexpr ALWAYS_INLINE bool Contains(KProcessAddress addr) {
return this->GetAddress() <= addr && addr < this->GetAddress() + PageSize;
}
constexpr ALWAYS_INLINE size_t GetRegionIndex(KProcessAddress addr) {
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), ams::svc::ThreadLocalRegionSize));
MESOSPHERE_ASSERT(this->Contains(addr));
return (addr - this->GetAddress()) / ams::svc::ThreadLocalRegionSize;
}
public:
Result Initialize(KProcess *process);
Result Finalize();
KProcessAddress Reserve();
void Release(KProcessAddress addr);
void *GetPointer() const;
bool IsAllUsed() const {
for (size_t i = 0; i < RegionsPerPage; i++) {
if (m_is_region_free[i]) {
return false;
}
}
return true;
}
bool IsAllFree() const {
for (size_t i = 0; i < RegionsPerPage; i++) {
if (!m_is_region_free[i]) {
return false;
}
}
return true;
}
bool IsAnyUsed() const {
return !this->IsAllFree();
}
bool IsAnyFree() const {
return !this->IsAllUsed();
}
};
/* Miscellaneous sanity checking. */
static_assert(ams::svc::ThreadLocalRegionSize == THREAD_LOCAL_REGION_SIZE);
static_assert(AMS_OFFSETOF(ams::svc::ThreadLocalRegion, message_buffer) == THREAD_LOCAL_REGION_MESSAGE_BUFFER);
static_assert(AMS_OFFSETOF(ams::svc::ThreadLocalRegion, disable_count) == THREAD_LOCAL_REGION_DISABLE_COUNT);
static_assert(AMS_OFFSETOF(ams::svc::ThreadLocalRegion, interrupt_flag) == THREAD_LOCAL_REGION_INTERRUPT_FLAG);
}
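/* Illustrative sketch (not part of the original header): the page-to-region arithmetic */
/* KThreadLocalPage relies on (RegionsPerPage, GetRegionAddress, GetRegionIndex), shown */
/* standalone. The 4 KiB page and 0x200-byte thread-local-region sizes match the usual  */
/* values but are hardcoded assumptions for this demo.                                  */
#include <cstddef>
#include <cstdio>

int main() {
    constexpr std::size_t PageSize              = 0x1000;
    constexpr std::size_t ThreadLocalRegionSize = 0x200;
    constexpr std::size_t RegionsPerPage        = PageSize / ThreadLocalRegionSize; /* == 8 */

    const std::size_t page_base = 0x10000;
    for (std::size_t i = 0; i < RegionsPerPage; ++i) {
        /* Region address is a fixed stride from the page base; an address inside the */
        /* page maps back to its region index by simple division.                     */
        const std::size_t region_addr = page_base + i * ThreadLocalRegionSize;
        std::printf("region %zu at 0x%zx -> index %zu\n",
                    i, region_addr, (region_addr - page_base) / ThreadLocalRegionSize);
    }
    return 0;
}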
size: 4,812 | language: C++ | extension: .h | total_lines: 102 | avg_line_length: 35.705882 | max_line_length: 132 | alphanum_fraction: 0.602728 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near, redpajama/githubcode/stackv1/stackv2): all false

id: 8,784 | file_name: kern_k_page_table_base.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_page_table_base.hpp
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_page_table_impl.hpp>
#include <mesosphere/kern_k_light_lock.hpp>
#include <mesosphere/kern_k_page_group.hpp>
#include <mesosphere/kern_k_memory_manager.hpp>
#include <mesosphere/kern_k_memory_layout.hpp>
#include <mesosphere/kern_k_memory_block_manager.hpp>
namespace ams::kern {
enum DisableMergeAttribute : u8 {
DisableMergeAttribute_None = (0u << 0),
DisableMergeAttribute_DisableHead = (1u << 0),
DisableMergeAttribute_DisableHeadAndBody = (1u << 1),
DisableMergeAttribute_EnableHeadAndBody = (1u << 2),
DisableMergeAttribute_DisableTail = (1u << 3),
DisableMergeAttribute_EnableTail = (1u << 4),
DisableMergeAttribute_EnableAndMergeHeadBodyTail = (1u << 5),
DisableMergeAttribute_EnableHeadBodyTail = DisableMergeAttribute_EnableHeadAndBody | DisableMergeAttribute_EnableTail,
DisableMergeAttribute_DisableHeadBodyTail = DisableMergeAttribute_DisableHeadAndBody | DisableMergeAttribute_DisableTail,
};
struct KPageProperties {
KMemoryPermission perm;
bool io;
bool uncached;
DisableMergeAttribute disable_merge_attributes;
};
static_assert(std::is_trivial<KPageProperties>::value);
static_assert(sizeof(KPageProperties) == sizeof(u32));
class KResourceLimit;
class KSystemResource;
class KPageTableBase {
NON_COPYABLE(KPageTableBase);
NON_MOVEABLE(KPageTableBase);
public:
using TraversalEntry = KPageTableImpl::TraversalEntry;
using TraversalContext = KPageTableImpl::TraversalContext;
class MemoryRange {
private:
KPhysicalAddress m_address;
size_t m_size;
bool m_heap;
u8 m_attr;
public:
constexpr MemoryRange() : m_address(Null<KPhysicalAddress>), m_size(0), m_heap(false), m_attr(0) { /* ... */ }
void Set(KPhysicalAddress address, size_t size, bool heap, u8 attr) {
m_address = address;
m_size = size;
m_heap = heap;
m_attr = attr;
}
constexpr KPhysicalAddress GetAddress() const { return m_address; }
constexpr size_t GetSize() const { return m_size; }
constexpr bool IsHeap() const { return m_heap; }
constexpr u8 GetAttribute() const { return m_attr; }
void Open();
void Close();
};
protected:
enum MemoryFillValue {
MemoryFillValue_Zero = 0,
MemoryFillValue_Stack = 'X',
MemoryFillValue_Ipc = 'Y',
MemoryFillValue_Heap = 'Z',
};
enum RegionType {
RegionType_KernelMap = 0,
RegionType_Stack = 1,
RegionType_Alias = 2,
RegionType_Heap = 3,
RegionType_Count,
};
enum OperationType {
OperationType_Map = 0,
OperationType_MapGroup = 1,
OperationType_MapFirstGroup = 2,
OperationType_Unmap = 3,
OperationType_ChangePermissions = 4,
OperationType_ChangePermissionsAndRefresh = 5,
OperationType_ChangePermissionsAndRefreshAndFlush = 6,
OperationType_Separate = 7,
};
static constexpr size_t MaxPhysicalMapAlignment = 1_GB;
static constexpr size_t RegionAlignment = 2_MB;
static_assert(RegionAlignment == KernelAslrAlignment);
struct PageLinkedList {
private:
struct Node {
Node *m_next;
u8 m_buffer[PageSize - sizeof(Node *)];
};
static_assert(util::is_pod<Node>::value);
private:
Node *m_root;
public:
constexpr PageLinkedList() : m_root(nullptr) { /* ... */ }
void Push(Node *n) {
MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast<uintptr_t>(n), PageSize));
n->m_next = m_root;
m_root = n;
}
void Push(KVirtualAddress addr) {
this->Push(GetPointer<Node>(addr));
}
Node *Peek() const { return m_root; }
Node *Pop() {
Node * const r = m_root;
m_root = r->m_next;
r->m_next = nullptr;
return r;
}
};
static_assert(std::is_trivially_destructible<PageLinkedList>::value);
static constexpr u32 DefaultMemoryIgnoreAttr = KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared;
static constexpr size_t GetAddressSpaceWidth(ams::svc::CreateProcessFlag as_type) {
switch (static_cast<ams::svc::CreateProcessFlag>(as_type & ams::svc::CreateProcessFlag_AddressSpaceMask)) {
case ams::svc::CreateProcessFlag_AddressSpace64Bit:
return 39;
case ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated:
return 36;
case ams::svc::CreateProcessFlag_AddressSpace32Bit:
case ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias:
return 32;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
private:
class KScopedPageTableUpdater {
private:
KPageTableBase *m_pt;
PageLinkedList m_ll;
public:
ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase *pt) : m_pt(pt), m_ll() { /* ... */ }
ALWAYS_INLINE explicit KScopedPageTableUpdater(KPageTableBase &pt) : KScopedPageTableUpdater(std::addressof(pt)) { /* ... */ }
ALWAYS_INLINE ~KScopedPageTableUpdater() { m_pt->FinalizeUpdate(this->GetPageList()); }
PageLinkedList *GetPageList() { return std::addressof(m_ll); }
};
private:
KProcessAddress m_address_space_start;
KProcessAddress m_address_space_end;
KProcessAddress m_region_starts[RegionType_Count];
KProcessAddress m_region_ends[RegionType_Count];
KProcessAddress m_current_heap_end;
KProcessAddress m_alias_code_region_start;
KProcessAddress m_alias_code_region_end;
KProcessAddress m_code_region_start;
KProcessAddress m_code_region_end;
size_t m_max_heap_size;
size_t m_mapped_physical_memory_size;
size_t m_mapped_unsafe_physical_memory;
size_t m_mapped_insecure_memory;
size_t m_mapped_ipc_server_memory;
size_t m_alias_region_extra_size;
mutable KLightLock m_general_lock;
mutable KLightLock m_map_physical_memory_lock;
KLightLock m_device_map_lock;
KPageTableImpl m_impl;
KMemoryBlockManager m_memory_block_manager;
u32 m_allocate_option;
u32 m_address_space_width;
bool m_is_kernel;
bool m_enable_aslr;
bool m_enable_device_address_space_merge;
KMemoryBlockSlabManager *m_memory_block_slab_manager;
KBlockInfoManager *m_block_info_manager;
KResourceLimit *m_resource_limit;
const KMemoryRegion *m_cached_physical_linear_region;
const KMemoryRegion *m_cached_physical_heap_region;
MemoryFillValue m_heap_fill_value;
MemoryFillValue m_ipc_fill_value;
MemoryFillValue m_stack_fill_value;
public:
constexpr explicit KPageTableBase(util::ConstantInitializeTag)
: m_address_space_start(Null<KProcessAddress>), m_address_space_end(Null<KProcessAddress>),
m_region_starts{Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>},
m_region_ends{Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>, Null<KProcessAddress>},
m_current_heap_end(Null<KProcessAddress>), m_alias_code_region_start(Null<KProcessAddress>),
m_alias_code_region_end(Null<KProcessAddress>), m_code_region_start(Null<KProcessAddress>), m_code_region_end(Null<KProcessAddress>),
m_max_heap_size(), m_mapped_physical_memory_size(), m_mapped_unsafe_physical_memory(), m_mapped_insecure_memory(), m_mapped_ipc_server_memory(), m_alias_region_extra_size(),
m_general_lock(), m_map_physical_memory_lock(), m_device_map_lock(), m_impl(util::ConstantInitialize), m_memory_block_manager(util::ConstantInitialize),
m_allocate_option(), m_address_space_width(), m_is_kernel(), m_enable_aslr(), m_enable_device_address_space_merge(),
m_memory_block_slab_manager(), m_block_info_manager(), m_resource_limit(), m_cached_physical_linear_region(), m_cached_physical_heap_region(),
m_heap_fill_value(), m_ipc_fill_value(), m_stack_fill_value()
{
/* ... */
}
explicit KPageTableBase() { /* ... */ }
NOINLINE Result InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit);
void Finalize();
constexpr bool IsKernel() const { return m_is_kernel; }
constexpr bool IsAslrEnabled() const { return m_enable_aslr; }
constexpr bool Contains(KProcessAddress addr) const {
return m_address_space_start <= addr && addr <= m_address_space_end - 1;
}
constexpr bool Contains(KProcessAddress addr, size_t size) const {
return m_address_space_start <= addr && addr < addr + size && addr + size - 1 <= m_address_space_end - 1;
}
constexpr bool IsInAliasRegion(KProcessAddress addr, size_t size) const {
return this->Contains(addr, size) && m_region_starts[RegionType_Alias] <= addr && addr + size - 1 <= m_region_ends[RegionType_Alias] - 1;
}
bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const {
/* Even though Unsafe physical memory is KMemoryState_Normal, it must be mapped inside the alias code region. */
return this->CanContain(addr, size, ams::svc::MemoryState_AliasCode);
}
ALWAYS_INLINE KScopedLightLock AcquireDeviceMapLock() {
return KScopedLightLock(m_device_map_lock);
}
KProcessAddress GetRegionAddress(ams::svc::MemoryState state) const;
size_t GetRegionSize(ams::svc::MemoryState state) const;
bool CanContain(KProcessAddress addr, size_t size, ams::svc::MemoryState state) const;
ALWAYS_INLINE KProcessAddress GetRegionAddress(KMemoryState state) const { return this->GetRegionAddress(static_cast<ams::svc::MemoryState>(state & KMemoryState_Mask)); }
ALWAYS_INLINE size_t GetRegionSize(KMemoryState state) const { return this->GetRegionSize(static_cast<ams::svc::MemoryState>(state & KMemoryState_Mask)); }
ALWAYS_INLINE bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return this->CanContain(addr, size, static_cast<ams::svc::MemoryState>(state & KMemoryState_Mask)); }
protected:
/* NOTE: These three functions (Operate, Operate, FinalizeUpdate) are virtual functions */
/* in Nintendo's kernel. We devirtualize them, since KPageTable is the only derived */
/* class, and this avoids unnecessary virtual function calls. See "kern_select_page_table.hpp" */
/* for definition of these functions. */
Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll);
Result Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll);
void FinalizeUpdate(PageLinkedList *page_list);
ALWAYS_INLINE KPageTableImpl &GetImpl() { return m_impl; }
ALWAYS_INLINE const KPageTableImpl &GetImpl() const { return m_impl; }
ALWAYS_INLINE bool IsLockedByCurrentThread() const { return m_general_lock.IsLockedByCurrentThread(); }
ALWAYS_INLINE bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
return KMemoryLayout::IsLinearMappedPhysicalAddress(m_cached_physical_linear_region, phys_addr);
}
ALWAYS_INLINE bool IsLinearMappedPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
return KMemoryLayout::IsLinearMappedPhysicalAddress(m_cached_physical_linear_region, phys_addr, size);
}
ALWAYS_INLINE bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
}
ALWAYS_INLINE bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr, size);
}
ALWAYS_INLINE bool IsHeapPhysicalAddressForFinalize(KPhysicalAddress phys_addr) {
MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
return KMemoryLayout::IsHeapPhysicalAddress(m_cached_physical_heap_region, phys_addr);
}
ALWAYS_INLINE bool ContainsPages(KProcessAddress addr, size_t num_pages) const {
return (m_address_space_start <= addr) && (num_pages <= (m_address_space_end - m_address_space_start) / PageSize) && (addr + num_pages * PageSize - 1 <= m_address_space_end - 1);
}
private:
constexpr size_t GetNumGuardPages() const { return this->IsKernel() ? 1 : 4; }
ALWAYS_INLINE KProcessAddress FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const;
Result CheckMemoryStateContiguous(size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const;
Result CheckMemoryStateContiguous(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
R_RETURN(this->CheckMemoryStateContiguous(nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr));
}
Result CheckMemoryState(KMemoryBlockManager::const_iterator it, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const;
Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const;
Result CheckMemoryState(size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const {
R_RETURN(this->CheckMemoryState(nullptr, nullptr, nullptr, out_blocks_needed, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
}
Result CheckMemoryState(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr = DefaultMemoryIgnoreAttr) const {
R_RETURN(this->CheckMemoryState(nullptr, addr, size, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
}
bool CanReadWriteDebugMemory(KProcessAddress addr, size_t size, bool force_debug_prod);
Result LockMemoryAndOpen(KPageGroup *out_pg, KPhysicalAddress *out_paddr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr);
Result UnlockMemory(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr, const KPageGroup *pg);
Result QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const;
Result QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, ams::svc::MemoryState state) const;
Result AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties &properties);
Result MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll);
void RemapPageGroup(PageLinkedList *page_list, KProcessAddress address, size_t size, const KPageGroup &pg);
Result MakePageGroup(KPageGroup &pg, KProcessAddress addr, size_t num_pages);
bool IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages);
Result GetContiguousMemoryRangeWithState(MemoryRange *out, KProcessAddress address, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr);
NOINLINE Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
Result MapIoImpl(KProcessAddress *out, PageLinkedList *page_list, KPhysicalAddress phys_addr, size_t size, KMemoryState state, KMemoryPermission perm);
Result ReadIoMemoryImpl(void *buffer, KPhysicalAddress phys_addr, size_t size, KMemoryState state);
Result WriteIoMemoryImpl(KPhysicalAddress phys_addr, const void *buffer, size_t size, KMemoryState state);
Result SetupForIpcClient(PageLinkedList *page_list, size_t *out_blocks_needed, KProcessAddress address, size_t size, KMemoryPermission test_perm, KMemoryState dst_state);
Result SetupForIpcServer(KProcessAddress *out_addr, size_t size, KProcessAddress src_addr, KMemoryPermission test_perm, KMemoryState dst_state, KPageTableBase &src_page_table, bool send);
void CleanupForIpcClientOnServerSetupFailure(PageLinkedList *page_list, KProcessAddress address, size_t size, KMemoryPermission prot_perm);
size_t GetSize(KMemoryState state) const;
ALWAYS_INLINE bool GetPhysicalAddressLocked(KPhysicalAddress *out, KProcessAddress virt_addr) const {
/* Validate pre-conditions. */
MESOSPHERE_AUDIT(this->IsLockedByCurrentThread());
return this->GetImpl().GetPhysicalAddress(out, virt_addr);
}
public:
bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const {
/* Validate pre-conditions. */
MESOSPHERE_AUDIT(!this->IsLockedByCurrentThread());
/* Acquire exclusive access to the table while doing address translation. */
KScopedLightLock lk(m_general_lock);
return this->GetPhysicalAddressLocked(out, virt_addr);
}
KBlockInfoManager *GetBlockInfoManager() const { return m_block_info_manager; }
Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm);
Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm);
Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr);
Result SetHeapSize(KProcessAddress *out, size_t size);
Result SetMaxHeapSize(size_t size);
Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const;
Result QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const;
Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { R_RETURN(this->QueryMappingImpl(out, address, size, ams::svc::MemoryState_Static)); }
Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const { R_RETURN(this->QueryMappingImpl(out, address, size, ams::svc::MemoryState_Io)); }
Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size);
Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, ams::svc::MemoryMapping mapping, ams::svc::MemoryPermission perm);
Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, ams::svc::MemoryMapping mapping);
Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm);
Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm);
Result MapInsecurePhysicalMemory(KProcessAddress address, size_t size);
Result UnmapInsecurePhysicalMemory(KProcessAddress address, size_t size);
Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, region_start, region_num_pages, state, perm));
}
Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
R_RETURN(this->MapPages(out_addr, num_pages, alignment, phys_addr, true, this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, state, perm));
}
Result MapPages(KProcessAddress *out_addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
R_RETURN(this->MapPages(out_addr, num_pages, PageSize, Null<KPhysicalAddress>, false, this->GetRegionAddress(state), this->GetRegionSize(state) / PageSize, state, perm));
}
Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm);
Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state);
Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm);
Result MapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm);
Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state);
Result MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr);
Result InvalidateProcessDataCache(KProcessAddress address, size_t size);
Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size);
Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size, bool force_debug_prod);
Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size, KMemoryState state);
Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size);
Result WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size, KMemoryState state);
Result LockForMapDeviceAddressSpace(bool *out_is_io, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned, bool check_heap);
Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap);
Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size);
Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size);
Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned);
Result OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange *out, KProcessAddress address, size_t size);
Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size);
Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size);
Result LockForTransferMemory(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm);
Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup &pg);
Result LockForCodeMemory(KPageGroup *out, KProcessAddress address, size_t size);
Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup &pg);
Result OpenMemoryRangeForProcessCacheOperation(MemoryRange *out, KProcessAddress address, size_t size);
Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr);
Result CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr);
Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr);
Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr);
Result CopyMemoryFromHeapToHeap(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr);
Result CopyMemoryFromHeapToHeapWithoutCheckDestination(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr);
Result SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KPageTableBase &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send);
Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state);
Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state);
Result MapPhysicalMemory(KProcessAddress address, size_t size);
Result UnmapPhysicalMemory(KProcessAddress address, size_t size);
Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size);
Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase &src_pt, KProcessAddress src_address);
void DumpMemoryBlocksLocked() const {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
m_memory_block_manager.DumpBlocks();
}
void DumpMemoryBlocks() const {
KScopedLightLock lk(m_general_lock);
this->DumpMemoryBlocksLocked();
}
void DumpPageTable() const {
KScopedLightLock lk(m_general_lock);
this->GetImpl().Dump(GetInteger(m_address_space_start), m_address_space_end - m_address_space_start);
}
size_t CountPageTables() const {
KScopedLightLock lk(m_general_lock);
return this->GetImpl().CountPageTables();
}
public:
KProcessAddress GetAddressSpaceStart() const { return m_address_space_start; }
KProcessAddress GetHeapRegionStart() const { return m_region_starts[RegionType_Heap]; }
KProcessAddress GetAliasRegionStart() const { return m_region_starts[RegionType_Alias]; }
KProcessAddress GetStackRegionStart() const { return m_region_starts[RegionType_Stack]; }
KProcessAddress GetKernelMapRegionStart() const { return m_region_starts[RegionType_KernelMap]; }
KProcessAddress GetAliasCodeRegionStart() const { return m_alias_code_region_start; }
size_t GetAddressSpaceSize() const { return m_address_space_end - m_address_space_start; }
size_t GetHeapRegionSize() const { return m_region_ends[RegionType_Heap] - m_region_starts[RegionType_Heap]; }
size_t GetAliasRegionSize() const { return m_region_ends[RegionType_Alias] - m_region_starts[RegionType_Alias]; }
size_t GetStackRegionSize() const { return m_region_ends[RegionType_Stack] - m_region_starts[RegionType_Stack]; }
size_t GetKernelMapRegionSize() const { return m_region_ends[RegionType_KernelMap] - m_region_starts[RegionType_KernelMap]; }
size_t GetAliasCodeRegionSize() const { return m_alias_code_region_end - m_alias_code_region_start; }
size_t GetAliasRegionExtraSize() const { return m_alias_region_extra_size; }
size_t GetNormalMemorySize() const {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
return (m_current_heap_end - m_region_starts[RegionType_Heap]) + m_mapped_physical_memory_size;
}
size_t GetCodeSize() const;
size_t GetCodeDataSize() const;
size_t GetAliasCodeSize() const;
size_t GetAliasCodeDataSize() const;
u32 GetAllocateOption() const { return m_allocate_option; }
public:
static ALWAYS_INLINE KVirtualAddress GetLinearMappedVirtualAddress(KPhysicalAddress addr) {
return KMemoryLayout::GetLinearVirtualAddress(addr);
}
static ALWAYS_INLINE KPhysicalAddress GetLinearMappedPhysicalAddress(KVirtualAddress addr) {
return KMemoryLayout::GetLinearPhysicalAddress(addr);
}
static ALWAYS_INLINE KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) {
return GetLinearMappedVirtualAddress(addr);
}
static ALWAYS_INLINE KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) {
return GetLinearMappedPhysicalAddress(addr);
}
static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
return GetLinearMappedVirtualAddress(addr);
}
static ALWAYS_INLINE KPhysicalAddress GetPageTablePhysicalAddress(KVirtualAddress addr) {
return GetLinearMappedPhysicalAddress(addr);
}
};
}
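/* Illustrative sketch (not part of the original header): why Contains(addr, size)   */
/* above is written with "addr < addr + size" and "- 1" bounds. A naive              */
/* "addr + size <= end" check can wrap on overflow and accept a bogus range; the     */
/* original form rejects wrapping ranges. Address values below are arbitrary.        */
#include <cstdint>
#include <cstdio>

static bool Contains(std::uint64_t start, std::uint64_t end, std::uint64_t addr, std::uint64_t size) {
    /* Mirrors the member function: requires a non-wrapping, fully enclosed range. */
    return start <= addr && addr < addr + size && addr + size - 1 <= end - 1;
}

int main() {
    const std::uint64_t as_start = 0x8000000, as_end = 0x10000000;
    std::printf("%d\n", Contains(as_start, as_end, 0x9000000, 0x1000));               /* 1: inside            */
    std::printf("%d\n", Contains(as_start, as_end, 0xFFFFFFFFFFFFF000ull, 0x2000));   /* 0: addr + size wraps */
    std::printf("%d\n", Contains(as_start, as_end, 0x9000000, ~0ull));                /* 0: size overflows    */
    return 0;
}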
size: 34,556 | language: C++ | extension: .h | total_lines: 440 | avg_line_length: 64.415909 | max_line_length: 366 | alphanum_fraction: 0.666833 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near, redpajama/githubcode/stackv1/stackv2): all false

id: 8,785 | file_name: kern_k_debug_base.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_debug_base.hpp
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_synchronization_object.hpp>
#include <mesosphere/kern_k_process.hpp>
#include <mesosphere/kern_k_event_info.hpp>
#include <mesosphere/kern_k_light_lock.hpp>
namespace ams::kern {
class KDebugBase : public KSynchronizationObject {
protected:
using DebugEventList = util::IntrusiveListBaseTraits<KEventInfo>::ListType;
private:
DebugEventList m_event_info_list;
u32 m_continue_flags;
KSharedAutoObject<KProcess> m_process_holder;
KLightLock m_lock;
KProcess::State m_old_process_state;
bool m_is_attached;
bool m_is_force_debug_prod;
public:
explicit KDebugBase() { /* ... */ }
protected:
bool Is64Bit() const;
public:
void Initialize();
void Finalize();
Result Attach(KProcess *process);
Result BreakProcess();
Result TerminateProcess();
Result ContinueDebug(const u32 flags, const u64 *thread_ids, size_t num_thread_ids);
Result QueryMemoryInfo(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, KProcessAddress address);
Result ReadMemory(KProcessAddress buffer, KProcessAddress address, size_t size);
Result WriteMemory(KProcessAddress buffer, KProcessAddress address, size_t size);
Result GetThreadContext(ams::svc::ThreadContext *out, u64 thread_id, u32 context_flags);
Result SetThreadContext(const ams::svc::ThreadContext &ctx, u64 thread_id, u32 context_flags);
Result GetRunningThreadInfo(ams::svc::LastThreadContext *out_context, u64 *out_thread_id);
Result GetDebugEventInfo(ams::svc::lp64::DebugEventInfo *out);
Result GetDebugEventInfo(ams::svc::ilp32::DebugEventInfo *out);
ALWAYS_INLINE bool IsAttached() const {
return m_is_attached;
}
ALWAYS_INLINE bool IsForceDebugProd() const {
return m_is_force_debug_prod;
}
ALWAYS_INLINE bool OpenProcess() {
return m_process_holder.Open();
}
ALWAYS_INLINE void CloseProcess() {
return m_process_holder.Close();
}
ALWAYS_INLINE KProcess *GetProcessUnsafe() const {
return m_process_holder.Get();
}
private:
void PushDebugEvent(ams::svc::DebugEvent event, const uintptr_t *params, size_t num_params);
void EnqueueDebugEventInfo(KEventInfo *info);
template<typename T> requires (std::same_as<T, ams::svc::lp64::DebugEventInfo> || std::same_as<T, ams::svc::ilp32::DebugEventInfo>)
Result GetDebugEventInfoImpl(T *out);
public:
virtual bool IsSignaled() const override;
private:
/* NOTE: This is public/virtual override in Nintendo's kernel. */
void OnFinalizeSynchronizationObject();
private:
static Result ProcessDebugEvent(ams::svc::DebugEvent event, const uintptr_t *params, size_t num_params);
public:
static Result OnDebugEvent(ams::svc::DebugEvent event, const uintptr_t *params, size_t num_params);
static Result OnExitProcess(KProcess *process);
static Result OnTerminateProcess(KProcess *process);
static Result OnExitThread(KThread *thread);
static KEventInfo *CreateDebugEvent(ams::svc::DebugEvent event, u64 thread_id, const uintptr_t *params, size_t num_params);
};
}
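/* Illustrative sketch (not part of the original header): the Open/Close pairing that  */
/* OpenProcess/CloseProcess/GetProcessUnsafe imply. This standalone refcount model is  */
/* an assumption about the usage style, showing why an "unsafe" getter is bracketed by */
/* opening and closing the holder: Open fails once the last reference is gone.         */
#include <cstdio>

struct Holder {
    int m_refs = 1;
    bool Open()  { if (m_refs == 0) { return false; } ++m_refs; return true; }
    void Close() { --m_refs; }
};

int main() {
    Holder process_holder;
    if (process_holder.Open()) {          /* take a reference before touching the object */
        std::printf("refs while in use: %d\n", process_holder.m_refs); /* 2 */
        process_holder.Close();           /* release it afterwards */
    }
    return 0;
}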
size: 4,317 | language: C++ | extension: .h | total_lines: 87 | avg_line_length: 39.873563 | max_line_length: 143 | alphanum_fraction: 0.656465 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near, redpajama/githubcode/stackv1/stackv2): all false

id: 8,786 | file_name: kern_k_timer_task.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_timer_task.hpp
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
namespace ams::kern {
class KTimerTask : public util::IntrusiveRedBlackTreeBaseNode<KTimerTask> {
private:
s64 m_time;
public:
static constexpr ALWAYS_INLINE int Compare(const KTimerTask &lhs, const KTimerTask &rhs) {
if (lhs.GetTime() < rhs.GetTime()) {
return -1;
} else {
return 1;
}
}
public:
constexpr explicit ALWAYS_INLINE KTimerTask(util::ConstantInitializeTag) : util::IntrusiveRedBlackTreeBaseNode<KTimerTask>(util::ConstantInitialize), m_time(0) { /* ... */ }
explicit ALWAYS_INLINE KTimerTask() : m_time(0) { /* ... */ }
constexpr ALWAYS_INLINE void SetTime(s64 t) {
m_time = t;
}
constexpr ALWAYS_INLINE s64 GetTime() const {
return m_time;
}
/* NOTE: This is virtual in Nintendo's kernel. Prior to 13.0.0, KWaitObject was also a TimerTask; this is no longer the case. */
/* Since this is now KThread exclusive, we have devirtualized (see inline declaration for this inside kern_kthread.hpp). */
void OnTimer();
};
}
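/* Illustrative note (not part of the original header): Compare above never returns 0, */
/* so with the usual binary-search-tree insertion two tasks with equal times are both  */
/* kept, the later insertion ordering after the earlier one. This standalone check     */
/* models that with std::multiset and the same less-than ordering.                     */
#include <cstdio>
#include <set>

struct Task { long long time; int id; };

int main() {
    auto by_time = [](const Task &lhs, const Task &rhs) { return lhs.time < rhs.time; };
    std::multiset<Task, decltype(by_time)> queue(by_time);
    queue.insert({ 100, 1 });
    queue.insert({ 100, 2 }); /* same expiry time: kept alongside, not replaced */
    queue.insert({  50, 3 });
    for (const Task &t : queue) {
        std::printf("t=%lld id=%d\n", t.time, t.id); /* 50/3, then 100/1, then 100/2 */
    }
    return 0;
}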
size: 1,918 | language: C++ | extension: .h | total_lines: 43 | avg_line_length: 36.255814 | max_line_length: 185 | alphanum_fraction: 0.636704 | repo_name: Atmosphere-NX/Atmosphere | repo_stars: 14,324 | repo_forks: 1,207 | repo_open_issues: 54 | repo_license: GPL-2.0 | repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | duplicate flags (exact/near, redpajama/githubcode/stackv1/stackv2): all false

id: 8,787 | file_name: kern_debug_log.hpp | file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_debug_log.hpp
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/svc/kern_svc_k_user_pointer.hpp>
namespace ams::kern {
class KDebugLog {
private:
static NOINLINE void VSNPrintf(char *dst, const size_t dst_size, const char *format, ::std::va_list vl);
public:
static NOINLINE void Initialize();
static NOINLINE void Printf(const char *format, ...) __attribute__((format(printf, 1, 2)));
static NOINLINE void VPrintf(const char *format, ::std::va_list vl);
static NOINLINE void LogException(const char *str);
static NOINLINE Result PrintUserString(ams::kern::svc::KUserPointer<const char *> user_str, size_t len);
            /* Functionality for preserving the debug log state across sleep. */
static NOINLINE void Save();
static NOINLINE void Restore();
};
}
#ifndef MESOSPHERE_DEBUG_LOG_SELECTED
#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
#define MESOSPHERE_DEBUG_LOG_USE_UART
#elif defined(ATMOSPHERE_BOARD_QEMU_VIRT)
#define MESOSPHERE_DEBUG_LOG_USE_SEMIHOSTING
#else
#error "Unknown board for Default Debug Log Source"
#endif
#define MESOSPHERE_DEBUG_LOG_SELECTED
#endif
#define MESOSPHERE_EXCEPTION_LOG(str) ::ams::kern::KDebugLog::LogException(str)
#define MESOSPHERE_RELEASE_LOG(fmt, ...) ::ams::kern::KDebugLog::Printf((fmt), ## __VA_ARGS__)
#define MESOSPHERE_RELEASE_VLOG(fmt, vl) ::ams::kern::KDebugLog::VPrintf((fmt), (vl))
#ifdef MESOSPHERE_ENABLE_DEBUG_PRINT
#define MESOSPHERE_LOG(fmt, ...) MESOSPHERE_RELEASE_LOG((fmt), ## __VA_ARGS__)
#define MESOSPHERE_VLOG(fmt, vl) MESOSPHERE_RELEASE_VLOG((fmt), (vl))
#else
#define MESOSPHERE_LOG(fmt, ...) do { MESOSPHERE_UNUSED(fmt); MESOSPHERE_UNUSED(__VA_ARGS__); } while (0)
#define MESOSPHERE_VLOG(fmt, vl) do { MESOSPHERE_UNUSED(fmt); MESOSPHERE_UNUSED(vl); } while (0)
#endif
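/* Illustrative usage sketch, not part of the original header: typical call sites for the macros above. */
/* The format arguments shown are hypothetical. */
/*
    MESOSPHERE_RELEASE_LOG("Initialized memory layout (dram size = 0x%zx)\n", dram_size);
    MESOSPHERE_LOG("scheduler: core %d picked thread %lu\n", core_id, thread_id);   // compiled out unless MESOSPHERE_ENABLE_DEBUG_PRINT is defined
    MESOSPHERE_EXCEPTION_LOG("Data abort in kernel\n");
*/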
| 2,532
|
C++
|
.h
| 53
| 42.924528
| 116
| 0.70426
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,788
|
kern_k_shared_memory.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_auto_object.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
#include <mesosphere/kern_select_page_table.hpp>
namespace ams::kern {
class KProcess;
class KResourceLimit;
class KSharedMemory final : public KAutoObjectWithSlabHeapAndContainer<KSharedMemory, KAutoObjectWithList> {
MESOSPHERE_AUTOOBJECT_TRAITS(KSharedMemory, KAutoObject);
private:
KPageGroup m_page_group;
KResourceLimit *m_resource_limit;
u64 m_owner_process_id;
ams::svc::MemoryPermission m_owner_perm;
ams::svc::MemoryPermission m_remote_perm;
bool m_is_initialized;
public:
explicit KSharedMemory()
: m_page_group(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer()), m_resource_limit(nullptr), m_owner_process_id(std::numeric_limits<u64>::max()),
m_owner_perm(ams::svc::MemoryPermission_None), m_remote_perm(ams::svc::MemoryPermission_None), m_is_initialized(false)
{
/* ... */
}
Result Initialize(KProcess *owner, size_t size, ams::svc::MemoryPermission own_perm, ams::svc::MemoryPermission rem_perm);
void Finalize();
bool IsInitialized() const { return m_is_initialized; }
static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
Result Map(KProcessPageTable *table, KProcessAddress address, size_t size, KProcess *process, ams::svc::MemoryPermission map_perm);
Result Unmap(KProcessPageTable *table, KProcessAddress address, size_t size, KProcess *process);
u64 GetOwnerProcessId() const { return m_owner_process_id; }
size_t GetSize() const { return m_page_group.GetNumPages() * PageSize; }
};
}
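/* Illustrative usage sketch, not part of the original header: a simplified creation path. */
/* The call site and variable names are assumptions; error handling mirrors the R_* macros used elsewhere. */
/*
    // The owner permission governs mappings in the creating process, the remote permission
    // mappings in every other process that maps the object.
    KSharedMemory *shmem = KSharedMemory::Create();
    R_UNLESS(shmem != nullptr, svc::ResultOutOfResource());

    R_TRY(shmem->Initialize(GetCurrentProcessPointer(), size,
                            ams::svc::MemoryPermission_ReadWrite,
                            ams::svc::MemoryPermission_Read));
*/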
| 2,518
|
C++
|
.h
| 49
| 43.938776
| 175
| 0.688338
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,789
|
kern_slab_helpers.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_slab_helpers.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_auto_object.hpp>
#include <mesosphere/kern_k_slab_heap.hpp>
#include <mesosphere/kern_k_auto_object_container.hpp>
#include <mesosphere/kern_k_unused_slab_memory.hpp>
namespace ams::kern {
template<class Derived, bool SupportDynamicExpansion = false>
class KSlabAllocated {
private:
static constinit inline KSlabHeap<Derived, SupportDynamicExpansion> s_slab_heap;
public:
constexpr KSlabAllocated() = default;
size_t GetSlabIndex() const {
return s_slab_heap.GetIndex(static_cast<const Derived *>(this));
}
public:
static void InitializeSlabHeap(void *memory, size_t memory_size) {
s_slab_heap.Initialize(memory, memory_size);
}
static Derived *Allocate() {
return s_slab_heap.Allocate();
}
static void Free(Derived *obj) {
s_slab_heap.Free(obj);
}
template<bool Enable = SupportDynamicExpansion, typename = typename std::enable_if<Enable>::type>
static Derived *AllocateFromUnusedSlabMemory() {
static_assert(Enable == SupportDynamicExpansion);
Derived * const obj = GetPointer<Derived>(AllocateUnusedSlabMemory(sizeof(Derived), alignof(Derived)));
if (AMS_LIKELY(obj != nullptr)) {
std::construct_at(obj);
}
return obj;
}
static size_t GetObjectSize() { return s_slab_heap.GetObjectSize(); }
static size_t GetSlabHeapSize() { return s_slab_heap.GetSlabHeapSize(); }
static size_t GetPeakIndex() { return s_slab_heap.GetPeakIndex(); }
static uintptr_t GetSlabHeapAddress() { return s_slab_heap.GetSlabHeapAddress(); }
static size_t GetNumRemaining() { return s_slab_heap.GetNumRemaining(); }
};
template<typename Derived, typename Base, bool SupportDynamicExpansion> requires std::derived_from<Base, KAutoObject>
class KAutoObjectWithSlabHeapBase : public Base {
private:
template<typename, typename, bool> friend class KAutoObjectWithSlabHeap;
template<typename, typename, bool> friend class KAutoObjectWithSlabHeapAndContainer;
private:
static constinit inline KSlabHeap<Derived, SupportDynamicExpansion> s_slab_heap;
private:
static ALWAYS_INLINE Derived *Allocate() {
return s_slab_heap.Allocate();
}
static ALWAYS_INLINE void Free(Derived *obj) {
s_slab_heap.Free(obj);
}
private:
static ALWAYS_INLINE bool IsInitialized(const Derived *obj) {
if constexpr (requires { { obj->IsInitialized() } -> std::same_as<bool>; }) {
return obj->IsInitialized();
} else {
return true;
}
}
static ALWAYS_INLINE uintptr_t GetPostDestroyArgument(const Derived *obj) {
if constexpr (requires { { obj->GetPostDestroyArgument() } -> std::same_as<uintptr_t>; }) {
return obj->GetPostDestroyArgument();
} else {
return 0;
}
}
public:
constexpr explicit KAutoObjectWithSlabHeapBase(util::ConstantInitializeTag) : Base(util::ConstantInitialize) { /* ... */ }
explicit KAutoObjectWithSlabHeapBase() { /* ... */ }
/* NOTE: IsInitialized() and GetPostDestroyArgument() are virtual functions declared in this class, */
/* in Nintendo's kernel. We fully devirtualize them, as Destroy() is the only user of them. */
/* We also devirtualize KAutoObject::Finalize(), which is only used by this function in Nintendo's kernel. */
virtual void Destroy() override final {
Derived * const derived = static_cast<Derived *>(this);
if (KAutoObjectWithSlabHeapBase<Derived, Base, SupportDynamicExpansion>::IsInitialized(derived)) {
Derived::PreFinalize(derived);
const uintptr_t arg = KAutoObjectWithSlabHeapBase<Derived, Base, SupportDynamicExpansion>::GetPostDestroyArgument(derived);
derived->Finalize();
KAutoObjectWithSlabHeapBase<Derived, Base, SupportDynamicExpansion>::Free(derived);
Derived::PostDestroy(arg);
} else {
KAutoObjectWithSlabHeapBase<Derived, Base, SupportDynamicExpansion>::Free(derived);
}
}
size_t GetSlabIndex() const {
return s_slab_heap.GetObjectIndex(static_cast<const Derived *>(this));
}
public:
static void InitializeSlabHeap(void *memory, size_t memory_size) {
s_slab_heap.Initialize(memory, memory_size);
}
static Derived *Create() {
Derived *obj = Allocate();
if (AMS_LIKELY(obj != nullptr)) {
KAutoObject::Create<Derived>(obj);
}
return obj;
}
template<bool Enable = SupportDynamicExpansion, typename = typename std::enable_if<Enable>::type>
static Derived *CreateFromUnusedSlabMemory() {
static_assert(Enable == SupportDynamicExpansion);
Derived * const obj = GetPointer<Derived>(AllocateUnusedSlabMemory(sizeof(Derived), alignof(Derived)));
if (AMS_LIKELY(obj != nullptr)) {
std::construct_at(obj);
KAutoObject::Create<Derived>(obj);
}
return obj;
}
static size_t GetObjectSize() { return s_slab_heap.GetObjectSize(); }
static size_t GetSlabHeapSize() { return s_slab_heap.GetSlabHeapSize(); }
static size_t GetPeakIndex() { return s_slab_heap.GetPeakIndex(); }
static uintptr_t GetSlabHeapAddress() { return s_slab_heap.GetSlabHeapAddress(); }
static size_t GetNumRemaining() { return s_slab_heap.GetNumRemaining(); }
};
template<typename Derived, typename Base, bool SupportDynamicExpansion = false>
class KAutoObjectWithSlabHeap : public KAutoObjectWithSlabHeapBase<Derived, Base, SupportDynamicExpansion> {
public:
constexpr explicit KAutoObjectWithSlabHeap(util::ConstantInitializeTag) : KAutoObjectWithSlabHeapBase<Derived, Base, SupportDynamicExpansion>(util::ConstantInitialize) { /* ... */ }
explicit KAutoObjectWithSlabHeap() { /* ... */ }
static ALWAYS_INLINE void PreFinalize(Derived *) { /* ... */ }
};
template<typename Derived, typename Base, bool SupportDynamicExpansion = false>
class KAutoObjectWithSlabHeapAndContainer : public KAutoObjectWithSlabHeapBase<Derived, Base, SupportDynamicExpansion> {
static_assert(std::derived_from<Base, KAutoObjectWithList>);
private:
static constinit inline KAutoObjectWithListContainer<Derived> s_container;
public:
class ListAccessor : public KAutoObjectWithListContainer<Derived>::ListAccessor {
public:
ALWAYS_INLINE ListAccessor() : KAutoObjectWithListContainer<Derived>::ListAccessor(s_container) { /* ... */ }
ALWAYS_INLINE ~ListAccessor() { /* ... */ }
};
public:
constexpr explicit KAutoObjectWithSlabHeapAndContainer(util::ConstantInitializeTag) : KAutoObjectWithSlabHeapBase<Derived, Base, SupportDynamicExpansion>(util::ConstantInitialize) { /* ... */ }
explicit KAutoObjectWithSlabHeapAndContainer() { /* ... */ }
public:
static void InitializeSlabHeap(void *memory, size_t memory_size) {
KAutoObjectWithSlabHeapBase<Derived, Base, SupportDynamicExpansion>::InitializeSlabHeap(memory, memory_size);
s_container.Initialize();
}
static void Register(Derived *obj) {
return s_container.Register(obj);
}
static ALWAYS_INLINE void PreFinalize(Derived *obj) { s_container.Unregister(obj); }
};
}
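/* Illustrative usage sketch, not part of the original header: a hypothetical object built on these helpers. */
/* KMyObject, memory, and memory_size are assumptions for illustration. */
/*
    class KMyObject : public KSlabAllocated<KMyObject> { ... };

    // During kernel init, hand the type a carved-out region of the slab resource area:
    KMyObject::InitializeSlabHeap(memory, memory_size);

    // Afterwards, allocation and free go through the static per-type slab heap:
    if (KMyObject *obj = KMyObject::Allocate(); obj != nullptr) {
        // ... use obj ...
        KMyObject::Free(obj);
    }

    // KAutoObject-derived types use KAutoObjectWithSlabHeap[AndContainer] instead, and call
    // Create() rather than Allocate() so the object is reference-counted from the start.
*/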
| 9,024
|
C++
|
.h
| 165
| 42.393939
| 205
| 0.626345
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,790
|
kern_k_thread.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_svc.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
#include <mesosphere/kern_k_synchronization_object.hpp>
#include <mesosphere/kern_k_affinity_mask.hpp>
#include <mesosphere/kern_k_thread_context.hpp>
#include <mesosphere/kern_k_current_context.hpp>
#include <mesosphere/kern_k_timer_task.hpp>
#include <mesosphere/kern_k_worker_task.hpp>
namespace ams::kern {
class KThreadQueue;
class KProcess;
class KConditionVariable;
class KAddressArbiter;
using KThreadFunction = void (*)(uintptr_t);
class KThread final : public KAutoObjectWithSlabHeapAndContainer<KThread, KWorkerTask>, public util::IntrusiveListBaseNode<KThread>, public KTimerTask {
MESOSPHERE_AUTOOBJECT_TRAITS(KThread, KSynchronizationObject);
private:
friend class KProcess;
friend class KConditionVariable;
friend class KAddressArbiter;
friend class KThreadQueue;
public:
static constexpr s32 MainThreadPriority = 1;
static constexpr s32 IdleThreadPriority = 64;
enum ThreadType : u32 {
ThreadType_Main = 0,
ThreadType_Kernel = 1,
ThreadType_HighPriority = 2,
ThreadType_User = 3,
};
enum SuspendType : u32 {
SuspendType_Process = 0,
SuspendType_Thread = 1,
SuspendType_Debug = 2,
SuspendType_Backtrace = 3,
SuspendType_Init = 4,
SuspendType_Count,
};
enum ThreadState : u16 {
ThreadState_Initialized = 0,
ThreadState_Waiting = 1,
ThreadState_Runnable = 2,
ThreadState_Terminated = 3,
ThreadState_SuspendShift = 4,
ThreadState_Mask = (1 << ThreadState_SuspendShift) - 1,
ThreadState_ProcessSuspended = (1 << (SuspendType_Process + ThreadState_SuspendShift)),
ThreadState_ThreadSuspended = (1 << (SuspendType_Thread + ThreadState_SuspendShift)),
ThreadState_DebugSuspended = (1 << (SuspendType_Debug + ThreadState_SuspendShift)),
ThreadState_BacktraceSuspended = (1 << (SuspendType_Backtrace + ThreadState_SuspendShift)),
ThreadState_InitSuspended = (1 << (SuspendType_Init + ThreadState_SuspendShift)),
ThreadState_SuspendFlagMask = ((1 << SuspendType_Count) - 1) << ThreadState_SuspendShift,
};
enum DpcFlag : u32 {
DpcFlag_Terminating = (1 << 0),
DpcFlag_Terminated = (1 << 1),
DpcFlag_PerformDestruction = (1 << 2),
};
enum ExceptionFlag : u32 {
ExceptionFlag_IsCallingSvc = (1 << 0),
ExceptionFlag_IsInExceptionHandler = (1 << 1),
ExceptionFlag_IsFpuContextRestoreNeeded = (1 << 2),
ExceptionFlag_IsFpu64Bit = (1 << 3),
ExceptionFlag_IsInUsermodeExceptionHandler = (1 << 4),
ExceptionFlag_IsInCacheMaintenanceOperation = (1 << 5),
ExceptionFlag_IsInTlbMaintenanceOperation = (1 << 6),
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
ExceptionFlag_IsHardwareSingleStep = (1 << 7),
#endif
};
struct StackParameters {
svc::SvcAccessFlagSet svc_access_flags;
KThreadContext::CallerSaveFpuRegisters *caller_save_fpu_registers;
KThread *cur_thread;
s16 disable_count;
util::Atomic<u8> dpc_flags;
u8 current_svc_id;
u8 reserved_2c;
u8 exception_flags;
bool is_pinned;
u8 reserved_2f;
u8 reserved_30[0x10];
KThreadContext context;
};
static_assert(util::IsAligned(AMS_OFFSETOF(StackParameters, context), 0x10));
static_assert(sizeof(StackParameters) == THREAD_STACK_PARAMETERS_SIZE);
static_assert(AMS_OFFSETOF(StackParameters, svc_access_flags) == THREAD_STACK_PARAMETERS_SVC_PERMISSION);
static_assert(AMS_OFFSETOF(StackParameters, caller_save_fpu_registers) == THREAD_STACK_PARAMETERS_CALLER_SAVE_FPU_REGISTERS);
static_assert(AMS_OFFSETOF(StackParameters, cur_thread) == THREAD_STACK_PARAMETERS_CUR_THREAD);
static_assert(AMS_OFFSETOF(StackParameters, disable_count) == THREAD_STACK_PARAMETERS_DISABLE_COUNT);
static_assert(AMS_OFFSETOF(StackParameters, dpc_flags) == THREAD_STACK_PARAMETERS_DPC_FLAGS);
static_assert(AMS_OFFSETOF(StackParameters, current_svc_id) == THREAD_STACK_PARAMETERS_CURRENT_SVC_ID);
static_assert(AMS_OFFSETOF(StackParameters, reserved_2c) == THREAD_STACK_PARAMETERS_RESERVED_2C);
static_assert(AMS_OFFSETOF(StackParameters, exception_flags) == THREAD_STACK_PARAMETERS_EXCEPTION_FLAGS);
static_assert(AMS_OFFSETOF(StackParameters, is_pinned) == THREAD_STACK_PARAMETERS_IS_PINNED);
static_assert(AMS_OFFSETOF(StackParameters, reserved_2f) == THREAD_STACK_PARAMETERS_RESERVED_2F);
static_assert(AMS_OFFSETOF(StackParameters, reserved_30) == THREAD_STACK_PARAMETERS_RESERVED_30);
static_assert(AMS_OFFSETOF(StackParameters, context) == THREAD_STACK_PARAMETERS_THREAD_CONTEXT);
static_assert(ExceptionFlag_IsCallingSvc == THREAD_EXCEPTION_FLAG_IS_CALLING_SVC);
static_assert(ExceptionFlag_IsInExceptionHandler == THREAD_EXCEPTION_FLAG_IS_IN_EXCEPTION_HANDLER);
static_assert(ExceptionFlag_IsFpuContextRestoreNeeded == THREAD_EXCEPTION_FLAG_IS_FPU_CONTEXT_RESTORE_NEEDED);
static_assert(ExceptionFlag_IsFpu64Bit == THREAD_EXCEPTION_FLAG_IS_FPU_64_BIT);
static_assert(ExceptionFlag_IsInUsermodeExceptionHandler == THREAD_EXCEPTION_FLAG_IS_IN_USERMODE_EXCEPTION_HANDLER);
static_assert(ExceptionFlag_IsInCacheMaintenanceOperation == THREAD_EXCEPTION_FLAG_IS_IN_CACHE_MAINTENANCE_OPERATION);
static_assert(ExceptionFlag_IsInTlbMaintenanceOperation == THREAD_EXCEPTION_FLAG_IS_IN_TLB_MAINTENANCE_OPERATION);
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
static_assert(ExceptionFlag_IsHardwareSingleStep == THREAD_EXCEPTION_FLAG_IS_HARDWARE_SINGLE_STEP);
#endif
struct QueueEntry {
private:
KThread *m_prev;
KThread *m_next;
public:
constexpr void Initialize() {
m_prev = nullptr;
m_next = nullptr;
}
constexpr KThread *GetPrev() const { return m_prev; }
constexpr KThread *GetNext() const { return m_next; }
constexpr void SetPrev(KThread *t) { m_prev = t; }
constexpr void SetNext(KThread *t) { m_next = t; }
};
using WaiterList = util::IntrusiveListBaseTraits<KThread>::ListType;
private:
static constexpr size_t PriorityInheritanceCountMax = 10;
union SyncObjectBuffer {
KSynchronizationObject *m_sync_objects[ams::svc::ArgumentHandleCountMax];
ams::svc::Handle m_handles[ams::svc::ArgumentHandleCountMax * (sizeof(KSynchronizationObject *) / sizeof(ams::svc::Handle))];
constexpr explicit SyncObjectBuffer(util::ConstantInitializeTag) : m_sync_objects() { /* ... */ }
explicit SyncObjectBuffer() { /* ... */ }
};
static_assert(sizeof(SyncObjectBuffer::m_sync_objects) == sizeof(SyncObjectBuffer::m_handles));
struct ConditionVariableComparator {
struct RedBlackKeyType {
uintptr_t m_cv_key;
s32 m_priority;
constexpr ALWAYS_INLINE uintptr_t GetConditionVariableKey() const {
return m_cv_key;
}
constexpr ALWAYS_INLINE s32 GetPriority() const {
return m_priority;
}
};
template<typename T> requires (std::same_as<T, KThread> || std::same_as<T, RedBlackKeyType>)
static constexpr ALWAYS_INLINE int Compare(const T &lhs, const KThread &rhs) {
const uintptr_t l_key = lhs.GetConditionVariableKey();
const uintptr_t r_key = rhs.GetConditionVariableKey();
if (l_key < r_key) {
/* Sort first by key */
return -1;
} else if (l_key == r_key && lhs.GetPriority() < rhs.GetPriority()) {
/* And then by priority. */
return -1;
} else {
return 1;
}
}
};
static_assert(ams::util::HasRedBlackKeyType<ConditionVariableComparator>);
static_assert(std::same_as<ams::util::RedBlackKeyType<ConditionVariableComparator, void>, ConditionVariableComparator::RedBlackKeyType>);
struct LockWithPriorityInheritanceComparator {
struct RedBlackKeyType {
s32 m_priority;
constexpr ALWAYS_INLINE s32 GetPriority() const {
return m_priority;
}
};
template<typename T> requires (std::same_as<T, KThread> || std::same_as<T, RedBlackKeyType>)
static constexpr ALWAYS_INLINE int Compare(const T &lhs, const KThread &rhs) {
if (lhs.GetPriority() < rhs.GetPriority()) {
/* Sort by priority. */
return -1;
} else {
return 1;
}
}
};
static_assert(ams::util::HasRedBlackKeyType<LockWithPriorityInheritanceComparator>);
static_assert(std::same_as<ams::util::RedBlackKeyType<LockWithPriorityInheritanceComparator, void>, LockWithPriorityInheritanceComparator::RedBlackKeyType>);
private:
util::IntrusiveListNode m_process_list_node;
util::IntrusiveRedBlackTreeNode m_condvar_arbiter_tree_node;
s32 m_priority;
using ConditionVariableThreadTreeTraits = util::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&KThread::m_condvar_arbiter_tree_node>;
using ConditionVariableThreadTree = ConditionVariableThreadTreeTraits::TreeType<ConditionVariableComparator>;
using LockWithPriorityInheritanceThreadTreeTraits = util::IntrusiveRedBlackTreeMemberTraitsDeferredAssert<&KThread::m_condvar_arbiter_tree_node>;
using LockWithPriorityInheritanceThreadTree = ConditionVariableThreadTreeTraits::TreeType<LockWithPriorityInheritanceComparator>;
public:
class LockWithPriorityInheritanceInfo : public KSlabAllocated<LockWithPriorityInheritanceInfo>, public util::IntrusiveListBaseNode<LockWithPriorityInheritanceInfo> {
private:
LockWithPriorityInheritanceThreadTree m_tree;
KProcessAddress m_address_key;
KThread *m_owner;
u32 m_waiter_count;
public:
constexpr LockWithPriorityInheritanceInfo() : m_tree(), m_address_key(Null<KProcessAddress>), m_owner(nullptr), m_waiter_count() {
/* ... */
}
static LockWithPriorityInheritanceInfo *Create(KProcessAddress address_key) {
/* Create a new lock info. */
auto *new_lock = LockWithPriorityInheritanceInfo::Allocate();
MESOSPHERE_ABORT_UNLESS(new_lock != nullptr);
/* Set the new lock's address key. */
new_lock->m_address_key = address_key;
return new_lock;
}
void SetOwner(KThread *new_owner) {
/* Set new owner. */
m_owner = new_owner;
}
void AddWaiter(KThread *waiter) {
/* Insert the waiter. */
m_tree.insert(*waiter);
m_waiter_count++;
waiter->SetWaitingLockInfo(this);
}
[[nodiscard]] bool RemoveWaiter(KThread *waiter) {
m_tree.erase(m_tree.iterator_to(*waiter));
waiter->SetWaitingLockInfo(nullptr);
return (--m_waiter_count) == 0;
}
KThread *GetHighestPriorityWaiter() { return std::addressof(m_tree.front()); }
const KThread *GetHighestPriorityWaiter() const { return std::addressof(m_tree.front()); }
LockWithPriorityInheritanceThreadTree &GetThreadTree() { return m_tree; }
const LockWithPriorityInheritanceThreadTree &GetThreadTree() const { return m_tree; }
constexpr KProcessAddress GetAddressKey() const { return m_address_key; }
constexpr KThread *GetOwner() const { return m_owner; }
constexpr u32 GetWaiterCount() const { return m_waiter_count; }
};
private:
using LockWithPriorityInheritanceInfoList = util::IntrusiveListBaseTraits<LockWithPriorityInheritanceInfo>::ListType;
ConditionVariableThreadTree *m_condvar_tree;
uintptr_t m_condvar_key;
alignas(16) KThreadContext::CallerSaveFpuRegisters m_caller_save_fpu_registers;
u64 m_virtual_affinity_mask;
KAffinityMask m_physical_affinity_mask;
u64 m_thread_id;
util::Atomic<s64> m_cpu_time;
KProcessAddress m_address_key;
KProcess *m_parent;
void *m_kernel_stack_top;
u32 *m_light_ipc_data;
KProcessAddress m_tls_address;
void *m_tls_heap_address;
KLightLock m_activity_pause_lock;
SyncObjectBuffer m_sync_object_buffer;
s64 m_schedule_count;
s64 m_last_scheduled_tick;
QueueEntry m_per_core_priority_queue_entry[cpu::NumCores];
KThreadQueue *m_wait_queue;
LockWithPriorityInheritanceInfoList m_held_lock_info_list;
LockWithPriorityInheritanceInfo *m_waiting_lock_info;
WaiterList m_pinned_waiter_list;
uintptr_t m_debug_params[3];
KAutoObject *m_closed_object;
u32 m_address_key_value;
u32 m_suspend_request_flags;
u32 m_suspend_allowed_flags;
s32 m_synced_index;
Result m_wait_result;
Result m_debug_exception_result;
s32 m_base_priority;
s32 m_base_priority_on_unpin;
s32 m_physical_ideal_core_id;
s32 m_virtual_ideal_core_id;
s32 m_num_kernel_waiters;
s32 m_current_core_id;
s32 m_core_id;
KAffinityMask m_original_physical_affinity_mask;
s32 m_original_physical_ideal_core_id;
s32 m_num_core_migration_disables;
ThreadState m_thread_state;
util::Atomic<bool> m_termination_requested;
bool m_wait_cancelled;
bool m_cancellable;
bool m_signaled;
bool m_initialized;
bool m_debug_attached;
s8 m_priority_inheritance_count;
bool m_resource_limit_release_hint;
public:
constexpr explicit KThread(util::ConstantInitializeTag)
: KAutoObjectWithSlabHeapAndContainer<KThread, KWorkerTask>(util::ConstantInitialize), KTimerTask(util::ConstantInitialize),
m_process_list_node{}, m_condvar_arbiter_tree_node{util::ConstantInitialize}, m_priority{-1}, m_condvar_tree{}, m_condvar_key{},
m_caller_save_fpu_registers{}, m_virtual_affinity_mask{}, m_physical_affinity_mask{}, m_thread_id{}, m_cpu_time{0}, m_address_key{Null<KProcessAddress>}, m_parent{},
m_kernel_stack_top{}, m_light_ipc_data{}, m_tls_address{Null<KProcessAddress>}, m_tls_heap_address{}, m_activity_pause_lock{}, m_sync_object_buffer{util::ConstantInitialize},
m_schedule_count{}, m_last_scheduled_tick{}, m_per_core_priority_queue_entry{}, m_wait_queue{}, m_held_lock_info_list{}, m_waiting_lock_info{},
m_pinned_waiter_list{}, m_debug_params{}, m_closed_object{}, m_address_key_value{}, m_suspend_request_flags{}, m_suspend_allowed_flags{}, m_synced_index{},
m_wait_result{svc::ResultNoSynchronizationObject()}, m_debug_exception_result{ResultSuccess()}, m_base_priority{}, m_base_priority_on_unpin{},
m_physical_ideal_core_id{}, m_virtual_ideal_core_id{}, m_num_kernel_waiters{}, m_current_core_id{}, m_core_id{}, m_original_physical_affinity_mask{},
m_original_physical_ideal_core_id{}, m_num_core_migration_disables{}, m_thread_state{}, m_termination_requested{false}, m_wait_cancelled{},
m_cancellable{}, m_signaled{}, m_initialized{}, m_debug_attached{}, m_priority_inheritance_count{}, m_resource_limit_release_hint{}
{
/* ... */
}
explicit KThread() : m_priority(-1), m_condvar_tree(nullptr), m_condvar_key(0), m_parent(nullptr), m_initialized(false) { /* ... */ }
Result Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner, ThreadType type);
private:
static Result InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner, ThreadType type);
public:
static Result InitializeKernelThread(KThread *thread, KThreadFunction func, uintptr_t arg, s32 prio, s32 virt_core) {
R_RETURN(InitializeThread(thread, func, arg, Null<KProcessAddress>, prio, virt_core, nullptr, ThreadType_Kernel));
}
static Result InitializeHighPriorityThread(KThread *thread, KThreadFunction func, uintptr_t arg) {
R_RETURN(InitializeThread(thread, func, arg, Null<KProcessAddress>, 0, GetCurrentCoreId(), nullptr, ThreadType_HighPriority));
}
static Result InitializeUserThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner) {
R_RETURN(InitializeThread(thread, func, arg, user_stack_top, prio, virt_core, owner, ThreadType_User));
}
static void ResumeThreadsSuspendedForInit();
private:
ALWAYS_INLINE StackParameters &GetStackParameters() { return *(reinterpret_cast< StackParameters *>(m_kernel_stack_top) - 1); }
ALWAYS_INLINE const StackParameters &GetStackParameters() const { return *(reinterpret_cast<const StackParameters *>(m_kernel_stack_top) - 1); }
public:
ALWAYS_INLINE s16 GetDisableDispatchCount() const {
MESOSPHERE_ASSERT_THIS();
return this->GetStackParameters().disable_count;
}
ALWAYS_INLINE void DisableDispatch() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() >= 0);
this->GetStackParameters().disable_count++;
}
ALWAYS_INLINE void EnableDispatch() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() > 0);
this->GetStackParameters().disable_count--;
}
void Pin();
void Unpin();
ALWAYS_INLINE void SaveDebugParams(uintptr_t param1, uintptr_t param2, uintptr_t param3) {
m_debug_params[0] = param1;
m_debug_params[1] = param2;
m_debug_params[2] = param3;
}
ALWAYS_INLINE void RestoreDebugParams(uintptr_t *param1, uintptr_t *param2, uintptr_t *param3) {
*param1 = m_debug_params[0];
*param2 = m_debug_params[1];
*param3 = m_debug_params[2];
}
NOINLINE void DisableCoreMigration();
NOINLINE void EnableCoreMigration();
private:
ALWAYS_INLINE void SetExceptionFlag(ExceptionFlag flag) {
MESOSPHERE_ASSERT_THIS();
this->GetStackParameters().exception_flags |= flag;
}
ALWAYS_INLINE void ClearExceptionFlag(ExceptionFlag flag) {
MESOSPHERE_ASSERT_THIS();
this->GetStackParameters().exception_flags &= ~flag;
}
ALWAYS_INLINE bool IsExceptionFlagSet(ExceptionFlag flag) const {
MESOSPHERE_ASSERT_THIS();
return this->GetStackParameters().exception_flags & flag;
}
public:
/* ALWAYS_INLINE void SetCallingSvc() { return this->SetExceptionFlag(ExceptionFlag_IsCallingSvc); } */
/* ALWAYS_INLINE void ClearCallingSvc() { return this->ClearExceptionFlag(ExceptionFlag_IsCallingSvc); } */
ALWAYS_INLINE bool IsCallingSvc() const { return this->IsExceptionFlagSet(ExceptionFlag_IsCallingSvc); }
ALWAYS_INLINE void SetInExceptionHandler() { return this->SetExceptionFlag(ExceptionFlag_IsInExceptionHandler); }
ALWAYS_INLINE void ClearInExceptionHandler() { return this->ClearExceptionFlag(ExceptionFlag_IsInExceptionHandler); }
ALWAYS_INLINE bool IsInExceptionHandler() const { return this->IsExceptionFlagSet(ExceptionFlag_IsInExceptionHandler); }
/* ALWAYS_INLINE void SetFpuContextRestoreNeeded() { return this->SetExceptionFlag(ExceptionFlag_IsFpuContextRestoreNeeded); } */
/* ALWAYS_INLINE void ClearFpuContextRestoreNeeded() { return this->ClearExceptionFlag(ExceptionFlag_IsFpuContextRestoreNeeded); } */
/* ALWAYS_INLINE bool IsFpuContextRestoreNeeded() const { return this->IsExceptionFlagSet(ExceptionFlag_IsFpuContextRestoreNeeded); } */
ALWAYS_INLINE void SetFpu64Bit() { return this->SetExceptionFlag(ExceptionFlag_IsFpu64Bit); }
/* ALWAYS_INLINE void ClearFpu64Bit() { return this->ClearExceptionFlag(ExceptionFlag_IsFpu64Bit); } */
/* ALWAYS_INLINE bool IsFpu64Bit() const { return this->IsExceptionFlagSet(ExceptionFlag_IsFpu64Bit); } */
ALWAYS_INLINE void SetInUsermodeExceptionHandler() { return this->SetExceptionFlag(ExceptionFlag_IsInUsermodeExceptionHandler); }
ALWAYS_INLINE void ClearInUsermodeExceptionHandler() { return this->ClearExceptionFlag(ExceptionFlag_IsInUsermodeExceptionHandler); }
ALWAYS_INLINE bool IsInUsermodeExceptionHandler() const { return this->IsExceptionFlagSet(ExceptionFlag_IsInUsermodeExceptionHandler); }
ALWAYS_INLINE void SetInCacheMaintenanceOperation() { return this->SetExceptionFlag(ExceptionFlag_IsInCacheMaintenanceOperation); }
ALWAYS_INLINE void ClearInCacheMaintenanceOperation() { return this->ClearExceptionFlag(ExceptionFlag_IsInCacheMaintenanceOperation); }
ALWAYS_INLINE bool IsInCacheMaintenanceOperation() const { return this->IsExceptionFlagSet(ExceptionFlag_IsInCacheMaintenanceOperation); }
ALWAYS_INLINE void SetInTlbMaintenanceOperation() { return this->SetExceptionFlag(ExceptionFlag_IsInTlbMaintenanceOperation); }
ALWAYS_INLINE void ClearInTlbMaintenanceOperation() { return this->ClearExceptionFlag(ExceptionFlag_IsInTlbMaintenanceOperation); }
ALWAYS_INLINE bool IsInTlbMaintenanceOperation() const { return this->IsExceptionFlagSet(ExceptionFlag_IsInTlbMaintenanceOperation); }
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
ALWAYS_INLINE void SetHardwareSingleStep() { return this->SetExceptionFlag(ExceptionFlag_IsHardwareSingleStep); }
ALWAYS_INLINE void ClearHardwareSingleStep() { return this->ClearExceptionFlag(ExceptionFlag_IsHardwareSingleStep); }
ALWAYS_INLINE bool IsHardwareSingleStep() const { return this->IsExceptionFlagSet(ExceptionFlag_IsHardwareSingleStep); }
#endif
ALWAYS_INLINE u8 GetSvcId() const {
MESOSPHERE_ASSERT_THIS();
return this->GetStackParameters().current_svc_id;
}
ALWAYS_INLINE void RegisterDpc(DpcFlag flag) {
this->GetStackParameters().dpc_flags |= flag;
}
ALWAYS_INLINE void ClearDpc(DpcFlag flag) {
this->GetStackParameters().dpc_flags &= ~flag;
}
ALWAYS_INLINE u8 GetDpc() const {
return this->GetStackParameters().dpc_flags.Load();
}
ALWAYS_INLINE bool HasDpc() const {
MESOSPHERE_ASSERT_THIS();
return this->GetDpc() != 0;
}
private:
void SetPinnedSvcPermissions();
void SetUnpinnedSvcPermissions();
void SetUsermodeExceptionSvcPermissions();
void ClearUsermodeExceptionSvcPermissions();
private:
void UpdateState();
ALWAYS_INLINE void AddHeldLock(LockWithPriorityInheritanceInfo *lock_info);
ALWAYS_INLINE LockWithPriorityInheritanceInfo *FindHeldLock(KProcessAddress address_key);
ALWAYS_INLINE void AddWaiterImpl(KThread *thread);
ALWAYS_INLINE void RemoveWaiterImpl(KThread *thread);
ALWAYS_INLINE static void RestorePriority(KThread *thread);
void StartTermination();
void FinishTermination();
void IncreaseBasePriority(s32 priority);
NOINLINE void SetState(ThreadState state);
public:
constexpr u64 GetThreadId() const { return m_thread_id; }
const KThreadContext &GetContext() const { return this->GetStackParameters().context; }
KThreadContext &GetContext() { return this->GetStackParameters().context; }
const auto &GetCallerSaveFpuRegisters() const { return m_caller_save_fpu_registers; }
auto &GetCallerSaveFpuRegisters() { return m_caller_save_fpu_registers; }
constexpr u64 GetVirtualAffinityMask() const { return m_virtual_affinity_mask; }
constexpr const KAffinityMask &GetAffinityMask() const { return m_physical_affinity_mask; }
Result GetCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask);
Result SetCoreMask(int32_t ideal_core, u64 affinity_mask);
Result GetPhysicalCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask);
constexpr ThreadState GetState() const { return static_cast<ThreadState>(m_thread_state & ThreadState_Mask); }
constexpr ThreadState GetRawState() const { return m_thread_state; }
constexpr uintptr_t GetConditionVariableKey() const { return m_condvar_key; }
constexpr uintptr_t GetAddressArbiterKey() const { return m_condvar_key; }
constexpr void SetConditionVariable(ConditionVariableThreadTree *tree, KProcessAddress address, uintptr_t cv_key, u32 value) {
MESOSPHERE_ASSERT(m_waiting_lock_info == nullptr);
m_condvar_tree = tree;
m_condvar_key = cv_key;
m_address_key = address;
m_address_key_value = value;
}
constexpr void ClearConditionVariable() {
m_condvar_tree = nullptr;
}
constexpr bool IsWaitingForConditionVariable() const {
return m_condvar_tree != nullptr;
}
constexpr void SetAddressArbiter(ConditionVariableThreadTree *tree, uintptr_t address) {
MESOSPHERE_ASSERT(m_waiting_lock_info == nullptr);
m_condvar_tree = tree;
m_condvar_key = address;
}
constexpr void ClearAddressArbiter() {
m_condvar_tree = nullptr;
}
constexpr bool IsWaitingForAddressArbiter() const {
return m_condvar_tree != nullptr;
}
constexpr s32 GetIdealVirtualCore() const { return m_virtual_ideal_core_id; }
constexpr s32 GetIdealPhysicalCore() const { return m_physical_ideal_core_id; }
constexpr s32 GetActiveCore() const { return m_core_id; }
constexpr void SetActiveCore(s32 core) { m_core_id = core; }
constexpr ALWAYS_INLINE s32 GetCurrentCore() const { return m_current_core_id; }
constexpr void SetCurrentCore(s32 core) { m_current_core_id = core; }
constexpr s32 GetPriority() const { return m_priority; }
constexpr void SetPriority(s32 prio) { m_priority = prio; }
constexpr s32 GetBasePriority() const { return m_base_priority; }
constexpr QueueEntry &GetPriorityQueueEntry(s32 core) { return m_per_core_priority_queue_entry[core]; }
constexpr const QueueEntry &GetPriorityQueueEntry(s32 core) const { return m_per_core_priority_queue_entry[core]; }
constexpr ConditionVariableThreadTree *GetConditionVariableTree() const { return m_condvar_tree; }
constexpr s32 GetNumKernelWaiters() const { return m_num_kernel_waiters; }
void AddWaiter(KThread *thread);
void RemoveWaiter(KThread *thread);
KThread *RemoveWaiterByKey(bool *out_has_waiters, KProcessAddress key);
constexpr KProcessAddress GetAddressKey() const { return m_address_key; }
constexpr u32 GetAddressKeyValue() const { return m_address_key_value; }
constexpr void SetAddressKey(KProcessAddress key) { MESOSPHERE_ASSERT(m_waiting_lock_info == nullptr); m_address_key = key; }
constexpr void SetAddressKey(KProcessAddress key, u32 val) { MESOSPHERE_ASSERT(m_waiting_lock_info == nullptr); m_address_key = key; m_address_key_value = val; }
constexpr void SetWaitingLockInfo(LockWithPriorityInheritanceInfo *lock) { m_waiting_lock_info = lock; }
constexpr LockWithPriorityInheritanceInfo *GetWaitingLockInfo() { return m_waiting_lock_info; }
constexpr KThread *GetLockOwner() const { return m_waiting_lock_info != nullptr ? m_waiting_lock_info->GetOwner() : nullptr; }
constexpr void ClearWaitQueue() { m_wait_queue = nullptr; }
void BeginWait(KThreadQueue *queue);
void NotifyAvailable(KSynchronizationObject *signaled_object, Result wait_result);
void EndWait(Result wait_result);
void CancelWait(Result wait_result, bool cancel_timer_task);
constexpr void SetSyncedIndex(s32 index) { m_synced_index = index; }
constexpr s32 GetSyncedIndex() const { return m_synced_index; }
constexpr void SetWaitResult(Result wait_res) { m_wait_result = wait_res; }
constexpr Result GetWaitResult() const { return m_wait_result; }
constexpr void SetDebugExceptionResult(Result result) { m_debug_exception_result = result; }
constexpr Result GetDebugExceptionResult() const { return m_debug_exception_result; }
void WaitCancel();
bool IsWaitCancelled() const { return m_wait_cancelled; }
void ClearWaitCancelled() { m_wait_cancelled = false; }
void ClearCancellable() { m_cancellable = false; }
void SetCancellable() { m_cancellable = true; }
constexpr u32 *GetLightSessionData() const { return m_light_ipc_data; }
constexpr void SetLightSessionData(u32 *data) { m_light_ipc_data = data; }
constexpr s64 GetLastScheduledTick() const { return m_last_scheduled_tick; }
constexpr void SetLastScheduledTick(s64 tick) { m_last_scheduled_tick = tick; }
constexpr s64 GetYieldScheduleCount() const { return m_schedule_count; }
constexpr void SetYieldScheduleCount(s64 count) { m_schedule_count = count; }
constexpr KProcess *GetOwnerProcess() const { return m_parent; }
constexpr bool IsUserThread() const { return m_parent != nullptr; }
constexpr KProcessAddress GetThreadLocalRegionAddress() const { return m_tls_address; }
constexpr void *GetThreadLocalRegionHeapAddress() const { return m_tls_heap_address; }
constexpr KSynchronizationObject **GetSynchronizationObjectBuffer() { return std::addressof(m_sync_object_buffer.m_sync_objects[0]); }
constexpr ams::svc::Handle *GetHandleBuffer() { return std::addressof(m_sync_object_buffer.m_handles[sizeof(m_sync_object_buffer.m_sync_objects) / (sizeof(ams::svc::Handle)) - ams::svc::ArgumentHandleCountMax]); }
u16 GetUserDisableCount() const { return static_cast<ams::svc::ThreadLocalRegion *>(m_tls_heap_address)->disable_count; }
void SetInterruptFlag() const { static_cast<ams::svc::ThreadLocalRegion *>(m_tls_heap_address)->interrupt_flag = 1; }
void ClearInterruptFlag() const { static_cast<ams::svc::ThreadLocalRegion *>(m_tls_heap_address)->interrupt_flag = 0; }
bool IsInUserCacheMaintenanceOperation() const { return static_cast<ams::svc::ThreadLocalRegion *>(m_tls_heap_address)->cache_maintenance_flag != 0; }
ALWAYS_INLINE KAutoObject *GetClosedObject() { return m_closed_object; }
ALWAYS_INLINE void SetClosedObject(KAutoObject *object) {
MESOSPHERE_ASSERT(object != nullptr);
/* Set the object to destroy. */
m_closed_object = object;
/* Schedule destruction DPC. */
if ((this->GetStackParameters().dpc_flags.Load<std::memory_order_relaxed>() & DpcFlag_PerformDestruction) == 0) {
this->RegisterDpc(DpcFlag_PerformDestruction);
}
}
ALWAYS_INLINE void DestroyClosedObjects() {
/* Destroy all objects that have been closed. */
if (KAutoObject *cur = m_closed_object; cur != nullptr) {
do {
/* Set our closed object as the next to close. */
m_closed_object = cur->GetNextClosedObject();
/* Destroy the current object. */
cur->Destroy();
/* Advance. */
cur = m_closed_object;
} while (cur != nullptr);
/* Clear the pending DPC. */
this->ClearDpc(DpcFlag_PerformDestruction);
}
}
constexpr void SetDebugAttached() { m_debug_attached = true; }
constexpr bool IsAttachedToDebugger() const { return m_debug_attached; }
void AddCpuTime(s32 core_id, s64 amount) {
m_cpu_time += amount;
/* TODO: Debug kernels track per-core tick counts. Should we? */
MESOSPHERE_UNUSED(core_id);
}
s64 GetCpuTime() const { return m_cpu_time.Load(); }
s64 GetCpuTime(s32 core_id) const {
MESOSPHERE_ABORT_UNLESS(0 <= core_id && core_id < static_cast<s32>(cpu::NumCores));
/* TODO: Debug kernels track per-core tick counts. Should we? */
return 0;
}
constexpr u32 GetSuspendFlags() const { return m_suspend_allowed_flags & m_suspend_request_flags; }
constexpr bool IsSuspended() const { return this->GetSuspendFlags() != 0; }
constexpr bool IsSuspendRequested(SuspendType type) const { return (m_suspend_request_flags & (1u << (util::ToUnderlying(ThreadState_SuspendShift) + util::ToUnderlying(type)))) != 0; }
constexpr bool IsSuspendRequested() const { return m_suspend_request_flags != 0; }
void RequestSuspend(SuspendType type);
void Resume(SuspendType type);
void TrySuspend();
void Continue();
Result SetActivity(ams::svc::ThreadActivity activity);
Result GetThreadContext3(ams::svc::ThreadContext *out);
void ContinueIfHasKernelWaiters() {
if (this->GetNumKernelWaiters() > 0) {
this->Continue();
}
}
void SetBasePriority(s32 priority);
Result SetPriorityToIdle();
Result Run();
void Exit();
Result Terminate();
ThreadState RequestTerminate();
Result Sleep(s64 timeout);
ALWAYS_INLINE void *GetStackTop() const { return reinterpret_cast<StackParameters *>(m_kernel_stack_top) - 1; }
ALWAYS_INLINE void *GetKernelStackTop() const { return m_kernel_stack_top; }
ALWAYS_INLINE bool IsTerminationRequested() const {
return m_termination_requested.Load() || this->GetRawState() == ThreadState_Terminated;
}
size_t GetKernelStackUsage() const;
void OnEnterUsermodeException();
void OnLeaveUsermodeException();
public:
/* Overridden parent functions. */
ALWAYS_INLINE u64 GetIdImpl() const { return this->GetThreadId(); }
ALWAYS_INLINE u64 GetId() const { return this->GetIdImpl(); }
bool IsInitialized() const { return m_initialized; }
uintptr_t GetPostDestroyArgument() const { return reinterpret_cast<uintptr_t>(m_parent) | (m_resource_limit_release_hint ? 1 : 0); }
static void PostDestroy(uintptr_t arg);
void Finalize();
virtual bool IsSignaled() const override;
void OnTimer();
void DoWorkerTaskImpl();
public:
static consteval bool IsKThreadStructurallyValid();
static KThread *GetThreadFromId(u64 thread_id);
static Result GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer<u64 *> out_thread_ids, s32 max_out_count);
using ConditionVariableThreadTreeType = ConditionVariableThreadTree;
};
static_assert(alignof(KThread) == 0x10);
consteval bool KThread::IsKThreadStructurallyValid() {
/* Check that the condition variable tree is valid. */
static_assert(ConditionVariableThreadTreeTraits::IsValid());
/* Check that the assembly offsets are valid. */
static_assert(AMS_OFFSETOF(KThread, m_kernel_stack_top) == THREAD_KERNEL_STACK_TOP);
return true;
}
static_assert(KThread::IsKThreadStructurallyValid());
class KScopedDisableDispatch {
public:
explicit ALWAYS_INLINE KScopedDisableDispatch() {
GetCurrentThread().DisableDispatch();
}
NOINLINE ~KScopedDisableDispatch();
};
ALWAYS_INLINE KExceptionContext *GetExceptionContext(KThread *thread) {
return reinterpret_cast<KExceptionContext *>(reinterpret_cast<uintptr_t>(thread->GetKernelStackTop()) - sizeof(KThread::StackParameters) - sizeof(KExceptionContext));
}
ALWAYS_INLINE const KExceptionContext *GetExceptionContext(const KThread *thread) {
return reinterpret_cast<const KExceptionContext *>(reinterpret_cast<uintptr_t>(thread->GetKernelStackTop()) - sizeof(KThread::StackParameters) - sizeof(KExceptionContext));
}
ALWAYS_INLINE KProcess *GetCurrentProcessPointer() {
return GetCurrentThread().GetOwnerProcess();
}
ALWAYS_INLINE KProcess &GetCurrentProcess() {
return *GetCurrentProcessPointer();
}
ALWAYS_INLINE s32 GetCurrentCoreId() {
return GetCurrentThread().GetCurrentCore();
}
ALWAYS_INLINE void KTimerTask::OnTimer() {
static_cast<KThread *>(this)->OnTimer();
}
}
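/* Illustrative notes, not part of the original header: how the state bits above compose, plus a */
/* hypothetical use of KScopedDisableDispatch. */
/*
    // ThreadState keeps the scheduling state in the low four bits and one suspend-request bit
    // per SuspendType above them, e.g.:
    //   ThreadState_ThreadSuspended == 1 << (SuspendType_Thread + ThreadState_SuspendShift) == 0x20
    // A thread is schedulable only when the suspend-flag bits are clear and the masked state
    // is ThreadState_Runnable.

    // Scoped dispatch disable around a short critical region on the current core:
    {
        KScopedDisableDispatch dd;
        // ... work that must not be preempted ...
    }   // the destructor re-enables dispatch (and may reschedule).
*/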
| 43,712
|
C++
|
.h
| 644
| 53.372671
| 225
| 0.591305
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,791
|
kern_k_current_context.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_current_context.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_select_cpu.hpp>
namespace ams::kern {
class KThread;
class KProcess;
class KScheduler;
ALWAYS_INLINE KThread *GetCurrentThreadPointer() {
return reinterpret_cast<KThread *>(cpu::GetCurrentThreadPointerValue());
}
ALWAYS_INLINE KThread &GetCurrentThread() {
return *GetCurrentThreadPointer();
}
ALWAYS_INLINE void SetCurrentThread(KThread *new_thread) {
cpu::SetCurrentThreadPointerValue(reinterpret_cast<uintptr_t>(new_thread));
}
ALWAYS_INLINE KProcess *GetCurrentProcessPointer();
ALWAYS_INLINE KProcess &GetCurrentProcess();
ALWAYS_INLINE s32 GetCurrentCoreId();
ALWAYS_INLINE KScheduler &GetCurrentScheduler();
}
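/* Illustrative usage sketch, not part of the original header; the call site is hypothetical. */
/*
    // Kernel code reaches the current thread/process through the per-core thread pointer
    // register rather than a global table:
    KThread  &thread  = GetCurrentThread();
    KProcess *process = GetCurrentProcessPointer();   // nullptr for kernel-only threads
*/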
| 1,376
|
C++
|
.h
| 35
| 35.485714
| 83
| 0.748498
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,792
|
kern_k_scoped_resource_reservation.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_scoped_resource_reservation.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_resource_limit.hpp>
#include <mesosphere/kern_k_process.hpp>
namespace ams::kern {
class KScopedResourceReservation {
private:
KResourceLimit *m_limit;
s64 m_value;
ams::svc::LimitableResource m_resource;
bool m_succeeded;
public:
ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v, s64 timeout) : m_limit(l), m_value(v), m_resource(r) {
if (m_limit && m_value) {
m_succeeded = m_limit->Reserve(m_resource, m_value, timeout);
} else {
m_succeeded = true;
}
}
ALWAYS_INLINE KScopedResourceReservation(KResourceLimit *l, ams::svc::LimitableResource r, s64 v = 1) : m_limit(l), m_value(v), m_resource(r) {
if (m_limit && m_value) {
m_succeeded = m_limit->Reserve(m_resource, m_value);
} else {
m_succeeded = true;
}
}
ALWAYS_INLINE KScopedResourceReservation(const KProcess *p, ams::svc::LimitableResource r, s64 v, s64 t) : KScopedResourceReservation(p->GetResourceLimit(), r, v, t) { /* ... */ }
ALWAYS_INLINE KScopedResourceReservation(const KProcess *p, ams::svc::LimitableResource r, s64 v = 1) : KScopedResourceReservation(p->GetResourceLimit(), r, v) { /* ... */ }
ALWAYS_INLINE ~KScopedResourceReservation() {
if (m_limit && m_value && m_succeeded) {
m_limit->Release(m_resource, m_value);
}
}
ALWAYS_INLINE void Commit() {
m_limit = nullptr;
}
ALWAYS_INLINE bool Succeeded() const {
return m_succeeded;
}
};
}
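/* Illustrative usage sketch, not part of the original header: the reserve/commit pattern, with */
/* hypothetical names for the owner process and resource. */
/*
    // Reserve one slot against the owner's resource limit before creating the object:
    KScopedResourceReservation reservation(owner_process, ams::svc::LimitableResource_ThreadCountMax);
    R_UNLESS(reservation.Succeeded(), svc::ResultLimitReached());

    // ... create and initialize the object ...

    // Commit() clears the limit pointer so the destructor no longer releases the reservation;
    // on any early error return the destructor releases it automatically.
    reservation.Commit();
*/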
| 2,560
|
C++
|
.h
| 56
| 35.821429
| 191
| 0.604567
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,793
|
kern_k_wait_object.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_wait_object.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_timer_task.hpp>
#include <mesosphere/kern_k_thread.hpp>
namespace ams::kern {
class KWaitObject {
private:
KThread::WaiterList m_wait_list;
KThread *m_next_thread;
public:
constexpr KWaitObject() : m_wait_list(), m_next_thread() { /* ... */ }
Result Synchronize(s64 timeout);
};
}
| 1,071
|
C++
|
.h
| 29
| 32.827586
| 82
| 0.703276
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,794
|
kern_select_page_table.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_select_page_table.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#if defined(ATMOSPHERE_ARCH_ARM64)
#include <mesosphere/arch/arm64/kern_k_page_table.hpp>
#include <mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp>
#include <mesosphere/arch/arm64/kern_k_process_page_table.hpp>
namespace ams::kern {
using ams::kern::arch::arm64::KPageTable;
using ams::kern::arch::arm64::KSupervisorPageTable;
using ams::kern::arch::arm64::KProcessPageTable;
}
#else
#error "Unknown architecture for KPageTable"
#endif
namespace ams::kern {
/* NOTE: These three functions (Operate, Operate, FinalizeUpdate) are virtual functions */
/* in Nintendo's kernel. We devirtualize them, since KPageTable is the only derived */
/* class, and this avoids unnecessary virtual function calls. */
static_assert(std::derived_from<KPageTable, KPageTableBase>);
ALWAYS_INLINE Result KPageTableBase::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) {
R_RETURN(static_cast<KPageTable *>(this)->OperateImpl(page_list, virt_addr, num_pages, phys_addr, is_pa_valid, properties, operation, reuse_ll));
}
ALWAYS_INLINE Result KPageTableBase::Operate(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) {
R_RETURN(static_cast<KPageTable *>(this)->OperateImpl(page_list, virt_addr, num_pages, page_group, properties, operation, reuse_ll));
}
ALWAYS_INLINE void KPageTableBase::FinalizeUpdate(PageLinkedList *page_list) {
static_cast<KPageTable *>(this)->FinalizeUpdateImpl(page_list);
}
}
| 2,472
|
C++
|
.h
| 44
| 52.090909
| 242
| 0.749793
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,795
|
kern_k_unsafe_memory.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_unsafe_memory.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_light_lock.hpp>
namespace ams::kern {
class KUnsafeMemory {
private:
mutable KLightLock m_lock;
size_t m_limit_size;
size_t m_current_size;
public:
constexpr KUnsafeMemory() : m_lock(), m_limit_size(), m_current_size() { /* ... */ }
bool TryReserve(size_t size) {
MESOSPHERE_ASSERT_THIS();
KScopedLightLock lk(m_lock);
/* Test for overflow. */
if (m_current_size > m_current_size + size) {
return false;
}
/* Test for limit allowance. */
if (m_current_size + size > m_limit_size) {
return false;
}
/* Reserve the size. */
m_current_size += size;
return true;
}
void Release(size_t size) {
MESOSPHERE_ASSERT_THIS();
KScopedLightLock lk(m_lock);
MESOSPHERE_ABORT_UNLESS(m_current_size >= size);
m_current_size -= size;
}
size_t GetLimitSize() const {
MESOSPHERE_ASSERT_THIS();
KScopedLightLock lk(m_lock);
return m_limit_size;
}
size_t GetCurrentSize() const {
MESOSPHERE_ASSERT_THIS();
KScopedLightLock lk(m_lock);
return m_current_size;
}
Result SetLimitSize(size_t size) {
MESOSPHERE_ASSERT_THIS();
KScopedLightLock lk(m_lock);
R_UNLESS(size >= m_current_size, svc::ResultLimitReached());
m_limit_size = size;
R_SUCCEED();
}
};
}
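/* Illustrative usage sketch, not part of the original header: pairing TryReserve with Release. */
/* The singleton accessor and the mapping step shown are assumptions for illustration. */
/*
    if (!Kernel::GetUnsafeMemory().TryReserve(size)) {
        // reject the map request; the limit set via SetLimitSize() would be exceeded
    }

    // ... map the unsafe memory ...

    // If mapping fails (or on unmap), the reservation must be undone explicitly:
    //     Kernel::GetUnsafeMemory().Release(size);
*/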
| 2,505
|
C++
|
.h
| 66
| 26.742424
| 96
| 0.549238
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,796
|
kern_k_auto_object_impls.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_auto_object_impls.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_class_token.hpp>
namespace ams::kern {
/* NOTE: This header is included after all other KAutoObjects. */
namespace impl {
template<typename T> requires std::derived_from<T, KAutoObject>
consteval bool IsAutoObjectInheritanceValidImpl() {
#define CLASS_TOKEN_HANDLER(CLASSNAME) \
if constexpr (std::same_as<T, CLASSNAME>) { \
if (T::GetStaticTypeObj().GetClassToken() != ::ams::kern::ClassToken<CLASSNAME>) { \
return false; \
} \
} else { \
if (T::GetStaticTypeObj().IsDerivedFrom(CLASSNAME::GetStaticTypeObj()) != std::derived_from<T, CLASSNAME>) { \
return false; \
} \
}
FOR_EACH_K_CLASS_TOKEN_OBJECT_TYPE(CLASS_TOKEN_HANDLER)
#undef CLASS_TOKEN_HANDLER
return true;
}
consteval bool IsEveryAutoObjectInheritanceValid() {
#define CLASS_TOKEN_HANDLER(CLASSNAME) if (!IsAutoObjectInheritanceValidImpl<CLASSNAME>()) { return false; }
FOR_EACH_K_CLASS_TOKEN_OBJECT_TYPE(CLASS_TOKEN_HANDLER)
#undef CLASS_TOKEN_HANDLER
return true;
}
static_assert(IsEveryAutoObjectInheritanceValid());
template<typename T>
concept IsAutoObjectWithSpecializedGetId = std::derived_from<T, KAutoObjectWithList> && requires (const T &t, const KAutoObjectWithList &l) {
{ t.GetIdImpl() } -> std::same_as<decltype(l.GetId())>;
};
template<typename T>
struct AutoObjectWithListComparatorImpl {
using RedBlackKeyType = u64;
static ALWAYS_INLINE RedBlackKeyType GetRedBlackKey(const RedBlackKeyType &v) { return v; }
static ALWAYS_INLINE RedBlackKeyType GetRedBlackKey(const KAutoObjectWithList &v) {
if constexpr (IsAutoObjectWithSpecializedGetId<T>) {
return static_cast<const T &>(v).GetIdImpl();
} else {
return reinterpret_cast<u64>(std::addressof(v));
}
}
template<typename U> requires (std::same_as<U, KAutoObjectWithList> || std::same_as<U, RedBlackKeyType>)
static ALWAYS_INLINE int Compare(const U &lhs, const KAutoObjectWithList &rhs) {
const u64 lid = GetRedBlackKey(lhs);
const u64 rid = GetRedBlackKey(rhs);
if (lid < rid) {
return -1;
} else if (lid > rid) {
return 1;
} else {
return 0;
}
}
};
template<typename T>
using AutoObjectWithListComparator = AutoObjectWithListComparatorImpl<typename std::conditional<IsAutoObjectWithSpecializedGetId<T>, T, KAutoObjectWithList>::type>;
template<typename T>
using TrueObjectContainerListType = typename KAutoObjectWithListContainer<T>::ListType<AutoObjectWithListComparator<T>>;
template<typename T>
ALWAYS_INLINE TrueObjectContainerListType<T> &GetTrueObjectContainerList(typename KAutoObjectWithListContainer<T>::DummyListType &l) {
static_assert(alignof(l) == alignof(impl::TrueObjectContainerListType<T>));
static_assert(sizeof(l) == sizeof(impl::TrueObjectContainerListType<T>));
return *reinterpret_cast<TrueObjectContainerListType<T> *>(std::addressof(l));
}
}
inline NOINLINE void KAutoObject::ScheduleDestruction() {
MESOSPHERE_ASSERT_THIS();
/* Set our object to destroy. */
m_next_closed_object = GetCurrentThread().GetClosedObject();
/* Set ourselves as the thread's next object to destroy. */
GetCurrentThread().SetClosedObject(this);
}
template<typename T>
class KAutoObjectWithListContainer<T>::ListAccessor : public impl::KAutoObjectWithListContainerBase::ListAccessorImpl<impl::TrueObjectContainerListType<T>> {
NON_COPYABLE(ListAccessor);
NON_MOVEABLE(ListAccessor);
private:
using BaseListAccessor = impl::KAutoObjectWithListContainerBase::ListAccessorImpl<impl::TrueObjectContainerListType<T>>;
public:
explicit ALWAYS_INLINE ListAccessor(KAutoObjectWithListContainer *container) : BaseListAccessor(container, impl::GetTrueObjectContainerList<T>(container->m_dummy_object_list)) { /* ... */ }
explicit ALWAYS_INLINE ListAccessor(KAutoObjectWithListContainer &container) : BaseListAccessor(container, impl::GetTrueObjectContainerList<T>(container.m_dummy_object_list)) { /* ... */ }
ALWAYS_INLINE ~ListAccessor() { /* ... */ }
};
template<typename T>
ALWAYS_INLINE void KAutoObjectWithListContainer<T>::Register(T *obj) {
return this->RegisterImpl(obj, impl::GetTrueObjectContainerList<T>(m_dummy_object_list));
}
template<typename T>
ALWAYS_INLINE void KAutoObjectWithListContainer<T>::Unregister(T *obj) {
return this->UnregisterImpl(obj, impl::GetTrueObjectContainerList<T>(m_dummy_object_list));
}
template<typename T>
ALWAYS_INLINE size_t KAutoObjectWithListContainer<T>::GetOwnedCountChecked(const KProcess *owner) {
return this->GetOwnedCountImpl<T>(owner, impl::GetTrueObjectContainerList<T>(m_dummy_object_list));
}
inline u64 KAutoObjectWithList::GetId() const {
#define CLASS_TOKEN_HANDLER(CLASSNAME) \
if constexpr (impl::IsAutoObjectWithSpecializedGetId<CLASSNAME>) { \
if (const CLASSNAME * const derived = this->DynamicCast<const CLASSNAME *>(); derived != nullptr) { \
return []<typename T>(const T * const t_derived) ALWAYS_INLINE_LAMBDA -> u64 { \
static_assert(std::same_as<T, CLASSNAME>); \
if constexpr (impl::IsAutoObjectWithSpecializedGetId<CLASSNAME>) { \
return impl::AutoObjectWithListComparator<CLASSNAME>::GetRedBlackKey(*t_derived); \
} else { \
AMS_ASSUME(false); \
} \
}(derived); \
} \
}
FOR_EACH_K_CLASS_TOKEN_OBJECT_TYPE(CLASS_TOKEN_HANDLER)
#undef CLASS_TOKEN_HANDLER
return impl::AutoObjectWithListComparator<KAutoObjectWithList>::GetRedBlackKey(*this);
}
}
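/* Illustrative sketch -- not part of the kernel sources. The consteval check */
/* above asserts that the token-based IsDerivedFrom relation matches the C++ */
/* inheritance relation; this standalone analogue shows the shape of that test. */
#include <concepts>

namespace example {

    struct Base           { static constexpr unsigned TypeBits = 0b001; };
    struct Derived : Base { static constexpr unsigned TypeBits = 0b011; };
    struct Other          { static constexpr unsigned TypeBits = 0b100; };

    /* Token-style "derived from": every bit of the parent is present in the child. */
    consteval bool TokenDerivedFrom(unsigned child, unsigned parent) {
        return (child & parent) == parent;
    }

    /* The two notions of inheritance must agree, checked at compile time. */
    static_assert(TokenDerivedFrom(Derived::TypeBits, Base::TypeBits) == std::derived_from<Derived, Base>);
    static_assert(TokenDerivedFrom(Other::TypeBits,   Base::TypeBits) == std::derived_from<Other, Base>);

}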
| 8,491 | C++ | .h | 132 | 53.136364 | 201 | 0.543828 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
8,797 | kern_k_capabilities.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_capabilities.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_thread.hpp>
#include <mesosphere/kern_select_page_table.hpp>
#include <mesosphere/kern_svc.hpp>
namespace ams::kern {
class KCapabilities {
private:
static constexpr size_t InterruptIdCount = 0x400;
struct InterruptFlagSetTag{};
using InterruptFlagSet = util::BitFlagSet<InterruptIdCount, InterruptFlagSetTag>;
enum class CapabilityType : u32 {
CorePriority = (1u << 3) - 1,
SyscallMask = (1u << 4) - 1,
MapRange = (1u << 6) - 1,
MapIoPage = (1u << 7) - 1,
MapRegion = (1u << 10) - 1,
InterruptPair = (1u << 11) - 1,
ProgramType = (1u << 13) - 1,
KernelVersion = (1u << 14) - 1,
HandleTable = (1u << 15) - 1,
DebugFlags = (1u << 16) - 1,
Invalid = 0u,
Padding = ~0u,
};
using RawCapabilityValue = util::BitPack32::Field<0, BITSIZEOF(util::BitPack32), u32>;
static constexpr CapabilityType GetCapabilityType(const util::BitPack32 cap) {
const u32 value = cap.Get<RawCapabilityValue>();
return static_cast<CapabilityType>((~value & (value + 1)) - 1);
}
static constexpr u32 GetCapabilityFlag(CapabilityType type) {
return static_cast<u32>(type) + 1;
}
template<size_t Index, size_t Count, typename T = u32>
using Field = util::BitPack32::Field<Index, Count, T>;
#define DEFINE_FIELD(name, prev, ...) using name = Field<prev::Next, __VA_ARGS__>
template<CapabilityType Type>
static constexpr inline u32 CapabilityFlag = static_cast<u32>(Type) + 1;
template<CapabilityType Type>
static constexpr inline u32 CapabilityId = util::CountTrailingZeros<u32>(CapabilityFlag<Type>);
struct CorePriority {
using IdBits = Field<0, CapabilityId<CapabilityType::CorePriority> + 1>;
DEFINE_FIELD(LowestThreadPriority, IdBits, 6);
DEFINE_FIELD(HighestThreadPriority, LowestThreadPriority, 6);
DEFINE_FIELD(MinimumCoreId, HighestThreadPriority, 8);
DEFINE_FIELD(MaximumCoreId, MinimumCoreId, 8);
};
struct SyscallMask {
using IdBits = Field<0, CapabilityId<CapabilityType::SyscallMask> + 1>;
DEFINE_FIELD(Mask, IdBits, 24);
DEFINE_FIELD(Index, Mask, 3);
};
#if defined(MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES)
static constexpr u64 PhysicalMapAllowedMask = (1ul << 40) - 1;
#else
static constexpr u64 PhysicalMapAllowedMask = (1ul << 36) - 1;
#endif
struct MapRange {
using IdBits = Field<0, CapabilityId<CapabilityType::MapRange> + 1>;
DEFINE_FIELD(Address, IdBits, 24);
DEFINE_FIELD(ReadOnly, Address, 1, bool);
};
struct MapRangeSize {
using IdBits = Field<0, CapabilityId<CapabilityType::MapRange> + 1>;
DEFINE_FIELD(Pages, IdBits, 20);
#if defined(MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES)
DEFINE_FIELD(AddressHigh, Pages, 4);
DEFINE_FIELD(Normal, AddressHigh, 1, bool);
#else
DEFINE_FIELD(Reserved, Pages, 4);
DEFINE_FIELD(Normal, Reserved, 1, bool);
#endif
};
struct MapIoPage {
using IdBits = Field<0, CapabilityId<CapabilityType::MapIoPage> + 1>;
DEFINE_FIELD(Address, IdBits, 24);
};
enum class RegionType : u32 {
NoMapping = 0,
KernelTraceBuffer = 1,
OnMemoryBootImage = 2,
DTB = 3,
};
struct MapRegion {
using IdBits = Field<0, CapabilityId<CapabilityType::MapRegion> + 1>;
DEFINE_FIELD(Region0, IdBits, 6, RegionType);
DEFINE_FIELD(ReadOnly0, Region0, 1, bool);
DEFINE_FIELD(Region1, ReadOnly0, 6, RegionType);
DEFINE_FIELD(ReadOnly1, Region1, 1, bool);
DEFINE_FIELD(Region2, ReadOnly1, 6, RegionType);
DEFINE_FIELD(ReadOnly2, Region2, 1, bool);
};
static const u32 PaddingInterruptId = 0x3FF;
static_assert(PaddingInterruptId < InterruptIdCount);
struct InterruptPair {
using IdBits = Field<0, CapabilityId<CapabilityType::InterruptPair> + 1>;
DEFINE_FIELD(InterruptId0, IdBits, 10);
DEFINE_FIELD(InterruptId1, InterruptId0, 10);
};
struct ProgramType {
using IdBits = Field<0, CapabilityId<CapabilityType::ProgramType> + 1>;
DEFINE_FIELD(Type, IdBits, 3);
DEFINE_FIELD(Reserved, Type, 15);
};
struct KernelVersion {
using IdBits = Field<0, CapabilityId<CapabilityType::KernelVersion> + 1>;
DEFINE_FIELD(MinorVersion, IdBits, 4);
DEFINE_FIELD(MajorVersion, MinorVersion, 13);
};
struct HandleTable {
using IdBits = Field<0, CapabilityId<CapabilityType::HandleTable> + 1>;
DEFINE_FIELD(Size, IdBits, 10);
DEFINE_FIELD(Reserved, Size, 6);
};
struct DebugFlags {
using IdBits = Field<0, CapabilityId<CapabilityType::DebugFlags> + 1>;
DEFINE_FIELD(AllowDebug, IdBits, 1, bool);
DEFINE_FIELD(ForceDebugProd, AllowDebug, 1, bool);
DEFINE_FIELD(ForceDebug, ForceDebugProd, 1, bool);
DEFINE_FIELD(Reserved, ForceDebug, 12);
};
#undef DEFINE_FIELD
static constexpr u32 InitializeOnceFlags = CapabilityFlag<CapabilityType::CorePriority> |
CapabilityFlag<CapabilityType::ProgramType> |
CapabilityFlag<CapabilityType::KernelVersion> |
CapabilityFlag<CapabilityType::HandleTable> |
CapabilityFlag<CapabilityType::DebugFlags>;
private:
svc::SvcAccessFlagSet m_svc_access_flags;
InterruptFlagSet m_irq_access_flags;
u64 m_core_mask;
u64 m_phys_core_mask;
u64 m_priority_mask;
util::BitPack32 m_debug_capabilities;
s32 m_handle_table_size;
util::BitPack32 m_intended_kernel_version;
u32 m_program_type;
private:
constexpr bool SetSvcAllowed(u32 id) {
if (AMS_LIKELY(id < static_cast<u32>(m_svc_access_flags.GetCount()))) {
m_svc_access_flags[id] = true;
return true;
} else {
return false;
}
}
constexpr bool SetInterruptPermitted(u32 id) {
if (AMS_LIKELY(id < static_cast<u32>(m_irq_access_flags.GetCount()))) {
m_irq_access_flags[id] = true;
return true;
} else {
return false;
}
}
Result SetCorePriorityCapability(const util::BitPack32 cap);
Result SetSyscallMaskCapability(const util::BitPack32 cap, u32 &set_svc);
Result MapRange(const util::BitPack32 cap, const util::BitPack32 size_cap, KProcessPageTable *page_table);
Result MapIoPage(const util::BitPack32 cap, KProcessPageTable *page_table);
Result MapRegion(const util::BitPack32 cap, KProcessPageTable *page_table);
Result SetInterruptPairCapability(const util::BitPack32 cap);
Result SetProgramTypeCapability(const util::BitPack32 cap);
Result SetKernelVersionCapability(const util::BitPack32 cap);
Result SetHandleTableCapability(const util::BitPack32 cap);
Result SetDebugFlagsCapability(const util::BitPack32 cap);
template<typename F>
static Result ProcessMapRegionCapability(const util::BitPack32 cap, F f);
static Result CheckMapRegion(const util::BitPack32 cap);
Result SetCapability(const util::BitPack32 cap, u32 &set_flags, u32 &set_svc, KProcessPageTable *page_table);
Result SetCapabilities(const u32 *caps, s32 num_caps, KProcessPageTable *page_table);
Result SetCapabilities(svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KProcessPageTable *page_table);
public:
constexpr explicit KCapabilities(util::ConstantInitializeTag) : m_svc_access_flags{}, m_irq_access_flags{}, m_core_mask{}, m_phys_core_mask{}, m_priority_mask{}, m_debug_capabilities{0}, m_handle_table_size{}, m_intended_kernel_version{}, m_program_type{} { /* ... */ }
KCapabilities() { /* ... */ }
Result Initialize(const u32 *caps, s32 num_caps, KProcessPageTable *page_table);
Result Initialize(svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KProcessPageTable *page_table);
static Result CheckCapabilities(svc::KUserPointer<const u32 *> user_caps, s32 num_caps);
constexpr u64 GetCoreMask() const { return m_core_mask; }
constexpr u64 GetPhysicalCoreMask() const { return m_phys_core_mask; }
constexpr u64 GetPriorityMask() const { return m_priority_mask; }
constexpr s32 GetHandleTableSize() const { return m_handle_table_size; }
constexpr const svc::SvcAccessFlagSet &GetSvcPermissions() const { return m_svc_access_flags; }
constexpr bool IsPermittedSvc(svc::SvcId id) const {
return (id < m_svc_access_flags.GetCount()) && m_svc_access_flags[id];
}
constexpr bool IsPermittedInterrupt(u32 id) const {
return (id < m_irq_access_flags.GetCount()) && m_irq_access_flags[id];
}
constexpr bool IsPermittedDebug() const {
return m_debug_capabilities.Get<DebugFlags::AllowDebug>();
}
constexpr bool CanForceDebugProd() const {
return m_debug_capabilities.Get<DebugFlags::ForceDebugProd>();
}
constexpr bool CanForceDebug() const {
return m_debug_capabilities.Get<DebugFlags::ForceDebug>();
}
constexpr u32 GetIntendedKernelMajorVersion() const { return m_intended_kernel_version.Get<KernelVersion::MajorVersion>(); }
constexpr u32 GetIntendedKernelMinorVersion() const { return m_intended_kernel_version.Get<KernelVersion::MinorVersion>(); }
constexpr u32 GetIntendedKernelVersion() const { return ams::svc::EncodeKernelVersion(this->GetIntendedKernelMajorVersion(), this->GetIntendedKernelMinorVersion()); }
};
}
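/* Illustrative sketch -- not part of the kernel sources. GetCapabilityType */
/* above isolates the descriptor's low run of set bits with (~v & (v+1)) - 1; */
/* that run is the capability tag (e.g. 0b111 for CorePriority). */
#include <cstdint>

namespace example {

    /* v + 1 carries through the low ones, so ~v & (v + 1) is the single bit */
    /* just above the run, and subtracting 1 turns it into the run itself. */
    constexpr uint32_t GetLowOnesRun(uint32_t v) {
        return (~v & (v + 1)) - 1;
    }

    static_assert(GetLowOnesRun(0b10110111u) == 0b111u);  /* CorePriority-style tag */
    static_assert(GetLowOnesRun(0u)          == 0u);      /* Invalid: no tag bits set */
    static_assert(GetLowOnesRun(~0u)         == ~0u);     /* Padding: all bits set */

}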
| 12,185 | C++ | .h | 216 | 42.032407 | 281 | 0.581634 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
8,798 | kern_k_dynamic_slab_heap.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_slab_heap.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_slab_heap.hpp>
#include <mesosphere/kern_k_page_group.hpp>
#include <mesosphere/kern_k_memory_block.hpp>
#include <mesosphere/kern_k_dynamic_page_manager.hpp>
namespace ams::kern {
template<typename T, bool ClearNode = false>
class KDynamicSlabHeap : protected impl::KSlabHeapImpl {
NON_COPYABLE(KDynamicSlabHeap);
NON_MOVEABLE(KDynamicSlabHeap);
private:
using PageBuffer = KDynamicPageManager::PageBuffer;
private:
util::Atomic<size_t> m_used{0};
util::Atomic<size_t> m_peak{0};
util::Atomic<size_t> m_count{0};
KVirtualAddress m_address{Null<KVirtualAddress>};
size_t m_size{};
public:
constexpr KDynamicSlabHeap() = default;
constexpr ALWAYS_INLINE KVirtualAddress GetAddress() const { return m_address; }
constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; }
constexpr ALWAYS_INLINE size_t GetUsed() const { return m_used.Load(); }
constexpr ALWAYS_INLINE size_t GetPeak() const { return m_peak.Load(); }
constexpr ALWAYS_INLINE size_t GetCount() const { return m_count.Load(); }
constexpr ALWAYS_INLINE bool IsInRange(KVirtualAddress addr) const {
return this->GetAddress() <= addr && addr <= this->GetAddress() + this->GetSize() - 1;
}
ALWAYS_INLINE void Initialize(KDynamicPageManager *page_allocator, size_t num_objects) {
MESOSPHERE_ASSERT(page_allocator != nullptr);
/* Initialize members. */
m_address = page_allocator->GetAddress();
m_size = page_allocator->GetSize();
/* Initialize the base allocator. */
KSlabHeapImpl::Initialize();
/* Allocate until we have the correct number of objects. */
while (m_count.Load() < num_objects) {
auto *allocated = reinterpret_cast<T *>(page_allocator->Allocate());
MESOSPHERE_ABORT_UNLESS(allocated != nullptr);
for (size_t i = 0; i < sizeof(PageBuffer) / sizeof(T); i++) {
KSlabHeapImpl::Free(allocated + i);
}
m_count += sizeof(PageBuffer) / sizeof(T);
}
}
ALWAYS_INLINE T *Allocate(KDynamicPageManager *page_allocator) {
T *allocated = static_cast<T *>(KSlabHeapImpl::Allocate());
/* If we successfully allocated and we should clear the node, do so. */
if constexpr (ClearNode) {
if (AMS_LIKELY(allocated != nullptr)) {
reinterpret_cast<KSlabHeapImpl::Node *>(allocated)->next = nullptr;
}
}
/* If we fail to allocate, try to get a new page from our next allocator. */
if (AMS_UNLIKELY(allocated == nullptr)) {
if (page_allocator != nullptr) {
allocated = reinterpret_cast<T *>(page_allocator->Allocate());
if (allocated != nullptr) {
/* If we succeeded in getting a page, free the rest to our slab. */
for (size_t i = 1; i < sizeof(PageBuffer) / sizeof(T); i++) {
KSlabHeapImpl::Free(allocated + i);
}
m_count += sizeof(PageBuffer) / sizeof(T);
}
}
}
if (AMS_LIKELY(allocated != nullptr)) {
/* Construct the object. */
std::construct_at(allocated);
/* Update our tracking. */
const size_t used = ++m_used;
size_t peak = m_peak.Load();
while (peak < used) {
if (m_peak.CompareExchangeWeak<std::memory_order_relaxed>(peak, used)) {
break;
}
}
}
return allocated;
}
ALWAYS_INLINE void Free(T *t) {
KSlabHeapImpl::Free(t);
--m_used;
}
};
}
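/* Illustrative sketch -- not part of the kernel sources. The peak-tracking */
/* loop above is a lock-free "monotonic max"; this analogue uses std::atomic */
/* in place of the kernel's util::Atomic. */
#include <atomic>
#include <cstddef>

namespace example {

    inline void UpdatePeak(std::atomic<size_t> &peak, size_t used) {
        size_t cur = peak.load(std::memory_order_relaxed);
        /* Loop until the stored peak is >= used; compare_exchange_weak */
        /* reloads `cur` on failure, so a racing larger peak ends the loop. */
        while (cur < used) {
            if (peak.compare_exchange_weak(cur, used, std::memory_order_relaxed)) {
                break;
            }
        }
    }

}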
| 5,043 | C++ | .h | 102 | 35.343137 | 102 | 0.546821 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
8,799 | kern_k_event_info.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_event_info.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
namespace ams::kern {
class KEventInfo : public KSlabAllocated<KEventInfo>, public util::IntrusiveListBaseNode<KEventInfo> {
public:
struct InfoCreateThread {
u32 thread_id;
uintptr_t tls_address;
};
struct InfoExitProcess {
ams::svc::ProcessExitReason reason;
};
struct InfoExitThread {
ams::svc::ThreadExitReason reason;
};
struct InfoException {
ams::svc::DebugException exception_type;
s32 exception_data_count;
uintptr_t exception_address;
uintptr_t exception_data[std::max<size_t>(4, cpu::NumCores)];
};
struct InfoSystemCall {
s64 tick;
s32 id;
};
public:
ams::svc::DebugEvent event;
u32 thread_id;
u32 flags;
bool is_attached;
bool continue_flag;
bool ignore_continue;
bool close_once;
union {
InfoCreateThread create_thread;
InfoExitProcess exit_process;
InfoExitThread exit_thread;
InfoException exception;
InfoSystemCall system_call;
} info;
KThread *debug_thread;
public:
explicit KEventInfo() : is_attached(), continue_flag(), ignore_continue() { /* ... */ }
~KEventInfo() { /* ... */ }
};
}
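/* Illustrative sketch -- not part of the kernel sources. KEventInfo::info is */
/* a union discriminated by `event`; a reader must consult the tag before */
/* touching a member. Hypothetical standalone analogue: */
#include <cstdint>

namespace example {

    enum class Event { CreateThread, ExitThread };

    struct Info {
        Event event;   /* tag selecting the active union member */
        union {
            struct { uint32_t thread_id; uintptr_t tls_address; } create_thread;
            struct { int32_t reason; } exit_thread;
        } info;
    };

    inline uint32_t GetCreatedThreadId(const Info &i) {
        /* Reading create_thread is only valid when the tag says CreateThread. */
        return (i.event == Event::CreateThread) ? i.info.create_thread.thread_id : 0;
    }

}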
| 2,278 | C++ | .h | 62 | 26.758065 | 106 | 0.589407 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
8,800 | kern_k_memory_manager.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_memory_manager.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_light_lock.hpp>
#include <mesosphere/kern_k_memory_layout.hpp>
#include <mesosphere/kern_k_page_heap.hpp>
namespace ams::kern {
class KPageGroup;
class KMemoryManager {
public:
enum Pool {
Pool_Application = 0,
Pool_Applet = 1,
Pool_System = 2,
Pool_SystemNonSecure = 3,
Pool_Count,
Pool_Shift = 4,
Pool_Mask = (0xF << Pool_Shift),
/* Aliases. */
Pool_Unsafe = Pool_Application,
Pool_Secure = Pool_System,
};
enum Direction {
Direction_FromFront = 0,
Direction_FromBack = 1,
Direction_Shift = 0,
Direction_Mask = (0xF << Direction_Shift),
};
static constexpr size_t MaxManagerCount = 10;
private:
class Impl {
private:
using RefCount = u16;
public:
static size_t CalculateManagementOverheadSize(size_t region_size);
static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
return (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64);
}
private:
KPageHeap m_heap;
RefCount *m_page_reference_counts;
KVirtualAddress m_management_region;
Pool m_pool;
Impl *m_next;
Impl *m_prev;
public:
Impl() : m_heap(), m_page_reference_counts(), m_management_region(Null<KVirtualAddress>), m_pool(), m_next(), m_prev() { /* ... */ }
size_t Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p);
KPhysicalAddress AllocateBlock(s32 index, bool random) { return m_heap.AllocateBlock(index, random); }
KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) { return m_heap.AllocateAligned(index, num_pages, align_pages); }
void Free(KPhysicalAddress addr, size_t num_pages) { m_heap.Free(addr, num_pages); }
void SetInitialUsedHeapSize(size_t reserved_size) { m_heap.SetInitialUsedSize(reserved_size); }
void InitializeOptimizedMemory() { std::memset(GetVoidPointer(m_management_region), 0, CalculateOptimizedProcessOverheadSize(m_heap.GetSize())); }
void TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages);
void TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages);
bool ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern);
constexpr Pool GetPool() const { return m_pool; }
constexpr size_t GetSize() const { return m_heap.GetSize(); }
constexpr KPhysicalAddress GetEndAddress() const { return m_heap.GetEndAddress(); }
size_t GetFreeSize() const { return m_heap.GetFreeSize(); }
void DumpFreeList() const { return m_heap.DumpFreeList(); }
constexpr size_t GetPageOffset(KPhysicalAddress address) const { return m_heap.GetPageOffset(address); }
constexpr size_t GetPageOffsetToEnd(KPhysicalAddress address) const { return m_heap.GetPageOffsetToEnd(address); }
constexpr void SetNext(Impl *n) { m_next = n; }
constexpr void SetPrev(Impl *n) { m_prev = n; }
constexpr Impl *GetNext() const { return m_next; }
constexpr Impl *GetPrev() const { return m_prev; }
void OpenFirst(KPhysicalAddress address, size_t num_pages) {
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
while (index < end) {
const RefCount ref_count = (++m_page_reference_counts[index]);
MESOSPHERE_ABORT_UNLESS(ref_count == 1);
index++;
}
}
void Open(KPhysicalAddress address, size_t num_pages) {
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
while (index < end) {
const RefCount ref_count = (++m_page_reference_counts[index]);
MESOSPHERE_ABORT_UNLESS(ref_count > 1);
index++;
}
}
void Close(KPhysicalAddress address, size_t num_pages) {
size_t index = this->GetPageOffset(address);
const size_t end = index + num_pages;
size_t free_start = 0;
size_t free_count = 0;
while (index < end) {
MESOSPHERE_ABORT_UNLESS(m_page_reference_counts[index] > 0);
const RefCount ref_count = (--m_page_reference_counts[index]);
/* Keep track of how many zero refcounts we see in a row, to minimize calls to free. */
if (ref_count == 0) {
if (free_count > 0) {
free_count++;
} else {
free_start = index;
free_count = 1;
}
} else {
if (free_count > 0) {
this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
free_count = 0;
}
}
index++;
}
if (free_count > 0) {
this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
}
}
};
private:
KLightLock m_pool_locks[Pool_Count];
Impl *m_pool_managers_head[Pool_Count];
Impl *m_pool_managers_tail[Pool_Count];
Impl m_managers[MaxManagerCount];
size_t m_num_managers;
u64 m_optimized_process_ids[Pool_Count];
bool m_has_optimized_process[Pool_Count];
s32 m_min_heap_indexes[Pool_Count];
private:
Impl &GetManager(KPhysicalAddress address) {
return m_managers[KMemoryLayout::GetPhysicalLinearRegion(address).GetAttributes()];
}
const Impl &GetManager(KPhysicalAddress address) const {
return m_managers[KMemoryLayout::GetPhysicalLinearRegion(address).GetAttributes()];
}
constexpr Impl *GetFirstManager(Pool pool, Direction dir) {
return dir == Direction_FromBack ? m_pool_managers_tail[pool] : m_pool_managers_head[pool];
}
constexpr Impl *GetNextManager(Impl *cur, Direction dir) {
if (dir == Direction_FromBack) {
return cur->GetPrev();
} else {
return cur->GetNext();
}
}
Result AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index);
public:
KMemoryManager()
: m_pool_locks(), m_pool_managers_head(), m_pool_managers_tail(), m_managers(), m_num_managers(), m_optimized_process_ids(), m_has_optimized_process(), m_min_heap_indexes()
{
/* ... */
}
NOINLINE void Initialize(KVirtualAddress management_region, size_t management_region_size, const u32 *min_align_shifts);
NOINLINE Result InitializeOptimizedMemory(u64 process_id, Pool pool);
NOINLINE void FinalizeOptimizedMemory(u64 process_id, Pool pool);
NOINLINE KPhysicalAddress AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
NOINLINE Result AllocateAndOpen(KPageGroup *out, size_t num_pages, size_t align_pages, u32 option);
NOINLINE Result AllocateForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern);
Pool GetPool(KPhysicalAddress address) const {
return this->GetManager(address).GetPool();
}
void Open(KPhysicalAddress address, size_t num_pages) {
/* Repeatedly open references until we've done so for all pages. */
while (num_pages) {
auto &manager = this->GetManager(address);
const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
{
KScopedLightLock lk(m_pool_locks[manager.GetPool()]);
manager.Open(address, cur_pages);
}
num_pages -= cur_pages;
address += cur_pages * PageSize;
}
}
void OpenFirst(KPhysicalAddress address, size_t num_pages) {
/* Repeatedly open references until we've done so for all pages. */
while (num_pages) {
auto &manager = this->GetManager(address);
const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
{
KScopedLightLock lk(m_pool_locks[manager.GetPool()]);
manager.OpenFirst(address, cur_pages);
}
num_pages -= cur_pages;
address += cur_pages * PageSize;
}
}
void Close(KPhysicalAddress address, size_t num_pages) {
/* Repeatedly close references until we've done so for all pages. */
while (num_pages) {
auto &manager = this->GetManager(address);
const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
{
KScopedLightLock lk(m_pool_locks[manager.GetPool()]);
manager.Close(address, cur_pages);
}
num_pages -= cur_pages;
address += cur_pages * PageSize;
}
}
size_t GetSize() {
size_t total = 0;
for (size_t i = 0; i < m_num_managers; i++) {
total += m_managers[i].GetSize();
}
return total;
}
size_t GetSize(Pool pool) {
constexpr Direction GetSizeDirection = Direction_FromFront;
size_t total = 0;
for (auto *manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; manager = this->GetNextManager(manager, GetSizeDirection)) {
total += manager->GetSize();
}
return total;
}
size_t GetFreeSize() {
size_t total = 0;
for (size_t i = 0; i < m_num_managers; i++) {
KScopedLightLock lk(m_pool_locks[m_managers[i].GetPool()]);
total += m_managers[i].GetFreeSize();
}
return total;
}
size_t GetFreeSize(Pool pool) {
KScopedLightLock lk(m_pool_locks[pool]);
constexpr Direction GetSizeDirection = Direction_FromFront;
size_t total = 0;
for (auto *manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr; manager = this->GetNextManager(manager, GetSizeDirection)) {
total += manager->GetFreeSize();
}
return total;
}
void DumpFreeList(Pool pool) {
KScopedLightLock lk(m_pool_locks[pool]);
constexpr Direction DumpDirection = Direction_FromFront;
for (auto *manager = this->GetFirstManager(pool, DumpDirection); manager != nullptr; manager = this->GetNextManager(manager, DumpDirection)) {
manager->DumpFreeList();
}
}
size_t GetMinimumAlignment(Pool pool) {
return KPageHeap::GetBlockSize(m_min_heap_indexes[pool]);
}
public:
static size_t CalculateManagementOverheadSize(size_t region_size) {
return Impl::CalculateManagementOverheadSize(region_size);
}
static constexpr ALWAYS_INLINE u32 EncodeOption(Pool pool, Direction dir) {
return (pool << Pool_Shift) | (dir << Direction_Shift);
}
static constexpr ALWAYS_INLINE Pool GetPool(u32 option) {
return static_cast<Pool>((option & Pool_Mask) >> Pool_Shift);
}
static constexpr ALWAYS_INLINE Direction GetDirection(u32 option) {
return static_cast<Direction>((option & Direction_Mask) >> Direction_Shift);
}
static constexpr ALWAYS_INLINE std::tuple<Pool, Direction> DecodeOption(u32 option) {
return std::make_tuple(GetPool(option), GetDirection(option));
}
};
}
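/* Illustrative sketch -- not part of the kernel sources. EncodeOption packs */
/* (pool, direction) into one u32 and DecodeOption inverts it; the constants */
/* below mirror Pool_Shift/Pool_Mask and Direction_Shift/Direction_Mask. */
#include <cstdint>
#include <tuple>

namespace example {

    constexpr uint32_t PoolShift = 4, PoolMask = 0xFu << PoolShift;
    constexpr uint32_t DirShift  = 0, DirMask  = 0xFu << DirShift;

    constexpr uint32_t Encode(uint32_t pool, uint32_t dir) {
        return (pool << PoolShift) | (dir << DirShift);
    }

    constexpr std::tuple<uint32_t, uint32_t> Decode(uint32_t option) {
        return { (option & PoolMask) >> PoolShift, (option & DirMask) >> DirShift };
    }

    /* Pool_System = 2 allocated from the back (Direction_FromBack = 1). */
    static_assert(Decode(Encode(2, 1)) == std::tuple<uint32_t, uint32_t>(2, 1));

}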
| 14,660 | C++ | .h | 269 | 36.866171 | 188 | 0.53004 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
8,801 | kern_k_scoped_lock.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_scoped_lock.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
namespace ams::kern {
template<typename T>
concept KLockable = !std::is_reference<T>::value && requires (T &t) {
{ t.Lock() } -> std::same_as<void>;
{ t.Unlock() } -> std::same_as<void>;
};
template<typename T> requires KLockable<T>
class KScopedLock {
NON_COPYABLE(KScopedLock);
NON_MOVEABLE(KScopedLock);
private:
T &m_lock;
public:
explicit ALWAYS_INLINE KScopedLock(T &l) : m_lock(l) { m_lock.Lock(); }
explicit ALWAYS_INLINE KScopedLock(T *l) : KScopedLock(*l) { /* ... */ }
ALWAYS_INLINE ~KScopedLock() { m_lock.Unlock(); }
};
}
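/* Illustrative sketch -- not part of the kernel sources. Any type exposing */
/* Lock()/Unlock() satisfies KLockable, and the scoped lock then guarantees */
/* release on every path out of the scope. Standalone analogue: */
#include <mutex>

namespace example {

    /* A minimal lockable with the same member shape the concept requires. */
    class Lockable {
        std::mutex m_mutex;
    public:
        void Lock()   { m_mutex.lock(); }
        void Unlock() { m_mutex.unlock(); }
    };

    template<typename T>
    class ScopedLock {
        T &m_lock;
    public:
        explicit ScopedLock(T &l) : m_lock(l) { m_lock.Lock(); }
        ~ScopedLock() { m_lock.Unlock(); }
        ScopedLock(const ScopedLock &) = delete;
        ScopedLock &operator=(const ScopedLock &) = delete;
    };

    inline void Use(Lockable &l) {
        ScopedLock lk(l);   /* locked on construction... */
    }                       /* ...unlocked in the destructor, even on early return or exception. */

}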
| 1,347 | C++ | .h | 35 | 33.542857 | 84 | 0.662844 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
8,802 | kern_k_unused_slab_memory.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_unused_slab_memory.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
namespace ams::kern {
/* Utilities to allocate/free memory from the "unused" gaps between slab heaps. */
/* See KTargetSystem::IsDynamicResourceLimitsEnabled() usage for more context. */
KVirtualAddress AllocateUnusedSlabMemory(size_t size, size_t alignment);
void FreeUnusedSlabMemory(KVirtualAddress address, size_t size);
}
| 1,079 | C++ | .h | 24 | 42.583333 | 86 | 0.76616 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
8,803 | kern_k_dynamic_page_manager.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_dynamic_page_manager.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_spin_lock.hpp>
#include <mesosphere/kern_k_slab_heap.hpp>
#include <mesosphere/kern_k_page_group.hpp>
#include <mesosphere/kern_k_memory_block.hpp>
#include <mesosphere/kern_k_page_bitmap.hpp>
#include <mesosphere/kern_select_interrupt_manager.hpp>
namespace ams::kern {
class KDynamicPageManager {
public:
class PageBuffer {
private:
u8 m_buffer[PageSize];
};
static_assert(sizeof(PageBuffer) == PageSize);
private:
KSpinLock m_lock;
KPageBitmap m_page_bitmap;
size_t m_used;
size_t m_peak;
size_t m_count;
KVirtualAddress m_address;
KVirtualAddress m_aligned_address;
size_t m_size;
public:
KDynamicPageManager() : m_lock(), m_page_bitmap(), m_used(), m_peak(), m_count(), m_address(Null<KVirtualAddress>), m_aligned_address(Null<KVirtualAddress>), m_size() { /* ... */ }
Result Initialize(KVirtualAddress memory, size_t size, size_t align) {
/* We need to have positive size. */
R_UNLESS(size > 0, svc::ResultOutOfMemory());
/* Set addresses. */
m_address = memory;
m_aligned_address = util::AlignDown(GetInteger(memory), align);
/* Calculate extents. */
const size_t managed_size = m_address + size - m_aligned_address;
const size_t overhead_size = util::AlignUp(KPageBitmap::CalculateManagementOverheadSize(managed_size / sizeof(PageBuffer)), sizeof(PageBuffer));
R_UNLESS(overhead_size < size, svc::ResultOutOfMemory());
/* Set tracking fields. */
m_size = util::AlignDown(size - overhead_size, sizeof(PageBuffer));
m_count = m_size / sizeof(PageBuffer);
/* Clear the management region. */
u64 *management_ptr = GetPointer<u64>(m_address + size - overhead_size);
std::memset(management_ptr, 0, overhead_size);
/* Initialize the bitmap. */
const size_t allocatable_region_size = (GetInteger(m_address) + size - overhead_size) - GetInteger(m_aligned_address);
MESOSPHERE_ABORT_UNLESS(allocatable_region_size >= sizeof(PageBuffer));
m_page_bitmap.Initialize(management_ptr, allocatable_region_size / sizeof(PageBuffer));
/* Free the pages to the bitmap. */
for (size_t i = 0; i < m_count; i++) {
/* Ensure the freed page is all-zero. */
cpu::ClearPageToZero(GetPointer<PageBuffer>(m_address) + i);
/* Set the bit for the free page. */
m_page_bitmap.SetBit((GetInteger(m_address) + (i * sizeof(PageBuffer)) - GetInteger(m_aligned_address)) / sizeof(PageBuffer));
}
R_SUCCEED();
}
constexpr KVirtualAddress GetAddress() const { return m_address; }
constexpr size_t GetSize() const { return m_size; }
constexpr size_t GetUsed() const { return m_used; }
constexpr size_t GetPeak() const { return m_peak; }
constexpr size_t GetCount() const { return m_count; }
PageBuffer *Allocate() {
/* Take the lock. */
KScopedInterruptDisable di;
KScopedSpinLock lk(m_lock);
/* Find a random free block. */
ssize_t soffset = m_page_bitmap.FindFreeBlock(true);
if (AMS_UNLIKELY(soffset < 0)) {
return nullptr;
}
const size_t offset = static_cast<size_t>(soffset);
/* Update our tracking. */
m_page_bitmap.ClearBit(offset);
m_peak = std::max(m_peak, (++m_used));
return GetPointer<PageBuffer>(m_aligned_address) + offset;
}
PageBuffer *Allocate(size_t count) {
/* Take the lock. */
KScopedInterruptDisable di;
KScopedSpinLock lk(m_lock);
/* Find a random free block. */
ssize_t soffset = m_page_bitmap.FindFreeRange(count);
if (AMS_UNLIKELY(soffset < 0)) {
return nullptr;
}
const size_t offset = static_cast<size_t>(soffset);
/* Update our tracking. */
m_page_bitmap.ClearRange(offset, count);
m_used += count;
m_peak = std::max(m_peak, m_used);
return GetPointer<PageBuffer>(m_aligned_address) + offset;
}
void Free(PageBuffer *pb) {
/* Ensure all pages in the heap are zero. */
cpu::ClearPageToZero(pb);
/* Take the lock. */
KScopedInterruptDisable di;
KScopedSpinLock lk(m_lock);
/* Set the bit for the free page. */
size_t offset = (reinterpret_cast<uintptr_t>(pb) - GetInteger(m_aligned_address)) / sizeof(PageBuffer);
m_page_bitmap.SetBit(offset);
/* Decrement our used count. */
--m_used;
}
};
}
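/* Illustrative sketch -- not part of the kernel sources. The bitmap above is */
/* indexed relative to the aligned-down base address, so mapping a page to */
/* its bit is one subtraction and one division. Standalone analogue: */
#include <cstdint>

namespace example {

    constexpr uint64_t PageSize = 0x1000;

    constexpr uint64_t AlignDown(uint64_t value, uint64_t align) {
        return value & ~(align - 1);
    }

    constexpr uint64_t PageBitIndex(uint64_t page, uint64_t base, uint64_t align) {
        return (page - AlignDown(base, align)) / PageSize;
    }

    /* Base 0x80003000 aligns down to 0x80000000; the page at 0x80005000 is bit 5. */
    static_assert(PageBitIndex(0x80005000, 0x80003000, 0x10000) == 5);

}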
| 6,056 | C++ | .h | 121 | 37 | 192 | 0.566785 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
8,804 | kern_select_system_control.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_select_system_control.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_system_control_base.hpp>
#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
#include <mesosphere/board/nintendo/nx/kern_k_system_control.hpp>
namespace ams::kern {
using ams::kern::board::nintendo::nx::KSystemControl;
}
#elif defined(ATMOSPHERE_BOARD_QEMU_VIRT)
#include <mesosphere/board/qemu/virt/kern_k_system_control.hpp>
namespace ams::kern {
using ams::kern::board::qemu::virt::KSystemControl;
}
#else
#error "Unknown board for KSystemControl"
#endif
namespace ams::kern {
ALWAYS_INLINE u32 KSystemControlBase::ReadRegisterPrivileged(ams::svc::PhysicalAddress address) {
u32 v;
KSystemControl::ReadWriteRegisterPrivileged(std::addressof(v), address, 0x00000000u, 0);
return v;
}
ALWAYS_INLINE void KSystemControlBase::WriteRegisterPrivileged(ams::svc::PhysicalAddress address, u32 value) {
u32 v;
KSystemControl::ReadWriteRegisterPrivileged(std::addressof(v), address, 0xFFFFFFFFu, value);
}
}
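/* Illustrative sketch -- not part of the kernel sources. The wrappers above */
/* suggest masked read-modify-write semantics for ReadWriteRegisterPrivileged */
/* (mask 0 reads, mask ~0 writes); this analogue shows that masking pattern, */
/* which is an assumption, not a statement about the real SMC interface. */
#include <cstdint>

namespace example {

    /* new = (old & ~mask) | (value & mask); the previous value is returned. */
    constexpr uint32_t ReadWriteMasked(uint32_t &reg, uint32_t mask, uint32_t value) {
        const uint32_t old = reg;
        reg = (old & ~mask) | (value & mask);
        return old;
    }

    constexpr bool Check() {
        uint32_t reg = 0xAABBCCDD;
        ReadWriteMasked(reg, 0x0000FF00, 0x00001100);   /* update only byte 1 */
        return reg == 0xAABB11DD;
    }
    static_assert(Check());

}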
| 1,708 | C++ | .h | 42 | 36.738095 | 114 | 0.742909 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
8,805 | kern_k_memory_layout.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_memory_layout.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/init/kern_init_page_table_select.hpp>
#include <mesosphere/kern_k_memory_region.hpp>
#if defined(ATMOSPHERE_BOARD_NINTENDO_NX)
#include <mesosphere/board/nintendo/nx/kern_k_memory_layout.hpp>
#elif defined(ATMOSPHERE_BOARD_QEMU_VIRT)
#include <mesosphere/board/qemu/virt/kern_k_memory_layout.hpp>
#else
#error "Unknown board for KMemoryLayout"
#endif
namespace ams::kern {
constexpr size_t KernelAslrAlignment = 2_MB;
constexpr size_t KernelVirtualAddressSpaceWidth = size_t(1ul) << 39ul;
constexpr size_t KernelPhysicalAddressSpaceWidth = size_t(1ul) << 48ul;
constexpr size_t KernelVirtualAddressSpaceBase = 0ul - KernelVirtualAddressSpaceWidth;
constexpr size_t KernelVirtualAddressSpaceEnd = KernelVirtualAddressSpaceBase + (KernelVirtualAddressSpaceWidth - KernelAslrAlignment);
constexpr size_t KernelVirtualAddressSpaceLast = KernelVirtualAddressSpaceEnd - 1ul;
constexpr size_t KernelVirtualAddressSpaceSize = KernelVirtualAddressSpaceEnd - KernelVirtualAddressSpaceBase;
constexpr size_t KernelPhysicalAddressSpaceBase = 0ul;
constexpr size_t KernelPhysicalAddressSpaceEnd = KernelPhysicalAddressSpaceBase + KernelPhysicalAddressSpaceWidth;
constexpr size_t KernelPhysicalAddressSpaceLast = KernelPhysicalAddressSpaceEnd - 1ul;
constexpr size_t KernelPhysicalAddressSpaceSize = KernelPhysicalAddressSpaceEnd - KernelPhysicalAddressSpaceBase;
constexpr size_t KernelPageTableHeapSize = init::KInitialPageTable::GetMaximumOverheadSize(kern::MainMemorySizeMax);
constexpr size_t KernelInitialPageHeapSize = 128_KB;
constexpr size_t KernelSlabHeapDataSize = 5_MB;
constexpr size_t KernelSlabHeapGapsSizeMax = 2_MB - 64_KB;
constexpr size_t KernelSlabHeapSize = KernelSlabHeapDataSize + KernelSlabHeapGapsSizeMax;
/* NOTE: This is calculated from KThread slab counts, assuming KThread size <= 0x800. */
constexpr size_t KernelPageBufferHeapSize = 0x3E0000;
constexpr size_t KernelSlabHeapAdditionalSize = 0x148000;
constexpr size_t KernelPageBufferAdditionalSize = 0x33C000;
constexpr size_t KernelResourceSize = KernelPageTableHeapSize + KernelInitialPageHeapSize + KernelSlabHeapSize + KernelPageBufferHeapSize;
class KMemoryLayout {
private:
static constinit inline uintptr_t s_linear_phys_to_virt_diff;
static constinit inline uintptr_t s_linear_virt_to_phys_diff;
static constinit inline KMemoryRegionTree s_virtual_tree;
static constinit inline KMemoryRegionTree s_physical_tree;
static constinit inline KMemoryRegionTree s_virtual_linear_tree;
static constinit inline KMemoryRegionTree s_physical_linear_tree;
private:
template<typename AddressType> requires IsKTypedAddress<AddressType>
static ALWAYS_INLINE bool IsTypedAddress(const KMemoryRegion *®ion, AddressType address, KMemoryRegionTree &tree, KMemoryRegionType type) {
/* Check if the cached region already contains the address. */
if (region != nullptr && region->Contains(GetInteger(address))) {
return true;
}
/* Find the containing region, and update the cache. */
if (const KMemoryRegion *found = tree.Find(GetInteger(address)); found != nullptr && found->IsDerivedFrom(type)) {
region = found;
return true;
} else {
return false;
}
}
template<typename AddressType> requires IsKTypedAddress<AddressType>
static ALWAYS_INLINE bool IsTypedAddress(const KMemoryRegion *®ion, AddressType address, size_t size, KMemoryRegionTree &tree, KMemoryRegionType type) {
/* Get the end of the checked region. */
const uintptr_t last_address = GetInteger(address) + size - 1;
/* Walk the tree to verify the region is correct. */
const KMemoryRegion *cur = (region != nullptr && region->Contains(GetInteger(address))) ? region : tree.Find(GetInteger(address));
while (cur != nullptr && cur->IsDerivedFrom(type)) {
if (last_address <= cur->GetLastAddress()) {
region = cur;
return true;
}
cur = cur->GetNext();
}
return false;
}
template<typename AddressType> requires IsKTypedAddress<AddressType>
static ALWAYS_INLINE const KMemoryRegion *Find(AddressType address, const KMemoryRegionTree &tree) {
return tree.Find(GetInteger(address));
}
static ALWAYS_INLINE KMemoryRegion &Dereference(KMemoryRegion *region) {
MESOSPHERE_INIT_ABORT_UNLESS(region != nullptr);
return *region;
}
static ALWAYS_INLINE const KMemoryRegion &Dereference(const KMemoryRegion *region) {
MESOSPHERE_INIT_ABORT_UNLESS(region != nullptr);
return *region;
}
static ALWAYS_INLINE KVirtualAddress GetStackTopAddress(s32 core_id, KMemoryRegionType type) {
const auto ®ion = Dereference(GetVirtualMemoryRegionTree().FindByTypeAndAttribute(type, static_cast<u32>(core_id)));
MESOSPHERE_INIT_ABORT_UNLESS(region.GetEndAddress() != 0);
return region.GetEndAddress();
}
public:
static ALWAYS_INLINE KMemoryRegionTree &GetVirtualMemoryRegionTree() { return s_virtual_tree; }
static ALWAYS_INLINE KMemoryRegionTree &GetPhysicalMemoryRegionTree() { return s_physical_tree; }
static ALWAYS_INLINE KMemoryRegionTree &GetVirtualLinearMemoryRegionTree() { return s_virtual_linear_tree; }
static ALWAYS_INLINE KMemoryRegionTree &GetPhysicalLinearMemoryRegionTree() { return s_physical_linear_tree; }
static ALWAYS_INLINE KVirtualAddress GetLinearVirtualAddress(KPhysicalAddress address) { return GetInteger(address) + s_linear_phys_to_virt_diff; }
static ALWAYS_INLINE KPhysicalAddress GetLinearPhysicalAddress(KVirtualAddress address) { return GetInteger(address) + s_linear_virt_to_phys_diff; }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion *Find(KVirtualAddress address) { return Find(address, GetVirtualMemoryRegionTree()); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion *Find(KPhysicalAddress address) { return Find(address, GetPhysicalMemoryRegionTree()); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion *FindLinear(KVirtualAddress address) { return Find(address, GetVirtualLinearMemoryRegionTree()); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion *FindLinear(KPhysicalAddress address) { return Find(address, GetPhysicalLinearMemoryRegionTree()); }
static MESOSPHERE_NOINLINE_IF_DEBUG KVirtualAddress GetMainStackTopAddress(s32 core_id) { return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscMainStack); }
static MESOSPHERE_NOINLINE_IF_DEBUG KVirtualAddress GetIdleStackTopAddress(s32 core_id) { return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscIdleStack); }
static MESOSPHERE_NOINLINE_IF_DEBUG KVirtualAddress GetExceptionStackTopAddress(s32 core_id) { return GetStackTopAddress(core_id, KMemoryRegionType_KernelMiscExceptionStack); }
static MESOSPHERE_NOINLINE_IF_DEBUG KVirtualAddress GetSlabRegionAddress() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab)).GetAddress(); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetDeviceRegion(KMemoryRegionType type) { return Dereference(GetPhysicalMemoryRegionTree().FindFirstDerived(type)); }
static KPhysicalAddress GetDevicePhysicalAddress(KMemoryRegionType type) { return GetDeviceRegion(type).GetAddress(); }
static KVirtualAddress GetDeviceVirtualAddress(KMemoryRegionType type) { return GetDeviceRegion(type).GetPairAddress(); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetPoolManagementRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramPoolManagement)); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetPageTableHeapRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelPtHeap)); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetKernelStackRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelStack)); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetTempRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelTemp)); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetSlabRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_KernelSlab)); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetKernelTraceBufferRegion() { return Dereference(GetVirtualLinearMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelTraceBuffer)); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetSecureAppletMemoryRegion() { return Dereference(GetVirtualMemoryRegionTree().FindByType(KMemoryRegionType_VirtualDramKernelSecureAppletMemory)); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetVirtualLinearRegion(KVirtualAddress address) { return Dereference(FindLinear(address)); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion &GetPhysicalLinearRegion(KPhysicalAddress address) { return Dereference(FindLinear(address)); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion *GetPhysicalKernelTraceBufferRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_KernelTraceBuffer); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion *GetPhysicalOnMemoryBootImageRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_OnMemoryBootImage); }
static MESOSPHERE_NOINLINE_IF_DEBUG const KMemoryRegion *GetPhysicalDTBRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DTB); }
static MESOSPHERE_NOINLINE_IF_DEBUG bool IsHeapPhysicalAddress(const KMemoryRegion *®ion, KPhysicalAddress address) { return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(), KMemoryRegionType_DramUserPool); }
static MESOSPHERE_NOINLINE_IF_DEBUG bool IsHeapVirtualAddress(const KMemoryRegion *®ion, KVirtualAddress address) { return IsTypedAddress(region, address, GetVirtualLinearMemoryRegionTree(), KMemoryRegionType_VirtualDramUserPool); }
static MESOSPHERE_NOINLINE_IF_DEBUG bool IsHeapPhysicalAddress(const KMemoryRegion *®ion, KPhysicalAddress address, size_t size) { return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(), KMemoryRegionType_DramUserPool); }
static MESOSPHERE_NOINLINE_IF_DEBUG bool IsHeapVirtualAddress(const KMemoryRegion *®ion, KVirtualAddress address, size_t size) { return IsTypedAddress(region, address, size, GetVirtualLinearMemoryRegionTree(), KMemoryRegionType_VirtualDramUserPool); }
static MESOSPHERE_NOINLINE_IF_DEBUG bool IsLinearMappedPhysicalAddress(const KMemoryRegion *®ion, KPhysicalAddress address) { return IsTypedAddress(region, address, GetPhysicalLinearMemoryRegionTree(), static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped)); }
static MESOSPHERE_NOINLINE_IF_DEBUG bool IsLinearMappedPhysicalAddress(const KMemoryRegion *®ion, KPhysicalAddress address, size_t size) { return IsTypedAddress(region, address, size, GetPhysicalLinearMemoryRegionTree(), static_cast<KMemoryRegionType>(KMemoryRegionAttr_LinearMapped)); }
static std::tuple<size_t, size_t> GetTotalAndKernelMemorySizes() {
size_t total_size = 0, kernel_size = 0;
for (const auto ®ion : GetPhysicalMemoryRegionTree()) {
if (region.IsDerivedFrom(KMemoryRegionType_Dram)) {
total_size += region.GetSize();
if (!region.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
kernel_size += region.GetSize();
}
}
}
return std::make_tuple(total_size, kernel_size);
}
static void InitializeLinearMemoryAddresses(u64 phys_to_virt_diff) {
/* Set static differences. */
s_linear_phys_to_virt_diff = phys_to_virt_diff;
s_linear_virt_to_phys_diff = -phys_to_virt_diff;
}
static void InitializeLinearMemoryRegionTrees();
static size_t GetResourceRegionSizeForInit(bool use_extra_resource);
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelRegionExtents() { return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Kernel); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelCodeRegionExtents() { return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelCode); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelStackRegionExtents() { return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelStack); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelMiscRegionExtents() { return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelMisc); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelSlabRegionExtents() { return GetVirtualMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelSlab); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetLinearRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionAttr_LinearMapped); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetLinearRegionVirtualExtents() {
const auto physical = GetLinearRegionPhysicalExtents();
return KMemoryRegion(GetInteger(GetLinearVirtualAddress(physical.GetAddress())), GetInteger(GetLinearVirtualAddress(physical.GetLastAddress())), 0, KMemoryRegionType_None);
}
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetMainMemoryPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_Dram); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetCarveoutRegionExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionAttr_CarveoutProtected); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelBase); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelCodeRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelCode); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelSlabRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelSlab); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelSecureAppletMemoryRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelSecureAppletMemory); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelPageTableHeapRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelPtHeap); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelInitPageTableRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramKernelInitPt); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelPoolPartitionRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramPoolPartition); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelPoolManagementRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramPoolManagement); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelSystemPoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramSystemPool); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelSystemNonSecurePoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramSystemNonSecurePool); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelAppletPoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramAppletPool); }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelApplicationPoolRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_DramApplicationPool); }
static MESOSPHERE_NOINLINE_IF_DEBUG bool HasKernelSystemNonSecurePoolRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramSystemNonSecurePool) != nullptr; }
static MESOSPHERE_NOINLINE_IF_DEBUG bool HasKernelAppletPoolRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramAppletPool) != nullptr; }
static MESOSPHERE_NOINLINE_IF_DEBUG bool HasKernelApplicationPoolRegion() { return GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramApplicationPool) != nullptr; }
static MESOSPHERE_NOINLINE_IF_DEBUG auto GetKernelTraceBufferRegionPhysicalExtents() { return GetPhysicalMemoryRegionTree().GetDerivedRegionExtents(KMemoryRegionType_KernelTraceBuffer); }
};
namespace init {
/* These should be generic, regardless of board. */
void SetupPoolPartitionMemoryRegions();
/* These may be implemented in a board-specific manner. */
void SetupDevicePhysicalMemoryRegions();
void SetupDramPhysicalMemoryRegions();
}
}
| 19,292
|
C++
|
.h
| 195
| 87.061538
| 302
| 0.748097
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,806
|
kern_select_debug.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_select_debug.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#if defined(ATMOSPHERE_ARCH_ARM64)
#include <mesosphere/arch/arm64/kern_k_debug.hpp>
namespace ams::kern {
using ams::kern::arch::arm64::KDebug;
}
#else
#error "Unknown architecture for KDebug"
#endif
| 916
|
C++
|
.h
| 25
| 33.92
| 76
| 0.74605
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,807
|
kern_k_memory_block.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_memory_block.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
namespace ams::kern {
enum KMemoryState : u32 {
KMemoryState_None = 0,
KMemoryState_Mask = 0xFF,
KMemoryState_All = ~KMemoryState_None,
KMemoryState_FlagCanReprotect = (1 << 8),
KMemoryState_FlagCanDebug = (1 << 9),
KMemoryState_FlagCanUseIpc = (1 << 10),
KMemoryState_FlagCanUseNonDeviceIpc = (1 << 11),
KMemoryState_FlagCanUseNonSecureIpc = (1 << 12),
KMemoryState_FlagMapped = (1 << 13),
KMemoryState_FlagCode = (1 << 14),
KMemoryState_FlagCanAlias = (1 << 15),
KMemoryState_FlagCanCodeAlias = (1 << 16),
KMemoryState_FlagCanTransfer = (1 << 17),
KMemoryState_FlagCanQueryPhysical = (1 << 18),
KMemoryState_FlagCanDeviceMap = (1 << 19),
KMemoryState_FlagCanAlignedDeviceMap = (1 << 20),
KMemoryState_FlagCanIpcUserBuffer = (1 << 21),
KMemoryState_FlagReferenceCounted = (1 << 22),
KMemoryState_FlagCanMapProcess = (1 << 23),
KMemoryState_FlagCanChangeAttribute = (1 << 24),
KMemoryState_FlagCanCodeMemory = (1 << 25),
KMemoryState_FlagLinearMapped = (1 << 26),
KMemoryState_FlagCanPermissionLock = (1 << 27),
KMemoryState_FlagsData = KMemoryState_FlagCanReprotect | KMemoryState_FlagCanUseIpc |
KMemoryState_FlagCanUseNonDeviceIpc | KMemoryState_FlagCanUseNonSecureIpc |
KMemoryState_FlagMapped | KMemoryState_FlagCanAlias |
KMemoryState_FlagCanTransfer | KMemoryState_FlagCanQueryPhysical |
KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap |
KMemoryState_FlagCanIpcUserBuffer | KMemoryState_FlagReferenceCounted |
KMemoryState_FlagCanChangeAttribute | KMemoryState_FlagLinearMapped,
KMemoryState_FlagsCode = KMemoryState_FlagCanDebug | KMemoryState_FlagCanUseIpc |
KMemoryState_FlagCanUseNonDeviceIpc | KMemoryState_FlagCanUseNonSecureIpc |
KMemoryState_FlagMapped | KMemoryState_FlagCode |
KMemoryState_FlagCanQueryPhysical | KMemoryState_FlagCanDeviceMap |
KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagReferenceCounted |
KMemoryState_FlagLinearMapped,
KMemoryState_FlagsMisc = KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted |
KMemoryState_FlagCanQueryPhysical | KMemoryState_FlagCanDeviceMap |
KMemoryState_FlagLinearMapped,
KMemoryState_Free = ams::svc::MemoryState_Free,
KMemoryState_IoMemory = ams::svc::MemoryState_Io | KMemoryState_FlagMapped | KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap,
KMemoryState_IoRegister = ams::svc::MemoryState_Io | KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap,
KMemoryState_Static = ams::svc::MemoryState_Static | KMemoryState_FlagCanQueryPhysical,
KMemoryState_Code = ams::svc::MemoryState_Code | KMemoryState_FlagsCode | KMemoryState_FlagCanMapProcess,
KMemoryState_CodeData = ams::svc::MemoryState_CodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeMemory | KMemoryState_FlagCanPermissionLock,
KMemoryState_Normal = ams::svc::MemoryState_Normal | KMemoryState_FlagsData | KMemoryState_FlagCanCodeMemory,
KMemoryState_Shared = ams::svc::MemoryState_Shared | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped,
/* KMemoryState_Alias was removed after 1.0.0. */
KMemoryState_AliasCode = ams::svc::MemoryState_AliasCode | KMemoryState_FlagsCode | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeAlias,
KMemoryState_AliasCodeData = ams::svc::MemoryState_AliasCodeData | KMemoryState_FlagsData | KMemoryState_FlagCanMapProcess | KMemoryState_FlagCanCodeAlias | KMemoryState_FlagCanCodeMemory
| KMemoryState_FlagCanPermissionLock,
KMemoryState_Ipc = ams::svc::MemoryState_Ipc | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
| KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
KMemoryState_Stack = ams::svc::MemoryState_Stack | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
| KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
KMemoryState_ThreadLocal = ams::svc::MemoryState_ThreadLocal | KMemoryState_FlagLinearMapped,
KMemoryState_Transfered = ams::svc::MemoryState_Transfered | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagCanChangeAttribute
| KMemoryState_FlagCanUseIpc | KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
KMemoryState_SharedTransfered = ams::svc::MemoryState_SharedTransfered | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
| KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
KMemoryState_SharedCode = ams::svc::MemoryState_SharedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped
| KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
KMemoryState_Inaccessible = ams::svc::MemoryState_Inaccessible,
KMemoryState_NonSecureIpc = ams::svc::MemoryState_NonSecureIpc | KMemoryState_FlagsMisc | KMemoryState_FlagCanAlignedDeviceMap
| KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
KMemoryState_NonDeviceIpc = ams::svc::MemoryState_NonDeviceIpc | KMemoryState_FlagsMisc | KMemoryState_FlagCanUseNonDeviceIpc,
KMemoryState_Kernel = ams::svc::MemoryState_Kernel,
KMemoryState_GeneratedCode = ams::svc::MemoryState_GeneratedCode | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagCanDebug | KMemoryState_FlagLinearMapped,
KMemoryState_CodeOut = ams::svc::MemoryState_CodeOut | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped,
KMemoryState_Coverage = ams::svc::MemoryState_Coverage | KMemoryState_FlagMapped,
KMemoryState_Insecure = ams::svc::MemoryState_Insecure | KMemoryState_FlagMapped | KMemoryState_FlagReferenceCounted | KMemoryState_FlagLinearMapped | KMemoryState_FlagCanChangeAttribute
| KMemoryState_FlagCanDeviceMap | KMemoryState_FlagCanAlignedDeviceMap | KMemoryState_FlagCanQueryPhysical
| KMemoryState_FlagCanUseNonSecureIpc | KMemoryState_FlagCanUseNonDeviceIpc,
};
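/* The asserts below pin each composed state to its known raw value, so any */
/* accidental renumbering of the flag bits above fails at compile time. */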
#if 1
static_assert(KMemoryState_Free == 0x00000000);
static_assert(KMemoryState_IoMemory == 0x00182001);
static_assert(KMemoryState_IoRegister == 0x00180001);
static_assert(KMemoryState_Static == 0x00040002);
static_assert(KMemoryState_Code == 0x04DC7E03);
static_assert(KMemoryState_CodeData == 0x0FFEBD04);
static_assert(KMemoryState_Normal == 0x077EBD05);
static_assert(KMemoryState_Shared == 0x04402006);
static_assert(KMemoryState_AliasCode == 0x04DD7E08);
static_assert(KMemoryState_AliasCodeData == 0x0FFFBD09);
static_assert(KMemoryState_Ipc == 0x045C3C0A);
static_assert(KMemoryState_Stack == 0x045C3C0B);
static_assert(KMemoryState_ThreadLocal == 0x0400000C);
static_assert(KMemoryState_Transfered == 0x055C3C0D);
static_assert(KMemoryState_SharedTransfered == 0x045C380E);
static_assert(KMemoryState_SharedCode == 0x0440380F);
static_assert(KMemoryState_Inaccessible == 0x00000010);
static_assert(KMemoryState_NonSecureIpc == 0x045C3811);
static_assert(KMemoryState_NonDeviceIpc == 0x044C2812);
static_assert(KMemoryState_Kernel == 0x00000013);
static_assert(KMemoryState_GeneratedCode == 0x04402214);
static_assert(KMemoryState_CodeOut == 0x04402015);
static_assert(KMemoryState_Coverage == 0x00002016); /* TODO: Is this correct? */
static_assert(KMemoryState_Insecure == 0x055C3817);
#endif
enum KMemoryPermission : u8 {
KMemoryPermission_None = 0,
KMemoryPermission_All = static_cast<u8>(~KMemoryPermission_None),
KMemoryPermission_KernelShift = 3,
KMemoryPermission_KernelRead = ams::svc::MemoryPermission_Read << KMemoryPermission_KernelShift,
KMemoryPermission_KernelWrite = ams::svc::MemoryPermission_Write << KMemoryPermission_KernelShift,
KMemoryPermission_KernelExecute = ams::svc::MemoryPermission_Execute << KMemoryPermission_KernelShift,
KMemoryPermission_NotMapped = (1 << (2 * KMemoryPermission_KernelShift)),
KMemoryPermission_KernelReadWrite = KMemoryPermission_KernelRead | KMemoryPermission_KernelWrite,
KMemoryPermission_KernelReadExecute = KMemoryPermission_KernelRead | KMemoryPermission_KernelExecute,
KMemoryPermission_UserRead = ams::svc::MemoryPermission_Read | KMemoryPermission_KernelRead,
KMemoryPermission_UserWrite = ams::svc::MemoryPermission_Write | KMemoryPermission_KernelWrite,
KMemoryPermission_UserExecute = ams::svc::MemoryPermission_Execute,
KMemoryPermission_UserReadWrite = KMemoryPermission_UserRead | KMemoryPermission_UserWrite,
KMemoryPermission_UserReadExecute = KMemoryPermission_UserRead | KMemoryPermission_UserExecute,
KMemoryPermission_UserMask = ams::svc::MemoryPermission_Read | ams::svc::MemoryPermission_Write | ams::svc::MemoryPermission_Execute,
KMemoryPermission_IpcLockChangeMask = KMemoryPermission_NotMapped | KMemoryPermission_UserReadWrite,
};
constexpr KMemoryPermission ConvertToKMemoryPermission(ams::svc::MemoryPermission perm) {
return static_cast<KMemoryPermission>((util::ToUnderlying(perm) & KMemoryPermission_UserMask) | KMemoryPermission_KernelRead | ((util::ToUnderlying(perm) & ams::svc::MemoryPermission_Write) ? KMemoryPermission_KernelWrite : KMemoryPermission_None) | (perm == ams::svc::MemoryPermission_None ? KMemoryPermission_NotMapped : KMemoryPermission_None));
}
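/* Illustrative checks (sketch, not from the original header): user read/write */
/* mirrors into the kernel read/write bits, and a None permission maps to a */
/* kernel-readable but unmapped entry. */
static_assert(ConvertToKMemoryPermission(ams::svc::MemoryPermission_ReadWrite) == KMemoryPermission_UserReadWrite);
static_assert(ConvertToKMemoryPermission(ams::svc::MemoryPermission_None) == (KMemoryPermission_KernelRead | KMemoryPermission_NotMapped));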
enum KMemoryAttribute : u8 {
KMemoryAttribute_None = 0x00,
KMemoryAttribute_All = 0xFF,
KMemoryAttribute_UserMask = KMemoryAttribute_All,
KMemoryAttribute_Locked = ams::svc::MemoryAttribute_Locked,
KMemoryAttribute_IpcLocked = ams::svc::MemoryAttribute_IpcLocked,
KMemoryAttribute_DeviceShared = ams::svc::MemoryAttribute_DeviceShared,
KMemoryAttribute_Uncached = ams::svc::MemoryAttribute_Uncached,
KMemoryAttribute_PermissionLocked = ams::svc::MemoryAttribute_PermissionLocked,
KMemoryAttribute_SetMask = KMemoryAttribute_Uncached | KMemoryAttribute_PermissionLocked,
};
enum KMemoryBlockDisableMergeAttribute : u8 {
KMemoryBlockDisableMergeAttribute_None = 0,
KMemoryBlockDisableMergeAttribute_Normal = (1u << 0),
KMemoryBlockDisableMergeAttribute_DeviceLeft = (1u << 1),
KMemoryBlockDisableMergeAttribute_IpcLeft = (1u << 2),
KMemoryBlockDisableMergeAttribute_Locked = (1u << 3),
/* ... */
KMemoryBlockDisableMergeAttribute_DeviceRight = (1u << 5),
KMemoryBlockDisableMergeAttribute_AllLeft = KMemoryBlockDisableMergeAttribute_Normal | KMemoryBlockDisableMergeAttribute_DeviceLeft | KMemoryBlockDisableMergeAttribute_IpcLeft | KMemoryBlockDisableMergeAttribute_Locked,
KMemoryBlockDisableMergeAttribute_AllRight = KMemoryBlockDisableMergeAttribute_DeviceRight,
};
struct KMemoryInfo {
uintptr_t m_address;
size_t m_size;
KMemoryState m_state;
u16 m_device_disable_merge_left_count;
u16 m_device_disable_merge_right_count;
u16 m_ipc_lock_count;
u16 m_device_use_count;
u16 m_ipc_disable_merge_count;
KMemoryPermission m_permission;
KMemoryAttribute m_attribute;
KMemoryPermission m_original_permission;
KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
constexpr ams::svc::MemoryInfo GetSvcMemoryInfo() const {
return {
.base_address = m_address,
.size = m_size,
.state = static_cast<ams::svc::MemoryState>(m_state & KMemoryState_Mask),
.attribute = static_cast<ams::svc::MemoryAttribute>(m_attribute & KMemoryAttribute_UserMask),
.permission = static_cast<ams::svc::MemoryPermission>(m_permission & KMemoryPermission_UserMask),
.ipc_count = m_ipc_lock_count,
.device_count = m_device_use_count,
.padding = {},
};
}
constexpr uintptr_t GetAddress() const {
return m_address;
}
constexpr size_t GetSize() const {
return m_size;
}
constexpr size_t GetNumPages() const {
return this->GetSize() / PageSize;
}
constexpr uintptr_t GetEndAddress() const {
return this->GetAddress() + this->GetSize();
}
constexpr uintptr_t GetLastAddress() const {
return this->GetEndAddress() - 1;
}
constexpr u16 GetIpcLockCount() const {
return m_ipc_lock_count;
}
constexpr u16 GetIpcDisableMergeCount() const {
return m_ipc_disable_merge_count;
}
constexpr KMemoryState GetState() const {
return m_state;
}
constexpr ams::svc::MemoryState GetSvcState() const {
return static_cast<ams::svc::MemoryState>(m_state & KMemoryState_Mask);
}
constexpr KMemoryPermission GetPermission() const {
return m_permission;
}
constexpr KMemoryPermission GetOriginalPermission() const {
return m_original_permission;
}
constexpr KMemoryAttribute GetAttribute() const {
return m_attribute;
}
constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
return m_disable_merge_attribute;
}
};
class KMemoryBlock : public util::IntrusiveRedBlackTreeBaseNode<KMemoryBlock> {
private:
KMemoryPermission m_permission;
KMemoryPermission m_original_permission;
KMemoryAttribute m_attribute;
KMemoryBlockDisableMergeAttribute m_disable_merge_attribute;
KProcessAddress m_address;
u32 m_num_pages;
KMemoryState m_memory_state;
u16 m_ipc_lock_count;
u16 m_ipc_disable_merge_count;
u16 m_device_use_count;
u16 m_device_disable_merge_left_count;
u16 m_device_disable_merge_right_count;
public:
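/* Compare treats rhs as an interval: it returns 0 whenever lhs's address falls */
/* within rhs's extents, so tree lookups keyed by a single address locate the */
/* containing block. */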
static constexpr ALWAYS_INLINE int Compare(const KMemoryBlock &lhs, const KMemoryBlock &rhs) {
if (lhs.GetAddress() < rhs.GetAddress()) {
return -1;
} else if (lhs.GetAddress() <= rhs.GetLastAddress()) {
return 0;
} else {
return 1;
}
}
public:
constexpr KProcessAddress GetAddress() const {
return m_address;
}
constexpr size_t GetNumPages() const {
return m_num_pages;
}
constexpr size_t GetSize() const {
return this->GetNumPages() * PageSize;
}
constexpr KProcessAddress GetEndAddress() const {
return this->GetAddress() + this->GetSize();
}
constexpr KProcessAddress GetLastAddress() const {
return this->GetEndAddress() - 1;
}
constexpr KMemoryState GetState() const {
return m_memory_state;
}
constexpr u16 GetIpcLockCount() const {
return m_ipc_lock_count;
}
constexpr u16 GetIpcDisableMergeCount() const {
return m_ipc_disable_merge_count;
}
constexpr u16 GetDeviceUseCount() const {
return m_device_use_count;
}
constexpr KMemoryPermission GetPermission() const {
return m_permission;
}
constexpr KMemoryPermission GetOriginalPermission() const {
return m_original_permission;
}
constexpr KMemoryAttribute GetAttribute() const {
return m_attribute;
}
constexpr KMemoryInfo GetMemoryInfo() const {
return {
.m_address = GetInteger(this->GetAddress()),
.m_size = this->GetSize(),
.m_state = m_memory_state,
.m_device_disable_merge_left_count = m_device_disable_merge_left_count,
.m_device_disable_merge_right_count = m_device_disable_merge_right_count,
.m_ipc_lock_count = m_ipc_lock_count,
.m_device_use_count = m_device_use_count,
.m_ipc_disable_merge_count = m_ipc_disable_merge_count,
.m_permission = m_permission,
.m_attribute = m_attribute,
.m_original_permission = m_original_permission,
.m_disable_merge_attribute = m_disable_merge_attribute,
};
}
public:
explicit KMemoryBlock() { /* ... */ }
constexpr KMemoryBlock(util::ConstantInitializeTag, KProcessAddress addr, u32 np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr)
: util::IntrusiveRedBlackTreeBaseNode<KMemoryBlock>(util::ConstantInitialize), m_permission(p), m_original_permission(KMemoryPermission_None),
m_attribute(attr), m_disable_merge_attribute(), m_address(addr), m_num_pages(np), m_memory_state(ms), m_ipc_lock_count(0),
m_ipc_disable_merge_count(), m_device_use_count(0), m_device_disable_merge_left_count(), m_device_disable_merge_right_count()
{
/* ... */
}
constexpr void Initialize(KProcessAddress addr, u32 np, KMemoryState ms, KMemoryPermission p, KMemoryAttribute attr) {
MESOSPHERE_ASSERT_THIS();
m_device_disable_merge_left_count = 0;
m_device_disable_merge_right_count = 0;
m_address = addr;
m_num_pages = np;
m_memory_state = ms;
m_ipc_lock_count = 0;
m_device_use_count = 0;
m_permission = p;
m_original_permission = KMemoryPermission_None;
m_attribute = attr;
m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute_None;
}
constexpr bool HasProperties(KMemoryState s, KMemoryPermission p, KMemoryAttribute a) const {
MESOSPHERE_ASSERT_THIS();
constexpr auto AttributeIgnoreMask = KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared;
return m_memory_state == s && m_permission == p && (m_attribute | AttributeIgnoreMask) == (a | AttributeIgnoreMask);
}
constexpr bool HasSameProperties(const KMemoryBlock &rhs) const {
MESOSPHERE_ASSERT_THIS();
return m_memory_state == rhs.m_memory_state &&
m_permission == rhs.m_permission &&
m_original_permission == rhs.m_original_permission &&
m_attribute == rhs.m_attribute &&
m_ipc_lock_count == rhs.m_ipc_lock_count &&
m_device_use_count == rhs.m_device_use_count;
}
constexpr bool CanMergeWith(const KMemoryBlock &rhs) const {
return this->HasSameProperties(rhs) &&
(m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllRight) == 0 &&
(rhs.m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllLeft) == 0;
}
constexpr bool Contains(KProcessAddress addr) const {
MESOSPHERE_ASSERT_THIS();
return this->GetAddress() <= addr && addr <= this->GetEndAddress();
}
constexpr void Add(const KMemoryBlock &added_block) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(added_block.GetNumPages() > 0);
MESOSPHERE_ASSERT(this->GetAddress() + added_block.GetSize() - 1 < this->GetEndAddress() + added_block.GetSize() - 1);
m_num_pages += added_block.GetNumPages();
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute | added_block.m_disable_merge_attribute);
m_device_disable_merge_right_count = added_block.m_device_disable_merge_right_count;
}
constexpr void Update(KMemoryState s, KMemoryPermission p, KMemoryAttribute a, bool set_disable_merge_attr, u8 set_mask, u8 clear_mask) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(m_original_permission == KMemoryPermission_None);
MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_IpcLocked) == 0);
m_memory_state = s;
m_permission = p;
m_attribute = static_cast<KMemoryAttribute>(a | (m_attribute & (KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared)));
if (set_disable_merge_attr && set_mask != 0) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute | set_mask);
}
if (clear_mask != 0) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute & ~clear_mask);
}
}
constexpr void UpdateAttribute(u32 mask, u32 attr) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT((mask & KMemoryAttribute_IpcLocked) == 0);
MESOSPHERE_ASSERT((mask & KMemoryAttribute_DeviceShared) == 0);
m_attribute = static_cast<KMemoryAttribute>((m_attribute & ~mask) | attr);
}
constexpr void Split(KMemoryBlock *block, KProcessAddress addr) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this->GetAddress() < addr);
MESOSPHERE_ASSERT(this->Contains(addr));
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), PageSize));
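/* 'block' receives the pages below 'addr' (along with the left-side merge state); */
/* this block keeps the remainder beginning at 'addr'. */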
block->m_address = m_address;
block->m_num_pages = (addr - this->GetAddress()) / PageSize;
block->m_memory_state = m_memory_state;
block->m_ipc_lock_count = m_ipc_lock_count;
block->m_device_use_count = m_device_use_count;
block->m_permission = m_permission;
block->m_original_permission = m_original_permission;
block->m_attribute = m_attribute;
block->m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllLeft);
block->m_ipc_disable_merge_count = m_ipc_disable_merge_count;
block->m_device_disable_merge_left_count = m_device_disable_merge_left_count;
block->m_device_disable_merge_right_count = 0;
m_address = addr;
m_num_pages -= block->m_num_pages;
m_ipc_disable_merge_count = 0;
m_device_disable_merge_left_count = 0;
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute & KMemoryBlockDisableMergeAttribute_AllRight);
}
constexpr void UpdateDeviceDisableMergeStateForShareLeft(KMemoryPermission new_perm, bool left, bool right) {
/* New permission/right aren't used. */
MESOSPHERE_UNUSED(new_perm, right);
if (left) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute_DeviceLeft);
const u16 new_device_disable_merge_left_count = ++m_device_disable_merge_left_count;
MESOSPHERE_ABORT_UNLESS(new_device_disable_merge_left_count > 0);
}
}
constexpr void UpdateDeviceDisableMergeStateForShareRight(KMemoryPermission new_perm, bool left, bool right) {
/* New permission/left aren't used. */
MESOSPHERE_UNUSED(new_perm, left);
if (right) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute_DeviceRight);
const u16 new_device_disable_merge_right_count = ++m_device_disable_merge_right_count;
MESOSPHERE_ABORT_UNLESS(new_device_disable_merge_right_count > 0);
}
}
constexpr void UpdateDeviceDisableMergeStateForShare(KMemoryPermission new_perm, bool left, bool right) {
this->UpdateDeviceDisableMergeStateForShareLeft(new_perm, left, right);
this->UpdateDeviceDisableMergeStateForShareRight(new_perm, left, right);
}
constexpr void ShareToDevice(KMemoryPermission new_perm, bool left, bool right) {
/* New permission isn't used. */
MESOSPHERE_UNUSED(new_perm);
/* We must either be device-shared or have a zero device use count. */
MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared || m_device_use_count == 0);
/* Share. */
const u16 new_count = ++m_device_use_count;
MESOSPHERE_ABORT_UNLESS(new_count > 0);
m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute_DeviceShared);
this->UpdateDeviceDisableMergeStateForShare(new_perm, left, right);
}
constexpr void UpdateDeviceDisableMergeStateForUnshareLeft(KMemoryPermission new_perm, bool left, bool right) {
/* New permission/right aren't used. */
MESOSPHERE_UNUSED(new_perm, right);
if (left) {
if (!m_device_disable_merge_left_count) {
return;
}
--m_device_disable_merge_left_count;
}
m_device_disable_merge_left_count = std::min(m_device_disable_merge_left_count, m_device_use_count);
if (m_device_disable_merge_left_count == 0) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_DeviceLeft);
}
}
constexpr void UpdateDeviceDisableMergeStateForUnshareRight(KMemoryPermission new_perm, bool left, bool right) {
/* New permission/left aren't used. */
MESOSPHERE_UNUSED(new_perm, left);
if (right) {
const u16 old_device_disable_merge_right_count = m_device_disable_merge_right_count--;
MESOSPHERE_ASSERT(old_device_disable_merge_right_count > 0);
if (old_device_disable_merge_right_count == 1) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_DeviceRight);
}
}
}
constexpr void UpdateDeviceDisableMergeStateForUnshare(KMemoryPermission new_perm, bool left, bool right) {
this->UpdateDeviceDisableMergeStateForUnshareLeft(new_perm, left, right);
this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
}
constexpr void UnshareToDevice(KMemoryPermission new_perm, bool left, bool right) {
/* New permission isn't used. */
MESOSPHERE_UNUSED(new_perm);
/* We must be shared. */
MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared);
/* Unshare. */
const u16 old_count = m_device_use_count--;
MESOSPHERE_ABORT_UNLESS(old_count > 0);
if (old_count == 1) {
m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute_DeviceShared);
}
this->UpdateDeviceDisableMergeStateForUnshare(new_perm, left, right);
}
constexpr void UnshareToDeviceRight(KMemoryPermission new_perm, bool left, bool right) {
/* New permission isn't used. */
MESOSPHERE_UNUSED(new_perm);
/* We must be shared. */
MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_DeviceShared) == KMemoryAttribute_DeviceShared);
/* Unshare. */
const u16 old_count = m_device_use_count--;
MESOSPHERE_ABORT_UNLESS(old_count > 0);
if (old_count == 1) {
m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute_DeviceShared);
}
this->UpdateDeviceDisableMergeStateForUnshareRight(new_perm, left, right);
}
constexpr void LockForIpc(KMemoryPermission new_perm, bool left, bool right) {
/* We must either be locked or have a zero lock count. */
MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_IpcLocked) == KMemoryAttribute_IpcLocked || m_ipc_lock_count == 0);
/* Lock. */
const u16 new_lock_count = ++m_ipc_lock_count;
MESOSPHERE_ABORT_UNLESS(new_lock_count > 0);
/* If this is our first lock, update our permissions. */
if (new_lock_count == 1) {
MESOSPHERE_ASSERT(m_original_permission == KMemoryPermission_None);
MESOSPHERE_ASSERT((m_permission | new_perm | KMemoryPermission_NotMapped) == (m_permission | KMemoryPermission_NotMapped));
MESOSPHERE_ASSERT((m_permission & KMemoryPermission_UserExecute) != KMemoryPermission_UserExecute || (new_perm == KMemoryPermission_UserRead));
m_original_permission = m_permission;
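/* Only the read/write and not-mapped bits come from the requested permission; */
/* the execute bits are preserved from the original permission. */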
m_permission = static_cast<KMemoryPermission>((new_perm & KMemoryPermission_IpcLockChangeMask) | (m_original_permission & ~KMemoryPermission_IpcLockChangeMask));
}
m_attribute = static_cast<KMemoryAttribute>(m_attribute | KMemoryAttribute_IpcLocked);
if (left) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute | KMemoryBlockDisableMergeAttribute_IpcLeft);
const u16 new_ipc_disable_merge_count = ++m_ipc_disable_merge_count;
MESOSPHERE_ABORT_UNLESS(new_ipc_disable_merge_count > 0);
}
MESOSPHERE_UNUSED(right);
}
constexpr void UnlockForIpc(KMemoryPermission new_perm, bool left, bool right) {
/* New permission isn't used. */
MESOSPHERE_UNUSED(new_perm);
/* We must be locked. */
MESOSPHERE_ASSERT((m_attribute & KMemoryAttribute_IpcLocked) == KMemoryAttribute_IpcLocked);
/* Unlock. */
const u16 old_lock_count = m_ipc_lock_count--;
MESOSPHERE_ABORT_UNLESS(old_lock_count > 0);
/* If this is our last unlock, update our permissions. */
if (old_lock_count == 1) {
MESOSPHERE_ASSERT(m_original_permission != KMemoryPermission_None);
m_permission = m_original_permission;
m_original_permission = KMemoryPermission_None;
m_attribute = static_cast<KMemoryAttribute>(m_attribute & ~KMemoryAttribute_IpcLocked);
}
if (left) {
const u16 old_ipc_disable_merge_count = m_ipc_disable_merge_count--;
MESOSPHERE_ASSERT(old_ipc_disable_merge_count > 0);
if (old_ipc_disable_merge_count == 1) {
m_disable_merge_attribute = static_cast<KMemoryBlockDisableMergeAttribute>(m_disable_merge_attribute & ~KMemoryBlockDisableMergeAttribute_IpcLeft);
}
}
MESOSPHERE_UNUSED(right);
}
constexpr KMemoryBlockDisableMergeAttribute GetDisableMergeAttribute() const {
return m_disable_merge_attribute;
}
};
static_assert(std::is_trivially_destructible<KMemoryBlock>::value);
}
| 37,189
|
C++
|
.h
| 543
| 52.959484
| 356
| 0.593477
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,808
|
kern_build_config.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_build_config.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#if defined(AMS_BUILD_FOR_AUDITING)
#define MESOSPHERE_BUILD_FOR_AUDITING
#endif
#if defined(MESOSPHERE_BUILD_FOR_AUDITING) || defined(AMS_BUILD_FOR_DEBUGGING)
#define MESOSPHERE_BUILD_FOR_DEBUGGING
#endif
#ifdef MESOSPHERE_BUILD_FOR_DEBUGGING
#define MESOSPHERE_ENABLE_ASSERTIONS
#define MESOSPHERE_ENABLE_DEBUG_PRINT
#define MESOSPHERE_ENABLE_KERNEL_STACK_USAGE
#endif
#if defined(MESOSPHERE_BUILD_FOR_DEBUGGING)
#define MESOSPHERE_NOINLINE_IF_DEBUG NOINLINE
#define MESOSPHERE_ALWAYS_INLINE_IF_RELEASE NOINLINE
#else
#define MESOSPHERE_NOINLINE_IF_DEBUG
#define MESOSPHERE_ALWAYS_INLINE_IF_RELEASE ALWAYS_INLINE
#endif
//#define MESOSPHERE_BUILD_FOR_TRACING
//#define MESOSPHERE_ENABLE_PERFORMANCE_COUNTER
#define MESOSPHERE_ENABLE_PANIC_REGISTER_DUMP
#define MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP
/* NOTE: In 16.0.0, Nintendo deleted the creation time field for KProcess, */
/* but this may be useful for some debugging applications, and so can be */
/* re-enabled by toggling this define. */
//#define MESOSPHERE_ENABLE_PROCESS_CREATION_TIME
/* NOTE: This enables fast class token storage using a class member. */
/* This saves a virtual call when doing KAutoObject->DynCast<>(), */
/* at the cost of storing class tokens inside the class object. */
/* However, as of (10/16/2021) KAutoObject has an unused class member */
/* of the right size, and so this does not actually cost any space. */
#define MESOSPHERE_ENABLE_DEVIRTUALIZED_DYNAMIC_CAST
/* NOTE: This enables usage of KDebug handles as parameter for svc::GetInfo */
/* calls which require a process parameter. This enables a debugger to obtain */
/* address space/layout information, for example. However, it changes ABI, and so */
/* this define allows toggling the extension. */
#define MESOSPHERE_ENABLE_GET_INFO_OF_DEBUG_PROCESS
/* NOTE: This uses currently-reserved bits inside the MapRange capability */
/* in order to support large physical addresses (40-bit instead of 36). */
/* This is toggleable in order to disable it if N ever uses those bits. */
#if defined(ATMOSPHERE_BOARD_NINTENDO_NX)
//#define MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES
#else
#define MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES
#endif
| 2,865
|
C++
|
.h
| 61
| 45.606557
| 84
| 0.788269
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,809
|
kern_k_page_group.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_page_group.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
namespace ams::kern {
class KBlockInfoManager;
class KPageGroup;
class KBlockInfo {
private:
friend class KPageGroup;
private:
KBlockInfo *m_next;
u32 m_page_index;
u32 m_num_pages;
public:
KBlockInfo() : m_next(nullptr) { /* ... */ }
constexpr ALWAYS_INLINE void Initialize(KPhysicalAddress addr, size_t np) {
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), PageSize));
MESOSPHERE_ASSERT(static_cast<u32>(np) == np);
m_page_index = GetInteger(addr) / PageSize;
m_num_pages = np;
}
constexpr ALWAYS_INLINE KPhysicalAddress GetAddress() const { return m_page_index * PageSize; }
constexpr ALWAYS_INLINE size_t GetNumPages() const { return m_num_pages; }
constexpr ALWAYS_INLINE size_t GetSize() const { return this->GetNumPages() * PageSize; }
constexpr ALWAYS_INLINE KPhysicalAddress GetEndAddress() const { return (m_page_index + m_num_pages) * PageSize; }
constexpr ALWAYS_INLINE KPhysicalAddress GetLastAddress() const { return this->GetEndAddress() - 1; }
constexpr ALWAYS_INLINE KBlockInfo *GetNext() const { return m_next; }
constexpr ALWAYS_INLINE bool IsEquivalentTo(const KBlockInfo &rhs) const {
return m_page_index == rhs.m_page_index && m_num_pages == rhs.m_num_pages;
}
constexpr ALWAYS_INLINE bool operator==(const KBlockInfo &rhs) const {
return this->IsEquivalentTo(rhs);
}
constexpr ALWAYS_INLINE bool operator!=(const KBlockInfo &rhs) const {
return !(*this == rhs);
}
constexpr ALWAYS_INLINE bool IsStrictlyBefore(KPhysicalAddress addr) const {
const KPhysicalAddress end = this->GetEndAddress();
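/* An end of zero with a nonzero page index means the range wrapped the top of */
/* the physical address space; nothing can be strictly after it. */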
if (m_page_index != 0 && end == Null<KPhysicalAddress>) {
return false;
}
return end < addr;
}
constexpr ALWAYS_INLINE bool operator<(KPhysicalAddress addr) const {
return this->IsStrictlyBefore(addr);
}
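/* TryConcatenate absorbs a range that begins exactly at this block's end, */
/* growing the block in place; ranges starting at physical address zero are */
/* rejected. */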
constexpr ALWAYS_INLINE bool TryConcatenate(KPhysicalAddress addr, size_t np) {
if (addr != Null<KPhysicalAddress> && addr == this->GetEndAddress()) {
m_num_pages += np;
return true;
}
return false;
}
private:
constexpr ALWAYS_INLINE void SetNext(KBlockInfo *next) {
m_next = next;
}
};
static_assert(sizeof(KBlockInfo) <= 0x10);
class KPageGroup {
public:
class Iterator {
public:
using iterator_category = std::forward_iterator_tag;
using value_type = const KBlockInfo;
using difference_type = std::ptrdiff_t;
using pointer = value_type *;
using reference = value_type &;
private:
pointer m_node;
public:
constexpr explicit ALWAYS_INLINE Iterator(pointer n) : m_node(n) { /* ... */ }
constexpr ALWAYS_INLINE bool operator==(const Iterator &rhs) const { return m_node == rhs.m_node; }
constexpr ALWAYS_INLINE bool operator!=(const Iterator &rhs) const { return !(*this == rhs); }
constexpr ALWAYS_INLINE pointer operator->() const { return m_node; }
constexpr ALWAYS_INLINE reference operator*() const { return *m_node; }
constexpr ALWAYS_INLINE Iterator &operator++() {
m_node = m_node->GetNext();
return *this;
}
constexpr ALWAYS_INLINE Iterator operator++(int) {
const Iterator it{*this};
++(*this);
return it;
}
};
private:
KBlockInfo *m_first_block;
KBlockInfo *m_last_block;
KBlockInfoManager *m_manager;
public:
explicit KPageGroup(KBlockInfoManager *m) : m_first_block(), m_last_block(), m_manager(m) { /* ... */ }
~KPageGroup() { this->Finalize(); }
void CloseAndReset();
void Finalize();
ALWAYS_INLINE Iterator begin() const { return Iterator{m_first_block}; }
ALWAYS_INLINE Iterator end() const { return Iterator{nullptr}; }
ALWAYS_INLINE bool empty() const { return m_first_block == nullptr; }
Result AddBlock(KPhysicalAddress addr, size_t num_pages);
void Open() const;
void OpenFirst() const;
void Close() const;
size_t GetNumPages() const;
bool IsEquivalentTo(const KPageGroup &rhs) const;
Result CopyRangeTo(KPageGroup &out, size_t offset, size_t size) const;
ALWAYS_INLINE bool operator==(const KPageGroup &rhs) const {
return this->IsEquivalentTo(rhs);
}
ALWAYS_INLINE bool operator!=(const KPageGroup &rhs) const {
return !(*this == rhs);
}
};
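/* Scoped opener for a page group's references. A sketch of typical use (assuming */
/* a KPageGroup pg already populated via AddBlock): */
/* */
/* KScopedPageGroup spg(pg); // opens (or open-firsts) every block */
/* ... operate on the pages ... */
/* spg.CancelClose(); // on success, keep the references alive */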
class KScopedPageGroup {
private:
const KPageGroup *m_pg;
public:
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup *gp, bool not_first = true) : m_pg(gp) {
if (m_pg) {
if (not_first) {
m_pg->Open();
} else {
m_pg->OpenFirst();
}
}
}
explicit ALWAYS_INLINE KScopedPageGroup(const KPageGroup &gp, bool not_first = true) : KScopedPageGroup(std::addressof(gp), not_first) { /* ... */ }
ALWAYS_INLINE ~KScopedPageGroup() { if (m_pg) { m_pg->Close(); } }
ALWAYS_INLINE void CancelClose() {
m_pg = nullptr;
}
};
}
| 7,003
|
C++
|
.h
| 148
| 34.094595
| 160
| 0.557591
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,810
|
kern_k_exception_context.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_exception_context.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#ifdef ATMOSPHERE_ARCH_ARM64
#include <mesosphere/arch/arm64/kern_k_exception_context.hpp>
namespace ams::kern {
using ams::kern::arch::arm64::KExceptionContext;
}
#else
#error "Unknown architecture for KExceptionContext"
#endif
| 903
|
C++
|
.h
| 24
| 34.958333
| 76
| 0.752566
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,811
|
kern_k_handle_table.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_handle_table.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_auto_object.hpp>
#include <mesosphere/kern_k_spin_lock.hpp>
#include <mesosphere/kern_k_thread.hpp>
#include <mesosphere/kern_k_interrupt_event.hpp>
namespace ams::kern {
constexpr ALWAYS_INLINE util::BitPack32 GetHandleBitPack(ams::svc::Handle handle) {
return util::BitPack32{handle};
}
class KProcess;
class KThread;
class KHandleTable {
NON_COPYABLE(KHandleTable);
NON_MOVEABLE(KHandleTable);
public:
static constexpr size_t MaxTableSize = 1024;
private:
using HandleRawValue = util::BitPack32::Field<0, BITSIZEOF(u32), u32>;
using HandleEncoded = util::BitPack32::Field<0, BITSIZEOF(ams::svc::Handle), ams::svc::Handle>;
using HandleIndex = util::BitPack32::Field<0, 15, u16>;
using HandleLinearId = util::BitPack32::Field<HandleIndex::Next, 15, u16>;
using HandleReserved = util::BitPack32::Field<HandleLinearId::Next, 2, u32>;
static constexpr u16 MinLinearId = 1;
static constexpr u16 MaxLinearId = util::BitPack32{std::numeric_limits<u32>::max()}.Get<HandleLinearId>();
static constexpr ALWAYS_INLINE ams::svc::Handle EncodeHandle(u16 index, u16 linear_id) {
util::BitPack32 pack = {0};
pack.Set<HandleIndex>(index);
pack.Set<HandleLinearId>(linear_id);
pack.Set<HandleReserved>(0);
return pack.Get<HandleEncoded>();
}
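/* Layout note: a handle packs a 15-bit table index in bits 0-14, a 15-bit */
/* linear id in bits 15-29, and two reserved bits; e.g. EncodeHandle(1, 1) */
/* yields 0x8001. */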
union EntryInfo {
u16 linear_id;
s16 next_free_index;
constexpr ALWAYS_INLINE u16 GetLinearId() const { return linear_id; }
constexpr ALWAYS_INLINE s32 GetNextFreeIndex() const { return next_free_index; }
};
private:
EntryInfo m_entry_infos[MaxTableSize];
KAutoObject *m_objects[MaxTableSize];
mutable KSpinLock m_lock;
s32 m_free_head_index;
u16 m_table_size;
u16 m_max_count;
u16 m_next_linear_id;
u16 m_count;
public:
constexpr explicit KHandleTable(util::ConstantInitializeTag) : m_entry_infos(), m_objects(), m_lock(), m_free_head_index(-1), m_table_size(), m_max_count(), m_next_linear_id(), m_count() { /* ... */ }
explicit KHandleTable() : m_lock(), m_free_head_index(-1), m_table_size(), m_max_count(), m_next_linear_id(), m_count() { MESOSPHERE_ASSERT_THIS(); }
MESOSPHERE_NOINLINE_IF_DEBUG Result Initialize(s32 size) {
MESOSPHERE_ASSERT_THIS();
/* Check that the table size is valid. */
R_UNLESS(size <= static_cast<s32>(MaxTableSize), svc::ResultOutOfMemory());
/* Lock. */
KScopedDisableDispatch dd;
KScopedSpinLock lk(m_lock);
/* Initialize all fields. */
m_max_count = 0;
m_table_size = (size <= 0) ? MaxTableSize : size;
m_next_linear_id = MinLinearId;
m_count = 0;
m_free_head_index = -1;
/* Free all entries. */
for (s32 i = 0; i < static_cast<s32>(m_table_size); ++i) {
m_objects[i] = nullptr;
m_entry_infos[i].next_free_index = i - 1;
m_free_head_index = i;
}
R_SUCCEED();
}
constexpr ALWAYS_INLINE size_t GetTableSize() const { return m_table_size; }
constexpr ALWAYS_INLINE size_t GetCount() const { return m_count; }
constexpr ALWAYS_INLINE size_t GetMaxCount() const { return m_max_count; }
MESOSPHERE_NOINLINE_IF_DEBUG Result Finalize();
MESOSPHERE_NOINLINE_IF_DEBUG bool Remove(ams::svc::Handle handle);
template<typename T = KAutoObject>
ALWAYS_INLINE KScopedAutoObject<T> GetObjectWithoutPseudoHandle(ams::svc::Handle handle) const {
/* Lock and look up in table. */
KScopedDisableDispatch dd;
KScopedSpinLock lk(m_lock);
if constexpr (std::is_same<T, KAutoObject>::value) {
return this->GetObjectImpl(handle);
} else {
if (auto *obj = this->GetObjectImpl(handle); AMS_LIKELY(obj != nullptr)) {
return obj->DynamicCast<T*>();
} else {
return nullptr;
}
}
}
template<typename T = KAutoObject>
ALWAYS_INLINE KScopedAutoObject<T> GetObject(ams::svc::Handle handle) const {
MESOSPHERE_ASSERT_THIS();
/* Handle pseudo-handles. */
if constexpr (std::derived_from<KProcess, T>) {
if (handle == ams::svc::PseudoHandle::CurrentProcess) {
auto * const cur_process = GetCurrentProcessPointer();
AMS_ASSUME(cur_process != nullptr);
return cur_process;
}
} else if constexpr (std::derived_from<KThread, T>) {
if (handle == ams::svc::PseudoHandle::CurrentThread) {
auto * const cur_thread = GetCurrentThreadPointer();
AMS_ASSUME(cur_thread != nullptr);
return cur_thread;
}
}
return this->template GetObjectWithoutPseudoHandle<T>(handle);
}
KScopedAutoObject<KAutoObject> GetObjectForIpcWithoutPseudoHandle(ams::svc::Handle handle) const {
/* Lock and look up in table. */
KScopedDisableDispatch dd;
KScopedSpinLock lk(m_lock);
KAutoObject *obj = this->GetObjectImpl(handle);
if (AMS_LIKELY(obj != nullptr)) {
if (AMS_UNLIKELY(obj->DynamicCast<KInterruptEvent *>() != nullptr)) {
return nullptr;
}
}
return obj;
}
ALWAYS_INLINE KScopedAutoObject<KAutoObject> GetObjectForIpc(ams::svc::Handle handle, KThread *cur_thread) const {
/* Handle pseudo-handles. */
AMS_ASSUME(cur_thread != nullptr);
if (handle == ams::svc::PseudoHandle::CurrentProcess) {
auto * const cur_process = static_cast<KAutoObject *>(static_cast<void *>(cur_thread->GetOwnerProcess()));
AMS_ASSUME(cur_process != nullptr);
return cur_process;
}
if (handle == ams::svc::PseudoHandle::CurrentThread) {
return static_cast<KAutoObject *>(cur_thread);
}
return GetObjectForIpcWithoutPseudoHandle(handle);
}
ALWAYS_INLINE KScopedAutoObject<KAutoObject> GetObjectByIndex(ams::svc::Handle *out_handle, size_t index) const {
MESOSPHERE_ASSERT_THIS();
KScopedDisableDispatch dd;
KScopedSpinLock lk(m_lock);
return this->GetObjectByIndexImpl(out_handle, index);
}
MESOSPHERE_NOINLINE_IF_DEBUG Result Reserve(ams::svc::Handle *out_handle);
MESOSPHERE_NOINLINE_IF_DEBUG void Unreserve(ams::svc::Handle handle);
MESOSPHERE_NOINLINE_IF_DEBUG Result Add(ams::svc::Handle *out_handle, KAutoObject *obj);
MESOSPHERE_NOINLINE_IF_DEBUG void Register(ams::svc::Handle handle, KAutoObject *obj);
template<typename T>
ALWAYS_INLINE bool GetMultipleObjects(T **out, const ams::svc::Handle *handles, size_t num_handles) const {
/* Try to convert and open all the handles. */
size_t num_opened;
{
/* Lock the table. */
KScopedDisableDispatch dd;
KScopedSpinLock lk(m_lock);
for (num_opened = 0; num_opened < num_handles; num_opened++) {
/* Get the current handle. */
const auto cur_handle = handles[num_opened];
/* Get the object for the current handle. */
KAutoObject *cur_object = this->GetObjectImpl(cur_handle);
if (AMS_UNLIKELY(cur_object == nullptr)) {
break;
}
/* Cast the current object to the desired type. */
T *cur_t = cur_object->DynamicCast<T*>();
if (AMS_UNLIKELY(cur_t == nullptr)) {
break;
}
/* Open a reference to the current object. */
cur_t->Open();
out[num_opened] = cur_t;
}
}
/* If we converted every object, succeed. */
if (AMS_LIKELY(num_opened == num_handles)) {
return true;
}
/* If we didn't convert every object, close the ones we opened. */
for (size_t i = 0; i < num_opened; i++) {
out[i]->Close();
}
return false;
}
private:
constexpr ALWAYS_INLINE s32 AllocateEntry() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(m_count < m_table_size);
const auto index = m_free_head_index;
m_free_head_index = m_entry_infos[index].GetNextFreeIndex();
m_max_count = std::max(m_max_count, ++m_count);
return index;
}
constexpr ALWAYS_INLINE void FreeEntry(s32 index) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(m_count > 0);
m_objects[index] = nullptr;
m_entry_infos[index].next_free_index = m_free_head_index;
m_free_head_index = index;
--m_count;
}
constexpr ALWAYS_INLINE u16 AllocateLinearId() {
const u16 id = m_next_linear_id++;
if (m_next_linear_id > MaxLinearId) {
m_next_linear_id = MinLinearId;
}
return id;
}
constexpr ALWAYS_INLINE bool IsValidHandle(ams::svc::Handle handle) const {
MESOSPHERE_ASSERT_THIS();
/* Unpack the handle. */
const auto handle_pack = GetHandleBitPack(handle);
const auto raw_value = handle_pack.Get<HandleRawValue>();
const auto index = handle_pack.Get<HandleIndex>();
const auto linear_id = handle_pack.Get<HandleLinearId>();
const auto reserved = handle_pack.Get<HandleReserved>();
MESOSPHERE_ASSERT(reserved == 0);
MESOSPHERE_UNUSED(reserved);
/* Validate our indexing information. */
if (AMS_UNLIKELY(raw_value == 0)) {
return false;
}
if (AMS_UNLIKELY(linear_id == 0)) {
return false;
}
if (AMS_UNLIKELY(index >= m_table_size)) {
return false;
}
/* Check that there's an object, and our serial id is correct. */
if (AMS_UNLIKELY(m_objects[index] == nullptr)) {
return false;
}
if (AMS_UNLIKELY(m_entry_infos[index].GetLinearId() != linear_id)) {
return false;
}
return true;
}
constexpr MESOSPHERE_NOINLINE_IF_DEBUG KAutoObject *GetObjectImpl(ams::svc::Handle handle) const {
MESOSPHERE_ASSERT_THIS();
/* Handles must not have reserved bits set. */
const auto handle_pack = GetHandleBitPack(handle);
if (AMS_UNLIKELY(handle_pack.Get<HandleReserved>() != 0)) {
return nullptr;
}
if (AMS_LIKELY(this->IsValidHandle(handle))) {
return m_objects[handle_pack.Get<HandleIndex>()];
} else {
return nullptr;
}
}
constexpr ALWAYS_INLINE KAutoObject *GetObjectByIndexImpl(ams::svc::Handle *out_handle, size_t index) const {
MESOSPHERE_ASSERT_THIS();
/* Index must be in bounds. */
if (AMS_UNLIKELY(index >= m_table_size)) {
return nullptr;
}
/* Ensure entry has an object. */
if (KAutoObject *obj = m_objects[index]; obj != nullptr) {
*out_handle = EncodeHandle(index, m_entry_infos[index].GetLinearId());
return obj;
} else {
return nullptr;
}
}
};
}
| 14,035
|
C++
|
.h
| 278
| 34.640288
| 212
| 0.524533
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,812
|
kern_k_scheduler_lock.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_scheduler_lock.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_spin_lock.hpp>
#include <mesosphere/kern_k_current_context.hpp>
#include <mesosphere/kern_k_scoped_lock.hpp>
namespace ams::kern {
class KThread;
template<typename T>
concept KSchedulerLockable = !std::is_reference<T>::value && requires(T) {
{ T::DisableScheduling() } -> std::same_as<void>;
{ T::EnableScheduling(std::declval<u64>()) } -> std::same_as<void>;
{ T::UpdateHighestPriorityThreads() } -> std::convertible_to<u64>;
};
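/* A conforming scheduler exposes static DisableScheduling/EnableScheduling and */
/* an UpdateHighestPriorityThreads that returns a mask of cores needing a */
/* reschedule, as consumed by Unlock() below. */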
template<typename SchedulerType> requires KSchedulerLockable<SchedulerType>
class KAbstractSchedulerLock {
private:
KAlignedSpinLock m_spin_lock;
s32 m_lock_count;
KThread *m_owner_thread;
public:
constexpr ALWAYS_INLINE KAbstractSchedulerLock() : m_spin_lock(), m_lock_count(0), m_owner_thread(nullptr) { MESOSPHERE_ASSERT_THIS(); }
ALWAYS_INLINE bool IsLockedByCurrentThread() const {
MESOSPHERE_ASSERT_THIS();
return m_owner_thread == GetCurrentThreadPointer();
}
MESOSPHERE_ALWAYS_INLINE_IF_RELEASE void Lock() {
MESOSPHERE_ASSERT_THIS();
if (this->IsLockedByCurrentThread()) {
/* If we already own the lock, the lock count should be > 0. */
/* For debug, ensure this is true. */
MESOSPHERE_ASSERT(m_lock_count > 0);
} else {
/* Otherwise, we want to disable scheduling and acquire the spinlock. */
SchedulerType::DisableScheduling();
m_spin_lock.Lock();
/* For debug, ensure that our state is valid. */
MESOSPHERE_ASSERT(m_lock_count == 0);
MESOSPHERE_ASSERT(m_owner_thread == nullptr);
/* Take ownership of the lock. */
m_owner_thread = GetCurrentThreadPointer();
}
/* Increment the lock count. */
m_lock_count++;
}
MESOSPHERE_ALWAYS_INLINE_IF_RELEASE void Unlock() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(m_lock_count > 0);
/* Release an instance of the lock. */
if ((--m_lock_count) == 0) {
/* Perform a memory barrier here. */
cpu::DataMemoryBarrierInnerShareable();
/* We're no longer going to hold the lock. Take note of what cores need scheduling. */
const u64 cores_needing_scheduling = SchedulerType::UpdateHighestPriorityThreads();
/* Note that we no longer hold the lock, and unlock the spinlock. */
m_owner_thread = nullptr;
m_spin_lock.Unlock();
/* Enable scheduling, and perform a rescheduling operation. */
SchedulerType::EnableScheduling(cores_needing_scheduling);
}
}
};
}
| 3,853
|
C++
|
.h
| 78
| 37.602564
| 148
| 0.59074
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,813
|
kern_select_interrupt_manager.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_manager.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_interrupt_name.hpp>
#if defined(ATMOSPHERE_ARCH_ARM64)
#include <mesosphere/arch/arm64/kern_k_interrupt_manager.hpp>
namespace ams::kern {
using ams::kern::arch::arm64::KInterruptManager;
}
#else
#error "Unknown architecture for KInterruptManager"
#endif
namespace ams::kern {
/* Enable or disable interrupts for the lifetime of an object. */
class KScopedInterruptDisable {
NON_COPYABLE(KScopedInterruptDisable);
NON_MOVEABLE(KScopedInterruptDisable);
private:
u32 m_prev_intr_state;
public:
ALWAYS_INLINE KScopedInterruptDisable() : m_prev_intr_state(KInterruptManager::GetInterruptsEnabledStateAndDisableInterrupts()) { /* ... */ }
ALWAYS_INLINE ~KScopedInterruptDisable() { KInterruptManager::RestoreInterrupts(m_prev_intr_state); }
};
class KScopedInterruptEnable {
NON_COPYABLE(KScopedInterruptEnable);
NON_MOVEABLE(KScopedInterruptEnable);
private:
u32 m_prev_intr_state;
public:
ALWAYS_INLINE KScopedInterruptEnable() : m_prev_intr_state(KInterruptManager::GetInterruptsEnabledStateAndEnableInterrupts()) { /* ... */ }
ALWAYS_INLINE ~KScopedInterruptEnable() { KInterruptManager::RestoreInterrupts(m_prev_intr_state); }
};
}
| 2,043
|
C++
|
.h
| 47
| 38.106383
| 153
| 0.723426
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,814
|
kern_k_light_condition_variable.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_light_condition_variable.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_light_lock.hpp>
#include <mesosphere/kern_k_thread_queue.hpp>
#include <mesosphere/kern_k_scoped_scheduler_lock_and_sleep.hpp>
namespace ams::kern {
class KLightConditionVariable {
private:
KThread::WaiterList m_wait_list;
public:
constexpr explicit ALWAYS_INLINE KLightConditionVariable(util::ConstantInitializeTag) : m_wait_list() { /* ... */ }
explicit ALWAYS_INLINE KLightConditionVariable() { /* ... */ }
public:
void Wait(KLightLock *lock, s64 timeout = -1ll, bool allow_terminating_thread = true);
void Broadcast();
};
}
| 1,380
|
C++
|
.h
| 33
| 37.484848
| 127
| 0.714818
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,815
|
kern_k_typed_address.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_typed_address.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
namespace ams::kern {
#ifndef MESOSPHERE_DISABLE_TYPED_ADDRESSES
template<bool Virtual, typename T>
class KTypedAddress {
private:
uintptr_t m_address;
public:
/* Constructors. */
ALWAYS_INLINE KTypedAddress() { /* ... */ }
constexpr ALWAYS_INLINE KTypedAddress(uintptr_t a) : m_address(a) { /* ... */ }
template<typename U>
constexpr ALWAYS_INLINE explicit KTypedAddress(U *ptr) : m_address(reinterpret_cast<uintptr_t>(ptr)) { /* ... */ }
/* Copy constructor. */
constexpr ALWAYS_INLINE KTypedAddress(const KTypedAddress &rhs) = default;
/* Assignment operator. */
constexpr ALWAYS_INLINE KTypedAddress &operator=(const KTypedAddress &rhs) = default;
/* Arithmetic operators. */
template<typename I>
constexpr ALWAYS_INLINE KTypedAddress operator+(I rhs) const {
static_assert(std::is_integral<I>::value);
return m_address + rhs;
}
template<typename I>
constexpr ALWAYS_INLINE KTypedAddress operator-(I rhs) const {
static_assert(std::is_integral<I>::value);
return m_address - rhs;
}
constexpr ALWAYS_INLINE ptrdiff_t operator-(KTypedAddress rhs) const {
return m_address - rhs.m_address;
}
template<typename I>
constexpr ALWAYS_INLINE KTypedAddress operator+=(I rhs) {
static_assert(std::is_integral<I>::value);
m_address += rhs;
return *this;
}
template<typename I>
constexpr ALWAYS_INLINE KTypedAddress operator-=(I rhs) {
static_assert(std::is_integral<I>::value);
m_address -= rhs;
return *this;
}
/* Logical operators. */
constexpr ALWAYS_INLINE uintptr_t operator&(uintptr_t mask) const {
return m_address & mask;
}
constexpr ALWAYS_INLINE uintptr_t operator|(uintptr_t mask) const {
return m_address | mask;
}
constexpr ALWAYS_INLINE uintptr_t operator<<(int shift) const {
return m_address << shift;
}
constexpr ALWAYS_INLINE uintptr_t operator>>(int shift) const {
return m_address >> shift;
}
template<typename U>
constexpr ALWAYS_INLINE size_t operator/(U size) const { return m_address / size; }
/* constexpr ALWAYS_INLINE uintptr_t operator%(U align) const { return m_address % align; } */
/* Comparison operators. */
constexpr ALWAYS_INLINE bool operator==(KTypedAddress rhs) const {
return m_address == rhs.m_address;
}
constexpr ALWAYS_INLINE bool operator!=(KTypedAddress rhs) const {
return m_address != rhs.m_address;
}
constexpr ALWAYS_INLINE bool operator<(KTypedAddress rhs) const {
return m_address < rhs.m_address;
}
constexpr ALWAYS_INLINE bool operator<=(KTypedAddress rhs) const {
return m_address <= rhs.m_address;
}
constexpr ALWAYS_INLINE bool operator>(KTypedAddress rhs) const {
return m_address > rhs.m_address;
}
constexpr ALWAYS_INLINE bool operator>=(KTypedAddress rhs) const {
return m_address >= rhs.m_address;
}
/* For convenience, also define comparison operators versus uintptr_t. */
constexpr ALWAYS_INLINE bool operator==(uintptr_t rhs) const {
return m_address == rhs;
}
/* Allow getting the address explicitly, for use in accessors. */
constexpr ALWAYS_INLINE uintptr_t GetValue() const {
return m_address;
}
};
struct KPhysicalAddressTag{};
struct KVirtualAddressTag{};
struct KProcessAddressTag{};
using KPhysicalAddress = KTypedAddress<false, KPhysicalAddressTag>;
using KVirtualAddress = KTypedAddress<true, KVirtualAddressTag>;
using KProcessAddress = KTypedAddress<true, KProcessAddressTag>;
/* Define accessors. */
template<bool Virtual, typename T>
constexpr ALWAYS_INLINE uintptr_t GetInteger(KTypedAddress<Virtual, T> address) {
return address.GetValue();
}
template<typename T, typename U>
ALWAYS_INLINE T *GetPointer(KTypedAddress<true, U> address) {
return reinterpret_cast<T *>(address.GetValue());
}
template<typename T>
ALWAYS_INLINE void *GetVoidPointer(KTypedAddress<true, T> address) {
return reinterpret_cast<void *>(address.GetValue());
}
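        /* Editor's note (illustrative, not part of the original header): the tag types make mixing
         * address spaces a compile-time error, while the accessors above are the explicit escape
         * hatches. A hypothetical use:
         *
         *     KVirtualAddress va(0x1000);          // hypothetical value
         *     u8 *p = GetPointer<u8>(va);          // explicit pointer conversion
         *     uintptr_t raw = GetInteger(va);      // explicit integer conversion
         *
         * Note that GetPointer/GetVoidPointer are only provided for virtual-type addresses. */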
#else
/* Plausibly, we may not want compiler overhead from using strongly typed addresses. */
/* In this case, we should just use uintptr_t. */
using KPhysicalAddress = uintptr_t;
using KVirtualAddress = uintptr_t;
using KProcessAddress = uintptr_t;
/* Define accessors. */
constexpr ALWAYS_INLINE uintptr_t GetInteger(uintptr_t address) {
return address;
}
template<typename T>
constexpr ALWAYS_INLINE T *GetPointer(uintptr_t address) {
return reinterpret_cast<T *>(address);
}
template<typename T>
constexpr ALWAYS_INLINE void *GetVoidPointer(uintptr_t address) {
return reinterpret_cast<void *>(address);
}
#endif
template<typename T>
concept IsKTypedAddress = std::same_as<T, KPhysicalAddress> || std::same_as<T, KVirtualAddress> || std::same_as<T, KProcessAddress>;
template<typename T>
constexpr inline T Null = [] {
if constexpr (std::is_same<T, uintptr_t>::value) {
return 0;
} else {
static_assert(std::is_same<T, KPhysicalAddress>::value ||
std::is_same<T, KVirtualAddress>::value ||
std::is_same<T, KProcessAddress>::value);
return T(0);
}
}();
/* Basic type validations. */
static_assert(sizeof(KPhysicalAddress) == sizeof(uintptr_t));
static_assert(sizeof(KVirtualAddress) == sizeof(uintptr_t));
static_assert(sizeof(KProcessAddress) == sizeof(uintptr_t));
static_assert(std::is_trivially_copyable<KPhysicalAddress>::value);
static_assert(std::is_trivially_copyable<KVirtualAddress>::value);
static_assert(std::is_trivially_copyable<KProcessAddress>::value);
static_assert(std::is_trivially_copy_constructible<KPhysicalAddress>::value);
static_assert(std::is_trivially_copy_constructible<KVirtualAddress>::value);
static_assert(std::is_trivially_copy_constructible<KProcessAddress>::value);
static_assert(std::is_trivially_move_constructible<KPhysicalAddress>::value);
static_assert(std::is_trivially_move_constructible<KVirtualAddress>::value);
static_assert(std::is_trivially_move_constructible<KProcessAddress>::value);
static_assert(std::is_trivially_copy_assignable<KPhysicalAddress>::value);
static_assert(std::is_trivially_copy_assignable<KVirtualAddress>::value);
static_assert(std::is_trivially_copy_assignable<KProcessAddress>::value);
static_assert(std::is_trivially_move_assignable<KPhysicalAddress>::value);
static_assert(std::is_trivially_move_assignable<KVirtualAddress>::value);
static_assert(std::is_trivially_move_assignable<KProcessAddress>::value);
static_assert(std::is_trivially_destructible<KPhysicalAddress>::value);
static_assert(std::is_trivially_destructible<KVirtualAddress>::value);
static_assert(std::is_trivially_destructible<KProcessAddress>::value);
static_assert(Null<uintptr_t> == 0);
static_assert(Null<KPhysicalAddress> == Null<uintptr_t>);
static_assert(Null<KVirtualAddress> == Null<uintptr_t>);
static_assert(Null<KProcessAddress> == Null<uintptr_t>);
/* Constructor/assignment validations. */
static_assert([]{ const KPhysicalAddress a(5); KPhysicalAddress b(a); return b; }() == KPhysicalAddress(5));
static_assert([]{ const KPhysicalAddress a(5); KPhysicalAddress b(10); b = a; return b; }() == KPhysicalAddress(5));
/* Arithmetic validations. */
static_assert(KPhysicalAddress(10) + 5 == KPhysicalAddress(15));
static_assert(KPhysicalAddress(10) - 5 == KPhysicalAddress(5));
static_assert([]{ KPhysicalAddress v(10); v += 5; return v; }() == KPhysicalAddress(15));
static_assert([]{ KPhysicalAddress v(10); v -= 5; return v; }() == KPhysicalAddress(5));
/* Logical validations. */
static_assert((KPhysicalAddress(0b11111111) >> 1) == 0b01111111);
static_assert((KPhysicalAddress(0b10101010) >> 1) == 0b01010101);
static_assert((KPhysicalAddress(0b11111111) << 1) == 0b111111110);
static_assert((KPhysicalAddress(0b01010101) << 1) == 0b10101010);
static_assert((KPhysicalAddress(0b11111111) & 0b01010101) == 0b01010101);
static_assert((KPhysicalAddress(0b11111111) & 0b10101010) == 0b10101010);
static_assert((KPhysicalAddress(0b01010101) & 0b10101010) == 0b00000000);
static_assert((KPhysicalAddress(0b00000000) | 0b01010101) == 0b01010101);
static_assert((KPhysicalAddress(0b11111111) | 0b01010101) == 0b11111111);
static_assert((KPhysicalAddress(0b10101010) | 0b01010101) == 0b11111111);
/* Comparisons. */
static_assert(KPhysicalAddress(0) == KPhysicalAddress(0));
static_assert(KPhysicalAddress(0) != KPhysicalAddress(1));
static_assert(KPhysicalAddress(0) < KPhysicalAddress(1));
static_assert(KPhysicalAddress(0) <= KPhysicalAddress(1));
static_assert(KPhysicalAddress(1) > KPhysicalAddress(0));
static_assert(KPhysicalAddress(1) >= KPhysicalAddress(0));
static_assert(!(KPhysicalAddress(0) == KPhysicalAddress(1)));
static_assert(!(KPhysicalAddress(0) != KPhysicalAddress(0)));
static_assert(!(KPhysicalAddress(1) < KPhysicalAddress(0)));
static_assert(!(KPhysicalAddress(1) <= KPhysicalAddress(0)));
static_assert(!(KPhysicalAddress(0) > KPhysicalAddress(1)));
static_assert(!(KPhysicalAddress(0) >= KPhysicalAddress(1)));
/* Accessors. */
static_assert(15 == GetInteger(KPhysicalAddress(15)));
static_assert(0 == GetInteger(Null<KPhysicalAddress>));
}
| 11,131 | C++ | .h | 216 | 42.759259 | 136 | 0.656601 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,816 | kern_common.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_common.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <mesosphere/kern_build_config.hpp>
#include <mesosphere/svc/kern_svc_results.hpp>
#include <mesosphere/kern_select_assembly_offsets.h>
namespace ams::kern {
constexpr size_t PageSize = 4_KB;
#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
ams::TargetFirmware GetTargetFirmware();
#else
consteval ALWAYS_INLINE ams::TargetFirmware GetTargetFirmware() {
return ams::TargetFirmware_Current;
}
#endif
}
| 1,095 | C++ | .h | 30 | 34.1 | 76 | 0.763431 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,817 | kern_kernel.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_kernel.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_memory_layout.hpp>
#include <mesosphere/kern_k_memory_manager.hpp>
#include <mesosphere/kern_k_scheduler.hpp>
#include <mesosphere/kern_k_interrupt_task_manager.hpp>
#include <mesosphere/kern_select_interrupt_manager.hpp>
#include <mesosphere/kern_select_hardware_timer.hpp>
#include <mesosphere/kern_k_worker_task_manager.hpp>
namespace ams::kern {
class KThread;
class KHardwareTimer;
class KResourceLimit;
class KInterruptManager;
class KInterruptTaskManager;
class KScheduler;
class KMemoryManager;
class KPageTableManager;
class KMemoryBlockSlabManager;
class KBlockInfoManager;
class KUnsafeMemory;
#if defined(ATMOSPHERE_ARCH_ARM64)
namespace arch::arm64 {
class KSupervisorPageTable;
}
using ams::kern::arch::arm64::KSupervisorPageTable;
#else
#error "Unknown architecture for KSupervisorPageTable forward declare"
#endif
class Kernel {
public:
enum class State : u8 {
Invalid = 0,
Initializing = 1,
Initialized = 2,
};
static constexpr size_t ApplicationMemoryBlockSlabHeapSize = 20000;
static constexpr size_t SystemMemoryBlockSlabHeapSize = 10000;
static constexpr size_t BlockInfoSlabHeapSize = 4000;
static constexpr size_t ReservedDynamicPageCount = 64;
private:
static State s_state;
static KResourceLimit s_system_resource_limit;
static KMemoryManager s_memory_manager;
static KPageTableSlabHeap s_page_table_heap;
static KMemoryBlockSlabHeap s_app_memory_block_heap;
static KMemoryBlockSlabHeap s_sys_memory_block_heap;
static KBlockInfoSlabHeap s_block_info_heap;
static KPageTableManager s_app_page_table_manager;
static KPageTableManager s_sys_page_table_manager;
static KMemoryBlockSlabManager s_app_memory_block_manager;
static KMemoryBlockSlabManager s_sys_memory_block_manager;
static KBlockInfoManager s_app_block_info_manager;
static KBlockInfoManager s_sys_block_info_manager;
static KSystemResource s_app_system_resource;
static KSystemResource s_sys_system_resource;
static KSupervisorPageTable s_supervisor_page_table;
static KUnsafeMemory s_unsafe_memory;
static KWorkerTaskManager s_worker_task_managers[KWorkerTaskManager::WorkerType_Count];
static KInterruptManager s_interrupt_manager;
static KScheduler s_schedulers[cpu::NumCores];
static KInterruptTaskManager s_interrupt_task_managers[cpu::NumCores];
static KHardwareTimer s_hardware_timers[cpu::NumCores];
public:
static NOINLINE void InitializeCoreLocalRegion(s32 core_id);
static NOINLINE void InitializeMainAndIdleThreads(s32 core_id);
static NOINLINE void InitializeResourceManagers(KVirtualAddress address, size_t size);
static NOINLINE void PrintLayout();
static ALWAYS_INLINE State GetState() { return s_state; }
static ALWAYS_INLINE void SetState(State state) { s_state = state; }
static KThread &GetMainThread(s32 core_id);
static KThread &GetIdleThread(s32 core_id);
static ALWAYS_INLINE KScheduler &GetScheduler() {
return s_schedulers[GetCurrentCoreId()];
}
static ALWAYS_INLINE KScheduler &GetScheduler(s32 core_id) {
return s_schedulers[core_id];
}
static ALWAYS_INLINE KInterruptTaskManager &GetInterruptTaskManager() {
return s_interrupt_task_managers[GetCurrentCoreId()];
}
static ALWAYS_INLINE KInterruptManager &GetInterruptManager() {
return s_interrupt_manager;
}
static ALWAYS_INLINE KHardwareTimer &GetHardwareTimer() {
return s_hardware_timers[GetCurrentCoreId()];
}
static ALWAYS_INLINE KHardwareTimer &GetHardwareTimer(s32 core_id) {
return s_hardware_timers[core_id];
}
static ALWAYS_INLINE KResourceLimit &GetSystemResourceLimit() {
return s_system_resource_limit;
}
static ALWAYS_INLINE KMemoryManager &GetMemoryManager() {
return s_memory_manager;
}
static ALWAYS_INLINE KSystemResource &GetApplicationSystemResource() {
return s_app_system_resource;
}
static ALWAYS_INLINE KSystemResource &GetSystemSystemResource() {
return s_sys_system_resource;
}
static ALWAYS_INLINE KSupervisorPageTable &GetKernelPageTable() {
return s_supervisor_page_table;
}
static ALWAYS_INLINE KUnsafeMemory &GetUnsafeMemory() {
return s_unsafe_memory;
}
static ALWAYS_INLINE KWorkerTaskManager &GetWorkerTaskManager(KWorkerTaskManager::WorkerType type) {
MESOSPHERE_ASSERT(type <= KWorkerTaskManager::WorkerType_Count);
return s_worker_task_managers[type];
}
};
ALWAYS_INLINE KScheduler &GetCurrentScheduler() {
return Kernel::GetScheduler();
}
}
| 6,235 | C++ | .h | 134 | 36.783582 | 112 | 0.67177 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,818 | kern_k_affinity_mask.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_affinity_mask.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
namespace ams::kern {
class KAffinityMask {
private:
static constexpr u64 AllowedAffinityMask = (1ul << cpu::NumCores) - 1;
private:
u64 m_mask;
private:
static constexpr ALWAYS_INLINE u64 GetCoreBit(s32 core) {
MESOSPHERE_ASSERT(0 <= core && core < static_cast<s32>(cpu::NumCores));
return (1ul << core);
}
public:
constexpr ALWAYS_INLINE KAffinityMask() : m_mask(0) { MESOSPHERE_ASSERT_THIS(); }
constexpr ALWAYS_INLINE u64 GetAffinityMask() const { return m_mask; }
constexpr ALWAYS_INLINE void SetAffinityMask(u64 new_mask) {
MESOSPHERE_ASSERT((new_mask & ~AllowedAffinityMask) == 0);
m_mask = new_mask;
}
constexpr ALWAYS_INLINE bool GetAffinity(s32 core) const {
return m_mask & GetCoreBit(core);
}
constexpr ALWAYS_INLINE void SetAffinity(s32 core, bool set) {
MESOSPHERE_ASSERT(0 <= core && core < static_cast<s32>(cpu::NumCores));
if (set) {
m_mask |= GetCoreBit(core);
} else {
m_mask &= ~GetCoreBit(core);
}
}
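            /* Editor's note (illustrative, not part of the original header): for example, if
             * cpu::NumCores were 4 (a hypothetical value here), AllowedAffinityMask would be 0b1111,
             * and SetAffinity(2, true) would set bit 2 so that GetAffinity(2) returns true while the
             * other cores are unaffected. */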
constexpr ALWAYS_INLINE void SetAll() {
m_mask = AllowedAffinityMask;
}
};
}
| 2,143 | C++ | .h | 52 | 31.923077 | 93 | 0.608069 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,819 | kern_select_interrupt_controller.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_select_interrupt_controller.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_interrupt_name.hpp>
#if defined(ATMOSPHERE_ARCH_ARM64)
#include <mesosphere/arch/arm64/kern_k_interrupt_controller.hpp>
namespace ams::kern {
using ams::kern::arch::arm64::KInterruptController;
}
#elif defined(ATMOSPHERE_ARCH_ARM)
#include <mesosphere/arch/arm/kern_k_interrupt_controller.hpp>
namespace ams::kern {
using ams::kern::arch::arm::KInterruptController;
}
#else
#error "Unknown architecture for KInterruptController"
#endif
| 1,206 | C++ | .h | 31 | 35.806452 | 76 | 0.753425 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,820 | kern_k_worker_task_manager.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_worker_task_manager.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_worker_task.hpp>
#include <mesosphere/kern_k_thread.hpp>
namespace ams::kern {
class KWorkerTaskManager {
public:
static constexpr s32 ExitWorkerPriority = 11;
enum WorkerType {
WorkerType_ExitThread,
WorkerType_ExitProcess,
WorkerType_Count,
};
private:
KWorkerTask *m_head_task;
KWorkerTask *m_tail_task;
KThread *m_waiting_thread;
private:
static void ThreadFunction(uintptr_t arg);
void ThreadFunctionImpl();
KWorkerTask *GetTask();
void AddTask(KWorkerTask *task);
public:
constexpr KWorkerTaskManager() : m_head_task(), m_tail_task(), m_waiting_thread() { /* ... */ }
NOINLINE void Initialize(s32 priority);
static void AddTask(WorkerType type, KWorkerTask *task);
};
}
| 1,641 | C++ | .h | 43 | 31 | 107 | 0.658705 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,821 | kern_k_class_token.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_class_token.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
namespace ams::kern {
class KAutoObject;
class KSystemResource;
#define FOR_EACH_K_CLASS_TOKEN_OBJECT_TYPE(HANDLER) \
HANDLER(KAutoObject) \
\
HANDLER(KSynchronizationObject) \
HANDLER(KReadableEvent) \
\
HANDLER(KInterruptEvent) \
HANDLER(KDebug) \
HANDLER(KThread) \
HANDLER(KServerPort) \
HANDLER(KServerSession) \
HANDLER(KClientPort) \
HANDLER(KClientSession) \
HANDLER(KProcess) \
HANDLER(KResourceLimit) \
HANDLER(KLightSession) \
HANDLER(KPort) \
HANDLER(KSession) \
HANDLER(KSharedMemory) \
HANDLER(KEvent) \
HANDLER(KLightClientSession) \
HANDLER(KLightServerSession) \
HANDLER(KTransferMemory) \
HANDLER(KDeviceAddressSpace) \
HANDLER(KSessionRequest) \
HANDLER(KCodeMemory) \
HANDLER(KIoPool) \
HANDLER(KIoRegion) \
HANDLER(KSystemResource)
class KClassTokenGenerator {
public:
using TokenBaseType = u16;
public:
static constexpr size_t BaseClassBits = 8;
static constexpr size_t FinalClassBits = (sizeof(TokenBaseType) * CHAR_BIT) - BaseClassBits;
/* One bit per base class. */
static constexpr size_t NumBaseClasses = BaseClassBits;
/* Final classes are permutations of three bits. */
static constexpr size_t NumFinalClasses = [] {
TokenBaseType index = 0;
for (size_t i = 0; i < FinalClassBits; i++) {
for (size_t j = i + 1; j < FinalClassBits; j++) {
for (size_t k = j + 1; k < FinalClassBits; k++) {
index++;
}
}
}
return index;
}();
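            /* Editor's note (illustrative, not part of the original header): the triple loop above
             * counts the combinations of three distinct bits out of FinalClassBits. With
             * TokenBaseType == u16 and BaseClassBits == 8, FinalClassBits == 8, so
             * NumFinalClasses == C(8, 3) == 56. Each final class token therefore sets exactly three
             * of the upper eight bits (see FinalClassToken below), so no final-class token is a
             * bit-subset of another. */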
private:
template<TokenBaseType Index>
static constexpr inline TokenBaseType BaseClassToken = BIT(Index);
template<TokenBaseType Index>
static constexpr inline TokenBaseType FinalClassToken = [] {
TokenBaseType index = 0;
for (size_t i = 0; i < FinalClassBits; i++) {
for (size_t j = i + 1; j < FinalClassBits; j++) {
for (size_t k = j + 1; k < FinalClassBits; k++) {
if ((index++) == Index) {
return ((1ul << i) | (1ul << j) | (1ul << k)) << BaseClassBits;
}
}
}
}
__builtin_unreachable();
}();
template<typename T>
static constexpr inline TokenBaseType GetClassToken() {
static_assert(std::is_base_of<KAutoObject, T>::value);
if constexpr (std::is_same<T, KAutoObject>::value) {
static_assert(T::ObjectType == ObjectType::KAutoObject);
return 0;
} else if constexpr (!std::is_final<T>::value && !std::same_as<T, KSystemResource>) {
static_assert(ObjectType::BaseClassesStart <= T::ObjectType && T::ObjectType < ObjectType::BaseClassesEnd);
constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) - static_cast<TokenBaseType>(ObjectType::BaseClassesStart);
return BaseClassToken<ClassIndex> | GetClassToken<typename T::BaseClass>();
} else if constexpr (ObjectType::FinalClassesStart <= T::ObjectType && T::ObjectType < ObjectType::FinalClassesEnd) {
constexpr auto ClassIndex = static_cast<TokenBaseType>(T::ObjectType) - static_cast<TokenBaseType>(ObjectType::FinalClassesStart);
return FinalClassToken<ClassIndex> | GetClassToken<typename T::BaseClass>();
} else {
static_assert(!std::is_same<T, T>::value, "GetClassToken: Invalid Type");
}
};
public:
enum class ObjectType {
KAutoObject,
BaseClassesStart,
KSynchronizationObject = BaseClassesStart,
KReadableEvent,
BaseClassesEnd,
FinalClassesStart = BaseClassesEnd,
KInterruptEvent = FinalClassesStart,
KDebug,
KThread,
KServerPort,
KServerSession,
KClientPort,
KClientSession,
KProcess,
KResourceLimit,
KLightSession,
KPort,
KSession,
KSharedMemory,
KEvent,
KLightClientSession,
KLightServerSession,
KTransferMemory,
KDeviceAddressSpace,
KSessionRequest,
KCodeMemory,
KIoPool,
KIoRegion,
/* NOTE: What occupies these gaps? They can be inferred, but they don't make sense. */
KAlpha,
KBeta,
KSystemResource,
FinalClassesLast,
FinalClassesEnd = FinalClassesStart + NumFinalClasses,
};
static_assert(ObjectType::FinalClassesLast <= ObjectType::FinalClassesEnd);
template<typename T>
static constexpr inline TokenBaseType ClassToken = GetClassToken<T>();
};
using ClassTokenType = KClassTokenGenerator::TokenBaseType;
template<typename T>
static constexpr inline ClassTokenType ClassToken = KClassTokenGenerator::ClassToken<T>;
namespace impl {
consteval bool IsKClassTokenGeneratorForEachMacroValid() {
auto IsObjectTypeIncludedByMacro = [](KClassTokenGenerator::ObjectType object_type) -> bool {
#define CLASS_TOKEN_HANDLER(CLASSNAME) if (object_type == KClassTokenGenerator::ObjectType::CLASSNAME) { return true; }
FOR_EACH_K_CLASS_TOKEN_OBJECT_TYPE(CLASS_TOKEN_HANDLER)
#undef CLASS_TOKEN_HANDLER
return false;
};
if (!IsObjectTypeIncludedByMacro(KClassTokenGenerator::ObjectType::KAutoObject)) {
return false;
}
for (auto base = util::ToUnderlying(KClassTokenGenerator::ObjectType::BaseClassesStart); base < util::ToUnderlying(KClassTokenGenerator::ObjectType::BaseClassesEnd); ++base) {
if (!IsObjectTypeIncludedByMacro(static_cast<KClassTokenGenerator::ObjectType>(base))) {
return false;
}
}
for (auto fin = util::ToUnderlying(KClassTokenGenerator::ObjectType::FinalClassesStart); fin < util::ToUnderlying(KClassTokenGenerator::ObjectType::FinalClassesLast); ++fin) {
if (const auto o = static_cast<KClassTokenGenerator::ObjectType>(fin); !IsObjectTypeIncludedByMacro(o)) {
if (o != KClassTokenGenerator::ObjectType::KAlpha && o != KClassTokenGenerator::ObjectType::KBeta) {
return false;
}
}
}
return true;
}
static_assert(IsKClassTokenGeneratorForEachMacroValid());
}
}
| 8,835 | C++ | .h | 176 | 35.142045 | 187 | 0.526474 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,822 | kern_k_interrupt_task.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
namespace ams::kern {
class KInterruptTask;
class KInterruptHandler {
public:
virtual KInterruptTask *OnInterrupt(s32 interrupt_id) = 0;
};
class KInterruptTask : public KInterruptHandler {
private:
KInterruptTask *m_next_task;
public:
constexpr ALWAYS_INLINE KInterruptTask() : m_next_task(nullptr) { /* ... */ }
constexpr ALWAYS_INLINE KInterruptTask *GetNextTask() const {
return m_next_task;
}
constexpr ALWAYS_INLINE void SetNextTask(KInterruptTask *t) {
m_next_task = t;
}
virtual void DoTask() = 0;
};
static ALWAYS_INLINE KInterruptTask *GetDummyInterruptTask() {
return reinterpret_cast<KInterruptTask *>(1);
}
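    /* Editor's note (editor's inference, not part of the original header): the value 1 appears to
     * serve as a sentinel "handled, but no task to queue" pointer, distinguishable from both a real
     * task pointer and nullptr; it is compared against rather than dereferenced. */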
}
| 1,467 | C++ | .h | 39 | 31.205128 | 89 | 0.670895 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,823 | kern_k_synchronization_object.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_synchronization_object.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_k_auto_object.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
namespace ams::kern {
class KThread;
class KSynchronizationObject : public KAutoObjectWithList {
MESOSPHERE_AUTOOBJECT_TRAITS(KSynchronizationObject, KAutoObject);
public:
struct ThreadListNode {
ThreadListNode *next;
KThread *thread;
};
private:
ThreadListNode *m_thread_list_head;
ThreadListNode *m_thread_list_tail;
protected:
constexpr ALWAYS_INLINE explicit KSynchronizationObject(util::ConstantInitializeTag) : KAutoObjectWithList(util::ConstantInitialize), m_thread_list_head(), m_thread_list_tail() { MESOSPHERE_ASSERT_THIS(); }
ALWAYS_INLINE explicit KSynchronizationObject() : m_thread_list_head(), m_thread_list_tail() { MESOSPHERE_ASSERT_THIS(); }
/* NOTE: This is a virtual function which is overridden only by KDebugBase in Nintendo's kernel. */
/* virtual void OnFinalizeSynchronizationObject() { MESOSPHERE_ASSERT_THIS(); } */
void NotifyAvailable(Result result);
ALWAYS_INLINE void NotifyAvailable() {
return this->NotifyAvailable(ResultSuccess());
}
public:
static Result Wait(s32 *out_index, KSynchronizationObject **objects, const s32 num_objects, s64 timeout);
public:
void Finalize();
virtual bool IsSignaled() const { AMS_INFINITE_LOOP(); }
void DumpWaiters();
ALWAYS_INLINE void LinkNode(ThreadListNode *node) {
/* Link the node to the list. */
if (m_thread_list_tail == nullptr) {
m_thread_list_head = node;
} else {
m_thread_list_tail->next = node;
}
m_thread_list_tail = node;
}
ALWAYS_INLINE void UnlinkNode(ThreadListNode *node) {
/* Unlink the node from the list. */
ThreadListNode *prev_ptr = reinterpret_cast<ThreadListNode *>(std::addressof(m_thread_list_head));
ThreadListNode *prev_val = nullptr;
ThreadListNode *prev, *tail_prev;
do {
prev = prev_ptr;
prev_ptr = prev_ptr->next;
tail_prev = prev_val;
prev_val = prev_ptr;
} while (prev_ptr != node);
if (m_thread_list_tail == node) {
m_thread_list_tail = tail_prev;
}
prev->next = node->next;
}
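            /* Editor's note (illustrative, not part of the original header): UnlinkNode above finds
             * the node's predecessor by walking the singly-linked 'next' chain from the head. The
             * reinterpret_cast works in practice because ThreadListNode's first member is 'next', so
             * the address of m_thread_list_head can be treated as a pseudo-node whose 'next' field is
             * the list head itself. */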
};
}
| 3,343 | C++ | .h | 72 | 35.333333 | 218 | 0.603438 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,824 | kern_k_worker_task.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_worker_task.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
namespace ams::kern {
/* NOTE: We inherit KWorkerTask from KSynchronizationObject in order to devirtualize DoWorkerTask, which is exclusive to KThread/KProcess. */
/* This should be reverted, if Nintendo adds new types of worker tasks in the future. */
/* Nintendo has class KWorkerTask { ... } with identical interface but DoWorkerTask() virtual. */
class KWorkerTask : public KSynchronizationObject {
private:
KWorkerTask *m_next_task;
public:
constexpr ALWAYS_INLINE explicit KWorkerTask(util::ConstantInitializeTag) : KSynchronizationObject(util::ConstantInitialize), m_next_task(nullptr) { /* ... */ }
ALWAYS_INLINE explicit KWorkerTask() : m_next_task(nullptr) { /* ... */ }
constexpr ALWAYS_INLINE KWorkerTask *GetNextTask() const { return m_next_task; }
constexpr ALWAYS_INLINE void SetNextTask(KWorkerTask *task) { m_next_task = task; }
void DoWorkerTask();
};
}
| 1,676 | C++ | .h | 32 | 47.3125 | 173 | 0.715159 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,825 | kern_k_shared_memory_info.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_shared_memory_info.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
namespace ams::kern {
class KSharedMemory;
class KSharedMemoryInfo : public KSlabAllocated<KSharedMemoryInfo>, public util::IntrusiveListBaseNode<KSharedMemoryInfo> {
private:
KSharedMemory *m_shared_memory;
size_t m_reference_count;
public:
explicit KSharedMemoryInfo() { /* ... */ }
~KSharedMemoryInfo() { /* ... */ }
constexpr void Initialize(KSharedMemory *m) {
MESOSPHERE_ASSERT_THIS();
m_shared_memory = m;
m_reference_count = 0;
}
constexpr void Open() {
++m_reference_count;
MESOSPHERE_ASSERT(m_reference_count > 0);
}
constexpr bool Close() {
MESOSPHERE_ASSERT(m_reference_count > 0);
return (--m_reference_count) == 0;
}
constexpr KSharedMemory *GetSharedMemory() const { return m_shared_memory; }
constexpr size_t GetReferenceCount() const { return m_reference_count; }
};
}
| 1,810 | C++ | .h | 44 | 33.181818 | 127 | 0.641069 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,826 | kern_select_page_table_impl.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_select_page_table_impl.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#if defined(ATMOSPHERE_ARCH_ARM64)
#include <mesosphere/arch/arm64/kern_k_page_table_impl.hpp>
namespace ams::kern {
using ams::kern::arch::arm64::KPageTableImpl;
}
#else
#error "Unknown architecture for KPageTableImpl"
#endif
| 942 | C++ | .h | 25 | 34.96 | 76 | 0.751096 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,827 | kern_k_port.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_port.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_synchronization_object.hpp>
#include <mesosphere/kern_k_client_port.hpp>
#include <mesosphere/kern_k_server_port.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
namespace ams::kern {
class KServerSession;
class KLightServerSession;
class KPort final : public KAutoObjectWithSlabHeapAndContainer<KPort, KAutoObjectWithList> {
MESOSPHERE_AUTOOBJECT_TRAITS(KPort, KAutoObject);
private:
enum class State : u8 {
Invalid = 0,
Normal = 1,
ClientClosed = 2,
ServerClosed = 3,
};
private:
KServerPort m_server;
KClientPort m_client;
uintptr_t m_name;
State m_state;
bool m_is_light;
public:
explicit KPort() : m_state(State::Invalid), m_is_light() { /* ... */ }
static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
void Initialize(s32 max_sessions, bool is_light, uintptr_t name);
void Finalize() { /* ... */ }
void OnClientClosed();
void OnServerClosed();
uintptr_t GetName() const { return m_name; }
bool IsLight() const { return m_is_light; }
bool IsServerClosed() const {
KScopedSchedulerLock sl;
return m_state == State::ServerClosed;
}
Result EnqueueSession(KServerSession *session);
Result EnqueueSession(KLightServerSession *session);
KClientPort &GetClientPort() { return m_client; }
KServerPort &GetServerPort() { return m_server; }
const KClientPort &GetClientPort() const { return m_client; }
const KServerPort &GetServerPort() const { return m_server; }
};
}
| 2,545 | C++ | .h | 60 | 34 | 96 | 0.634196 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,828 | kern_panic.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_panic.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_debug_log.hpp>
namespace ams::kern {
NORETURN NOINLINE void Panic(const char *file, int line, const char *format, ...) __attribute__((format(printf, 3, 4)));
NORETURN NOINLINE void Panic();
}
namespace ams::diag {
NORETURN ALWAYS_INLINE void OnAssertionFailure(AssertionType type, const char *expr, const char *func, const char *file, int line) {
#if defined(MESOSPHERE_ENABLE_DEBUG_PRINT)
::ams::kern::Panic(file, line, "ams::diag::OnAssertionFailure: %d %s:%s", (type == AssertionType_Audit), func, expr);
#else
::ams::kern::Panic();
AMS_UNUSED(type, expr, func, file, line);
#endif
}
}
#define MESOSPHERE_UNUSED(...) AMS_UNUSED(__VA_ARGS__)
#ifdef MESOSPHERE_ENABLE_DEBUG_PRINT
#define MESOSPHERE_PANIC(...) do { ::ams::kern::Panic(__FILE__, __LINE__, ## __VA_ARGS__); } while(0)
#else
#define MESOSPHERE_PANIC(...) do { MESOSPHERE_UNUSED(__VA_ARGS__); ::ams::kern::Panic(); } while(0)
#endif
#ifdef MESOSPHERE_ENABLE_ASSERTIONS
#define MESOSPHERE_ASSERT_IMPL(expr, ...) \
({ \
const bool __tmp_meso_assert_val = (expr); \
if (AMS_UNLIKELY(!__tmp_meso_assert_val)) { \
MESOSPHERE_PANIC(__VA_ARGS__); \
} \
})
#elif defined(MESOSPHERE_PRESERVE_ASSERTION_EXPRESSIONS)
#define MESOSPHERE_ASSERT_IMPL(expr, ...) do { static_cast<void>(expr); } while (0)
#else
#define MESOSPHERE_ASSERT_IMPL(expr, ...) static_cast<void>(0)
#endif
#define MESOSPHERE_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(expr, "Assertion failed: %s\n", #expr)
#define MESOSPHERE_R_ASSERT(expr) MESOSPHERE_ASSERT_IMPL(R_SUCCEEDED(expr), "Result assertion failed: %s\n", #expr)
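/* Editor's note (illustrative, not part of the original header): hypothetical call sites for the
 * two assertion flavors:
 *
 *     MESOSPHERE_ASSERT(ptr != nullptr);               // ptr is hypothetical
 *     MESOSPHERE_R_ASSERT(SomeResultReturningCall());  // hypothetical call returning ams::Result
 *
 * Depending on configuration, the expression is checked, preserved only for its side effects
 * (MESOSPHERE_PRESERVE_ASSERTION_EXPRESSIONS), or compiled out entirely. */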
#define MESOSPHERE_UNREACHABLE_DEFAULT_CASE() default: MESOSPHERE_PANIC("Unreachable default case entered")
#ifdef MESOSPHERE_ENABLE_THIS_ASSERT
#define MESOSPHERE_ASSERT_THIS() MESOSPHERE_ASSERT(this != nullptr)
#else
#define MESOSPHERE_ASSERT_THIS()
#endif
#ifdef MESOSPHERE_BUILD_FOR_AUDITING
#define MESOSPHERE_AUDIT(expr) MESOSPHERE_ASSERT(expr)
#elif defined(MESOSPHERE_PRESERVE_AUDIT_EXPRESSIONS)
#define MESOSPHERE_AUDIT(expr) do { static_cast<void>(expr); } while (0)
#else
#define MESOSPHERE_AUDIT(expr) static_cast<void>(0)
#endif
#define MESOSPHERE_TODO(arg) ({ constexpr const char *__mesosphere_todo = arg; static_cast<void>(__mesosphere_todo); MESOSPHERE_PANIC("TODO (%s): %s\n", __PRETTY_FUNCTION__, __mesosphere_todo); })
#define MESOSPHERE_UNIMPLEMENTED() MESOSPHERE_PANIC("%s: Unimplemented\n", __PRETTY_FUNCTION__)
#define MESOSPHERE_ABORT() MESOSPHERE_PANIC("Abort()\n");
#define MESOSPHERE_INIT_ABORT() AMS_INFINITE_LOOP()
#define MESOSPHERE_ABORT_UNLESS(expr) \
({ \
const bool _tmp_meso_assert_val = (expr); \
if (AMS_UNLIKELY(!_tmp_meso_assert_val)) { \
MESOSPHERE_PANIC("Abort(): %s", #expr); \
} \
})
#define MESOSPHERE_INIT_ABORT_UNLESS(expr) \
({ \
const bool __tmp_meso_assert_val = (expr); \
if (AMS_UNLIKELY(!__tmp_meso_assert_val)) { \
MESOSPHERE_INIT_ABORT(); \
} \
})
#define MESOSPHERE_R_ABORT_UNLESS(expr) \
({ \
const ::ams::Result _tmp_meso_r_abort_res = static_cast<::ams::Result>((expr)); \
if (AMS_UNLIKELY((R_FAILED(_tmp_meso_r_abort_res)))) { \
MESOSPHERE_PANIC("Result Abort(): %s 2%03d-%04d\n", #expr, _tmp_meso_r_abort_res.GetModule(), _tmp_meso_r_abort_res.GetDescription()); \
} \
})
| 4,994 | C++ | .h | 91 | 50.901099 | 196 | 0.565493 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,829 | kern_k_page_heap.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_page_heap.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_page_bitmap.hpp>
namespace ams::kern {
class KPageHeap {
private:
static constexpr inline size_t MemoryBlockPageShifts[] = { 0xC, 0x10, 0x15, 0x16, 0x19, 0x1D, 0x1E };
static constexpr size_t NumMemoryBlockPageShifts = util::size(MemoryBlockPageShifts);
public:
static constexpr s32 GetAlignedBlockIndex(size_t num_pages, size_t align_pages) {
const size_t target_pages = std::max(num_pages, align_pages);
for (size_t i = 0; i < NumMemoryBlockPageShifts; i++) {
if (target_pages <= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
return static_cast<s32>(i);
}
}
return -1;
}
static constexpr s32 GetBlockIndex(size_t num_pages) {
for (s32 i = static_cast<s32>(NumMemoryBlockPageShifts) - 1; i >= 0; i--) {
if (num_pages >= (static_cast<size_t>(1) << MemoryBlockPageShifts[i]) / PageSize) {
return i;
}
}
return -1;
}
static constexpr size_t GetBlockSize(size_t index) {
return static_cast<size_t>(1) << MemoryBlockPageShifts[index];
}
static constexpr size_t GetBlockNumPages(size_t index) {
return GetBlockSize(index) / PageSize;
}
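            /* Editor's note (illustrative, not part of the original header): with the shifts above,
             * the block sizes are 4 KiB, 64 KiB, 2 MiB, 4 MiB, 32 MiB, 512 MiB, and 1 GiB. For
             * example, GetBlockSize(1) == 0x10000 and GetBlockNumPages(1) == 16, so
             * GetAlignedBlockIndex(16, 1) returns 1: the smallest block size that can hold 16 pages. */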
private:
class Block {
private:
KPageBitmap m_bitmap;
KPhysicalAddress m_heap_address;
uintptr_t m_end_offset;
size_t m_block_shift;
size_t m_next_block_shift;
public:
Block() : m_bitmap(), m_heap_address(Null<KPhysicalAddress>), m_end_offset(), m_block_shift(), m_next_block_shift() { /* ... */ }
constexpr size_t GetShift() const { return m_block_shift; }
constexpr size_t GetNextShift() const { return m_next_block_shift; }
constexpr size_t GetSize() const { return u64(1) << this->GetShift(); }
constexpr size_t GetNumPages() const { return this->GetSize() / PageSize; }
constexpr size_t GetNumFreeBlocks() const { return m_bitmap.GetNumBits(); }
constexpr size_t GetNumFreePages() const { return this->GetNumFreeBlocks() * this->GetNumPages(); }
u64 *Initialize(KPhysicalAddress addr, size_t size, size_t bs, size_t nbs, u64 *bit_storage) {
/* Set shifts. */
m_block_shift = bs;
m_next_block_shift = nbs;
/* Align up the address. */
KPhysicalAddress end = addr + size;
const size_t align = (m_next_block_shift != 0) ? (u64(1) << m_next_block_shift) : (u64(1) << m_block_shift);
addr = util::AlignDown(GetInteger(addr), align);
end = util::AlignUp(GetInteger(end), align);
m_heap_address = addr;
m_end_offset = (end - addr) / (u64(1) << m_block_shift);
return m_bitmap.Initialize(bit_storage, m_end_offset);
}
KPhysicalAddress PushBlock(KPhysicalAddress address) {
/* Set the bit for the free block. */
size_t offset = (address - m_heap_address) >> this->GetShift();
m_bitmap.SetBit(offset);
/* If we have a next shift, try to clear the blocks below this one and return the new address. */
if (this->GetNextShift()) {
const size_t diff = u64(1) << (this->GetNextShift() - this->GetShift());
offset = util::AlignDown(offset, diff);
if (m_bitmap.ClearRange(offset, diff)) {
return m_heap_address + (offset << this->GetShift());
}
}
/* We couldn't coalesce, or we're already as big as possible. */
return Null<KPhysicalAddress>;
}
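                /* Editor's note (illustrative, not part of the original header): PushBlock above
                 * performs buddy-style coalescing. Once every sub-block making up the next-larger
                 * block size is free, ClearRange succeeds, those bits are cleared, and the aligned
                 * address is returned so the caller can push it into the next-larger Block; otherwise
                 * Null is returned and the block simply stays free at the current size. */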
KPhysicalAddress PopBlock(bool random) {
/* Find a free block. */
ssize_t soffset = m_bitmap.FindFreeBlock(random);
if (soffset < 0) {
return Null<KPhysicalAddress>;
}
const size_t offset = static_cast<size_t>(soffset);
/* Update our tracking and return it. */
m_bitmap.ClearBit(offset);
return m_heap_address + (offset << this->GetShift());
}
public:
static constexpr size_t CalculateManagementOverheadSize(size_t region_size, size_t cur_block_shift, size_t next_block_shift) {
const size_t cur_block_size = (u64(1) << cur_block_shift);
const size_t next_block_size = (u64(1) << next_block_shift);
const size_t align = (next_block_shift != 0) ? next_block_size : cur_block_size;
return KPageBitmap::CalculateManagementOverheadSize((align * 2 + util::AlignUp(region_size, align)) / cur_block_size);
}
};
private:
KPhysicalAddress m_heap_address;
size_t m_heap_size;
size_t m_initial_used_size;
size_t m_num_blocks;
Block m_blocks[NumMemoryBlockPageShifts];
KPageBitmap::RandomBitGenerator m_rng;
private:
void Initialize(KPhysicalAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts);
size_t GetNumFreePages() const;
void FreeBlock(KPhysicalAddress block, s32 index);
public:
KPageHeap() : m_heap_address(Null<KPhysicalAddress>), m_heap_size(), m_initial_used_size(), m_num_blocks(), m_blocks(), m_rng() { /* ... */ }
constexpr KPhysicalAddress GetAddress() const { return m_heap_address; }
constexpr size_t GetSize() const { return m_heap_size; }
constexpr KPhysicalAddress GetEndAddress() const { return this->GetAddress() + this->GetSize(); }
constexpr size_t GetPageOffset(KPhysicalAddress block) const { return (block - this->GetAddress()) / PageSize; }
constexpr size_t GetPageOffsetToEnd(KPhysicalAddress block) const { return (this->GetEndAddress() - block) / PageSize; }
void Initialize(KPhysicalAddress heap_address, size_t heap_size, KVirtualAddress management_address, size_t management_size) {
return this->Initialize(heap_address, heap_size, management_address, management_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
}
size_t GetFreeSize() const { return this->GetNumFreePages() * PageSize; }
void DumpFreeList() const;
void SetInitialUsedSize(size_t reserved_size) {
/* Check that the reserved size is valid. */
const size_t free_size = this->GetNumFreePages() * PageSize;
MESOSPHERE_ABORT_UNLESS(m_heap_size >= free_size + reserved_size);
/* Set the initial used size. */
m_initial_used_size = m_heap_size - free_size - reserved_size;
}
KPhysicalAddress AllocateBlock(s32 index, bool random) {
if (random) {
const size_t block_pages = m_blocks[index].GetNumPages();
return this->AllocateByRandom(index, block_pages, block_pages);
} else {
return this->AllocateByLinearSearch(index);
}
}
KPhysicalAddress AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
/* TODO: linear search support? */
return this->AllocateByRandom(index, num_pages, align_pages);
}
void Free(KPhysicalAddress addr, size_t num_pages);
private:
KPhysicalAddress AllocateByLinearSearch(s32 index);
KPhysicalAddress AllocateByRandom(s32 index, size_t num_pages, size_t align_pages);
static size_t CalculateManagementOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts);
public:
static size_t CalculateManagementOverheadSize(size_t region_size) {
return CalculateManagementOverheadSize(region_size, MemoryBlockPageShifts, NumMemoryBlockPageShifts);
}
};
}
| 9,702 | C++ | .h | 163 | 43.092025 | 190 | 0.559071 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,830 | kern_k_page_table_manager.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_page_table_manager.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_page_table_slab_heap.hpp>
#include <mesosphere/kern_k_dynamic_resource_manager.hpp>
namespace ams::kern {
class KPageTableManager : public KDynamicResourceManager<impl::PageTablePage, true> {
public:
using RefCount = KPageTableSlabHeap::RefCount;
static constexpr size_t PageTableSize = KPageTableSlabHeap::PageTableSize;
private:
using BaseHeap = KDynamicResourceManager<impl::PageTablePage, true>;
private:
KPageTableSlabHeap *m_pt_heap;
public:
constexpr explicit KPageTableManager(util::ConstantInitializeTag) : m_pt_heap() { /* ... */ }
explicit KPageTableManager() { /* ... */ }
ALWAYS_INLINE void Initialize(KDynamicPageManager *page_allocator, KPageTableSlabHeap *pt_heap) {
m_pt_heap = pt_heap;
static_assert(std::derived_from<KPageTableSlabHeap, DynamicSlabType>);
BaseHeap::Initialize(page_allocator, pt_heap);
}
KVirtualAddress Allocate() {
return KVirtualAddress(BaseHeap::Allocate());
}
void Free(KVirtualAddress addr) {
return BaseHeap::Free(GetPointer<impl::PageTablePage>(addr));
}
ALWAYS_INLINE RefCount GetRefCount(KVirtualAddress addr) const {
return m_pt_heap->GetRefCount(addr);
}
ALWAYS_INLINE void Open(KVirtualAddress addr, int count) {
return m_pt_heap->Open(addr, count);
}
ALWAYS_INLINE bool Close(KVirtualAddress addr, int count) {
return m_pt_heap->Close(addr, count);
}
constexpr ALWAYS_INLINE bool IsInPageTableHeap(KVirtualAddress addr) const {
return m_pt_heap->IsInRange(addr);
}
};
}
| 2,569 | C++ | .h | 56 | 36.857143 | 109 | 0.654676 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 8,831 | kern_k_priority_queue.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_priority_queue.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
namespace ams::kern {
template<typename T>
concept KPriorityQueueAffinityMask = !std::is_reference<T>::value && requires (T &t) {
{ t.GetAffinityMask() } -> std::convertible_to<u64>;
{ t.SetAffinityMask(std::declval<u64>()) };
{ t.GetAffinity(std::declval<int32_t>()) } -> std::same_as<bool>;
{ t.SetAffinity(std::declval<int32_t>(), std::declval<bool>()) };
{ t.SetAll() };
};
template<typename T>
concept KPriorityQueueMember = !std::is_reference<T>::value && requires (T &t) {
{ typename T::QueueEntry() };
{ (typename T::QueueEntry()).Initialize() };
{ (typename T::QueueEntry()).SetPrev(std::addressof(t)) };
{ (typename T::QueueEntry()).SetNext(std::addressof(t)) };
{ (typename T::QueueEntry()).GetNext() } -> std::same_as<T*>;
{ (typename T::QueueEntry()).GetPrev() } -> std::same_as<T*>;
{ t.GetPriorityQueueEntry(std::declval<s32>()) } -> std::same_as<typename T::QueueEntry &>;
{ t.GetAffinityMask() };
{ typename std::remove_cvref<decltype(t.GetAffinityMask())>::type() } -> KPriorityQueueAffinityMask;
{ t.GetActiveCore() } -> std::convertible_to<s32>;
{ t.GetPriority() } -> std::convertible_to<s32>;
};
template<typename Member, size_t _NumCores, int LowestPriority, int HighestPriority> requires KPriorityQueueMember<Member>
class KPriorityQueue {
public:
using AffinityMaskType = typename std::remove_cv<typename std::remove_reference<decltype(std::declval<Member>().GetAffinityMask())>::type>::type;
static_assert(LowestPriority >= 0);
static_assert(HighestPriority >= 0);
static_assert(LowestPriority >= HighestPriority);
static constexpr size_t NumPriority = LowestPriority - HighestPriority + 1;
static constexpr size_t NumCores = _NumCores;
static constexpr ALWAYS_INLINE bool IsValidCore(s32 core) {
return 0 <= core && core < static_cast<s32>(NumCores);
}
static constexpr ALWAYS_INLINE bool IsValidPriority(s32 priority) {
return HighestPriority <= priority && priority <= LowestPriority + 1;
}
private:
using Entry = typename Member::QueueEntry;
public:
class KPerCoreQueue {
private:
Entry m_root[NumCores];
public:
constexpr ALWAYS_INLINE KPerCoreQueue() : m_root() {
for (size_t i = 0; i < NumCores; i++) {
m_root[i].Initialize();
}
}
constexpr ALWAYS_INLINE bool PushBack(s32 core, Member *member) {
/* Get the entry associated with the member. */
Entry &member_entry = member->GetPriorityQueueEntry(core);
/* Get the entry associated with the end of the queue. */
Member *tail = m_root[core].GetPrev();
Entry &tail_entry = (tail != nullptr) ? tail->GetPriorityQueueEntry(core) : m_root[core];
/* Link the entries. */
member_entry.SetPrev(tail);
member_entry.SetNext(nullptr);
tail_entry.SetNext(member);
m_root[core].SetPrev(member);
return (tail == nullptr);
}
constexpr ALWAYS_INLINE bool PushFront(s32 core, Member *member) {
/* Get the entry associated with the member. */
Entry &member_entry = member->GetPriorityQueueEntry(core);
/* Get the entry associated with the front of the queue. */
Member *head = m_root[core].GetNext();
Entry &head_entry = (head != nullptr) ? head->GetPriorityQueueEntry(core) : m_root[core];
/* Link the entries. */
member_entry.SetPrev(nullptr);
member_entry.SetNext(head);
head_entry.SetPrev(member);
m_root[core].SetNext(member);
return (head == nullptr);
}
constexpr ALWAYS_INLINE bool Remove(s32 core, Member *member) {
/* Get the entry associated with the member. */
Entry &member_entry = member->GetPriorityQueueEntry(core);
/* Get the entries associated with next and prev. */
Member *prev = member_entry.GetPrev();
Member *next = member_entry.GetNext();
Entry &prev_entry = (prev != nullptr) ? prev->GetPriorityQueueEntry(core) : m_root[core];
Entry &next_entry = (next != nullptr) ? next->GetPriorityQueueEntry(core) : m_root[core];
/* Unlink. */
prev_entry.SetNext(next);
next_entry.SetPrev(prev);
return (this->GetFront(core) == nullptr);
}
constexpr ALWAYS_INLINE Member *GetFront(s32 core) const {
return m_root[core].GetNext();
}
};
class KPriorityQueueImpl {
private:
KPerCoreQueue m_queues[NumPriority];
util::BitSet64<NumPriority> m_available_priorities[NumCores];
public:
constexpr ALWAYS_INLINE KPriorityQueueImpl() : m_queues(), m_available_priorities() { /* ... */ }
constexpr ALWAYS_INLINE void PushBack(s32 priority, s32 core, Member *member) {
MESOSPHERE_ASSERT(IsValidCore(core));
MESOSPHERE_ASSERT(IsValidPriority(priority));
if (AMS_LIKELY(priority <= LowestPriority)) {
if (m_queues[priority].PushBack(core, member)) {
m_available_priorities[core].SetBit(priority);
}
}
}
constexpr ALWAYS_INLINE void PushFront(s32 priority, s32 core, Member *member) {
MESOSPHERE_ASSERT(IsValidCore(core));
MESOSPHERE_ASSERT(IsValidPriority(priority));
if (AMS_LIKELY(priority <= LowestPriority)) {
if (m_queues[priority].PushFront(core, member)) {
m_available_priorities[core].SetBit(priority);
}
}
}
constexpr ALWAYS_INLINE void Remove(s32 priority, s32 core, Member *member) {
MESOSPHERE_ASSERT(IsValidCore(core));
MESOSPHERE_ASSERT(IsValidPriority(priority));
if (AMS_LIKELY(priority <= LowestPriority)) {
if (m_queues[priority].Remove(core, member)) {
m_available_priorities[core].ClearBit(priority);
}
}
}
constexpr ALWAYS_INLINE Member *GetFront(s32 core) const {
MESOSPHERE_ASSERT(IsValidCore(core));
const s32 priority = m_available_priorities[core].CountLeadingZero();
if (AMS_LIKELY(priority <= LowestPriority)) {
return m_queues[priority].GetFront(core);
} else {
return nullptr;
}
}
constexpr ALWAYS_INLINE Member *GetFront(s32 priority, s32 core) const {
MESOSPHERE_ASSERT(IsValidCore(core));
MESOSPHERE_ASSERT(IsValidPriority(priority));
if (AMS_LIKELY(priority <= LowestPriority)) {
return m_queues[priority].GetFront(core);
} else {
return nullptr;
}
}
constexpr ALWAYS_INLINE Member *GetNext(s32 core, const Member *member) const {
MESOSPHERE_ASSERT(IsValidCore(core));
Member *next = member->GetPriorityQueueEntry(core).GetNext();
if (next == nullptr) {
const s32 priority = m_available_priorities[core].GetNextSet(member->GetPriority());
if (AMS_LIKELY(priority <= LowestPriority)) {
next = m_queues[priority].GetFront(core);
}
}
return next;
}
constexpr ALWAYS_INLINE void MoveToFront(s32 priority, s32 core, Member *member) {
MESOSPHERE_ASSERT(IsValidCore(core));
MESOSPHERE_ASSERT(IsValidPriority(priority));
if (AMS_LIKELY(priority <= LowestPriority)) {
m_queues[priority].Remove(core, member);
m_queues[priority].PushFront(core, member);
}
}
constexpr ALWAYS_INLINE Member *MoveToBack(s32 priority, s32 core, Member *member) {
MESOSPHERE_ASSERT(IsValidCore(core));
MESOSPHERE_ASSERT(IsValidPriority(priority));
if (AMS_LIKELY(priority <= LowestPriority)) {
m_queues[priority].Remove(core, member);
m_queues[priority].PushBack(core, member);
return m_queues[priority].GetFront(core);
} else {
return nullptr;
}
}
};
private:
KPriorityQueueImpl m_scheduled_queue;
KPriorityQueueImpl m_suggested_queue;
private:
static constexpr ALWAYS_INLINE void ClearAffinityBit(u64 &affinity, s32 core) {
affinity &= ~(UINT64_C(1) << core);
}
static constexpr ALWAYS_INLINE s32 GetNextCore(u64 &affinity) {
const s32 core = __builtin_ctzll(static_cast<unsigned long long>(affinity));
ClearAffinityBit(affinity, core);
return core;
}
constexpr ALWAYS_INLINE void PushBack(s32 priority, Member *member) {
MESOSPHERE_ASSERT(IsValidPriority(priority));
/* Push onto the scheduled queue for its core, if we can. */
u64 affinity = member->GetAffinityMask().GetAffinityMask();
if (const s32 core = member->GetActiveCore(); core >= 0) {
m_scheduled_queue.PushBack(priority, core, member);
ClearAffinityBit(affinity, core);
}
/* And suggest the thread for all other cores. */
while (affinity) {
m_suggested_queue.PushBack(priority, GetNextCore(affinity), member);
}
}
constexpr ALWAYS_INLINE void PushFront(s32 priority, Member *member) {
MESOSPHERE_ASSERT(IsValidPriority(priority));
/* Push onto the scheduled queue for its core, if we can. */
u64 affinity = member->GetAffinityMask().GetAffinityMask();
if (const s32 core = member->GetActiveCore(); core >= 0) {
m_scheduled_queue.PushFront(priority, core, member);
ClearAffinityBit(affinity, core);
}
/* And suggest the thread for all other cores. */
/* Note: Nintendo pushes onto the back of the suggested queue, not the front. */
while (affinity) {
m_suggested_queue.PushBack(priority, GetNextCore(affinity), member);
}
}
constexpr ALWAYS_INLINE void Remove(s32 priority, Member *member) {
MESOSPHERE_ASSERT(IsValidPriority(priority));
/* Remove from the scheduled queue for its core. */
u64 affinity = member->GetAffinityMask().GetAffinityMask();
if (const s32 core = member->GetActiveCore(); core >= 0) {
m_scheduled_queue.Remove(priority, core, member);
ClearAffinityBit(affinity, core);
}
/* Remove from the suggested queue for all other cores. */
while (affinity) {
m_suggested_queue.Remove(priority, GetNextCore(affinity), member);
}
}
public:
constexpr ALWAYS_INLINE KPriorityQueue() : m_scheduled_queue(), m_suggested_queue() { /* ... */ }
/* Getters. */
constexpr ALWAYS_INLINE Member *GetScheduledFront(s32 core) const {
return m_scheduled_queue.GetFront(core);
}
constexpr ALWAYS_INLINE Member *GetScheduledFront(s32 core, s32 priority) const {
return m_scheduled_queue.GetFront(priority, core);
}
constexpr ALWAYS_INLINE Member *GetSuggestedFront(s32 core) const {
return m_suggested_queue.GetFront(core);
}
constexpr ALWAYS_INLINE Member *GetSuggestedFront(s32 core, s32 priority) const {
return m_suggested_queue.GetFront(priority, core);
}
constexpr ALWAYS_INLINE Member *GetScheduledNext(s32 core, const Member *member) const {
return m_scheduled_queue.GetNext(core, member);
}
constexpr ALWAYS_INLINE Member *GetSuggestedNext(s32 core, const Member *member) const {
return m_suggested_queue.GetNext(core, member);
}
constexpr ALWAYS_INLINE Member *GetSamePriorityNext(s32 core, const Member *member) const {
return member->GetPriorityQueueEntry(core).GetNext();
}
/* Mutators. */
constexpr ALWAYS_INLINE void PushBack(Member *member) {
this->PushBack(member->GetPriority(), member);
}
constexpr ALWAYS_INLINE void Remove(Member *member) {
this->Remove(member->GetPriority(), member);
}
constexpr ALWAYS_INLINE void MoveToScheduledFront(Member *member) {
m_scheduled_queue.MoveToFront(member->GetPriority(), member->GetActiveCore(), member);
}
constexpr ALWAYS_INLINE KThread *MoveToScheduledBack(Member *member) {
return m_scheduled_queue.MoveToBack(member->GetPriority(), member->GetActiveCore(), member);
}
/* First class fancy operations. */
constexpr ALWAYS_INLINE void ChangePriority(s32 prev_priority, bool is_running, Member *member) {
MESOSPHERE_ASSERT(IsValidPriority(prev_priority));
/* Remove the member from the queues. */
const s32 new_priority = member->GetPriority();
this->Remove(prev_priority, member);
/* And enqueue. If the member is running, we want to keep it running. */
if (is_running) {
this->PushFront(new_priority, member);
} else {
this->PushBack(new_priority, member);
}
}
constexpr ALWAYS_INLINE void ChangeAffinityMask(s32 prev_core, const AffinityMaskType &prev_affinity, Member *member) {
/* Get the new information. */
const s32 priority = member->GetPriority();
const AffinityMaskType &new_affinity = member->GetAffinityMask();
const s32 new_core = member->GetActiveCore();
/* Remove the member from all queues it was in before. */
for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
if (prev_affinity.GetAffinity(core)) {
if (core == prev_core) {
m_scheduled_queue.Remove(priority, core, member);
} else {
m_suggested_queue.Remove(priority, core, member);
}
}
}
/* And add the member to all queues it should be in now. */
for (s32 core = 0; core < static_cast<s32>(NumCores); core++) {
if (new_affinity.GetAffinity(core)) {
if (core == new_core) {
m_scheduled_queue.PushBack(priority, core, member);
} else {
m_suggested_queue.PushBack(priority, core, member);
}
}
}
}
constexpr ALWAYS_INLINE void ChangeCore(s32 prev_core, Member *member, bool to_front = false) {
/* Get the new information. */
const s32 new_core = member->GetActiveCore();
const s32 priority = member->GetPriority();
/* We don't need to do anything if the core is the same. */
if (prev_core != new_core) {
/* Remove from the scheduled queue for the previous core. */
if (prev_core >= 0) {
m_scheduled_queue.Remove(priority, prev_core, member);
}
/* Remove from the suggested queue and add to the scheduled queue for the new core. */
if (new_core >= 0) {
m_suggested_queue.Remove(priority, new_core, member);
if (to_front) {
m_scheduled_queue.PushFront(priority, new_core, member);
} else {
m_scheduled_queue.PushBack(priority, new_core, member);
}
}
/* Add to the suggested queue for the previous core. */
if (prev_core >= 0) {
m_suggested_queue.PushBack(priority, prev_core, member);
}
}
}
};
}
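/* Editor's illustrative sketch (not part of the original header): the per-core
 * util::BitSet64 of non-empty priority levels is what lets GetFront() locate the
 * highest runnable priority in constant time. The same idea, using only standard
 * C++ types and assumed names rather than the kernel's API, looks like this. */
#include <bit>
#include <cassert>
#include <cstdint>

/* Illustrative only: bit i set <=> priority level i (0 = highest) has queued members. */
class PriorityBitset {
    private:
        std::uint64_t m_bits = 0;
    public:
        void MarkNonEmpty(int priority) { m_bits |=  (UINT64_C(1) << priority); }
        void MarkEmpty(int priority)    { m_bits &= ~(UINT64_C(1) << priority); }
        /* Returns the numerically smallest (i.e. highest) non-empty priority, or -1 if none. */
        int GetHighestNonEmpty() const {
            return m_bits != 0 ? std::countr_zero(m_bits) : -1;
        }
};

int main() {
    PriorityBitset b;
    b.MarkNonEmpty(44);
    b.MarkNonEmpty(28);
    assert(b.GetHighestNonEmpty() == 28);  /* lower number wins, as in the scheduler */
    b.MarkEmpty(28);
    assert(b.GetHighestNonEmpty() == 44);
    return 0;
}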
| 19,498
|
C++
|
.h
| 347
| 37.674352
| 157
| 0.51879
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,832
|
kern_k_session.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_session.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_synchronization_object.hpp>
#include <mesosphere/kern_k_server_session.hpp>
#include <mesosphere/kern_k_client_session.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
namespace ams::kern {
class KClientPort;
class KProcess;
class KSession final : public KAutoObjectWithSlabHeapAndContainer<KSession, KAutoObjectWithList, true> {
MESOSPHERE_AUTOOBJECT_TRAITS(KSession, KAutoObject);
private:
enum class State : u8 {
Invalid = 0,
Normal = 1,
ClientClosed = 2,
ServerClosed = 3,
};
private:
util::Atomic<std::underlying_type<State>::type> m_atomic_state;
bool m_initialized;
KServerSession m_server;
KClientSession m_client;
KClientPort *m_port;
uintptr_t m_name;
KProcess *m_process;
private:
ALWAYS_INLINE void SetState(State state) {
m_atomic_state = static_cast<u8>(state);
}
ALWAYS_INLINE State GetState() const {
return static_cast<State>(m_atomic_state.Load());
}
public:
constexpr explicit KSession(util::ConstantInitializeTag)
: KAutoObjectWithSlabHeapAndContainer<KSession, KAutoObjectWithList, true>(util::ConstantInitialize),
m_atomic_state(static_cast<std::underlying_type<State>::type>(State::Invalid)), m_initialized(),
m_server(util::ConstantInitialize), m_client(util::ConstantInitialize), m_port(), m_name(), m_process()
{
/* ... */
}
explicit KSession() : m_atomic_state(util::ToUnderlying(State::Invalid)), m_initialized(false), m_process(nullptr) { /* ... */ }
void Initialize(KClientPort *client_port, uintptr_t name);
void Finalize();
bool IsInitialized() const { return m_initialized; }
uintptr_t GetPostDestroyArgument() const { return reinterpret_cast<uintptr_t>(m_process); }
static void PostDestroy(uintptr_t arg);
void OnServerClosed();
void OnClientClosed();
bool IsServerClosed() const { return this->GetState() != State::Normal; }
bool IsClientClosed() const { return this->GetState() != State::Normal; }
Result OnRequest(KSessionRequest *request) { R_RETURN(m_server.OnRequest(request)); }
KClientSession &GetClientSession() { return m_client; }
KServerSession &GetServerSession() { return m_server; }
const KClientSession &GetClientSession() const { return m_client; }
const KServerSession &GetServerSession() const { return m_server; }
const KClientPort *GetParent() const { return m_port; }
};
}
| 3,574
|
C++
|
.h
| 74
| 38.756757
| 140
| 0.639128
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,833
|
kern_k_light_lock.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_light_lock.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_current_context.hpp>
#include <mesosphere/kern_k_scoped_lock.hpp>
namespace ams::kern {
class KLightLock {
private:
util::Atomic<uintptr_t> m_tag;
public:
constexpr ALWAYS_INLINE KLightLock() : m_tag(0) { /* ... */ }
void Lock() {
MESOSPHERE_ASSERT_THIS();
const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer());
while (true) {
uintptr_t old_tag = m_tag.Load<std::memory_order_relaxed>();
while (!m_tag.CompareExchangeWeak<std::memory_order_acquire>(old_tag, (old_tag == 0) ? cur_thread : (old_tag | 1))) {
/* ... */
}
if (old_tag == 0 || this->LockSlowPath(old_tag | 1, cur_thread)) {
break;
}
}
}
ALWAYS_INLINE void Unlock() {
MESOSPHERE_ASSERT_THIS();
const uintptr_t cur_thread = reinterpret_cast<uintptr_t>(GetCurrentThreadPointer());
uintptr_t expected = cur_thread;
if (!m_tag.CompareExchangeStrong<std::memory_order_release>(expected, 0)) {
this->UnlockSlowPath(cur_thread);
}
}
NOINLINE bool LockSlowPath(uintptr_t owner, uintptr_t cur_thread);
NOINLINE void UnlockSlowPath(uintptr_t cur_thread);
ALWAYS_INLINE bool IsLocked() const { return m_tag.Load() != 0; }
ALWAYS_INLINE bool IsLockedByCurrentThread() const { return (m_tag.Load() | 0x1ul) == (reinterpret_cast<uintptr_t>(GetCurrentThreadPointer()) | 0x1ul); }
};
using KScopedLightLock = KScopedLock<KLightLock>;
}
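/* Editor's illustrative sketch (not part of the original header): KLightLock packs the
 * owner thread pointer and a "waiters exist" flag into a single word, using the low bit
 * as the flag. A standalone approximation with std::atomic (assumed names, slow paths
 * stubbed out) shows the fast-path protocol. */
#include <atomic>
#include <cstdint>

class TagLock {
    private:
        std::atomic<std::uintptr_t> m_tag{0};
        /* In the real lock these park/wake threads; here they are placeholders. */
        void LockSlowPath(std::uintptr_t /*owner*/, std::uintptr_t /*self*/) { }
        void UnlockSlowPath(std::uintptr_t /*self*/) { }
    public:
        void Lock(std::uintptr_t self) {  /* self: current thread pointer, low bit clear */
            std::uintptr_t old_tag = m_tag.load(std::memory_order_relaxed);
            for (;;) {
                /* Uncontended: claim by storing our pointer. Contended: set the low bit. */
                const std::uintptr_t desired = (old_tag == 0) ? self : (old_tag | 1);
                if (m_tag.compare_exchange_weak(old_tag, desired, std::memory_order_acquire)) {
                    if (old_tag != 0) {
                        LockSlowPath(old_tag | 1, self);  /* wait until the owner hands over */
                    }
                    return;
                }
            }
        }
        void Unlock(std::uintptr_t self) {
            std::uintptr_t expected = self;
            /* Fast path only if the tag is still exactly our pointer (no waiter bit set). */
            if (!m_tag.compare_exchange_strong(expected, 0, std::memory_order_release)) {
                UnlockSlowPath(self);
            }
        }
};

int main() {
    TagLock lock;
    std::uintptr_t thread_a = 0x1000;  /* stand-in thread "pointer" for the sketch */
    lock.Lock(thread_a);
    lock.Unlock(thread_a);
    return 0;
}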
| 2,540
|
C++
|
.h
| 54
| 37
| 165
| 0.609223
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,834
|
kern_k_interrupt_task_manager.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_interrupt_task_manager.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_k_interrupt_task.hpp>
namespace ams::kern {
class KThread;
class KInterruptTaskManager {
private:
class TaskQueue {
private:
KInterruptTask *m_head;
KInterruptTask *m_tail;
public:
constexpr ALWAYS_INLINE TaskQueue() : m_head(nullptr), m_tail(nullptr) { /* ... */ }
constexpr ALWAYS_INLINE KInterruptTask *GetHead() { return m_head; }
constexpr ALWAYS_INLINE bool IsEmpty() const { return m_head == nullptr; }
constexpr ALWAYS_INLINE void Clear() { m_head = nullptr; m_tail = nullptr; }
void Enqueue(KInterruptTask *task);
void Dequeue();
};
private:
TaskQueue m_task_queue;
s64 m_cpu_time;
public:
constexpr KInterruptTaskManager() : m_task_queue(), m_cpu_time(0) { /* ... */ }
constexpr ALWAYS_INLINE s64 GetCpuTime() const { return m_cpu_time; }
void EnqueueTask(KInterruptTask *task);
void DoTasks();
};
}
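/* Editor's illustrative sketch (not part of the original header): TaskQueue above is an
 * intrusive singly linked FIFO tracked by head and tail pointers. The same shape in plain
 * standard C++, with assumed member names: */
#include <cassert>

struct Task {
    Task *next = nullptr;  /* intrusive link, stored inside the task itself */
};

class TaskFifo {
    private:
        Task *m_head = nullptr;
        Task *m_tail = nullptr;
    public:
        bool IsEmpty() const { return m_head == nullptr; }
        void Enqueue(Task *t) {
            if (m_tail != nullptr) { m_tail->next = t; } else { m_head = t; }
            m_tail = t;
        }
        Task *Dequeue() {
            Task *t = m_head;
            if (t != nullptr) {
                m_head = t->next;
                if (m_head == nullptr) { m_tail = nullptr; }
                t->next = nullptr;
            }
            return t;
        }
};

int main() {
    Task a, b;
    TaskFifo q;
    q.Enqueue(&a);
    q.Enqueue(&b);
    assert(q.Dequeue() == &a);
    assert(q.Dequeue() == &b);
    assert(q.IsEmpty());
    return 0;
}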
| 1,816
|
C++
|
.h
| 43
| 33.186047
| 104
| 0.616431
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,835
|
kern_k_object_name.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_object_name.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_auto_object.hpp>
#include <mesosphere/kern_k_light_lock.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
namespace ams::kern {
class KObjectName : public KSlabAllocated<KObjectName>, public util::IntrusiveListBaseNode<KObjectName> {
public:
static constexpr size_t NameLengthMax = 12;
using List = util::IntrusiveListBaseTraits<KObjectName>::ListType;
private:
char m_name[NameLengthMax];
KAutoObject *m_object;
public:
static Result NewFromName(KAutoObject *obj, const char *name);
static Result Delete(KAutoObject *obj, const char *name);
static KScopedAutoObject<KAutoObject> Find(const char *name);
template<typename Derived>
static Result Delete(const char *name) {
/* Find the object. */
KScopedAutoObject obj = Find(name);
R_UNLESS(obj.IsNotNull(), svc::ResultNotFound());
/* Cast the object to the desired type. */
Derived *derived = obj->DynamicCast<Derived *>();
R_UNLESS(derived != nullptr, svc::ResultNotFound());
/* Check that the object is closed. */
R_UNLESS(derived->IsServerClosed(), svc::ResultInvalidState());
R_RETURN(Delete(obj.GetPointerUnsafe(), name));
}
template<typename Derived> requires std::derived_from<Derived, KAutoObject>
static KScopedAutoObject<Derived> Find(const char *name) {
return Find(name);
}
private:
static KScopedAutoObject<KAutoObject> FindImpl(const char *name);
void Initialize(KAutoObject *obj, const char *name);
bool MatchesName(const char *name) const;
KAutoObject *GetObject() const { return m_object; }
};
}
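/* Editor's illustrative sketch (not part of the original header): object names live in a
 * fixed NameLengthMax-byte buffer, so lookup compares at most NameLengthMax characters and
 * does not rely on NUL termination. Standalone approximation with assumed names: */
#include <cassert>
#include <cstring>

constexpr std::size_t NameLengthMax = 12;

struct NamedObject {
    char name[NameLengthMax];
    void *object;
};

/* Illustrative only: bounded compare, analogous in spirit to MatchesName(). */
inline bool MatchesName(const NamedObject &entry, const char *name) {
    return std::strncmp(entry.name, name, NameLengthMax) == 0;
}

int main() {
    int dummy = 0;
    NamedObject entry = {};
    std::strncpy(entry.name, "sm:", NameLengthMax - 1);  /* keep a terminator in this sketch */
    entry.object = &dummy;
    assert(MatchesName(entry, "sm:"));
    assert(!MatchesName(entry, "sm:m"));
    return 0;
}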
| 2,592
|
C++
|
.h
| 55
| 38.309091
| 109
| 0.655446
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,836
|
kern_k_auto_object.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_auto_object.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
#include <mesosphere/kern_k_class_token.hpp>
namespace ams::kern {
class KProcess;
#if defined(MESOSPHERE_BUILD_FOR_DEBUGGING) || defined(MESOSPHERE_BUILD_FOR_AUDITING)
#define MESOSPHERE_AUTO_OBJECT_TYPENAME_IMPL(CLASS) #CLASS
#else
#define MESOSPHERE_AUTO_OBJECT_TYPENAME_IMPL(CLASS) ""
#endif
#define MESOSPHERE_AUTOOBJECT_TRAITS(CLASS, BASE_CLASS) \
NON_COPYABLE(CLASS); \
NON_MOVEABLE(CLASS); \
private: \
friend class ::ams::kern::KClassTokenGenerator; \
static constexpr inline auto ObjectType = ::ams::kern::KClassTokenGenerator::ObjectType::CLASS; \
static constexpr inline const char * const TypeName = MESOSPHERE_AUTO_OBJECT_TYPENAME_IMPL(CLASS); \
static constexpr inline ClassTokenType ClassToken() { return ::ams::kern::ClassToken<CLASS>; } \
public: \
using BaseClass = BASE_CLASS; \
static consteval ALWAYS_INLINE TypeObj GetStaticTypeObj() { \
constexpr ClassTokenType Token = ClassToken(); \
return TypeObj(TypeName, Token); \
} \
static consteval ALWAYS_INLINE const char *GetStaticTypeName() { return TypeName; } \
virtual TypeObj GetTypeObj() const { return GetStaticTypeObj(); } \
virtual const char *GetTypeName() { return GetStaticTypeName(); } \
private:
class KAutoObject {
public:
class ReferenceCount {
NON_COPYABLE(ReferenceCount);
NON_MOVEABLE(ReferenceCount);
private:
using Storage = u32;
private:
util::Atomic<Storage> m_value;
public:
ALWAYS_INLINE explicit ReferenceCount() { /* ... */ }
constexpr ALWAYS_INLINE explicit ReferenceCount(Storage v) : m_value(v) { /* ... */ }
ALWAYS_INLINE void operator=(Storage v) { m_value = v; }
ALWAYS_INLINE Storage GetValue() const { return m_value.Load(); }
ALWAYS_INLINE bool Open() {
/* Atomically increment the reference count, only if it's positive. */
u32 cur = m_value.Load<std::memory_order_relaxed>();
do {
if (AMS_UNLIKELY(cur == 0)) {
MESOSPHERE_AUDIT(cur != 0);
return false;
}
MESOSPHERE_ABORT_UNLESS(cur < cur + 1);
} while (AMS_UNLIKELY(!m_value.CompareExchangeWeak<std::memory_order_relaxed>(cur, cur + 1)));
return true;
}
ALWAYS_INLINE bool Close() {
/* Atomically decrement the reference count, not allowing it to decrement past zero. */
u32 cur = m_value.Load<std::memory_order_relaxed>();
do {
MESOSPHERE_ABORT_UNLESS(cur > 0);
} while (AMS_UNLIKELY(!m_value.CompareExchangeWeak<std::memory_order_relaxed>(cur, cur - 1)));
/* Return whether the object was closed. */
return cur - 1 == 0;
}
};
protected:
class TypeObj {
private:
const char *m_name;
ClassTokenType m_class_token;
public:
constexpr explicit TypeObj(const char *n, ClassTokenType tok) : m_name(n), m_class_token(tok) { /* ... */ }
constexpr ALWAYS_INLINE const char *GetName() const { return m_name; }
constexpr ALWAYS_INLINE ClassTokenType GetClassToken() const { return m_class_token; }
constexpr ALWAYS_INLINE bool operator==(const TypeObj &rhs) {
return this->GetClassToken() == rhs.GetClassToken();
}
constexpr ALWAYS_INLINE bool operator!=(const TypeObj &rhs) {
return this->GetClassToken() != rhs.GetClassToken();
}
constexpr ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) {
return IsClassTokenDerivedFrom(this->GetClassToken(), rhs.GetClassToken());
}
static constexpr ALWAYS_INLINE bool IsClassTokenDerivedFrom(ClassTokenType derived, ClassTokenType base) {
return (derived | base) == derived;
}
};
private:
MESOSPHERE_AUTOOBJECT_TRAITS(KAutoObject, KAutoObject);
private:
KAutoObject *m_next_closed_object;
ReferenceCount m_ref_count;
#if defined(MESOSPHERE_ENABLE_DEVIRTUALIZED_DYNAMIC_CAST)
ClassTokenType m_class_token;
#endif
public:
constexpr ALWAYS_INLINE explicit KAutoObject(util::ConstantInitializeTag) : m_next_closed_object(nullptr), m_ref_count(0)
#if defined(MESOSPHERE_ENABLE_DEVIRTUALIZED_DYNAMIC_CAST)
, m_class_token(0)
#endif
{
MESOSPHERE_ASSERT_THIS();
}
ALWAYS_INLINE explicit KAutoObject() : m_ref_count(0) { MESOSPHERE_ASSERT_THIS(); }
/* Destroy is responsible for destroying the auto object's resources when ref_count hits zero. */
virtual void Destroy() { MESOSPHERE_ASSERT_THIS(); }
/* Finalize is responsible for cleaning up resource, but does not destroy the object. */
/* NOTE: This is a virtual function in official kernel, but because everything which uses it */
/* is already using CRTP for slab heap, we have devirtualized it for performance gain. */
/* virtual void Finalize() { MESOSPHERE_ASSERT_THIS(); } */
/* NOTE: This is a virtual function which is unused-except-for-debug in Nintendo's kernel. */
/* virtual KProcess *GetOwner() const { return nullptr; } */
u32 GetReferenceCount() const {
return m_ref_count.GetValue();
}
ALWAYS_INLINE bool IsDerivedFrom(const TypeObj &rhs) const {
#if defined(MESOSPHERE_ENABLE_DEVIRTUALIZED_DYNAMIC_CAST)
return TypeObj::IsClassTokenDerivedFrom(m_class_token, rhs.GetClassToken());
#else
return this->GetTypeObj().IsDerivedFrom(rhs);
#endif
}
ALWAYS_INLINE bool IsDerivedFrom(const KAutoObject &rhs) const {
#if defined(MESOSPHERE_ENABLE_DEVIRTUALIZED_DYNAMIC_CAST)
return TypeObj::IsClassTokenDerivedFrom(m_class_token, rhs.m_class_token);
#else
return this->IsDerivedFrom(rhs.GetTypeObj());
#endif
}
template<typename Derived>
ALWAYS_INLINE Derived DynamicCast() {
static_assert(std::is_pointer<Derived>::value);
using DerivedType = typename std::remove_pointer<Derived>::type;
if (AMS_LIKELY(this->IsDerivedFrom(DerivedType::GetStaticTypeObj()))) {
return static_cast<Derived>(this);
} else {
return nullptr;
}
}
template<typename Derived>
ALWAYS_INLINE const Derived DynamicCast() const {
static_assert(std::is_pointer<Derived>::value);
using DerivedType = typename std::remove_pointer<Derived>::type;
if (AMS_LIKELY(this->IsDerivedFrom(DerivedType::GetStaticTypeObj()))) {
return static_cast<Derived>(this);
} else {
return nullptr;
}
}
MESOSPHERE_ALWAYS_INLINE_IF_RELEASE bool Open() {
MESOSPHERE_ASSERT_THIS();
return m_ref_count.Open();
}
MESOSPHERE_ALWAYS_INLINE_IF_RELEASE void Close() {
MESOSPHERE_ASSERT_THIS();
if (m_ref_count.Close()) {
this->ScheduleDestruction();
}
}
private:
/* NOTE: This has to be defined *after* KThread is defined. */
/* Nintendo seems to handle this by defining Open/Close() in a cpp, but we'd like them to remain in headers. */
/* Implementation for this will be inside kern_k_thread.hpp, so it can be ALWAYS_INLINE. */
void ScheduleDestruction();
public:
/* Getter, for KThread. */
ALWAYS_INLINE KAutoObject *GetNextClosedObject() { return m_next_closed_object; }
public:
template<typename Derived> requires (std::derived_from<Derived, KAutoObject>)
static ALWAYS_INLINE void Create(typename std::type_identity<Derived>::type *obj) {
/* Get auto object pointer. */
KAutoObject &auto_object = *static_cast<KAutoObject *>(obj);
/* If we should, set our class token. */
#if defined(MESOSPHERE_ENABLE_DEVIRTUALIZED_DYNAMIC_CAST)
{
constexpr auto Token = Derived::GetStaticTypeObj().GetClassToken();
auto_object.m_class_token = Token;
}
#endif
/* Initialize reference count to 1. */
auto_object.m_ref_count = 1;
}
};
class KAutoObjectWithListBase : public KAutoObject {
private:
void *m_alignment_forcer_unused[0];
public:
constexpr ALWAYS_INLINE explicit KAutoObjectWithListBase(util::ConstantInitializeTag) : KAutoObject(util::ConstantInitialize), m_alignment_forcer_unused{} { /* ... */ }
ALWAYS_INLINE explicit KAutoObjectWithListBase() { /* ... */ }
};
class KAutoObjectWithList : public KAutoObjectWithListBase {
private:
template<typename>
friend class KAutoObjectWithListContainer;
private:
util::IntrusiveRedBlackTreeNode m_list_node;
public:
constexpr ALWAYS_INLINE KAutoObjectWithList(util::ConstantInitializeTag) : KAutoObjectWithListBase(util::ConstantInitialize), m_list_node(util::ConstantInitialize) { /* ... */ }
ALWAYS_INLINE explicit KAutoObjectWithList() { /* ... */ }
public:
/* NOTE: This is virtual in Nintendo's kernel. */
u64 GetId() const;
};
template<typename T> requires std::derived_from<T, KAutoObject>
class KScopedAutoObject {
NON_COPYABLE(KScopedAutoObject);
private:
template<typename U> requires std::derived_from<U, KAutoObject>
friend class KScopedAutoObject;
private:
T *m_obj;
private:
constexpr ALWAYS_INLINE void Swap(KScopedAutoObject &rhs) {
std::swap(m_obj, rhs.m_obj);
}
public:
constexpr ALWAYS_INLINE KScopedAutoObject(T *o) : m_obj(o) {
if (m_obj != nullptr) {
m_obj->Open();
}
}
ALWAYS_INLINE ~KScopedAutoObject() {
if (m_obj != nullptr) {
m_obj->Close();
}
m_obj = nullptr;
}
template<typename U> requires (std::derived_from<T, U> || std::derived_from<U, T>)
constexpr KScopedAutoObject(KScopedAutoObject<U> &&rhs) {
if constexpr (std::derived_from<U, T>) {
/* Upcast. */
m_obj = rhs.m_obj;
rhs.m_obj = nullptr;
} else {
/* Downcast. */
T *derived = nullptr;
if (rhs.m_obj != nullptr) {
derived = rhs.m_obj->template DynamicCast<T *>();
if (derived == nullptr) {
rhs.m_obj->Close();
}
}
m_obj = derived;
rhs.m_obj = nullptr;
}
}
constexpr ALWAYS_INLINE KScopedAutoObject<T> &operator=(KScopedAutoObject<T> &&rhs) {
rhs.Swap(*this);
return *this;
}
constexpr ALWAYS_INLINE T *operator->() { return m_obj; }
constexpr ALWAYS_INLINE T &operator*() { return *m_obj; }
constexpr ALWAYS_INLINE void Reset(T *o) {
KScopedAutoObject(o).Swap(*this);
}
constexpr ALWAYS_INLINE T *GetPointerUnsafe() { return m_obj; }
constexpr ALWAYS_INLINE T *ReleasePointerUnsafe() { T *ret = m_obj; m_obj = nullptr; return ret; }
constexpr ALWAYS_INLINE bool IsNull() const { return m_obj == nullptr; }
constexpr ALWAYS_INLINE bool IsNotNull() const { return m_obj != nullptr; }
};
template<typename T> requires std::derived_from<T, KAutoObject>
class KSharedAutoObject {
private:
T *m_object;
KAutoObject::ReferenceCount m_ref_count;
public:
explicit KSharedAutoObject() : m_object(nullptr) { /* ... */ }
void Attach(T *obj) {
MESOSPHERE_ASSERT(m_object == nullptr);
/* Set our object. */
m_object = obj;
/* Open reference to our object. */
m_object->Open();
/* Set our reference count. */
m_ref_count = 1;
}
bool Open() {
return m_ref_count.Open();
}
void Close() {
if (m_ref_count.Close()) {
this->Detach();
}
}
ALWAYS_INLINE T *Get() const {
return m_object;
}
private:
void Detach() {
/* Close our object, if we have one. */
if (T * const object = m_object; AMS_LIKELY(object != nullptr)) {
/* Set our object to a debug sentinel value, which will cause a crash if accessed. */
m_object = reinterpret_cast<T *>(1);
/* Close reference to our object. */
object->Close();
}
}
};
}
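/* Editor's illustrative sketch (not part of the original header): the ReferenceCount
 * protocol above only increments while the count is still positive, so an object that has
 * reached zero can never be revived by a racing Open(). A standalone version using
 * std::atomic (assumed names, no kernel dependencies): */
#include <atomic>
#include <cassert>
#include <cstdint>

class RefCount {
    private:
        std::atomic<std::uint32_t> m_value{1};  /* created with one reference, like Create() */
    public:
        bool Open() {
            std::uint32_t cur = m_value.load(std::memory_order_relaxed);
            do {
                if (cur == 0) {
                    return false;  /* already fully closed: refuse to resurrect */
                }
            } while (!m_value.compare_exchange_weak(cur, cur + 1, std::memory_order_relaxed));
            return true;
        }
        bool Close() {
            std::uint32_t cur = m_value.load(std::memory_order_relaxed);
            do {
                assert(cur > 0);
            } while (!m_value.compare_exchange_weak(cur, cur - 1, std::memory_order_relaxed));
            return cur - 1 == 0;  /* true when this call released the last reference */
        }
};

int main() {
    RefCount rc;
    assert(rc.Open());    /* 1 -> 2 */
    assert(!rc.Close());  /* 2 -> 1 */
    assert(rc.Close());   /* 1 -> 0: caller would schedule destruction here */
    assert(!rc.Open());   /* dead objects stay dead */
    return 0;
}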
| 16,225
|
C++
|
.h
| 314
| 36.866242
| 189
| 0.514318
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,837
|
kern_k_server_port.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/kern_k_server_port.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_synchronization_object.hpp>
#include <mesosphere/kern_slab_helpers.hpp>
namespace ams::kern {
class KPort;
class KServerSession;
class KLightServerSession;
class KServerPort final : public KSynchronizationObject {
MESOSPHERE_AUTOOBJECT_TRAITS(KServerPort, KSynchronizationObject);
private:
using SessionList = util::IntrusiveListBaseTraits<KServerSession>::ListType;
using LightSessionList = util::IntrusiveListBaseTraits<KLightServerSession>::ListType;
private:
SessionList m_session_list;
LightSessionList m_light_session_list;
KPort *m_parent;
public:
constexpr explicit KServerPort(util::ConstantInitializeTag) : KSynchronizationObject(util::ConstantInitialize), m_session_list(), m_light_session_list(), m_parent() { /* ... */ }
explicit KServerPort() { /* ... */ }
void Initialize(KPort *parent);
void EnqueueSession(KServerSession *session);
void EnqueueSession(KLightServerSession *session);
KServerSession *AcceptSession();
KLightServerSession *AcceptLightSession();
constexpr const KPort *GetParent() const { return m_parent; }
bool IsLight() const;
/* Overridden virtual functions. */
virtual void Destroy() override;
virtual bool IsSignaled() const override;
private:
void CleanupSessions();
};
}
| 2,213
|
C++
|
.h
| 49
| 38.061224
| 190
| 0.692807
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,838
|
kern_k_system_control.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_system_control.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_system_control_base.hpp>
namespace ams::kern::board::nintendo::nx {
class KSystemControl : public KSystemControlBase {
public:
/* This can be overridden as needed. */
static constexpr size_t SecureAppletMemorySize = 4_MB;
public:
class Init : public KSystemControlBase::Init {
private:
friend class KSystemControlBase::Init;
private:
static void CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg);
public:
/* Initialization. */
static size_t GetRealMemorySize();
static size_t GetIntendedMemorySize();
static bool ShouldIncreaseThreadResourceLimit();
static size_t GetApplicationPoolSize();
static size_t GetAppletPoolSize();
static size_t GetMinimumNonSecureSystemPoolSize();
static u8 GetDebugLogUartPort();
/* Randomness. */
static void GenerateRandom(u64 *dst, size_t count);
static u64 GenerateRandomRange(u64 min, u64 max);
};
public:
/* Initialization. */
static NOINLINE void ConfigureKTargetSystem();
static NOINLINE void InitializePhase1();
static NOINLINE void InitializePhase2();
static NOINLINE u32 GetCreateProcessMemoryPool();
/* Randomness. */
static void GenerateRandom(u64 *dst, size_t count);
static u64 GenerateRandomRange(u64 min, u64 max);
static u64 GenerateRandomU64();
/* Privileged Access. */
static void ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
static Result ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value);
/* Power management. */
static void SleepSystem();
static NORETURN void StopSystem(void *arg = nullptr);
/* User access. */
static void CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args);
/* Secure Memory. */
static size_t CalculateRequiredSecureMemorySize(size_t size, u32 pool);
static Result AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool);
static void FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool);
};
}
| 3,269
|
C++
|
.h
| 66
| 38.5
| 118
| 0.634429
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,839
|
kern_k_device_page_table.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_device_page_table.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_page_group.hpp>
#include <mesosphere/kern_k_memory_manager.hpp>
#include <mesosphere/kern_select_page_table.hpp>
namespace ams::kern::board::nintendo::nx {
using KDeviceVirtualAddress = u64;
class KDevicePageTable {
private:
static constexpr size_t TableCount = 4;
private:
KVirtualAddress m_tables[TableCount];
u8 m_table_asids[TableCount];
u64 m_attached_device;
u32 m_attached_value;
u32 m_detached_value;
u32 m_hs_attached_value;
u32 m_hs_detached_value;
private:
static ALWAYS_INLINE bool IsHeapVirtualAddress(KVirtualAddress addr) {
const KMemoryRegion *hint = nullptr;
return KMemoryLayout::IsHeapVirtualAddress(hint, addr);
}
static ALWAYS_INLINE bool IsHeapPhysicalAddress(KPhysicalAddress addr) {
const KMemoryRegion *hint = nullptr;
return KMemoryLayout::IsHeapPhysicalAddress(hint, addr);
}
static ALWAYS_INLINE KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress addr) {
return KPageTable::GetHeapVirtualAddress(addr);
}
static ALWAYS_INLINE KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress addr) {
return KPageTable::GetHeapPhysicalAddress(addr);
}
static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
return KPageTable::GetPageTableVirtualAddress(addr);
}
static ALWAYS_INLINE KPhysicalAddress GetPageTablePhysicalAddress(KVirtualAddress addr) {
return KPageTable::GetPageTablePhysicalAddress(addr);
}
public:
constexpr KDevicePageTable()
: m_tables{Null<KVirtualAddress>, Null<KVirtualAddress>, Null<KVirtualAddress>, Null<KVirtualAddress>},
m_table_asids(), m_attached_device(), m_attached_value(), m_detached_value(), m_hs_attached_value(), m_hs_detached_value()
{
/* ... */
}
Result Initialize(u64 space_address, u64 space_size);
void Finalize();
Result Attach(ams::svc::DeviceName device_name, u64 space_address, u64 space_size);
Result Detach(ams::svc::DeviceName device_name);
Result Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned, bool is_io);
Result Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address);
void Unmap(KDeviceVirtualAddress device_address, size_t size) {
return this->UnmapImpl(device_address, size, false);
}
private:
Result MapDevicePage(KPhysicalAddress phys_addr, u64 size, KDeviceVirtualAddress address, ams::svc::MemoryPermission device_perm);
Result MapImpl(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned);
void UnmapImpl(KDeviceVirtualAddress address, u64 size, bool force);
bool IsFree(KDeviceVirtualAddress address, u64 size) const;
bool Compare(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address) const;
public:
static void Initialize();
static void Lock();
static void Unlock();
static void Sleep();
static void Wakeup();
};
}
| 4,480
|
C++
|
.h
| 84
| 43.107143
| 207
| 0.675645
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,840
|
kern_k_memory_layout.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_k_memory_layout.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
namespace ams::kern {
constexpr inline KPhysicalAddress MainMemoryAddress = 0x80000000;
constexpr inline size_t MainMemorySize = 4_GB;
constexpr inline size_t MainMemorySizeMax = 8_GB;
constexpr inline u32 MinimumMemoryManagerAlignmentShifts[] = {
0, 0, 0, 0
};
}
| 1,033
|
C++
|
.h
| 26
| 36.923077
| 76
| 0.752495
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,841
|
kern_cpu_map.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/board/nintendo/nx/kern_cpu_map.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
namespace ams::kern::board::nintendo::nx::impl::cpu {
/* Virtual to Physical core map. */
constexpr inline const s32 VirtualToPhysicalCoreMap[BITSIZEOF(u64)] = {
0, 1, 2, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 3,
};
}
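/* Editor's illustrative sketch (not part of the original header): the table maps a virtual
 * core index to a physical core by direct array lookup. Standalone approximation with a
 * shortened table and an assumed helper name: */
#include <cstdint>

constexpr int VirtualToPhysicalCoreMapSketch[8] = { 0, 1, 2, 3, 0, 0, 0, 0 };

constexpr int GetPhysicalCore(std::uint64_t virtual_core) {
    return VirtualToPhysicalCoreMapSketch[virtual_core & 7];
}

int main() {
    static_assert(GetPhysicalCore(2) == 2);
    static_assert(GetPhysicalCore(5) == 0);  /* unmapped virtual cores collapse to core 0 in this sketch */
    return 0;
}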
| 1,124
|
C++
|
.h
| 30
| 33.366667
| 76
| 0.636114
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,842
|
kern_k_system_control.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/board/qemu/virt/kern_k_system_control.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_system_control_base.hpp>
namespace ams::kern::board::qemu::virt {
class KSystemControl : public KSystemControlBase {
public:
/* User access. */
static void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args);
};
}
| 990
|
C++
|
.h
| 25
| 36.36
| 96
| 0.737279
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,843
|
kern_k_memory_layout.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/board/qemu/virt/kern_k_memory_layout.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
namespace ams::kern {
constexpr inline KPhysicalAddress MainMemoryAddress = 0x40000000;
constexpr inline size_t MainMemorySize = 4_GB;
constexpr inline size_t MainMemorySizeMax = 8_GB;
}
| 939
|
C++
|
.h
| 23
| 38.521739
| 76
| 0.764254
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,844
|
kern_cpu_map.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/board/qemu/virt/kern_cpu_map.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
namespace ams::kern::board::qemu::virt::impl::cpu {
/* Virtual to Physical core map. */
constexpr inline const s32 VirtualToPhysicalCoreMap[BITSIZEOF(u64)] = {
0, 1, 2, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 3,
};
}
| 1,122
|
C++
|
.h
| 30
| 33.3
| 76
| 0.635445
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,845
|
kern_k_device_page_table.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/board/generic/kern_k_device_page_table.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_page_group.hpp>
#include <mesosphere/kern_k_memory_manager.hpp>
#include <mesosphere/kern_select_page_table.hpp>
namespace ams::kern::board::generic {
using KDeviceVirtualAddress = u64;
class KDevicePageTable {
public:
constexpr KDevicePageTable() { /* ... */ }
Result ALWAYS_INLINE Initialize(u64 space_address, u64 space_size) {
MESOSPHERE_UNUSED(space_address, space_size);
R_THROW(ams::kern::svc::ResultNotImplemented());
}
void ALWAYS_INLINE Finalize() { /* ... */ }
Result ALWAYS_INLINE Attach(ams::svc::DeviceName device_name, u64 space_address, u64 space_size) {
MESOSPHERE_UNUSED(device_name, space_address, space_size);
R_THROW(ams::kern::svc::ResultNotImplemented());
}
Result ALWAYS_INLINE Detach(ams::svc::DeviceName device_name) {
MESOSPHERE_UNUSED(device_name);
R_THROW(ams::kern::svc::ResultNotImplemented());
}
Result ALWAYS_INLINE Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned, bool is_io) {
MESOSPHERE_UNUSED(page_table, process_address, size, device_address, device_perm, is_aligned, is_io);
R_THROW(ams::kern::svc::ResultNotImplemented());
}
Result ALWAYS_INLINE Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address) {
MESOSPHERE_UNUSED(page_table, process_address, size, device_address);
R_THROW(ams::kern::svc::ResultNotImplemented());
}
void ALWAYS_INLINE Unmap(KDeviceVirtualAddress device_address, size_t size) {
MESOSPHERE_UNUSED(device_address, size);
}
public:
static ALWAYS_INLINE void Initialize() { /* ... */ }
static ALWAYS_INLINE void Lock() { /* ... */ }
static ALWAYS_INLINE void Unlock() { /* ... */ }
static ALWAYS_INLINE void Sleep() { /* ... */ }
static ALWAYS_INLINE void Wakeup() { /* ... */ }
};
}
| 2,994
|
C++
|
.h
| 57
| 43.508772
| 222
| 0.648325
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,846
|
kern_k_thread_context.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
namespace ams::kern {
class KThread;
}
namespace ams::kern::arch::arm64 {
class KThreadContext {
public:
static constexpr size_t NumCalleeSavedRegisters = (29 - 19) + 1;
static constexpr size_t NumCalleeSavedFpuRegisters = 8;
static constexpr size_t NumCallerSavedFpuRegisters = 24;
static constexpr size_t NumFpuRegisters = NumCalleeSavedFpuRegisters + NumCallerSavedFpuRegisters;
public:
union CalleeSaveRegisters {
u64 registers[NumCalleeSavedRegisters];
struct {
u64 x19;
u64 x20;
u64 x21;
u64 x22;
u64 x23;
u64 x24;
u64 x25;
u64 x26;
u64 x27;
u64 x28;
u64 x29;
};
};
union CalleeSaveFpu64Registers {
u128 v[NumCalleeSavedFpuRegisters];
struct {
u128 q8;
u128 q9;
u128 q10;
u128 q11;
u128 q12;
u128 q13;
u128 q14;
u128 q15;
};
};
union CalleeSaveFpu32Registers {
u128 v[NumCalleeSavedFpuRegisters / 2];
struct {
u128 q4;
u128 q5;
u128 q6;
u128 q7;
};
};
union CalleeSaveFpuRegisters {
CalleeSaveFpu64Registers fpu64;
CalleeSaveFpu32Registers fpu32;
};
union CallerSaveFpu64Registers {
u128 v[NumCallerSavedFpuRegisters];
struct {
union {
u128 v0_7[NumCallerSavedFpuRegisters / 3];
struct {
u128 q0;
u128 q1;
u128 q2;
u128 q3;
u128 q4;
u128 q5;
u128 q6;
u128 q7;
};
};
union {
u128 v16_31[2 * NumCallerSavedFpuRegisters / 3];
struct {
u128 q16;
u128 q17;
u128 q18;
u128 q19;
u128 q20;
u128 q21;
u128 q22;
u128 q23;
u128 q24;
u128 q25;
u128 q26;
u128 q27;
u128 q28;
u128 q29;
u128 q30;
u128 q31;
};
};
};
};
union CallerSaveFpu32Registers {
u128 v[NumCallerSavedFpuRegisters / 2];
struct {
union {
u128 v0_3[(NumCallerSavedFpuRegisters / 3) / 2];
struct {
u128 q0;
u128 q1;
u128 q2;
u128 q3;
};
};
union {
u128 v8_15[(2 * NumCallerSavedFpuRegisters / 3) / 2];
struct {
u128 q8;
u128 q9;
u128 q10;
u128 q11;
u128 q12;
u128 q13;
u128 q14;
u128 q15;
};
};
};
};
union CallerSaveFpuRegisters {
CallerSaveFpu64Registers fpu64;
CallerSaveFpu32Registers fpu32;
};
private:
CalleeSaveRegisters m_callee_saved;
u64 m_lr;
u64 m_sp;
u32 m_fpcr;
u32 m_fpsr;
alignas(0x10) CalleeSaveFpuRegisters m_callee_saved_fpu;
bool m_locked;
private:
static void RestoreFpuRegisters64(const KThreadContext &);
static void RestoreFpuRegisters32(const KThreadContext &);
public:
constexpr explicit KThreadContext(util::ConstantInitializeTag) : m_callee_saved(), m_lr(), m_sp(), m_fpcr(), m_fpsr(), m_callee_saved_fpu(), m_locked() { /* ... */ }
explicit KThreadContext() { /* ... */ }
Result Initialize(KVirtualAddress u_pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, bool is_user, bool is_64_bit, bool is_main);
void SetArguments(uintptr_t arg0, uintptr_t arg1);
static void FpuContextSwitchHandler(KThread *thread);
u32 GetFpcr() const { return m_fpcr; }
u32 GetFpsr() const { return m_fpsr; }
void SetFpcr(u32 v) { m_fpcr = v; }
void SetFpsr(u32 v) { m_fpsr = v; }
void CloneFpuStatus();
const auto &GetCalleeSaveFpuRegisters() const { return m_callee_saved_fpu; }
auto &GetCalleeSaveFpuRegisters() { return m_callee_saved_fpu; }
public:
static void OnThreadTerminating(const KThread *thread);
public:
static consteval bool ValidateOffsets();
template<typename CallerSave, typename CalleeSave> requires ((std::same_as<CallerSave, CallerSaveFpu64Registers> && std::same_as<CalleeSave, CalleeSaveFpu64Registers>) || (std::same_as<CallerSave, CallerSaveFpu32Registers> && std::same_as<CalleeSave, CalleeSaveFpu32Registers>))
static void GetFpuRegisters(u128 *out, const CallerSave &caller_save, const CalleeSave &callee_save) {
/* Check that the register counts are correct. */
constexpr size_t RegisterUnitCount = util::size(CalleeSave{}.v);
static_assert(util::size(CalleeSave{}.v) == 1 * RegisterUnitCount);
static_assert(util::size(CallerSave{}.v) == 3 * RegisterUnitCount);
/* Copy the low caller-save registers. */
for (size_t i = 0; i < RegisterUnitCount; ++i) {
*(out++) = caller_save.v[i];
}
/* Copy the callee-save registers. */
for (size_t i = 0; i < RegisterUnitCount; ++i) {
*(out++) = callee_save.v[i];
}
/* Copy the remaining caller-save registers. */
for (size_t i = 0; i < 2 * RegisterUnitCount; ++i) {
*(out++) = caller_save.v[RegisterUnitCount + i];
}
}
template<typename CallerSave, typename CalleeSave> requires ((std::same_as<CallerSave, CallerSaveFpu64Registers> && std::same_as<CalleeSave, CalleeSaveFpu64Registers>) || (std::same_as<CallerSave, CallerSaveFpu32Registers> && std::same_as<CalleeSave, CalleeSaveFpu32Registers>))
static ALWAYS_INLINE void SetFpuRegisters(CallerSave &caller_save, CalleeSave &callee_save, const u128 *v) {
/* Check that the register counts are correct. */
constexpr size_t RegisterUnitCount = util::size(CalleeSave{}.v);
static_assert(util::size(CalleeSave{}.v) == 1 * RegisterUnitCount);
static_assert(util::size(CallerSave{}.v) == 3 * RegisterUnitCount);
/* Copy the low caller-save registers. */
for (size_t i = 0; i < RegisterUnitCount; ++i) {
caller_save.v[i] = *(v++);
}
/* Copy the callee-save registers. */
for (size_t i = 0; i < RegisterUnitCount; ++i) {
callee_save.v[i] = *(v++);
}
/* Copy the remaining caller-save registers. */
for (size_t i = 0; i < 2 * RegisterUnitCount; ++i) {
caller_save.v[RegisterUnitCount + i] = *(v++);
}
}
};
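/* Editor's illustrative sketch (not part of the original header): GetFpuRegisters above
 * assembles the full register view from three chunks in order: the low caller-saved unit,
 * the callee-saved unit, then the remaining two caller-saved units. A standalone
 * demonstration of that interleaving with plain int arrays and assumed values: */
#include <cassert>
#include <cstddef>

int main() {
    constexpr std::size_t RegisterUnitCount = 8;
    int caller_save[3 * RegisterUnitCount];   /* stands in for q0..q7 and q16..q31 */
    int callee_save[1 * RegisterUnitCount];   /* stands in for q8..q15 */
    for (std::size_t i = 0; i < 3 * RegisterUnitCount; ++i) { caller_save[i] = 100 + static_cast<int>(i); }
    for (std::size_t i = 0; i < 1 * RegisterUnitCount; ++i) { callee_save[i] = 200 + static_cast<int>(i); }

    int full[4 * RegisterUnitCount];
    int *out = full;
    for (std::size_t i = 0; i < RegisterUnitCount; ++i)     { *(out++) = caller_save[i]; }                      /* q0..q7   */
    for (std::size_t i = 0; i < RegisterUnitCount; ++i)     { *(out++) = callee_save[i]; }                      /* q8..q15  */
    for (std::size_t i = 0; i < 2 * RegisterUnitCount; ++i) { *(out++) = caller_save[RegisterUnitCount + i]; }  /* q16..q31 */

    assert(full[0]  == 100);  /* q0 comes from the low caller-save unit */
    assert(full[8]  == 200);  /* q8 comes from the callee-save unit     */
    assert(full[16] == 108);  /* q16 resumes the caller-save registers  */
    return 0;
}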
consteval bool KThreadContext::ValidateOffsets() {
static_assert(sizeof(KThreadContext) == THREAD_CONTEXT_SIZE);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved.registers) == THREAD_CONTEXT_CPU_REGISTERS);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved.x19) == THREAD_CONTEXT_X19);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved.x20) == THREAD_CONTEXT_X20);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved.x21) == THREAD_CONTEXT_X21);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved.x22) == THREAD_CONTEXT_X22);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved.x23) == THREAD_CONTEXT_X23);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved.x24) == THREAD_CONTEXT_X24);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved.x25) == THREAD_CONTEXT_X25);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved.x26) == THREAD_CONTEXT_X26);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved.x27) == THREAD_CONTEXT_X27);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved.x28) == THREAD_CONTEXT_X28);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved.x29) == THREAD_CONTEXT_X29);
static_assert(AMS_OFFSETOF(KThreadContext, m_lr) == THREAD_CONTEXT_LR);
static_assert(AMS_OFFSETOF(KThreadContext, m_sp) == THREAD_CONTEXT_SP);
static_assert(AMS_OFFSETOF(KThreadContext, m_fpcr) == THREAD_CONTEXT_FPCR);
static_assert(AMS_OFFSETOF(KThreadContext, m_fpsr) == THREAD_CONTEXT_FPSR);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved_fpu) == THREAD_CONTEXT_FPU_REGISTERS);
static_assert(AMS_OFFSETOF(KThreadContext, m_locked) == THREAD_CONTEXT_LOCKED);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved_fpu.fpu64.q8 ) == THREAD_CONTEXT_FPU64_Q8 );
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved_fpu.fpu64.q9 ) == THREAD_CONTEXT_FPU64_Q9 );
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved_fpu.fpu64.q10) == THREAD_CONTEXT_FPU64_Q10);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved_fpu.fpu64.q11) == THREAD_CONTEXT_FPU64_Q11);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved_fpu.fpu64.q12) == THREAD_CONTEXT_FPU64_Q12);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved_fpu.fpu64.q13) == THREAD_CONTEXT_FPU64_Q13);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved_fpu.fpu64.q14) == THREAD_CONTEXT_FPU64_Q14);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved_fpu.fpu64.q15) == THREAD_CONTEXT_FPU64_Q15);
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved_fpu.fpu32.q4 ) == THREAD_CONTEXT_FPU32_Q4 );
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved_fpu.fpu32.q5 ) == THREAD_CONTEXT_FPU32_Q5 );
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved_fpu.fpu32.q6 ) == THREAD_CONTEXT_FPU32_Q6 );
static_assert(AMS_OFFSETOF(KThreadContext, m_callee_saved_fpu.fpu32.q7 ) == THREAD_CONTEXT_FPU32_Q7 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q0 ) == THREAD_FPU64_CONTEXT_Q0 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q1 ) == THREAD_FPU64_CONTEXT_Q1 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q2 ) == THREAD_FPU64_CONTEXT_Q2 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q3 ) == THREAD_FPU64_CONTEXT_Q3 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q4 ) == THREAD_FPU64_CONTEXT_Q4 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q5 ) == THREAD_FPU64_CONTEXT_Q5 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q6 ) == THREAD_FPU64_CONTEXT_Q6 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q7 ) == THREAD_FPU64_CONTEXT_Q7 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q16) == THREAD_FPU64_CONTEXT_Q16);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q17) == THREAD_FPU64_CONTEXT_Q17);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q18) == THREAD_FPU64_CONTEXT_Q18);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q19) == THREAD_FPU64_CONTEXT_Q19);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q20) == THREAD_FPU64_CONTEXT_Q20);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q21) == THREAD_FPU64_CONTEXT_Q21);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q22) == THREAD_FPU64_CONTEXT_Q22);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q23) == THREAD_FPU64_CONTEXT_Q23);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q24) == THREAD_FPU64_CONTEXT_Q24);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q25) == THREAD_FPU64_CONTEXT_Q25);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q26) == THREAD_FPU64_CONTEXT_Q26);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q27) == THREAD_FPU64_CONTEXT_Q27);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q28) == THREAD_FPU64_CONTEXT_Q28);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q29) == THREAD_FPU64_CONTEXT_Q29);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q30) == THREAD_FPU64_CONTEXT_Q30);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu64.q31) == THREAD_FPU64_CONTEXT_Q31);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu32.q0 ) == THREAD_FPU32_CONTEXT_Q0 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu32.q1 ) == THREAD_FPU32_CONTEXT_Q1 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu32.q2 ) == THREAD_FPU32_CONTEXT_Q2 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu32.q3 ) == THREAD_FPU32_CONTEXT_Q3 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu32.q8 ) == THREAD_FPU32_CONTEXT_Q8 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu32.q9 ) == THREAD_FPU32_CONTEXT_Q9 );
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu32.q10) == THREAD_FPU32_CONTEXT_Q10);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu32.q11) == THREAD_FPU32_CONTEXT_Q11);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu32.q12) == THREAD_FPU32_CONTEXT_Q12);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu32.q13) == THREAD_FPU32_CONTEXT_Q13);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu32.q14) == THREAD_FPU32_CONTEXT_Q14);
static_assert(AMS_OFFSETOF(KThreadContext::CallerSaveFpuRegisters, fpu32.q15) == THREAD_FPU32_CONTEXT_Q15);
return true;
}
static_assert(KThreadContext::ValidateOffsets());
void GetUserContext(ams::svc::ThreadContext *out, const KThread *thread);
}
| 16,911
|
C++
|
.h
| 281
| 44.939502
| 290
| 0.588515
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,847
|
kern_k_page_table.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_page_table_base.hpp>
#include <mesosphere/kern_k_page_group.hpp>
#include <mesosphere/kern_k_page_table_manager.hpp>
namespace ams::kern::arch::arm64 {
class KPageTable final : public KPageTableBase {
NON_COPYABLE(KPageTable);
NON_MOVEABLE(KPageTable);
private:
friend class KPageTableBase;
public:
using TraversalEntry = KPageTableImpl::TraversalEntry;
using TraversalContext = KPageTableImpl::TraversalContext;
enum BlockType {
BlockType_L3Block,
BlockType_L3ContiguousBlock,
BlockType_L2Block,
#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
BlockType_L2TegraSmmuBlock,
#endif
BlockType_L2ContiguousBlock,
BlockType_L1Block,
BlockType_Count,
};
static_assert(L3BlockSize == PageSize);
static constexpr size_t ContiguousPageSize = L3ContiguousBlockSize;
#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
static constexpr size_t L2TegraSmmuBlockSize = 2 * L2BlockSize;
#endif
static constexpr size_t BlockSizes[BlockType_Count] = {
[BlockType_L3Block] = L3BlockSize,
[BlockType_L3ContiguousBlock] = L3ContiguousBlockSize,
[BlockType_L2Block] = L2BlockSize,
#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
[BlockType_L2TegraSmmuBlock] = L2TegraSmmuBlockSize,
#endif
[BlockType_L2ContiguousBlock] = L2ContiguousBlockSize,
[BlockType_L1Block] = L1BlockSize,
};
static constexpr BlockType GetMaxBlockType() {
return BlockType_L1Block;
}
static constexpr size_t GetBlockSize(BlockType type) {
return BlockSizes[type];
}
static constexpr BlockType GetBlockType(size_t size) {
switch (size) {
case L3BlockSize: return BlockType_L3Block;
case L3ContiguousBlockSize: return BlockType_L3ContiguousBlock;
case L2BlockSize: return BlockType_L2Block;
#ifdef ATMOSPHERE_BOARD_NINTENDO_NX
case L2TegraSmmuBlockSize: return BlockType_L2TegraSmmuBlock;
#endif
case L2ContiguousBlockSize: return BlockType_L2ContiguousBlock;
case L1BlockSize: return BlockType_L1Block;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
static constexpr size_t GetSmallerAlignment(size_t alignment) {
MESOSPHERE_ASSERT(alignment > L3BlockSize);
return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) - 1));
}
static constexpr size_t GetLargerAlignment(size_t alignment) {
MESOSPHERE_ASSERT(alignment < L1BlockSize);
return KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(KPageTable::GetBlockType(alignment) + 1));
}
public:
/* TODO: How should this size be determined? Does the KProcess slab count need to go in a header as a define? */
static constexpr size_t NumTtbr0Entries = 81;
private:
static constinit inline const volatile u64 s_ttbr0_entries[NumTtbr0Entries] = {};
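/* Slot 0 of s_ttbr0_entries holds the kernel's own TTBR0 value (see GetKernelTtbr0); */
/* slots 1 onward hold the per-process tables selected by ActivateProcess. */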
private:
KPageTableManager *m_manager;
u8 m_asid;
protected:
Result OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll);
Result OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll);
void FinalizeUpdateImpl(PageLinkedList *page_list);
KPageTableManager &GetPageTableManager() const { return *m_manager; }
private:
constexpr PageTableEntry GetEntryTemplate(const KPageProperties properties) const {
/* Check that the property is not kernel execute. */
MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_KernelExecute) == 0);
/* Set basic attributes. */
PageTableEntry entry{PageTableEntry::ExtensionFlag_Valid};
entry.SetPrivilegedExecuteNever(true);
entry.SetAccessFlag(PageTableEntry::AccessFlag_Accessed);
entry.SetShareable(PageTableEntry::Shareable_InnerShareable);
if (!this->IsKernel()) {
entry.SetGlobal(false);
}
/* Set page attribute. */
if (properties.io) {
MESOSPHERE_ABORT_UNLESS(!properties.uncached);
MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_UserExecute) == 0);
entry.SetPageAttribute(PageTableEntry::PageAttribute_Device_nGnRnE)
.SetUserExecuteNever(true);
} else if (properties.uncached) {
MESOSPHERE_ABORT_UNLESS((properties.perm & KMemoryPermission_UserExecute) == 0);
entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemoryNotCacheable)
.SetUserExecuteNever(true);
} else {
entry.SetPageAttribute(PageTableEntry::PageAttribute_NormalMemory);
if ((properties.perm & KMemoryPermission_UserExecute) != 0) {
/* Check that the permission is either r--/--x or r--/r-x. */
MESOSPHERE_ABORT_UNLESS((properties.perm & ~ams::svc::MemoryPermission_Read) == (KMemoryPermission_KernelRead | KMemoryPermission_UserExecute));
} else {
entry.SetUserExecuteNever(true);
}
}
/* Set AP[1] based on perm. */
switch (properties.perm & KMemoryPermission_UserReadWrite) {
case KMemoryPermission_UserReadWrite:
case KMemoryPermission_UserRead:
entry.SetUserAccessible(true);
break;
case KMemoryPermission_KernelReadWrite:
case KMemoryPermission_KernelRead:
entry.SetUserAccessible(false);
break;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
/* Set AP[2] based on perm. */
switch (properties.perm & KMemoryPermission_UserReadWrite) {
case KMemoryPermission_UserReadWrite:
case KMemoryPermission_KernelReadWrite:
entry.SetReadOnly(false);
break;
case KMemoryPermission_KernelRead:
case KMemoryPermission_UserRead:
entry.SetReadOnly(true);
break;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
/* Set the fault bit based on whether the page is mapped. */
entry.SetMapped((properties.perm & KMemoryPermission_NotMapped) == 0);
return entry;
}
public:
constexpr explicit KPageTable(util::ConstantInitializeTag) : KPageTableBase(util::ConstantInitialize), m_manager(), m_asid() { /* ... */ }
explicit KPageTable() { /* ... */ }
static NOINLINE void Initialize(s32 core_id);
static const volatile u64 &GetTtbr0Entry(size_t index) { return s_ttbr0_entries[index]; }
static ALWAYS_INLINE u64 GetKernelTtbr0() {
return s_ttbr0_entries[0];
}
static ALWAYS_INLINE void ActivateKernel() {
/* Activate, using asid 0 and process id = 0xFFFFFFFF */
cpu::SwitchProcess(GetKernelTtbr0(), 0xFFFFFFFF);
}
static ALWAYS_INLINE void ActivateProcess(size_t proc_idx, u32 proc_id) {
cpu::SwitchProcess(s_ttbr0_entries[proc_idx + 1], proc_id);
}
NOINLINE Result InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end);
NOINLINE Result InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index);
Result Finalize();
private:
Result Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll);
Result Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll);
Result MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll);
Result MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll);
bool MergePages(TraversalContext *context, PageLinkedList *page_list);
void MergePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list);
Result SeparatePagesImpl(TraversalEntry *entry, TraversalContext *context, KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll);
Result SeparatePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool reuse_ll);
Result ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll);
static ALWAYS_INLINE void PteDataMemoryBarrier() {
cpu::DataMemoryBarrierInnerShareableStore();
}
static ALWAYS_INLINE void ClearPageTable(KVirtualAddress table) {
cpu::ClearPageToZero(GetVoidPointer(table));
}
ALWAYS_INLINE void OnTableUpdated() const {
cpu::InvalidateTlbByAsid(m_asid);
}
ALWAYS_INLINE void OnKernelTableUpdated() const {
cpu::InvalidateEntireTlbDataOnly();
}
ALWAYS_INLINE void OnKernelTableSinglePageUpdated(KProcessAddress virt_addr) const {
cpu::InvalidateTlbByVaDataOnly(virt_addr);
}
void NoteUpdated() const;
void NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const;
KVirtualAddress AllocatePageTable(PageLinkedList *page_list, bool reuse_ll) const {
KVirtualAddress table = this->GetPageTableManager().Allocate();
if (table == Null<KVirtualAddress>) {
if (reuse_ll && page_list->Peek()) {
table = KVirtualAddress(reinterpret_cast<uintptr_t>(page_list->Pop()));
} else {
return Null<KVirtualAddress>;
}
}
MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(table) == 0);
return table;
}
void FreePageTable(PageLinkedList *page_list, KVirtualAddress table) const {
MESOSPHERE_ASSERT(this->GetPageTableManager().IsInPageTableHeap(table));
MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(table) == 0);
page_list->Push(table);
}
};
}
| 12,835
|
C++
|
.h
| 219
| 44.26484
| 263
| 0.625457
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,848
|
kern_userspace_memory_access.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_userspace_memory_access.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
namespace ams::kern::arch::arm64 {
void UserspaceAccessFunctionAreaBegin();
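/* The Begin/End markers delimit the routines that dereference user pointers directly, */
/* so that faults taken inside this code range can be recognized and treated as recoverable. */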
class UserspaceAccess {
private:
static bool CopyMemoryFromUserSize32BitWithSupervisorAccessImpl(void *dst, const void *src);
public:
static bool CopyMemoryFromUserSize32BitWithSupervisorAccess(void *dst, const void *src) {
/* Check that the address is within the valid userspace range. */
if (const uintptr_t src_uptr = reinterpret_cast<uintptr_t>(src); src_uptr < ams::svc::AddressNullGuard32Size || (src_uptr + sizeof(u32) - 1) >= ams::svc::AddressMemoryRegion39Size) {
return false;
}
return CopyMemoryFromUserSize32BitWithSupervisorAccessImpl(dst, src);
}
static bool CopyMemoryFromUser(void *dst, const void *src, size_t size);
static bool CopyMemoryFromUserAligned32Bit(void *dst, const void *src, size_t size);
static bool CopyMemoryFromUserAligned64Bit(void *dst, const void *src, size_t size);
static bool CopyMemoryFromUserSize64Bit(void *dst, const void *src);
static bool CopyMemoryFromUserSize32Bit(void *dst, const void *src);
static s32 CopyStringFromUser(void *dst, const void *src, size_t size);
static bool CopyMemoryToUser(void *dst, const void *src, size_t size);
static bool CopyMemoryToUserAligned32Bit(void *dst, const void *src, size_t size);
static bool CopyMemoryToUserAligned64Bit(void *dst, const void *src, size_t size);
static bool CopyMemoryToUserSize32Bit(void *dst, const void *src);
static s32 CopyStringToUser(void *dst, const void *src, size_t size);
static bool ClearMemory(void *dst, size_t size);
static bool ClearMemoryAligned32Bit(void *dst, size_t size);
static bool ClearMemoryAligned64Bit(void *dst, size_t size);
static bool ClearMemorySize32Bit(void *dst);
static bool UpdateLockAtomic(u32 *out, u32 *address, u32 if_zero, u32 new_orr_mask);
static bool UpdateIfEqualAtomic(s32 *out, s32 *address, s32 compare_value, s32 new_value);
static bool DecrementIfLessThanAtomic(s32 *out, s32 *address, s32 compare);
static bool StoreDataCache(uintptr_t start, uintptr_t end);
static bool FlushDataCache(uintptr_t start, uintptr_t end);
static bool InvalidateDataCache(uintptr_t start, uintptr_t end);
static bool ReadIoMemory32Bit(void *dst, const void *src, size_t size);
static bool ReadIoMemory16Bit(void *dst, const void *src, size_t size);
static bool ReadIoMemory8Bit(void *dst, const void *src, size_t size);
static bool WriteIoMemory32Bit(void *dst, const void *src, size_t size);
static bool WriteIoMemory16Bit(void *dst, const void *src, size_t size);
static bool WriteIoMemory8Bit(void *dst, const void *src, size_t size);
};
void UserspaceAccessFunctionAreaEnd();
}
| 3,770
|
C++
|
.h
| 60
| 53.466667
| 198
| 0.688751
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,849
|
kern_k_slab_heap_impl.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_slab_heap_impl.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_select_interrupt_manager.hpp>
namespace ams::kern::arch::arm64 {
template<typename T>
concept SlabHeapNode = requires (T &t) {
{ t.next } -> std::convertible_to<T *>;
};
ALWAYS_INLINE bool IsSlabAtomicValid() {
/* Without careful consideration, slab heap atomics are vulnerable to */
/* the ABA problem when doing compare and swap of node pointers. */
/* We resolve this by using the ARM exclusive monitor; we bundle the */
/* load and store of the relevant values into a single exclusive monitor */
/* hold, preventing the ABA problem. */
/* However, our assembly must do both a load and a store under a single */
/* hold, at different memory addresses. Considering the case where the */
/* addresses are distinct but happen to resolve to the same cache set, */
/* we can note that under a 1-way associative (direct-mapped) cache the */
/* second access is guaranteed to evict the cache line from the first */
/* access, invalidating our exclusive monitor hold. Thus, we require */
/* that the cache is not 1-way associative for our implementation to */
/* be correct. */
{
/* Disable interrupts. */
KScopedInterruptDisable di;
/* Select L1 cache. */
cpu::SetCsselrEl1(0);
cpu::InstructionMemoryBarrier();
/* Check that the L1 cache is not direct-mapped. */
return cpu::CacheSizeIdRegisterAccessor().GetAssociativity() != 0;
}
}
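/* For contrast, a pointer-based compare-and-swap pop such as: */
/*     node = *head; */
/*     if (node && CompareAndSwap(head, node, node->next)) return node; */
/* can succeed even when the node was freed and reallocated in between, because the */
/* head pointer value happens to match again (the ABA problem). The exclusive-monitor */
/* sequences below fail the store-exclusive in that case instead, and simply retry. */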
template<typename T> requires SlabHeapNode<T>
ALWAYS_INLINE T *AllocateFromSlabAtomic(T **head) {
u32 tmp;
T *node, *next;
__asm__ __volatile__(
"1:\n"
" ldaxr %[node], [%[head]]\n"
" cbz %[node], 2f\n"
" ldr %[next], [%[node]]\n"
" stlxr %w[tmp], %[next], [%[head]]\n"
" cbnz %w[tmp], 1b\n"
"2:\n"
: [tmp]"=&r"(tmp), [node]"=&r"(node), [next]"=&r"(next), [head]"+&r"(head)
:
: "cc", "memory"
);
return node;
}
template<typename T> requires SlabHeapNode<T>
ALWAYS_INLINE void FreeToSlabAtomic(T **head, T *node) {
u32 tmp;
T *next;
__asm__ __volatile__(
"1:\n"
" ldaxr %[next], [%[head]]\n"
" str %[next], [%[node]]\n"
" stlxr %w[tmp], %[node], [%[head]]\n"
" cbnz %w[tmp], 1b\n"
: [tmp]"=&r"(tmp), [node]"+&r"(node), [next]"=&r"(next), [head]"+&r"(head)
:
: "cc", "memory"
);
}
}
| 3,477
|
C++
|
.h
| 82
| 34.463415
| 86
| 0.581856
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,850
|
kern_k_debug.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_debug.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_debug_base.hpp>
namespace ams::kern {
class KThread;
class KProcess;
}
namespace ams::kern::arch::arm64 {
class KDebug final : public KAutoObjectWithSlabHeapAndContainer<KDebug, KDebugBase> {
MESOSPHERE_AUTOOBJECT_TRAITS(KDebug, KSynchronizationObject);
public:
explicit KDebug() { /* ... */ }
static void PostDestroy(uintptr_t arg) { MESOSPHERE_UNUSED(arg); /* ... */ }
public:
/* NOTE: These are virtual in Nintendo's kernel. */
Result GetThreadContextImpl(ams::svc::ThreadContext *out, KThread *thread, u32 context_flags);
Result SetThreadContextImpl(const ams::svc::ThreadContext &ctx, KThread *thread, u32 context_flags);
private:
Result GetFpuContext(ams::svc::ThreadContext *out, KThread *thread, u32 context_flags);
Result SetFpuContext(const ams::svc::ThreadContext &ctx, KThread *thread, u32 context_flags);
public:
static uintptr_t GetProgramCounter(const KThread &thread);
static void SetPreviousProgramCounter();
static void PrintRegister(KThread *thread = nullptr);
static void PrintBacktrace(KThread *thread = nullptr);
static Result BreakIfAttached(ams::svc::BreakReason break_reason, uintptr_t address, size_t size);
static Result SetHardwareBreakPoint(ams::svc::HardwareBreakPointRegisterName name, u64 flags, u64 value);
static constexpr bool IsBreakInstruction(u32 insn, u32 psr) {
constexpr u32 BreakInstructionAarch64 = 0xE7FFFFFF;
constexpr u32 BreakInstructionAarch32 = 0xE7FFDEFE;
constexpr u32 BreakInstructionThumb32 = 0xB68E;
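/* PSR bit 4 (0x10) is set when the exception was taken from AArch32; within AArch32, */
/* the T bit (bit 5, 0x20) selects the Thumb instruction set. */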
if ((psr & 0x10) == 0) {
return insn == BreakInstructionAarch64;
} else {
if ((psr & 0x20) == 0) {
return insn == BreakInstructionAarch32;
} else {
return insn == BreakInstructionThumb32;
}
}
}
};
}
| 2,879
|
C++
|
.h
| 59
| 39.59322
| 117
| 0.655516
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,851
|
kern_cpu.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <mesosphere/arch/arm64/kern_cpu_system_registers.hpp>
#include <mesosphere/kern_select_userspace_memory_access.hpp>
namespace ams::kern::arch::arm64::cpu {
#if defined(ATMOSPHERE_CPU_ARM_CORTEX_A57) || defined(ATMOSPHERE_CPU_ARM_CORTEX_A53)
constexpr inline size_t InstructionCacheLineSize = 0x40;
constexpr inline size_t DataCacheLineSize = 0x40;
constexpr inline size_t NumPerformanceCounters = 6;
#else
#error "Unknown CPU for cache line sizes"
#endif
#if defined(ATMOSPHERE_BOARD_NINTENDO_NX)
constexpr inline size_t NumCores = 4;
#elif defined(ATMOSPHERE_BOARD_QEMU_VIRT)
constexpr inline size_t NumCores = 4;
#else
#error "Unknown Board for cpu::NumCores"
#endif
constexpr inline u32 El0Aarch64PsrMask = 0xF0000000;
constexpr inline u32 El0Aarch32PsrMask = 0xFE0FFE20;
/* Initialization. */
NOINLINE void InitializeInterruptThreads(s32 core_id);
/* Helpers for managing memory state. */
ALWAYS_INLINE void DataSynchronizationBarrier() {
__asm__ __volatile__("dsb sy" ::: "memory");
}
ALWAYS_INLINE void DataSynchronizationBarrierInnerShareable() {
__asm__ __volatile__("dsb ish" ::: "memory");
}
ALWAYS_INLINE void DataSynchronizationBarrierInnerShareableStore() {
__asm__ __volatile__("dsb ishst" ::: "memory");
}
ALWAYS_INLINE void DataMemoryBarrier() {
__asm__ __volatile__("dmb sy" ::: "memory");
}
ALWAYS_INLINE void DataMemoryBarrierInnerShareable() {
__asm__ __volatile__("dmb ish" ::: "memory");
}
ALWAYS_INLINE void DataMemoryBarrierInnerShareableStore() {
__asm__ __volatile__("dmb ishst" ::: "memory");
}
ALWAYS_INLINE void InstructionMemoryBarrier() {
__asm__ __volatile__("isb" ::: "memory");
}
ALWAYS_INLINE void EnsureInstructionConsistency() {
DataSynchronizationBarrierInnerShareable();
InstructionMemoryBarrier();
}
ALWAYS_INLINE void EnsureInstructionConsistencyFullSystem() {
DataSynchronizationBarrier();
InstructionMemoryBarrier();
}
ALWAYS_INLINE void Yield() {
__asm__ __volatile__("yield" ::: "memory");
}
ALWAYS_INLINE void SwitchProcess(u64 ttbr, u32 proc_id) {
SetTtbr0El1(ttbr);
ContextIdRegisterAccessor(0).SetProcId(proc_id).Store();
InstructionMemoryBarrier();
}
/* Performance counter helpers. */
ALWAYS_INLINE u64 GetCycleCounter() {
return cpu::GetPmcCntrEl0();
}
ALWAYS_INLINE u32 GetPerformanceCounter(s32 n) {
u64 counter = 0;
if (n < static_cast<s32>(NumPerformanceCounters)) {
switch (n) {
case 0:
counter = cpu::GetPmevCntr0El0();
break;
case 1:
counter = cpu::GetPmevCntr1El0();
break;
case 2:
counter = cpu::GetPmevCntr2El0();
break;
case 3:
counter = cpu::GetPmevCntr3El0();
break;
case 4:
counter = cpu::GetPmevCntr4El0();
break;
case 5:
counter = cpu::GetPmevCntr5El0();
break;
default:
break;
}
}
return static_cast<u32>(counter);
}
/* Helper for address access. */
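/* These helpers execute an AT (address translate) instruction and read back PAR_EL1: */
/* bit 0 (F) is set on a translation fault; on success, bits 47:12 hold the output frame. */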
ALWAYS_INLINE bool GetPhysicalAddressWritable(KPhysicalAddress *out, KVirtualAddress addr, bool privileged = false) {
const uintptr_t va = GetInteger(addr);
if (privileged) {
__asm__ __volatile__("at s1e1w, %[va]" :: [va]"r"(va) : "memory");
} else {
__asm__ __volatile__("at s1e0w, %[va]" :: [va]"r"(va) : "memory");
}
InstructionMemoryBarrier();
u64 par = GetParEl1();
if (par & 0x1) {
return false;
}
if (out) {
*out = KPhysicalAddress((par & 0xFFFFFFFFF000ull) | (va & 0xFFFull));
}
return true;
}
ALWAYS_INLINE bool GetPhysicalAddressReadable(KPhysicalAddress *out, KVirtualAddress addr, bool privileged = false) {
const uintptr_t va = GetInteger(addr);
if (privileged) {
__asm__ __volatile__("at s1e1r, %[va]" :: [va]"r"(va) : "memory");
} else {
__asm__ __volatile__("at s1e0r, %[va]" :: [va]"r"(va) : "memory");
}
InstructionMemoryBarrier();
u64 par = GetParEl1();
if (par & 0x1) {
return false;
}
if (out) {
*out = KPhysicalAddress((par & 0xFFFFFFFFF000ull) | (va & 0xFFFull));
}
return true;
}
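/* CanAccessAtomic additionally requires PAR_EL1's attribute field (bits 63:56) to read 0xFF, */
/* i.e. Normal, inner/outer write-back cacheable memory, the only mapping accepted here for atomics. */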
ALWAYS_INLINE bool CanAccessAtomic(KProcessAddress addr, bool privileged = false) {
const uintptr_t va = GetInteger(addr);
if (privileged) {
__asm__ __volatile__("at s1e1w, %[va]" :: [va]"r"(va) : "memory");
} else {
__asm__ __volatile__("at s1e0w, %[va]" :: [va]"r"(va) : "memory");
}
InstructionMemoryBarrier();
u64 par = GetParEl1();
if (par & 0x1) {
return false;
}
return (par >> (BITSIZEOF(par) - BITSIZEOF(u8))) == 0xFF;
}
ALWAYS_INLINE void StoreDataCacheForInitArguments(const void *addr, size_t size) {
const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
for (size_t stored = 0; stored < size; stored += cpu::DataCacheLineSize) {
__asm__ __volatile__("dc cvac, %[cur]" :: [cur]"r"(start + stored) : "memory");
}
DataSynchronizationBarrier();
}
/* Synchronization helpers. */
NOINLINE void SynchronizeAllCores();
void SynchronizeCores(u64 core_mask);
/* Cache management helpers. */
void StoreCacheForInit(void *addr, size_t size);
void FlushEntireDataCache();
Result InvalidateDataCache(void *addr, size_t size);
Result StoreDataCache(const void *addr, size_t size);
Result FlushDataCache(const void *addr, size_t size);
void InvalidateEntireInstructionCache();
void ClearPageToZeroImpl(void *);
ALWAYS_INLINE void ClearPageToZero(void * const page) {
MESOSPHERE_ASSERT(util::IsAligned(reinterpret_cast<uintptr_t>(page), PageSize));
MESOSPHERE_ASSERT(page != nullptr);
ClearPageToZeroImpl(page);
}
ALWAYS_INLINE void InvalidateTlbByAsid(u32 asid) {
const u64 value = (static_cast<u64>(asid) << 48);
__asm__ __volatile__("tlbi aside1is, %[value]" :: [value]"r"(value) : "memory");
EnsureInstructionConsistency();
}
ALWAYS_INLINE void InvalidateTlbByAsidAndVa(u32 asid, KProcessAddress virt_addr) {
const u64 value = (static_cast<u64>(asid) << 48) | ((GetInteger(virt_addr) >> 12) & 0xFFFFFFFFFFFul);
__asm__ __volatile__("tlbi aside1is, %[value]" :: [value]"r"(value) : "memory");
EnsureInstructionConsistency();
}
ALWAYS_INLINE void InvalidateEntireTlb() {
__asm__ __volatile__("tlbi vmalle1is" ::: "memory");
EnsureInstructionConsistency();
}
ALWAYS_INLINE void InvalidateEntireTlbDataOnly() {
__asm__ __volatile__("tlbi vmalle1is" ::: "memory");
DataSynchronizationBarrierInnerShareable();
}
ALWAYS_INLINE void InvalidateTlbByVaDataOnly(KProcessAddress virt_addr) {
const u64 value = ((GetInteger(virt_addr) >> 12) & 0xFFFFFFFFFFFul);
__asm__ __volatile__("tlbi vaae1is, %[value]" :: [value]"r"(value) : "memory");
DataSynchronizationBarrierInnerShareable();
}
ALWAYS_INLINE uintptr_t GetCurrentThreadPointerValue() {
register uintptr_t x18 asm("x18");
__asm__ __volatile__("" : [x18]"=r"(x18));
return x18;
}
ALWAYS_INLINE void SetCurrentThreadPointerValue(uintptr_t value) {
register uintptr_t x18 asm("x18") = value;
__asm__ __volatile__("":: [x18]"r"(x18));
}
ALWAYS_INLINE void SetExceptionThreadStackTop(uintptr_t top) {
cpu::SetCntvCvalEl0(top);
}
ALWAYS_INLINE void SwitchThreadLocalRegion(uintptr_t tlr) {
cpu::SetTpidrRoEl0(tlr);
}
}
| 8,943
|
C++
|
.h
| 219
| 32.776256
| 121
| 0.615544
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,852
|
kern_assembly_offsets.h
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_assembly_offsets.h
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_build_config.hpp>
/* TODO: Different header for this? */
#define AMS_KERN_NUM_SUPERVISOR_CALLS 0xC0
/* ams::kern::KThread, https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp */
#define THREAD_KERNEL_STACK_TOP 0x280
/* ams::kern::KThread::StackParameters, https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/kern_k_thread.hpp */
#define THREAD_STACK_PARAMETERS_SIZE 0x140
#define THREAD_STACK_PARAMETERS_SVC_PERMISSION 0x00
#define THREAD_STACK_PARAMETERS_CALLER_SAVE_FPU_REGISTERS 0x18
#define THREAD_STACK_PARAMETERS_CUR_THREAD 0x20
#define THREAD_STACK_PARAMETERS_DISABLE_COUNT 0x28
#define THREAD_STACK_PARAMETERS_DPC_FLAGS 0x2A
#define THREAD_STACK_PARAMETERS_CURRENT_SVC_ID 0x2B
#define THREAD_STACK_PARAMETERS_RESERVED_2C 0x2C
#define THREAD_STACK_PARAMETERS_EXCEPTION_FLAGS 0x2D
#define THREAD_STACK_PARAMETERS_IS_PINNED 0x2E
#define THREAD_STACK_PARAMETERS_RESERVED_2F 0x2F
#define THREAD_STACK_PARAMETERS_RESERVED_30 0x30
#define THREAD_STACK_PARAMETERS_THREAD_CONTEXT 0x40
#define THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_CALLING_SVC (0)
#define THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_IN_EXCEPTION_HANDLER (1)
#define THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_FPU_CONTEXT_RESTORE_NEEDED (2)
#define THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_FPU_64_BIT (3)
#define THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_IN_USERMODE_EXCEPTION_HANDLER (4)
#define THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_IN_CACHE_MAINTENANCE_OPERATION (5)
#define THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_IN_TLB_MAINTENANCE_OPERATION (6)
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
#define THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_HARDWARE_SINGLE_STEP (7)
#endif
#define THREAD_EXCEPTION_FLAG_IS_CALLING_SVC (1 << THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_CALLING_SVC)
#define THREAD_EXCEPTION_FLAG_IS_IN_EXCEPTION_HANDLER (1 << THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_IN_EXCEPTION_HANDLER)
#define THREAD_EXCEPTION_FLAG_IS_FPU_CONTEXT_RESTORE_NEEDED (1 << THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_FPU_CONTEXT_RESTORE_NEEDED)
#define THREAD_EXCEPTION_FLAG_IS_FPU_64_BIT (1 << THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_FPU_64_BIT)
#define THREAD_EXCEPTION_FLAG_IS_IN_USERMODE_EXCEPTION_HANDLER (1 << THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_IN_USERMODE_EXCEPTION_HANDLER)
#define THREAD_EXCEPTION_FLAG_IS_IN_CACHE_MAINTENANCE_OPERATION (1 << THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_IN_CACHE_MAINTENANCE_OPERATION)
#define THREAD_EXCEPTION_FLAG_IS_IN_TLB_MAINTENANCE_OPERATION (1 << THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_IN_TLB_MAINTENANCE_OPERATION)
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
#define THREAD_EXCEPTION_FLAG_IS_HARDWARE_SINGLE_STEP (1 << THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_HARDWARE_SINGLE_STEP)
#endif
/* ams::kern::arch::arm64::KThreadContext, https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_thread_context.hpp */
#define THREAD_CONTEXT_SIZE 0x100
#define THREAD_CONTEXT_CPU_REGISTERS 0x000
#define THREAD_CONTEXT_X19 0x000
#define THREAD_CONTEXT_X20 0x008
#define THREAD_CONTEXT_X21 0x010
#define THREAD_CONTEXT_X22 0x018
#define THREAD_CONTEXT_X23 0x020
#define THREAD_CONTEXT_X24 0x028
#define THREAD_CONTEXT_X25 0x030
#define THREAD_CONTEXT_X26 0x038
#define THREAD_CONTEXT_X27 0x040
#define THREAD_CONTEXT_X28 0x048
#define THREAD_CONTEXT_X29 0x050
#define THREAD_CONTEXT_LR 0x058
#define THREAD_CONTEXT_SP 0x060
#define THREAD_CONTEXT_FPCR 0x068
#define THREAD_CONTEXT_FPSR 0x06C
#define THREAD_CONTEXT_FPU_REGISTERS 0x070
#define THREAD_CONTEXT_LOCKED 0x0F0
#define THREAD_CONTEXT_X19_X20 THREAD_CONTEXT_X19
#define THREAD_CONTEXT_X21_X22 THREAD_CONTEXT_X21
#define THREAD_CONTEXT_X23_X24 THREAD_CONTEXT_X23
#define THREAD_CONTEXT_X25_X26 THREAD_CONTEXT_X25
#define THREAD_CONTEXT_X27_X28 THREAD_CONTEXT_X27
#define THREAD_CONTEXT_X29_X30 THREAD_CONTEXT_X29
#define THREAD_CONTEXT_LR_SP THREAD_CONTEXT_LR
#define THREAD_CONTEXT_SP_FPCR_FPSR THREAD_CONTEXT_SP
#define THREAD_CONTEXT_FPCR_FPSR THREAD_CONTEXT_FPCR
#define THREAD_CONTEXT_FPU64_Q8 (THREAD_CONTEXT_FPU_REGISTERS + 0x00)
#define THREAD_CONTEXT_FPU64_Q9 (THREAD_CONTEXT_FPU_REGISTERS + 0x10)
#define THREAD_CONTEXT_FPU64_Q10 (THREAD_CONTEXT_FPU_REGISTERS + 0x20)
#define THREAD_CONTEXT_FPU64_Q11 (THREAD_CONTEXT_FPU_REGISTERS + 0x30)
#define THREAD_CONTEXT_FPU64_Q12 (THREAD_CONTEXT_FPU_REGISTERS + 0x40)
#define THREAD_CONTEXT_FPU64_Q13 (THREAD_CONTEXT_FPU_REGISTERS + 0x50)
#define THREAD_CONTEXT_FPU64_Q14 (THREAD_CONTEXT_FPU_REGISTERS + 0x60)
#define THREAD_CONTEXT_FPU64_Q15 (THREAD_CONTEXT_FPU_REGISTERS + 0x70)
#define THREAD_CONTEXT_FPU64_Q8_Q9 THREAD_CONTEXT_FPU64_Q8
#define THREAD_CONTEXT_FPU64_Q10_Q11 THREAD_CONTEXT_FPU64_Q10
#define THREAD_CONTEXT_FPU64_Q12_Q13 THREAD_CONTEXT_FPU64_Q12
#define THREAD_CONTEXT_FPU64_Q14_Q15 THREAD_CONTEXT_FPU64_Q14
#define THREAD_CONTEXT_FPU32_Q4 (THREAD_CONTEXT_FPU_REGISTERS + 0x00)
#define THREAD_CONTEXT_FPU32_Q5 (THREAD_CONTEXT_FPU_REGISTERS + 0x10)
#define THREAD_CONTEXT_FPU32_Q6 (THREAD_CONTEXT_FPU_REGISTERS + 0x20)
#define THREAD_CONTEXT_FPU32_Q7 (THREAD_CONTEXT_FPU_REGISTERS + 0x30)
#define THREAD_CONTEXT_FPU32_Q4_Q5 THREAD_CONTEXT_FPU32_Q4
#define THREAD_CONTEXT_FPU32_Q6_Q7 THREAD_CONTEXT_FPU32_Q6
#define THREAD_FPU64_CONTEXT_Q0 0x000
#define THREAD_FPU64_CONTEXT_Q1 0x010
#define THREAD_FPU64_CONTEXT_Q2 0x020
#define THREAD_FPU64_CONTEXT_Q3 0x030
#define THREAD_FPU64_CONTEXT_Q4 0x040
#define THREAD_FPU64_CONTEXT_Q5 0x050
#define THREAD_FPU64_CONTEXT_Q6 0x060
#define THREAD_FPU64_CONTEXT_Q7 0x070
#define THREAD_FPU64_CONTEXT_Q16 0x080
#define THREAD_FPU64_CONTEXT_Q17 0x090
#define THREAD_FPU64_CONTEXT_Q18 0x0A0
#define THREAD_FPU64_CONTEXT_Q19 0x0B0
#define THREAD_FPU64_CONTEXT_Q20 0x0C0
#define THREAD_FPU64_CONTEXT_Q21 0x0D0
#define THREAD_FPU64_CONTEXT_Q22 0x0E0
#define THREAD_FPU64_CONTEXT_Q23 0x0F0
#define THREAD_FPU64_CONTEXT_Q24 0x100
#define THREAD_FPU64_CONTEXT_Q25 0x110
#define THREAD_FPU64_CONTEXT_Q26 0x120
#define THREAD_FPU64_CONTEXT_Q27 0x130
#define THREAD_FPU64_CONTEXT_Q28 0x140
#define THREAD_FPU64_CONTEXT_Q29 0x150
#define THREAD_FPU64_CONTEXT_Q30 0x160
#define THREAD_FPU64_CONTEXT_Q31 0x170
#define THREAD_FPU64_CONTEXT_Q0_Q1 THREAD_FPU64_CONTEXT_Q0
#define THREAD_FPU64_CONTEXT_Q2_Q3 THREAD_FPU64_CONTEXT_Q2
#define THREAD_FPU64_CONTEXT_Q4_Q5 THREAD_FPU64_CONTEXT_Q4
#define THREAD_FPU64_CONTEXT_Q6_Q7 THREAD_FPU64_CONTEXT_Q6
#define THREAD_FPU64_CONTEXT_Q16_Q17 THREAD_FPU64_CONTEXT_Q16
#define THREAD_FPU64_CONTEXT_Q18_Q19 THREAD_FPU64_CONTEXT_Q18
#define THREAD_FPU64_CONTEXT_Q20_Q21 THREAD_FPU64_CONTEXT_Q20
#define THREAD_FPU64_CONTEXT_Q22_Q23 THREAD_FPU64_CONTEXT_Q22
#define THREAD_FPU64_CONTEXT_Q24_Q25 THREAD_FPU64_CONTEXT_Q24
#define THREAD_FPU64_CONTEXT_Q26_Q27 THREAD_FPU64_CONTEXT_Q26
#define THREAD_FPU64_CONTEXT_Q28_Q29 THREAD_FPU64_CONTEXT_Q28
#define THREAD_FPU64_CONTEXT_Q30_Q31 THREAD_FPU64_CONTEXT_Q30
#define THREAD_FPU32_CONTEXT_Q0 0x000
#define THREAD_FPU32_CONTEXT_Q1 0x010
#define THREAD_FPU32_CONTEXT_Q2 0x020
#define THREAD_FPU32_CONTEXT_Q3 0x030
#define THREAD_FPU32_CONTEXT_Q8 0x040
#define THREAD_FPU32_CONTEXT_Q9 0x050
#define THREAD_FPU32_CONTEXT_Q10 0x060
#define THREAD_FPU32_CONTEXT_Q11 0x070
#define THREAD_FPU32_CONTEXT_Q12 0x080
#define THREAD_FPU32_CONTEXT_Q13 0x090
#define THREAD_FPU32_CONTEXT_Q14 0x0A0
#define THREAD_FPU32_CONTEXT_Q15 0x0B0
#define THREAD_FPU32_CONTEXT_Q0_Q1 THREAD_FPU32_CONTEXT_Q0
#define THREAD_FPU32_CONTEXT_Q2_Q3 THREAD_FPU32_CONTEXT_Q2
#define THREAD_FPU32_CONTEXT_Q8_Q9 THREAD_FPU32_CONTEXT_Q8
#define THREAD_FPU32_CONTEXT_Q10_Q11 THREAD_FPU32_CONTEXT_Q10
#define THREAD_FPU32_CONTEXT_Q12_Q13 THREAD_FPU32_CONTEXT_Q12
#define THREAD_FPU32_CONTEXT_Q14_Q15 THREAD_FPU32_CONTEXT_Q14
/* ams::kern::arch::arm64::KExceptionContext, https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_exception_context.hpp */
#define EXCEPTION_CONTEXT_SIZE 0x120
#define EXCEPTION_CONTEXT_X0 0x000
#define EXCEPTION_CONTEXT_X1 0x008
#define EXCEPTION_CONTEXT_X2 0x010
#define EXCEPTION_CONTEXT_X3 0x018
#define EXCEPTION_CONTEXT_X4 0x020
#define EXCEPTION_CONTEXT_X5 0x028
#define EXCEPTION_CONTEXT_X6 0x030
#define EXCEPTION_CONTEXT_X7 0x038
#define EXCEPTION_CONTEXT_X8 0x040
#define EXCEPTION_CONTEXT_X9 0x048
#define EXCEPTION_CONTEXT_X10 0x050
#define EXCEPTION_CONTEXT_X11 0x058
#define EXCEPTION_CONTEXT_X12 0x060
#define EXCEPTION_CONTEXT_X13 0x068
#define EXCEPTION_CONTEXT_X14 0x070
#define EXCEPTION_CONTEXT_X15 0x078
#define EXCEPTION_CONTEXT_X16 0x080
#define EXCEPTION_CONTEXT_X17 0x088
#define EXCEPTION_CONTEXT_X18 0x090
#define EXCEPTION_CONTEXT_X19 0x098
#define EXCEPTION_CONTEXT_X20 0x0A0
#define EXCEPTION_CONTEXT_X21 0x0A8
#define EXCEPTION_CONTEXT_X22 0x0B0
#define EXCEPTION_CONTEXT_X23 0x0B8
#define EXCEPTION_CONTEXT_X24 0x0C0
#define EXCEPTION_CONTEXT_X25 0x0C8
#define EXCEPTION_CONTEXT_X26 0x0D0
#define EXCEPTION_CONTEXT_X27 0x0D8
#define EXCEPTION_CONTEXT_X28 0x0E0
#define EXCEPTION_CONTEXT_X29 0x0E8
#define EXCEPTION_CONTEXT_X30 0x0F0
#define EXCEPTION_CONTEXT_SP 0x0F8
#define EXCEPTION_CONTEXT_PC 0x100
#define EXCEPTION_CONTEXT_PSR 0x108
#define EXCEPTION_CONTEXT_TPIDR 0x110
#define EXCEPTION_CONTEXT_X0_X1 EXCEPTION_CONTEXT_X0
#define EXCEPTION_CONTEXT_X2_X3 EXCEPTION_CONTEXT_X2
#define EXCEPTION_CONTEXT_X4_X5 EXCEPTION_CONTEXT_X4
#define EXCEPTION_CONTEXT_X6_X7 EXCEPTION_CONTEXT_X6
#define EXCEPTION_CONTEXT_X8_X9 EXCEPTION_CONTEXT_X8
#define EXCEPTION_CONTEXT_X10_X11 EXCEPTION_CONTEXT_X10
#define EXCEPTION_CONTEXT_X12_X13 EXCEPTION_CONTEXT_X12
#define EXCEPTION_CONTEXT_X14_X15 EXCEPTION_CONTEXT_X14
#define EXCEPTION_CONTEXT_X16_X17 EXCEPTION_CONTEXT_X16
#define EXCEPTION_CONTEXT_X18_X19 EXCEPTION_CONTEXT_X18
#define EXCEPTION_CONTEXT_X20_X21 EXCEPTION_CONTEXT_X20
#define EXCEPTION_CONTEXT_X22_X23 EXCEPTION_CONTEXT_X22
#define EXCEPTION_CONTEXT_X24_X25 EXCEPTION_CONTEXT_X24
#define EXCEPTION_CONTEXT_X26_X27 EXCEPTION_CONTEXT_X26
#define EXCEPTION_CONTEXT_X28_X29 EXCEPTION_CONTEXT_X28
#define EXCEPTION_CONTEXT_X30_SP EXCEPTION_CONTEXT_X30
#define EXCEPTION_CONTEXT_PC_PSR EXCEPTION_CONTEXT_PC
#define EXCEPTION_CONTEXT_X9_X10 EXCEPTION_CONTEXT_X9
#define EXCEPTION_CONTEXT_X19_X20 EXCEPTION_CONTEXT_X19
#define EXCEPTION_CONTEXT_X21_X22 EXCEPTION_CONTEXT_X21
#define EXCEPTION_CONTEXT_X23_X24 EXCEPTION_CONTEXT_X23
#define EXCEPTION_CONTEXT_X25_X26 EXCEPTION_CONTEXT_X25
#define EXCEPTION_CONTEXT_X27_X28 EXCEPTION_CONTEXT_X27
#define EXCEPTION_CONTEXT_X29_X30 EXCEPTION_CONTEXT_X29
#define EXCEPTION_CONTEXT_SP_PC EXCEPTION_CONTEXT_SP
#define EXCEPTION_CONTEXT_PSR_TPIDR EXCEPTION_CONTEXT_PSR
/* ams::svc::arch::arm64::ThreadLocalRegion, https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libvapours/include/vapours/svc/arch/arm64/svc_thread_local_region.hpp */
#define THREAD_LOCAL_REGION_MESSAGE_BUFFER 0x000
#define THREAD_LOCAL_REGION_DISABLE_COUNT 0x100
#define THREAD_LOCAL_REGION_INTERRUPT_FLAG 0x102
#define THREAD_LOCAL_REGION_SIZE 0x200
/* ams::kern::init::KInitArguments, https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp */
#define INIT_ARGUMENTS_SIZE 0x28
#define INIT_ARGUMENTS_CPUACTLR 0x00
#define INIT_ARGUMENTS_CPUECTLR 0x08
#define INIT_ARGUMENTS_SP 0x10
#define INIT_ARGUMENTS_ENTRYPOINT 0x18
#define INIT_ARGUMENTS_ARGUMENT 0x20
/* ams::kern::KScheduler (::SchedulingState), https://github.com/Atmosphere-NX/Atmosphere/blob/master/libraries/libmesosphere/include/mesosphere/kern_k_scheduler.hpp */
/* NOTE: Due to constraints on ldarb relative offsets, KSCHEDULER_NEEDS_SCHEDULING cannot trivially be changed, and will require assembly edits. */
#define KSCHEDULER_NEEDS_SCHEDULING 0x00
#define KSCHEDULER_INTERRUPT_TASK_RUNNABLE 0x01
#define KSCHEDULER_HIGHEST_PRIORITY_THREAD 0x18
#define KSCHEDULER_IDLE_THREAD_STACK 0x20
#define KSCHEDULER_PREVIOUS_THREAD 0x28
#define KSCHEDULER_INTERRUPT_TASK_MANAGER 0x30
| 13,496
|
C++
|
.h
| 239
| 55.309623
| 187
| 0.776392
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,853
|
kern_k_hardware_timer.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_hardware_timer.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_hardware_timer_base.hpp>
namespace ams::kern::arch::arm64 {
class KHardwareTimer : public KInterruptTask, public KHardwareTimerBase {
private:
s64 m_maximum_time;
public:
constexpr KHardwareTimer() : KInterruptTask(), KHardwareTimerBase(), m_maximum_time(std::numeric_limits<s64>::max()) { /* ... */ }
public:
/* Public API. */
NOINLINE void Initialize();
NOINLINE void Finalize();
static s64 GetTick() {
return GetCount();
}
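/* Register a task to fire at an absolute tick; the hardware comparator is only */
/* reprogrammed when the new deadline does not exceed m_maximum_time. */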
void RegisterAbsoluteTask(KTimerTask *task, s64 task_time) {
KScopedDisableDispatch dd;
KScopedSpinLock lk(this->GetLock());
if (this->RegisterAbsoluteTaskImpl(task, task_time)) {
if (task_time <= m_maximum_time) {
SetCompareValue(task_time);
EnableInterrupt();
}
}
}
private:
/* Hardware register accessors. */
static ALWAYS_INLINE void InitializeGlobalTimer() {
/* Set kernel control. */
cpu::CounterTimerKernelControlRegisterAccessor(0).SetEl0PctEn(true).Store();
/* Disable the physical timer. */
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(false).SetIMask(false).Store();
/* Set the compare value to the maximum. */
cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor(0).SetCompareValue(std::numeric_limits<u64>::max()).Store();
/* Enable the physical timer, with interrupt masked. */
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(true).SetIMask(true).Store();
}
static ALWAYS_INLINE void EnableInterrupt() {
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(true).SetIMask(false).Store();
}
static ALWAYS_INLINE void DisableInterrupt() {
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(true).SetIMask(true).Store();
}
static ALWAYS_INLINE void StopTimer() {
/* Set the compare value to the maximum. */
cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor(0).SetCompareValue(std::numeric_limits<u64>::max()).Store();
/* Disable the physical timer. */
cpu::CounterTimerPhysicalTimerControlRegisterAccessor(0).SetEnable(false).SetIMask(false).Store();
}
static ALWAYS_INLINE s64 GetCount() {
return cpu::CounterTimerPhysicalCountValueRegisterAccessor().GetCount();
}
static ALWAYS_INLINE void SetCompareValue(s64 value) {
cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor(0).SetCompareValue(static_cast<u64>(value)).Store();
}
public:
virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override {
MESOSPHERE_UNUSED(interrupt_id);
return this;
}
virtual void DoTask() override;
};
}
| 3,954
|
C++
|
.h
| 79
| 38.493671
| 142
| 0.628401
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,854
|
kern_assembly_macros.h
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_assembly_macros.h
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/arch/arm64/kern_assembly_offsets.h>
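/* ENABLE_FPU sets CPACR_EL1.FPEN (bits 21:20) to 0b11, disabling FP/SIMD access traps, */
/* then issues an isb so the change takes effect before the register restores that follow. */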
#define ENABLE_FPU(tmp) \
mrs tmp, cpacr_el1; \
orr tmp, tmp, #0x300000; \
msr cpacr_el1, tmp; \
isb;
#define GET_THREAD_CONTEXT_AND_RESTORE_FPCR_FPSR(ctx, xtmp1, xtmp2, wtmp1, wtmp2) \
add ctx, sp, #(EXCEPTION_CONTEXT_SIZE + THREAD_STACK_PARAMETERS_THREAD_CONTEXT); \
ldp wtmp1, wtmp2, [ctx, #(THREAD_CONTEXT_FPCR_FPSR)]; \
msr fpcr, xtmp1; \
msr fpsr, xtmp2;
#define RESTORE_FPU64_CALLEE_SAVE_REGISTERS(ctx) \
ldp q8, q9, [ctx, #(THREAD_CONTEXT_FPU64_Q8_Q9)]; \
ldp q10, q11, [ctx, #(THREAD_CONTEXT_FPU64_Q10_Q11)]; \
ldp q12, q13, [ctx, #(THREAD_CONTEXT_FPU64_Q12_Q13)]; \
ldp q14, q15, [ctx, #(THREAD_CONTEXT_FPU64_Q14_Q15)];
#define RESTORE_FPU64_CALLER_SAVE_REGISTERS(tmp) \
ldr tmp, [sp, #(EXCEPTION_CONTEXT_SIZE + THREAD_STACK_PARAMETERS_CALLER_SAVE_FPU_REGISTERS)]; \
ldp q0, q1, [tmp, #(THREAD_FPU64_CONTEXT_Q0_Q1)]; \
ldp q2, q3, [tmp, #(THREAD_FPU64_CONTEXT_Q2_Q3)]; \
ldp q4, q5, [tmp, #(THREAD_FPU64_CONTEXT_Q4_Q5)]; \
ldp q6, q7, [tmp, #(THREAD_FPU64_CONTEXT_Q6_Q7)]; \
ldp q16, q17, [tmp, #(THREAD_FPU64_CONTEXT_Q16_Q17)]; \
ldp q18, q19, [tmp, #(THREAD_FPU64_CONTEXT_Q18_Q19)]; \
ldp q20, q21, [tmp, #(THREAD_FPU64_CONTEXT_Q20_Q21)]; \
ldp q22, q23, [tmp, #(THREAD_FPU64_CONTEXT_Q22_Q23)]; \
ldp q24, q25, [tmp, #(THREAD_FPU64_CONTEXT_Q24_Q25)]; \
ldp q26, q27, [tmp, #(THREAD_FPU64_CONTEXT_Q26_Q27)]; \
ldp q28, q29, [tmp, #(THREAD_FPU64_CONTEXT_Q28_Q29)]; \
ldp q30, q31, [tmp, #(THREAD_FPU64_CONTEXT_Q30_Q31)];
#define RESTORE_FPU64_ALL_REGISTERS(ctx, tmp) \
RESTORE_FPU64_CALLEE_SAVE_REGISTERS(ctx) \
RESTORE_FPU64_CALLER_SAVE_REGISTERS(tmp)
#define RESTORE_FPU32_CALLEE_SAVE_REGISTERS(ctx) \
ldp q4, q5, [ctx, #(THREAD_CONTEXT_FPU32_Q4_Q5)]; \
ldp q6, q7, [ctx, #(THREAD_CONTEXT_FPU32_Q6_Q7)];
#define RESTORE_FPU32_CALLER_SAVE_REGISTERS(tmp) \
ldr tmp, [sp, #(EXCEPTION_CONTEXT_SIZE + THREAD_STACK_PARAMETERS_CALLER_SAVE_FPU_REGISTERS)]; \
ldp q0, q1, [tmp, #(THREAD_FPU32_CONTEXT_Q0_Q1)]; \
ldp q2, q3, [tmp, #(THREAD_FPU32_CONTEXT_Q2_Q3)]; \
ldp q8, q9, [tmp, #(THREAD_FPU32_CONTEXT_Q8_Q9)]; \
ldp q10, q11, [tmp, #(THREAD_FPU32_CONTEXT_Q10_Q11)]; \
ldp q12, q13, [tmp, #(THREAD_FPU32_CONTEXT_Q12_Q13)]; \
ldp q14, q15, [tmp, #(THREAD_FPU32_CONTEXT_Q14_Q15)];
#define RESTORE_FPU32_ALL_REGISTERS(ctx, tmp) \
RESTORE_FPU32_CALLEE_SAVE_REGISTERS(ctx) \
RESTORE_FPU32_CALLER_SAVE_REGISTERS(tmp)
#define ENABLE_AND_RESTORE_FPU(ctx, xtmp1, xtmp2, wtmp1, wtmp2, label_32, label_done) \
ENABLE_FPU(xtmp1) \
GET_THREAD_CONTEXT_AND_RESTORE_FPCR_FPSR(ctx, xtmp1, xtmp2, wtmp1, wtmp2) \
\
ldrb wtmp1, [sp, #(EXCEPTION_CONTEXT_SIZE + THREAD_STACK_PARAMETERS_EXCEPTION_FLAGS)]; \
tbz wtmp1, #(THREAD_EXCEPTION_FLAG_BIT_INDEX_IS_FPU_64_BIT), label_32##f; \
\
RESTORE_FPU64_ALL_REGISTERS(ctx, xtmp1) \
\
b label_done##f; \
\
label_32: \
RESTORE_FPU32_ALL_REGISTERS(ctx, xtmp1) \
label_done:
#define ENABLE_AND_RESTORE_FPU64(ctx, xtmp1, xtmp2, wtmp1, wtmp2) \
ENABLE_FPU(xtmp1) \
GET_THREAD_CONTEXT_AND_RESTORE_FPCR_FPSR(ctx, xtmp1, xtmp2, wtmp1, wtmp2) \
RESTORE_FPU64_ALL_REGISTERS(ctx, xtmp1)
#define ENABLE_AND_RESTORE_FPU32(ctx, xtmp1, xtmp2, wtmp1, wtmp2) \
ENABLE_FPU(xtmp1) \
GET_THREAD_CONTEXT_AND_RESTORE_FPCR_FPSR(ctx, xtmp1, xtmp2, wtmp1, wtmp2) \
RESTORE_FPU32_ALL_REGISTERS(ctx, xtmp1)
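/* The dsb nsh; isb pair after eret prevents instructions following the exception return */
/* from being executed speculatively, as a speculation (Spectre-style) mitigation. */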
#define ERET_WITH_SPECULATION_BARRIER \
eret; \
dsb nsh; \
isb
| 6,202
|
C++
|
.h
| 89
| 61.741573
| 103
| 0.460089
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,855
|
kern_k_interrupt_controller.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_controller.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
#if 1
#include <mesosphere/arch/arm/kern_generic_interrupt_controller.hpp>
namespace ams::kern::arch::arm64 {
using ams::kern::arch::arm::GicDistributor;
using ams::kern::arch::arm::GicCpuInterface;
using ams::kern::arch::arm::KInterruptController;
}
#else
#error "Unknown board for KInterruptController"
#endif
| 1,119
|
C++
|
.h
| 29
| 35.482759
| 76
| 0.744229
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,856
|
kern_k_spin_lock.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_spin_lock.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <mesosphere/kern_select_cpu.hpp>
namespace ams::kern::arch::arm64 {
class KNotAlignedSpinLock {
private:
u32 m_packed_tickets;
public:
constexpr KNotAlignedSpinLock() : m_packed_tickets(0) { /* ... */ }
ALWAYS_INLINE void Lock() {
u32 tmp0, tmp1, tmp2;
__asm__ __volatile__(
" prfm pstl1keep, %[m_packed_tickets]\n"
"1:\n"
" ldaxr %w[tmp0], %[m_packed_tickets]\n"
" add %w[tmp2], %w[tmp0], #0x10000\n"
" stxr %w[tmp1], %w[tmp2], %[m_packed_tickets]\n"
" cbnz %w[tmp1], 1b\n"
" \n"
" and %w[tmp1], %w[tmp0], #0xFFFF\n"
" cmp %w[tmp1], %w[tmp0], lsr #16\n"
" b.eq 3f\n"
" sevl\n"
"2:\n"
" wfe\n"
" ldaxrh %w[tmp1], %[m_packed_tickets]\n"
" cmp %w[tmp1], %w[tmp0], lsr #16\n"
" b.ne 2b\n"
"3:\n"
: [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [tmp2]"=&r"(tmp2), [m_packed_tickets]"+Q"(m_packed_tickets)
:
: "cc", "memory"
);
}
ALWAYS_INLINE void Unlock() {
const u32 value = m_packed_tickets + 1;
__asm__ __volatile__(
" stlrh %w[value], %[m_packed_tickets]\n"
: [m_packed_tickets]"+Q"(m_packed_tickets)
: [value]"r"(value)
: "memory"
);
}
};
static_assert(sizeof(KNotAlignedSpinLock) == sizeof(u32));
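/* A reading of the inline assembly above (a sketch, not authoritative documentation): */
/* m_packed_tickets packs the two halfwords of a classic ticket lock. Lock() atomically */
/* adds 0x10000, i.e. takes a ticket from the high halfword, then spins with wfe until */
/* the low halfword (the ticket currently being served) equals the taken ticket. */
/* Unlock() stores served-ticket + 1 to the low halfword with release semantics, */
/* admitting the next waiter. */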
class KAlignedSpinLock {
private:
alignas(cpu::DataCacheLineSize) u16 m_current_ticket;
alignas(cpu::DataCacheLineSize) u16 m_next_ticket;
public:
constexpr KAlignedSpinLock() : m_current_ticket(0), m_next_ticket(0) { /* ... */ }
ALWAYS_INLINE void Lock() {
u32 tmp0, tmp1, got_lock;
__asm__ __volatile__(
" prfm pstl1keep, %[m_next_ticket]\n"
"1:\n"
" ldxrh %w[tmp0], %[m_next_ticket]\n"
" add %w[tmp1], %w[tmp0], #0x1\n"
" stxrh %w[got_lock], %w[tmp1], %[m_next_ticket]\n"
" cbnz %w[got_lock], 1b\n"
" \n"
" sevl\n"
"2:\n"
" wfe\n"
" ldaxrh %w[tmp1], %[m_current_ticket]\n"
" cmp %w[tmp1], %w[tmp0]\n"
" b.ne 2b\n"
: [tmp0]"=&r"(tmp0), [tmp1]"=&r"(tmp1), [got_lock]"=&r"(got_lock), [m_next_ticket]"+Q"(m_next_ticket)
: [m_current_ticket]"Q"(m_current_ticket)
: "cc", "memory"
);
}
ALWAYS_INLINE void Unlock() {
const u32 value = m_current_ticket + 1;
__asm__ __volatile__(
" stlrh %w[value], %[m_current_ticket]\n"
: [m_current_ticket]"+Q"(m_current_ticket)
: [value]"r"(value)
: "memory"
);
}
};
static_assert(sizeof(KAlignedSpinLock) == 2 * cpu::DataCacheLineSize);
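/* KAlignedSpinLock implements the same ticket scheme, but places the served and next */
/* counters on separate data cache lines so that waiters polling m_current_ticket do not */
/* contend with threads taking tickets from m_next_ticket. The KSpinLock alias below */
/* selects the compact, unaligned variant as the kernel default. */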
using KSpinLock = KNotAlignedSpinLock;
}
| 4,300
|
C++
|
.h
| 100
| 28.87
| 121
| 0.446407
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,857
|
kern_k_page_table_entry.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
namespace ams::kern::arch::arm64 {
constexpr size_t BlocksPerContiguousBlock = 0x10;
constexpr size_t BlocksPerTable = PageSize / sizeof(u64);
constexpr size_t L1BlockSize = 1_GB;
constexpr size_t L1ContiguousBlockSize = BlocksPerContiguousBlock * L1BlockSize;
constexpr size_t L2BlockSize = 2_MB;
constexpr size_t L2ContiguousBlockSize = BlocksPerContiguousBlock * L2BlockSize;
constexpr size_t L3BlockSize = PageSize;
constexpr size_t L3ContiguousBlockSize = BlocksPerContiguousBlock * L3BlockSize;
class PageTableEntry {
public:
struct InvalidTag{};
struct TableTag{};
struct BlockTag{};
struct SeparateContiguousTag{};
enum Permission : u64 {
Permission_KernelRWX = ((0ul << 53) | (1ul << 54) | (0ul << 6)),
Permission_KernelRX = ((0ul << 53) | (1ul << 54) | (2ul << 6)),
Permission_KernelR = ((1ul << 53) | (1ul << 54) | (2ul << 6)),
Permission_KernelRW = ((1ul << 53) | (1ul << 54) | (0ul << 6)),
Permission_UserRX = ((1ul << 53) | (0ul << 54) | (3ul << 6)),
Permission_UserR = ((1ul << 53) | (1ul << 54) | (3ul << 6)),
Permission_UserRW = ((1ul << 53) | (1ul << 54) | (1ul << 6)),
};
enum Shareable : u64 {
Shareable_NonShareable = (0 << 8),
Shareable_OuterShareable = (2 << 8),
Shareable_InnerShareable = (3 << 8),
};
/* Official attributes are: */
/* 0x00, 0x04, 0xFF, 0x44. 4-7 are unused. */
enum PageAttribute : u64 {
PageAttribute_Device_nGnRnE = (0 << 2),
PageAttribute_Device_nGnRE = (1 << 2),
PageAttribute_NormalMemory = (2 << 2),
PageAttribute_NormalMemoryNotCacheable = (3 << 2),
};
enum AccessFlag : u64 {
AccessFlag_NotAccessed = (0 << 10),
AccessFlag_Accessed = (1 << 10),
};
enum MappingFlag : u64 {
MappingFlag_NotMapped = (0 << 0),
MappingFlag_Mapped = (1 << 0),
};
enum SoftwareReservedBit : u8 {
SoftwareReservedBit_None = 0,
SoftwareReservedBit_DisableMergeHead = (1u << 0),
SoftwareReservedBit_DisableMergeHeadAndBody = (1u << 1),
SoftwareReservedBit_DisableMergeHeadTail = (1u << 2),
SoftwareReservedBit_Valid = (1u << 3),
};
static constexpr ALWAYS_INLINE std::underlying_type<SoftwareReservedBit>::type EncodeSoftwareReservedBits(bool head, bool head_body, bool tail) {
return (head ? SoftwareReservedBit_DisableMergeHead : SoftwareReservedBit_None) | (head_body ? SoftwareReservedBit_DisableMergeHeadAndBody : SoftwareReservedBit_None) | (tail ? SoftwareReservedBit_DisableMergeHeadTail : SoftwareReservedBit_None);
}
enum ExtensionFlag : u64 {
ExtensionFlag_DisableMergeHead = (static_cast<u64>(SoftwareReservedBit_DisableMergeHead) << 55),
ExtensionFlag_DisableMergeHeadAndBody = (static_cast<u64>(SoftwareReservedBit_DisableMergeHeadAndBody) << 55),
ExtensionFlag_DisableMergeTail = (static_cast<u64>(SoftwareReservedBit_DisableMergeHeadTail) << 55),
ExtensionFlag_Valid = (static_cast<u64>(SoftwareReservedBit_Valid) << 55),
ExtensionFlag_ValidAndMapped = (ExtensionFlag_Valid | MappingFlag_Mapped),
ExtensionFlag_TestTableMask = (ExtensionFlag_Valid | (1ul << 1)),
};
enum Type : u64 {
Type_None = 0x0,
Type_L1Block = ExtensionFlag_Valid,
Type_L1Table = 0x2,
Type_L2Block = ExtensionFlag_Valid,
Type_L2Table = 0x2,
Type_L3Block = ExtensionFlag_TestTableMask,
};
enum ContigType : u64 {
ContigType_NotContiguous = (0x0ul << 52),
ContigType_Contiguous = (0x1ul << 52),
};
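/* Orientation note (summary only; the accessors below are authoritative): the bit */
/* positions used throughout this class follow the AArch64 stage-1 descriptor format, */
/* i.e. AttrIndx in bits [4:2], NS in bit 5, AP[2:1] in bits [7:6], SH in bits [9:8], */
/* AF in bit 10, nG in bit 11, the contiguous hint in bit 52, PXN in bit 53, UXN in */
/* bit 54, and the software-reserved merge-control bits starting at bit 55. */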
protected:
u64 m_attributes;
public:
/* Take in a raw attribute. */
constexpr explicit ALWAYS_INLINE PageTableEntry() : m_attributes() { /* ... */ }
constexpr explicit ALWAYS_INLINE PageTableEntry(u64 attr) : m_attributes(attr) { /* ... */ }
constexpr explicit ALWAYS_INLINE PageTableEntry(InvalidTag) : m_attributes(0) { /* ... */ }
/* Extend a previous attribute. */
constexpr explicit ALWAYS_INLINE PageTableEntry(const PageTableEntry &rhs, u64 new_attr) : m_attributes(rhs.m_attributes | new_attr) { /* ... */ }
/* Construct a new attribute. */
constexpr explicit ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share, MappingFlag m)
: m_attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionFlag_Valid) | static_cast<u64>(m))
{
/* ... */
}
/* Construct a table. */
constexpr explicit ALWAYS_INLINE PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool is_kernel, bool pxn, size_t ref_count)
: PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | (ref_count << 2) | 0x3)
{
/* ... */
}
/* Construct a block. */
constexpr explicit ALWAYS_INLINE PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig, bool page)
: PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | (page ? ExtensionFlag_TestTableMask : ExtensionFlag_Valid))
{
/* ... */
}
constexpr explicit ALWAYS_INLINE PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, SeparateContiguousTag)
: PageTableEntry(attr, GetInteger(phys_addr))
{
/* ... */
}
protected:
constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
return (m_attributes >> offset) & ((1ul << count) - 1);
}
constexpr ALWAYS_INLINE u64 SelectBits(size_t offset, size_t count) const {
return m_attributes & (((1ul << count) - 1) << offset);
}
constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
const u64 mask = ((1ul << count) - 1) << offset;
m_attributes &= ~mask;
m_attributes |= (value & (mask >> offset)) << offset;
}
constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
const u64 mask = ((1ul << count) - 1) << offset;
m_attributes &= ~mask;
m_attributes |= (value & mask);
}
constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
const u64 mask = 1ul << offset;
if (enabled) {
m_attributes |= mask;
} else {
m_attributes &= ~mask;
}
}
public:
constexpr ALWAYS_INLINE u8 GetSoftwareReservedBits() const { return this->GetBits(55, 3); }
constexpr ALWAYS_INLINE bool IsHeadMergeDisabled() const { return (this->GetSoftwareReservedBits() & SoftwareReservedBit_DisableMergeHead) != 0; }
constexpr ALWAYS_INLINE bool IsHeadAndBodyMergeDisabled() const { return (this->GetSoftwareReservedBits() & SoftwareReservedBit_DisableMergeHeadAndBody) != 0; }
constexpr ALWAYS_INLINE bool IsTailMergeDisabled() const { return (this->GetSoftwareReservedBits() & SoftwareReservedBit_DisableMergeHeadTail) != 0; }
constexpr ALWAYS_INLINE bool IsHeadOrHeadAndBodyMergeDisabled() const { return (this->GetSoftwareReservedBits() & (SoftwareReservedBit_DisableMergeHead | SoftwareReservedBit_DisableMergeHeadAndBody)) != 0; }
constexpr ALWAYS_INLINE bool IsUserExecuteNever() const { return this->GetBits(54, 1) != 0; }
constexpr ALWAYS_INLINE bool IsPrivilegedExecuteNever() const { return this->GetBits(53, 1) != 0; }
constexpr ALWAYS_INLINE bool IsContiguous() const { return this->GetBits(52, 1) != 0; }
constexpr ALWAYS_INLINE bool IsGlobal() const { return this->GetBits(11, 1) == 0; }
constexpr ALWAYS_INLINE AccessFlag GetAccessFlag() const { return static_cast<AccessFlag>(this->SelectBits(10, 1)); }
constexpr ALWAYS_INLINE Shareable GetShareable() const { return static_cast<Shareable>(this->SelectBits(8, 2)); }
constexpr ALWAYS_INLINE PageAttribute GetPageAttribute() const { return static_cast<PageAttribute>(this->SelectBits(2, 3)); }
constexpr ALWAYS_INLINE int GetAccessFlagInteger() const { return static_cast<int>(this->GetBits(10, 1)); }
constexpr ALWAYS_INLINE int GetShareableInteger() const { return static_cast<int>(this->GetBits(8, 2)); }
constexpr ALWAYS_INLINE int GetPageAttributeInteger() const { return static_cast<int>(this->GetBits(2, 3)); }
constexpr ALWAYS_INLINE bool IsReadOnly() const { return this->GetBits(7, 1) != 0; }
constexpr ALWAYS_INLINE bool IsUserAccessible() const { return this->GetBits(6, 1) != 0; }
constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBits(5, 1) != 0; }
constexpr ALWAYS_INLINE u64 GetTestTableMask() const { return (m_attributes & ExtensionFlag_TestTableMask); }
constexpr ALWAYS_INLINE bool IsBlock() const { return (m_attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_Valid; }
constexpr ALWAYS_INLINE bool IsPage() const { return (m_attributes & ExtensionFlag_TestTableMask) == ExtensionFlag_TestTableMask; }
constexpr ALWAYS_INLINE bool IsTable() const { return (m_attributes & ExtensionFlag_TestTableMask) == 2; }
constexpr ALWAYS_INLINE bool IsEmpty() const { return (m_attributes & ExtensionFlag_TestTableMask) == 0; }
constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const { return this->SelectBits(12, 36); }
constexpr ALWAYS_INLINE bool IsMappedBlock() const { return this->GetBits(0, 2) == 1; }
constexpr ALWAYS_INLINE bool IsMappedTable() const { return this->GetBits(0, 2) == 3; }
constexpr ALWAYS_INLINE bool IsMapped() const { return this->GetBits(0, 1) != 0; }
constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetPrivilegedExecuteNever(bool en) { this->SetBit(53, en); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetContiguous(bool en) { this->SetBit(52, en); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetGlobal(bool en) { this->SetBit(11, !en); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetAccessFlag(AccessFlag f) { this->SetBitsDirect(10, 1, f); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetShareable(Shareable s) { this->SetBitsDirect(8, 2, s); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetReadOnly(bool en) { this->SetBit(7, en); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetUserAccessible(bool en) { this->SetBit(6, en); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetPageAttribute(PageAttribute a) { this->SetBitsDirect(2, 3, a); return *this; }
constexpr ALWAYS_INLINE decltype(auto) SetMapped(bool m) { static_assert(static_cast<u64>(MappingFlag_Mapped == (1 << 0))); this->SetBit(0, m); return *this; }
constexpr ALWAYS_INLINE size_t GetTableReferenceCount() const { return this->GetBits(2, 10); }
constexpr ALWAYS_INLINE decltype(auto) SetTableReferenceCount(size_t num) { this->SetBits(2, 10, num); return *this; }
constexpr ALWAYS_INLINE decltype(auto) OpenTableReferences(size_t num) { MESOSPHERE_ASSERT(this->GetTableReferenceCount() + num <= BlocksPerTable + 1); return this->SetTableReferenceCount(this->GetTableReferenceCount() + num); }
constexpr ALWAYS_INLINE decltype(auto) CloseTableReferences(size_t num) { MESOSPHERE_ASSERT(this->GetTableReferenceCount() >= num); return this->SetTableReferenceCount(this->GetTableReferenceCount() - num); }
constexpr ALWAYS_INLINE decltype(auto) SetValid() { MESOSPHERE_ASSERT((m_attributes & ExtensionFlag_Valid) == 0); m_attributes |= ExtensionFlag_Valid; return *this; }
constexpr ALWAYS_INLINE u64 GetEntryTemplateForMerge() const {
constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
return m_attributes & BaseMask;
}
constexpr ALWAYS_INLINE bool IsForMerge(u64 attr) const {
constexpr u64 BaseMaskForMerge = ~static_cast<u64>(ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail);
return (m_attributes & BaseMaskForMerge) == attr;
}
static constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateContiguousMask(size_t idx) {
constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
if (idx == 0) {
return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < BlocksPerContiguousBlock - 1) {
return BaseMask;
} else {
return BaseMask | ExtensionFlag_DisableMergeTail;
}
}
constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateContiguous(size_t idx) const {
return m_attributes & GetEntryTemplateForSeparateContiguousMask(idx);
}
static constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparateMask(size_t idx) {
constexpr u64 BaseMask = (0xFFFF000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
if (idx == 0) {
return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < BlocksPerContiguousBlock) {
return BaseMask | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < BlocksPerTable - 1) {
return BaseMask;
} else {
return BaseMask | ExtensionFlag_DisableMergeTail;
}
}
constexpr ALWAYS_INLINE u64 GetEntryTemplateForSeparate(size_t idx) const {
return m_attributes & GetEntryTemplateForSeparateMask(idx);
}
constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafe() const {
return m_attributes;
}
constexpr ALWAYS_INLINE u64 GetRawAttributesUnsafeForSwap() const {
return m_attributes;
}
protected:
constexpr ALWAYS_INLINE u64 GetRawAttributes() const {
return m_attributes;
}
};
static_assert(sizeof(PageTableEntry) == sizeof(u64));
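/* Illustrative sketch (not part of the original header): an attribute for a user */
/* read/write, normal-memory, inner-shareable mapping could be built as */
/* PageTableEntry(PageTableEntry::Permission_UserRW, */
/* PageTableEntry::PageAttribute_NormalMemory, */
/* PageTableEntry::Shareable_InnerShareable, */
/* PageTableEntry::MappingFlag_Mapped) */
/* which, per the constructor above, also sets the access flag and the software */
/* "valid" extension bit. */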
constexpr inline PageTableEntry InvalidPageTableEntry = PageTableEntry(PageTableEntry::InvalidTag{});
constexpr inline size_t MaxPageTableEntries = PageSize / sizeof(PageTableEntry);
class L1PageTableEntry : public PageTableEntry {
public:
constexpr explicit ALWAYS_INLINE L1PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
constexpr explicit ALWAYS_INLINE L1PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool pxn)
: PageTableEntry((0x3ul << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
{
/* ... */
}
constexpr explicit ALWAYS_INLINE L1PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool is_kernel, bool pxn)
: PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
{
/* ... */
}
constexpr explicit ALWAYS_INLINE L1PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig)
: PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
{
/* ... */
}
constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
return this->SelectBits(30, 18);
}
constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
return this->SelectBits(12, 36);
}
constexpr ALWAYS_INLINE bool GetTable(KPhysicalAddress &out) const {
if (this->IsTable()) {
out = this->GetTable();
return true;
} else {
return false;
}
}
static constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2BlockMask(size_t idx) {
constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
if (idx == 0) {
return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < L2ContiguousBlockSize / L2BlockSize) {
return BaseMask | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < (L1BlockSize - L2ContiguousBlockSize) / L2BlockSize) {
return BaseMask;
} else {
return BaseMask | ExtensionFlag_DisableMergeTail;
}
}
constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2Block(size_t idx) const {
return m_attributes & GetEntryTemplateForL2BlockMask(idx);
}
constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
/* Check whether this has the same permission/etc as the desired attributes. */
return L1PageTableEntry(BlockTag{}, this->GetBlock(), rhs, sw_reserved_bits, contig).GetRawAttributes() == this->GetRawAttributes();
}
};
class L2PageTableEntry : public PageTableEntry {
public:
constexpr explicit ALWAYS_INLINE L2PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
constexpr explicit ALWAYS_INLINE L2PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool pxn)
: PageTableEntry((0x3ul << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
{
/* ... */
}
constexpr explicit ALWAYS_INLINE L2PageTableEntry(TableTag, KPhysicalAddress phys_addr, bool is_kernel, bool pxn)
: PageTableEntry(((is_kernel ? 0x3ul : 0) << 60) | (static_cast<u64>(pxn) << 59) | GetInteger(phys_addr) | 0x3)
{
/* ... */
}
constexpr explicit ALWAYS_INLINE L2PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig)
: PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x1)
{
/* ... */
}
constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
return this->SelectBits(21, 27);
}
constexpr ALWAYS_INLINE KPhysicalAddress GetTable() const {
return this->SelectBits(12, 36);
}
constexpr ALWAYS_INLINE bool GetTable(KPhysicalAddress &out) const {
if (this->IsTable()) {
out = this->GetTable();
return true;
} else {
return false;
}
}
static constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2BlockMask(size_t idx) {
constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
if (idx == 0) {
return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < (L2ContiguousBlockSize / L2BlockSize) - 1) {
return BaseMask;
} else {
return BaseMask | ExtensionFlag_DisableMergeTail;
}
}
constexpr ALWAYS_INLINE u64 GetEntryTemplateForL2Block(size_t idx) const {
return m_attributes & GetEntryTemplateForL2BlockMask(idx);
}
static constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3BlockMask(size_t idx) {
constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
if (idx == 0) {
return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < L3ContiguousBlockSize / L3BlockSize) {
return BaseMask | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < (L2BlockSize - L3ContiguousBlockSize) / L3BlockSize) {
return BaseMask;
} else {
return BaseMask | ExtensionFlag_DisableMergeTail;
}
}
constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3Block(size_t idx) const {
return m_attributes & GetEntryTemplateForL3BlockMask(idx);
}
constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
/* Check whether this has the same permission/etc as the desired attributes. */
return L2PageTableEntry(BlockTag{}, this->GetBlock(), rhs, sw_reserved_bits, contig).GetRawAttributes() == this->GetRawAttributes();
}
};
class L3PageTableEntry : public PageTableEntry {
public:
constexpr explicit ALWAYS_INLINE L3PageTableEntry(InvalidTag) : PageTableEntry(InvalidTag{}) { /* ... */ }
constexpr explicit ALWAYS_INLINE L3PageTableEntry(BlockTag, KPhysicalAddress phys_addr, const PageTableEntry &attr, u8 sw_reserved_bits, bool contig)
: PageTableEntry(attr, (static_cast<u64>(sw_reserved_bits) << 55) | (static_cast<u64>(contig) << 52) | GetInteger(phys_addr) | 0x3)
{
/* ... */
}
constexpr ALWAYS_INLINE bool IsBlock() const { return (GetRawAttributes() & ExtensionFlag_TestTableMask) == ExtensionFlag_TestTableMask; }
constexpr ALWAYS_INLINE bool IsMappedBlock() const { return this->GetBits(0, 2) == 3; }
constexpr ALWAYS_INLINE KPhysicalAddress GetBlock() const {
return this->SelectBits(12, 36);
}
static constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3BlockMask(size_t idx) {
constexpr u64 BaseMask = (0xFFF0000000000FFFul & ~static_cast<u64>((0x1ul << 52) | ExtensionFlag_TestTableMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody | ExtensionFlag_DisableMergeTail));
if (idx == 0) {
return BaseMask | ExtensionFlag_DisableMergeHead | ExtensionFlag_DisableMergeHeadAndBody;
} else if (idx < (L3ContiguousBlockSize / L3BlockSize) - 1) {
return BaseMask;
} else {
return BaseMask | ExtensionFlag_DisableMergeTail;
}
}
constexpr ALWAYS_INLINE u64 GetEntryTemplateForL3Block(size_t idx) const {
return m_attributes & GetEntryTemplateForL3BlockMask(idx);
}
constexpr ALWAYS_INLINE bool IsCompatibleWithAttribute(const PageTableEntry &rhs, u8 sw_reserved_bits, bool contig) const {
/* Check whether this has the same permission/etc as the desired attributes. */
return L3PageTableEntry(BlockTag{}, this->GetBlock(), rhs, sw_reserved_bits, contig).GetRawAttributes() == this->GetRawAttributes();
}
};
constexpr inline L1PageTableEntry InvalidL1PageTableEntry = L1PageTableEntry(PageTableEntry::InvalidTag{});
constexpr inline L2PageTableEntry InvalidL2PageTableEntry = L2PageTableEntry(PageTableEntry::InvalidTag{});
constexpr inline L3PageTableEntry InvalidL3PageTableEntry = L3PageTableEntry(PageTableEntry::InvalidTag{});
}
| 27,568
|
C++
|
.h
| 396
| 55.613636
| 262
| 0.607595
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,858
|
kern_cpu_system_registers.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_cpu_system_registers.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
namespace ams::kern::arch::arm64::cpu {
#define MESOSPHERE_CPU_GET_SYSREG(name) \
({ \
u64 temp_value; \
__asm__ __volatile__("mrs %0, " #name "" : "=&r"(temp_value) :: "memory"); \
temp_value; \
})
#define MESOSPHERE_CPU_SET_SYSREG(name, value) \
({ \
__asm__ __volatile__("msr " #name ", %0" :: "r"(value) : "memory", "cc"); \
})
#define MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(name, reg_name) \
ALWAYS_INLINE void Set##name(u64 value) { MESOSPHERE_CPU_SET_SYSREG(reg_name, value); } \
ALWAYS_INLINE u64 Get##name() { return MESOSPHERE_CPU_GET_SYSREG(reg_name); }
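/* Each invocation below generates a Get/Set pair wrapping mrs/msr. For example */
/* (sketch of the expansion), MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr0El1, ttbr0_el1) */
/* yields in this namespace: */
/* void SetTtbr0El1(u64 value); // msr ttbr0_el1, value */
/* u64 GetTtbr0El1(); // mrs value, ttbr0_el1 */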
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr0El1, ttbr0_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Ttbr1El1, ttbr1_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TcrEl1, tcr_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MairEl1, mair_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TpidrEl1, tpidr_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(VbarEl1, vbar_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(FarEl1, far_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(ParEl1, par_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(SctlrEl1, sctlr_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CpuActlrEl1, s3_1_c15_c2_0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CpuEctlrEl1, s3_1_c15_c2_1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CsselrEl1, csselr_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CcsidrEl1, ccsidr_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(OslarEl1, oslar_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TpidrEl0, tpidr_el0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(TpidrRoEl0, tpidrro_el0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(ElrEl1, elr_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(EsrEl1, esr_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(SpsrEl1, spsr_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Afsr0El1, afsr0_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Afsr1El1, afsr1_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(MdscrEl1, mdscr_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CpacrEl1, cpacr_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(ContextidrEl1, contextidr_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CntkCtlEl1, cntkctl_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CntpCtlEl0, cntp_ctl_el0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CntpCvalEl0, cntp_cval_el0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(CntvCvalEl0, cntv_cval_el0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(Daif, daif)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(SpEl0, sp_el0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(IdAa64Dfr0El1, id_aa64dfr0_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmcrEl0, pmcr_el0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmUserEnrEl0, pmuserenr_el0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmcCntrEl0, pmccntr_el0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmSelrEl0, pmselr_el0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmcCfiltrEl0, pmccfiltr_el0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmIntEnSetEl1, pmintenset_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmCntEnSetEl0, pmcntenset_el0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmOvsSetEl0, pmovsset_el0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmIntEnClrEl1, pmintenclr_el1)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmCntEnClrEl0, pmcntenclr_el0)
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmOvsClrEl0, pmovsclr_el0)
#define FOR_I_IN_0_TO_30(HANDLER, ...) \
HANDLER(0, ## __VA_ARGS__) HANDLER(1, ## __VA_ARGS__) HANDLER(2, ## __VA_ARGS__) HANDLER(3, ## __VA_ARGS__) \
HANDLER(4, ## __VA_ARGS__) HANDLER(5, ## __VA_ARGS__) HANDLER(6, ## __VA_ARGS__) HANDLER(7, ## __VA_ARGS__) \
HANDLER(8, ## __VA_ARGS__) HANDLER(9, ## __VA_ARGS__) HANDLER(10, ## __VA_ARGS__) HANDLER(11, ## __VA_ARGS__) \
HANDLER(12, ## __VA_ARGS__) HANDLER(13, ## __VA_ARGS__) HANDLER(14, ## __VA_ARGS__) HANDLER(15, ## __VA_ARGS__) \
HANDLER(16, ## __VA_ARGS__) HANDLER(17, ## __VA_ARGS__) HANDLER(18, ## __VA_ARGS__) HANDLER(19, ## __VA_ARGS__) \
HANDLER(20, ## __VA_ARGS__) HANDLER(21, ## __VA_ARGS__) HANDLER(22, ## __VA_ARGS__) HANDLER(23, ## __VA_ARGS__) \
HANDLER(24, ## __VA_ARGS__) HANDLER(25, ## __VA_ARGS__) HANDLER(26, ## __VA_ARGS__) HANDLER(27, ## __VA_ARGS__) \
HANDLER(28, ## __VA_ARGS__) HANDLER(29, ## __VA_ARGS__) HANDLER(30, ## __VA_ARGS__)
#define MESOSPHERE_CPU_DEFINE_PMEV_ACCESSORS(ID, ...) \
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevCntr##ID##El0, pmevcntr##ID##_el0) \
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(PmevTyper##ID##El0, pmevtyper##ID##_el0)
FOR_I_IN_0_TO_30(MESOSPHERE_CPU_DEFINE_PMEV_ACCESSORS)
#undef MESOSPHERE_CPU_DEFINE_PMEV_ACCESSORS
#undef FOR_I_IN_0_TO_30
#define FOR_I_IN_0_TO_15(HANDLER, ...) \
HANDLER(0, ## __VA_ARGS__) HANDLER(1, ## __VA_ARGS__) HANDLER(2, ## __VA_ARGS__) HANDLER(3, ## __VA_ARGS__) \
HANDLER(4, ## __VA_ARGS__) HANDLER(5, ## __VA_ARGS__) HANDLER(6, ## __VA_ARGS__) HANDLER(7, ## __VA_ARGS__) \
HANDLER(8, ## __VA_ARGS__) HANDLER(9, ## __VA_ARGS__) HANDLER(10, ## __VA_ARGS__) HANDLER(11, ## __VA_ARGS__) \
HANDLER(12, ## __VA_ARGS__) HANDLER(13, ## __VA_ARGS__) HANDLER(14, ## __VA_ARGS__) HANDLER(15, ## __VA_ARGS__)
#define MESOSPHERE_CPU_DEFINE_DBG_SYSREG_ACCESSORS(ID, ...) \
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgWcr##ID##El1, dbgwcr##ID##_el1) \
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgWvr##ID##El1, dbgwvr##ID##_el1) \
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgBcr##ID##El1, dbgbcr##ID##_el1) \
MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS(DbgBvr##ID##El1, dbgbvr##ID##_el1)
FOR_I_IN_0_TO_15(MESOSPHERE_CPU_DEFINE_DBG_SYSREG_ACCESSORS)
#undef MESOSPHERE_CPU_DEFINE_DBG_SYSREG_ACCESSORS
/* Base class for register accessors. */
class GenericRegisterAccessorBase {
NON_COPYABLE(GenericRegisterAccessorBase);
NON_MOVEABLE(GenericRegisterAccessorBase);
private:
u64 m_value;
public:
constexpr ALWAYS_INLINE GenericRegisterAccessorBase(u64 v) : m_value(v) { /* ... */ }
protected:
constexpr ALWAYS_INLINE u64 GetValue() const {
return m_value;
}
constexpr ALWAYS_INLINE u64 GetBits(size_t offset, size_t count) const {
return (m_value >> offset) & ((1ul << count) - 1);
}
constexpr ALWAYS_INLINE void SetBits(size_t offset, size_t count, u64 value) {
const u64 mask = ((1ul << count) - 1) << offset;
m_value &= ~mask;
m_value |= (value & (mask >> offset)) << offset;
}
constexpr ALWAYS_INLINE void SetBitsDirect(size_t offset, size_t count, u64 value) {
const u64 mask = ((1ul << count) - 1) << offset;
m_value &= ~mask;
m_value |= (value & mask);
}
constexpr ALWAYS_INLINE void SetBit(size_t offset, bool enabled) {
const u64 mask = 1ul << offset;
if (enabled) {
m_value |= mask;
} else {
m_value &= ~mask;
}
}
};
template<typename Derived>
class GenericRegisterAccessor : public GenericRegisterAccessorBase {
public:
constexpr ALWAYS_INLINE GenericRegisterAccessor(u64 v) : GenericRegisterAccessorBase(v) { /* ... */ }
protected:
ALWAYS_INLINE void Store() const {
static_cast<const Derived *>(this)->Store();
}
};
#define MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(name) class name##RegisterAccessor : public GenericRegisterAccessor<name##RegisterAccessor>
#define MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(accessor, reg_name) \
ALWAYS_INLINE accessor##RegisterAccessor() : GenericRegisterAccessor(MESOSPHERE_CPU_GET_SYSREG(reg_name)) { /* ... */ } \
constexpr ALWAYS_INLINE accessor##RegisterAccessor(u64 v) : GenericRegisterAccessor(v) { /* ... */ } \
\
ALWAYS_INLINE void Store() { const u64 v = this->GetValue(); MESOSPHERE_CPU_SET_SYSREG(reg_name, v); }
/* Accessors. */
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MemoryAccessIndirection) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MemoryAccessIndirection, mair_el1)
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(TranslationControl) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(TranslationControl, tcr_el1)
constexpr ALWAYS_INLINE size_t GetT0Size() const {
const size_t shift_value = this->GetBits(0, 6);
return size_t(1) << (size_t(64) - shift_value);
}
constexpr ALWAYS_INLINE size_t GetT1Size() const {
const size_t shift_value = this->GetBits(16, 6);
return size_t(1) << (size_t(64) - shift_value);
}
constexpr ALWAYS_INLINE bool GetEpd0() const {
return this->GetBits(7, 1) != 0;
}
constexpr ALWAYS_INLINE decltype(auto) SetEpd0(bool set) {
this->SetBit(7, set);
return *this;
}
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ArchitecturalFeatureAccessControl) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ArchitecturalFeatureAccessControl, cpacr_el1)
constexpr ALWAYS_INLINE decltype(auto) SetFpEnabled(bool en) {
if (en) {
this->SetBits(20, 2, 0x3);
} else {
this->SetBits(20, 2, 0x0);
}
return *this;
}
constexpr ALWAYS_INLINE bool IsFpEnabled() {
return this->GetBits(20, 2) != 0;
}
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(DebugFeature) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(DebugFeature, id_aa64dfr0_el1)
constexpr ALWAYS_INLINE size_t GetNumWatchpoints() const {
return this->GetBits(20, 4);
}
constexpr ALWAYS_INLINE size_t GetNumBreakpoints() const {
return this->GetBits(12, 4);
}
constexpr ALWAYS_INLINE size_t GetNumContextAwareBreakpoints() const {
return this->GetBits(28, 4);
}
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MonitorDebugSystemControl) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MonitorDebugSystemControl, mdscr_el1)
constexpr ALWAYS_INLINE bool GetMde() const {
return this->GetBits(15, 1) != 0;
}
constexpr ALWAYS_INLINE bool GetTdcc() const {
return this->GetBits(12, 1) != 0;
}
constexpr ALWAYS_INLINE bool GetSoftwareStep() const {
return this->GetBits(0, 1) != 0;
}
constexpr ALWAYS_INLINE decltype(auto) SetMde(bool set) {
this->SetBit(15, set);
return *this;
}
constexpr ALWAYS_INLINE decltype(auto) SetTdcc(bool set) {
this->SetBit(12, set);
return *this;
}
constexpr ALWAYS_INLINE decltype(auto) SetSoftwareStep(bool set) {
this->SetBit(0, set);
return *this;
}
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MultiprocessorAffinity) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MultiprocessorAffinity, mpidr_el1)
constexpr ALWAYS_INLINE u64 GetAff0() const {
return this->GetBits(0, 8);
}
constexpr ALWAYS_INLINE u64 GetAff1() const {
return this->GetBits(8, 8);
}
constexpr ALWAYS_INLINE u64 GetAff2() const {
return this->GetBits(16, 8);
}
constexpr ALWAYS_INLINE u64 GetAff3() const {
return this->GetBits(32, 8);
}
constexpr ALWAYS_INLINE u64 GetCpuOnArgument() const {
constexpr u64 Mask = 0x000000FF00FFFF00ul;
return this->GetValue() & Mask;
}
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ThreadId) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ThreadId, tpidr_el1)
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(OsLockAccess) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(OsLockAccess, oslar_el1)
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(ContextId) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(ContextId, contextidr_el1)
constexpr ALWAYS_INLINE decltype(auto) SetProcId(u32 proc_id) {
this->SetBits(0, BITSIZEOF(proc_id), proc_id);
return *this;
}
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(MainId) {
public:
enum class Implementer {
ArmLimited = 0x41,
};
enum class PrimaryPartNumber {
CortexA53 = 0xD03,
CortexA57 = 0xD07,
};
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(MainId, midr_el1)
public:
constexpr ALWAYS_INLINE Implementer GetImplementer() const {
return static_cast<Implementer>(this->GetBits(24, 8));
}
constexpr ALWAYS_INLINE u64 GetVariant() const {
return this->GetBits(20, 4);
}
constexpr ALWAYS_INLINE u64 GetArchitecture() const {
return this->GetBits(16, 4);
}
constexpr ALWAYS_INLINE PrimaryPartNumber GetPrimaryPartNumber() const {
return static_cast<PrimaryPartNumber>(this->GetBits(4, 12));
}
constexpr ALWAYS_INLINE u64 GetRevision() const {
return this->GetBits(0, 4);
}
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(SystemControl) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(SystemControl, sctlr_el1)
constexpr ALWAYS_INLINE decltype(auto) SetWxn(bool en) {
this->SetBit(19, en);
return *this;
}
constexpr ALWAYS_INLINE bool GetWxn() const {
return this->GetBits(19, 1) != 0;
}
};
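/* The accessor classes follow a read-modify-write pattern: the default constructor */
/* reads the register, setters mutate the cached value and return *this, and Store() */
/* writes the value back. An illustrative, hypothetical call site: */
/* SystemControlRegisterAccessor().SetWxn(true).Store(); */
/* which reads sctlr_el1, sets SCTLR_EL1.WXN, and writes the register back. */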
/* Accessors for timer registers. */
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerKernelControl) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerKernelControl, cntkctl_el1)
constexpr ALWAYS_INLINE decltype(auto) SetEl0PctEn(bool en) {
this->SetBit(0, en);
return *this;
}
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerPhysicalTimerControl) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerPhysicalTimerControl, cntp_ctl_el0)
constexpr ALWAYS_INLINE decltype(auto) SetEnable(bool en) {
this->SetBit(0, en);
return *this;
}
constexpr ALWAYS_INLINE decltype(auto) SetIMask(bool en) {
this->SetBit(1, en);
return *this;
}
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerPhysicalTimerCompareValue) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerPhysicalTimerCompareValue, cntp_cval_el0)
constexpr ALWAYS_INLINE u64 GetCompareValue() {
return this->GetValue();
}
constexpr ALWAYS_INLINE decltype(auto) SetCompareValue(u64 value) {
this->SetBits(0, BITSIZEOF(value), value);
return *this;
}
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CounterTimerPhysicalCountValue) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CounterTimerPhysicalCountValue, cntpct_el0)
constexpr ALWAYS_INLINE u64 GetCount() {
return this->GetValue();
}
};
/* Accessors for cache registers. */
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheLineId) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CacheLineId, clidr_el1)
public:
constexpr ALWAYS_INLINE int GetLevelsOfCoherency() const {
return static_cast<int>(this->GetBits(24, 3));
}
constexpr ALWAYS_INLINE int GetLevelsOfUnification() const {
return static_cast<int>(this->GetBits(21, 3));
}
/* TODO: Other bitfield accessors? */
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(CacheSizeId) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(CacheSizeId, ccsidr_el1)
public:
constexpr ALWAYS_INLINE int GetNumberOfSets() const {
return static_cast<int>(this->GetBits(13, 15));
}
constexpr ALWAYS_INLINE int GetAssociativity() const {
return static_cast<int>(this->GetBits(3, 10));
}
constexpr ALWAYS_INLINE int GetLineSize() const {
return static_cast<int>(this->GetBits(0, 3));
}
/* TODO: Other bitfield accessors? */
};
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS(PerformanceMonitorsControl) {
public:
MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS(PerformanceMonitorsControl, pmcr_el0)
public:
constexpr ALWAYS_INLINE u64 GetN() const {
return this->GetBits(11, 5);
}
constexpr ALWAYS_INLINE decltype(auto) SetEventCounterReset(bool en) {
this->SetBit(1, en);
return *this;
}
constexpr ALWAYS_INLINE decltype(auto) SetCycleCounterReset(bool en) {
this->SetBit(2, en);
return *this;
}
/* TODO: Other bitfield accessors? */
};
#undef FOR_I_IN_0_TO_15
#undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS_FUNCTIONS
#undef MESOSPHERE_CPU_SYSREG_ACCESSOR_CLASS
#undef MESOSPHERE_CPU_DEFINE_SYSREG_ACCESSORS
#undef MESOSPHERE_CPU_GET_SYSREG
#undef MESOSPHERE_CPU_SET_SYSREG
}
| 20,102
|
C++
|
.h
| 392
| 40.201531
| 140
| 0.589117
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,859
|
kern_k_interrupt_manager.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_manager.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_spin_lock.hpp>
#include <mesosphere/kern_k_interrupt_task.hpp>
#include <mesosphere/kern_select_interrupt_controller.hpp>
namespace ams::kern::arch::arm64 {
class KInterruptManager {
NON_COPYABLE(KInterruptManager);
NON_MOVEABLE(KInterruptManager);
private:
struct KCoreLocalInterruptEntry {
KInterruptHandler *handler;
bool manually_cleared;
bool needs_clear;
u8 priority;
constexpr KCoreLocalInterruptEntry()
: handler(nullptr), manually_cleared(false), needs_clear(false), priority(KInterruptController::PriorityLevel_Low)
{
/* ... */
}
};
struct KGlobalInterruptEntry {
KInterruptHandler *handler;
bool manually_cleared;
bool needs_clear;
constexpr KGlobalInterruptEntry() : handler(nullptr), manually_cleared(false), needs_clear(false) { /* ... */ }
};
private:
KCoreLocalInterruptEntry m_core_local_interrupts[cpu::NumCores][KInterruptController::NumLocalInterrupts]{};
KInterruptController m_interrupt_controller{};
KInterruptController::LocalState m_local_states[cpu::NumCores]{};
bool m_local_state_saved[cpu::NumCores]{};
mutable KSpinLock m_global_interrupt_lock{};
KGlobalInterruptEntry m_global_interrupts[KInterruptController::NumGlobalInterrupts]{};
KInterruptController::GlobalState m_global_state{};
bool m_global_state_saved{};
private:
ALWAYS_INLINE KSpinLock &GetGlobalInterruptLock() const { return m_global_interrupt_lock; }
ALWAYS_INLINE KGlobalInterruptEntry &GetGlobalInterruptEntry(s32 irq) { return m_global_interrupts[KInterruptController::GetGlobalInterruptIndex(irq)]; }
ALWAYS_INLINE KCoreLocalInterruptEntry &GetLocalInterruptEntry(s32 irq) { return m_core_local_interrupts[GetCurrentCoreId()][KInterruptController::GetLocalInterruptIndex(irq)]; }
bool OnHandleInterrupt();
public:
constexpr KInterruptManager() = default;
NOINLINE void Initialize(s32 core_id);
NOINLINE void Finalize(s32 core_id);
NOINLINE void Save(s32 core_id);
NOINLINE void Restore(s32 core_id);
bool IsInterruptDefined(s32 irq) const {
return m_interrupt_controller.IsInterruptDefined(irq);
}
bool IsGlobal(s32 irq) const {
return m_interrupt_controller.IsGlobal(irq);
}
bool IsLocal(s32 irq) const {
return m_interrupt_controller.IsLocal(irq);
}
NOINLINE Result BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level);
NOINLINE Result UnbindHandler(s32 irq, s32 core);
NOINLINE Result ClearInterrupt(s32 irq, s32 core_id);
ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq, u64 core_mask) {
m_interrupt_controller.SendInterProcessorInterrupt(irq, core_mask);
}
ALWAYS_INLINE void SendInterProcessorInterrupt(s32 irq) {
m_interrupt_controller.SendInterProcessorInterrupt(irq);
}
static void HandleInterrupt(bool user_mode);
private:
Result BindGlobal(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level);
Result BindLocal(KInterruptHandler *handler, s32 irq, s32 priority, bool manual_clear);
Result UnbindGlobal(s32 irq);
Result UnbindLocal(s32 irq);
Result ClearGlobal(s32 irq);
Result ClearLocal(s32 irq);
private:
[[nodiscard]] static ALWAYS_INLINE u32 GetInterruptsEnabledState() {
u64 intr_state;
__asm__ __volatile__("mrs %[intr_state], daif\n"
"ubfx %[intr_state], %[intr_state], #7, #1"
: [intr_state]"=r"(intr_state)
:: "memory");
return intr_state;
}
public:
static ALWAYS_INLINE void EnableInterrupts() {
__asm__ __volatile__("msr daifclr, #2" ::: "memory");
}
static ALWAYS_INLINE void DisableInterrupts() {
__asm__ __volatile__("msr daifset, #2" ::: "memory");
}
[[nodiscard]] static ALWAYS_INLINE u32 GetInterruptsEnabledStateAndDisableInterrupts() {
const auto intr_state = GetInterruptsEnabledState();
DisableInterrupts();
return intr_state;
}
[[nodiscard]] static ALWAYS_INLINE u32 GetInterruptsEnabledStateAndEnableInterrupts() {
const auto intr_state = GetInterruptsEnabledState();
EnableInterrupts();
return intr_state;
}
static ALWAYS_INLINE void RestoreInterrupts(u32 intr_state) {
u64 tmp;
__asm__ __volatile__("mrs %[tmp], daif\n"
"bfi %[tmp], %[intr_state], #7, #1\n"
"msr daif, %[tmp]"
: [tmp]"=&r"(tmp)
: [intr_state]"r"(intr_state)
: "memory");
}
static ALWAYS_INLINE bool AreInterruptsEnabled() {
return GetInterruptsEnabledState() == 0;
}
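/* These helpers manipulate only DAIF.I (bit 7): "msr daifset/daifclr, #2" masks or */
/* unmasks IRQs, and GetInterruptsEnabledState() extracts that bit, so a value of 0 */
/* means interrupts are enabled. A typical (hypothetical) critical-section sketch: */
/* const u32 state = KInterruptManager::GetInterruptsEnabledStateAndDisableInterrupts(); */
/* ...work with IRQs masked... */
/* KInterruptManager::RestoreInterrupts(state); */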
};
}
| 6,508
|
C++
|
.h
| 129
| 37.108527
| 190
| 0.596508
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,860
|
kern_k_exception_context.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_exception_context.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
namespace ams::kern::arch::arm64 {
struct KExceptionContext {
u64 x[(30 - 0) + 1];
u64 sp;
u64 pc;
u32 psr;
u32 write;
u64 tpidr;
u64 reserved;
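/* Note: the check below keys off SPSR_EL1.M[4] (0x10), which is 0 for an exception */
/* taken from AArch64 and 1 for AArch32; that is why the aarch32 branch reads the */
/* banked fp/sp/lr out of r11/r13/r14. */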
constexpr void GetSvcThreadContext(ams::svc::LastThreadContext *out) const {
if ((this->psr & 0x10) == 0) {
/* aarch64 thread. */
out->fp = this->x[29];
out->sp = this->sp;
out->lr = this->x[30];
out->pc = this->pc;
} else {
/* aarch32 thread. */
out->fp = this->x[11];
out->sp = this->x[13];
out->lr = this->x[14];
out->pc = this->pc;
}
}
};
static_assert(sizeof(KExceptionContext) == EXCEPTION_CONTEXT_SIZE);
static_assert(AMS_OFFSETOF(KExceptionContext, x[ 0]) == EXCEPTION_CONTEXT_X0);
static_assert(AMS_OFFSETOF(KExceptionContext, x[ 1]) == EXCEPTION_CONTEXT_X1);
static_assert(AMS_OFFSETOF(KExceptionContext, x[ 2]) == EXCEPTION_CONTEXT_X2);
static_assert(AMS_OFFSETOF(KExceptionContext, x[ 3]) == EXCEPTION_CONTEXT_X3);
static_assert(AMS_OFFSETOF(KExceptionContext, x[ 4]) == EXCEPTION_CONTEXT_X4);
static_assert(AMS_OFFSETOF(KExceptionContext, x[ 5]) == EXCEPTION_CONTEXT_X5);
static_assert(AMS_OFFSETOF(KExceptionContext, x[ 6]) == EXCEPTION_CONTEXT_X6);
static_assert(AMS_OFFSETOF(KExceptionContext, x[ 7]) == EXCEPTION_CONTEXT_X7);
static_assert(AMS_OFFSETOF(KExceptionContext, x[ 8]) == EXCEPTION_CONTEXT_X8);
static_assert(AMS_OFFSETOF(KExceptionContext, x[ 9]) == EXCEPTION_CONTEXT_X9);
static_assert(AMS_OFFSETOF(KExceptionContext, x[10]) == EXCEPTION_CONTEXT_X10);
static_assert(AMS_OFFSETOF(KExceptionContext, x[11]) == EXCEPTION_CONTEXT_X11);
static_assert(AMS_OFFSETOF(KExceptionContext, x[12]) == EXCEPTION_CONTEXT_X12);
static_assert(AMS_OFFSETOF(KExceptionContext, x[13]) == EXCEPTION_CONTEXT_X13);
static_assert(AMS_OFFSETOF(KExceptionContext, x[14]) == EXCEPTION_CONTEXT_X14);
static_assert(AMS_OFFSETOF(KExceptionContext, x[15]) == EXCEPTION_CONTEXT_X15);
static_assert(AMS_OFFSETOF(KExceptionContext, x[16]) == EXCEPTION_CONTEXT_X16);
static_assert(AMS_OFFSETOF(KExceptionContext, x[17]) == EXCEPTION_CONTEXT_X17);
static_assert(AMS_OFFSETOF(KExceptionContext, x[18]) == EXCEPTION_CONTEXT_X18);
static_assert(AMS_OFFSETOF(KExceptionContext, x[19]) == EXCEPTION_CONTEXT_X19);
static_assert(AMS_OFFSETOF(KExceptionContext, x[20]) == EXCEPTION_CONTEXT_X20);
static_assert(AMS_OFFSETOF(KExceptionContext, x[21]) == EXCEPTION_CONTEXT_X21);
static_assert(AMS_OFFSETOF(KExceptionContext, x[22]) == EXCEPTION_CONTEXT_X22);
static_assert(AMS_OFFSETOF(KExceptionContext, x[23]) == EXCEPTION_CONTEXT_X23);
static_assert(AMS_OFFSETOF(KExceptionContext, x[24]) == EXCEPTION_CONTEXT_X24);
static_assert(AMS_OFFSETOF(KExceptionContext, x[25]) == EXCEPTION_CONTEXT_X25);
static_assert(AMS_OFFSETOF(KExceptionContext, x[26]) == EXCEPTION_CONTEXT_X26);
static_assert(AMS_OFFSETOF(KExceptionContext, x[27]) == EXCEPTION_CONTEXT_X27);
static_assert(AMS_OFFSETOF(KExceptionContext, x[28]) == EXCEPTION_CONTEXT_X28);
static_assert(AMS_OFFSETOF(KExceptionContext, x[29]) == EXCEPTION_CONTEXT_X29);
static_assert(AMS_OFFSETOF(KExceptionContext, x[30]) == EXCEPTION_CONTEXT_X30);
static_assert(AMS_OFFSETOF(KExceptionContext, sp) == EXCEPTION_CONTEXT_SP);
static_assert(AMS_OFFSETOF(KExceptionContext, pc) == EXCEPTION_CONTEXT_PC);
static_assert(AMS_OFFSETOF(KExceptionContext, psr) == EXCEPTION_CONTEXT_PSR);
static_assert(AMS_OFFSETOF(KExceptionContext, tpidr) == EXCEPTION_CONTEXT_TPIDR);
}
| 4,457
|
C++
|
.h
| 79
| 49.873418
| 85
| 0.688843
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,861
|
kern_secure_monitor_base.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_secure_monitor_base.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
namespace ams::kern::arch::arm64::smc {
template<int SmcId>
void SecureMonitorCall(u64 *buf) {
/* Load arguments into registers. */
register u64 x0 asm("x0") = buf[0];
register u64 x1 asm("x1") = buf[1];
register u64 x2 asm("x2") = buf[2];
register u64 x3 asm("x3") = buf[3];
register u64 x4 asm("x4") = buf[4];
register u64 x5 asm("x5") = buf[5];
register u64 x6 asm("x6") = buf[6];
register u64 x7 asm("x7") = buf[7];
/* Backup the current thread pointer. */
const uintptr_t current_thread_pointer_value = cpu::GetCurrentThreadPointerValue();
/* Perform the call. */
__asm__ __volatile__("smc %c[smc_id]"
: "+r"(x0), "+r"(x1), "+r"(x2), "+r"(x3), "+r"(x4), "+r"(x5), "+r"(x6), "+r"(x7)
: [smc_id]"i"(SmcId)
: "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15", "x16", "x17", "x18", "cc", "memory"
);
/* Restore the current thread pointer into X18. */
cpu::SetCurrentThreadPointerValue(current_thread_pointer_value);
/* Store arguments to output. */
buf[0] = x0;
buf[1] = x1;
buf[2] = x2;
buf[3] = x3;
buf[4] = x4;
buf[5] = x5;
buf[6] = x6;
buf[7] = x7;
}
enum PsciFunction {
PsciFunction_CpuSuspend = 0xC4000001,
PsciFunction_CpuOff = 0x84000002,
PsciFunction_CpuOn = 0xC4000003,
};
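/* These IDs follow the standard PSCI function numbering: the 0x84000000 base denotes */
/* SMC32 calls and the 0xC4000000 base denotes SMC64 calls, so CPU_SUSPEND and CPU_ON */
/* use the 64-bit calling convention here, while CPU_OFF takes no arguments and keeps */
/* its 32-bit ID. */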
template<int SmcId>
u64 PsciCall(PsciFunction function, u64 x1 = 0, u64 x2 = 0, u64 x3 = 0, u64 x4 = 0, u64 x5 = 0, u64 x6 = 0, u64 x7 = 0) {
ams::svc::lp64::SecureMonitorArguments args = { { function, x1, x2, x3, x4, x5, x6, x7 } };
SecureMonitorCall<SmcId>(args.r);
return args.r[0];
}
template<int SmcId>
u64 CpuOn(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
return PsciCall<SmcId>(PsciFunction_CpuOn, core_id, entrypoint, arg);
}
}
| 2,784
|
C++
|
.h
| 66
| 34.666667
| 125
| 0.587583
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,862
|
kern_k_page_table_impl.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_impl.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
#include <mesosphere/kern_k_memory_layout.hpp>
#include <mesosphere/arch/arm64/kern_k_page_table_entry.hpp>
namespace ams::kern::arch::arm64 {
class KPageTableImpl {
NON_COPYABLE(KPageTableImpl);
NON_MOVEABLE(KPageTableImpl);
public:
struct TraversalEntry {
KPhysicalAddress phys_addr;
size_t block_size;
u8 sw_reserved_bits;
u8 attr;
constexpr bool IsHeadMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHead) != 0; }
constexpr bool IsHeadAndBodyMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHeadAndBody) != 0; }
constexpr bool IsTailMergeDisabled() const { return (this->sw_reserved_bits & PageTableEntry::SoftwareReservedBit_DisableMergeHeadTail) != 0; }
};
enum EntryLevel : u32 {
EntryLevel_L3 = 0,
EntryLevel_L2 = 1,
EntryLevel_L1 = 2,
EntryLevel_Count = 3,
};
struct TraversalContext {
PageTableEntry *level_entries[EntryLevel_Count];
EntryLevel level;
bool is_contiguous;
};
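/* Illustrative walk sketch (hypothetical caller, based on the BeginTraversal / */
/* ContinueTraversal declarations further below): */
/* TraversalEntry entry; */
/* TraversalContext context; */
/* bool valid = impl.BeginTraversal(std::addressof(entry), std::addressof(context), address); */
/* while (valid) { */
/* // entry.phys_addr / entry.block_size describe the current mapping */
/* valid = impl.ContinueTraversal(std::addressof(entry), std::addressof(context)); */
/* } */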
private:
static constexpr size_t PageBits = util::CountTrailingZeros(PageSize);
static constexpr size_t NumLevels = 3;
static constexpr size_t LevelBits = 9;
static_assert(NumLevels > 0);
template<size_t Offset, size_t Count>
static constexpr ALWAYS_INLINE u64 GetBits(u64 value) {
return (value >> Offset) & ((1ul << Count) - 1);
}
static constexpr ALWAYS_INLINE u64 GetBits(u64 value, size_t offset, size_t count) {
return (value >> offset) & ((1ul << count) - 1);
}
template<size_t Offset, size_t Count>
static constexpr ALWAYS_INLINE u64 SelectBits(u64 value) {
return value & (((1ul << Count) - 1) << Offset);
}
static constexpr ALWAYS_INLINE u64 SelectBits(u64 value, size_t offset, size_t count) {
return value & (((1ul << count) - 1) << offset);
}
static constexpr ALWAYS_INLINE uintptr_t GetL0Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 0), LevelBits>(GetInteger(addr)); }
static constexpr ALWAYS_INLINE uintptr_t GetL1Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 1), LevelBits>(GetInteger(addr)); }
static constexpr ALWAYS_INLINE uintptr_t GetL2Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 2), LevelBits>(GetInteger(addr)); }
static constexpr ALWAYS_INLINE uintptr_t GetL3Index(KProcessAddress addr) { return GetBits<PageBits + LevelBits * (NumLevels - 3), LevelBits>(GetInteger(addr)); }
static constexpr ALWAYS_INLINE uintptr_t GetL1Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 1)>(GetInteger(addr)); }
static constexpr ALWAYS_INLINE uintptr_t GetL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2)>(GetInteger(addr)); }
static constexpr ALWAYS_INLINE uintptr_t GetL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3)>(GetInteger(addr)); }
static constexpr ALWAYS_INLINE uintptr_t GetContiguousL1Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 1) + 4>(GetInteger(addr)); }
static constexpr ALWAYS_INLINE uintptr_t GetContiguousL2Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 2) + 4>(GetInteger(addr)); }
static constexpr ALWAYS_INLINE uintptr_t GetContiguousL3Offset(KProcessAddress addr) { return GetBits<0, PageBits + LevelBits * (NumLevels - 3) + 4>(GetInteger(addr)); }
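/* GetBlock extracts the output-address bits of an entry mapped as a block at the given level; GetOffset yields the offset of an address within such a block. */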
static constexpr ALWAYS_INLINE uintptr_t GetBlock(const PageTableEntry *pte, EntryLevel level) { return SelectBits(pte->GetRawAttributesUnsafe(), PageBits + LevelBits * level, LevelBits * (NumLevels + 1 - level)); }
static constexpr ALWAYS_INLINE uintptr_t GetOffset(KProcessAddress addr, EntryLevel level) { return GetBits(GetInteger(addr), 0, PageBits + LevelBits * level); }
static ALWAYS_INLINE KVirtualAddress GetPageTableVirtualAddress(KPhysicalAddress addr) {
return KMemoryLayout::GetLinearVirtualAddress(addr);
}
public:
static constexpr ALWAYS_INLINE uintptr_t GetLevelIndex(KProcessAddress addr, EntryLevel level) { return GetBits(GetInteger(addr), PageBits + LevelBits * level, LevelBits); }
private:
L1PageTableEntry *m_table;
bool m_is_kernel;
u32 m_num_entries;
public:
ALWAYS_INLINE KVirtualAddress GetTableEntry(KVirtualAddress table, size_t index) const {
return table + index * sizeof(PageTableEntry);
}
ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KProcessAddress address) const {
return GetPointer<L1PageTableEntry>(GetTableEntry(KVirtualAddress(m_table), GetL1Index(address) & (m_num_entries - 1)));
}
ALWAYS_INLINE L2PageTableEntry *GetL2EntryFromTable(KVirtualAddress table, KProcessAddress address) const {
return GetPointer<L2PageTableEntry>(GetTableEntry(table, GetL2Index(address)));
}
ALWAYS_INLINE L2PageTableEntry *GetL2Entry(const L1PageTableEntry *entry, KProcessAddress address) const {
return GetL2EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
}
ALWAYS_INLINE L3PageTableEntry *GetL3EntryFromTable(KVirtualAddress table, KProcessAddress address) const {
return GetPointer<L3PageTableEntry>(GetTableEntry(table, GetL3Index(address)));
}
ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KProcessAddress address) const {
return GetL3EntryFromTable(KMemoryLayout::GetLinearVirtualAddress(entry->GetTable()), address);
}
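/* A block at a given level spans 1 << (PageBits + LevelBits * level) bytes; the contiguous hint groups 16 entries, multiplying the effective size by 16. */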
static constexpr size_t GetBlockSize(EntryLevel level, bool contiguous = false) {
return 1 << (PageBits + LevelBits * level + 4 * contiguous);
}
public:
constexpr explicit KPageTableImpl(util::ConstantInitializeTag) : m_table(), m_is_kernel(), m_num_entries() { /* ... */ }
explicit KPageTableImpl() { /* ... */ }
size_t GetNumL1Entries() const { return m_num_entries; }
NOINLINE void InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end);
NOINLINE void InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end);
L1PageTableEntry *Finalize();
void Dump(uintptr_t start, size_t size) const;
size_t CountPageTables() const;
bool BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const;
bool ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const;
bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress virt_addr) const;
static bool MergePages(KVirtualAddress *out, TraversalContext *context);
void SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const;
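/* Reconstruct the virtual address described by a traversal context from the table indices of its per-level entry pointers. */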
KProcessAddress GetAddressForContext(const TraversalContext *context) const {
KProcessAddress addr = m_is_kernel ? static_cast<uintptr_t>(-GetBlockSize(EntryLevel_L1)) * m_num_entries : 0;
for (u32 level = context->level; level <= EntryLevel_L1; ++level) {
addr += ((reinterpret_cast<uintptr_t>(context->level_entries[level]) / sizeof(PageTableEntry)) & (BlocksPerTable - 1)) << (PageBits + LevelBits * level);
}
return addr;
}
};
}
| 9,048
|
C++
|
.h
| 131
| 57.244275
| 227
| 0.666779
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,863
|
kern_k_interrupt_name.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_interrupt_name.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
namespace ams::kern::arch::arm64 {
namespace interrupt_name {
enum KInterruptName : s32 {
/* SGIs */
KInterruptName_ThreadTerminate = 0,
KInterruptName_CacheOperation = 1,
KInterruptName_Scheduler = 2,
KInterruptName_CoreBarrier = 3,
KInterruptName_PerformanceCounter = 4,
/* PPIs */
#if defined(ATMOSPHERE_BOARD_NINTENDO_NX)
KInterruptName_VirtualMaintenance = 25,
KInterruptName_HypervisorTimer = 26,
KInterruptName_VirtualTimer = 27,
KInterruptName_LegacyNFiq = 28,
KInterruptName_SecurePhysicalTimer = 29,
KInterruptName_NonSecurePhysicalTimer = 30,
KInterruptName_LegacyNIrq = 31,
#elif defined(ATMOSPHERE_BOARD_QEMU_VIRT)
KInterruptName_VirtualTimer = 27,
KInterruptName_SecurePhysicalTimer = 29,
KInterruptName_NonSecurePhysicalTimer = 30,
#endif
#if defined(ATMOSPHERE_BOARD_NINTENDO_NX)
KInterruptName_MemoryController = 109,
#endif
};
};
}
| 1,876
|
C++
|
.h
| 45
| 34.422222
| 76
| 0.636862
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,864
|
kern_k_supervisor_page_table.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_supervisor_page_table.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/arch/arm64/kern_k_page_table.hpp>
namespace ams::kern::arch::arm64 {
class KSupervisorPageTable {
private:
KPageTable m_page_table;
public:
constexpr KSupervisorPageTable() : m_page_table(util::ConstantInitialize) { /* ... */ }
NOINLINE void Initialize(s32 core_id);
void Activate() {
m_page_table.ActivateKernel();
}
void ActivateForInit() {
this->Activate();
/* Invalidate entire TLB. */
cpu::InvalidateEntireTlb();
}
Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
R_RETURN(m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, region_start, region_num_pages, state, perm));
}
Result UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
R_RETURN(m_page_table.UnmapPages(address, num_pages, state));
}
Result MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
R_RETURN(m_page_table.MapPageGroup(out_addr, pg, region_start, region_num_pages, state, perm));
}
Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
R_RETURN(m_page_table.UnmapPageGroup(address, pg, state));
}
bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
return m_page_table.GetPhysicalAddress(out, address);
}
void DumpMemoryBlocks() const {
return m_page_table.DumpMemoryBlocks();
}
void DumpPageTable() const {
return m_page_table.DumpPageTable();
}
size_t CountPageTables() const {
return m_page_table.CountPageTables();
}
};
}
| 2,931
|
C++
|
.h
| 60
| 39.166667
| 219
| 0.64881
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,865
|
kern_k_process_page_table.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_process_page_table.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_common.hpp>
#include <mesosphere/arch/arm64/kern_k_page_table.hpp>
namespace ams::kern::arch::arm64 {
class KProcessPageTable {
private:
KPageTable m_page_table;
public:
void Activate(size_t process_index, u64 id) {
/* Activate the page table with the specified contextidr. */
m_page_table.ActivateProcess(process_index, id);
}
Result Initialize(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index) {
R_RETURN(m_page_table.InitializeForProcess(flags, from_back, pool, code_address, code_size, system_resource, resource_limit, process_index));
}
void Finalize() { m_page_table.Finalize(); }
ALWAYS_INLINE KScopedLightLock AcquireDeviceMapLock() {
return m_page_table.AcquireDeviceMapLock();
}
Result SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
R_RETURN(m_page_table.SetMemoryPermission(addr, size, perm));
}
Result SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission perm) {
R_RETURN(m_page_table.SetProcessMemoryPermission(addr, size, perm));
}
Result SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
R_RETURN(m_page_table.SetMemoryAttribute(addr, size, mask, attr));
}
Result SetHeapSize(KProcessAddress *out, size_t size) {
R_RETURN(m_page_table.SetHeapSize(out, size));
}
Result SetMaxHeapSize(size_t size) {
R_RETURN(m_page_table.SetMaxHeapSize(size));
}
Result QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const {
R_RETURN(m_page_table.QueryInfo(out_info, out_page_info, addr));
}
Result QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const {
R_RETURN(m_page_table.QueryPhysicalAddress(out, address));
}
Result QueryStaticMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const {
R_RETURN(m_page_table.QueryStaticMapping(out, address, size));
}
Result QueryIoMapping(KProcessAddress *out, KPhysicalAddress address, size_t size) const {
R_RETURN(m_page_table.QueryIoMapping(out, address, size));
}
Result MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
R_RETURN(m_page_table.MapMemory(dst_address, src_address, size));
}
Result UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
R_RETURN(m_page_table.UnmapMemory(dst_address, src_address, size));
}
Result MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
R_RETURN(m_page_table.MapCodeMemory(dst_address, src_address, size));
}
Result UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
R_RETURN(m_page_table.UnmapCodeMemory(dst_address, src_address, size));
}
Result MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
R_RETURN(m_page_table.MapIo(phys_addr, size, perm));
}
Result MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, ams::svc::MemoryMapping mapping, ams::svc::MemoryPermission perm) {
R_RETURN(m_page_table.MapIoRegion(dst_address, phys_addr, size, mapping, perm));
}
Result UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, ams::svc::MemoryMapping mapping) {
R_RETURN(m_page_table.UnmapIoRegion(dst_address, phys_addr, size, mapping));
}
Result MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
R_RETURN(m_page_table.MapStatic(phys_addr, size, perm));
}
Result MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
R_RETURN(m_page_table.MapRegion(region_type, perm));
}
Result MapInsecurePhysicalMemory(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.MapInsecurePhysicalMemory(address, size));
}
Result UnmapInsecurePhysicalMemory(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.UnmapInsecurePhysicalMemory(address, size));
}
Result MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) {
R_RETURN(m_page_table.MapPageGroup(addr, pg, state, perm));
}
Result UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
R_RETURN(m_page_table.UnmapPageGroup(address, pg, state));
}
Result MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, KMemoryState state, KMemoryPermission perm) {
R_RETURN(m_page_table.MapPages(out_addr, num_pages, alignment, phys_addr, state, perm));
}
Result MapPages(KProcessAddress *out_addr, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
R_RETURN(m_page_table.MapPages(out_addr, num_pages, state, perm));
}
Result MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
R_RETURN(m_page_table.MapPages(address, num_pages, state, perm));
}
Result UnmapPages(KProcessAddress addr, size_t num_pages, KMemoryState state) {
R_RETURN(m_page_table.UnmapPages(addr, num_pages, state));
}
Result MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) {
R_RETURN(m_page_table.MakeAndOpenPageGroup(out, address, num_pages, state_mask, state, perm_mask, perm, attr_mask, attr));
}
Result InvalidateProcessDataCache(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.InvalidateProcessDataCache(address, size));
}
Result InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.InvalidateCurrentProcessDataCache(address, size));
}
Result ReadDebugMemory(void *buffer, KProcessAddress address, size_t size, bool force_debug_prod) {
R_RETURN(m_page_table.ReadDebugMemory(buffer, address, size, force_debug_prod));
}
Result ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size, KMemoryState state) {
R_RETURN(m_page_table.ReadDebugIoMemory(buffer, address, size, state));
}
Result WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) {
R_RETURN(m_page_table.WriteDebugMemory(address, buffer, size));
}
Result WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size, KMemoryState state) {
R_RETURN(m_page_table.WriteDebugIoMemory(address, buffer, size, state));
}
Result LockForMapDeviceAddressSpace(bool *out_is_io, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned, bool check_heap) {
R_RETURN(m_page_table.LockForMapDeviceAddressSpace(out_is_io, address, size, perm, is_aligned, check_heap));
}
Result LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap) {
R_RETURN(m_page_table.LockForUnmapDeviceAddressSpace(address, size, check_heap));
}
Result UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.UnlockForDeviceAddressSpace(address, size));
}
Result UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.UnlockForDeviceAddressSpacePartialMap(address, size));
}
Result OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
R_RETURN(m_page_table.OpenMemoryRangeForMapDeviceAddressSpace(out, address, size, perm, is_aligned));
}
Result OpenMemoryRangeForUnmapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size) {
R_RETURN(m_page_table.OpenMemoryRangeForUnmapDeviceAddressSpace(out, address, size));
}
Result LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
R_RETURN(m_page_table.LockForIpcUserBuffer(out, address, size));
}
Result UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.UnlockForIpcUserBuffer(address, size));
}
Result LockForTransferMemory(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm) {
R_RETURN(m_page_table.LockForTransferMemory(out, address, size, perm));
}
Result UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
R_RETURN(m_page_table.UnlockForTransferMemory(address, size, pg));
}
Result LockForCodeMemory(KPageGroup *out, KProcessAddress address, size_t size) {
R_RETURN(m_page_table.LockForCodeMemory(out, address, size));
}
Result UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
R_RETURN(m_page_table.UnlockForCodeMemory(address, size, pg));
}
Result OpenMemoryRangeForProcessCacheOperation(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size) {
R_RETURN(m_page_table.OpenMemoryRangeForProcessCacheOperation(out, address, size));
}
Result CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
R_RETURN(m_page_table.CopyMemoryFromLinearToUser(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr));
}
Result CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
R_RETURN(m_page_table.CopyMemoryFromLinearToKernel(dst_addr, size, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr));
}
Result CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
R_RETURN(m_page_table.CopyMemoryFromUserToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr));
}
Result CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
R_RETURN(m_page_table.CopyMemoryFromKernelToLinear(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr));
}
Result CopyMemoryFromHeapToHeap(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
R_RETURN(m_page_table.CopyMemoryFromHeapToHeap(dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr));
}
Result CopyMemoryFromHeapToHeapWithoutCheckDestination(KProcessPageTable &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
R_RETURN(m_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(dst_page_table.m_page_table, dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr, src_addr, src_state_mask, src_state, src_test_perm, src_attr_mask, src_attr));
}
Result SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KProcessPageTable &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
R_RETURN(m_page_table.SetupForIpc(out_dst_addr, size, src_addr, src_page_table.m_page_table, test_perm, dst_state, send));
}
Result CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) {
R_RETURN(m_page_table.CleanupForIpcServer(address, size, dst_state));
}
Result CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
R_RETURN(m_page_table.CleanupForIpcClient(address, size, dst_state));
}
Result MapPhysicalMemory(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.MapPhysicalMemory(address, size));
}
Result UnmapPhysicalMemory(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.UnmapPhysicalMemory(address, size));
}
Result MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.MapPhysicalMemoryUnsafe(address, size));
}
Result UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
R_RETURN(m_page_table.UnmapPhysicalMemoryUnsafe(address, size));
}
Result UnmapProcessMemory(KProcessAddress dst_address, size_t size, KProcessPageTable &src_page_table, KProcessAddress src_address) {
R_RETURN(m_page_table.UnmapProcessMemory(dst_address, size, src_page_table.m_page_table, src_address));
}
void DumpMemoryBlocks() const {
return m_page_table.DumpMemoryBlocks();
}
void DumpPageTable() const {
return m_page_table.DumpPageTable();
}
size_t CountPageTables() const {
return m_page_table.CountPageTables();
}
bool GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
return m_page_table.GetPhysicalAddress(out, address);
}
bool Contains(KProcessAddress addr, size_t size) const { return m_page_table.Contains(addr, size); }
bool IsInAliasRegion(KProcessAddress addr, size_t size) const { return m_page_table.IsInAliasRegion(addr, size); }
bool IsInUnsafeAliasRegion(KProcessAddress addr, size_t size) const { return m_page_table.IsInUnsafeAliasRegion(addr, size); }
bool CanContain(KProcessAddress addr, size_t size, KMemoryState state) const { return m_page_table.CanContain(addr, size, state); }
bool CanContain(KProcessAddress addr, size_t size, ams::svc::MemoryState state) const { return m_page_table.CanContain(addr, size, state); }
KProcessAddress GetAddressSpaceStart() const { return m_page_table.GetAddressSpaceStart(); }
KProcessAddress GetHeapRegionStart() const { return m_page_table.GetHeapRegionStart(); }
KProcessAddress GetAliasRegionStart() const { return m_page_table.GetAliasRegionStart(); }
KProcessAddress GetStackRegionStart() const { return m_page_table.GetStackRegionStart(); }
KProcessAddress GetKernelMapRegionStart() const { return m_page_table.GetKernelMapRegionStart(); }
KProcessAddress GetAliasCodeRegionStart() const { return m_page_table.GetAliasCodeRegionStart(); }
size_t GetAddressSpaceSize() const { return m_page_table.GetAddressSpaceSize(); }
size_t GetHeapRegionSize() const { return m_page_table.GetHeapRegionSize(); }
size_t GetAliasRegionSize() const { return m_page_table.GetAliasRegionSize(); }
size_t GetStackRegionSize() const { return m_page_table.GetStackRegionSize(); }
size_t GetKernelMapRegionSize() const { return m_page_table.GetKernelMapRegionSize(); }
size_t GetAliasCodeRegionSize() const { return m_page_table.GetAliasCodeRegionSize(); }
size_t GetAliasRegionExtraSize() const { return m_page_table.GetAliasRegionExtraSize(); }
size_t GetNormalMemorySize() const { return m_page_table.GetNormalMemorySize(); }
size_t GetCodeSize() const { return m_page_table.GetCodeSize(); }
size_t GetCodeDataSize() const { return m_page_table.GetCodeDataSize(); }
size_t GetAliasCodeSize() const { return m_page_table.GetAliasCodeSize(); }
size_t GetAliasCodeDataSize() const { return m_page_table.GetAliasCodeDataSize(); }
u32 GetAllocateOption() const { return m_page_table.GetAllocateOption(); }
KPhysicalAddress GetHeapPhysicalAddress(KVirtualAddress address) const {
return m_page_table.GetHeapPhysicalAddress(address);
}
KVirtualAddress GetHeapVirtualAddress(KPhysicalAddress address) const {
return m_page_table.GetHeapVirtualAddress(address);
}
KBlockInfoManager *GetBlockInfoManager() {
return m_page_table.GetBlockInfoManager();
}
KPageTableBase &GetBasePageTable() {
return m_page_table;
}
};
}
| 19,638
|
C++
|
.h
| 264
| 60.924242
| 370
| 0.670399
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
8,866
|
kern_k_init_page_table.hpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <mesosphere/kern_common.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/arch/arm64/kern_k_page_table_entry.hpp>
#include <mesosphere/kern_select_system_control.hpp>
namespace ams::kern::arch::arm64::init {
/* NOTE: Nintendo uses virtual functions, rather than a concept + template. */
template<typename T>
concept IsInitialPageAllocator = requires (T &t, KPhysicalAddress phys_addr, size_t size) {
{ t.Allocate(size) } -> std::same_as<KPhysicalAddress>;
{ t.Free(phys_addr, size) } -> std::same_as<void>;
};
class KInitialPageTable {
private:
KPhysicalAddress m_l1_tables[2];
u32 m_num_entries[2];
public:
template<IsInitialPageAllocator PageAllocator>
KInitialPageTable(KVirtualAddress start_address, KVirtualAddress end_address, PageAllocator &allocator) {
/* Set tables. */
m_l1_tables[0] = AllocateNewPageTable(allocator, 0);
m_l1_tables[1] = AllocateNewPageTable(allocator, 0);
/* Set counts. */
m_num_entries[0] = MaxPageTableEntries;
m_num_entries[1] = ((end_address / L1BlockSize) & (MaxPageTableEntries - 1)) - ((start_address / L1BlockSize) & (MaxPageTableEntries - 1)) + 1;
}
KInitialPageTable() {
/* Set tables. */
m_l1_tables[0] = util::AlignDown(cpu::GetTtbr0El1(), PageSize);
m_l1_tables[1] = util::AlignDown(cpu::GetTtbr1El1(), PageSize);
/* Set counts. */
cpu::TranslationControlRegisterAccessor tcr;
m_num_entries[0] = tcr.GetT0Size() / L1BlockSize;
m_num_entries[1] = tcr.GetT1Size() / L1BlockSize;
/* Check counts. */
MESOSPHERE_INIT_ABORT_UNLESS(0 < m_num_entries[0] && m_num_entries[0] <= MaxPageTableEntries);
MESOSPHERE_INIT_ABORT_UNLESS(0 < m_num_entries[1] && m_num_entries[1] <= MaxPageTableEntries);
}
constexpr ALWAYS_INLINE uintptr_t GetTtbr0L1TableAddress() const {
return GetInteger(m_l1_tables[0]);
}
constexpr ALWAYS_INLINE uintptr_t GetTtbr1L1TableAddress() const {
return GetInteger(m_l1_tables[1]);
}
private:
constexpr ALWAYS_INLINE L1PageTableEntry *GetL1Entry(KVirtualAddress address, u64 phys_to_virt_offset = 0) const {
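/* The high bit of the address selects between the TTBR0 (low half) and TTBR1 (high half) tables. */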
const size_t index = (GetInteger(address) >> (BITSIZEOF(address) - 1)) & 1;
L1PageTableEntry *l1_table = reinterpret_cast<L1PageTableEntry *>(GetInteger(m_l1_tables[index]) + phys_to_virt_offset);
return l1_table + ((GetInteger(address) / L1BlockSize) & (m_num_entries[index] - 1));
}
static constexpr ALWAYS_INLINE L2PageTableEntry *GetL2Entry(const L1PageTableEntry *entry, KVirtualAddress address, u64 phys_to_virt_offset = 0) {
L2PageTableEntry *l2_table = reinterpret_cast<L2PageTableEntry *>(GetInteger(entry->GetTable()) + phys_to_virt_offset);
return l2_table + ((GetInteger(address) / L2BlockSize) & (MaxPageTableEntries - 1));
}
static constexpr ALWAYS_INLINE L3PageTableEntry *GetL3Entry(const L2PageTableEntry *entry, KVirtualAddress address, u64 phys_to_virt_offset = 0) {
L3PageTableEntry *l3_table = reinterpret_cast<L3PageTableEntry *>(GetInteger(entry->GetTable()) + phys_to_virt_offset);
return l3_table + ((GetInteger(address) / L3BlockSize) & (MaxPageTableEntries - 1));
}
template<IsInitialPageAllocator PageAllocator>
static ALWAYS_INLINE KPhysicalAddress AllocateNewPageTable(PageAllocator &allocator, u64 phys_to_virt_offset) {
MESOSPHERE_UNUSED(phys_to_virt_offset);
return allocator.Allocate(PageSize);
}
static ALWAYS_INLINE void ClearNewPageTable(KPhysicalAddress address, u64 phys_to_virt_offset) {
/* Convert to a dereferenceable address, and clear. */
volatile u64 *ptr = reinterpret_cast<volatile u64 *>(GetInteger(address) + phys_to_virt_offset);
for (size_t i = 0; i < PageSize / sizeof(u64); ++i) {
ptr[i] = 0;
}
}
public:
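/* Upper bound on the page table pages needed to map a region of the given size: one L2 table per L1 block plus one L3 table per L2 block spanned. */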
static consteval size_t GetMaximumOverheadSize(size_t size) {
return (util::DivideUp(size, L1BlockSize) + util::DivideUp(size, L2BlockSize)) * PageSize;
}
private:
size_t NOINLINE GetBlockCount(KVirtualAddress virt_addr, size_t size, size_t block_size) {
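/* Count the mappings of exactly block_size bytes within [virt_addr, virt_addr + size). */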
const KVirtualAddress end_virt_addr = virt_addr + size;
size_t count = 0;
while (virt_addr < end_virt_addr) {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
/* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
if (l1_entry->IsMappedBlock() || l1_entry->IsEmpty()) {
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize);
virt_addr += L1BlockSize;
if (l1_entry->IsMappedBlock() && block_size == L1BlockSize) {
count++;
}
continue;
}
/* A non-empty, non-block entry must be a table. */
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());
/* Table, so check if we're mapped in L2. */
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsMappedBlock() || l2_entry->IsEmpty()) {
const size_t advance_size = (l2_entry->IsMappedBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
virt_addr += advance_size;
if (l2_entry->IsMappedBlock() && block_size == advance_size) {
count++;
}
continue;
}
/* A non-empty, non-block entry must be a table. */
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());
/* Table, so check if we're mapped in L3. */
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
/* L3 must be block or empty. */
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsEmpty());
const size_t advance_size = (l3_entry->IsMappedBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
virt_addr += advance_size;
if (l3_entry->IsMappedBlock() && block_size == advance_size) {
count++;
}
}
return count;
}
KVirtualAddress NOINLINE GetBlockByIndex(KVirtualAddress virt_addr, size_t size, size_t block_size, size_t index) {
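/* Find the virtual address of the index-th mapping of exactly block_size bytes in the range, or Null if there are not that many. */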
const KVirtualAddress end_virt_addr = virt_addr + size;
size_t count = 0;
while (virt_addr < end_virt_addr) {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
/* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
if (l1_entry->IsMappedBlock() || l1_entry->IsEmpty()) {
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize);
if (l1_entry->IsMappedBlock() && block_size == L1BlockSize) {
if ((count++) == index) {
return virt_addr;
}
}
virt_addr += L1BlockSize;
continue;
}
/* A non-empty, non-block entry must be a table. */
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());
/* Table, so check if we're mapped in L2. */
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsMappedBlock() || l2_entry->IsEmpty()) {
const size_t advance_size = (l2_entry->IsMappedBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
if (l2_entry->IsMappedBlock() && block_size == advance_size) {
if ((count++) == index) {
return virt_addr;
}
}
virt_addr += advance_size;
continue;
}
/* A non-empty, non-block entry must be a table. */
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());
/* Table, so check if we're mapped in L3. */
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
/* L3 must be block or empty. */
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsEmpty());
const size_t advance_size = (l3_entry->IsMappedBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
if (l3_entry->IsMappedBlock() && block_size == advance_size) {
if ((count++) == index) {
return virt_addr;
}
}
virt_addr += advance_size;
}
return Null<KVirtualAddress>;
}
PageTableEntry *GetMappingEntry(KVirtualAddress virt_addr, size_t block_size) {
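/* Return the entry that maps virt_addr, asserting that it is mapped as a block of exactly block_size bytes. */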
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
if (l1_entry->IsMappedBlock()) {
MESOSPHERE_INIT_ABORT_UNLESS(block_size == L1BlockSize);
return l1_entry;
}
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());
/* Table, so check if we're mapped in L2. */
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsMappedBlock()) {
const size_t real_size = (l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(real_size == block_size);
return l2_entry;
}
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());
/* Table, so check if we're mapped in L3. */
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
/* L3 must be block. */
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock());
const size_t real_size = (l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
MESOSPHERE_INIT_ABORT_UNLESS(real_size == block_size);
return l3_entry;
}
void NOINLINE SwapBlocks(KVirtualAddress src_virt_addr, KVirtualAddress dst_virt_addr, size_t block_size, bool do_copy) {
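/* Exchange the physical backing of two equal-size blocks (optionally swapping their contents), preserving each mapping's attributes. */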
static_assert(L2ContiguousBlockSize / L2BlockSize == L3ContiguousBlockSize / L3BlockSize);
const bool contig = (block_size == L2ContiguousBlockSize || block_size == L3ContiguousBlockSize);
const size_t num_mappings = contig ? L2ContiguousBlockSize / L2BlockSize : 1;
/* Unmap the source. */
PageTableEntry *src_entry = this->GetMappingEntry(src_virt_addr, block_size);
const auto src_saved = *src_entry;
for (size_t i = 0; i < num_mappings; i++) {
src_entry[i] = InvalidPageTableEntry;
}
/* Unmap the target. */
PageTableEntry *dst_entry = this->GetMappingEntry(dst_virt_addr, block_size);
const auto dst_saved = *dst_entry;
for (size_t i = 0; i < num_mappings; i++) {
dst_entry[i] = InvalidPageTableEntry;
}
/* Invalidate the entire tlb. */
cpu::DataSynchronizationBarrierInnerShareable();
cpu::InvalidateEntireTlb();
/* Copy data, if we should. */
const u64 negative_block_size_for_mask = static_cast<u64>(-static_cast<s64>(block_size));
const u64 offset_mask = negative_block_size_for_mask & ((1ul << 48) - 1);
const KVirtualAddress copy_src_addr = KVirtualAddress(src_saved.GetRawAttributesUnsafeForSwap() & offset_mask);
const KVirtualAddress copy_dst_addr = KVirtualAddress(dst_saved.GetRawAttributesUnsafeForSwap() & offset_mask);
if (do_copy) {
u8 tmp[0x100];
for (size_t ofs = 0; ofs < block_size; ofs += sizeof(tmp)) {
std::memcpy(tmp, GetVoidPointer(copy_src_addr + ofs), sizeof(tmp));
std::memcpy(GetVoidPointer(copy_src_addr + ofs), GetVoidPointer(copy_dst_addr + ofs), sizeof(tmp));
std::memcpy(GetVoidPointer(copy_dst_addr + ofs), tmp, sizeof(tmp));
}
cpu::DataSynchronizationBarrierInnerShareable();
}
/* Swap the mappings. */
const u64 attr_preserve_mask = (block_size - 1) | 0xFFFF000000000000ul;
const size_t shift_for_contig = contig ? 4 : 0;
size_t advanced_size = 0;
const u64 src_attr_val = src_saved.GetRawAttributesUnsafeForSwap() & attr_preserve_mask;
const u64 dst_attr_val = dst_saved.GetRawAttributesUnsafeForSwap() & attr_preserve_mask;
for (size_t i = 0; i < num_mappings; i++) {
reinterpret_cast<u64 *>(src_entry)[i] = GetInteger(copy_dst_addr + (advanced_size >> shift_for_contig)) | src_attr_val;
reinterpret_cast<u64 *>(dst_entry)[i] = GetInteger(copy_src_addr + (advanced_size >> shift_for_contig)) | dst_attr_val;
advanced_size += block_size;
}
cpu::DataSynchronizationBarrierInnerShareable();
}
void NOINLINE PhysicallyRandomize(KVirtualAddress virt_addr, size_t size, size_t block_size, bool do_copy) {
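/* Shuffle the physical backing of all blocks of the given size in the range, pairing each block with a randomly chosen block at or after it. */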
const size_t block_count = this->GetBlockCount(virt_addr, size, block_size);
if (block_count > 1) {
for (size_t cur_block = 0; cur_block < block_count; cur_block++) {
const size_t target_block = KSystemControl::Init::GenerateRandomRange(cur_block, block_count - 1);
if (cur_block != target_block) {
const KVirtualAddress cur_virt_addr = this->GetBlockByIndex(virt_addr, size, block_size, cur_block);
const KVirtualAddress target_virt_addr = this->GetBlockByIndex(virt_addr, size, block_size, target_block);
MESOSPHERE_INIT_ABORT_UNLESS(cur_virt_addr != Null<KVirtualAddress>);
MESOSPHERE_INIT_ABORT_UNLESS(target_virt_addr != Null<KVirtualAddress>);
this->SwapBlocks(cur_virt_addr, target_virt_addr, block_size, do_copy);
}
}
}
}
public:
template<IsInitialPageAllocator PageAllocator>
void NOINLINE Map(KVirtualAddress virt_addr, size_t size, KPhysicalAddress phys_addr, const PageTableEntry &attr, PageAllocator &allocator, u64 phys_to_virt_offset) {
/* Ensure that addresses and sizes are page aligned. */
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(phys_addr), PageSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));
/* Iteratively map pages until the requested region is mapped. */
while (size > 0) {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr, phys_to_virt_offset);
/* Can we make an L1 block? */
if (util::IsAligned(GetInteger(virt_addr), L1BlockSize) && util::IsAligned(GetInteger(phys_addr), L1BlockSize) && size >= L1BlockSize) {
*l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, false);
virt_addr += L1BlockSize;
phys_addr += L1BlockSize;
size -= L1BlockSize;
continue;
}
/* If we don't already have an L2 table, we need to make a new one. */
if (!l1_entry->IsMappedTable()) {
KPhysicalAddress new_table = AllocateNewPageTable(allocator, phys_to_virt_offset);
cpu::DataSynchronizationBarrierInnerShareable();
*l1_entry = L1PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
}
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr, phys_to_virt_offset);
/* Can we make a contiguous L2 block? */
if (util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L2ContiguousBlockSize) && size >= L2ContiguousBlockSize) {
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
l2_entry[i] = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, true);
virt_addr += L2BlockSize;
phys_addr += L2BlockSize;
size -= L2BlockSize;
}
continue;
}
/* Can we make an L2 block? */
if (util::IsAligned(GetInteger(virt_addr), L2BlockSize) && util::IsAligned(GetInteger(phys_addr), L2BlockSize) && size >= L2BlockSize) {
*l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, false);
virt_addr += L2BlockSize;
phys_addr += L2BlockSize;
size -= L2BlockSize;
continue;
}
/* If we don't already have an L3 table, we need to make a new one. */
if (!l2_entry->IsMappedTable()) {
KPhysicalAddress new_table = AllocateNewPageTable(allocator, phys_to_virt_offset);
cpu::DataSynchronizationBarrierInnerShareable();
*l2_entry = L2PageTableEntry(PageTableEntry::TableTag{}, new_table, attr.IsPrivilegedExecuteNever());
}
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr, phys_to_virt_offset);
/* Can we make a contiguous L3 block? */
if (util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize) && util::IsAligned(GetInteger(phys_addr), L3ContiguousBlockSize) && size >= L3ContiguousBlockSize) {
for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
l3_entry[i] = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, true);
virt_addr += L3BlockSize;
phys_addr += L3BlockSize;
size -= L3BlockSize;
}
continue;
}
/* Make an L3 block. */
*l3_entry = L3PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, attr, PageTableEntry::SoftwareReservedBit_None, false);
virt_addr += L3BlockSize;
phys_addr += L3BlockSize;
size -= L3BlockSize;
}
/* Ensure data consistency after our mapping is added. */
cpu::DataSynchronizationBarrierInnerShareable();
}
void UnmapTtbr0Entries(u64 phys_to_virt_offset) {
/* Ensure data consistency before we unmap. */
cpu::DataSynchronizationBarrierInnerShareable();
/* Define helper, as we only want to clear non-nGnRE pages. */
constexpr auto ShouldUnmap = [](const PageTableEntry *entry) ALWAYS_INLINE_LAMBDA -> bool {
return entry->GetPageAttribute() != PageTableEntry::PageAttribute_Device_nGnRE;
};
/* Iterate all L1 entries. */
L1PageTableEntry * const l1_table = reinterpret_cast<L1PageTableEntry *>(GetInteger(m_l1_tables[0]) + phys_to_virt_offset);
for (size_t l1_index = 0; l1_index < m_num_entries[0]; l1_index++) {
/* Get L1 entry. */
L1PageTableEntry * const l1_entry = l1_table + l1_index;
if (l1_entry->IsMappedBlock()) {
/* Unmap the L1 entry, if we should. */
if (ShouldUnmap(l1_entry)) {
*static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
}
} else if (l1_entry->IsMappedTable()) {
/* Get the L2 table. */
L2PageTableEntry * const l2_table = reinterpret_cast<L2PageTableEntry *>(GetInteger(l1_entry->GetTable()) + phys_to_virt_offset);
/* Unmap all L2 entries, as relevant. */
size_t remaining_l2_entries = 0;
for (size_t l2_index = 0; l2_index < MaxPageTableEntries; ++l2_index) {
/* Get L2 entry. */
L2PageTableEntry * const l2_entry = l2_table + l2_index;
if (l2_entry->IsMappedBlock()) {
const size_t num_to_clear = (l2_entry->IsContiguous() ? L2ContiguousBlockSize : L2BlockSize) / L2BlockSize;
if (ShouldUnmap(l2_entry)) {
for (size_t i = 0; i < num_to_clear; ++i) {
static_cast<PageTableEntry *>(l2_entry)[i] = InvalidPageTableEntry;
}
} else {
remaining_l2_entries += num_to_clear;
}
l2_index = l2_index + num_to_clear - 1;
} else if (l2_entry->IsMappedTable()) {
/* Get the L3 table. */
L3PageTableEntry * const l3_table = reinterpret_cast<L3PageTableEntry *>(GetInteger(l2_entry->GetTable()) + phys_to_virt_offset);
/* Unmap all L3 entries, as relevant. */
size_t remaining_l3_entries = 0;
for (size_t l3_index = 0; l3_index < MaxPageTableEntries; ++l3_index) {
/* Get L3 entry. */
if (L3PageTableEntry * const l3_entry = l3_table + l3_index; l3_entry->IsMappedBlock()) {
const size_t num_to_clear = (l3_entry->IsContiguous() ? L3ContiguousBlockSize : L3BlockSize) / L3BlockSize;
if (ShouldUnmap(l3_entry)) {
for (size_t i = 0; i < num_to_clear; ++i) {
static_cast<PageTableEntry *>(l3_entry)[i] = InvalidPageTableEntry;
}
} else {
remaining_l3_entries += num_to_clear;
}
l3_index = l3_index + num_to_clear - 1;
}
}
/* If we unmapped all L3 entries, clear the L2 entry. */
if (remaining_l3_entries == 0) {
*static_cast<PageTableEntry *>(l2_entry) = InvalidPageTableEntry;
/* Invalidate the entire tlb. */
cpu::DataSynchronizationBarrierInnerShareable();
cpu::InvalidateEntireTlb();
} else {
remaining_l2_entries++;
}
}
}
/* If we unmapped all L2 entries, clear the L1 entry. */
if (remaining_l2_entries == 0) {
*static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
/* Invalidate the entire tlb. */
cpu::DataSynchronizationBarrierInnerShareable();
cpu::InvalidateEntireTlb();
}
}
}
/* Invalidate the entire tlb. */
cpu::DataSynchronizationBarrierInnerShareable();
cpu::InvalidateEntireTlb();
}
KPhysicalAddress GetPhysicalAddress(KVirtualAddress virt_addr) const {
/* Get the L1 entry. */
const L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
if (l1_entry->IsMappedBlock()) {
return l1_entry->GetBlock() + (GetInteger(virt_addr) & (L1BlockSize - 1));
}
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());
/* Get the L2 entry. */
const L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsMappedBlock()) {
return l2_entry->GetBlock() + (GetInteger(virt_addr) & (L2BlockSize - 1));
}
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());
/* Get the L3 entry. */
const L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock());
return l3_entry->GetBlock() + (GetInteger(virt_addr) & (L3BlockSize - 1));
}
KPhysicalAddress GetPhysicalAddressOfRandomizedRange(KVirtualAddress virt_addr, size_t size) const {
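/* Determine the physical extent backing the range and return its lowest physical address, asserting the extent is exactly size bytes. */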
/* Define tracking variables for ourselves to use. */
KPhysicalAddress min_phys_addr = Null<KPhysicalAddress>;
KPhysicalAddress max_phys_addr = Null<KPhysicalAddress>;
/* Ensure the range we're querying is valid. */
const KVirtualAddress end_virt_addr = virt_addr + size;
if (virt_addr > end_virt_addr) {
MESOSPHERE_INIT_ABORT_UNLESS(size == 0);
return min_phys_addr;
}
auto UpdateExtents = [&](const KPhysicalAddress block, size_t block_size) ALWAYS_INLINE_LAMBDA {
/* Ensure that we are allowed to have the block here. */
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), block_size));
MESOSPHERE_INIT_ABORT_UNLESS(block_size <= GetInteger(end_virt_addr) - GetInteger(virt_addr));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), block_size));
MESOSPHERE_INIT_ABORT_UNLESS(size >= block_size);
const KPhysicalAddress block_end = block + block_size;
/* We want to update min phys addr when it's 0 or > block. */
/* This is equivalent in two's complement to (n - 1) >= block. */
if ((GetInteger(min_phys_addr) - 1) >= GetInteger(block)) {
min_phys_addr = block;
}
/* Update max phys addr when it's 0 or < block_end. */
if (GetInteger(max_phys_addr) < GetInteger(block_end) || GetInteger(max_phys_addr) == 0) {
max_phys_addr = block_end;
}
/* Traverse onwards. */
virt_addr += block_size;
};
while (virt_addr < end_virt_addr) {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
/* If an L1 block is mapped, update. */
if (l1_entry->IsMappedBlock()) {
UpdateExtents(l1_entry->GetBlock(), L1BlockSize);
continue;
}
/* Not a block, so we must have a table. */
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsMappedBlock()) {
UpdateExtents(l2_entry->GetBlock(), l2_entry->IsContiguous() ? L2ContiguousBlockSize : L2BlockSize);
continue;
}
/* Not a block, so we must have a table. */
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());
/* We must have a mapped l3 entry to inspect. */
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock());
UpdateExtents(l3_entry->GetBlock(), l3_entry->IsContiguous() ? L3ContiguousBlockSize : L3BlockSize);
}
/* Ensure we got the right range. */
MESOSPHERE_INIT_ABORT_UNLESS(GetInteger(max_phys_addr) - GetInteger(min_phys_addr) == size);
/* Return the minimum physical address that we found. */
return min_phys_addr;
}
bool IsFree(KVirtualAddress virt_addr, size_t size) {
/* Ensure that addresses and sizes are page aligned. */
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));
const KVirtualAddress end_virt_addr = virt_addr + size;
while (virt_addr < end_virt_addr) {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
/* If an L1 block is mapped, the address isn't free. */
if (l1_entry->IsMappedBlock()) {
return false;
}
if (!l1_entry->IsMappedTable()) {
/* Not a table, so just move to check the next region. */
virt_addr = util::AlignDown(GetInteger(virt_addr) + L1BlockSize, L1BlockSize);
continue;
}
/* Table, so check if we're mapped in L2. */
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsMappedBlock()) {
return false;
}
if (!l2_entry->IsMappedTable()) {
/* Not a table, so just move to check the next region. */
virt_addr = util::AlignDown(GetInteger(virt_addr) + L2BlockSize, L2BlockSize);
continue;
}
/* Table, so check if we're mapped in L3. */
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
if (l3_entry->IsMappedBlock()) {
return false;
}
/* Not a block, so move on to check the next page. */
virt_addr = util::AlignDown(GetInteger(virt_addr) + L3BlockSize, L3BlockSize);
}
return true;
}
void Reprotect(KVirtualAddress virt_addr, size_t size, const PageTableEntry &attr_before, const PageTableEntry &attr_after) {
/* Ensure that addresses and sizes are page aligned. */
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(size, PageSize));
/* Iteratively reprotect pages until the requested region is reprotected. */
while (size > 0) {
L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
/* Check if an L1 block is present. */
if (l1_entry->IsMappedBlock()) {
/* Ensure that we are allowed to have an L1 block here. */
const KPhysicalAddress block = l1_entry->GetBlock();
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(size >= L1BlockSize);
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsCompatibleWithAttribute(attr_before, PageTableEntry::SoftwareReservedBit_None, false));
/* Invalidate the existing L1 block. */
*static_cast<PageTableEntry *>(l1_entry) = InvalidPageTableEntry;
cpu::DataSynchronizationBarrierInnerShareable();
cpu::InvalidateEntireTlb();
/* Create new L1 block. */
*l1_entry = L1PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, PageTableEntry::SoftwareReservedBit_None, false);
virt_addr += L1BlockSize;
size -= L1BlockSize;
continue;
}
/* Not a block, so we must be a table. */
MESOSPHERE_INIT_ABORT_UNLESS(l1_entry->IsMappedTable());
L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
if (l2_entry->IsMappedBlock()) {
const KPhysicalAddress block = l2_entry->GetBlock();
if (l2_entry->IsContiguous()) {
/* Ensure that we are allowed to have a contiguous L2 block here. */
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2ContiguousBlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2ContiguousBlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(size >= L2ContiguousBlockSize);
/* Invalidate the existing contiguous L2 block. */
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
/* Ensure that the entry is valid. */
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry[i].IsCompatibleWithAttribute(attr_before, PageTableEntry::SoftwareReservedBit_None, true));
static_cast<PageTableEntry *>(l2_entry)[i] = InvalidPageTableEntry;
}
cpu::DataSynchronizationBarrierInnerShareable();
cpu::InvalidateEntireTlb();
/* Create a new contiguous L2 block. */
for (size_t i = 0; i < L2ContiguousBlockSize / L2BlockSize; i++) {
l2_entry[i] = L2PageTableEntry(PageTableEntry::BlockTag{}, block + L2BlockSize * i, attr_after, PageTableEntry::SoftwareReservedBit_None, true);
}
virt_addr += L2ContiguousBlockSize;
size -= L2ContiguousBlockSize;
} else {
/* Ensure that we are allowed to have an L2 block here. */
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L2BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L2BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(size >= L2BlockSize);
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsCompatibleWithAttribute(attr_before, PageTableEntry::SoftwareReservedBit_None, false));
/* Invalidate the existing L2 block. */
*static_cast<PageTableEntry *>(l2_entry) = InvalidPageTableEntry;
cpu::DataSynchronizationBarrierInnerShareable();
cpu::InvalidateEntireTlb();
/* Create new L2 block. */
*l2_entry = L2PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, PageTableEntry::SoftwareReservedBit_None, false);
virt_addr += L2BlockSize;
size -= L2BlockSize;
}
continue;
}
/* Not a block, so we must be a table. */
MESOSPHERE_INIT_ABORT_UNLESS(l2_entry->IsMappedTable());
/* We must have a mapped l3 entry to reprotect. */
L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock());
const KPhysicalAddress block = l3_entry->GetBlock();
if (l3_entry->IsContiguous()) {
/* Ensure that we are allowed to have a contiguous L3 block here. */
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3ContiguousBlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3ContiguousBlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(size >= L3ContiguousBlockSize);
/* Invalidate the existing contiguous L3 block. */
for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
/* Ensure that the entry is valid. */
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry[i].IsCompatibleWithAttribute(attr_before, PageTableEntry::SoftwareReservedBit_None, true));
static_cast<PageTableEntry *>(l3_entry)[i] = InvalidPageTableEntry;
}
cpu::DataSynchronizationBarrierInnerShareable();
cpu::InvalidateEntireTlb();
/* Create a new contiguous L3 block. */
for (size_t i = 0; i < L3ContiguousBlockSize / L3BlockSize; i++) {
l3_entry[i] = L3PageTableEntry(PageTableEntry::BlockTag{}, block + L3BlockSize * i, attr_after, PageTableEntry::SoftwareReservedBit_None, true);
}
virt_addr += L3ContiguousBlockSize;
size -= L3ContiguousBlockSize;
} else {
/* Ensure that we are allowed to have an L3 block here. */
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L3BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(block), L3BlockSize));
MESOSPHERE_INIT_ABORT_UNLESS(size >= L3BlockSize);
MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsCompatibleWithAttribute(attr_before, PageTableEntry::SoftwareReservedBit_None, false));
/* Invalidate the existing L3 block. */
*static_cast<PageTableEntry *>(l3_entry) = InvalidPageTableEntry;
cpu::DataSynchronizationBarrierInnerShareable();
cpu::InvalidateEntireTlb();
/* Create new L3 block. */
*l3_entry = L3PageTableEntry(PageTableEntry::BlockTag{}, block, attr_after, PageTableEntry::SoftwareReservedBit_None, false);
virt_addr += L3BlockSize;
size -= L3BlockSize;
}
}
/* Ensure data consistency after we complete reprotection. */
cpu::DataSynchronizationBarrierInnerShareable();
}
void PhysicallyRandomize(KVirtualAddress virt_addr, size_t size, bool do_copy) {
this->PhysicallyRandomize(virt_addr, size, L1BlockSize, do_copy);
this->PhysicallyRandomize(virt_addr, size, L2ContiguousBlockSize, do_copy);
this->PhysicallyRandomize(virt_addr, size, L2BlockSize, do_copy);
this->PhysicallyRandomize(virt_addr, size, L3ContiguousBlockSize, do_copy);
this->PhysicallyRandomize(virt_addr, size, L3BlockSize, do_copy);
cpu::StoreCacheForInit(GetVoidPointer(virt_addr), size);
}
};
class KInitialPageAllocator final {
private:
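/* Granularity (64 pages) by which Allocate extends the backing pool when no free chunk can satisfy a request. */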
static constexpr inline size_t FreeUnitSize = BITSIZEOF(u64) * PageSize;
struct FreeListEntry {
FreeListEntry *next;
size_t size;
};
public:
struct State {
uintptr_t start_address;
uintptr_t end_address;
FreeListEntry *free_head;
};
private:
State m_state;
public:
constexpr ALWAYS_INLINE KInitialPageAllocator() : m_state{} { /* ... */ }
ALWAYS_INLINE void Initialize(uintptr_t address) {
m_state.start_address = address;
m_state.end_address = address;
}
ALWAYS_INLINE void InitializeFromState(const State *state) {
m_state = *state;
}
ALWAYS_INLINE void GetFinalState(State *out) {
*out = m_state;
m_state = {};
}
private:
bool CanAllocate(size_t align, size_t size) const {
for (auto *cur = m_state.free_head; cur != nullptr; cur = cur->next) {
const uintptr_t cur_last = reinterpret_cast<uintptr_t>(cur) + cur->size - 1;
const uintptr_t alloc_last = util::AlignUp(reinterpret_cast<uintptr_t>(cur), align) + size - 1;
if (alloc_last <= cur_last) {
return true;
}
}
return false;
}
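/* Illustrative example (not from the source): for a free chunk at 0x10000 of size */
/* 0x3000, a request with align = 0x4000 and size = 0x1000 gives alloc_last = */
/* AlignUp(0x10000, 0x4000) + 0x1000 - 1 = 0x10FFF <= cur_last = 0x12FFF, so the */
/* request can be satisfied from this chunk. */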
bool TryAllocate(uintptr_t address, size_t size) {
/* Try to allocate the region. */
auto **prev_next = std::addressof(m_state.free_head);
for (auto *cur = m_state.free_head; cur != nullptr; prev_next = std::addressof(cur->next), cur = cur->next) {
const uintptr_t cur_start = reinterpret_cast<uintptr_t>(cur);
const uintptr_t cur_last = cur_start + cur->size - 1;
if (cur_start <= address && address + size - 1 <= cur_last) {
auto *alloc = reinterpret_cast<FreeListEntry *>(address);
/* Perform fragmentation at front. */
if (cur != alloc) {
prev_next = std::addressof(cur->next);
*alloc = {
.next = cur->next,
.size = cur_start + cur->size - address,
};
*cur = {
.next = alloc,
.size = address - cur_start,
};
}
/* Perform fragmentation at tail. */
if (alloc->size != size) {
auto *next = reinterpret_cast<FreeListEntry *>(address + size);
*next = {
.next = alloc->next,
.size = alloc->size - size,
};
*alloc = {
.next = next,
.size = size,
};
}
*prev_next = alloc->next;
return true;
}
}
return false;
}
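/* Illustrative example (not from the source): with a single free chunk covering */
/* [0x10000, 0x14000), TryAllocate(0x11000, 0x1000) splits it three ways: the front */
/* piece shrinks to [0x10000, 0x11000), a tail chunk [0x12000, 0x14000) is linked in, */
/* and the allocated region [0x11000, 0x12000) is unlinked from the free list. */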
public:
KPhysicalAddress Allocate(size_t align, size_t size) {
/* Ensure that the free list is non-empty. */
while (!this->CanAllocate(align, size)) {
this->Free(m_state.end_address, FreeUnitSize);
m_state.end_address += FreeUnitSize;
}
/* Allocate a random address. */
const uintptr_t aligned_start = util::AlignUp(m_state.start_address, align);
const uintptr_t aligned_end = util::AlignDown(m_state.end_address, align);
const size_t ind_max = ((aligned_end - aligned_start) / align) - 1;
while (true) {
if (const uintptr_t random_address = aligned_start + (KSystemControl::Init::GenerateRandomRange(0, ind_max) * align); this->TryAllocate(random_address, size)) {
/* Clear the allocated pages. */
volatile u64 *ptr = reinterpret_cast<volatile u64 *>(random_address);
for (size_t i = 0; i < size / sizeof(u64); ++i) {
ptr[i] = 0;
}
return random_address;
}
}
}
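/* Note: allocation is rejection sampling: a random aligned candidate is drawn from */
/* [aligned_start, aligned_end) until TryAllocate succeeds. The CanAllocate loop above */
/* guarantees a satisfying candidate exists, so the retry loop terminates (with */
/* probability 1), and pages are zeroed before being handed out. */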
KPhysicalAddress Allocate(size_t size) {
return this->Allocate(size, size);
}
void Free(KPhysicalAddress phys_addr, size_t size) {
auto **prev_next = std::addressof(m_state.free_head);
auto *new_chunk = reinterpret_cast<FreeListEntry *>(GetInteger(phys_addr));
if (auto *cur = m_state.free_head; cur != nullptr) {
const uintptr_t new_start = reinterpret_cast<uintptr_t>(new_chunk);
const uintptr_t new_end = GetInteger(phys_addr) + size;
while (true) {
/* Attempt coalescing. */
const uintptr_t cur_start = reinterpret_cast<uintptr_t>(cur);
const uintptr_t cur_end = cur_start + cur->size;
if (new_start < cur_start) {
if (new_end < cur_start) {
*new_chunk = {
.next = cur,
.size = size,
};
break;
} else if (new_end == cur_start) {
*new_chunk = {
.next = cur->next,
.size = cur->size + size,
};
break;
}
} else if (cur_end == new_start) {
cur->size += size;
return;
}
prev_next = std::addressof(cur->next);
if (cur->next != nullptr) {
cur = cur->next;
} else {
*new_chunk = {
.next = nullptr,
.size = size,
};
cur->next = new_chunk;
return;
}
}
} else {
*new_chunk = {
.next = nullptr,
.size = size,
};
}
*prev_next = new_chunk;
}
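/* Note: Free keeps the list address-sorted and coalesces with an adjacent neighbor */
/* where possible: a freed chunk ending exactly at cur_start merges forward, one */
/* starting exactly at cur_end merges backward, and anything else is inserted as a */
/* standalone chunk before the first free chunk above it (or appended at the tail). */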
};
static_assert(IsInitialPageAllocator<KInitialPageAllocator>);
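/* A minimal usage sketch (illustrative only; free_region_start is an assumed address */
/* and the real boot flow may differ): */
/* */
/*     KInitialPageAllocator allocator; */
/*     allocator.Initialize(free_region_start); */
/*     const KPhysicalAddress page = allocator.Allocate(PageSize, PageSize); */
/*     ... */
/*     KInitialPageAllocator::State state; */
/*     allocator.GetFinalState(std::addressof(state)); */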
}
| 51,195 | C++ | .h | 799 | 42.827284 | 186 | 0.510639 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

8,867 | kern_k_init_arguments.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_arguments.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <mesosphere/kern_select_assembly_offsets.h>
namespace ams::kern::init {
struct alignas(util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE)) KInitArguments {
u64 cpuactlr;
u64 cpuectlr;
u64 sp;
u64 entrypoint;
u64 argument;
};
static_assert(alignof(KInitArguments) == util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE));
static_assert(sizeof(KInitArguments) == std::max(INIT_ARGUMENTS_SIZE, util::CeilingPowerOfTwo(INIT_ARGUMENTS_SIZE)));
static_assert(AMS_OFFSETOF(KInitArguments, cpuactlr) == INIT_ARGUMENTS_CPUACTLR);
static_assert(AMS_OFFSETOF(KInitArguments, cpuectlr) == INIT_ARGUMENTS_CPUECTLR);
static_assert(AMS_OFFSETOF(KInitArguments, sp) == INIT_ARGUMENTS_SP);
static_assert(AMS_OFFSETOF(KInitArguments, entrypoint) == INIT_ARGUMENTS_ENTRYPOINT);
static_assert(AMS_OFFSETOF(KInitArguments, argument) == INIT_ARGUMENTS_ARGUMENT);
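/* Note: the offset asserts above pin the C++ layout of KInitArguments to the */
/* INIT_ARGUMENTS_* constants shared with assembly, so early-boot code that fills this */
/* structure from asm cannot silently drift out of sync with the C++ definition. */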
}
| 1,604 | C++ | .h | 33 | 44.787879 | 121 | 0.729592 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

8,868 | kern_k_interrupt_controller.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm/kern_k_interrupt_controller.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
#if 1
#include <mesosphere/arch/arm/kern_generic_interrupt_controller.hpp>
#else
#error "Unknown board for KInterruptController"
#endif
| 909 | C++ | .h | 24 | 35.75 | 76 | 0.767045 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

8,869 | kern_generic_interrupt_controller.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/arch/arm/kern_generic_interrupt_controller.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#include <mesosphere/kern_select_cpu.hpp>
#include <mesosphere/kern_k_typed_address.hpp>
namespace ams::kern::arch::arm {
struct GicDistributor {
u32 ctlr;
u32 typer;
u32 iidr;
u32 reserved_0x0c;
u32 statusr;
u32 reserved_0x14[3];
u32 impldef_0x20[8];
u32 setspi_nsr;
u32 reserved_0x44;
u32 clrspi_nsr;
u32 reserved_0x4c;
u32 setspi_sr;
u32 reserved_0x54;
u32 clrspi_sr;
u32 reserved_0x5c[9];
u32 igroupr[32];
u32 isenabler[32];
u32 icenabler[32];
u32 ispendr[32];
u32 icpendr[32];
u32 isactiver[32];
u32 icactiver[32];
union {
u8 bytes[1020];
u32 words[255];
} ipriorityr;
u32 _0x7fc;
union {
u8 bytes[1020];
u32 words[255];
} itargetsr;
u32 _0xbfc;
u32 icfgr[64];
u32 igrpmodr[32];
u32 _0xd80[32];
u32 nsacr[64];
u32 sgir;
u32 _0xf04[3];
u32 cpendsgir[4];
u32 spendsgir[4];
u32 reserved_0xf30[52];
static constexpr size_t SgirCpuTargetListShift = 16;
enum SgirTargetListFilter : u32 {
SgirTargetListFilter_CpuTargetList = (0 << 24),
SgirTargetListFilter_Others = (1 << 24),
SgirTargetListFilter_Self = (2 << 24),
SgirTargetListFilter_Reserved = (3 << 24),
};
};
static_assert(util::is_pod<GicDistributor>::value);
static_assert(sizeof(GicDistributor) == 0x1000);
struct GicCpuInterface {
u32 ctlr;
u32 pmr;
u32 bpr;
u32 iar;
u32 eoir;
u32 rpr;
u32 hppir;
u32 abpr;
u32 aiar;
u32 aeoir;
u32 ahppir;
u32 statusr;
u32 reserved_30[4];
u32 impldef_40[36];
u32 apr[4];
u32 nsapr[4];
u32 reserved_f0[3];
u32 iidr;
u32 reserved_100[960];
u32 dir;
u32 _0x1004[1023];
};
static_assert(util::is_pod<GicCpuInterface>::value);
static_assert(sizeof(GicCpuInterface) == 0x2000);
struct KInterruptController {
NON_COPYABLE(KInterruptController);
NON_MOVEABLE(KInterruptController);
public:
static constexpr s32 NumSoftwareInterrupts = 16;
static constexpr s32 NumLocalInterrupts = NumSoftwareInterrupts + 16;
static constexpr s32 NumGlobalInterrupts = 988;
static constexpr s32 NumInterrupts = NumLocalInterrupts + NumGlobalInterrupts;
static constexpr s32 NumPriorityLevels = 4;
public:
struct LocalState {
u32 isenabler[NumLocalInterrupts / 32];
u32 ipriorityr[NumLocalInterrupts / 4];
u32 itargetsr[NumLocalInterrupts / 4];
u32 icfgr[NumLocalInterrupts / 16];
};
struct GlobalState {
u32 isenabler[NumGlobalInterrupts / 32];
u32 ipriorityr[NumGlobalInterrupts / 4];
u32 itargetsr[NumGlobalInterrupts / 4];
u32 icfgr[NumGlobalInterrupts / 16];
};
enum PriorityLevel : u8 {
PriorityLevel_High = 0,
PriorityLevel_Low = NumPriorityLevels - 1,
PriorityLevel_Timer = 1,
PriorityLevel_Scheduler = 2,
};
private:
static constinit inline u32 s_mask[cpu::NumCores];
private:
volatile GicDistributor *m_gicd;
volatile GicCpuInterface *m_gicc;
public:
constexpr KInterruptController() : m_gicd(nullptr), m_gicc(nullptr) { /* ... */ }
void Initialize(s32 core_id);
void Finalize(s32 core_id);
void SaveCoreLocal(LocalState *state) const;
void SaveGlobal(GlobalState *state) const;
void RestoreCoreLocal(const LocalState *state) const;
void RestoreGlobal(const GlobalState *state) const;
public:
u32 GetIrq() const {
return m_gicc->iar;
}
static constexpr s32 ConvertRawIrq(u32 irq) {
return (irq == 0x3FF) ? -1 : (irq & 0x3FF);
}
void Enable(s32 irq) const {
m_gicd->isenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
}
void Disable(s32 irq) const {
m_gicd->icenabler[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
}
void Clear(s32 irq) const {
m_gicd->icpendr[irq / BITSIZEOF(u32)] = (1u << (irq % BITSIZEOF(u32)));
}
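/* Note: GICD_ISENABLERn, GICD_ICENABLERn, and GICD_ICPENDRn have write-one semantics */
/* in the GIC architecture (set-enable / clear-enable / clear-pending respectively), */
/* so the accessors above can write a single bit without a read-modify-write. */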
void SetTarget(s32 irq, s32 core_id) const {
m_gicd->itargetsr.bytes[irq] = m_gicd->itargetsr.bytes[irq] | GetGicMask(core_id);
}
void ClearTarget(s32 irq, s32 core_id) const {
m_gicd->itargetsr.bytes[irq] = m_gicd->itargetsr.bytes[irq] & ~GetGicMask(core_id);
}
void SetPriorityLevel(s32 irq, s32 level) const {
MESOSPHERE_ASSERT(PriorityLevel_High <= level && level <= PriorityLevel_Low);
m_gicd->ipriorityr.bytes[irq] = ToGicPriorityValue(level);
}
s32 GetPriorityLevel(s32 irq) const {
return FromGicPriorityValue(m_gicd->ipriorityr.bytes[irq]);
}
void SetPriorityLevel(s32 level) const {
MESOSPHERE_ASSERT(PriorityLevel_High <= level && level <= PriorityLevel_Low);
m_gicc->pmr = ToGicPriorityValue(level);
}
void SetEdge(s32 irq) const {
u32 cfg = m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
cfg &= ~(0x3 << (2 * (irq % (BITSIZEOF(u32) / 2))));
cfg |= (0x2 << (2 * (irq % (BITSIZEOF(u32) / 2))));
m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
}
void SetLevel(s32 irq) const {
u32 cfg = m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)];
cfg &= ~(0x3 << (2 * (irq % (BITSIZEOF(u32) / 2))));
cfg |= (0x0 << (2 * (irq % (BITSIZEOF(u32) / 2))));
m_gicd->icfgr[irq / (BITSIZEOF(u32) / 2)] = cfg;
}
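/* Illustrative example (not from the source): GICD_ICFGRn packs 2 configuration bits */
/* per interrupt, 16 interrupts per 32-bit word. For irq = 35 this selects word */
/* 35 / 16 = 2 and bit offset 2 * (35 % 16) = 6; writing 0x2 into bits [7:6] makes the */
/* interrupt edge-triggered and 0x0 makes it level-sensitive. */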
void SendInterProcessorInterrupt(s32 irq, u64 core_mask) {
MESOSPHERE_ASSERT(IsSoftware(irq));
m_gicd->sgir = GetCpuTargetListMask(irq, core_mask);
}
void SendInterProcessorInterrupt(s32 irq) {
MESOSPHERE_ASSERT(IsSoftware(irq));
m_gicd->sgir = GicDistributor::SgirTargetListFilter_Others | irq;
}
void EndOfInterrupt(u32 irq) const {
m_gicc->eoir = irq;
}
bool IsInterruptDefined(s32 irq) const {
const s32 num_interrupts = std::min(32 + 32 * (m_gicd->typer & 0x1F), static_cast<u32>(NumInterrupts));
return (0 <= irq && irq < num_interrupts);
}
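/* Note: GICD_TYPER.ITLinesNumber (bits [4:0]) encodes the number of implemented */
/* interrupt lines as 32 * (N + 1), which is why the count above is computed as */
/* 32 + 32 * (typer & 0x1F) before clamping to NumInterrupts. */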
public:
static constexpr ALWAYS_INLINE bool IsSoftware(s32 id) {
MESOSPHERE_ASSERT(0 <= id && id < NumInterrupts);
return id < NumSoftwareInterrupts;
}
static constexpr ALWAYS_INLINE bool IsLocal(s32 id) {
MESOSPHERE_ASSERT(0 <= id && id < NumInterrupts);
return id < NumLocalInterrupts;
}
static constexpr ALWAYS_INLINE bool IsGlobal(s32 id) {
MESOSPHERE_ASSERT(0 <= id && id < NumInterrupts);
return NumLocalInterrupts <= id;
}
static constexpr size_t GetGlobalInterruptIndex(s32 id) {
MESOSPHERE_ASSERT(IsGlobal(id));
return id - NumLocalInterrupts;
}
static constexpr size_t GetLocalInterruptIndex(s32 id) {
MESOSPHERE_ASSERT(IsLocal(id));
return id;
}
private:
static constexpr size_t PriorityShift = BITSIZEOF(u8) - util::CountTrailingZeros(NumPriorityLevels);
static_assert(PriorityShift < BITSIZEOF(u8));
static_assert(util::IsPowerOfTwo(NumPriorityLevels));
static constexpr ALWAYS_INLINE u8 ToGicPriorityValue(s32 level) {
return (level << PriorityShift) | ((1 << PriorityShift) - 1);
}
static constexpr ALWAYS_INLINE s32 FromGicPriorityValue(u8 priority) {
return (priority >> PriorityShift) & (NumPriorityLevels - 1);
}
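/* Illustrative example (not from the source): with NumPriorityLevels = 4, */
/* PriorityShift = 8 - 2 = 6, so ToGicPriorityValue(2) = (2 << 6) | 0x3F = 0xBF and */
/* FromGicPriorityValue(0xBF) = (0xBF >> 6) & 3 = 2; only the top bits survive the */
/* round trip, matching hardware that implements a limited number of priority bits. */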
static constexpr ALWAYS_INLINE s32 GetCpuTargetListMask(s32 irq, u64 core_mask) {
MESOSPHERE_ASSERT(IsSoftware(irq));
MESOSPHERE_ASSERT(core_mask < (1ul << cpu::NumCores));
return GicDistributor::SgirTargetListFilter_CpuTargetList | irq | (static_cast<u16>(core_mask) << GicDistributor::SgirCpuTargetListShift);
}
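/* Illustrative example (not from the source): GetCpuTargetListMask(3, 0b0101) yields */
/* SgirTargetListFilter_CpuTargetList | 3 | (0x5 << 16) = 0x00050003, i.e. software */
/* interrupt 3 targeted at cores 0 and 2. */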
static ALWAYS_INLINE s32 GetGicMask(s32 core_id) {
return s_mask[core_id];
}
ALWAYS_INLINE void SetGicMask(s32 core_id) const {
s_mask[core_id] = m_gicd->itargetsr.bytes[0];
}
NOINLINE void SetupInterruptLines(s32 core_id) const;
};
}
| 10,105 | C++ | .h | 243 | 29.987654 | 154 | 0.563359 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

8,870 | kern_init_elf.hpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/include/mesosphere/init/kern_init_elf.hpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vapours.hpp>
#ifdef ATMOSPHERE_ARCH_ARM64
#include <mesosphere/init/kern_init_elf64.hpp>
namespace ams::kern::init::Elf {
using namespace ams::kern::init::Elf::Elf64;
enum RelocationType {
R_ARCHITECTURE_RELATIVE = 0x403, /* Real name R_AARCH64_RELATIVE */
};
}
#else
#error "Unknown Architecture"
#endif
namespace ams::kern::init::Elf {
/* API to apply relocations or call init array. */
void ApplyRelocations(uintptr_t base_address, const Dyn *dynamic);
void CallInitArrayFuncs(uintptr_t init_array_start, uintptr_t init_array_end);
}
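/* A minimal sketch of how R_ARCHITECTURE_RELATIVE entries are typically applied */
/* (illustrative only; the real ApplyRelocations lives in the corresponding .cpp, must */
/* locate the relocation table via the DT_RELA/DT_RELASZ dynamic tags, and the standard */
/* Elf64_Rela field names below may differ from this header's wrappers): */
/* */
/*     for (const Rela *r = rela_start; r != rela_end; ++r) { */
/*         if ((r->r_info & 0xFFFFFFFF) == R_ARCHITECTURE_RELATIVE) { */
/*             *reinterpret_cast<u64 *>(base_address + r->r_offset) = base_address + r->r_addend; */
/*         } */
/*     } */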
| 1,264 | C++ | .h | 33 | 34.787879 | 82 | 0.726754 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false |