Dataset schema (column · dtype · observed range / classes):

| column | dtype | range / classes |
|---|---|---|
| id | int64 | 0 to 755k |
| file_name | stringlengths | 3 to 109 |
| file_path | stringlengths | 13 to 185 |
| content | stringlengths | 31 to 9.38M |
| size | int64 | 31 to 9.38M |
| language | stringclasses | 1 value |
| extension | stringclasses | 11 values |
| total_lines | int64 | 1 to 340k |
| avg_line_length | float64 | 2.18 to 149k |
| max_line_length | int64 | 7 to 2.22M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | stringlengths | 6 to 65 |
| repo_stars | int64 | 100 to 47.3k |
| repo_forks | int64 | 0 to 12k |
| repo_open_issues | int64 | 0 to 3.4k |
| repo_license | stringclasses | 9 values |
| repo_extraction_date | stringclasses | 92 values |
| exact_duplicates_redpajama | bool | 2 classes |
| near_duplicates_redpajama | bool | 2 classes |
| exact_duplicates_githubcode | bool | 2 classes |
| exact_duplicates_stackv2 | bool | 1 class |
| exact_duplicates_stackv1 | bool | 2 classes |
| near_duplicates_githubcode | bool | 2 classes |
| near_duplicates_stackv1 | bool | 2 classes |
| near_duplicates_stackv2 | bool | 1 class |
---

id: 6,924 · file_name: charger_api.cpp
file_path: Atmosphere-NX_Atmosphere/libraries/libexosphere/source/charger/charger_api.cpp
content:
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <exosphere.hpp>
namespace ams::charger {
namespace {
/* https://www.ti.com/lit/ds/symlink/bq24193.pdf */
constexpr inline int I2cAddressBq24193 = 0x6B;
constexpr inline int Bq24193RegisterInputSourceControl = 0x00;
/* 8.5.1.1 EN_HIZ */
enum EnHiZ : u8 {
EnHiZ_Disable = (0u << 7),
EnHiZ_Enable = (1u << 7),
EnHiZ_Mask = (1u << 7),
};
}
bool IsHiZMode() {
return (i2c::QueryByte(i2c::Port_1, I2cAddressBq24193, Bq24193RegisterInputSourceControl) & EnHiZ_Mask) == EnHiZ_Enable;
}
void EnterHiZMode() {
u8 ctrl = i2c::QueryByte(i2c::Port_1, I2cAddressBq24193, Bq24193RegisterInputSourceControl);
ctrl &= ~EnHiZ_Mask;
ctrl |= EnHiZ_Enable;
i2c::SendByte(i2c::Port_1, I2cAddressBq24193, Bq24193RegisterInputSourceControl, ctrl);
}
void ExitHiZMode() {
u8 ctrl = i2c::QueryByte(i2c::Port_1, I2cAddressBq24193, Bq24193RegisterInputSourceControl);
ctrl &= ~EnHiZ_Mask;
ctrl |= EnHiZ_Disable;
i2c::SendByte(i2c::Port_1, I2cAddressBq24193, Bq24193RegisterInputSourceControl, ctrl);
}
}
size: 1,825 · language: C++ · extension: .cpp · total_lines: 44 · avg_line_length: 35.636364 · max_line_length: 128 · alphanum_fraction: 0.678531
repo_name: Atmosphere-NX/Atmosphere · repo_stars: 14,324 · repo_forks: 1,207 · repo_open_issues: 54 · repo_license: GPL-2.0 · repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
duplicate flags (exact and near, vs. redpajama/githubcode/stackv1/stackv2): all false
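charger_api.cpp drives a single bit (EN_HIZ, bit 7 of the BQ24193 input source control register) with a read-modify-write over I2C. Below is a minimal, self-contained sketch of that read-modify-write pattern; the "register" here is a plain byte in memory rather than a real I2C device, and SetHiZ is a hypothetical helper, not part of the file.

```cpp
#include <cstdint>
#include <cstdio>

/* Hypothetical stand-in for the device register; the real driver goes through
 * i2c::QueryByte/i2c::SendByte as in charger_api.cpp above. */
static uint8_t g_input_source_control = 0x30;

constexpr uint8_t EnHiZ_Enable  = 1u << 7;
constexpr uint8_t EnHiZ_Disable = 0u << 7;
constexpr uint8_t EnHiZ_Mask    = 1u << 7;

/* Same shape as EnterHiZMode/ExitHiZMode: read, clear the field, OR in the new value, write back. */
void SetHiZ(bool enable) {
    uint8_t ctrl = g_input_source_control;              /* i2c::QueryByte(...) */
    ctrl &= static_cast<uint8_t>(~EnHiZ_Mask);
    ctrl |= enable ? EnHiZ_Enable : EnHiZ_Disable;
    g_input_source_control = ctrl;                      /* i2c::SendByte(..., ctrl) */
}

int main() {
    SetHiZ(true);
    std::printf("EN_HIZ enabled: %d\n", (g_input_source_control & EnHiZ_Mask) == EnHiZ_Enable);
    SetHiZ(false);
    std::printf("EN_HIZ enabled: %d\n", (g_input_source_control & EnHiZ_Mask) == EnHiZ_Enable);
    return 0;
}
```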
---

id: 6,925 · file_name: kern_debug_log.cpp
file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_debug_log.cpp
content:
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#include "kern_debug_log_impl.hpp"
namespace ams::kern {
namespace {
constinit KSpinLock g_debug_log_lock;
constinit bool g_initialized_impl;
/* NOTE: Nintendo's print buffer is size 0x100. */
constinit char g_print_buffer[0x400];
void PutString(const char *str) {
/* Only print if the implementation is initialized. */
if (AMS_UNLIKELY(!g_initialized_impl)) {
return;
}
#if defined(MESOSPHERE_DEBUG_LOG_USE_SEMIHOSTING)
KDebugLogImpl::PutStringBySemihosting(str);
#else
while (*str) {
/* Get a character. */
const char c = *(str++);
/* Print the character. */
if (c == '\n') {
KDebugLogImpl::PutChar('\r');
}
KDebugLogImpl::PutChar(c);
}
KDebugLogImpl::Flush();
#endif
}
#if defined(MESOSPHERE_ENABLE_DEBUG_PRINT)
Result PutUserString(ams::kern::svc::KUserPointer<const char *> user_str, size_t len) {
/* Only print if the implementation is initialized. */
if (!g_initialized_impl) {
R_SUCCEED();
}
#if defined(MESOSPHERE_DEBUG_LOG_USE_SEMIHOSTING)
/* TODO: should we do this properly? */
KDebugLogImpl::PutStringBySemihosting(user_str.GetUnsafePointer());
MESOSPHERE_UNUSED(len);
#else
for (size_t i = 0; i < len; ++i) {
/* Get a character. */
char c;
R_TRY(user_str.CopyArrayElementTo(std::addressof(c), i));
/* Print the character. */
if (c == '\n') {
KDebugLogImpl::PutChar('\r');
}
KDebugLogImpl::PutChar(c);
}
KDebugLogImpl::Flush();
#endif
R_SUCCEED();
}
#endif
ALWAYS_INLINE void FormatU64(char * const dst, u64 value) {
/* Adjust, so that we can print the value backwards. */
char *cur = dst + 2 * sizeof(value);
/* Format the value into the buffer (as %016lx). */
while (cur > dst) {
/* Extract the digit. */
const auto digit = value & 0xF;
value >>= 4;
*(--cur) = (digit <= 9) ? ('0' + digit) : ('a' + digit - 10);
}
}
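/* [Editor's aside, not part of the original file: FormatU64 above fills 16 hex
   digits by extracting the low nibble and shifting right, writing backwards from
   the end of the buffer. A self-contained sketch of the same technique; the name
   FormatHex64 and the NUL terminator are additions for easy testing.] */
inline void FormatHex64(char (&dst)[17], unsigned long long value) {
    dst[16] = '\0';
    char *cur = dst + 16;
    while (cur > dst) {
        const auto digit = value & 0xF;
        value >>= 4;
        *(--cur) = (digit <= 9) ? ('0' + digit) : ('a' + digit - 10);
    }
}
/* Usage: char buf[17]; FormatHex64(buf, 0x123ull); yields "0000000000000123". */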
}
void KDebugLog::Initialize() {
if (KTargetSystem::IsDebugLoggingEnabled()) {
KScopedInterruptDisable di;
KScopedSpinLock lk(g_debug_log_lock);
if (!g_initialized_impl) {
g_initialized_impl = KDebugLogImpl::Initialize();
}
}
}
void KDebugLog::Printf(const char *format, ...) {
if (KTargetSystem::IsDebugLoggingEnabled()) {
::std::va_list vl;
va_start(vl, format);
VPrintf(format, vl);
va_end(vl);
}
}
void KDebugLog::VPrintf(const char *format, ::std::va_list vl) {
if (KTargetSystem::IsDebugLoggingEnabled()) {
KScopedInterruptDisable di;
KScopedSpinLock lk(g_debug_log_lock);
VSNPrintf(g_print_buffer, util::size(g_print_buffer), format, vl);
PutString(g_print_buffer);
}
}
void KDebugLog::VSNPrintf(char *dst, const size_t dst_size, const char *format, ::std::va_list vl) {
::ams::util::TVSNPrintf(dst, dst_size, format, vl);
}
void KDebugLog::LogException(const char *str) {
if (KTargetSystem::IsDebugLoggingEnabled()) {
/* Get the current program ID. */
/* NOTE: Nintendo does this after printing the string, */
/* but it seems wise to avoid holding the lock/disabling interrupts */
/* for longer than is strictly necessary. */
char suffix[18];
if (const auto *cur_process = GetCurrentProcessPointer(); AMS_LIKELY(cur_process != nullptr)) {
FormatU64(suffix, cur_process->GetProgramId());
suffix[16] = '\n';
suffix[17] = '\x00';
} else {
suffix[0] = '\n';
suffix[1] = '\x00';
}
KScopedInterruptDisable di;
KScopedSpinLock lk(g_debug_log_lock);
/* Log the string. */
PutString(str);
/* Log the program id (and newline) suffix. */
PutString(suffix);
}
}
Result KDebugLog::PrintUserString(ams::kern::svc::KUserPointer<const char *> user_str, size_t len) {
/* If printing is enabled, print the user string. */
#if defined(MESOSPHERE_ENABLE_DEBUG_PRINT)
if (KTargetSystem::IsDebugLoggingEnabled()) {
KScopedInterruptDisable di;
KScopedSpinLock lk(g_debug_log_lock);
R_TRY(PutUserString(user_str, len));
}
#else
MESOSPHERE_UNUSED(user_str, len);
#endif
R_SUCCEED();
}
void KDebugLog::Save() {
if (KTargetSystem::IsDebugLoggingEnabled()) {
KScopedInterruptDisable di;
KScopedSpinLock lk(g_debug_log_lock);
KDebugLogImpl::Save();
}
}
void KDebugLog::Restore() {
if (KTargetSystem::IsDebugLoggingEnabled()) {
KScopedInterruptDisable di;
KScopedSpinLock lk(g_debug_log_lock);
KDebugLogImpl::Restore();
}
}
}
size: 6,280 · language: C++ · extension: .cpp · total_lines: 160 · avg_line_length: 28.20625 · max_line_length: 107 · alphanum_fraction: 0.549564
repo_name: Atmosphere-NX/Atmosphere · repo_stars: 14,324 · repo_forks: 1,207 · repo_open_issues: 54 · repo_license: GPL-2.0 · repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
duplicate flags (exact and near, vs. redpajama/githubcode/stackv1/stackv2): all false
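PutString above performs LF-to-CRLF translation on the fly, since raw debug sinks such as UARTs usually expect a carriage return before each newline. Here is a minimal sketch of that translation loop, buffering into a std::string instead of writing to hardware; TranslateNewlines is a hypothetical name.

```cpp
#include <cstdio>
#include <string>

/* Mirror of the '\n' handling in PutString: prefix each newline with '\r'. */
std::string TranslateNewlines(const char *str) {
    std::string out;
    while (*str) {
        const char c = *(str++);
        if (c == '\n') {
            out.push_back('\r');
        }
        out.push_back(c);
    }
    return out;
}

int main() {
    const std::string t = TranslateNewlines("line1\nline2\n");
    std::printf("%zu chars\n", t.size()); /* 14: two '\r' characters inserted */
    return 0;
}
```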
---

id: 6,926 · file_name: kern_k_debug_base.cpp
file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_debug_base.cpp
content:
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
ALWAYS_INLINE KDebugBase *GetDebugObject(KProcess *process) {
return static_cast<KDebugBase *>(process->GetDebugObject());
}
}
void KDebugBase::Initialize() {
/* Clear the continue flags. */
m_continue_flags = 0;
m_is_force_debug_prod = GetCurrentProcess().CanForceDebugProd();
}
bool KDebugBase::Is64Bit() const {
MESOSPHERE_ASSERT(m_lock.IsLockedByCurrentThread());
MESOSPHERE_ASSERT(m_is_attached);
KProcess * const process = this->GetProcessUnsafe();
MESOSPHERE_ASSERT(process != nullptr);
return process->Is64Bit();
}
Result KDebugBase::QueryMemoryInfo(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, KProcessAddress address) {
/* Check that we're attached. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Open a reference to our process. */
R_UNLESS(this->OpenProcess(), svc::ResultProcessTerminated());
/* Close our reference to our process when we're done. */
ON_SCOPE_EXIT { this->CloseProcess(); };
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Check that we're still attached now that we're locked. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Get the process pointer. */
KProcess * const process = this->GetProcessUnsafe();
/* Check that the process isn't terminated. */
R_UNLESS(!process->IsTerminated(), svc::ResultProcessTerminated());
/* Query the mapping's info. */
KMemoryInfo info;
R_TRY(process->GetPageTable().QueryInfo(std::addressof(info), out_page_info, address));
/* Write output. */
*out_memory_info = info.GetSvcMemoryInfo();
R_SUCCEED();
}
Result KDebugBase::ReadMemory(KProcessAddress buffer, KProcessAddress address, size_t size) {
/* Check that we're attached. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Open a reference to our process. */
R_UNLESS(this->OpenProcess(), svc::ResultProcessTerminated());
/* Close our reference to our process when we're done. */
ON_SCOPE_EXIT { this->CloseProcess(); };
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Check that we're still attached now that we're locked. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Get the process pointer. */
KProcess * const process = this->GetProcessUnsafe();
/* Check that the process isn't terminated. */
R_UNLESS(!process->IsTerminated(), svc::ResultProcessTerminated());
/* Get the page tables. */
KProcessPageTable &debugger_pt = GetCurrentProcess().GetPageTable();
KProcessPageTable &target_pt = process->GetPageTable();
/* Verify that the regions are in range. */
R_UNLESS(target_pt.Contains(address, size), svc::ResultInvalidCurrentMemory());
R_UNLESS(debugger_pt.Contains(buffer, size), svc::ResultInvalidCurrentMemory());
/* Iterate over the target process's memory blocks. */
KProcessAddress cur_address = address;
size_t remaining = size;
while (remaining > 0) {
/* Get the current memory info. */
KMemoryInfo info;
ams::svc::PageInfo pi;
R_TRY(target_pt.QueryInfo(std::addressof(info), std::addressof(pi), cur_address));
/* Check that the memory is accessible. */
R_UNLESS(info.GetState() != static_cast<KMemoryState>(ams::svc::MemoryState_Inaccessible), svc::ResultInvalidAddress());
/* Get the current size. */
const size_t cur_size = std::min(remaining, info.GetEndAddress() - GetInteger(cur_address));
/* Read the memory. */
if (info.GetSvcState() != ams::svc::MemoryState_Io) {
/* The memory is normal memory. */
R_TRY(target_pt.ReadDebugMemory(GetVoidPointer(buffer), cur_address, cur_size, this->IsForceDebugProd()));
} else {
/* Only allow IO memory to be read if not force debug prod. */
R_UNLESS(!this->IsForceDebugProd(), svc::ResultInvalidCurrentMemory());
/* The memory is IO memory. */
R_TRY(target_pt.ReadDebugIoMemory(GetVoidPointer(buffer), cur_address, cur_size, info.GetState()));
}
/* Advance. */
buffer += cur_size;
cur_address += cur_size;
remaining -= cur_size;
}
R_SUCCEED();
}
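/* [Editor's aside, not part of the original file: the loop above copies at most
   "bytes left in the current memory block" per iteration, so a request spanning
   several blocks is validated and copied block by block. A sketch of the clamping
   step, with hypothetical names:] */
inline unsigned long long NextChunkSize(unsigned long long cur_address, unsigned long long remaining, unsigned long long block_end) {
    const unsigned long long left_in_block = block_end - cur_address;
    return (remaining < left_in_block) ? remaining : left_in_block;
}
/* Mirrors: std::min(remaining, info.GetEndAddress() - GetInteger(cur_address)). */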
Result KDebugBase::WriteMemory(KProcessAddress buffer, KProcessAddress address, size_t size) {
/* Check that we're attached. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Open a reference to our process. */
R_UNLESS(this->OpenProcess(), svc::ResultProcessTerminated());
/* Close our reference to our process when we're done. */
ON_SCOPE_EXIT { this->CloseProcess(); };
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Check that we're still attached now that we're locked. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Get the process pointer. */
KProcess * const process = this->GetProcessUnsafe();
/* Check that the process isn't terminated. */
R_UNLESS(!process->IsTerminated(), svc::ResultProcessTerminated());
/* Get the page tables. */
KProcessPageTable &debugger_pt = GetCurrentProcess().GetPageTable();
KProcessPageTable &target_pt = process->GetPageTable();
/* Verify that the regions are in range. */
R_UNLESS(target_pt.Contains(address, size), svc::ResultInvalidCurrentMemory());
R_UNLESS(debugger_pt.Contains(buffer, size), svc::ResultInvalidCurrentMemory());
/* Iterate over the target process's memory blocks. */
KProcessAddress cur_address = address;
size_t remaining = size;
while (remaining > 0) {
/* Get the current memory info. */
KMemoryInfo info;
ams::svc::PageInfo pi;
R_TRY(target_pt.QueryInfo(std::addressof(info), std::addressof(pi), cur_address));
/* Check that the memory is accessible. */
R_UNLESS(info.GetState() != static_cast<KMemoryState>(ams::svc::MemoryState_Inaccessible), svc::ResultInvalidAddress());
/* Get the current size. */
const size_t cur_size = std::min(remaining, info.GetEndAddress() - GetInteger(cur_address));
/* Write the memory. */
if (info.GetSvcState() != ams::svc::MemoryState_Io) {
/* The memory is normal memory. */
R_TRY(target_pt.WriteDebugMemory(cur_address, GetVoidPointer(buffer), cur_size));
} else {
/* The memory is IO memory. */
R_TRY(target_pt.WriteDebugIoMemory(cur_address, GetVoidPointer(buffer), cur_size, info.GetState()));
}
/* Advance. */
buffer += cur_size;
cur_address += cur_size;
remaining -= cur_size;
}
R_SUCCEED();
}
Result KDebugBase::GetRunningThreadInfo(ams::svc::LastThreadContext *out_context, u64 *out_thread_id) {
/* Check that we're attached. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Open a reference to our process. */
R_UNLESS(this->OpenProcess(), svc::ResultProcessTerminated());
/* Close our reference to our process when we're done. */
ON_SCOPE_EXIT { this->CloseProcess(); };
/* Get the process pointer. */
KProcess * const process = this->GetProcessUnsafe();
/* Get the thread info. */
{
KScopedSchedulerLock sl;
/* Get the running thread. */
const s32 core_id = GetCurrentCoreId();
KThread *thread = process->GetRunningThread(core_id);
/* We want to check that the thread is actually running. */
/* If it is, then the scheduler will have just switched from the thread to the current thread. */
/* This implies exactly one switch will have taken place, and the current thread will be on the current core. */
const auto &scheduler = Kernel::GetScheduler(core_id);
if (!(thread != nullptr && thread->GetActiveCore() == core_id && process->GetRunningThreadSwitchCount(core_id) + 1 == scheduler.GetSwitchCount())) {
/* The most recent thread switch was from a thread other than the expected one to the current one. */
/* We want to use the appropriate result to inform userland about what thread we switched from. */
if (scheduler.GetIdleCount() + 1 == scheduler.GetSwitchCount()) {
/* We switched from the idle thread. */
R_THROW(svc::ResultNoThread());
} else {
/* We switched from some other unknown thread. */
R_THROW(svc::ResultUnknownThread());
}
}
/* Get the thread's exception context. */
GetExceptionContext(thread)->GetSvcThreadContext(out_context);
/* Get the thread's id. */
*out_thread_id = thread->GetId();
}
R_SUCCEED();
}
Result KDebugBase::Attach(KProcess *target) {
/* Check that the process isn't null. */
MESOSPHERE_ASSERT(target != nullptr);
/* Clear ourselves as unattached. */
m_is_attached = false;
/* Attach to the process. */
{
/* Lock both ourselves, the target process, and the scheduler. */
KScopedLightLock state_lk(target->GetStateLock());
KScopedLightLock list_lk(target->GetListLock());
KScopedLightLock this_lk(m_lock);
KScopedSchedulerLock sl;
/* Check that the process isn't already being debugged. */
R_UNLESS(!target->IsAttachedToDebugger(), svc::ResultBusy());
{
/* Ensure the process is in a state that allows for debugging. */
const KProcess::State state = target->GetState();
switch (state) {
case KProcess::State_Created:
case KProcess::State_Running:
/* Created and running processes can only be debugged if the debugger is not ForceDebugProd. */
R_UNLESS(!this->IsForceDebugProd(), svc::ResultInvalidState());
break;
case KProcess::State_Crashed:
break;
case KProcess::State_CreatedAttached:
case KProcess::State_RunningAttached:
case KProcess::State_DebugBreak:
R_THROW(svc::ResultBusy());
case KProcess::State_Terminating:
case KProcess::State_Terminated:
R_THROW(svc::ResultProcessTerminated());
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
/* Attach to the target. */
m_process_holder.Attach(target);
m_is_attached = true;
/* Set ourselves as the process's attached object. */
m_old_process_state = target->SetDebugObject(this);
/* Send an event for our attaching to the process. */
this->PushDebugEvent(ams::svc::DebugEvent_CreateProcess, nullptr, 0);
/* Send events for attaching to each thread in the process. */
{
auto end = target->GetThreadList().end();
for (auto it = target->GetThreadList().begin(); it != end; ++it) {
/* Request that we suspend the thread. */
it->RequestSuspend(KThread::SuspendType_Debug);
/* If the thread is in a state for us to do so, generate the event. */
if (const auto thread_state = it->GetState(); thread_state == KThread::ThreadState_Runnable || thread_state == KThread::ThreadState_Waiting) {
/* Mark the thread as attached to. */
it->SetDebugAttached();
/* Send the event. */
const uintptr_t params[2] = { it->GetId(), GetInteger(it->GetThreadLocalRegionAddress()) };
this->PushDebugEvent(ams::svc::DebugEvent_CreateThread, params, util::size(params));
}
}
}
/* Send the process's jit debug info, if relevant. */
if (KEventInfo *jit_info = target->GetJitDebugInfo(); jit_info != nullptr) {
this->EnqueueDebugEventInfo(jit_info);
}
/* Send an exception event to represent our attaching. */
const uintptr_t params[1] = { static_cast<uintptr_t>(ams::svc::DebugException_DebuggerAttached) };
this->PushDebugEvent(ams::svc::DebugEvent_Exception, params, util::size(params));
/* Signal. */
this->NotifyAvailable();
}
}
R_SUCCEED();
}
Result KDebugBase::BreakProcess() {
/* Check that we're attached. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Open a reference to our process. */
R_UNLESS(this->OpenProcess(), svc::ResultProcessTerminated());
/* Close our reference to our process when we're done. */
ON_SCOPE_EXIT { this->CloseProcess(); };
/* Get the process pointer. */
KProcess * const target = this->GetProcessUnsafe();
/* Lock both ourselves, the target process, and the scheduler. */
KScopedLightLock state_lk(target->GetStateLock());
KScopedLightLock list_lk(target->GetListLock());
KScopedLightLock this_lk(m_lock);
KScopedSchedulerLock sl;
/* Check that we're still attached now that we're locked. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Check that the process isn't terminated. */
R_UNLESS(!target->IsTerminated(), svc::ResultProcessTerminated());
/* Get the currently active threads. */
constexpr u64 ThreadIdNoThread = -1ll;
constexpr u64 ThreadIdUnknownThread = -2ll;
uintptr_t debug_info_params[1 + cpu::NumCores] = { static_cast<uintptr_t>(ams::svc::DebugException_DebuggerBreak), };
for (size_t i = 0; i < cpu::NumCores; ++i) {
/* Get the currently running thread. */
KThread *thread = target->GetRunningThread(i);
/* Check that the thread's idle count is correct. */
if (target->GetRunningThreadIdleCount(i) == Kernel::GetScheduler(i).GetIdleCount()) {
if (thread != nullptr && static_cast<size_t>(thread->GetActiveCore()) == i) {
debug_info_params[1 + i] = thread->GetId();
} else {
/* We found an unknown thread. */
debug_info_params[1 + i] = ThreadIdUnknownThread;
}
} else {
/* We didn't find a thread. */
debug_info_params[1 + i] = ThreadIdNoThread;
}
}
/* Suspend all the threads in the process. */
{
auto end = target->GetThreadList().end();
for (auto it = target->GetThreadList().begin(); it != end; ++it) {
/* Request that we suspend the thread. */
it->RequestSuspend(KThread::SuspendType_Debug);
}
}
/* Send an exception event to represent our breaking the process. */
this->PushDebugEvent(ams::svc::DebugEvent_Exception, debug_info_params, util::size(debug_info_params));
/* Signal. */
this->NotifyAvailable();
/* Put the process into the debug break state. */
target->SetDebugBreak();
R_SUCCEED();
}
Result KDebugBase::TerminateProcess() {
/* Check that we're attached. */
R_UNLESS(this->IsAttached(), ResultSuccess());
/* Open a reference to our process. */
R_UNLESS(this->OpenProcess(), ResultSuccess());
/* Close our reference to our process when we're done. */
ON_SCOPE_EXIT { this->CloseProcess(); };
/* Get the process pointer. */
KProcess * const target = this->GetProcessUnsafe();
/* Terminate the process. */
target->Terminate();
R_SUCCEED();
}
Result KDebugBase::GetThreadContext(ams::svc::ThreadContext *out, u64 thread_id, u32 context_flags) {
/* Check that we're attached. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Open a reference to our process. */
R_UNLESS(this->OpenProcess(), svc::ResultProcessTerminated());
/* Close our reference to our process when we're done. */
ON_SCOPE_EXIT { this->CloseProcess(); };
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Check that we're still attached now that we're locked. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Get the process pointer. */
KProcess * const process = this->GetProcessUnsafe();
/* Get the thread from its id. */
KThread *thread = KThread::GetThreadFromId(thread_id);
R_UNLESS(thread != nullptr, svc::ResultInvalidId());
ON_SCOPE_EXIT { thread->Close(); };
/* Verify that the thread is owned by our process. */
R_UNLESS(process == thread->GetOwnerProcess(), svc::ResultInvalidId());
/* Verify that the thread isn't terminated. */
R_UNLESS(thread->GetState() != KThread::ThreadState_Terminated, svc::ResultTerminationRequested());
/* Check that the thread is not the current one. */
/* NOTE: Nintendo does not check this, and thus the following loop will deadlock. */
R_UNLESS(thread != GetCurrentThreadPointer(), svc::ResultInvalidId());
/* Try to get the thread context until the thread isn't current on any core. */
while (true) {
KScopedSchedulerLock sl;
/* The thread needs to be requested for debug suspension. */
R_UNLESS(thread->IsSuspendRequested(KThread::SuspendType_Debug), svc::ResultInvalidState());
/* If the thread's raw state isn't runnable, check if it's current on some core. */
if (thread->GetRawState() != KThread::ThreadState_Runnable) {
bool current = false;
for (auto i = 0; i < static_cast<s32>(cpu::NumCores); ++i) {
if (thread == Kernel::GetScheduler(i).GetSchedulerCurrentThread()) {
current = true;
break;
}
}
/* If the thread is current, retry until it isn't. */
if (current) {
continue;
}
}
/* Get the thread context. */
static_assert(std::derived_from<KDebug, KDebugBase>);
R_RETURN(static_cast<KDebug *>(this)->GetThreadContextImpl(out, thread, context_flags));
}
}
Result KDebugBase::SetThreadContext(const ams::svc::ThreadContext &ctx, u64 thread_id, u32 context_flags) {
/* Check that we're attached. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Open a reference to our process. */
R_UNLESS(this->OpenProcess(), svc::ResultProcessTerminated());
/* Close our reference to our process when we're done. */
ON_SCOPE_EXIT { this->CloseProcess(); };
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Check that we're still attached now that we're locked. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Get the process pointer. */
KProcess * const process = this->GetProcessUnsafe();
/* Get the thread from its id. */
KThread *thread = KThread::GetThreadFromId(thread_id);
R_UNLESS(thread != nullptr, svc::ResultInvalidId());
ON_SCOPE_EXIT { thread->Close(); };
/* Verify that the thread is owned by our process. */
R_UNLESS(process == thread->GetOwnerProcess(), svc::ResultInvalidId());
/* Verify that the thread isn't terminated. */
R_UNLESS(thread->GetState() != KThread::ThreadState_Terminated, svc::ResultTerminationRequested());
/* Check that the thread is not the current one. */
/* NOTE: Nintendo does not check this, and thus the following loop will deadlock. */
R_UNLESS(thread != GetCurrentThreadPointer(), svc::ResultInvalidId());
/* Try to get the thread context until the thread isn't current on any core. */
while (true) {
KScopedSchedulerLock sl;
/* The thread needs to be requested for debug suspension. */
R_UNLESS(thread->IsSuspendRequested(KThread::SuspendType_Debug), svc::ResultInvalidState());
/* If the thread's raw state isn't runnable, check if it's current on some core. */
if (thread->GetRawState() != KThread::ThreadState_Runnable) {
bool current = false;
for (auto i = 0; i < static_cast<s32>(cpu::NumCores); ++i) {
if (thread == Kernel::GetScheduler(i).GetSchedulerCurrentThread()) {
current = true;
break;
}
}
/* If the thread is current, retry until it isn't. */
if (current) {
continue;
}
}
/* Update thread single-step state. */
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
{
if ((context_flags & ams::svc::ThreadContextFlag_SetSingleStep) != 0) {
/* Set single step. */
thread->SetHardwareSingleStep();
/* If no other thread flags are present, we're done. */
R_SUCCEED_IF((context_flags & ~ams::svc::ThreadContextFlag_SetSingleStep) == 0);
} else if ((context_flags & ams::svc::ThreadContextFlag_ClearSingleStep) != 0) {
/* Clear single step. */
thread->ClearHardwareSingleStep();
/* If no other thread flags are present, we're done. */
R_SUCCEED_IF((context_flags & ~ams::svc::ThreadContextFlag_ClearSingleStep) == 0);
}
}
#endif
/* Verify that the thread's svc state is valid. */
if (thread->IsCallingSvc()) {
const u8 svc_id = thread->GetSvcId();
const bool is_valid_svc = svc_id == svc::SvcId_Break ||
svc_id == svc::SvcId_ReturnFromException;
R_UNLESS(is_valid_svc, svc::ResultInvalidState());
}
/* Set the thread context. */
static_assert(std::derived_from<KDebug, KDebugBase>);
R_RETURN(static_cast<KDebug *>(this)->SetThreadContextImpl(ctx, thread, context_flags));
}
}
Result KDebugBase::ContinueDebug(const u32 flags, const u64 *thread_ids, size_t num_thread_ids) {
/* Check that we're attached. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Open a reference to our process. */
R_UNLESS(this->OpenProcess(), svc::ResultProcessTerminated());
/* Close our reference to our process when we're done. */
ON_SCOPE_EXIT { this->CloseProcess(); };
/* Get the process pointer. */
KProcess * const target = this->GetProcessUnsafe();
/* Lock both ourselves, the target process, and the scheduler. */
KScopedLightLock state_lk(target->GetStateLock());
KScopedLightLock list_lk(target->GetListLock());
KScopedLightLock this_lk(m_lock);
KScopedSchedulerLock sl;
/* Check that we're still attached now that we're locked. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Check that the process isn't terminated. */
R_UNLESS(!target->IsTerminated(), svc::ResultProcessTerminated());
/* Check that we have no pending events. */
R_UNLESS(m_event_info_list.empty(), svc::ResultBusy());
/* Clear the target's JIT debug info. */
target->ClearJitDebugInfo();
/* Set our continue flags. */
m_continue_flags = flags;
/* Iterate over threads, continuing them as we should. */
bool has_debug_break_thread = false;
{
/* Parse our flags. */
const bool exception_handled = (m_continue_flags & ams::svc::ContinueFlag_ExceptionHandled) != 0;
const bool continue_all = (m_continue_flags & ams::svc::ContinueFlag_ContinueAll) != 0;
const bool continue_others = (m_continue_flags & ams::svc::ContinueFlag_ContinueOthers) != 0;
/* Update each thread. */
auto end = target->GetThreadList().end();
for (auto it = target->GetThreadList().begin(); it != end; ++it) {
/* Determine if we should continue the thread. */
bool should_continue;
{
if (continue_all) {
/* Continue all threads. */
should_continue = true;
} else if (continue_others) {
/* Continue the thread if it doesn't match one of our target ids. */
const u64 thread_id = it->GetId();
should_continue = true;
for (size_t i = 0; i < num_thread_ids; ++i) {
if (thread_ids[i] == thread_id) {
should_continue = false;
break;
}
}
} else {
/* Continue the thread if it matches one of our target ids. */
const u64 thread_id = it->GetId();
should_continue = false;
for (size_t i = 0; i < num_thread_ids; ++i) {
if (thread_ids[i] == thread_id) {
should_continue = true;
break;
}
}
}
}
/* Continue the thread if we should. */
if (should_continue) {
if (exception_handled) {
it->SetDebugExceptionResult(svc::ResultStopProcessingException());
}
it->Resume(KThread::SuspendType_Debug);
}
/* If the thread has debug suspend requested, note so. */
if (it->IsSuspendRequested(KThread::SuspendType_Debug)) {
has_debug_break_thread = true;
}
}
}
/* Set the process's state. */
if (has_debug_break_thread) {
target->SetDebugBreak();
} else {
target->SetAttached();
}
R_SUCCEED();
}
KEventInfo *KDebugBase::CreateDebugEvent(ams::svc::DebugEvent event, u64 cur_thread_id, const uintptr_t *params, size_t num_params) {
/* Allocate a new event. */
KEventInfo *info = KEventInfo::Allocate();
/* Populate the event info. */
if (info != nullptr) {
/* Set common fields. */
info->event = event;
info->thread_id = 0;
info->flags = ams::svc::DebugEventFlag_Stopped;
/* Set event specific fields. */
switch (event) {
case ams::svc::DebugEvent_CreateProcess:
{
/* Check parameters. */
MESOSPHERE_ASSERT(params == nullptr);
MESOSPHERE_ASSERT(num_params == 0);
}
break;
case ams::svc::DebugEvent_CreateThread:
{
/* Check parameters. */
MESOSPHERE_ASSERT(params != nullptr);
MESOSPHERE_ASSERT(num_params == 2);
/* Set the thread id. */
info->thread_id = params[0];
/* Set the thread creation info. */
info->info.create_thread.thread_id = params[0];
info->info.create_thread.tls_address = params[1];
}
break;
case ams::svc::DebugEvent_ExitProcess:
{
/* Check parameters. */
MESOSPHERE_ASSERT(params != nullptr);
MESOSPHERE_ASSERT(num_params == 1);
/* Set the exit reason. */
info->info.exit_process.reason = static_cast<ams::svc::ProcessExitReason>(params[0]);
/* Clear the thread id and flags. */
info->thread_id = 0;
info->flags = 0;
}
break;
case ams::svc::DebugEvent_ExitThread:
{
/* Check parameters. */
MESOSPHERE_ASSERT(params != nullptr);
MESOSPHERE_ASSERT(num_params == 2);
/* Set the thread id. */
info->thread_id = params[0];
/* Set the exit reason. */
info->info.exit_thread.reason = static_cast<ams::svc::ThreadExitReason>(params[1]);
}
break;
case ams::svc::DebugEvent_Exception:
{
/* Check parameters. */
MESOSPHERE_ASSERT(params != nullptr);
MESOSPHERE_ASSERT(num_params >= 1);
/* Set the thread id. */
info->thread_id = cur_thread_id;
/* Set the exception type, and clear the count. */
info->info.exception.exception_type = static_cast<ams::svc::DebugException>(params[0]);
info->info.exception.exception_data_count = 0;
switch (static_cast<ams::svc::DebugException>(params[0])) {
case ams::svc::DebugException_UndefinedInstruction:
case ams::svc::DebugException_BreakPoint:
case ams::svc::DebugException_UndefinedSystemCall:
{
MESOSPHERE_ASSERT(num_params >= 3);
info->info.exception.exception_address = params[1];
info->info.exception.exception_data_count = 1;
info->info.exception.exception_data[0] = params[2];
}
break;
case ams::svc::DebugException_DebuggerAttached:
{
info->thread_id = 0;
info->info.exception.exception_address = 0;
}
break;
case ams::svc::DebugException_UserBreak:
{
MESOSPHERE_ASSERT(num_params >= 2);
info->info.exception.exception_address = params[1];
info->info.exception.exception_data_count = 0;
for (size_t i = 2; i < num_params; ++i) {
info->info.exception.exception_data[info->info.exception.exception_data_count++] = params[i];
}
}
break;
case ams::svc::DebugException_DebuggerBreak:
{
info->thread_id = 0;
info->info.exception.exception_address = 0;
info->info.exception.exception_data_count = 0;
for (size_t i = 1; i < num_params; ++i) {
info->info.exception.exception_data[info->info.exception.exception_data_count++] = params[i];
}
}
break;
case ams::svc::DebugException_MemorySystemError:
{
info->info.exception.exception_address = 0;
}
break;
case ams::svc::DebugException_InstructionAbort:
case ams::svc::DebugException_DataAbort:
case ams::svc::DebugException_AlignmentFault:
default:
{
MESOSPHERE_ASSERT(num_params >= 2);
info->info.exception.exception_address = params[1];
}
break;
}
}
break;
}
}
return info;
}
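/* [Editor's aside, not part of the original file: CreateDebugEvent populates a
   tagged structure whose payload depends on the event type. A minimal sketch of
   that shape; every name below is hypothetical:] */
enum class SketchEvent { CreateThread, ExitThread };
struct SketchEventInfo {
    SketchEvent event;
    unsigned long long thread_id;
    union {
        struct { unsigned long long thread_id, tls_address; } create_thread;
        struct { int reason; } exit_thread;
    } info;
};
inline SketchEventInfo MakeCreateThread(unsigned long long tid, unsigned long long tls) {
    SketchEventInfo out{};
    out.event = SketchEvent::CreateThread;
    out.thread_id = tid;                    /* common field */
    out.info.create_thread = { tid, tls };  /* event-specific payload */
    return out;
}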
void KDebugBase::PushDebugEvent(ams::svc::DebugEvent event, const uintptr_t *params, size_t num_params) {
/* Create and enqueue an event. */
if (KEventInfo *new_info = CreateDebugEvent(event, GetCurrentThread().GetId(), params, num_params); new_info != nullptr) {
this->EnqueueDebugEventInfo(new_info);
}
}
void KDebugBase::EnqueueDebugEventInfo(KEventInfo *info) {
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* Push the event to the back of the list. */
m_event_info_list.push_back(*info);
}
template<typename T> requires (std::same_as<T, ams::svc::lp64::DebugEventInfo> || std::same_as<T, ams::svc::ilp32::DebugEventInfo>)
Result KDebugBase::GetDebugEventInfoImpl(T *out) {
/* Check that we're attached. */
R_UNLESS(this->IsAttached(), svc::ResultProcessTerminated());
/* Open a reference to our process. */
R_UNLESS(this->OpenProcess(), svc::ResultProcessTerminated());
/* Close our reference to our process when we're done. */
ON_SCOPE_EXIT { this->CloseProcess(); };
/* Get the process pointer. */
KProcess * const process = this->GetProcessUnsafe();
/* Pop an event info from our queue. */
KEventInfo *info = nullptr;
{
KScopedSchedulerLock sl;
/* Check that we have an event to dequeue. */
R_UNLESS(!m_event_info_list.empty(), svc::ResultNoEvent());
/* Pop the event from the front of the queue. */
info = std::addressof(m_event_info_list.front());
m_event_info_list.pop_front();
}
MESOSPHERE_ASSERT(info != nullptr);
/* Free the event info once we're done with it. */
ON_SCOPE_EXIT { KEventInfo::Free(info); };
/* Set common fields. */
out->type = info->event;
out->thread_id = info->thread_id;
out->flags = info->flags;
/* Set event specific fields. */
switch (info->event) {
case ams::svc::DebugEvent_CreateProcess:
{
out->info.create_process.program_id = process->GetProgramId();
out->info.create_process.process_id = process->GetId();
out->info.create_process.flags = process->GetCreateProcessFlags();
out->info.create_process.user_exception_context_address = GetInteger(process->GetProcessLocalRegionAddress());
std::memcpy(out->info.create_process.name, process->GetName(), sizeof(out->info.create_process.name));
}
break;
case ams::svc::DebugEvent_CreateThread:
{
out->info.create_thread.thread_id = info->info.create_thread.thread_id;
out->info.create_thread.tls_address = info->info.create_thread.tls_address;
}
break;
case ams::svc::DebugEvent_ExitProcess:
{
out->info.exit_process.reason = info->info.exit_process.reason;
}
break;
case ams::svc::DebugEvent_ExitThread:
{
out->info.exit_thread.reason = info->info.exit_thread.reason;
}
break;
case ams::svc::DebugEvent_Exception:
{
out->info.exception.type = info->info.exception.exception_type;
out->info.exception.address = info->info.exception.exception_address;
switch (info->info.exception.exception_type) {
case ams::svc::DebugException_UndefinedInstruction:
{
MESOSPHERE_ASSERT(info->info.exception.exception_data_count == 1);
/* Only save the instruction if the caller is not force debug prod. */
if (this->IsForceDebugProd()) {
out->info.exception.specific.undefined_instruction.insn = 0;
} else {
out->info.exception.specific.undefined_instruction.insn = info->info.exception.exception_data[0];
}
}
break;
case ams::svc::DebugException_BreakPoint:
{
MESOSPHERE_ASSERT(info->info.exception.exception_data_count == 1);
out->info.exception.specific.break_point.type = static_cast<ams::svc::BreakPointType>(info->info.exception.exception_data[0]);
out->info.exception.specific.break_point.address = 0;
}
break;
case ams::svc::DebugException_UserBreak:
{
MESOSPHERE_ASSERT(info->info.exception.exception_data_count == 3);
out->info.exception.specific.user_break.break_reason = static_cast<ams::svc::BreakReason>(info->info.exception.exception_data[0]);
out->info.exception.specific.user_break.address = info->info.exception.exception_data[1];
out->info.exception.specific.user_break.size = info->info.exception.exception_data[2];
}
break;
case ams::svc::DebugException_DebuggerBreak:
{
/* TODO: How does this work with non-4 cpu count? */
static_assert(cpu::NumCores <= 4);
MESOSPHERE_ASSERT(info->info.exception.exception_data_count == cpu::NumCores);
out->info.exception.specific.debugger_break.active_thread_ids[0] = info->info.exception.exception_data[0];
out->info.exception.specific.debugger_break.active_thread_ids[1] = info->info.exception.exception_data[1];
out->info.exception.specific.debugger_break.active_thread_ids[2] = info->info.exception.exception_data[2];
out->info.exception.specific.debugger_break.active_thread_ids[3] = info->info.exception.exception_data[3];
}
break;
case ams::svc::DebugException_UndefinedSystemCall:
{
MESOSPHERE_ASSERT(info->info.exception.exception_data_count == 1);
out->info.exception.specific.undefined_system_call.id = info->info.exception.exception_data[0];
}
break;
default:
{
/* ... */
}
break;
}
}
break;
}
R_SUCCEED();
}
Result KDebugBase::GetDebugEventInfo(ams::svc::lp64::DebugEventInfo *out) {
R_RETURN(this->GetDebugEventInfoImpl(out));
}
Result KDebugBase::GetDebugEventInfo(ams::svc::ilp32::DebugEventInfo *out) {
R_RETURN(this->GetDebugEventInfoImpl(out));
}
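/* [Editor's aside, not part of the original file: GetDebugEventInfoImpl is written
   once as a template constrained to exactly the two ABI layouts, and the public
   lp64/ilp32 overloads simply forward to it. A core-language sketch of that
   dispatch pattern; all names here are hypothetical:] */
struct SketchLp64  { unsigned long long address; };
struct SketchIlp32 { unsigned int address; };
template<typename A, typename B> constexpr bool SketchSameAs = false;
template<typename A> constexpr bool SketchSameAs<A, A> = true;
template<typename T> requires (SketchSameAs<T, SketchLp64> || SketchSameAs<T, SketchIlp32>)
void FillEventInfo(T *out) {
    out->address = 0x1000; /* one implementation serves both pointer widths */
}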
void KDebugBase::Finalize() {
/* Perform base finalization. */
KSynchronizationObject::Finalize();
/* Perform post-synchronization finalization. */
this->OnFinalizeSynchronizationObject();
}
void KDebugBase::OnFinalizeSynchronizationObject() {
/* Detach from our process, if we have one. */
if (this->IsAttached() && this->OpenProcess()) {
/* Close the process when we're done with it. */
ON_SCOPE_EXIT { this->CloseProcess(); };
/* Get the process pointer. */
KProcess * const process = this->GetProcessUnsafe();
/* Lock both ourselves and the target process. */
KScopedLightLock state_lk(process->GetStateLock());
KScopedLightLock list_lk(process->GetListLock());
KScopedLightLock this_lk(m_lock);
/* Check that we're still attached. */
if (m_is_attached) {
KScopedSchedulerLock sl;
/* Detach ourselves from the process. */
process->ClearDebugObject(m_old_process_state);
/* Release all threads. */
const bool resume = (process->GetState() != KProcess::State_Crashed);
{
auto end = process->GetThreadList().end();
for (auto it = process->GetThreadList().begin(); it != end; ++it) {
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
/* Clear the thread's single-step state. */
it->ClearHardwareSingleStep();
#endif
if (resume) {
/* If the process isn't crashed, resume threads. */
it->Resume(KThread::SuspendType_Debug);
} else {
/* Otherwise, suspend them. */
it->RequestSuspend(KThread::SuspendType_Debug);
}
}
}
/* Note we're now unattached. */
m_is_attached = false;
/* Close the initial reference opened to our process. */
this->CloseProcess();
}
}
/* Free any pending events. */
{
KScopedSchedulerLock sl;
while (!m_event_info_list.empty()) {
KEventInfo *info = std::addressof(m_event_info_list.front());
m_event_info_list.pop_front();
KEventInfo::Free(info);
}
}
}
bool KDebugBase::IsSignaled() const {
bool empty;
{
KScopedSchedulerLock sl;
empty = m_event_info_list.empty();
}
return !empty || !m_is_attached || this->GetProcessUnsafe()->IsTerminated();
}
Result KDebugBase::ProcessDebugEvent(ams::svc::DebugEvent event, const uintptr_t *params, size_t num_params) {
/* Get the current process. */
KProcess *process = GetCurrentProcessPointer();
/* If the event is CreateThread and we've already attached, there's nothing to do. */
if (event == ams::svc::DebugEvent_CreateThread) {
R_SUCCEED_IF(GetCurrentThread().IsAttachedToDebugger());
}
while (true) {
/* Lock the process and the scheduler. */
KScopedLightLock state_lk(process->GetStateLock());
KScopedLightLock list_lk(process->GetListLock());
KScopedSchedulerLock sl;
/* If the current thread is terminating, we can't process an event. */
R_SUCCEED_IF(GetCurrentThread().IsTerminationRequested());
/* Get the debug object. If we have none, there's nothing to process. */
KDebugBase *debug = GetDebugObject(process);
R_SUCCEED_IF(debug == nullptr);
/* If the event is an exception and we don't have exception events enabled, we can't handle the event. */
if (event == ams::svc::DebugEvent_Exception && (debug->m_continue_flags & ams::svc::ContinueFlag_EnableExceptionEvent) == 0) {
GetCurrentThread().SetDebugExceptionResult(ResultSuccess());
R_THROW(svc::ResultNotHandled());
}
/* If the current thread is suspended, retry. */
if (GetCurrentThread().IsSuspended()) {
continue;
}
/* Suspend all the process's threads. */
{
auto end = process->GetThreadList().end();
for (auto it = process->GetThreadList().begin(); it != end; ++it) {
it->RequestSuspend(KThread::SuspendType_Debug);
}
}
/* Push the event. */
debug->PushDebugEvent(event, params, num_params);
debug->NotifyAvailable();
/* Put the process into the debug break state. */
process->SetDebugBreak();
/* If the event is an exception, set the result and clear single step. */
if (event == ams::svc::DebugEvent_Exception) {
GetCurrentThread().SetDebugExceptionResult(ResultSuccess());
}
/* Exit our retry loop. */
break;
}
/* If the event is an exception, get the exception result. */
if (event == ams::svc::DebugEvent_Exception) {
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* If the thread is terminating, we can't process the exception. */
R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultStopProcessingException());
/* Get the debug object. */
if (KDebugBase *debug = GetDebugObject(process); debug != nullptr) {
/* If we have one, check the debug exception. */
R_RETURN(GetCurrentThread().GetDebugExceptionResult());
} else {
/* We don't have a debug object, so stop processing the exception. */
R_THROW(svc::ResultStopProcessingException());
}
}
R_SUCCEED();
}
Result KDebugBase::OnDebugEvent(ams::svc::DebugEvent event, const uintptr_t *params, size_t num_params) {
if (KProcess *process = GetCurrentProcessPointer(); process != nullptr && process->IsAttachedToDebugger()) {
R_RETURN(ProcessDebugEvent(event, params, num_params));
}
R_SUCCEED();
}
Result KDebugBase::OnExitProcess(KProcess *process) {
MESOSPHERE_ASSERT(process != nullptr);
/* Check if we're attached to a debugger. */
if (process->IsAttachedToDebugger()) {
/* If we are, lock the scheduler. */
KScopedSchedulerLock sl;
/* Push the event. */
if (KDebugBase *debug = GetDebugObject(process); debug != nullptr) {
const uintptr_t params[1] = { static_cast<uintptr_t>(ams::svc::ProcessExitReason_ExitProcess) };
debug->PushDebugEvent(ams::svc::DebugEvent_ExitProcess, params, util::size(params));
debug->NotifyAvailable();
}
}
R_SUCCEED();
}
Result KDebugBase::OnTerminateProcess(KProcess *process) {
MESOSPHERE_ASSERT(process != nullptr);
/* Check if we're attached to a debugger. */
if (process->IsAttachedToDebugger()) {
/* If we are, lock the scheduler. */
KScopedSchedulerLock sl;
/* Push the event. */
if (KDebugBase *debug = GetDebugObject(process); debug != nullptr) {
const uintptr_t params[1] = { static_cast<uintptr_t>(ams::svc::ProcessExitReason_TerminateProcess) };
debug->PushDebugEvent(ams::svc::DebugEvent_ExitProcess, params, util::size(params));
debug->NotifyAvailable();
}
}
R_SUCCEED();
}
Result KDebugBase::OnExitThread(KThread *thread) {
MESOSPHERE_ASSERT(thread != nullptr);
/* Check if we're attached to a debugger. */
if (KProcess *process = thread->GetOwnerProcess(); process != nullptr && process->IsAttachedToDebugger()) {
/* If we are, submit the event. */
const uintptr_t params[2] = { thread->GetId(), static_cast<uintptr_t>(thread->IsTerminationRequested() ? ams::svc::ThreadExitReason_TerminateThread : ams::svc::ThreadExitReason_ExitThread) };
R_TRY(OnDebugEvent(ams::svc::DebugEvent_ExitThread, params, util::size(params)));
}
R_SUCCEED();
}
}
size: 51,213 · language: C++ · extension: .cpp · total_lines: 955 · avg_line_length: 38.012565 · max_line_length: 203 · alphanum_fraction: 0.53862
repo_name: Atmosphere-NX/Atmosphere · repo_stars: 14,324 · repo_forks: 1,207 · repo_open_issues: 54 · repo_license: GPL-2.0 · repo_extraction_date: 9/20/2024, 9:26:25 PM (Europe/Amsterdam)
duplicate flags (exact and near, vs. redpajama/githubcode/stackv1/stackv2): all false
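Nearly every method in kern_k_debug_base.cpp follows the same guard discipline: R_UNLESS early-returns a typed error, R_TRY propagates a callee's failure, and only then does the real work run. Below is a minimal stand-in for that Result-macro idiom with an illustrative Result type; it mimics the shape of Atmosphere's macros rather than reproducing their real definitions.

```cpp
#include <cstdio>

/* Illustrative result type and guard macros, assumption: not Atmosphere's real ones. */
struct Result { int value; bool IsSuccess() const { return value == 0; } };
constexpr Result ResultSuccess{0};
constexpr Result ResultProcessTerminated{1};

#define R_UNLESS(cond, res) do { if (!(cond)) { return (res); } } while (0)
#define R_TRY(expr)         do { const Result r_ = (expr); if (!r_.IsSuccess()) { return r_; } } while (0)

Result CheckAttached(bool attached) {
    R_UNLESS(attached, ResultProcessTerminated); /* early-return on failed precondition */
    return ResultSuccess;
}

Result DoDebugOp(bool attached) {
    R_TRY(CheckAttached(attached)); /* propagate failure, like the kernel code */
    return ResultSuccess;
}

int main() {
    std::printf("attached: %d\n", DoDebugOp(true).value);  /* 0 */
    std::printf("detached: %d\n", DoDebugOp(false).value); /* 1 */
    return 0;
}
```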
---

id: 6,927 · file_name: kern_k_capabilities.cpp
file_path: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_capabilities.cpp
content:
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
Result KCapabilities::Initialize(const u32 *caps, s32 num_caps, KProcessPageTable *page_table) {
/* We're initializing an initial process. */
m_svc_access_flags.Reset();
m_irq_access_flags.Reset();
m_debug_capabilities = {0};
m_handle_table_size = 0;
m_intended_kernel_version = {0};
m_program_type = 0;
/* Initial processes may run on all cores. */
constexpr u64 VirtMask = cpu::VirtualCoreMask;
constexpr u64 PhysMask = cpu::ConvertVirtualCoreMaskToPhysical(VirtMask);
m_core_mask = VirtMask;
m_phys_core_mask = PhysMask;
/* Initial processes may use any user priority they like. */
m_priority_mask = ~0xFul;
/* Here, Nintendo sets the kernel version to the current kernel version. */
/* We will follow suit and set the version to the highest supported kernel version. */
m_intended_kernel_version.Set<KernelVersion::MajorVersion>(ams::svc::SupportedKernelMajorVersion);
m_intended_kernel_version.Set<KernelVersion::MinorVersion>(ams::svc::SupportedKernelMinorVersion);
/* Parse the capabilities array. */
R_RETURN(this->SetCapabilities(caps, num_caps, page_table));
}
Result KCapabilities::Initialize(svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KProcessPageTable *page_table) {
/* We're initializing a user process. */
m_svc_access_flags.Reset();
m_irq_access_flags.Reset();
m_debug_capabilities = {0};
m_handle_table_size = 0;
m_intended_kernel_version = {0};
m_program_type = 0;
/* User processes must specify what cores/priorities they can use. */
m_core_mask = 0;
m_priority_mask = 0;
/* Parse the user capabilities array. */
R_RETURN(this->SetCapabilities(user_caps, num_caps, page_table));
}
Result KCapabilities::SetCorePriorityCapability(const util::BitPack32 cap) {
/* We can't set core/priority if we've already set them. */
R_UNLESS(m_core_mask == 0, svc::ResultInvalidArgument());
R_UNLESS(m_priority_mask == 0, svc::ResultInvalidArgument());
/* Validate the core/priority. */
const auto min_core = cap.Get<CorePriority::MinimumCoreId>();
const auto max_core = cap.Get<CorePriority::MaximumCoreId>();
const auto max_prio = cap.Get<CorePriority::LowestThreadPriority>();
const auto min_prio = cap.Get<CorePriority::HighestThreadPriority>();
R_UNLESS(min_core <= max_core, svc::ResultInvalidCombination());
R_UNLESS(min_prio <= max_prio, svc::ResultInvalidCombination());
R_UNLESS(max_core < cpu::NumVirtualCores, svc::ResultInvalidCoreId());
MESOSPHERE_ASSERT(max_prio < BITSIZEOF(u64));
/* Set core mask. */
for (auto core_id = min_core; core_id <= max_core; core_id++) {
m_core_mask |= (1ul << core_id);
}
MESOSPHERE_ASSERT((m_core_mask & cpu::VirtualCoreMask) == m_core_mask);
/* Set physical core mask. */
m_phys_core_mask = cpu::ConvertVirtualCoreMaskToPhysical(m_core_mask);
/* Set priority mask. */
for (auto prio = min_prio; prio <= max_prio; prio++) {
m_priority_mask |= (1ul << prio);
}
/* We must have some core/priority we can use. */
R_UNLESS(m_core_mask != 0, svc::ResultInvalidArgument());
R_UNLESS(m_priority_mask != 0, svc::ResultInvalidArgument());
/* Processes must not have access to kernel thread priorities. */
R_UNLESS((m_priority_mask & 0xF) == 0, svc::ResultInvalidArgument());
R_SUCCEED();
}
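/* [Editor's aside, not part of the original file: the core/priority masks above are
   built by setting one bit per allowed id. An inclusive [min, max] bit range can
   equivalently be computed without a loop; a sketch with hypothetical names:] */
inline unsigned long long RangeMask(unsigned min_bit, unsigned max_bit) {
    /* Bits [min_bit, max_bit], assuming max_bit < 63 so the shift cannot overflow. */
    return ((1ull << (max_bit + 1)) - 1ull) & ~((1ull << min_bit) - 1ull);
}
/* RangeMask(0, 3) == 0xF, matching the core-mask loop above for cores 0 through 3. */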
Result KCapabilities::SetSyscallMaskCapability(const util::BitPack32 cap, u32 &set_svc) {
/* Validate the index. */
const auto mask = cap.Get<SyscallMask::Mask>();
const auto index = cap.Get<SyscallMask::Index>();
const u32 index_flag = (1u << index);
R_UNLESS((set_svc & index_flag) == 0, svc::ResultInvalidCombination());
set_svc |= index_flag;
/* Set SVCs. */
for (size_t i = 0; i < SyscallMask::Mask::Count; i++) {
const u32 svc_id = SyscallMask::Mask::Count * index + i;
if (mask & (1u << i)) {
R_UNLESS(this->SetSvcAllowed(svc_id), svc::ResultOutOfRange());
}
}
R_SUCCEED();
}
Result KCapabilities::MapRange(const util::BitPack32 cap, const util::BitPack32 size_cap, KProcessPageTable *page_table) {
/* Get/validate address/size */
#if defined(MESOSPHERE_ENABLE_LARGE_PHYSICAL_ADDRESS_CAPABILITIES)
const u64 phys_addr = static_cast<u64>(cap.Get<MapRange::Address>() | (size_cap.Get<MapRangeSize::AddressHigh>() << MapRange::Address::Count)) * PageSize;
#else
const u64 phys_addr = static_cast<u64>(cap.Get<MapRange::Address>()) * PageSize;
/* Validate reserved bits are unused. */
R_UNLESS(size_cap.Get<MapRangeSize::Reserved>() == 0, svc::ResultOutOfRange());
#endif
const size_t num_pages = size_cap.Get<MapRangeSize::Pages>();
const size_t size = num_pages * PageSize;
R_UNLESS(phys_addr == GetInteger(KPhysicalAddress(phys_addr)), svc::ResultInvalidAddress());
R_UNLESS(num_pages != 0, svc::ResultInvalidSize());
R_UNLESS(phys_addr < phys_addr + size, svc::ResultInvalidAddress());
R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, svc::ResultInvalidAddress());
/* Do the mapping. */
const KMemoryPermission perm = cap.Get<MapRange::ReadOnly>() ? KMemoryPermission_UserRead : KMemoryPermission_UserReadWrite;
if (size_cap.Get<MapRangeSize::Normal>()) {
R_RETURN(page_table->MapStatic(phys_addr, size, perm));
} else {
R_RETURN(page_table->MapIo(phys_addr, size, perm));
}
}
Result KCapabilities::MapIoPage(const util::BitPack32 cap, KProcessPageTable *page_table) {
/* Get/validate address/size */
const u64 phys_addr = cap.Get<MapIoPage::Address>() * PageSize;
const size_t num_pages = 1;
const size_t size = num_pages * PageSize;
R_UNLESS(phys_addr == GetInteger(KPhysicalAddress(phys_addr)), svc::ResultInvalidAddress());
R_UNLESS(num_pages != 0, svc::ResultInvalidSize());
R_UNLESS(phys_addr < phys_addr + size, svc::ResultInvalidAddress());
R_UNLESS(((phys_addr + size - 1) & ~PhysicalMapAllowedMask) == 0, svc::ResultInvalidAddress());
/* Do the mapping. */
R_RETURN(page_table->MapIo(phys_addr, size, KMemoryPermission_UserReadWrite));
}
template<typename F>
ALWAYS_INLINE Result KCapabilities::ProcessMapRegionCapability(const util::BitPack32 cap, F f) {
/* Define the allowed memory regions. */
constexpr const KMemoryRegionType MemoryRegions[] = {
KMemoryRegionType_None,
KMemoryRegionType_KernelTraceBuffer,
KMemoryRegionType_OnMemoryBootImage,
KMemoryRegionType_DTB,
};
/* Extract regions/read only. */
const RegionType types[3] = { cap.Get<MapRegion::Region0>(), cap.Get<MapRegion::Region1>(), cap.Get<MapRegion::Region2>(), };
const bool ro[3] = { cap.Get<MapRegion::ReadOnly0>(), cap.Get<MapRegion::ReadOnly1>(), cap.Get<MapRegion::ReadOnly2>(), };
for (size_t i = 0; i < util::size(types); i++) {
const auto type = types[i];
const auto perm = ro[i] ? KMemoryPermission_UserRead : KMemoryPermission_UserReadWrite;
switch (type) {
case RegionType::NoMapping:
break;
case RegionType::KernelTraceBuffer:
/* NOTE: This does not match official, but is used to make pre-processing hbl capabilities in userland unnecessary. */
/* If ktrace isn't enabled, allow ktrace to succeed without mapping anything. */
if constexpr (!ams::kern::IsKTraceEnabled) {
break;
}
case RegionType::OnMemoryBootImage:
case RegionType::DTB:
R_TRY(f(MemoryRegions[static_cast<u32>(type)], perm));
break;
default:
R_THROW(svc::ResultNotFound());
}
}
R_SUCCEED();
}
Result KCapabilities::MapRegion(const util::BitPack32 cap, KProcessPageTable *page_table) {
/* Map each region into the process's page table. */
R_RETURN(ProcessMapRegionCapability(cap, [page_table] ALWAYS_INLINE_LAMBDA (KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
R_RETURN(page_table->MapRegion(region_type, perm));
}));
}
Result KCapabilities::CheckMapRegion(const util::BitPack32 cap) {
/* Check that each region has a physical backing store. */
R_RETURN(ProcessMapRegionCapability(cap, [] ALWAYS_INLINE_LAMBDA (KMemoryRegionType region_type, KMemoryPermission perm) -> Result {
MESOSPHERE_UNUSED(perm);
R_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerived(region_type) != nullptr, svc::ResultOutOfRange());
R_SUCCEED();
}));
}
Result KCapabilities::SetInterruptPairCapability(const util::BitPack32 cap) {
/* Extract interrupts. */
const u32 ids[2] = { cap.Get<InterruptPair::InterruptId0>(), cap.Get<InterruptPair::InterruptId1>(), };
for (size_t i = 0; i < util::size(ids); i++) {
if (ids[i] != PaddingInterruptId) {
R_UNLESS(Kernel::GetInterruptManager().IsInterruptDefined(ids[i]), svc::ResultOutOfRange());
R_UNLESS(this->SetInterruptPermitted(ids[i]), svc::ResultOutOfRange());
}
}
R_SUCCEED();
}
Result KCapabilities::SetProgramTypeCapability(const util::BitPack32 cap) {
/* Validate. */
R_UNLESS(cap.Get<ProgramType::Reserved>() == 0, svc::ResultReservedUsed());
m_program_type = cap.Get<ProgramType::Type>();
R_SUCCEED();
}
Result KCapabilities::SetKernelVersionCapability(const util::BitPack32 cap) {
/* Ensure we haven't set our version before. */
R_UNLESS(m_intended_kernel_version.Get<KernelVersion::MajorVersion>() == 0, svc::ResultInvalidArgument());
/* Set, ensure that we set a valid version. */
m_intended_kernel_version = cap;
R_UNLESS(m_intended_kernel_version.Get<KernelVersion::MajorVersion>() != 0, svc::ResultInvalidArgument());
R_SUCCEED();
}
Result KCapabilities::SetHandleTableCapability(const util::BitPack32 cap) {
/* Validate. */
R_UNLESS(cap.Get<HandleTable::Reserved>() == 0, svc::ResultReservedUsed());
m_handle_table_size = cap.Get<HandleTable::Size>();
R_SUCCEED();
}
Result KCapabilities::SetDebugFlagsCapability(const util::BitPack32 cap) {
/* Validate. */
R_UNLESS(cap.Get<DebugFlags::Reserved>() == 0, svc::ResultReservedUsed());
u32 total = 0;
if (cap.Get<DebugFlags::AllowDebug>()) { ++total; }
if (cap.Get<DebugFlags::ForceDebugProd>()) { ++total; }
if (cap.Get<DebugFlags::ForceDebug>()) { ++total; }
R_UNLESS(total <= 1, svc::ResultInvalidCombination());
m_debug_capabilities.Set<DebugFlags::AllowDebug>(cap.Get<DebugFlags::AllowDebug>());
m_debug_capabilities.Set<DebugFlags::ForceDebugProd>(cap.Get<DebugFlags::ForceDebugProd>());
m_debug_capabilities.Set<DebugFlags::ForceDebug>(cap.Get<DebugFlags::ForceDebug>());
R_SUCCEED();
}
Result KCapabilities::SetCapability(const util::BitPack32 cap, u32 &set_flags, u32 &set_svc, KProcessPageTable *page_table) {
/* Validate this is a capability we can act on. */
const auto type = GetCapabilityType(cap);
R_UNLESS(type != CapabilityType::Invalid, svc::ResultInvalidArgument());
/* If the type is padding, we have no work to do. */
R_SUCCEED_IF(type == CapabilityType::Padding);
/* Check that we haven't already processed this capability. */
const auto flag = GetCapabilityFlag(type);
R_UNLESS(((set_flags & InitializeOnceFlags) & flag) == 0, svc::ResultInvalidCombination());
set_flags |= flag;
/* Process the capability. */
switch (type) {
case CapabilityType::CorePriority: R_RETURN(this->SetCorePriorityCapability(cap));
case CapabilityType::SyscallMask: R_RETURN(this->SetSyscallMaskCapability(cap, set_svc));
case CapabilityType::MapIoPage: R_RETURN(this->MapIoPage(cap, page_table));
case CapabilityType::MapRegion: R_RETURN(this->MapRegion(cap, page_table));
case CapabilityType::InterruptPair: R_RETURN(this->SetInterruptPairCapability(cap));
case CapabilityType::ProgramType: R_RETURN(this->SetProgramTypeCapability(cap));
case CapabilityType::KernelVersion: R_RETURN(this->SetKernelVersionCapability(cap));
case CapabilityType::HandleTable: R_RETURN(this->SetHandleTableCapability(cap));
case CapabilityType::DebugFlags: R_RETURN(this->SetDebugFlagsCapability(cap));
default: R_THROW(svc::ResultInvalidArgument());
}
}
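/* NOTE: the InitializeOnceFlags guard above restricts only the capability */
/* types whose flag lies inside that mask to a single occurrence per */
/* descriptor set; for types outside the mask (presumably the map */
/* capabilities, which may appear many times) the masked AND stays zero, so */
/* repeated occurrences pass the check. */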
Result KCapabilities::SetCapabilities(const u32 *caps, s32 num_caps, KProcessPageTable *page_table) {
u32 set_flags = 0, set_svc = 0;
for (s32 i = 0; i < num_caps; i++) {
const util::BitPack32 cap = { caps[i] };
if (GetCapabilityType(cap) == CapabilityType::MapRange) {
/* Check that the pair cap exists. */
R_UNLESS((++i) < num_caps, svc::ResultInvalidCombination());
/* Check the pair cap is a map range cap. */
const util::BitPack32 size_cap = { caps[i] };
R_UNLESS(GetCapabilityType(size_cap) == CapabilityType::MapRange, svc::ResultInvalidCombination());
/* Map the range. */
R_TRY(this->MapRange(cap, size_cap, page_table));
} else {
R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table));
}
}
R_SUCCEED();
}
Result KCapabilities::SetCapabilities(svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KProcessPageTable *page_table) {
u32 set_flags = 0, set_svc = 0;
for (s32 i = 0; i < num_caps; i++) {
/* Read the cap from userspace. */
u32 cap0;
R_TRY(user_caps.CopyArrayElementTo(std::addressof(cap0), i));
const util::BitPack32 cap = { cap0 };
if (GetCapabilityType(cap) == CapabilityType::MapRange) {
/* Check that the pair cap exists. */
R_UNLESS((++i) < num_caps, svc::ResultInvalidCombination());
/* Read the second cap from userspace. */
u32 cap1;
R_TRY(user_caps.CopyArrayElementTo(std::addressof(cap1), i));
/* Check the pair cap is a map range cap. */
const util::BitPack32 size_cap = { cap1 };
R_UNLESS(GetCapabilityType(size_cap) == CapabilityType::MapRange, svc::ResultInvalidCombination());
/* Map the range. */
R_TRY(this->MapRange(cap, size_cap, page_table));
} else {
R_TRY(this->SetCapability(cap, set_flags, set_svc, page_table));
}
}
R_SUCCEED();
}
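/* NOTE: An illustrative descriptor array containing a MapRange pair (the */
/* names and field meanings are invented for the sketch; only the pairing */
/* rule comes from the loops above): */
/* */
/*     const u32 caps[] = { */
/*         map_range_word_0,    (first word of the pair, e.g. the address) */
/*         map_range_word_1,    (second word; must also decode as MapRange) */
/*         other_capability,    (ordinary single-word capability) */
/*     }; */
/* */
/* A trailing MapRange word without its partner fails with */
/* svc::ResultInvalidCombination() in both overloads. */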
Result KCapabilities::CheckCapabilities(svc::KUserPointer<const u32 *> user_caps, s32 num_caps) {
for (s32 i = 0; i < num_caps; ++i) {
/* Read the cap from userspace. */
u32 cap0;
R_TRY(user_caps.CopyArrayElementTo(std::addressof(cap0), i));
/* Check the capability refers to a valid region. */
const util::BitPack32 cap = { cap0 };
if (GetCapabilityType(cap) == CapabilityType::MapRegion) {
R_TRY(CheckMapRegion(cap));
}
}
R_SUCCEED();
}
}
| 17,193 | C++ | .cpp | 307 | 45.944625 | 165 | 0.614378 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,928 | kern_k_wait_object.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_wait_object.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
class ThreadQueueImplForKWaitObjectSynchronize final : public KThreadQueueWithoutEndWait {
private:
KThread::WaiterList *m_wait_list;
KThread **m_thread;
public:
constexpr ThreadQueueImplForKWaitObjectSynchronize(KThread::WaiterList *wl, KThread **t) : KThreadQueueWithoutEndWait(), m_wait_list(wl), m_thread(t) { /* ... */ }
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Remove the thread from the wait list. */
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
/* If the result was a timeout and the thread is our wait object thread, cancel recursively. */
if (svc::ResultTimedOut::Includes(wait_result) && waiting_thread == *m_thread) {
for (auto &thread : *m_wait_list) {
thread.CancelWait(svc::ResultTimedOut(), false);
}
}
/* If the thread is our wait object thread, clear it. */
if (*m_thread == waiting_thread) {
*m_thread = nullptr;
}
/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};
}
Result KWaitObject::Synchronize(s64 timeout) {
/* Perform the wait. */
KHardwareTimer *timer;
KThread *cur_thread = GetCurrentThreadPointer();
ThreadQueueImplForKWaitObjectSynchronize wait_queue(std::addressof(m_wait_list), std::addressof(m_next_thread));
{
KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout);
/* Check that the thread isn't terminating. */
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
R_THROW(svc::ResultTerminationRequested());
}
/* Handle the case where the timeout is non-negative (i.e., not an infinite wait). */
if (timeout >= 0) {
/* Check if we're already waiting. */
if (m_next_thread != nullptr) {
slp.CancelSleep();
R_THROW(svc::ResultBusy());
}
/* If timeout is zero, handle the special case by canceling all waiting threads. */
if (timeout == 0) {
for (auto &thread : m_wait_list) {
thread.CancelWait(svc::ResultTimedOut(), false);
}
slp.CancelSleep();
R_SUCCEED();
}
}
/* If the timeout isn't infinite, register it as our next timeout. */
if (timeout > 0) {
wait_queue.SetHardwareTimer(timer);
m_next_thread = cur_thread;
}
/* Add the current thread to our wait list. */
m_wait_list.push_back(*cur_thread);
/* Wait until the timeout occurs. */
cur_thread->BeginWait(std::addressof(wait_queue));
}
R_SUCCEED();
}
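/* NOTE: A usage sketch for the timeout policy above (the unit of a positive */
/* timeout is assumed to be hardware-timer ticks; this file only shows that it */
/* is registered with the timer): */
/* */
/*     wait_object.Synchronize(-1);   (negative: wait indefinitely) */
/*     wait_object.Synchronize(0);    (zero: cancel all waiters, then succeed) */
/*     wait_object.Synchronize(t);    (t > 0: bounded wait; while one such */
/*                                     wait is pending, further non-negative */
/*                                     calls fail with svc::ResultBusy()) */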
}
| 3,984 | C++ | .cpp | 83 | 34.987952 | 179 | 0.56543 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,929 | kern_k_thread_local_page.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_thread_local_page.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
Result KThreadLocalPage::Initialize(KProcess *process) {
MESOSPHERE_ASSERT_THIS();
/* Set that this process owns us. */
m_owner = process;
/* Allocate a new page. */
KPageBuffer *page_buf = KPageBuffer::AllocateChecked<PageSize>();
R_UNLESS(page_buf != nullptr, svc::ResultOutOfMemory());
ON_RESULT_FAILURE { KPageBuffer::Free(page_buf); };
/* Map the address in. */
R_RETURN(m_owner->GetPageTable().MapPages(std::addressof(m_virt_addr), 1, PageSize, page_buf->GetPhysicalAddress(), KMemoryState_ThreadLocal, KMemoryPermission_UserReadWrite));
}
Result KThreadLocalPage::Finalize() {
MESOSPHERE_ASSERT_THIS();
/* Get the physical address of the page. */
KPhysicalAddress phys_addr = Null<KPhysicalAddress>;
MESOSPHERE_ABORT_UNLESS(m_owner->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), this->GetAddress()));
/* Unmap the page. */
R_TRY(m_owner->GetPageTable().UnmapPages(this->GetAddress(), 1, KMemoryState_ThreadLocal));
/* Free the page. */
KPageBuffer::FreeChecked<PageSize>(KPageBuffer::FromPhysicalAddress(phys_addr));
R_SUCCEED();
}
KProcessAddress KThreadLocalPage::Reserve() {
MESOSPHERE_ASSERT_THIS();
for (size_t i = 0; i < util::size(m_is_region_free); i++) {
if (m_is_region_free[i]) {
m_is_region_free[i] = false;
return this->GetRegionAddress(i);
}
}
return Null<KProcessAddress>;
}
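/* NOTE: Each page is carved into util::size(m_is_region_free) equal regions */
/* and Reserve() is a first-fit scan over that bitmap. Assuming the usual */
/* ams::svc::ThreadLocalRegionSize of 0x200 bytes (defined in the svc headers, */
/* not here), a 0x1000-byte page serves up to eight regions: */
/* */
/*     KProcessAddress tlr = tlp->Reserve();    (Null<KProcessAddress> if full) */
/*     if (tlr != Null<KProcessAddress>) { tlp->Release(tlr); } */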
void KThreadLocalPage::Release(KProcessAddress addr) {
MESOSPHERE_ASSERT_THIS();
m_is_region_free[this->GetRegionIndex(addr)] = true;
}
void *KThreadLocalPage::GetPointer() const {
MESOSPHERE_ASSERT_THIS();
KPhysicalAddress phys_addr;
MESOSPHERE_ABORT_UNLESS(m_owner->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), this->GetAddress()));
return static_cast<void *>(KPageBuffer::FromPhysicalAddress(phys_addr));
}
}
| 2,760 | C++ | .cpp | 60 | 39.15 | 184 | 0.673127 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,930 | kern_k_process.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_process.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
constexpr u64 InitialProcessIdMin = 1;
constexpr u64 InitialProcessIdMax = 0x50;
constexpr u64 ProcessIdMin = InitialProcessIdMax + 1;
constexpr u64 ProcessIdMax = std::numeric_limits<u64>::max();
constinit util::Atomic<u64> g_initial_process_id = InitialProcessIdMin;
constinit util::Atomic<u64> g_process_id = ProcessIdMin;
Result TerminateChildren(KProcess *process, const KThread *thread_to_not_terminate) {
/* Request that all child threads terminate. */
{
KScopedLightLock proc_lk(process->GetListLock());
KScopedSchedulerLock sl;
if (thread_to_not_terminate != nullptr && process->GetPinnedThread(GetCurrentCoreId()) == thread_to_not_terminate) {
/* NOTE: Here Nintendo unpins the current thread instead of the thread_to_not_terminate. */
/* This is valid because the only caller which uses non-nullptr as argument uses GetCurrentThreadPointer(), */
/* but it's still notable because it seems incorrect at first glance. */
process->UnpinCurrentThread();
}
auto &thread_list = process->GetThreadList();
for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
if (KThread *thread = std::addressof(*it); thread != thread_to_not_terminate) {
if (thread->GetState() != KThread::ThreadState_Terminated) {
thread->RequestTerminate();
}
}
}
}
/* Wait for all child threads to terminate. */
while (true) {
/* Get the next child. */
KThread *cur_child = nullptr;
{
KScopedLightLock proc_lk(process->GetListLock());
auto &thread_list = process->GetThreadList();
for (auto it = thread_list.begin(); it != thread_list.end(); ++it) {
if (KThread *thread = std::addressof(*it); thread != thread_to_not_terminate) {
if (thread->GetState() != KThread::ThreadState_Terminated) {
if (AMS_LIKELY(thread->Open())) {
cur_child = thread;
break;
}
}
}
}
}
/* If we didn't find any non-terminated children, we're done. */
if (cur_child == nullptr) {
break;
}
/* Terminate and close the thread. */
ON_SCOPE_EXIT { cur_child->Close(); };
if (const Result terminate_result = cur_child->Terminate(); svc::ResultTerminationRequested::Includes(terminate_result)) {
R_THROW(terminate_result);
}
}
R_SUCCEED();
}
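/* NOTE: TerminateChildren() is deliberately two-phase. Phase one requests */
/* termination for every child while holding the list and scheduler locks; */
/* phase two re-scans with only the list lock, opens one live child at a time, */
/* and blocks in Terminate() on it, so no lock is held across the wait. The */
/* two call shapes used in this file: */
/* */
/*     TerminateChildren(this, nullptr);                    (kill every thread) */
/*     TerminateChildren(this, GetCurrentThreadPointer());  (spare the caller) */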
class ThreadQueueImplForKProcessEnterUserException final : public KThreadQueue {
private:
KThread **m_exception_thread;
public:
constexpr ThreadQueueImplForKProcessEnterUserException(KThread **t) : KThreadQueue(), m_exception_thread(t) { /* ... */ }
virtual void EndWait(KThread *waiting_thread, Result wait_result) override {
/* Set the exception thread. */
*m_exception_thread = waiting_thread;
/* Invoke the base end wait handler. */
KThreadQueue::EndWait(waiting_thread, wait_result);
}
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Remove the thread as a waiter on its mutex owner. */
waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);
/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};
}
void KProcess::Finalize() {
/* Delete the process local region. */
this->DeleteThreadLocalRegion(m_plr_address);
/* Get the used memory size. */
const size_t used_memory_size = this->GetUsedNonSystemUserPhysicalMemorySize();
/* Finalize the page table. */
m_page_table.Finalize();
/* Finish using our system resource. */
{
if (m_system_resource->IsSecureResource()) {
/* Finalize optimized memory. If memory wasn't optimized, this is a no-op. */
Kernel::GetMemoryManager().FinalizeOptimizedMemory(this->GetId(), m_memory_pool);
}
m_system_resource->Close();
}
/* Free all shared memory infos. */
{
auto it = m_shared_memory_list.begin();
while (it != m_shared_memory_list.end()) {
KSharedMemoryInfo *info = std::addressof(*it);
KSharedMemory *shmem = info->GetSharedMemory();
while (!info->Close()) {
shmem->Close();
}
shmem->Close();
it = m_shared_memory_list.erase(it);
KSharedMemoryInfo::Free(info);
}
}
/* Close all references to our io regions. */
{
auto it = m_io_region_list.begin();
while (it != m_io_region_list.end()) {
KIoRegion *io_region = std::addressof(*it);
it = m_io_region_list.erase(it);
io_region->Close();
}
}
/* Our thread local page list must be empty at this point. */
MESOSPHERE_ABORT_UNLESS(m_partially_used_tlp_tree.empty());
MESOSPHERE_ABORT_UNLESS(m_fully_used_tlp_tree.empty());
/* Release memory to the resource limit. */
if (m_resource_limit != nullptr) {
MESOSPHERE_ABORT_UNLESS(used_memory_size >= m_memory_release_hint);
m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, used_memory_size, used_memory_size - m_memory_release_hint);
m_resource_limit->Close();
}
/* Log that we finalized for debug. */
MESOSPHERE_LOG("KProcess::Finalize() pid=%ld name=%-12s\n", m_process_id, m_name);
/* Perform inherited finalization. */
KSynchronizationObject::Finalize();
}
Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params) {
/* Validate that the intended kernel version is high enough for us to support. */
R_UNLESS(m_capabilities.GetIntendedKernelVersion() >= ams::svc::RequiredKernelVersion, svc::ResultInvalidCombination());
/* Validate that the intended kernel version isn't too high for us to support. */
R_UNLESS(m_capabilities.GetIntendedKernelVersion() <= ams::svc::SupportedKernelVersion, svc::ResultInvalidCombination());
/* Create and clear the process local region. */
R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address)));
m_plr_heap_address = this->GetThreadLocalRegionPointer(m_plr_address);
std::memset(m_plr_heap_address, 0, ams::svc::ThreadLocalRegionSize);
/* Copy in the name from parameters. */
static_assert(sizeof(params.name) < sizeof(m_name));
std::memcpy(m_name, params.name, sizeof(params.name));
m_name[sizeof(params.name)] = 0;
/* Set misc fields. */
m_state = State_Created;
m_main_thread_stack_size = 0;
m_used_kernel_memory_size = 0;
m_ideal_core_id = 0;
m_flags = params.flags;
m_version = params.version;
m_program_id = params.program_id;
m_code_address = params.code_address;
m_code_size = params.code_num_pages * PageSize;
m_is_application = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
m_is_jit_debug = false;
#if defined(MESOSPHERE_ENABLE_PROCESS_CREATION_TIME)
m_creation_time = KHardwareTimer::GetTick();
#endif
/* Set thread fields. */
for (size_t i = 0; i < cpu::NumCores; i++) {
m_running_threads[i] = nullptr;
m_pinned_threads[i] = nullptr;
m_running_thread_idle_counts[i] = 0;
m_running_thread_switch_counts[i] = 0;
}
/* Set max memory. */
m_max_process_memory = m_page_table.GetHeapRegionSize();
/* Generate random entropy. */
KSystemControl::GenerateRandom(m_entropy, util::size(m_entropy));
/* Clear remaining fields. */
m_num_running_threads = 0;
m_num_process_switches = 0;
m_num_thread_switches = 0;
m_num_fpu_switches = 0;
m_num_supervisor_calls = 0;
m_num_ipc_messages = 0;
m_is_signaled = false;
m_attached_object = nullptr;
m_exception_thread = nullptr;
m_is_suspended = false;
m_memory_release_hint = 0;
m_schedule_count = 0;
m_is_handle_table_initialized = false;
/* We're initialized! */
m_is_initialized = true;
R_SUCCEED();
}
Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params, const KPageGroup &pg, const u32 *caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool, bool immortal) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(res_limit != nullptr);
MESOSPHERE_ABORT_UNLESS((params.code_num_pages * PageSize) / PageSize == static_cast<size_t>(params.code_num_pages));
/* Set members. */
m_memory_pool = pool;
m_resource_limit = res_limit;
m_is_default_application_system_resource = false;
m_is_immortal = immortal;
/* Setup our system resource. */
if (const size_t system_resource_num_pages = params.system_resource_num_pages; system_resource_num_pages != 0) {
/* Create a secure system resource. */
KSecureSystemResource *secure_resource = KSecureSystemResource::Create();
R_UNLESS(secure_resource != nullptr, svc::ResultOutOfResource());
ON_RESULT_FAILURE { secure_resource->Close(); };
/* Initialize the secure resource. */
R_TRY(secure_resource->Initialize(system_resource_num_pages * PageSize, m_resource_limit, m_memory_pool));
/* Set our system resource. */
m_system_resource = secure_resource;
} else {
/* Use the system-wide system resource. */
const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
m_system_resource = std::addressof(is_app ? Kernel::GetApplicationSystemResource() : Kernel::GetSystemSystemResource());
m_is_default_application_system_resource = is_app;
/* Open reference to the system resource. */
m_system_resource->Open();
}
/* Ensure we clean up our secure resource, if we fail. */
ON_RESULT_FAILURE { m_system_resource->Close(); };
/* Setup page table. */
{
const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, params.code_num_pages * PageSize, m_system_resource, res_limit, this->GetSlabIndex()));
}
ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
/* Ensure we can insert the code region. */
R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize, KMemoryState_Code), svc::ResultInvalidMemoryRegion());
/* Map the code region. */
R_TRY(m_page_table.MapPageGroup(params.code_address, pg, KMemoryState_Code, KMemoryPermission_KernelRead));
/* Initialize capabilities. */
R_TRY(m_capabilities.Initialize(caps, num_caps, std::addressof(m_page_table)));
/* Initialize the process id. */
m_process_id = g_initial_process_id++;
MESOSPHERE_ABORT_UNLESS(InitialProcessIdMin <= m_process_id);
MESOSPHERE_ABORT_UNLESS(m_process_id <= InitialProcessIdMax);
/* Initialize the rest of the process. */
R_TRY(this->Initialize(params));
/* Open a reference to the resource limit. */
m_resource_limit->Open();
/* We succeeded! */
R_SUCCEED();
}
Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params, svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(res_limit != nullptr);
/* Set pool and resource limit. */
m_memory_pool = pool;
m_resource_limit = res_limit;
m_is_default_application_system_resource = false;
m_is_immortal = false;
/* Get the memory sizes. */
const size_t code_num_pages = params.code_num_pages;
const size_t system_resource_num_pages = params.system_resource_num_pages;
const size_t code_size = code_num_pages * PageSize;
const size_t system_resource_size = system_resource_num_pages * PageSize;
/* Reserve memory for our code resource. */
KScopedResourceReservation memory_reservation(this, ams::svc::LimitableResource_PhysicalMemoryMax, code_size);
R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
/* Setup our system resource. */
if (system_resource_num_pages != 0) {
/* Create a secure system resource. */
KSecureSystemResource *secure_resource = KSecureSystemResource::Create();
R_UNLESS(secure_resource != nullptr, svc::ResultOutOfResource());
ON_RESULT_FAILURE { secure_resource->Close(); };
/* Initialize the secure resource. */
R_TRY(secure_resource->Initialize(system_resource_size, m_resource_limit, m_memory_pool));
/* Set our system resource. */
m_system_resource = secure_resource;
} else {
/* Use the system-wide system resource. */
const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
m_system_resource = std::addressof(is_app ? Kernel::GetApplicationSystemResource() : Kernel::GetSystemSystemResource());
m_is_default_application_system_resource = is_app;
/* Open reference to the system resource. */
m_system_resource->Open();
}
/* Ensure we clean up our secure resource, if we fail. */
ON_RESULT_FAILURE { m_system_resource->Close(); };
/* Setup page table. */
{
const bool from_back = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) == 0;
R_TRY(m_page_table.Initialize(static_cast<ams::svc::CreateProcessFlag>(params.flags), from_back, pool, params.code_address, code_size, m_system_resource, res_limit, this->GetSlabIndex()));
}
ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
/* Ensure we can insert the code region. */
R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState_Code), svc::ResultInvalidMemoryRegion());
/* Map the code region. */
R_TRY(m_page_table.MapPages(params.code_address, code_num_pages, KMemoryState_Code, static_cast<KMemoryPermission>(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped)));
/* Initialize capabilities. */
R_TRY(m_capabilities.Initialize(user_caps, num_caps, std::addressof(m_page_table)));
/* Initialize the process id. */
m_process_id = g_process_id++;
MESOSPHERE_ABORT_UNLESS(ProcessIdMin <= m_process_id);
MESOSPHERE_ABORT_UNLESS(m_process_id <= ProcessIdMax);
/* If we should optimize memory allocations, do so. */
if (m_system_resource->IsSecureResource() && (params.flags & ams::svc::CreateProcessFlag_OptimizeMemoryAllocation) != 0) {
R_TRY(Kernel::GetMemoryManager().InitializeOptimizedMemory(m_process_id, pool));
}
/* Initialize the rest of the process. */
R_TRY(this->Initialize(params));
/* Open a reference to the resource limit. */
m_resource_limit->Open();
/* We succeeded, so commit our memory reservation. */
memory_reservation.Commit();
R_SUCCEED();
}
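/* NOTE: the two Initialize() overloads split the process-id space using the */
/* constants at the top of this file: the KPageGroup variant assigns ids from */
/* [InitialProcessIdMin, InitialProcessIdMax] = [1, 0x50] to initial */
/* processes, while this variant assigns [ProcessIdMin, ProcessIdMax] = */
/* [0x51, u64 max] to everything created afterwards. */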
void KProcess::DoWorkerTaskImpl() {
/* Terminate child threads. */
TerminateChildren(this, nullptr);
/* Finalize the handle table, if we're not immortal. */
if (!m_is_immortal && m_is_handle_table_initialized) {
this->FinalizeHandleTable();
}
/* Call the debug callback. */
KDebug::OnExitProcess(this);
/* Finish termination. */
this->FinishTermination();
}
Result KProcess::StartTermination() {
/* Finalize the handle table when we're done, if the process isn't immortal. */
ON_SCOPE_EXIT {
if (!m_is_immortal) {
this->FinalizeHandleTable();
}
};
/* Terminate child threads other than the current one. */
R_RETURN(TerminateChildren(this, GetCurrentThreadPointer()));
}
void KProcess::FinishTermination() {
/* Only allow termination to occur if the process isn't immortal. */
if (!m_is_immortal) {
/* Release resource limit hint. */
if (m_resource_limit != nullptr) {
m_memory_release_hint = this->GetUsedNonSystemUserPhysicalMemorySize();
m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, 0, m_memory_release_hint);
}
/* Change state. */
{
KScopedSchedulerLock sl;
this->ChangeState(State_Terminated);
}
/* Close. */
this->Close();
}
}
void KProcess::Exit() {
MESOSPHERE_ASSERT_THIS();
/* Determine whether we need to start terminating. */
bool needs_terminate = false;
{
KScopedLightLock lk(m_state_lock);
KScopedSchedulerLock sl;
MESOSPHERE_ASSERT(m_state != State_Created);
MESOSPHERE_ASSERT(m_state != State_CreatedAttached);
MESOSPHERE_ASSERT(m_state != State_Crashed);
MESOSPHERE_ASSERT(m_state != State_Terminated);
if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_DebugBreak) {
this->ChangeState(State_Terminating);
needs_terminate = true;
}
}
/* If we need to start termination, do so. */
if (needs_terminate) {
this->StartTermination();
/* Note for debug that we're exiting the process. */
MESOSPHERE_LOG("KProcess::Exit() pid=%ld name=%-12s\n", m_process_id, m_name);
/* Register the process as a work task. */
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_ExitProcess, this);
}
/* Exit the current thread. */
GetCurrentThread().Exit();
MESOSPHERE_PANIC("Thread survived call to exit");
}
Result KProcess::Terminate() {
MESOSPHERE_ASSERT_THIS();
/* Determine whether we need to start terminating. */
bool needs_terminate = false;
{
KScopedLightLock lk(m_state_lock);
/* Check whether we're allowed to terminate. */
R_UNLESS(m_state != State_Created, svc::ResultInvalidState());
R_UNLESS(m_state != State_CreatedAttached, svc::ResultInvalidState());
KScopedSchedulerLock sl;
if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_Crashed || m_state == State_DebugBreak) {
this->ChangeState(State_Terminating);
needs_terminate = true;
}
}
/* If we need to terminate, do so. */
if (needs_terminate) {
/* Start termination. */
if (R_SUCCEEDED(this->StartTermination())) {
/* Note for debug that we're terminating the process. */
MESOSPHERE_LOG("KProcess::Terminate() OK pid=%ld name=%-12s\n", m_process_id, m_name);
/* Call the debug callback. */
KDebug::OnTerminateProcess(this);
/* Finish termination. */
this->FinishTermination();
} else {
/* Note for debug that we're terminating the process. */
MESOSPHERE_LOG("KProcess::Terminate() FAIL pid=%ld name=%-12s\n", m_process_id, m_name);
/* Register the process as a work task. */
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_ExitProcess, this);
}
}
R_SUCCEED();
}
Result KProcess::AddSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) {
/* Lock ourselves, to prevent concurrent access. */
KScopedLightLock lk(m_state_lock);
/* Address and size parameters aren't used. */
MESOSPHERE_UNUSED(address, size);
/* Try to find an existing info for the memory. */
KSharedMemoryInfo *info = nullptr;
for (auto it = m_shared_memory_list.begin(); it != m_shared_memory_list.end(); ++it) {
if (it->GetSharedMemory() == shmem) {
info = std::addressof(*it);
break;
}
}
/* If we didn't find an info, create one. */
if (info == nullptr) {
/* Allocate a new info. */
info = KSharedMemoryInfo::Allocate();
R_UNLESS(info != nullptr, svc::ResultOutOfResource());
/* Initialize the info and add it to our list. */
info->Initialize(shmem);
m_shared_memory_list.push_back(*info);
}
/* Open a reference to the shared memory and its info. */
shmem->Open();
info->Open();
R_SUCCEED();
}
void KProcess::RemoveSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) {
/* Lock ourselves, to prevent concurrent access. */
KScopedLightLock lk(m_state_lock);
/* Address and size parameters aren't used. */
MESOSPHERE_UNUSED(address, size);
/* Find an existing info for the memory. */
KSharedMemoryInfo *info = nullptr;
auto it = m_shared_memory_list.begin();
for (/* ... */; it != m_shared_memory_list.end(); ++it) {
if (it->GetSharedMemory() == shmem) {
info = std::addressof(*it);
break;
}
}
MESOSPHERE_ABORT_UNLESS(info != nullptr);
/* Close a reference to the info and its memory. */
if (info->Close()) {
m_shared_memory_list.erase(it);
KSharedMemoryInfo::Free(info);
}
shmem->Close();
}
void KProcess::AddIoRegion(KIoRegion *io_region) {
/* Lock ourselves, to prevent concurrent access. */
KScopedLightLock lk(m_state_lock);
/* Open a reference to the region. */
io_region->Open();
/* Add the region to our list. */
m_io_region_list.push_back(*io_region);
}
void KProcess::RemoveIoRegion(KIoRegion *io_region) {
/* Remove the region from our list. */
{
/* Lock ourselves, to prevent concurrent access. */
KScopedLightLock lk(m_state_lock);
/* Remove the region from our list. */
m_io_region_list.erase(m_io_region_list.iterator_to(*io_region));
}
/* Close our reference to the io region. */
io_region->Close();
}
Result KProcess::CreateThreadLocalRegion(KProcessAddress *out) {
KThreadLocalPage *tlp = nullptr;
KProcessAddress tlr = Null<KProcessAddress>;
/* See if we can get a region from a partially used TLP. */
{
KScopedSchedulerLock sl;
if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) {
tlr = it->Reserve();
MESOSPHERE_ABORT_UNLESS(tlr != Null<KProcessAddress>);
if (it->IsAllUsed()) {
tlp = std::addressof(*it);
m_partially_used_tlp_tree.erase(it);
m_fully_used_tlp_tree.insert(*tlp);
}
*out = tlr;
R_SUCCEED();
}
}
/* Allocate a new page. */
tlp = KThreadLocalPage::Allocate();
R_UNLESS(tlp != nullptr, svc::ResultOutOfMemory());
ON_RESULT_FAILURE { KThreadLocalPage::Free(tlp); };
/* Initialize the new page. */
R_TRY(tlp->Initialize(this));
/* Reserve a TLR. */
tlr = tlp->Reserve();
MESOSPHERE_ABORT_UNLESS(tlr != Null<KProcessAddress>);
/* Insert into our tree. */
{
KScopedSchedulerLock sl;
if (tlp->IsAllUsed()) {
m_fully_used_tlp_tree.insert(*tlp);
} else {
m_partially_used_tlp_tree.insert(*tlp);
}
}
/* We succeeded! */
*out = tlr;
R_SUCCEED();
}
Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
KThreadLocalPage *page_to_free = nullptr;
/* Release the region. */
{
KScopedSchedulerLock sl;
/* Try to find the page in the partially used list. */
auto it = m_partially_used_tlp_tree.find_key(util::AlignDown(GetInteger(addr), PageSize));
if (it == m_partially_used_tlp_tree.end()) {
/* If we don't find it, it has to be in the fully used list. */
it = m_fully_used_tlp_tree.find_key(util::AlignDown(GetInteger(addr), PageSize));
R_UNLESS(it != m_fully_used_tlp_tree.end(), svc::ResultInvalidAddress());
/* Release the region. */
it->Release(addr);
/* Move the page out of the fully used list. */
KThreadLocalPage *tlp = std::addressof(*it);
m_fully_used_tlp_tree.erase(it);
if (tlp->IsAllFree()) {
page_to_free = tlp;
} else {
m_partially_used_tlp_tree.insert(*tlp);
}
} else {
/* Release the region. */
it->Release(addr);
/* Handle the all-free case. */
KThreadLocalPage *tlp = std::addressof(*it);
if (tlp->IsAllFree()) {
m_partially_used_tlp_tree.erase(it);
page_to_free = tlp;
}
}
}
/* If we should free the page it was in, do so. */
if (page_to_free != nullptr) {
page_to_free->Finalize();
KThreadLocalPage::Free(page_to_free);
}
R_SUCCEED();
}
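/* NOTE: thread-local pages migrate between two intrusive trees keyed by their */
/* page-aligned base address: m_partially_used_tlp_tree (at least one free */
/* region) and m_fully_used_tlp_tree (none free). The transitions maintained */
/* by Create/Delete above: */
/* */
/*     reserve the last free region:    partially used -> fully used */
/*     release a region in a full page: fully used -> partially used */
/*     release the last used region:    removed from its tree, finalized, freed */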
void *KProcess::GetThreadLocalRegionPointer(KProcessAddress addr) {
KThreadLocalPage *tlp = nullptr;
{
KScopedSchedulerLock sl;
if (auto it = m_partially_used_tlp_tree.find_key(util::AlignDown(GetInteger(addr), PageSize)); it != m_partially_used_tlp_tree.end()) {
tlp = std::addressof(*it);
} else if (auto it = m_fully_used_tlp_tree.find_key(util::AlignDown(GetInteger(addr), PageSize)); it != m_fully_used_tlp_tree.end()) {
tlp = std::addressof(*it);
} else {
return nullptr;
}
}
return static_cast<u8 *>(tlp->GetPointer()) + (GetInteger(addr) & (PageSize - 1));
}
bool KProcess::ReserveResource(ams::svc::LimitableResource which, s64 value) {
if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
return rl->Reserve(which, value);
} else {
return true;
}
}
bool KProcess::ReserveResource(ams::svc::LimitableResource which, s64 value, s64 timeout) {
if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
return rl->Reserve(which, value, timeout);
} else {
return true;
}
}
void KProcess::ReleaseResource(ams::svc::LimitableResource which, s64 value) {
if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
rl->Release(which, value);
}
}
void KProcess::ReleaseResource(ams::svc::LimitableResource which, s64 value, s64 hint) {
if (KResourceLimit *rl = this->GetResourceLimit(); rl != nullptr) {
rl->Release(which, value, hint);
}
}
void KProcess::IncrementRunningThreadCount() {
MESOSPHERE_ASSERT(m_num_running_threads.Load() >= 0);
++m_num_running_threads;
}
void KProcess::DecrementRunningThreadCount() {
MESOSPHERE_ASSERT(m_num_running_threads.Load() > 0);
if (const auto prev = m_num_running_threads--; prev == 1) {
this->Terminate();
}
}
bool KProcess::EnterUserException() {
/* Get the current thread. */
KThread *cur_thread = GetCurrentThreadPointer();
MESOSPHERE_ASSERT(this == cur_thread->GetOwnerProcess());
/* Check that we haven't already claimed the exception thread. */
if (m_exception_thread == cur_thread) {
return false;
}
/* Create the wait queue we'll be using. */
ThreadQueueImplForKProcessEnterUserException wait_queue(std::addressof(m_exception_thread));
/* Claim the exception thread. */
{
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* Check that we're not terminating. */
if (cur_thread->IsTerminationRequested()) {
return false;
}
/* If we don't have an exception thread, we can just claim it directly. */
if (m_exception_thread == nullptr) {
m_exception_thread = cur_thread;
KScheduler::SetSchedulerUpdateNeeded();
return true;
}
/* Otherwise, we need to wait until we don't have an exception thread. */
/* Add the current thread as a waiter on the current exception thread. */
cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1);
m_exception_thread->AddWaiter(cur_thread);
/* Wait to claim the exception thread. */
cur_thread->BeginWait(std::addressof(wait_queue));
}
/* If our wait didn't end due to thread termination, we succeeded. */
return !svc::ResultTerminationRequested::Includes(cur_thread->GetWaitResult());
}
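/* NOTE: the wait key above is the address of m_exception_thread with its low */
/* bit set. Since legitimate userspace lock addresses are at least 4-byte */
/* aligned, the tag plausibly marks this as a kernel-internal key rather than */
/* a user mutex address; that reading is an inference, not something this file */
/* states. ReleaseUserException() removes the waiter using the same tagged key. */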
bool KProcess::LeaveUserException() {
return this->ReleaseUserException(GetCurrentThreadPointer());
}
bool KProcess::ReleaseUserException(KThread *thread) {
KScopedSchedulerLock sl;
if (m_exception_thread == thread) {
m_exception_thread = nullptr;
/* Remove waiter thread. */
bool has_waiters;
if (KThread *next = thread->RemoveWaiterByKey(std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)) | 1); next != nullptr) {
next->EndWait(ResultSuccess());
}
KScheduler::SetSchedulerUpdateNeeded();
return true;
} else {
return false;
}
}
void KProcess::RegisterThread(KThread *thread) {
KScopedLightLock lk(m_list_lock);
m_thread_list.push_back(*thread);
}
void KProcess::UnregisterThread(KThread *thread) {
KScopedLightLock lk(m_list_lock);
m_thread_list.erase(m_thread_list.iterator_to(*thread));
}
size_t KProcess::GetUsedUserPhysicalMemorySize() const {
const size_t norm_size = m_page_table.GetNormalMemorySize();
const size_t other_size = m_code_size + m_main_thread_stack_size;
const size_t sec_size = this->GetRequiredSecureMemorySizeNonDefault();
return norm_size + other_size + sec_size;
}
size_t KProcess::GetTotalUserPhysicalMemorySize() const {
/* Get the amount of free and used size. */
const size_t free_size = m_resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
const size_t max_size = m_max_process_memory;
/* Determine used size. */
/* NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike GetUsedUserPhysicalMemorySize(). */
const size_t norm_size = m_page_table.GetNormalMemorySize();
const size_t other_size = m_code_size + m_main_thread_stack_size;
const size_t sec_size = this->GetRequiredSecureMemorySize();
const size_t used_size = norm_size + other_size + sec_size;
/* NOTE: These function calls will recalculate, introducing a race...it is unclear why Nintendo does it this way. */
if (used_size + free_size > max_size) {
return max_size;
} else {
return free_size + this->GetUsedUserPhysicalMemorySize();
}
}
size_t KProcess::GetUsedNonSystemUserPhysicalMemorySize() const {
const size_t norm_size = m_page_table.GetNormalMemorySize();
const size_t other_size = m_code_size + m_main_thread_stack_size;
return norm_size + other_size;
}
size_t KProcess::GetTotalNonSystemUserPhysicalMemorySize() const {
/* Get the amount of free and used size. */
const size_t free_size = m_resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
const size_t max_size = m_max_process_memory;
/* Determine used size. */
/* NOTE: This does *not* check this->IsDefaultApplicationSystemResource(), unlike GetUsedUserPhysicalMemorySize(). */
const size_t norm_size = m_page_table.GetNormalMemorySize();
const size_t other_size = m_code_size + m_main_thread_stack_size;
const size_t sec_size = this->GetRequiredSecureMemorySize();
const size_t used_size = norm_size + other_size + sec_size;
/* NOTE: These function calls will recalculate, introducing a race...it is unclear why Nintendo does it this way. */
if (used_size + free_size > max_size) {
return max_size - this->GetRequiredSecureMemorySizeNonDefault();
} else {
return free_size + this->GetUsedNonSystemUserPhysicalMemorySize();
}
}
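/* NOTE: A sketch of the accounting shared by the two GetTotal* functions */
/* above, writing free for the limit's remaining PhysicalMemoryMax and used */
/* for the freshly recomputed per-process usage: */
/* */
/*     total ~= min(max_process_memory, free + used) */
/* */
/* (the non-system variant additionally subtracts the non-default secure size */
/* in the capped branch). used is recalculated in the uncapped branch, so a */
/* concurrent allocation can make the two reads disagree, as noted above. */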
Result KProcess::Run(s32 priority, size_t stack_size) {
MESOSPHERE_ASSERT_THIS();
/* Lock ourselves, to prevent concurrent access. */
KScopedLightLock lk(m_state_lock);
/* Validate that we're in a state where we can initialize. */
const auto state = m_state;
R_UNLESS(state == State_Created || state == State_CreatedAttached, svc::ResultInvalidState());
/* Place a tentative reservation of a thread for this process. */
KScopedResourceReservation thread_reservation(this, ams::svc::LimitableResource_ThreadCountMax);
R_UNLESS(thread_reservation.Succeeded(), svc::ResultLimitReached());
/* Ensure that we haven't already allocated stack. */
MESOSPHERE_ABORT_UNLESS(m_main_thread_stack_size == 0);
/* Ensure that we're allocating a valid stack. */
R_UNLESS(stack_size + m_code_size <= m_max_process_memory, svc::ResultOutOfMemory());
R_UNLESS(stack_size + m_code_size >= m_code_size, svc::ResultOutOfMemory());
/* Place a tentative reservation of memory for our new stack. */
KScopedResourceReservation mem_reservation(this, ams::svc::LimitableResource_PhysicalMemoryMax, stack_size);
R_UNLESS(mem_reservation.Succeeded(), svc::ResultLimitReached());
/* Allocate and map our stack. */
KProcessAddress stack_top = Null<KProcessAddress>;
if (stack_size) {
KProcessAddress stack_bottom;
R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, KMemoryState_Stack, KMemoryPermission_UserReadWrite));
stack_top = stack_bottom + stack_size;
m_main_thread_stack_size = stack_size;
}
/* Ensure our stack is safe to clean up on exit. */
ON_RESULT_FAILURE {
if (m_main_thread_stack_size) {
MESOSPHERE_R_ABORT_UNLESS(m_page_table.UnmapPages(stack_top - m_main_thread_stack_size, m_main_thread_stack_size / PageSize, KMemoryState_Stack));
m_main_thread_stack_size = 0;
}
};
/* Set our maximum heap size. */
R_TRY(m_page_table.SetMaxHeapSize(m_max_process_memory - (m_main_thread_stack_size + m_code_size)));
/* Initialize our handle table. */
R_TRY(this->InitializeHandleTable(m_capabilities.GetHandleTableSize()));
ON_RESULT_FAILURE_2 { this->FinalizeHandleTable(); };
/* Create a new thread for the process. */
KThread *main_thread = KThread::Create();
R_UNLESS(main_thread != nullptr, svc::ResultOutOfResource());
ON_SCOPE_EXIT { main_thread->Close(); };
/* Initialize the thread. */
R_TRY(KThread::InitializeUserThread(main_thread, reinterpret_cast<KThreadFunction>(GetVoidPointer(this->GetEntryPoint())), 0, stack_top, priority, m_ideal_core_id, this));
/* Register the thread, and commit our reservation. */
KThread::Register(main_thread);
thread_reservation.Commit();
/* Add the thread to our handle table. */
ams::svc::Handle thread_handle;
R_TRY(m_handle_table.Add(std::addressof(thread_handle), main_thread));
/* Set the thread arguments. */
main_thread->GetContext().SetArguments(0, thread_handle);
/* Update our state. */
this->ChangeState((state == State_Created) ? State_Running : State_RunningAttached);
ON_RESULT_FAILURE_2 { this->ChangeState(state); };
/* Run our thread. */
R_TRY(main_thread->Run());
/* Open a reference to represent that we're running. */
this->Open();
/* We succeeded! Commit our memory reservation. */
mem_reservation.Commit();
/* Note for debug that we're running a new process. */
MESOSPHERE_LOG("KProcess::Run() pid=%ld name=%-12s thread=%ld affinity=0x%lx ideal_core=%d active_core=%d\n", m_process_id, m_name, main_thread->GetId(), main_thread->GetVirtualAffinityMask(), main_thread->GetIdealVirtualCore(), main_thread->GetActiveCore());
R_SUCCEED();
}
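/* NOTE: the ordering in Run() matters: resource reservations come first so */
/* failure is clean; the stack and handle table are created under */
/* ON_RESULT_FAILURE scopes that unwind them; the main thread's entry */
/* arguments are set to (0, thread_handle); and the final this->Open() is the */
/* self-reference that FinishTermination() closes when the process dies. */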
Result KProcess::Reset() {
MESOSPHERE_ASSERT_THIS();
/* Lock the process and the scheduler. */
KScopedLightLock lk(m_state_lock);
KScopedSchedulerLock sl;
/* Validate that we're in a state that we can reset. */
R_UNLESS(m_state != State_Terminated, svc::ResultInvalidState());
R_UNLESS(m_is_signaled, svc::ResultInvalidState());
/* Clear signaled. */
m_is_signaled = false;
R_SUCCEED();
}
Result KProcess::SetActivity(ams::svc::ProcessActivity activity) {
/* Lock ourselves and the scheduler. */
KScopedLightLock lk(m_state_lock);
KScopedLightLock list_lk(m_list_lock);
KScopedSchedulerLock sl;
/* Validate our state. */
R_UNLESS(m_state != State_Terminating, svc::ResultInvalidState());
R_UNLESS(m_state != State_Terminated, svc::ResultInvalidState());
/* Either pause or resume. */
if (activity == ams::svc::ProcessActivity_Paused) {
/* Verify that we're not suspended. */
R_UNLESS(!m_is_suspended, svc::ResultInvalidState());
/* Suspend all threads. */
auto end = this->GetThreadList().end();
for (auto it = this->GetThreadList().begin(); it != end; ++it) {
it->RequestSuspend(KThread::SuspendType_Process);
}
/* Set ourselves as suspended. */
this->SetSuspended(true);
} else {
MESOSPHERE_ASSERT(activity == ams::svc::ProcessActivity_Runnable);
/* Verify that we're suspended. */
R_UNLESS(m_is_suspended, svc::ResultInvalidState());
/* Resume all threads. */
auto end = this->GetThreadList().end();
for (auto it = this->GetThreadList().begin(); it != end; ++it) {
it->Resume(KThread::SuspendType_Process);
}
/* Set ourselves as resumed. */
this->SetSuspended(false);
}
R_SUCCEED();
}
void KProcess::PinCurrentThread() {
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Get the current thread. */
const s32 core_id = GetCurrentCoreId();
KThread *cur_thread = GetCurrentThreadPointer();
/* If the thread isn't terminated, pin it. */
if (!cur_thread->IsTerminationRequested()) {
/* Pin it. */
this->PinThread(core_id, cur_thread);
cur_thread->Pin();
/* An update is needed. */
KScheduler::SetSchedulerUpdateNeeded();
}
}
void KProcess::UnpinCurrentThread() {
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Get the current thread. */
const s32 core_id = GetCurrentCoreId();
KThread *cur_thread = GetCurrentThreadPointer();
/* Unpin it. */
cur_thread->Unpin();
this->UnpinThread(core_id, cur_thread);
/* An update is needed. */
KScheduler::SetSchedulerUpdateNeeded();
}
void KProcess::UnpinThread(KThread *thread) {
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Get the thread's core id. */
const auto core_id = thread->GetActiveCore();
/* Unpin it. */
this->UnpinThread(core_id, thread);
thread->Unpin();
/* An update is needed. */
KScheduler::SetSchedulerUpdateNeeded();
}
Result KProcess::GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer<u64 *> out_thread_ids, s32 max_out_count) {
/* Lock the list. */
KScopedLightLock lk(m_list_lock);
/* Iterate over the list. */
s32 count = 0;
auto end = this->GetThreadList().end();
for (auto it = this->GetThreadList().begin(); it != end; ++it) {
/* If we're within array bounds, write the id. */
if (count < max_out_count) {
/* Get the thread id. */
KThread *thread = std::addressof(*it);
const u64 id = thread->GetId();
/* Copy the id to userland. */
R_TRY(out_thread_ids.CopyArrayElementFrom(std::addressof(id), count));
}
/* Increment the count. */
++count;
}
/* We successfully iterated the list. */
*out_num_threads = count;
R_SUCCEED();
}
KProcess::State KProcess::SetDebugObject(void *debug_object) {
/* Attaching should only happen to non-null objects while the scheduler is locked. */
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
MESOSPHERE_ASSERT(debug_object != nullptr);
/* Cache our state to return it to the debug object. */
const auto old_state = m_state;
/* Set the object. */
m_attached_object = debug_object;
/* Check that our state is valid for attach. */
MESOSPHERE_ASSERT(m_state == State_Created || m_state == State_Running || m_state == State_Crashed);
/* Update our state. */
if (m_state != State_DebugBreak) {
if (m_state == State_Created) {
this->ChangeState(State_CreatedAttached);
} else {
this->ChangeState(State_DebugBreak);
}
}
return old_state;
}
void KProcess::ClearDebugObject(KProcess::State old_state) {
/* Detaching from process should only happen while the scheduler is locked. */
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Clear the attached object. */
m_attached_object = nullptr;
/* Validate that the process is in an attached state. */
MESOSPHERE_ASSERT(m_state == State_CreatedAttached || m_state == State_RunningAttached || m_state == State_DebugBreak || m_state == State_Terminating || m_state == State_Terminated);
/* Change the state appropriately. */
if (m_state == State_CreatedAttached) {
this->ChangeState(State_Created);
} else if (m_state == State_RunningAttached || m_state == State_DebugBreak) {
/* Disallow transition back to created from running. */
if (old_state == State_Created) {
old_state = State_Running;
}
this->ChangeState(old_state);
}
}
bool KProcess::EnterJitDebug(ams::svc::DebugEvent event, ams::svc::DebugException exception, uintptr_t param1, uintptr_t param2, uintptr_t param3, uintptr_t param4) {
/* Check that we're the current process. */
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this == GetCurrentProcessPointer());
/* If we aren't allowed to enter jit debug, don't. */
if ((m_flags & ams::svc::CreateProcessFlag_EnableDebug) == 0) {
return false;
}
/* We're the current process, so we should be some kind of running. */
MESOSPHERE_ASSERT(m_state != State_Created);
MESOSPHERE_ASSERT(m_state != State_CreatedAttached);
MESOSPHERE_ASSERT(m_state != State_Terminated);
/* Try to enter JIT debug. */
while (true) {
/* Lock ourselves and the scheduler. */
KScopedLightLock lk(m_state_lock);
KScopedLightLock list_lk(m_list_lock);
KScopedSchedulerLock sl;
/* If we're attached to a debugger, we're necessarily in debug. */
if (this->IsAttachedToDebugger()) {
return true;
}
/* If the current thread is terminating, we can't enter debug. */
if (GetCurrentThread().IsTerminationRequested()) {
return false;
}
/* We're not attached to debugger, so check that. */
MESOSPHERE_ASSERT(m_state != State_RunningAttached);
MESOSPHERE_ASSERT(m_state != State_DebugBreak);
/* If we're terminating, we can't enter debug. */
if (m_state != State_Running && m_state != State_Crashed) {
MESOSPHERE_ASSERT(m_state == State_Terminating);
return false;
}
/* If the current thread is suspended, retry. */
if (GetCurrentThread().IsSuspended()) {
continue;
}
/* Suspend all our threads. */
{
auto end = this->GetThreadList().end();
for (auto it = this->GetThreadList().begin(); it != end; ++it) {
it->RequestSuspend(KThread::SuspendType_Debug);
}
}
/* Change our state to crashed. */
this->ChangeState(State_Crashed);
/* Enter jit debug. */
m_is_jit_debug = true;
m_jit_debug_event_type = event;
m_jit_debug_exception_type = exception;
m_jit_debug_params[0] = param1;
m_jit_debug_params[1] = param2;
m_jit_debug_params[2] = param3;
m_jit_debug_params[3] = param4;
m_jit_debug_thread_id = GetCurrentThread().GetId();
/* Exit our retry loop. */
break;
}
/* Check if our state indicates we're in jit debug. */
{
KScopedSchedulerLock sl;
if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_Crashed || m_state == State_DebugBreak) {
return true;
}
}
return false;
}
KEventInfo *KProcess::GetJitDebugInfo() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
if (m_is_jit_debug) {
const uintptr_t params[5] = { m_jit_debug_exception_type, m_jit_debug_params[0], m_jit_debug_params[1], m_jit_debug_params[2], m_jit_debug_params[3] };
return KDebugBase::CreateDebugEvent(m_jit_debug_event_type, m_jit_debug_thread_id, params, util::size(params));
} else {
return nullptr;
}
}
void KProcess::ClearJitDebugInfo() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
m_is_jit_debug = false;
}
KProcess *KProcess::GetProcessFromId(u64 process_id) {
/* Lock the list. */
KProcess::ListAccessor accessor;
const auto end = accessor.end();
/* Iterate over the list. */
for (auto it = accessor.begin(); it != end; ++it) {
/* Get the process. */
KProcess *process = static_cast<KProcess *>(std::addressof(*it));
if (process->GetId() == process_id) {
if (AMS_LIKELY(process->Open())) {
return process;
}
}
}
/* We failed to find the process. */
return nullptr;
}
Result KProcess::GetProcessList(s32 *out_num_processes, ams::kern::svc::KUserPointer<u64 *> out_process_ids, s32 max_out_count) {
/* Lock the list. */
KProcess::ListAccessor accessor;
const auto end = accessor.end();
/* Iterate over the list. */
s32 count = 0;
for (auto it = accessor.begin(); it != end; ++it) {
/* If we're within array bounds, write the id. */
if (count < max_out_count) {
/* Get the process id. */
KProcess *process = static_cast<KProcess *>(std::addressof(*it));
const u64 id = process->GetId();
/* Copy the id to userland. */
R_TRY(out_process_ids.CopyArrayElementFrom(std::addressof(id), count));
}
/* Increment the count. */
++count;
}
/* We successfully iterated the list. */
*out_num_processes = count;
R_SUCCEED();
}
}
| 52,265 | C++ | .cpp | 1,047 | 38.673352 | 267 | 0.584958 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,931 | kern_k_light_client_session.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_light_client_session.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
void KLightClientSession::Destroy() {
MESOSPHERE_ASSERT_THIS();
m_parent->OnClientClosed();
}
void KLightClientSession::OnServerClosed() {
MESOSPHERE_ASSERT_THIS();
}
Result KLightClientSession::SendSyncRequest(u32 *data) {
MESOSPHERE_ASSERT_THIS();
/* Get the request thread. */
KThread *cur_thread = GetCurrentThreadPointer();
/* Set the light data. */
cur_thread->SetLightSessionData(data);
/* Send the request. */
R_RETURN(m_parent->OnRequest(cur_thread));
}
}
| 1,261 | C++ | .cpp | 34 | 32.352941 | 76 | 0.698686 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,932 | kern_k_port.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_port.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
void KPort::Initialize(s32 max_sessions, bool is_light, uintptr_t name) {
/* Open a new reference count to the initialized port. */
this->Open();
/* Create and initialize our server/client pair. */
KAutoObject::Create<KServerPort>(std::addressof(m_server));
KAutoObject::Create<KClientPort>(std::addressof(m_client));
m_server.Initialize(this);
m_client.Initialize(this, max_sessions);
/* Set our member variables. */
m_is_light = is_light;
m_name = name;
m_state = State::Normal;
}
void KPort::OnClientClosed() {
MESOSPHERE_ASSERT_THIS();
KScopedSchedulerLock sl;
if (m_state == State::Normal) {
m_state = State::ClientClosed;
}
}
void KPort::OnServerClosed() {
MESOSPHERE_ASSERT_THIS();
KScopedSchedulerLock sl;
if (m_state == State::Normal) {
m_state = State::ServerClosed;
}
}
Result KPort::EnqueueSession(KServerSession *session) {
KScopedSchedulerLock sl;
R_UNLESS(m_state == State::Normal, svc::ResultPortClosed());
m_server.EnqueueSession(session);
R_SUCCEED();
}
Result KPort::EnqueueSession(KLightServerSession *session) {
KScopedSchedulerLock sl;
R_UNLESS(m_state == State::Normal, svc::ResultPortClosed());
m_server.EnqueueSession(session);
R_SUCCEED();
}
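/* NOTE: A sketch of the port state machine guarded by the scheduler lock: */
/* */
/*     Normal --OnClientClosed()--> ClientClosed */
/*     Normal --OnServerClosed()--> ServerClosed */
/* */
/* EnqueueSession() succeeds only in State::Normal; either closed state yields */
/* svc::ResultPortClosed(), and nothing in this file transitions back. */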
}
| 2,157 | C++ | .cpp | 57 | 31.385965 | 77 | 0.658185 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,933 | kern_k_system_resource.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_system_resource.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
Result KSecureSystemResource::Initialize(size_t size, KResourceLimit *resource_limit, KMemoryManager::Pool pool) {
/* Set members. */
m_resource_limit = resource_limit;
m_resource_size = size;
m_resource_pool = pool;
/* Determine required size for our secure resource. */
const size_t secure_size = this->CalculateRequiredSecureMemorySize();
/* Reserve memory for our secure resource. */
KScopedResourceReservation memory_reservation(m_resource_limit, ams::svc::LimitableResource_PhysicalMemoryMax, secure_size);
R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
/* Allocate secure memory. */
R_TRY(KSystemControl::AllocateSecureMemory(std::addressof(m_resource_address), m_resource_size, m_resource_pool));
MESOSPHERE_ASSERT(m_resource_address != Null<KVirtualAddress>);
/* Ensure we clean up the secure memory, if we fail past this point. */
ON_RESULT_FAILURE { KSystemControl::FreeSecureMemory(m_resource_address, m_resource_size, m_resource_pool); };
/* Check that our allocation is bigger than the reference counts needed for it. */
const size_t rc_size = util::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(m_resource_size), PageSize);
R_UNLESS(m_resource_size > rc_size, svc::ResultOutOfMemory());
/* Initialize slab heaps. */
m_dynamic_page_manager.Initialize(m_resource_address + rc_size, m_resource_size - rc_size, PageSize);
m_page_table_heap.Initialize(std::addressof(m_dynamic_page_manager), 0, GetPointer<KPageTableManager::RefCount>(m_resource_address));
m_memory_block_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
m_block_info_heap.Initialize(std::addressof(m_dynamic_page_manager), 0);
/* Initialize managers. */
m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_page_table_heap));
m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_memory_block_heap));
m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager), std::addressof(m_block_info_heap));
/* Set our managers. */
this->SetManagers(m_memory_block_slab_manager, m_block_info_manager, m_page_table_manager);
/* Commit the memory reservation. */
memory_reservation.Commit();
/* Open reference to our resource limit. */
m_resource_limit->Open();
/* Set ourselves as initialized. */
m_is_initialized = true;
R_SUCCEED();
}
void KSecureSystemResource::Finalize() {
/* Check that we have no outstanding allocations. */
MESOSPHERE_ABORT_UNLESS(m_memory_block_slab_manager.GetUsed() == 0);
MESOSPHERE_ABORT_UNLESS(m_block_info_manager.GetUsed() == 0);
MESOSPHERE_ABORT_UNLESS(m_page_table_manager.GetUsed() == 0);
/* Free our secure memory. */
KSystemControl::FreeSecureMemory(m_resource_address, m_resource_size, m_resource_pool);
/* Release the memory reservation. */
m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, this->CalculateRequiredSecureMemorySize());
/* Close reference to our resource limit. */
m_resource_limit->Close();
}
size_t KSecureSystemResource::CalculateRequiredSecureMemorySize(size_t size, KMemoryManager::Pool pool) {
return KSystemControl::CalculateRequiredSecureMemorySize(size, pool);
}
}
| 4,260 | C++ | .cpp | 70 | 53.771429 | 141 | 0.702878 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,934 | kern_k_page_table_base.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_page_table_base.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#include <mesosphere/kern_select_page_table.hpp>
namespace ams::kern {
namespace {
class KScopedLightLockPair {
NON_COPYABLE(KScopedLightLockPair);
NON_MOVEABLE(KScopedLightLockPair);
private:
KLightLock *m_lower;
KLightLock *m_upper;
public:
ALWAYS_INLINE KScopedLightLockPair(KLightLock &lhs, KLightLock &rhs) {
/* Ensure our locks are in a consistent order. */
if (std::addressof(lhs) <= std::addressof(rhs)) {
m_lower = std::addressof(lhs);
m_upper = std::addressof(rhs);
} else {
m_lower = std::addressof(rhs);
m_upper = std::addressof(lhs);
}
/* Acquire both locks. */
m_lower->Lock();
if (m_lower != m_upper) {
m_upper->Lock();
}
}
~KScopedLightLockPair() {
/* Unlock the upper lock. */
if (m_upper != nullptr && m_upper != m_lower) {
m_upper->Unlock();
}
/* Unlock the lower lock. */
if (m_lower != nullptr) {
m_lower->Unlock();
}
}
public:
/* Utility. */
ALWAYS_INLINE void TryUnlockHalf(KLightLock &lock) {
/* Only allow unlocking if the lock is half the pair. */
if (m_lower != m_upper) {
/* We want to be sure the lock is one we own. */
if (m_lower == std::addressof(lock)) {
lock.Unlock();
m_lower = nullptr;
} else if (m_upper == std::addressof(lock)) {
lock.Unlock();
m_upper = nullptr;
}
}
}
};
}
void KPageTableBase::MemoryRange::Open() {
/* If the range contains heap pages, open them. */
if (this->IsHeap()) {
Kernel::GetMemoryManager().Open(this->GetAddress(), this->GetSize() / PageSize);
}
}
void KPageTableBase::MemoryRange::Close() {
/* If the range contains heap pages, close them. */
if (this->IsHeap()) {
Kernel::GetMemoryManager().Close(this->GetAddress(), this->GetSize() / PageSize);
}
}
Result KPageTableBase::InitializeForKernel(bool is_64_bit, void *table, KVirtualAddress start, KVirtualAddress end) {
/* Initialize our members. */
m_address_space_width = (is_64_bit) ? BITSIZEOF(u64) : BITSIZEOF(u32);
m_address_space_start = KProcessAddress(GetInteger(start));
m_address_space_end = KProcessAddress(GetInteger(end));
m_is_kernel = true;
m_enable_aslr = true;
m_enable_device_address_space_merge = false;
for (auto i = 0; i < RegionType_Count; ++i) {
m_region_starts[i] = 0;
m_region_ends[i] = 0;
}
m_current_heap_end = 0;
m_alias_code_region_start = 0;
m_alias_code_region_end = 0;
m_code_region_start = 0;
m_code_region_end = 0;
m_max_heap_size = 0;
m_mapped_physical_memory_size = 0;
m_mapped_unsafe_physical_memory = 0;
m_mapped_insecure_memory = 0;
m_mapped_ipc_server_memory = 0;
m_alias_region_extra_size = 0;
m_memory_block_slab_manager = Kernel::GetSystemSystemResource().GetMemoryBlockSlabManagerPointer();
m_block_info_manager = Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer();
m_resource_limit = std::addressof(Kernel::GetSystemResourceLimit());
m_allocate_option = KMemoryManager::EncodeOption(KMemoryManager::Pool_System, KMemoryManager::Direction_FromFront);
m_heap_fill_value = MemoryFillValue_Zero;
m_ipc_fill_value = MemoryFillValue_Zero;
m_stack_fill_value = MemoryFillValue_Zero;
m_cached_physical_linear_region = nullptr;
m_cached_physical_heap_region = nullptr;
/* Initialize our implementation. */
m_impl.InitializeForKernel(table, start, end);
/* Initialize our memory block manager. */
R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager));
}
Result KPageTableBase::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit) {
/* Validate the region. */
MESOSPHERE_ABORT_UNLESS(start <= code_address);
MESOSPHERE_ABORT_UNLESS(code_address < code_address + code_size);
MESOSPHERE_ABORT_UNLESS(code_address + code_size - 1 <= end - 1);
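/* Note: these checks compare inclusive last addresses (end - 1), so they remain correct even when */
/* code_address + code_size lands exactly at the end of the address space. */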
/* Define helpers. */
auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) ALWAYS_INLINE_LAMBDA {
return KAddressSpaceInfo::GetAddressSpaceStart(flags, type);
};
auto GetSpaceSize = [&](KAddressSpaceInfo::Type type) ALWAYS_INLINE_LAMBDA {
return KAddressSpaceInfo::GetAddressSpaceSize(flags, type);
};
/* Default to zero alias region extra size. */
m_alias_region_extra_size = 0;
/* Set our width and heap/alias sizes. */
m_address_space_width = GetAddressSpaceWidth(flags);
size_t alias_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Alias);
size_t heap_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Heap);
/* Adjust heap/alias size if we don't have an alias region. */
if ((flags & ams::svc::CreateProcessFlag_AddressSpaceMask) == ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias) {
heap_region_size += alias_region_size;
alias_region_size = 0;
}
/* Set code regions and determine remaining sizes. */
KProcessAddress process_code_start;
KProcessAddress process_code_end;
size_t stack_region_size;
size_t kernel_map_region_size;
KProcessAddress before_process_code_start, after_process_code_start;
size_t before_process_code_size, after_process_code_size;
if (m_address_space_width == 39) {
stack_region_size = GetSpaceSize(KAddressSpaceInfo::Type_Stack);
kernel_map_region_size = GetSpaceSize(KAddressSpaceInfo::Type_MapSmall);
m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_Map39Bit);
m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_Map39Bit);
m_alias_code_region_start = m_code_region_start;
m_alias_code_region_end = m_code_region_end;
process_code_start = util::AlignDown(GetInteger(code_address), RegionAlignment);
process_code_end = util::AlignUp(GetInteger(code_address) + code_size, RegionAlignment);
before_process_code_start = m_code_region_start;
before_process_code_size = process_code_start - before_process_code_start;
after_process_code_start = process_code_end;
after_process_code_size = m_code_region_end - process_code_end;
/* If we have a 39-bit address space and the flag requests it, add extra size to the alias region. */
if (flags & ams::svc::CreateProcessFlag_EnableAliasRegionExtraSize) {
/* Extra size is 1/8th of the address space. */
m_alias_region_extra_size = (static_cast<size_t>(1) << m_address_space_width) / 8;
alias_region_size += m_alias_region_extra_size;
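/* Illustrative arithmetic: a 39-bit address space spans 2^39 bytes = 512 GiB, so the extra alias size is 512 GiB / 8 = 64 GiB. */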
}
} else {
stack_region_size = 0;
kernel_map_region_size = 0;
m_code_region_start = GetSpaceStart(KAddressSpaceInfo::Type_MapSmall);
m_code_region_end = m_code_region_start + GetSpaceSize(KAddressSpaceInfo::Type_MapSmall);
m_alias_code_region_start = m_code_region_start;
m_alias_code_region_end = GetSpaceStart(KAddressSpaceInfo::Type_MapLarge) + GetSpaceSize(KAddressSpaceInfo::Type_MapLarge);
m_region_starts[RegionType_Stack] = m_code_region_start;
m_region_ends[RegionType_Stack] = m_code_region_end;
m_region_starts[RegionType_KernelMap] = m_code_region_start;
m_region_ends[RegionType_KernelMap] = m_code_region_end;
process_code_start = m_code_region_start;
process_code_end = m_code_region_end;
before_process_code_start = m_code_region_start;
before_process_code_size = 0;
after_process_code_start = GetSpaceStart(KAddressSpaceInfo::Type_MapLarge);
after_process_code_size = GetSpaceSize(KAddressSpaceInfo::Type_MapLarge);
}
/* Set other basic fields. */
m_enable_aslr = (flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
m_enable_device_address_space_merge = (flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
m_address_space_start = start;
m_address_space_end = end;
m_is_kernel = false;
m_memory_block_slab_manager = system_resource->GetMemoryBlockSlabManagerPointer();
m_block_info_manager = system_resource->GetBlockInfoManagerPointer();
m_resource_limit = resource_limit;
/* Set up our undetermined regions. */
{
/* Declare helper structure for layout process. */
struct RegionLayoutInfo {
size_t size;
RegionType type;
s32 alloc_index; /* 0 for before process code, 1 for after process code */
};
/* Create region layout info array, and add regions to it. */
RegionLayoutInfo region_layouts[RegionType_Count] = {};
size_t num_regions = 0;
if (kernel_map_region_size > 0) { region_layouts[num_regions++] = { .size = kernel_map_region_size, .type = RegionType_KernelMap, .alloc_index = 0, }; }
if (stack_region_size > 0) { region_layouts[num_regions++] = { .size = stack_region_size, .type = RegionType_Stack, .alloc_index = 0, }; }
region_layouts[num_regions++] = { .size = alias_region_size, .type = RegionType_Alias, .alloc_index = 0, };
region_layouts[num_regions++] = { .size = heap_region_size, .type = RegionType_Heap, .alloc_index = 0, };
/* Selection-sort the regions by size largest-to-smallest. */
for (size_t i = 0; i < num_regions - 1; ++i) {
for (size_t j = i + 1; j < num_regions; ++j) {
if (region_layouts[i].size < region_layouts[j].size) {
std::swap(region_layouts[i], region_layouts[j]);
}
}
}
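/* Sorting largest-to-smallest is a greedy fit heuristic: placing the biggest regions first minimizes */
/* the chance that a large region is later left without a gap big enough to hold it. */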
/* Layout the regions. */
constexpr auto AllocIndexCount = 2;
KProcessAddress alloc_starts[AllocIndexCount] = { before_process_code_start, after_process_code_start };
size_t alloc_sizes[AllocIndexCount] = { before_process_code_size, after_process_code_size };
size_t alloc_counts[AllocIndexCount] = {};
for (size_t i = 0; i < num_regions; ++i) {
/* Get reference to the current region. */
auto &cur_region = region_layouts[i];
/* Determine where the current region should go. */
cur_region.alloc_index = alloc_sizes[1] >= alloc_sizes[0] ? 1 : 0;
++alloc_counts[cur_region.alloc_index];
/* Check that the current region can fit. */
R_UNLESS(alloc_sizes[cur_region.alloc_index] >= cur_region.size, svc::ResultOutOfMemory());
/* Update our remaining size tracking. */
alloc_sizes[cur_region.alloc_index] -= cur_region.size;
}
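/* Each region is thus placed greedily on whichever side of the process code has the most space remaining, */
/* with the R_UNLESS above rejecting any layout that cannot fit. */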
/* Selection sort the regions to coalesce them by alloc index. */
for (size_t i = 0; i < num_regions - 1; ++i) {
for (size_t j = i + 1; j < num_regions; ++j) {
if (region_layouts[i].alloc_index > region_layouts[j].alloc_index) {
std::swap(region_layouts[i], region_layouts[j]);
}
}
}
/* Layout the regions for each alloc index. */
for (auto cur_alloc_index = 0; cur_alloc_index < AllocIndexCount; ++cur_alloc_index) {
/* If there are no regions to place, continue. */
const size_t cur_alloc_count = alloc_counts[cur_alloc_index];
if (cur_alloc_count == 0) {
continue;
}
/* Determine the starting region index for the current alloc index. */
size_t cur_region_index = 0;
for (size_t i = 0; i < num_regions; ++i) {
if (region_layouts[i].alloc_index == cur_alloc_index) {
cur_region_index = i;
break;
}
}
/* If aslr is enabled, randomize the current region order. Otherwise, sort by type. */
if (m_enable_aslr) {
for (size_t i = 0; i < cur_alloc_count - 1; ++i) {
std::swap(region_layouts[cur_region_index + i], region_layouts[cur_region_index + KSystemControl::GenerateRandomRange(i, cur_alloc_count - 1)]);
}
} else {
for (size_t i = 0; i < cur_alloc_count - 1; ++i) {
for (size_t j = i + 1; j < cur_alloc_count; ++j) {
if (region_layouts[cur_region_index + i].type > region_layouts[cur_region_index + j].type) {
std::swap(region_layouts[cur_region_index + i], region_layouts[cur_region_index + j]);
}
}
}
}
/* Determine aslr offsets for the current space. */
size_t aslr_offsets[RegionType_Count] = {};
if (m_enable_aslr) {
/* Generate the aslr offsets. */
for (size_t i = 0; i < cur_alloc_count; ++i) {
aslr_offsets[i] = KSystemControl::GenerateRandomRange(0, alloc_sizes[cur_alloc_index] / RegionAlignment) * RegionAlignment;
}
/* Sort the aslr offsets. */
for (size_t i = 0; i < cur_alloc_count - 1; ++i) {
for (size_t j = i + 1; j < cur_alloc_count; ++j) {
if (aslr_offsets[i] > aslr_offsets[j]) {
std::swap(aslr_offsets[i], aslr_offsets[j]);
}
}
}
}
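/* Sorting the offsets keeps them monotonically increasing, so when they are applied below, */
/* consecutive regions stay in order and cannot overlap. */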
/* Calculate final region positions. */
KProcessAddress prev_region_end = alloc_starts[cur_alloc_index];
size_t prev_aslr_offset = 0;
for (size_t i = 0; i < cur_alloc_count; ++i) {
/* Get the current region. */
auto &cur_region = region_layouts[cur_region_index + i];
/* Set the current region start/end. */
m_region_starts[cur_region.type] = (aslr_offsets[i] - prev_aslr_offset) + GetInteger(prev_region_end);
m_region_ends[cur_region.type] = m_region_starts[cur_region.type] + cur_region.size;
/* Update tracking variables. */
prev_region_end = m_region_ends[cur_region.type];
prev_aslr_offset = aslr_offsets[i];
}
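/* Worked example (illustrative numbers): with sorted offsets { 0x0, 0x200000 } and a first region of size */
/* 0x100000 placed at the alloc start, the second region begins at 0x100000 + (0x200000 - 0x0) = 0x300000. */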
}
/* Declare helpers to check that regions are inside our address space. */
const KProcessAddress process_code_last = process_code_end - 1;
auto IsInAddressSpace = [&](KProcessAddress addr) ALWAYS_INLINE_LAMBDA { return m_address_space_start <= addr && addr <= m_address_space_end; };
/* Ensure that the KernelMap region is valid. */
for (size_t k = 0; k < num_regions; ++k) {
if (const auto &kmap_region = region_layouts[k]; kmap_region.type == RegionType_KernelMap) {
/* If there's no kmap region, we have nothing to check. */
if (kmap_region.size == 0) {
break;
}
/* Check that the kmap region is within our address space. */
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_starts[RegionType_KernelMap]));
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_ends[RegionType_KernelMap]));
/* Check for overlap with process code. */
const KProcessAddress kmap_start = m_region_starts[RegionType_KernelMap];
const KProcessAddress kmap_last = m_region_ends[RegionType_KernelMap] - 1;
MESOSPHERE_ABORT_UNLESS(kernel_map_region_size == 0 || kmap_last < process_code_start || process_code_last < kmap_start);
/* Check for overlap with stack. */
for (size_t s = 0; s < num_regions; ++s) {
if (const auto &stack_region = region_layouts[s]; stack_region.type == RegionType_Stack) {
if (stack_region.size != 0) {
const KProcessAddress stack_start = m_region_starts[RegionType_Stack];
const KProcessAddress stack_last = m_region_ends[RegionType_Stack] - 1;
MESOSPHERE_ABORT_UNLESS((kernel_map_region_size == 0 && stack_region_size == 0) || kmap_last < stack_start || stack_last < kmap_start);
}
break;
}
}
/* Check for overlap with alias. */
for (size_t a = 0; a < num_regions; ++a) {
if (const auto &alias_region = region_layouts[a]; alias_region.type == RegionType_Alias) {
if (alias_region.size != 0) {
const KProcessAddress alias_start = m_region_starts[RegionType_Alias];
const KProcessAddress alias_last = m_region_ends[RegionType_Alias] - 1;
MESOSPHERE_ABORT_UNLESS(kmap_last < alias_start || alias_last < kmap_start);
}
break;
}
}
/* Check for overlap with heap. */
for (size_t h = 0; h < num_regions; ++h) {
if (const auto &heap_region = region_layouts[h]; heap_region.type == RegionType_Heap) {
if (heap_region.size != 0) {
const KProcessAddress heap_start = m_region_starts[RegionType_Heap];
const KProcessAddress heap_last = m_region_ends[RegionType_Heap] - 1;
MESOSPHERE_ABORT_UNLESS(kmap_last < heap_start || heap_last < kmap_start);
}
break;
}
}
}
}
/* Check that the Stack region is valid. */
for (size_t s = 0; s < num_regions; ++s) {
if (const auto &stack_region = region_layouts[s]; stack_region.type == RegionType_Stack) {
/* If there's no stack region, we have nothing to check. */
if (stack_region.size == 0) {
break;
}
/* Check that the stack region is within our address space. */
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_starts[RegionType_Stack]));
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_ends[RegionType_Stack]));
/* Check for overlap with process code. */
const KProcessAddress stack_start = m_region_starts[RegionType_Stack];
const KProcessAddress stack_last = m_region_ends[RegionType_Stack] - 1;
MESOSPHERE_ABORT_UNLESS(stack_region_size == 0 || stack_last < process_code_start || process_code_last < stack_start);
/* Check for overlap with alias. */
for (size_t a = 0; a < num_regions; ++a) {
if (const auto &alias_region = region_layouts[a]; alias_region.type == RegionType_Alias) {
if (alias_region.size != 0) {
const KProcessAddress alias_start = m_region_starts[RegionType_Alias];
const KProcessAddress alias_last = m_region_ends[RegionType_Alias] - 1;
MESOSPHERE_ABORT_UNLESS(stack_last < alias_start || alias_last < stack_start);
}
break;
}
}
/* Check for overlap with heap. */
for (size_t h = 0; h < num_regions; ++h) {
if (const auto &heap_region = region_layouts[h]; heap_region.type == RegionType_Heap) {
if (heap_region.size != 0) {
const KProcessAddress heap_start = m_region_starts[RegionType_Heap];
const KProcessAddress heap_last = m_region_ends[RegionType_Heap] - 1;
MESOSPHERE_ABORT_UNLESS(stack_last < heap_start || heap_last < stack_start);
}
break;
}
}
}
}
/* Check that the Alias region is valid. */
for (size_t a = 0; a < num_regions; ++a) {
if (const auto &alias_region = region_layouts[a]; alias_region.type == RegionType_Alias) {
/* If there's no alias region, we have nothing to check. */
if (alias_region.size == 0) {
break;
}
/* Check that the alias region is within our address space. */
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_starts[RegionType_Alias]));
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_ends[RegionType_Alias]));
/* Check for overlap with process code. */
const KProcessAddress alias_start = m_region_starts[RegionType_Alias];
const KProcessAddress alias_last = m_region_ends[RegionType_Alias] - 1;
MESOSPHERE_ABORT_UNLESS(alias_last < process_code_start || process_code_last < alias_start);
/* Check for overlap with heap. */
for (size_t h = 0; h < num_regions; ++h) {
if (const auto &heap_region = region_layouts[h]; heap_region.type == RegionType_Heap) {
if (heap_region.size != 0) {
const KProcessAddress heap_start = m_region_starts[RegionType_Heap];
const KProcessAddress heap_last = m_region_ends[RegionType_Heap] - 1;
MESOSPHERE_ABORT_UNLESS(alias_last < heap_start || heap_last < alias_start);
}
break;
}
}
}
}
/* Check that the Heap region is valid. */
for (size_t h = 0; h < num_regions; ++h) {
if (const auto &heap_region = region_layouts[h]; heap_region.type == RegionType_Heap) {
/* If there's no heap region, we have nothing to check. */
if (heap_region.size == 0) {
break;
}
/* Check that the heap region is within our address space. */
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_starts[RegionType_Heap]));
MESOSPHERE_ABORT_UNLESS(IsInAddressSpace(m_region_ends[RegionType_Heap]));
/* Check for overlap with process code. */
const KProcessAddress heap_start = m_region_starts[RegionType_Heap];
const KProcessAddress heap_last = m_region_ends[RegionType_Heap] - 1;
MESOSPHERE_ABORT_UNLESS(heap_last < process_code_start || process_code_last < heap_start);
}
}
}
/* Set heap and fill members. */
m_current_heap_end = m_region_starts[RegionType_Heap];
m_max_heap_size = 0;
m_mapped_physical_memory_size = 0;
m_mapped_unsafe_physical_memory = 0;
m_mapped_insecure_memory = 0;
m_mapped_ipc_server_memory = 0;
const bool fill_memory = KTargetSystem::IsDebugMemoryFillEnabled();
m_heap_fill_value = fill_memory ? MemoryFillValue_Heap : MemoryFillValue_Zero;
m_ipc_fill_value = fill_memory ? MemoryFillValue_Ipc : MemoryFillValue_Zero;
m_stack_fill_value = fill_memory ? MemoryFillValue_Stack : MemoryFillValue_Zero;
/* Set allocation option. */
m_allocate_option = KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction_FromBack : KMemoryManager::Direction_FromFront);
/* Initialize our implementation. */
m_impl.InitializeForProcess(table, GetInteger(start), GetInteger(end));
/* Initialize our memory block manager. */
R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager));
}
void KPageTableBase::Finalize() {
/* Finalize memory blocks. */
m_memory_block_manager.Finalize(m_memory_block_slab_manager);
/* Free any unsafe mapped memory. */
if (m_mapped_unsafe_physical_memory) {
Kernel::GetUnsafeMemory().Release(m_mapped_unsafe_physical_memory);
}
/* Release any insecure mapped memory. */
if (m_mapped_insecure_memory) {
if (auto * const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit(); insecure_resource_limit != nullptr) {
insecure_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, m_mapped_insecure_memory);
}
}
/* Release any ipc server memory. */
if (m_mapped_ipc_server_memory) {
m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, m_mapped_ipc_server_memory);
}
/* Invalidate the entire instruction cache. */
cpu::InvalidateEntireInstructionCache();
}
KProcessAddress KPageTableBase::GetRegionAddress(ams::svc::MemoryState state) const {
switch (state) {
case ams::svc::MemoryState_Free:
case ams::svc::MemoryState_Kernel:
return m_address_space_start;
case ams::svc::MemoryState_Normal:
return m_region_starts[RegionType_Heap];
case ams::svc::MemoryState_Ipc:
case ams::svc::MemoryState_NonSecureIpc:
case ams::svc::MemoryState_NonDeviceIpc:
return m_region_starts[RegionType_Alias];
case ams::svc::MemoryState_Stack:
return m_region_starts[RegionType_Stack];
case ams::svc::MemoryState_Static:
case ams::svc::MemoryState_ThreadLocal:
return m_region_starts[RegionType_KernelMap];
case ams::svc::MemoryState_Io:
case ams::svc::MemoryState_Shared:
case ams::svc::MemoryState_AliasCode:
case ams::svc::MemoryState_AliasCodeData:
case ams::svc::MemoryState_Transfered:
case ams::svc::MemoryState_SharedTransfered:
case ams::svc::MemoryState_SharedCode:
case ams::svc::MemoryState_GeneratedCode:
case ams::svc::MemoryState_CodeOut:
case ams::svc::MemoryState_Coverage:
case ams::svc::MemoryState_Insecure:
return m_alias_code_region_start;
case ams::svc::MemoryState_Code:
case ams::svc::MemoryState_CodeData:
return m_code_region_start;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
size_t KPageTableBase::GetRegionSize(ams::svc::MemoryState state) const {
switch (state) {
case ams::svc::MemoryState_Free:
case ams::svc::MemoryState_Kernel:
return m_address_space_end - m_address_space_start;
case ams::svc::MemoryState_Normal:
return m_region_ends[RegionType_Heap] - m_region_starts[RegionType_Heap];
case ams::svc::MemoryState_Ipc:
case ams::svc::MemoryState_NonSecureIpc:
case ams::svc::MemoryState_NonDeviceIpc:
return m_region_ends[RegionType_Alias] - m_region_starts[RegionType_Alias];
case ams::svc::MemoryState_Stack:
return m_region_ends[RegionType_Stack] - m_region_starts[RegionType_Stack];
case ams::svc::MemoryState_Static:
case ams::svc::MemoryState_ThreadLocal:
return m_region_ends[RegionType_KernelMap] - m_region_starts[RegionType_KernelMap];
case ams::svc::MemoryState_Io:
case ams::svc::MemoryState_Shared:
case ams::svc::MemoryState_AliasCode:
case ams::svc::MemoryState_AliasCodeData:
case ams::svc::MemoryState_Transfered:
case ams::svc::MemoryState_SharedTransfered:
case ams::svc::MemoryState_SharedCode:
case ams::svc::MemoryState_GeneratedCode:
case ams::svc::MemoryState_CodeOut:
case ams::svc::MemoryState_Coverage:
case ams::svc::MemoryState_Insecure:
return m_alias_code_region_end - m_alias_code_region_start;
case ams::svc::MemoryState_Code:
case ams::svc::MemoryState_CodeData:
return m_code_region_end - m_code_region_start;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
bool KPageTableBase::CanContain(KProcessAddress addr, size_t size, ams::svc::MemoryState state) const {
const KProcessAddress end = addr + size;
const KProcessAddress last = end - 1;
const KProcessAddress region_start = this->GetRegionAddress(state);
const size_t region_size = this->GetRegionSize(state);
const bool is_in_region = region_start <= addr && addr < end && last <= region_start + region_size - 1;
const bool is_in_heap = !(end <= m_region_starts[RegionType_Heap] || m_region_ends[RegionType_Heap] <= addr || m_region_starts[RegionType_Heap] == m_region_ends[RegionType_Heap]);
const bool is_in_alias = !(end <= m_region_starts[RegionType_Alias] || m_region_ends[RegionType_Alias] <= addr || m_region_starts[RegionType_Alias] == m_region_ends[RegionType_Alias]);
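/* is_in_region tests containment in the state's own region; is_in_heap/is_in_alias are strict overlap */
/* tests against the half-open heap/alias ranges, and are false whenever the corresponding region is empty. */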
switch (state) {
case ams::svc::MemoryState_Free:
case ams::svc::MemoryState_Kernel:
return is_in_region;
case ams::svc::MemoryState_Io:
case ams::svc::MemoryState_Static:
case ams::svc::MemoryState_Code:
case ams::svc::MemoryState_CodeData:
case ams::svc::MemoryState_Shared:
case ams::svc::MemoryState_AliasCode:
case ams::svc::MemoryState_AliasCodeData:
case ams::svc::MemoryState_Stack:
case ams::svc::MemoryState_ThreadLocal:
case ams::svc::MemoryState_Transfered:
case ams::svc::MemoryState_SharedTransfered:
case ams::svc::MemoryState_SharedCode:
case ams::svc::MemoryState_GeneratedCode:
case ams::svc::MemoryState_CodeOut:
case ams::svc::MemoryState_Coverage:
case ams::svc::MemoryState_Insecure:
return is_in_region && !is_in_heap && !is_in_alias;
case ams::svc::MemoryState_Normal:
MESOSPHERE_ASSERT(is_in_heap);
return is_in_region && !is_in_alias;
case ams::svc::MemoryState_Ipc:
case ams::svc::MemoryState_NonSecureIpc:
case ams::svc::MemoryState_NonDeviceIpc:
MESOSPHERE_ASSERT(is_in_alias);
return is_in_region && !is_in_heap;
default:
return false;
}
}
Result KPageTableBase::CheckMemoryState(KMemoryBlockManager::const_iterator it, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
/* Validate the states match expectation. */
R_UNLESS((it->GetState() & state_mask) == state, svc::ResultInvalidCurrentMemory());
R_UNLESS((it->GetPermission() & perm_mask) == perm, svc::ResultInvalidCurrentMemory());
R_UNLESS((it->GetAttribute() & attr_mask) == attr, svc::ResultInvalidCurrentMemory());
R_SUCCEED();
}
Result KPageTableBase::CheckMemoryStateContiguous(size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Get information about the first block. */
const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
/* If the start address isn't aligned, we need a block. */
const size_t blocks_for_start_align = (util::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) ? 1 : 0;
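/* A start address strictly inside the first block means a later update must split that block, */
/* so one additional allocator block is reserved for it. */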
while (true) {
/* Validate against the provided masks. */
R_TRY(this->CheckMemoryState(it, state_mask, state, perm_mask, perm, attr_mask, attr));
/* Break once we're done. */
if (last_addr <= it->GetLastAddress()) {
break;
}
/* Advance our iterator. */
it++;
MESOSPHERE_ASSERT(it != m_memory_block_manager.cend());
}
/* If the end address isn't aligned, we need a block. */
const size_t blocks_for_end_align = (util::AlignUp(GetInteger(addr) + size, PageSize) != it->GetEndAddress()) ? 1 : 0;
if (out_blocks_needed != nullptr) {
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
}
R_SUCCEED();
}
Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KMemoryBlockManager::const_iterator it, KProcessAddress last_addr, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Validate all blocks in the range have correct state. */
const KMemoryState first_state = it->GetState();
const KMemoryPermission first_perm = it->GetPermission();
const KMemoryAttribute first_attr = it->GetAttribute();
while (true) {
/* Validate the current block. */
R_UNLESS(it->GetState() == first_state, svc::ResultInvalidCurrentMemory());
R_UNLESS(it->GetPermission() == first_perm, svc::ResultInvalidCurrentMemory());
R_UNLESS((it->GetAttribute() | ignore_attr) == (first_attr | ignore_attr), svc::ResultInvalidCurrentMemory());
/* Validate against the provided masks. */
R_TRY(this->CheckMemoryState(it, state_mask, state, perm_mask, perm, attr_mask, attr));
/* Break once we're done. */
if (last_addr <= it->GetLastAddress()) {
break;
}
/* Advance our iterator. */
it++;
MESOSPHERE_ASSERT(it != m_memory_block_manager.cend());
}
/* Write output state. */
if (out_state != nullptr) {
*out_state = first_state;
}
if (out_perm != nullptr) {
*out_perm = first_perm;
}
if (out_attr != nullptr) {
*out_attr = static_cast<KMemoryAttribute>(first_attr & ~ignore_attr);
}
/* If the end address isn't aligned, we need a block. */
if (out_blocks_needed != nullptr) {
const size_t blocks_for_end_align = (util::AlignDown(GetInteger(last_addr), PageSize) + PageSize != it->GetEndAddress()) ? 1 : 0;
*out_blocks_needed = blocks_for_end_align;
}
R_SUCCEED();
}
Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Check memory state. */
const KProcessAddress last_addr = addr + size - 1;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(addr);
R_TRY(this->CheckMemoryState(out_state, out_perm, out_attr, out_blocks_needed, it, last_addr, state_mask, state, perm_mask, perm, attr_mask, attr, ignore_attr));
/* If the start address isn't aligned, we need a block. */
if (out_blocks_needed != nullptr && util::AlignDown(GetInteger(addr), PageSize) != it->GetAddress()) {
++(*out_blocks_needed);
}
R_SUCCEED();
}
Result KPageTableBase::LockMemoryAndOpen(KPageGroup *out_pg, KPhysicalAddress *out_paddr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr) {
/* Validate basic preconditions. */
MESOSPHERE_ASSERT((lock_attr & attr) == 0);
MESOSPHERE_ASSERT((lock_attr & (KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared)) == 0);
/* Validate the lock request. */
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(addr, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check that the output page group is empty, if it exists. */
if (out_pg) {
MESOSPHERE_ASSERT(out_pg->GetNumPages() == 0);
}
/* Check the state. */
KMemoryState old_state;
KMemoryPermission old_perm;
KMemoryAttribute old_attr;
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), std::addressof(num_allocator_blocks), addr, size, state_mask | KMemoryState_FlagReferenceCounted, state | KMemoryState_FlagReferenceCounted, perm_mask, perm, attr_mask, attr));
/* Get the physical address, if we're supposed to. */
if (out_paddr != nullptr) {
MESOSPHERE_ABORT_UNLESS(this->GetPhysicalAddressLocked(out_paddr, addr));
}
/* Make the page group, if we're supposed to. */
if (out_pg != nullptr) {
R_TRY(this->MakePageGroup(*out_pg, addr, num_pages));
}
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* Decide on new perm and attr. */
new_perm = (new_perm != KMemoryPermission_None) ? new_perm : old_perm;
KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr | lock_attr);
/* Update permission, if we need to. */
if (new_perm != old_perm) {
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
const KPageProperties properties = { new_perm, false, (old_attr & KMemoryAttribute_Uncached) != 0, DisableMergeAttribute_DisableHeadBodyTail };
R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
}
/* Apply the memory block updates. */
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, new_attr, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None);
/* If we have an output group, open. */
if (out_pg) {
out_pg->Open();
}
R_SUCCEED();
}
Result KPageTableBase::UnlockMemory(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr, const KPageGroup *pg) {
/* Validate basic preconditions. */
MESOSPHERE_ASSERT((attr_mask & lock_attr) == lock_attr);
MESOSPHERE_ASSERT((attr & lock_attr) == lock_attr);
/* Validate the unlock request. */
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(addr, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check the state. */
KMemoryState old_state;
KMemoryPermission old_perm;
KMemoryAttribute old_attr;
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), std::addressof(num_allocator_blocks), addr, size, state_mask | KMemoryState_FlagReferenceCounted, state | KMemoryState_FlagReferenceCounted, perm_mask, perm, attr_mask, attr));
/* Check the page group. */
if (pg != nullptr) {
R_UNLESS(this->IsValidPageGroup(*pg, addr, num_pages), svc::ResultInvalidMemoryRegion());
}
/* Decide on new perm and attr. */
new_perm = (new_perm != KMemoryPermission_None) ? new_perm : old_perm;
KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(old_attr & ~lock_attr);
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* Update permission, if we need to. */
if (new_perm != old_perm) {
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
const KPageProperties properties = { new_perm, false, (old_attr & KMemoryAttribute_Uncached) != 0, DisableMergeAttribute_EnableAndMergeHeadBodyTail };
R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
}
/* Apply the memory block updates. */
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, new_attr, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked);
R_SUCCEED();
}
Result KPageTableBase::QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(out_info != nullptr);
MESOSPHERE_ASSERT(out_page != nullptr);
const KMemoryBlock *block = m_memory_block_manager.FindBlock(address);
R_UNLESS(block != nullptr, svc::ResultInvalidCurrentMemory());
*out_info = block->GetMemoryInfo();
out_page->flags = 0;
R_SUCCEED();
}
Result KPageTableBase::QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, ams::svc::MemoryState state) const {
MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(out != nullptr);
const KProcessAddress region_start = this->GetRegionAddress(state);
const size_t region_size = this->GetRegionSize(state);
/* Check that the address/size are potentially valid. */
R_UNLESS((address < address + size), svc::ResultNotFound());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0, .attr = 0 };
bool cur_valid = false;
TraversalEntry next_entry;
bool next_valid;
size_t tot_size = 0;
next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), region_start);
next_entry.block_size = (next_entry.block_size - (GetInteger(region_start) & (next_entry.block_size - 1)));
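/* The first entry's block size is truncated to the portion at or above region_start, so that tot_size */
/* below measures virtual distance from region_start rather than from the containing block's base. */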
/* Iterate, looking for entry. */
while (true) {
if ((!next_valid && !cur_valid) || (next_valid && cur_valid && next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
cur_entry.block_size += next_entry.block_size;
} else {
if (cur_valid && cur_entry.phys_addr <= address && address + size <= cur_entry.phys_addr + cur_entry.block_size) {
/* Check if this region is valid. */
const KProcessAddress mapped_address = (region_start + tot_size) + (address - cur_entry.phys_addr);
if (R_SUCCEEDED(this->CheckMemoryState(mapped_address, size, KMemoryState_Mask, static_cast<KMemoryState>(util::ToUnderlying(state)), KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None))) {
/* It is! */
*out = mapped_address;
R_SUCCEED();
}
}
/* Update tracking variables. */
tot_size += cur_entry.block_size;
cur_entry = next_entry;
cur_valid = next_valid;
}
if (cur_entry.block_size + tot_size >= region_size) {
break;
}
next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
}
/* Check the last entry. */
R_UNLESS(cur_valid, svc::ResultNotFound());
R_UNLESS(cur_entry.phys_addr <= address, svc::ResultNotFound());
R_UNLESS(address + size <= cur_entry.phys_addr + cur_entry.block_size, svc::ResultNotFound());
/* Check if the last region is valid. */
const KProcessAddress mapped_address = (region_start + tot_size) + (address - cur_entry.phys_addr);
R_TRY_CATCH(this->CheckMemoryState(mapped_address, size, KMemoryState_All, state, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None)) {
R_CONVERT_ALL(svc::ResultNotFound());
} R_END_TRY_CATCH;
/* We found the region. */
*out = mapped_address;
R_SUCCEED();
}
Result KPageTableBase::MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Validate that the source address's state is valid. */
KMemoryState src_state;
size_t num_src_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks), src_address, size, KMemoryState_FlagCanAlias, KMemoryState_FlagCanAlias, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None));
/* Validate that the dst address's state is valid. */
size_t num_dst_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
/* Create an update allocator for the source. */
Result src_allocator_result;
KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), m_memory_block_slab_manager, num_src_allocator_blocks);
R_TRY(src_allocator_result);
/* Create an update allocator for the destination. */
Result dst_allocator_result;
KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), m_memory_block_slab_manager, num_dst_allocator_blocks);
R_TRY(dst_allocator_result);
/* Map the memory. */
{
/* Determine the number of pages being operated on. */
const size_t num_pages = size / PageSize;
/* Create page groups for the memory being unmapped. */
KPageGroup pg(m_block_info_manager);
/* Create the page group representing the source. */
R_TRY(this->MakePageGroup(pg, src_address, num_pages));
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Reprotect the source as kernel-read/not mapped. */
const KMemoryPermission new_src_perm = static_cast<KMemoryPermission>(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped);
const KMemoryAttribute new_src_attr = KMemoryAttribute_Locked;
const KPageProperties src_properties = { new_src_perm, false, false, DisableMergeAttribute_DisableHeadBodyTail };
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
/* Ensure that we unprotect the source pages on failure. */
ON_RESULT_FAILURE {
const KPageProperties unprotect_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_EnableHeadBodyTail };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, unprotect_properties, OperationType_ChangePermissions, true));
};
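/* Note the ordering: the source is reprotected before the alias is mapped, so a failed map rolls back */
/* by simply restoring the source permissions. */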
/* Map the alias pages. */
const KPageProperties dst_map_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_DisableHead };
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties, false));
/* Apply the memory block updates. */
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_src_perm, new_src_attr, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None);
m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_Stack, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
}
R_SUCCEED();
}
Result KPageTableBase::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Validate that the source address's state is valid. */
KMemoryState src_state;
size_t num_src_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(src_state), nullptr, nullptr, std::addressof(num_src_allocator_blocks), src_address, size, KMemoryState_FlagCanAlias, KMemoryState_FlagCanAlias, KMemoryPermission_All, KMemoryPermission_NotMapped | KMemoryPermission_KernelRead, KMemoryAttribute_All, KMemoryAttribute_Locked));
/* Validate that the dst address's state is valid. */
KMemoryPermission dst_perm;
size_t num_dst_allocator_blocks;
R_TRY(this->CheckMemoryState(nullptr, std::addressof(dst_perm), nullptr, std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_Stack, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
/* Create an update allocator for the source. */
Result src_allocator_result;
KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), m_memory_block_slab_manager, num_src_allocator_blocks);
R_TRY(src_allocator_result);
/* Create an update allocator for the destination. */
Result dst_allocator_result;
KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), m_memory_block_slab_manager, num_dst_allocator_blocks);
R_TRY(dst_allocator_result);
/* Unmap the memory. */
{
/* Determine the number of pages being operated on. */
const size_t num_pages = size / PageSize;
/* Create page groups for the memory being unmapped. */
KPageGroup pg(m_block_info_manager);
/* Create the page group representing the destination. */
R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
/* Ensure the page group is valid for the source. */
R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), svc::ResultInvalidMemoryRegion());
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Unmap the aliased copy of the pages. */
const KPageProperties dst_unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, dst_unmap_properties, OperationType_Unmap, false));
/* Ensure that we re-map the aliased pages on failure. */
ON_RESULT_FAILURE {
this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
};
/* Try to set the permissions for the source pages back to what they should be. */
const KPageProperties src_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_EnableAndMergeHeadBodyTail };
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
/* Apply the memory block updates. */
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked);
m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
}
R_SUCCEED();
}
Result KPageTableBase::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
/* Validate the mapping request. */
R_UNLESS(this->CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidMemoryRegion());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Verify that the source memory is normal heap. */
KMemoryState src_state;
KMemoryPermission src_perm;
size_t num_src_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(src_state), std::addressof(src_perm), nullptr, std::addressof(num_src_allocator_blocks), src_address, size, KMemoryState_All, KMemoryState_Normal, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None));
/* Verify that the destination memory is unmapped. */
size_t num_dst_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
/* Create an update allocator for the source. */
Result src_allocator_result;
KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), m_memory_block_slab_manager, num_src_allocator_blocks);
R_TRY(src_allocator_result);
/* Create an update allocator for the destination. */
Result dst_allocator_result;
KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), m_memory_block_slab_manager, num_dst_allocator_blocks);
R_TRY(dst_allocator_result);
/* Map the code memory. */
{
/* Determine the number of pages being operated on. */
const size_t num_pages = size / PageSize;
/* Create page groups for the memory being unmapped. */
KPageGroup pg(m_block_info_manager);
/* Create the page group representing the source. */
R_TRY(this->MakePageGroup(pg, src_address, num_pages));
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Reprotect the source as kernel-read/not mapped. */
const KMemoryPermission new_perm = static_cast<KMemoryPermission>(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped);
const KPageProperties src_properties = { new_perm, false, false, DisableMergeAttribute_DisableHeadBodyTail };
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
/* Ensure that we unprotect the source pages on failure. */
ON_RESULT_FAILURE {
const KPageProperties unprotect_properties = { src_perm, false, false, DisableMergeAttribute_EnableHeadBodyTail };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, unprotect_properties, OperationType_ChangePermissions, true));
};
/* Map the alias pages. */
const KPageProperties dst_properties = { new_perm, false, false, DisableMergeAttribute_DisableHead };
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
/* Apply the memory block updates. */
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None);
m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_AliasCode, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
}
R_SUCCEED();
}
Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
/* Validate the mapping request. */
R_UNLESS(this->CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidMemoryRegion());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Verify that the source memory is locked normal heap. */
size_t num_src_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_src_allocator_blocks), src_address, size, KMemoryState_All, KMemoryState_Normal, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_Locked));
/* Verify that the destination memory is aliasable code. */
size_t num_dst_allocator_blocks;
R_TRY(this->CheckMemoryStateContiguous(std::addressof(num_dst_allocator_blocks), dst_address, size, KMemoryState_FlagCanCodeAlias, KMemoryState_FlagCanCodeAlias, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All & ~KMemoryAttribute_PermissionLocked, KMemoryAttribute_None));
/* Determine whether any pages being unmapped are code. */
bool any_code_pages = false;
{
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(dst_address);
while (true) {
/* Check if the memory has code flag. */
if ((it->GetState() & KMemoryState_FlagCode) != 0) {
any_code_pages = true;
break;
}
/* Check if we're done. */
if (dst_address + size - 1 <= it->GetLastAddress()) {
break;
}
/* Advance. */
++it;
}
}
/* Ensure that we maintain the instruction cache. */
bool reprotected_pages = false;
ON_SCOPE_EXIT {
if (reprotected_pages && any_code_pages) {
cpu::InvalidateEntireInstructionCache();
}
};
/* Unmap. */
{
/* Determine the number of pages being operated on. */
const size_t num_pages = size / PageSize;
/* Create page groups for the memory being unmapped. */
KPageGroup pg(m_block_info_manager);
/* Create the page group representing the destination. */
R_TRY(this->MakePageGroup(pg, dst_address, num_pages));
/* Verify that the page group contains the same pages as the source. */
R_UNLESS(this->IsValidPageGroup(pg, src_address, num_pages), svc::ResultInvalidMemoryRegion());
/* Create an update allocator for the source. */
Result src_allocator_result;
KMemoryBlockManagerUpdateAllocator src_allocator(std::addressof(src_allocator_result), m_memory_block_slab_manager, num_src_allocator_blocks);
R_TRY(src_allocator_result);
/* Create an update allocator for the destination. */
Result dst_allocator_result;
KMemoryBlockManagerUpdateAllocator dst_allocator(std::addressof(dst_allocator_result), m_memory_block_slab_manager, num_dst_allocator_blocks);
R_TRY(dst_allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Unmap the aliased copy of the pages. */
const KPageProperties dst_unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, dst_unmap_properties, OperationType_Unmap, false));
/* Ensure that we re-map the aliased pages on failure. */
ON_RESULT_FAILURE {
this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
};
/* Try to set the permissions for the source pages back to what they should be. */
const KPageProperties src_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_EnableAndMergeHeadBodyTail };
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
/* Apply the memory block updates. */
m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked);
/* Note that we reprotected pages. */
reprotected_pages = true;
}
R_SUCCEED();
}
Result KPageTableBase::MapInsecurePhysicalMemory(KProcessAddress address, size_t size) {
/* Get the insecure memory resource limit and pool. */
auto * const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit();
const auto insecure_pool = static_cast<KMemoryManager::Pool>(KSystemControl::GetInsecureMemoryPool());
/* Reserve the insecure memory. */
/* NOTE: ResultOutOfMemory is returned here instead of the usual LimitReached. */
KScopedResourceReservation memory_reservation(insecure_resource_limit, ams::svc::LimitableResource_PhysicalMemoryMax, size);
R_UNLESS(memory_reservation.Succeeded(), svc::ResultOutOfMemory());
/* Allocate pages for the insecure memory. */
KPageGroup pg(m_block_info_manager);
R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), size / PageSize, 1, KMemoryManager::EncodeOption(insecure_pool, KMemoryManager::Direction_FromFront)));
/* Close the opened pages when we're done with them. */
/* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */
ON_SCOPE_EXIT { pg.Close(); };
/* Clear all the newly allocated pages. */
for (const auto &it : pg) {
std::memset(GetVoidPointer(GetHeapVirtualAddress(it.GetAddress())), m_heap_fill_value, it.GetSize());
}
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Validate that the address's state is valid. */
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Map the pages. */
const size_t num_pages = size / PageSize;
const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_DisableHead };
R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties, OperationType_MapGroup, false));
/* Apply the memory block update. */
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Insecure, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* Update our mapped insecure size. */
m_mapped_insecure_memory += size;
/* Commit the memory reservation. */
memory_reservation.Commit();
/* We succeeded. */
R_SUCCEED();
}
Result KPageTableBase::UnmapInsecurePhysicalMemory(KProcessAddress address, size_t size) {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check the memory state. */
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Insecure, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Unmap the memory. */
const size_t num_pages = size / PageSize;
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
/* Apply the memory block update. */
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
/* Update our mapped insecure size. */
m_mapped_insecure_memory -= size;
/* Release the insecure memory from the insecure limit. */
if (auto * const insecure_resource_limit = KSystemControl::GetInsecureMemoryResourceLimit(); insecure_resource_limit != nullptr) {
insecure_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, size);
}
R_SUCCEED();
}
KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const {
KProcessAddress address = Null<KProcessAddress>;
if (num_pages <= region_num_pages) {
if (this->IsAslrEnabled()) {
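/* Strategy: probe up to 8 random candidates directly against the block manager; if none */
/* fits, fall back to a first-fit scan starting at a random offset, and finally to a plain */
/* first-fit scan over the entire region. */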
/* Try to directly find a free area up to 8 times. */
for (size_t i = 0; i < 8; i++) {
const size_t random_offset = KSystemControl::GenerateRandomRange(0, (region_num_pages - num_pages - guard_pages) * PageSize / alignment) * alignment;
const KProcessAddress candidate = util::AlignDown(GetInteger(region_start + random_offset), alignment) + offset;
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(candidate);
MESOSPHERE_ABORT_UNLESS(it != m_memory_block_manager.end());
if (it->GetState() != KMemoryState_Free) { continue; }
if (!(region_start <= candidate)) { continue; }
if (!(it->GetAddress() + guard_pages * PageSize <= GetInteger(candidate))) { continue; }
if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= it->GetLastAddress())) { continue; }
if (!(candidate + (num_pages + guard_pages) * PageSize - 1 <= region_start + region_num_pages * PageSize - 1)) { continue; }
address = candidate;
break;
}
/* Fall back to finding the first free area with a random offset. */
if (address == Null<KProcessAddress>) {
/* NOTE: Nintendo does not account for guard pages here. */
/* This may theoretically cause an offset to be chosen that cannot be mapped. */
/* We will account for guard pages. */
const size_t offset_pages = KSystemControl::GenerateRandomRange(0, region_num_pages - num_pages - guard_pages);
address = m_memory_block_manager.FindFreeArea(region_start + offset_pages * PageSize, region_num_pages - offset_pages, num_pages, alignment, offset, guard_pages);
}
}
/* Find the first free area. */
if (address == Null<KProcessAddress>) {
address = m_memory_block_manager.FindFreeArea(region_start, region_num_pages, num_pages, alignment, offset, guard_pages);
}
}
return address;
}
size_t KPageTableBase::GetSize(KMemoryState state) const {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Iterate, counting blocks with the desired state. */
size_t total_size = 0;
for (KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(m_address_space_start); it != m_memory_block_manager.end(); ++it) {
if (it->GetState() == state) {
total_size += it->GetSize();
}
}
return total_size;
}
size_t KPageTableBase::GetCodeSize() const {
return this->GetSize(KMemoryState_Code);
}
size_t KPageTableBase::GetCodeDataSize() const {
return this->GetSize(KMemoryState_CodeData);
}
size_t KPageTableBase::GetAliasCodeSize() const {
return this->GetSize(KMemoryState_AliasCode);
}
size_t KPageTableBase::GetAliasCodeDataSize() const {
return this->GetSize(KMemoryState_AliasCodeData);
}
Result KPageTableBase::AllocateAndMapPagesImpl(PageLinkedList *page_list, KProcessAddress address, size_t num_pages, const KPageProperties &properties) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Create a page group to hold the pages we allocate. */
KPageGroup pg(m_block_info_manager);
/* Allocate the pages. */
R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, 1, m_allocate_option));
/* Ensure that the page group is closed when we're done working with it. */
ON_SCOPE_EXIT { pg.Close(); };
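/* NOTE: As at the other allocation sites, a successful map is expected to leave each page */
/* with an extra reference, so this close frees the pages only if mapping fails. */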
/* Clear all pages. */
for (const auto &it : pg) {
std::memset(GetVoidPointer(GetHeapVirtualAddress(it.GetAddress())), m_heap_fill_value, it.GetSize());
}
/* Map the pages. */
R_RETURN(this->Operate(page_list, address, num_pages, pg, properties, OperationType_MapGroup, false));
}
Result KPageTableBase::MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Note the current address, so that we can iterate. */
const KProcessAddress start_address = address;
KProcessAddress cur_address = address;
/* Ensure that we clean up on failure. */
ON_RESULT_FAILURE {
MESOSPHERE_ABORT_UNLESS(!reuse_ll);
if (cur_address != start_address) {
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(page_list, start_address, (cur_address - start_address) / PageSize, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
}
};
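/* NOTE: Only the first block carries the caller's disable-merge attribute; subsequent */
/* blocks are mapped with DisableMergeAttribute_None, so they remain free to merge. */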
/* Iterate, mapping all pages in the group. */
for (const auto &block : pg) {
/* Map and advance. */
const KPageProperties cur_properties = (cur_address == start_address) ? properties : KPageProperties{ properties.perm, properties.io, properties.uncached, DisableMergeAttribute_None };
R_TRY(this->Operate(page_list, cur_address, block.GetNumPages(), block.GetAddress(), true, cur_properties, OperationType_Map, reuse_ll));
cur_address += block.GetSize();
}
/* We succeeded! */
R_SUCCEED();
}
void KPageTableBase::RemapPageGroup(PageLinkedList *page_list, KProcessAddress address, size_t size, const KPageGroup &pg) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Note the current address, so that we can iterate. */
const KProcessAddress start_address = address;
const KProcessAddress last_address = start_address + size - 1;
const KProcessAddress end_address = last_address + 1;
/* Iterate over the memory. */
auto pg_it = pg.begin();
MESOSPHERE_ABORT_UNLESS(pg_it != pg.end());
KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
size_t pg_pages = pg_it->GetNumPages();
auto it = m_memory_block_manager.FindIterator(start_address);
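/* We now walk the virtual memory blocks and the physical page group in lockstep, mapping */
/* each physical extent with the permission of the memory block that covers it. */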
while (true) {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
/* Determine the range to map. */
KProcessAddress map_address = std::max(GetInteger(it->GetAddress()), GetInteger(start_address));
const KProcessAddress map_end_address = std::min(GetInteger(it->GetEndAddress()), GetInteger(end_address));
MESOSPHERE_ABORT_UNLESS(map_end_address != map_address);
/* Determine if we should disable head merge. */
const bool disable_head_merge = it->GetAddress() >= GetInteger(start_address) && (it->GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Normal) != 0;
const KPageProperties map_properties = { it->GetPermission(), false, false, disable_head_merge ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
/* While we have pages to map, map them. */
size_t map_pages = (map_end_address - map_address) / PageSize;
while (map_pages > 0) {
/* Check if we're at the end of the physical block. */
if (pg_pages == 0) {
/* Ensure there are more pages to map. */
MESOSPHERE_ABORT_UNLESS(pg_it != pg.end());
/* Advance our physical block. */
++pg_it;
pg_phys_addr = pg_it->GetAddress();
pg_pages = pg_it->GetNumPages();
}
/* Map whatever we can. */
const size_t cur_pages = std::min(pg_pages, map_pages);
MESOSPHERE_R_ABORT_UNLESS(this->Operate(page_list, map_address, cur_pages, pg_phys_addr, true, map_properties, OperationType_Map, true));
/* Advance. */
map_address += cur_pages * PageSize;
map_pages -= cur_pages;
pg_phys_addr += cur_pages * PageSize;
pg_pages -= cur_pages;
}
/* Check if we're done. */
if (last_address <= it->GetLastAddress()) {
break;
}
/* Advance. */
++it;
}
/* Check that we re-mapped precisely the page group. */
MESOSPHERE_ABORT_UNLESS((++pg_it) == pg.end());
}
Result KPageTableBase::MakePageGroup(KPageGroup &pg, KProcessAddress addr, size_t num_pages) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
const size_t size = num_pages * PageSize;
/* We're making a new group, not adding to an existing one. */
R_UNLESS(pg.empty(), svc::ResultInvalidCurrentMemory());
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
R_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr), svc::ResultInvalidCurrentMemory());
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
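/* NOTE: The traversal may begin in the middle of a block, so the first entry's usable */
/* size is counted from cur_addr to the end of its containing block. */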
/* Iterate, adding to group as we go. */
while (tot_size < size) {
R_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)), svc::ResultInvalidCurrentMemory());
if (next_entry.phys_addr != (cur_addr + cur_size)) {
const size_t cur_pages = cur_size / PageSize;
R_UNLESS(IsHeapPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
R_TRY(pg.AddBlock(cur_addr, cur_pages));
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we add the right amount for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
/* Add the last block. */
const size_t cur_pages = cur_size / PageSize;
R_UNLESS(IsHeapPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
R_TRY(pg.AddBlock(cur_addr, cur_pages));
R_SUCCEED();
}
bool KPageTableBase::IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
const size_t size = num_pages * PageSize;
/* Empty groups are necessarily invalid. */
if (pg.empty()) {
return false;
}
auto &impl = this->GetImpl();
/* We're going to validate that the group we'd expect is the group we see. */
auto cur_it = pg.begin();
KPhysicalAddress cur_block_address = cur_it->GetAddress();
size_t cur_block_pages = cur_it->GetNumPages();
auto UpdateCurrentIterator = [&]() ALWAYS_INLINE_LAMBDA {
if (cur_block_pages == 0) {
if ((++cur_it) == pg.end()) {
return false;
}
cur_block_address = cur_it->GetAddress();
cur_block_pages = cur_it->GetNumPages();
}
return true;
};
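/* The traversal below mirrors MakePageGroup: physically-contiguous entries are coalesced, */
/* and at each discontinuity the coalesced run is checked against the expected group. */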
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
if (!impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), addr)) {
return false;
}
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
/* Iterate, comparing expected to actual. */
while (tot_size < size) {
if (!impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context))) {
return false;
}
if (next_entry.phys_addr != (cur_addr + cur_size)) {
const size_t cur_pages = cur_size / PageSize;
if (!IsHeapPhysicalAddress(cur_addr)) {
return false;
}
if (!UpdateCurrentIterator()) {
return false;
}
if (cur_block_address != cur_addr || cur_block_pages < cur_pages) {
return false;
}
cur_block_address += cur_size;
cur_block_pages -= cur_pages;
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we compare the right amount for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
if (!IsHeapPhysicalAddress(cur_addr)) {
return false;
}
if (!UpdateCurrentIterator()) {
return false;
}
return cur_block_address == cur_addr && cur_block_pages == (cur_size / PageSize);
}
Result KPageTableBase::GetContiguousMemoryRangeWithState(MemoryRange *out, KProcessAddress address, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
auto &impl = this->GetImpl();
/* Begin a traversal. */
TraversalContext context;
TraversalEntry cur_entry = { .phys_addr = Null<KPhysicalAddress>, .block_size = 0, .sw_reserved_bits = 0, .attr = 0 };
R_UNLESS(impl.BeginTraversal(std::addressof(cur_entry), std::addressof(context), address), svc::ResultInvalidCurrentMemory());
/* Traverse until we have enough size or we aren't contiguous any more. */
const KPhysicalAddress phys_address = cur_entry.phys_addr;
const u8 entry_attr = cur_entry.attr;
size_t contig_size;
for (contig_size = cur_entry.block_size - (GetInteger(phys_address) & (cur_entry.block_size - 1)); contig_size < size; contig_size += cur_entry.block_size) {
if (!impl.ContinueTraversal(std::addressof(cur_entry), std::addressof(context))) {
break;
}
if (cur_entry.phys_addr != phys_address + contig_size) {
break;
}
if (cur_entry.attr != entry_attr) {
break;
}
}
/* Take the minimum size for our region. */
size = std::min(size, contig_size);
/* Check that the memory is contiguous (modulo the reference count bit). */
const u32 test_state_mask = state_mask | KMemoryState_FlagReferenceCounted;
const bool is_heap = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, test_state_mask, state | KMemoryState_FlagReferenceCounted, perm_mask, perm, attr_mask, attr));
if (!is_heap) {
R_TRY(this->CheckMemoryStateContiguous(address, size, test_state_mask, state, perm_mask, perm, attr_mask, attr));
}
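/* NOTE: Whether the range is heap-backed (and thus reference counted) is recorded in the */
/* output, presumably so that callers only open references on pages where that is meaningful. */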
/* The memory is contiguous, so set the output range. */
out->Set(phys_address, size, is_heap, attr);
R_SUCCEED();
}
Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Verify we can change the memory permission. */
KMemoryState old_state;
KMemoryPermission old_perm;
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, std::addressof(num_allocator_blocks), addr, size, KMemoryState_FlagCanReprotect, KMemoryState_FlagCanReprotect, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
/* Determine new perm. */
const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
R_SUCCEED_IF(old_perm == new_perm);
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Perform mapping operation. */
const KPageProperties properties = { new_perm, false, false, DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
R_SUCCEED();
}
Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Verify we can change the memory permission. */
KMemoryState old_state;
KMemoryPermission old_perm;
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), nullptr, std::addressof(num_allocator_blocks), addr, size, KMemoryState_FlagCode, KMemoryState_FlagCode, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
/* Make a new page group for the region. */
KPageGroup pg(m_block_info_manager);
/* Determine new perm/state. */
const KMemoryPermission new_perm = ConvertToKMemoryPermission(svc_perm);
KMemoryState new_state = old_state;
const bool is_w = (new_perm & KMemoryPermission_UserWrite) == KMemoryPermission_UserWrite;
const bool is_x = (new_perm & KMemoryPermission_UserExecute) == KMemoryPermission_UserExecute;
const bool was_x = (old_perm & KMemoryPermission_UserExecute) == KMemoryPermission_UserExecute;
MESOSPHERE_ASSERT(!(is_w && is_x));
if (is_w) {
switch (old_state) {
case KMemoryState_Code: new_state = KMemoryState_CodeData; break;
case KMemoryState_AliasCode: new_state = KMemoryState_AliasCodeData; break;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
/* Create a page group, if we're setting execute permissions. */
if (is_x) {
R_TRY(this->MakePageGroup(pg, GetInteger(addr), num_pages));
}
/* Succeed if there's nothing to do. */
R_SUCCEED_IF(old_perm == new_perm && old_state == new_state);
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* If we're creating an executable mapping, take and immediately release the scheduler lock. This will force a reschedule. */
if (is_x) {
KScopedSchedulerLock sl;
}
/* Perform mapping operation. */
const KPageProperties properties = { new_perm, false, false, DisableMergeAttribute_None };
const auto operation = was_x ? OperationType_ChangePermissionsAndRefreshAndFlush : OperationType_ChangePermissions;
R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, operation, false));
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, new_state, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
/* Ensure cache coherency, if we're setting pages as executable. */
if (is_x) {
for (const auto &block : pg) {
cpu::StoreDataCache(GetVoidPointer(GetHeapVirtualAddress(block.GetAddress())), block.GetSize());
}
cpu::InvalidateEntireInstructionCache();
}
R_SUCCEED();
}
Result KPageTableBase::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
const size_t num_pages = size / PageSize;
MESOSPHERE_ASSERT((mask | KMemoryAttribute_SetMask) == KMemoryAttribute_SetMask);
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Verify we can change the memory attribute. */
KMemoryState old_state;
KMemoryPermission old_perm;
KMemoryAttribute old_attr;
size_t num_allocator_blocks;
constexpr u32 AttributeTestMask = ~(KMemoryAttribute_SetMask | KMemoryAttribute_DeviceShared);
const u32 state_test_mask = ((mask & KMemoryAttribute_Uncached) ? static_cast<u32>(KMemoryState_FlagCanChangeAttribute) : 0) | ((mask & KMemoryAttribute_PermissionLocked) ? static_cast<u32>(KMemoryState_FlagCanPermissionLock) : 0);
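/* NOTE: Changing the uncached attribute requires FlagCanChangeAttribute, and locking */
/* permissions requires FlagCanPermissionLock; attribute bits not being changed impose */
/* no additional state requirements. */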
R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), std::addressof(num_allocator_blocks),
addr, size,
state_test_mask, state_test_mask,
KMemoryPermission_None, KMemoryPermission_None,
AttributeTestMask, KMemoryAttribute_None, ~AttributeTestMask));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* If we need to, perform a change attribute operation. */
if ((mask & KMemoryAttribute_Uncached) != 0) {
/* Determine the new attribute. */
const KMemoryAttribute new_attr = static_cast<KMemoryAttribute>(((old_attr & ~mask) | (attr & mask)));
/* Perform operation. */
const KPageProperties properties = { old_perm, false, (new_attr & KMemoryAttribute_Uncached) != 0, DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissionsAndRefreshAndFlush, false));
}
/* Update the blocks. */
m_memory_block_manager.UpdateAttribute(std::addressof(allocator), addr, num_pages, mask, attr);
R_SUCCEED();
}
Result KPageTableBase::SetHeapSize(KProcessAddress *out, size_t size) {
/* Lock the physical memory mutex. */
KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
/* Try to perform a reduction in heap, instead of an extension. */
KProcessAddress cur_address;
size_t allocation_size;
{
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Validate that setting heap size is possible at all. */
R_UNLESS(!m_is_kernel, svc::ResultOutOfMemory());
R_UNLESS(size <= static_cast<size_t>(m_region_ends[RegionType_Heap] - m_region_starts[RegionType_Heap]), svc::ResultOutOfMemory());
R_UNLESS(size <= m_max_heap_size, svc::ResultOutOfMemory());
if (size < static_cast<size_t>(m_current_heap_end - m_region_starts[RegionType_Heap])) {
/* The size being requested is less than the current size, so we need to free the end of the heap. */
/* Validate memory state. */
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks),
m_region_starts[RegionType_Heap] + size, (m_current_heap_end - m_region_starts[RegionType_Heap]) - size,
KMemoryState_All, KMemoryState_Normal,
KMemoryPermission_All, KMemoryPermission_UserReadWrite,
KMemoryAttribute_All, KMemoryAttribute_None));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Unmap the end of the heap. */
const size_t num_pages = ((m_current_heap_end - m_region_starts[RegionType_Heap]) - size) / PageSize;
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), m_region_starts[RegionType_Heap] + size, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
/* Release the memory from the resource limit. */
m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, num_pages * PageSize);
/* Apply the memory block update. */
m_memory_block_manager.Update(std::addressof(allocator), m_region_starts[RegionType_Heap] + size, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, size == 0 ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None);
/* Update the current heap end. */
m_current_heap_end = m_region_starts[RegionType_Heap] + size;
/* Set the output. */
*out = m_region_starts[RegionType_Heap];
R_SUCCEED();
} else if (size == static_cast<size_t>(m_current_heap_end - m_region_starts[RegionType_Heap])) {
/* The size requested is exactly the current size. */
*out = m_region_starts[RegionType_Heap];
R_SUCCEED();
} else {
/* We have to allocate memory. Determine how much to allocate and where while the table is locked. */
cur_address = m_current_heap_end;
allocation_size = size - (m_current_heap_end - m_region_starts[RegionType_Heap]);
}
}
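/* NOTE: The general lock is dropped while we allocate, so allocation does not stall other */
/* table operations. m_map_physical_memory_lock remains held, serializing heap changes; this */
/* is presumably why the heap-end re-check below aborts rather than failing recoverably. */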
/* Reserve memory for the heap extension. */
KScopedResourceReservation memory_reservation(m_resource_limit, ams::svc::LimitableResource_PhysicalMemoryMax, allocation_size);
R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
/* Allocate pages for the heap extension. */
KPageGroup pg(m_block_info_manager);
R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), allocation_size / PageSize, 1, m_allocate_option));
/* Close the opened pages when we're done with them. */
/* If the mapping succeeds, each page will gain an extra reference, otherwise they will be freed automatically. */
ON_SCOPE_EXIT { pg.Close(); };
/* Clear all the newly allocated pages. */
for (const auto &it : pg) {
std::memset(GetVoidPointer(GetHeapVirtualAddress(it.GetAddress())), m_heap_fill_value, it.GetSize());
}
/* Map the pages. */
{
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Ensure that the heap hasn't changed since we began executing. */
MESOSPHERE_ABORT_UNLESS(cur_address == m_current_heap_end);
/* Check the memory state. */
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), m_current_heap_end, allocation_size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Map the pages. */
const size_t num_pages = allocation_size / PageSize;
const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, (m_current_heap_end == m_region_starts[RegionType_Heap]) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), m_current_heap_end, num_pages, pg, map_properties, OperationType_MapGroup, false));
/* We succeeded, so commit our memory reservation. */
memory_reservation.Commit();
/* Apply the memory block update. */
m_memory_block_manager.Update(std::addressof(allocator), m_current_heap_end, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, m_region_starts[RegionType_Heap] == m_current_heap_end ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
/* Update the current heap end. */
m_current_heap_end = m_region_starts[RegionType_Heap] + size;
/* Set the output. */
*out = m_region_starts[RegionType_Heap];
R_SUCCEED();
}
}
Result KPageTableBase::SetMaxHeapSize(size_t size) {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Only process page tables are allowed to set heap size. */
MESOSPHERE_ASSERT(!this->IsKernel());
m_max_heap_size = size;
R_SUCCEED();
}
Result KPageTableBase::QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const {
/* If the address is invalid, create a fake block. */
if (!this->Contains(addr, 1)) {
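/* NOTE: The fake block spans from the end of the address space to the end of the 64-bit */
/* range; 0 - end wraps around to exactly that size. */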
*out_info = {
.m_address = GetInteger(m_address_space_end),
.m_size = 0 - GetInteger(m_address_space_end),
.m_state = static_cast<KMemoryState>(ams::svc::MemoryState_Inaccessible),
.m_device_disable_merge_left_count = 0,
.m_device_disable_merge_right_count = 0,
.m_ipc_lock_count = 0,
.m_device_use_count = 0,
.m_ipc_disable_merge_count = 0,
.m_permission = KMemoryPermission_None,
.m_attribute = KMemoryAttribute_None,
.m_original_permission = KMemoryPermission_None,
.m_disable_merge_attribute = KMemoryBlockDisableMergeAttribute_None,
};
out_page_info->flags = 0;
R_SUCCEED();
}
/* Otherwise, lock the table and query. */
KScopedLightLock lk(m_general_lock);
R_RETURN(this->QueryInfoImpl(out_info, out_page_info, addr));
}
Result KPageTableBase::QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Align the address down to page size. */
address = util::AlignDown(GetInteger(address), PageSize);
/* Verify that we can query the address. */
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
R_UNLESS(it != m_memory_block_manager.end(), svc::ResultInvalidCurrentMemory());
/* Check the memory state. */
R_TRY(this->CheckMemoryState(it, KMemoryState_FlagCanQueryPhysical, KMemoryState_FlagCanQueryPhysical, KMemoryPermission_UserReadExecute, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));
/* Prepare to traverse. */
KPhysicalAddress phys_addr;
size_t phys_size;
KProcessAddress virt_addr = it->GetAddress();
KProcessAddress end_addr = it->GetEndAddress();
/* Perform traversal. */
{
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = m_impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr);
R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory());
/* Set tracking variables. */
phys_addr = next_entry.phys_addr;
phys_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
/* Iterate. */
while (true) {
/* Continue the traversal. */
traverse_valid = m_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
if (!traverse_valid) {
break;
}
if (next_entry.phys_addr != (phys_addr + phys_size)) {
/* Check if we're done. */
if (virt_addr <= address && address <= virt_addr + phys_size - 1) {
break;
}
/* Advance. */
phys_addr = next_entry.phys_addr;
virt_addr += phys_size;
phys_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1));
} else {
phys_size += next_entry.block_size;
}
/* Check if we're done. */
if (end_addr < virt_addr + phys_size) {
break;
}
}
MESOSPHERE_ASSERT(virt_addr <= address && address <= virt_addr + phys_size - 1);
/* Ensure we use the right size. */
if (end_addr < virt_addr + phys_size) {
phys_size = end_addr - virt_addr;
}
}
/* Set the output. */
out->physical_address = GetInteger(phys_addr);
out->virtual_address = GetInteger(virt_addr);
out->size = phys_size;
R_SUCCEED();
}
Result KPageTableBase::MapIoImpl(KProcessAddress *out, PageLinkedList *page_list, KPhysicalAddress phys_addr, size_t size, KMemoryState state, KMemoryPermission perm) {
/* Check pre-conditions. */
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));
MESOSPHERE_ASSERT(size > 0);
R_UNLESS(phys_addr < phys_addr + size, svc::ResultInvalidAddress());
const size_t num_pages = size / PageSize;
const KPhysicalAddress last = phys_addr + size - 1;
/* Get region extents. */
const KProcessAddress region_start = m_region_starts[RegionType_KernelMap];
const size_t region_size = m_region_ends[RegionType_KernelMap] - m_region_starts[RegionType_KernelMap];
const size_t region_num_pages = region_size / PageSize;
MESOSPHERE_ASSERT(this->CanContain(region_start, region_size, state));
/* Locate the memory region. */
const KMemoryRegion *region = KMemoryLayout::Find(phys_addr);
R_UNLESS(region != nullptr, svc::ResultInvalidAddress());
MESOSPHERE_ASSERT(region->Contains(GetInteger(phys_addr)));
/* Ensure that the region is mappable. */
const bool is_rw = perm == KMemoryPermission_UserReadWrite;
while (true) {
/* Check that the region exists. */
R_UNLESS(region != nullptr, svc::ResultInvalidAddress());
/* Check the region attributes. */
R_UNLESS(!region->IsDerivedFrom(KMemoryRegionType_Dram), svc::ResultInvalidAddress());
R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw, svc::ResultInvalidAddress());
R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), svc::ResultInvalidAddress());
/* Check if we're done. */
if (GetInteger(last) <= region->GetLastAddress()) {
break;
}
/* Advance. */
region = region->GetNext();
}
/* Select an address to map at. */
KProcessAddress addr = Null<KProcessAddress>;
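/* Try the largest block sizes first: an alignment is only considered if at least one fully */
/* aligned block of that size fits within [phys_addr, last], presumably so that the mapping */
/* can take advantage of large page table blocks. */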
for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) {
const size_t alignment = KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(block_type));
const KPhysicalAddress aligned_phys = util::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
if (aligned_phys <= phys_addr) {
continue;
}
const KPhysicalAddress last_aligned_paddr = util::AlignDown(GetInteger(last) + 1, alignment) - 1;
if (!(last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr)) {
continue;
}
addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, this->GetNumGuardPages());
if (addr != Null<KProcessAddress>) {
break;
}
}
R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());
/* Check that we can map IO here. */
MESOSPHERE_ASSERT(this->CanContain(addr, size, state));
MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
/* Perform mapping operation. */
const KPageProperties properties = { perm, state == KMemoryState_IoRegister, false, DisableMergeAttribute_DisableHead };
R_TRY(this->Operate(page_list, addr, num_pages, phys_addr, true, properties, OperationType_Map, false));
/* Set the output address. */
*out = addr;
R_SUCCEED();
}
Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Map the IO memory. */
KProcessAddress addr;
R_TRY(this->MapIoImpl(std::addressof(addr), updater.GetPageList(), phys_addr, size, KMemoryState_IoRegister, perm));
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), addr, size / PageSize, KMemoryState_IoRegister, perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* We successfully mapped the pages. */
R_SUCCEED();
}
Result KPageTableBase::MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, ams::svc::MemoryMapping mapping, ams::svc::MemoryPermission svc_perm) {
const size_t num_pages = size / PageSize;
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Validate the memory state. */
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_None, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Perform mapping operation. */
const KMemoryPermission perm = ConvertToKMemoryPermission(svc_perm);
const KPageProperties properties = { perm, mapping == ams::svc::MemoryMapping_IoRegister, mapping == ams::svc::MemoryMapping_Uncached, DisableMergeAttribute_DisableHead };
R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, phys_addr, true, properties, OperationType_Map, false));
/* Update the blocks. */
const auto state = mapping == ams::svc::MemoryMapping_Memory ? KMemoryState_IoMemory : KMemoryState_IoRegister;
m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, state, perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* We successfully mapped the pages. */
R_SUCCEED();
}
Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, ams::svc::MemoryMapping mapping) {
const size_t num_pages = size / PageSize;
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Validate the memory state. */
KMemoryState old_state;
KMemoryPermission old_perm;
KMemoryAttribute old_attr;
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(old_state), std::addressof(old_perm), std::addressof(old_attr), std::addressof(num_allocator_blocks),
dst_address, size,
KMemoryState_All, mapping == ams::svc::MemoryMapping_Memory ? KMemoryState_IoMemory : KMemoryState_IoRegister,
KMemoryPermission_None, KMemoryPermission_None,
KMemoryAttribute_All, KMemoryAttribute_Locked));
/* Validate that the region being unmapped corresponds to the physical range described. */
{
/* Get the impl. */
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_address));
/* Check that the physical region matches. */
R_UNLESS(next_entry.phys_addr == phys_addr, svc::ResultInvalidMemoryRegion());
/* Iterate. */
for (size_t checked_size = next_entry.block_size - (GetInteger(phys_addr) & (next_entry.block_size - 1)); checked_size < size; checked_size += next_entry.block_size) {
/* Continue the traversal. */
MESOSPHERE_ABORT_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)));
/* Check that the physical region matches. */
R_UNLESS(next_entry.phys_addr == phys_addr + checked_size, svc::ResultInvalidMemoryRegion());
}
}
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* If the region being unmapped is Memory, synchronize. */
if (mapping == ams::svc::MemoryMapping_Memory) {
/* Change the region to be uncached. */
const KPageProperties properties = { old_perm, false, true, DisableMergeAttribute_None };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissionsAndRefresh, false));
/* Temporarily unlock ourselves, so that other operations can occur while we flush the region. */
m_general_lock.Unlock();
ON_SCOPE_EXIT { m_general_lock.Lock(); };
/* Flush the region. */
MESOSPHERE_R_ABORT_UNLESS(cpu::FlushDataCache(GetVoidPointer(dst_address), size));
}
/* Perform the unmap. */
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
R_SUCCEED();
}
Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));
MESOSPHERE_ASSERT(size > 0);
R_UNLESS(phys_addr < phys_addr + size, svc::ResultInvalidAddress());
const size_t num_pages = size / PageSize;
const KPhysicalAddress last = phys_addr + size - 1;
/* Get region extents. */
const KProcessAddress region_start = this->GetRegionAddress(KMemoryState_Static);
const size_t region_size = this->GetRegionSize(KMemoryState_Static);
const size_t region_num_pages = region_size / PageSize;
/* Locate the memory region. */
const KMemoryRegion *region = KMemoryLayout::Find(phys_addr);
R_UNLESS(region != nullptr, svc::ResultInvalidAddress());
MESOSPHERE_ASSERT(region->Contains(GetInteger(phys_addr)));
R_UNLESS(GetInteger(last) <= region->GetLastAddress(), svc::ResultInvalidAddress());
/* Check the region attributes. */
const bool is_rw = perm == KMemoryPermission_UserReadWrite;
R_UNLESS( region->IsDerivedFrom(KMemoryRegionType_Dram), svc::ResultInvalidAddress());
R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_NoUserMap), svc::ResultInvalidAddress());
R_UNLESS(!region->HasTypeAttribute(KMemoryRegionAttr_UserReadOnly) || !is_rw, svc::ResultInvalidAddress());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Select an address to map at. */
KProcessAddress addr = Null<KProcessAddress>;
for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) {
const size_t alignment = KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(block_type));
const KPhysicalAddress aligned_phys = util::AlignUp(GetInteger(phys_addr), alignment) + alignment - 1;
if (aligned_phys <= phys_addr) {
continue;
}
const KPhysicalAddress last_aligned_paddr = util::AlignDown(GetInteger(last) + 1, alignment) - 1;
if (!(last_aligned_paddr <= last && aligned_phys <= last_aligned_paddr)) {
continue;
}
addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, this->GetNumGuardPages());
if (addr != Null<KProcessAddress>) {
break;
}
}
R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());
/* Check that we can map static here. */
MESOSPHERE_ASSERT(this->CanContain(addr, size, KMemoryState_Static));
MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Perform mapping operation. */
const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, OperationType_Map, false));
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, KMemoryState_Static, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* We successfully mapped the pages. */
R_SUCCEED();
}
Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
/* Get the memory region. */
const KMemoryRegion *region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerived(region_type);
R_UNLESS(region != nullptr, svc::ResultOutOfRange());
/* Check that the region is valid. */
MESOSPHERE_ABORT_UNLESS(region->GetEndAddress() != 0);
/* Map the region. */
R_TRY_CATCH(this->MapStatic(region->GetAddress(), region->GetSize(), perm)) {
R_CONVERT(svc::ResultInvalidAddress, svc::ResultOutOfRange())
} R_END_TRY_CATCH;
R_SUCCEED();
}
Result KPageTableBase::MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
MESOSPHERE_ASSERT(util::IsAligned(alignment, PageSize) && alignment >= PageSize);
/* Ensure this is a valid map request. */
R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), svc::ResultInvalidCurrentMemory());
R_UNLESS(num_pages < region_num_pages, svc::ResultOutOfMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Find a random address to map at. */
KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, alignment, 0, this->GetNumGuardPages());
R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), alignment));
MESOSPHERE_ASSERT(this->CanContain(addr, num_pages * PageSize, state));
MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Perform mapping operation. */
const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
if (is_pa_valid) {
R_TRY(this->Operate(updater.GetPageList(), addr, num_pages, phys_addr, true, properties, OperationType_Map, false));
} else {
R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), addr, num_pages, properties));
}
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* We successfully mapped the pages. */
*out_addr = addr;
R_SUCCEED();
}
Result KPageTableBase::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
/* Check that the map is in range. */
const size_t size = num_pages * PageSize;
R_UNLESS(this->CanContain(address, size, state), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check the memory state. */
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Map the pages. */
const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
R_TRY(this->AllocateAndMapPagesImpl(updater.GetPageList(), address, num_pages, properties));
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
R_SUCCEED();
}
Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
/* Check that the unmap is in range. */
const size_t size = num_pages * PageSize;
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check the memory state. */
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, state, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Perform the unmap. */
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
R_SUCCEED();
}
Result KPageTableBase::MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
/* Ensure this is a valid map request. */
const size_t num_pages = pg.GetNumPages();
R_UNLESS(this->CanContain(region_start, region_num_pages * PageSize, state), svc::ResultInvalidCurrentMemory());
R_UNLESS(num_pages < region_num_pages, svc::ResultOutOfMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Find a random address to map at. */
KProcessAddress addr = this->FindFreeArea(region_start, region_num_pages, num_pages, PageSize, 0, this->GetNumGuardPages());
R_UNLESS(addr != Null<KProcessAddress>, svc::ResultOutOfMemory());
MESOSPHERE_ASSERT(this->CanContain(addr, num_pages * PageSize, state));
MESOSPHERE_R_ASSERT(this->CheckMemoryState(addr, num_pages * PageSize, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Perform mapping operation. */
const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* We successfully mapped the pages. */
*out_addr = addr;
R_SUCCEED();
}
Result KPageTableBase::MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) {
MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
/* Ensure this is a valid map request. */
const size_t num_pages = pg.GetNumPages();
const size_t size = num_pages * PageSize;
R_UNLESS(this->CanContain(addr, size, state), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check if state allows us to map. */
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), addr, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Perform mapping operation. */
const KPageProperties properties = { perm, false, false, DisableMergeAttribute_DisableHead };
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), addr, pg, properties, false));
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* We successfully mapped the pages. */
R_SUCCEED();
}
Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
MESOSPHERE_ASSERT(!this->IsLockedByCurrentThread());
/* Ensure this is a valid unmap request. */
const size_t num_pages = pg.GetNumPages();
const size_t size = num_pages * PageSize;
R_UNLESS(this->CanContain(address, size, state), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check if state allows us to unmap. */
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, state, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
/* Check that the page group is valid. */
R_UNLESS(this->IsValidPageGroup(pg, address, num_pages), svc::ResultInvalidCurrentMemory());
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Perform unmapping operation. */
const KPageProperties properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null<KPhysicalAddress>, false, properties, OperationType_Unmap, false));
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
R_SUCCEED();
}
Result KPageTableBase::MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) {
/* Ensure that the page group isn't null. */
MESOSPHERE_ASSERT(out != nullptr);
/* Make sure that the region we're building the group over is valid for the table. */
const size_t size = num_pages * PageSize;
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check if state allows us to create the group. */
R_TRY(this->CheckMemoryState(address, size, state_mask | KMemoryState_FlagReferenceCounted, state | KMemoryState_FlagReferenceCounted, perm_mask, perm, attr_mask, attr));
/* Create a new page group for the region. */
R_TRY(this->MakePageGroup(*out, address, num_pages));
/* Open a new reference to the pages in the group. */
out->Open();
R_SUCCEED();
}
Result KPageTableBase::InvalidateProcessDataCache(KProcessAddress address, size_t size) {
/* Check that the region is in range. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check the memory state. */
R_TRY(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_Uncached, KMemoryAttribute_None));
/* Get the impl. */
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address);
R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory());
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
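/* The loop below coalesces physically-contiguous translation entries, so that the data */
/* cache invalidate is issued once per contiguous physical run rather than once per entry. */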
/* Iterate. */
while (tot_size < size) {
/* Continue the traversal. */
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory());
if (next_entry.phys_addr != (cur_addr + cur_size)) {
/* Check that the pages are linearly mapped. */
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
/* Invalidate the block. */
if (cur_size > 0) {
/* NOTE: Nintendo does not check the result of invalidation. */
cpu::InvalidateDataCache(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
}
/* Advance. */
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we use the right size for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
/* Check that the last block is linearly mapped. */
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
/* Invalidate the last block. */
if (cur_size > 0) {
/* NOTE: Nintendo does not check the result of invalidation. */
cpu::InvalidateDataCache(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
}
R_SUCCEED();
}
Result KPageTableBase::InvalidateCurrentProcessDataCache(KProcessAddress address, size_t size) {
/* Check pre-condition: this is being called on the current process. */
MESOSPHERE_ASSERT(this == std::addressof(GetCurrentProcess().GetPageTable().GetBasePageTable()));
/* Check that the region is in range. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check the memory state. */
R_TRY(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_Uncached, KMemoryAttribute_None));
/* Invalidate the data cache. */
R_RETURN(cpu::InvalidateDataCache(GetVoidPointer(address), size));
}
bool KPageTableBase::CanReadWriteDebugMemory(KProcessAddress address, size_t size, bool force_debug_prod) {
/* Check pre-conditions. */
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* If the memory is debuggable and user-readable, we can perform the access. */
if (R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_NotMapped | KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None))) {
return true;
}
/* If we're in debug mode, and the process isn't force debug prod, check if the memory is debuggable and kernel-readable and user-executable. */
if (KTargetSystem::IsDebugMode() && !force_debug_prod) {
if (R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_FlagCanDebug, KMemoryState_FlagCanDebug, KMemoryPermission_KernelRead | KMemoryPermission_UserExecute, KMemoryPermission_KernelRead | KMemoryPermission_UserExecute, KMemoryAttribute_None, KMemoryAttribute_None))) {
return true;
}
}
/* If neither of the above checks passed, we can't access the memory. */
return false;
}
Result KPageTableBase::ReadDebugMemory(void *buffer, KProcessAddress address, size_t size, bool force_debug_prod) {
/* Lightly validate the region is in range. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Require that the memory either be user-readable-and-mapped or debug-accessible. */
const bool can_read = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_NotMapped | KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));
if (!can_read) {
R_UNLESS(this->CanReadWriteDebugMemory(address, size, force_debug_prod), svc::ResultInvalidCurrentMemory());
}
/* Get the impl. */
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address);
R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory());
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
auto PerformCopy = [&]() ALWAYS_INLINE_LAMBDA -> Result {
/* Ensure the address is linear mapped. */
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
/* Copy as much aligned data as we can. */
if (cur_size >= sizeof(u32)) {
const size_t copy_size = util::AlignDown(cur_size, sizeof(u32));
const void * copy_src = GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr));
cpu::FlushDataCache(copy_src, copy_size);
R_UNLESS(UserspaceAccess::CopyMemoryToUserAligned32Bit(buffer, copy_src, copy_size), svc::ResultInvalidPointer());
buffer = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer) + copy_size);
cur_addr += copy_size;
cur_size -= copy_size;
}
/* Copy remaining data. */
if (cur_size > 0) {
const void * copy_src = GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr));
cpu::FlushDataCache(copy_src, cur_size);
R_UNLESS(UserspaceAccess::CopyMemoryToUser(buffer, copy_src, cur_size), svc::ResultInvalidPointer());
}
R_SUCCEED();
};
/* Iterate. */
while (tot_size < size) {
/* Continue the traversal. */
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
if (next_entry.phys_addr != (cur_addr + cur_size)) {
/* Perform copy. */
R_TRY(PerformCopy());
/* Advance. */
buffer = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we use the right size for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
/* Perform copy for the last block. */
R_TRY(PerformCopy());
R_SUCCEED();
}
Result KPageTableBase::WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) {
/* Lightly validate the region is in range. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Require that the memory either be user-writable-and-mapped or debug-accessible. */
const bool can_write = R_SUCCEEDED(this->CheckMemoryStateContiguous(address, size, KMemoryState_None, KMemoryState_None, KMemoryPermission_NotMapped | KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None));
if (!can_write) {
R_UNLESS(this->CanReadWriteDebugMemory(address, size, false), svc::ResultInvalidCurrentMemory());
}
/* Get the impl. */
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), address);
R_UNLESS(traverse_valid, svc::ResultInvalidCurrentMemory());
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
auto PerformCopy = [&]() ALWAYS_INLINE_LAMBDA -> Result {
/* Ensure the address is linear mapped. */
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
/* Copy as much aligned data as we can. */
if (cur_size >= sizeof(u32)) {
const size_t copy_size = util::AlignDown(cur_size, sizeof(u32));
R_UNLESS(UserspaceAccess::CopyMemoryFromUserAligned32Bit(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), buffer, copy_size), svc::ResultInvalidCurrentMemory());
cpu::StoreDataCache(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), copy_size);
buffer = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer) + copy_size);
cur_addr += copy_size;
cur_size -= copy_size;
}
/* Copy remaining data. */
if (cur_size > 0) {
R_UNLESS(UserspaceAccess::CopyMemoryFromUser(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), buffer, cur_size), svc::ResultInvalidCurrentMemory());
cpu::StoreDataCache(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
}
R_SUCCEED();
};
/* Iterate. */
while (tot_size < size) {
/* Continue the traversal. */
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
if (next_entry.phys_addr != (cur_addr + cur_size)) {
/* Perform copy. */
R_TRY(PerformCopy());
/* Advance. */
buffer = reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(buffer) + cur_size);
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we use the right size for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
/* Perform copy for the last block. */
R_TRY(PerformCopy());
/* Invalidate the entire instruction cache, as this svc allows modifying executable pages. */
cpu::InvalidateEntireInstructionCache();
R_SUCCEED();
}
Result KPageTableBase::ReadIoMemoryImpl(void *buffer, KPhysicalAddress phys_addr, size_t size, KMemoryState state) {
/* Check pre-conditions. */
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Determine the mapping extents. */
const KPhysicalAddress map_start = util::AlignDown(GetInteger(phys_addr), PageSize);
const KPhysicalAddress map_end = util::AlignUp(GetInteger(phys_addr) + size, PageSize);
const size_t map_size = map_end - map_start;
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Temporarily map the io memory. */
KProcessAddress io_addr;
R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size, state, KMemoryPermission_UserRead));
/* Ensure we unmap the io memory when we're done with it. */
ON_SCOPE_EXIT {
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
};
/* Read the memory. */
const KProcessAddress read_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
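/* NOTE: Or-ing the address with the size leaves the low two bits clear only when both */
/* are suitably aligned; this selects the widest access (32/16/8-bit) that both permit. */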
switch ((GetInteger(read_addr) | size) & 3) {
case 0:
{
R_UNLESS(UserspaceAccess::ReadIoMemory32Bit(buffer, GetVoidPointer(read_addr), size), svc::ResultInvalidPointer());
}
break;
case 2:
{
R_UNLESS(UserspaceAccess::ReadIoMemory16Bit(buffer, GetVoidPointer(read_addr), size), svc::ResultInvalidPointer());
}
break;
default:
{
R_UNLESS(UserspaceAccess::ReadIoMemory8Bit(buffer, GetVoidPointer(read_addr), size), svc::ResultInvalidPointer());
}
break;
}
R_SUCCEED();
}
Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, const void *buffer, size_t size, KMemoryState state) {
/* Check pre-conditions. */
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Determine the mapping extents. */
const KPhysicalAddress map_start = util::AlignDown(GetInteger(phys_addr), PageSize);
const KPhysicalAddress map_end = util::AlignUp(GetInteger(phys_addr) + size, PageSize);
const size_t map_size = map_end - map_start;
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Temporarily map the io memory. */
KProcessAddress io_addr;
R_TRY(this->MapIoImpl(std::addressof(io_addr), updater.GetPageList(), map_start, map_size, state, KMemoryPermission_UserReadWrite));
/* Ensure we unmap the io memory when we're done with it. */
ON_SCOPE_EXIT {
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), io_addr, map_size / PageSize, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
};
/* Write the memory. */
const KProcessAddress write_addr = io_addr + (GetInteger(phys_addr) & (PageSize - 1));
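/* NOTE: As with reads, this selects the widest access width that the address and size permit. */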
switch ((GetInteger(write_addr) | size) & 3) {
case 0:
{
R_UNLESS(UserspaceAccess::WriteIoMemory32Bit(GetVoidPointer(write_addr), buffer, size), svc::ResultInvalidPointer());
}
break;
case 2:
{
R_UNLESS(UserspaceAccess::WriteIoMemory16Bit(GetVoidPointer(write_addr), buffer, size), svc::ResultInvalidPointer());
}
break;
default:
{
R_UNLESS(UserspaceAccess::WriteIoMemory8Bit(GetVoidPointer(write_addr), buffer, size), svc::ResultInvalidPointer());
}
break;
}
R_SUCCEED();
}
Result KPageTableBase::ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size, KMemoryState state) {
/* Lightly validate the range before doing anything else. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* We need to lock both this table, and the current process's table, so set up some aliases. */
KPageTableBase &src_page_table = *this;
KPageTableBase &dst_page_table = GetCurrentProcess().GetPageTable().GetBasePageTable();
/* Acquire the table locks. */
KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
/* Check that the desired range is readable io memory. */
R_TRY(this->CheckMemoryStateContiguous(address, size, KMemoryState_All, state, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None));
/* Read the memory. */
u8 *dst = static_cast<u8 *>(buffer);
const KProcessAddress last_address = address + size - 1;
while (address <= last_address) {
/* Get the current physical address. */
KPhysicalAddress phys_addr;
MESOSPHERE_ABORT_UNLESS(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), address));
/* Determine the current read size. */
const size_t cur_size = std::min<size_t>(last_address - address + 1, PageSize - (GetInteger(address) & (PageSize - 1)));
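/* NOTE: Reads are split at page boundaries, since each page's physical address must be looked up individually. */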
/* Read. */
R_TRY(dst_page_table.ReadIoMemoryImpl(dst, phys_addr, cur_size, state));
/* Advance. */
address += cur_size;
dst += cur_size;
}
R_SUCCEED();
}
Result KPageTableBase::WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size, KMemoryState state) {
/* Lightly validate the range before doing anything else. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* We need to lock both this table, and the current process's table, so set up some aliases. */
KPageTableBase &src_page_table = *this;
KPageTableBase &dst_page_table = GetCurrentProcess().GetPageTable().GetBasePageTable();
/* Acquire the table locks. */
KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
/* Check that the desired range is writable io memory. */
R_TRY(this->CheckMemoryStateContiguous(address, size, KMemoryState_All, state, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None));
/* Write the memory. */
const u8 *src = static_cast<const u8 *>(buffer);
const KProcessAddress last_address = address + size - 1;
while (address <= last_address) {
/* Get the current physical address. */
KPhysicalAddress phys_addr;
MESOSPHERE_ABORT_UNLESS(src_page_table.GetPhysicalAddressLocked(std::addressof(phys_addr), address));
/* Determine the current write size. */
const size_t cur_size = std::min<size_t>(last_address - address + 1, PageSize - (GetInteger(address) & (PageSize - 1)));
/* Write. */
R_TRY(dst_page_table.WriteIoMemoryImpl(phys_addr, src, cur_size, state));
/* Advance. */
address += cur_size;
src += cur_size;
}
R_SUCCEED();
}
Result KPageTableBase::LockForMapDeviceAddressSpace(bool *out_is_io, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned, bool check_heap) {
/* Lightly validate the range before doing anything else. */
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check the memory state. */
const u32 test_state = (is_aligned ? KMemoryState_FlagCanAlignedDeviceMap : KMemoryState_FlagCanDeviceMap) | (check_heap ? KMemoryState_FlagReferenceCounted : KMemoryState_None);
size_t num_allocator_blocks;
KMemoryState old_state;
R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr, std::addressof(num_allocator_blocks), address, size, test_state, test_state, perm, perm, KMemoryAttribute_IpcLocked | KMemoryAttribute_Locked, KMemoryAttribute_None, KMemoryAttribute_DeviceShared));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* Update the memory blocks. */
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::ShareToDevice, KMemoryPermission_None);
/* Set whether the locked memory was io. */
*out_is_io = static_cast<ams::svc::MemoryState>(old_state & KMemoryState_Mask) == ams::svc::MemoryState_Io;
R_SUCCEED();
}
Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size, bool check_heap) {
/* Lightly validate the range before doing anything else. */
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check the memory state. */
const u32 test_state = KMemoryState_FlagCanDeviceMap | (check_heap ? KMemoryState_FlagReferenceCounted : KMemoryState_None);
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryStateContiguous(std::addressof(num_allocator_blocks),
address, size,
test_state, test_state,
KMemoryPermission_None, KMemoryPermission_None,
KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* Update the memory blocks. */
const KMemoryBlockManager::MemoryBlockLockFunction lock_func = m_enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, KMemoryPermission_None);
R_SUCCEED();
}
Result KPageTableBase::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
/* Lightly validate the range before doing anything else. */
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check the memory state. */
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryStateContiguous(std::addressof(num_allocator_blocks),
address, size,
KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
KMemoryPermission_None, KMemoryPermission_None,
KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* Update the memory blocks. */
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::UnshareToDevice, KMemoryPermission_None);
R_SUCCEED();
}
Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
/* Lightly validate the range before doing anything else. */
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check memory state. */
size_t allocator_num_blocks = 0;
R_TRY(this->CheckMemoryStateContiguous(std::addressof(allocator_num_blocks),
address, size,
KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
KMemoryPermission_None, KMemoryPermission_None,
KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
/* Create an update allocator for the region. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, allocator_num_blocks);
R_TRY(allocator_result);
/* Update the memory blocks. */
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, m_enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare : &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight, KMemoryPermission_None);
R_SUCCEED();
}
Result KPageTableBase::OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Get the range. */
const u32 test_state = (is_aligned ? KMemoryState_FlagCanAlignedDeviceMap : KMemoryState_FlagCanDeviceMap);
R_TRY(this->GetContiguousMemoryRangeWithState(out,
address, size,
test_state, test_state,
perm, perm,
KMemoryAttribute_IpcLocked | KMemoryAttribute_Locked, KMemoryAttribute_None));
/* We got the range, so open it. */
out->Open();
R_SUCCEED();
}
Result KPageTableBase::OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange *out, KProcessAddress address, size_t size) {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Get the range. */
R_TRY(this->GetContiguousMemoryRangeWithState(out,
address, size,
KMemoryState_FlagCanDeviceMap, KMemoryState_FlagCanDeviceMap,
KMemoryPermission_None, KMemoryPermission_None,
KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked, KMemoryAttribute_DeviceShared));
/* We got the range, so open it. */
out->Open();
R_SUCCEED();
}
Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
R_RETURN(this->LockMemoryAndOpen(nullptr, out, address, size,
KMemoryState_FlagCanIpcUserBuffer, KMemoryState_FlagCanIpcUserBuffer,
KMemoryPermission_All, KMemoryPermission_UserReadWrite,
KMemoryAttribute_All, KMemoryAttribute_None,
static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite),
KMemoryAttribute_Locked));
}
Result KPageTableBase::UnlockForIpcUserBuffer(KProcessAddress address, size_t size) {
R_RETURN(this->UnlockMemory(address, size,
KMemoryState_FlagCanIpcUserBuffer, KMemoryState_FlagCanIpcUserBuffer,
KMemoryPermission_None, KMemoryPermission_None,
KMemoryAttribute_All, KMemoryAttribute_Locked,
KMemoryPermission_UserReadWrite,
KMemoryAttribute_Locked, nullptr));
}
Result KPageTableBase::LockForTransferMemory(KPageGroup *out, KProcessAddress address, size_t size, KMemoryPermission perm) {
R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size,
KMemoryState_FlagCanTransfer, KMemoryState_FlagCanTransfer,
KMemoryPermission_All, KMemoryPermission_UserReadWrite,
KMemoryAttribute_All, KMemoryAttribute_None,
perm,
KMemoryAttribute_Locked));
}
Result KPageTableBase::UnlockForTransferMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
R_RETURN(this->UnlockMemory(address, size,
KMemoryState_FlagCanTransfer, KMemoryState_FlagCanTransfer,
KMemoryPermission_None, KMemoryPermission_None,
KMemoryAttribute_All, KMemoryAttribute_Locked,
KMemoryPermission_UserReadWrite,
KMemoryAttribute_Locked, std::addressof(pg)));
}
Result KPageTableBase::LockForCodeMemory(KPageGroup *out, KProcessAddress address, size_t size) {
R_RETURN(this->LockMemoryAndOpen(out, nullptr, address, size,
KMemoryState_FlagCanCodeMemory, KMemoryState_FlagCanCodeMemory,
KMemoryPermission_All, KMemoryPermission_UserReadWrite,
KMemoryAttribute_All, KMemoryAttribute_None,
static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite),
KMemoryAttribute_Locked));
}
Result KPageTableBase::UnlockForCodeMemory(KProcessAddress address, size_t size, const KPageGroup &pg) {
R_RETURN(this->UnlockMemory(address, size,
KMemoryState_FlagCanCodeMemory, KMemoryState_FlagCanCodeMemory,
KMemoryPermission_None, KMemoryPermission_None,
KMemoryAttribute_All, KMemoryAttribute_Locked,
KMemoryPermission_UserReadWrite,
KMemoryAttribute_Locked, std::addressof(pg)));
}
Result KPageTableBase::OpenMemoryRangeForProcessCacheOperation(MemoryRange *out, KProcessAddress address, size_t size) {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Get the range. */
R_TRY(this->GetContiguousMemoryRangeWithState(out,
address, size,
KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
KMemoryPermission_UserRead, KMemoryPermission_UserRead,
KMemoryAttribute_Uncached, KMemoryAttribute_None));
/* We got the range, so open it. */
out->Open();
R_SUCCEED();
}
Result KPageTableBase::CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
/* Lightly validate the range before doing anything else. */
R_UNLESS(this->Contains(src_addr, size), svc::ResultInvalidCurrentMemory());
/* Copy the memory. */
{
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check memory state. */
R_TRY(this->CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr));
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
MESOSPHERE_ABORT_UNLESS(traverse_valid);
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
auto PerformCopy = [&]() ALWAYS_INLINE_LAMBDA -> Result {
/* Ensure the address is linear mapped. */
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
/* Copy as much aligned data as we can. */
if (cur_size >= sizeof(u32)) {
const size_t copy_size = util::AlignDown(cur_size, sizeof(u32));
R_UNLESS(UserspaceAccess::CopyMemoryToUserAligned32Bit(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), copy_size), svc::ResultInvalidCurrentMemory());
dst_addr += copy_size;
cur_addr += copy_size;
cur_size -= copy_size;
}
/* Copy remaining data. */
if (cur_size > 0) {
R_UNLESS(UserspaceAccess::CopyMemoryToUser(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size), svc::ResultInvalidCurrentMemory());
}
R_SUCCEED();
};
/* Iterate. */
while (tot_size < size) {
/* Continue the traversal. */
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
if (next_entry.phys_addr != (cur_addr + cur_size)) {
/* Perform copy. */
R_TRY(PerformCopy());
/* Advance. */
dst_addr += cur_size;
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we use the right size for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
/* Perform copy for the last block. */
R_TRY(PerformCopy());
}
R_SUCCEED();
}
Result KPageTableBase::CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
/* Lightly validate the range before doing anything else. */
R_UNLESS(this->Contains(src_addr, size), svc::ResultInvalidCurrentMemory());
/* Copy the memory. */
{
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check memory state. */
R_TRY(this->CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr));
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), src_addr);
MESOSPHERE_ABORT_UNLESS(traverse_valid);
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
auto PerformCopy = [&]() ALWAYS_INLINE_LAMBDA -> Result {
/* Ensure the address is linear mapped. */
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
/* Copy the data. */
std::memcpy(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
R_SUCCEED();
};
/* Iterate. */
while (tot_size < size) {
/* Continue the traversal. */
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
if (next_entry.phys_addr != (cur_addr + cur_size)) {
/* Perform copy. */
R_TRY(PerformCopy());
/* Advance. */
dst_addr += cur_size;
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we use the right size for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
/* Perform copy for the last block. */
R_TRY(PerformCopy());
}
R_SUCCEED();
}
Result KPageTableBase::CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
/* Lightly validate the range before doing anything else. */
R_UNLESS(this->Contains(dst_addr, size), svc::ResultInvalidCurrentMemory());
/* Copy the memory. */
{
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check memory state. */
R_TRY(this->CheckMemoryStateContiguous(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, dst_attr_mask | KMemoryAttribute_Uncached, dst_attr));
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
MESOSPHERE_ABORT_UNLESS(traverse_valid);
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
auto PerformCopy = [&]() ALWAYS_INLINE_LAMBDA -> Result {
/* Ensure the address is linear mapped. */
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
/* Copy as much aligned data as we can. */
if (cur_size >= sizeof(u32)) {
const size_t copy_size = util::AlignDown(cur_size, sizeof(u32));
R_UNLESS(UserspaceAccess::CopyMemoryFromUserAligned32Bit(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), copy_size), svc::ResultInvalidCurrentMemory());
src_addr += copy_size;
cur_addr += copy_size;
cur_size -= copy_size;
}
/* Copy remaining data. */
if (cur_size > 0) {
R_UNLESS(UserspaceAccess::CopyMemoryFromUser(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), cur_size), svc::ResultInvalidCurrentMemory());
}
R_SUCCEED();
};
/* Iterate. */
while (tot_size < size) {
/* Continue the traversal. */
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
if (next_entry.phys_addr != (cur_addr + cur_size)) {
/* Perform copy. */
R_TRY(PerformCopy());
/* Advance. */
src_addr += cur_size;
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we use the right size for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
/* Perform copy for the last block. */
R_TRY(PerformCopy());
}
R_SUCCEED();
}
Result KPageTableBase::CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
/* Lightly validate the range before doing anything else. */
R_UNLESS(this->Contains(dst_addr, size), svc::ResultInvalidCurrentMemory());
/* Copy the memory. */
{
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check memory state. */
R_TRY(this->CheckMemoryStateContiguous(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, dst_attr_mask | KMemoryAttribute_Uncached, dst_attr));
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), dst_addr);
MESOSPHERE_ABORT_UNLESS(traverse_valid);
/* Prepare tracking variables. */
KPhysicalAddress cur_addr = next_entry.phys_addr;
size_t cur_size = next_entry.block_size - (GetInteger(cur_addr) & (next_entry.block_size - 1));
size_t tot_size = cur_size;
auto PerformCopy = [&]() ALWAYS_INLINE_LAMBDA -> Result {
/* Ensure the address is linear mapped. */
R_UNLESS(IsLinearMappedPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
/* Copy the data. */
std::memcpy(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), cur_size);
R_SUCCEED();
};
/* Iterate. */
while (tot_size < size) {
/* Continue the traversal. */
traverse_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
if (next_entry.phys_addr != (cur_addr + cur_size)) {
/* Perform copy. */
R_TRY(PerformCopy());
/* Advance. */
src_addr += cur_size;
cur_addr = next_entry.phys_addr;
cur_size = next_entry.block_size;
} else {
cur_size += next_entry.block_size;
}
tot_size += next_entry.block_size;
}
/* Ensure we use the right size for the last block. */
if (tot_size > size) {
cur_size -= (tot_size - size);
}
/* Perform copy for the last block. */
R_TRY(PerformCopy());
}
R_SUCCEED();
}
Result KPageTableBase::CopyMemoryFromHeapToHeap(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
/* For convenience, alias this. */
KPageTableBase &src_page_table = *this;
/* Lightly validate the ranges before doing anything else. */
R_UNLESS(src_page_table.Contains(src_addr, size), svc::ResultInvalidCurrentMemory());
R_UNLESS(dst_page_table.Contains(dst_addr, size), svc::ResultInvalidCurrentMemory());
/* Copy the memory. */
{
/* Acquire the table locks. */
KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
/* Check memory state. */
R_TRY(src_page_table.CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr));
R_TRY(dst_page_table.CheckMemoryStateContiguous(dst_addr, size, dst_state_mask, dst_state, dst_test_perm, dst_test_perm, dst_attr_mask | KMemoryAttribute_Uncached, dst_attr));
/* Get implementations. */
auto &src_impl = src_page_table.GetImpl();
auto &dst_impl = dst_page_table.GetImpl();
/* Prepare for traversal. */
TraversalContext src_context;
TraversalContext dst_context;
TraversalEntry src_next_entry;
TraversalEntry dst_next_entry;
bool traverse_valid;
/* Begin traversal. */
traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry), std::addressof(src_context), src_addr);
MESOSPHERE_ABORT_UNLESS(traverse_valid);
traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry), std::addressof(dst_context), dst_addr);
MESOSPHERE_ABORT_UNLESS(traverse_valid);
/* Prepare tracking variables. */
KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr;
KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr;
size_t cur_src_size = src_next_entry.block_size - (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1));
size_t cur_dst_size = dst_next_entry.block_size - (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1));
/* Adjust the initial block sizes. */
src_next_entry.block_size = cur_src_size;
dst_next_entry.block_size = cur_dst_size;
/* Before we get any crazier, succeed if there's nothing to do. */
R_SUCCEED_IF(size == 0);
/* We're going to manage dual traversal via an offset against the total size. */
KPhysicalAddress cur_src_addr = cur_src_block_addr;
KPhysicalAddress cur_dst_addr = cur_dst_block_addr;
size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size);
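/* NOTE: Each iteration copies at most the smaller of the two runs' remaining bytes, so both */
/* traversals stay within physically contiguous regions. */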
/* Iterate. */
size_t ofs = 0;
while (ofs < size) {
/* Determine how much we can copy this iteration. */
const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs);
/* If we need to advance the traversals, do so. */
bool updated_src = false, updated_dst = false, skip_copy = false;
if (ofs + cur_copy_size != size) {
if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
/* Continue the src traversal. */
traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry), std::addressof(src_context));
MESOSPHERE_ASSERT(traverse_valid);
/* Update source. */
updated_src = cur_src_addr + cur_min_size != GetInteger(src_next_entry.phys_addr);
}
if (cur_dst_addr + cur_min_size == dst_next_entry.phys_addr + dst_next_entry.block_size) {
/* Continue the dst traversal. */
traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry), std::addressof(dst_context));
MESOSPHERE_ASSERT(traverse_valid);
/* Update destination. */
updated_dst = cur_dst_addr + cur_min_size != GetInteger(dst_next_entry.phys_addr);
}
/* If we didn't update either of source/destination, skip the copy this iteration. */
if (!updated_src && !updated_dst) {
skip_copy = true;
/* Update the source block address. */
cur_src_block_addr = src_next_entry.phys_addr;
}
}
/* Do the copy, unless we're skipping it. */
if (!skip_copy) {
/* We need both ends of the copy to be heap blocks. */
R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), svc::ResultInvalidCurrentMemory());
R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), svc::ResultInvalidCurrentMemory());
/* Copy the data. */
std::memcpy(GetVoidPointer(GetHeapVirtualAddress(cur_dst_addr)), GetVoidPointer(GetHeapVirtualAddress(cur_src_addr)), cur_copy_size);
/* Update. */
cur_src_block_addr = src_next_entry.phys_addr;
cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
cur_dst_block_addr = dst_next_entry.phys_addr;
cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
/* Advance offset. */
ofs += cur_copy_size;
}
/* Update min size. */
cur_src_size = src_next_entry.block_size;
cur_dst_size = dst_next_entry.block_size;
cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size, cur_dst_block_addr - cur_dst_addr + cur_dst_size);
}
}
R_SUCCEED();
}
Result KPageTableBase::CopyMemoryFromHeapToHeapWithoutCheckDestination(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
/* For convenience, alias this. */
KPageTableBase &src_page_table = *this;
/* Lightly validate the ranges before doing anything else. */
R_UNLESS(src_page_table.Contains(src_addr, size), svc::ResultInvalidCurrentMemory());
R_UNLESS(dst_page_table.Contains(dst_addr, size), svc::ResultInvalidCurrentMemory());
/* Copy the memory. */
{
/* Acquire the table locks. */
KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
/* Check memory state for source. */
R_TRY(src_page_table.CheckMemoryStateContiguous(src_addr, size, src_state_mask, src_state, src_test_perm, src_test_perm, src_attr_mask | KMemoryAttribute_Uncached, src_attr));
/* Destination state is intentionally unchecked. */
MESOSPHERE_UNUSED(dst_state_mask, dst_state, dst_test_perm, dst_attr_mask, dst_attr);
/* Get implementations. */
auto &src_impl = src_page_table.GetImpl();
auto &dst_impl = dst_page_table.GetImpl();
/* Prepare for traversal. */
TraversalContext src_context;
TraversalContext dst_context;
TraversalEntry src_next_entry;
TraversalEntry dst_next_entry;
bool traverse_valid;
/* Begin traversal. */
traverse_valid = src_impl.BeginTraversal(std::addressof(src_next_entry), std::addressof(src_context), src_addr);
MESOSPHERE_ABORT_UNLESS(traverse_valid);
traverse_valid = dst_impl.BeginTraversal(std::addressof(dst_next_entry), std::addressof(dst_context), dst_addr);
MESOSPHERE_ABORT_UNLESS(traverse_valid);
/* Prepare tracking variables. */
KPhysicalAddress cur_src_block_addr = src_next_entry.phys_addr;
KPhysicalAddress cur_dst_block_addr = dst_next_entry.phys_addr;
size_t cur_src_size = src_next_entry.block_size - (GetInteger(cur_src_block_addr) & (src_next_entry.block_size - 1));
size_t cur_dst_size = dst_next_entry.block_size - (GetInteger(cur_dst_block_addr) & (dst_next_entry.block_size - 1));
/* Adjust the initial block sizes. */
src_next_entry.block_size = cur_src_size;
dst_next_entry.block_size = cur_dst_size;
/* Before we get any crazier, succeed if there's nothing to do. */
R_SUCCEED_IF(size == 0);
/* We're going to manage dual traversal via an offset against the total size. */
KPhysicalAddress cur_src_addr = cur_src_block_addr;
KPhysicalAddress cur_dst_addr = cur_dst_block_addr;
size_t cur_min_size = std::min<size_t>(cur_src_size, cur_dst_size);
/* Iterate. */
size_t ofs = 0;
while (ofs < size) {
/* Determine how much we can copy this iteration. */
const size_t cur_copy_size = std::min<size_t>(cur_min_size, size - ofs);
/* If we need to advance the traversals, do so. */
bool updated_src = false, updated_dst = false, skip_copy = false;
if (ofs + cur_copy_size != size) {
if (cur_src_addr + cur_min_size == cur_src_block_addr + cur_src_size) {
/* Continue the src traversal. */
traverse_valid = src_impl.ContinueTraversal(std::addressof(src_next_entry), std::addressof(src_context));
MESOSPHERE_ASSERT(traverse_valid);
/* Update source. */
updated_src = cur_src_addr + cur_min_size != GetInteger(src_next_entry.phys_addr);
}
if (cur_dst_addr + cur_min_size == dst_next_entry.phys_addr + dst_next_entry.block_size) {
/* Continue the dst traversal. */
traverse_valid = dst_impl.ContinueTraversal(std::addressof(dst_next_entry), std::addressof(dst_context));
MESOSPHERE_ASSERT(traverse_valid);
/* Update destination. */
updated_dst = cur_dst_addr + cur_min_size != GetInteger(dst_next_entry.phys_addr);
}
/* If we didn't update either of source/destination, skip the copy this iteration. */
if (!updated_src && !updated_dst) {
skip_copy = true;
/* Update the source block address. */
cur_src_block_addr = src_next_entry.phys_addr;
}
}
/* Do the copy, unless we're skipping it. */
if (!skip_copy) {
/* We need both ends of the copy to be heap blocks. */
R_UNLESS(IsHeapPhysicalAddress(cur_src_addr), svc::ResultInvalidCurrentMemory());
R_UNLESS(IsHeapPhysicalAddress(cur_dst_addr), svc::ResultInvalidCurrentMemory());
/* Copy the data. */
std::memcpy(GetVoidPointer(GetHeapVirtualAddress(cur_dst_addr)), GetVoidPointer(GetHeapVirtualAddress(cur_src_addr)), cur_copy_size);
/* Update. */
cur_src_block_addr = src_next_entry.phys_addr;
cur_src_addr = updated_src ? cur_src_block_addr : cur_src_addr + cur_copy_size;
cur_dst_block_addr = dst_next_entry.phys_addr;
cur_dst_addr = updated_dst ? cur_dst_block_addr : cur_dst_addr + cur_copy_size;
/* Advance offset. */
ofs += cur_copy_size;
}
/* Update min size. */
cur_src_size = src_next_entry.block_size;
cur_dst_size = dst_next_entry.block_size;
cur_min_size = std::min<size_t>(cur_src_block_addr - cur_src_addr + cur_src_size, cur_dst_block_addr - cur_dst_addr + cur_dst_size);
}
}
R_SUCCEED();
}
#pragma GCC push_options
#pragma GCC optimize ("-O3")
Result KPageTableBase::SetupForIpcClient(PageLinkedList *page_list, size_t *out_blocks_needed, KProcessAddress address, size_t size, KMemoryPermission test_perm, KMemoryState dst_state) {
/* Validate pre-conditions. */
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(test_perm == KMemoryPermission_UserReadWrite || test_perm == KMemoryPermission_UserRead);
/* Check that the address is in range. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Get the source permission. */
const auto src_perm = static_cast<KMemoryPermission>((test_perm == KMemoryPermission_UserReadWrite) ? (KMemoryPermission_KernelReadWrite | KMemoryPermission_NotMapped) : KMemoryPermission_UserRead);
/* Get aligned extents. */
const KProcessAddress aligned_src_start = util::AlignDown(GetInteger(address), PageSize);
const KProcessAddress aligned_src_end = util::AlignUp(GetInteger(address) + size, PageSize);
const KProcessAddress mapping_src_start = util::AlignUp(GetInteger(address), PageSize);
const KProcessAddress mapping_src_end = util::AlignDown(GetInteger(address) + size, PageSize);
const auto aligned_src_last = GetInteger(aligned_src_end) - 1;
const auto mapping_src_last = GetInteger(mapping_src_end) - 1;
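/* NOTE: The aligned extents round outward to include any partial pages, while the mapping */
/* extents round inward to cover only whole pages which can be shared directly. */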
/* Get the test state and attribute mask. */
u32 test_state;
u32 test_attr_mask;
switch (dst_state) {
case KMemoryState_Ipc:
test_state = KMemoryState_FlagCanUseIpc;
test_attr_mask = KMemoryAttribute_All & (~(KMemoryAttribute_PermissionLocked | KMemoryAttribute_IpcLocked));
break;
case KMemoryState_NonSecureIpc:
test_state = KMemoryState_FlagCanUseNonSecureIpc;
test_attr_mask = KMemoryAttribute_All & (~(KMemoryAttribute_PermissionLocked | KMemoryAttribute_DeviceShared | KMemoryAttribute_IpcLocked));
break;
case KMemoryState_NonDeviceIpc:
test_state = KMemoryState_FlagCanUseNonDeviceIpc;
test_attr_mask = KMemoryAttribute_All & (~(KMemoryAttribute_PermissionLocked | KMemoryAttribute_DeviceShared | KMemoryAttribute_IpcLocked));
break;
default:
R_THROW(svc::ResultInvalidCombination());
}
/* Ensure that on failure, we roll back appropriately. */
size_t mapped_size = 0;
ON_RESULT_FAILURE {
if (mapped_size > 0) {
this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size, src_perm);
}
};
size_t blocks_needed = 0;
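/* NOTE: blocks_needed counts the splits that ipc-locking will introduce at the mapping */
/* boundaries, so that the caller can size its block update allocator accordingly. */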
/* Iterate, mapping as needed. */
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(aligned_src_start);
while (true) {
/* Validate the current block. */
R_TRY(this->CheckMemoryState(it, test_state, test_state, test_perm, test_perm, test_attr_mask, KMemoryAttribute_None));
if (mapping_src_start < mapping_src_end && GetInteger(mapping_src_start) < GetInteger(it->GetEndAddress()) && GetInteger(it->GetAddress()) < GetInteger(mapping_src_end)) {
const auto cur_start = it->GetAddress() >= GetInteger(mapping_src_start) ? GetInteger(it->GetAddress()) : GetInteger(mapping_src_start);
const auto cur_end = mapping_src_last >= GetInteger(it->GetLastAddress()) ? GetInteger(it->GetEndAddress()) : GetInteger(mapping_src_end);
const size_t cur_size = cur_end - cur_start;
if (GetInteger(it->GetAddress()) < GetInteger(mapping_src_start)) {
++blocks_needed;
}
if (mapping_src_last < GetInteger(it->GetLastAddress())) {
++blocks_needed;
}
/* Set the permissions on the block, if we need to. */
if ((it->GetPermission() & KMemoryPermission_IpcLockChangeMask) != src_perm) {
const DisableMergeAttribute head_body_attr = (GetInteger(mapping_src_start) >= GetInteger(it->GetAddress())) ? DisableMergeAttribute_DisableHeadAndBody : DisableMergeAttribute_None;
const DisableMergeAttribute tail_attr = (cur_end == GetInteger(mapping_src_end)) ? DisableMergeAttribute_DisableTail : DisableMergeAttribute_None;
const KPageProperties properties = { src_perm, false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
R_TRY(this->Operate(page_list, cur_start, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
}
/* Note that we mapped this part. */
mapped_size += cur_size;
}
/* If the block is at the end, we're done. */
if (aligned_src_last <= GetInteger(it->GetLastAddress())) {
break;
}
/* Advance. */
++it;
MESOSPHERE_ABORT_UNLESS(it != m_memory_block_manager.end());
}
if (out_blocks_needed != nullptr) {
MESOSPHERE_ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
*out_blocks_needed = blocks_needed;
}
R_SUCCEED();
}
Result KPageTableBase::SetupForIpcServer(KProcessAddress *out_addr, size_t size, KProcessAddress src_addr, KMemoryPermission test_perm, KMemoryState dst_state, KPageTableBase &src_page_table, bool send) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(src_page_table.IsLockedByCurrentThread());
/* Check that we can theoretically map. */
const KProcessAddress region_start = m_region_starts[RegionType_Alias];
const size_t region_size = m_region_ends[RegionType_Alias] - m_region_starts[RegionType_Alias];
R_UNLESS(size < region_size, svc::ResultOutOfAddressSpace());
/* Get aligned source extents. */
const KProcessAddress src_start = src_addr;
const KProcessAddress src_end = src_addr + size;
const KProcessAddress aligned_src_start = util::AlignDown(GetInteger(src_start), PageSize);
const KProcessAddress aligned_src_end = util::AlignUp(GetInteger(src_start) + size, PageSize);
const KProcessAddress mapping_src_start = util::AlignUp(GetInteger(src_start), PageSize);
const KProcessAddress mapping_src_end = util::AlignDown(GetInteger(src_start) + size, PageSize);
const size_t aligned_src_size = aligned_src_end - aligned_src_start;
const size_t mapping_src_size = (mapping_src_start < mapping_src_end) ? (mapping_src_end - mapping_src_start) : 0;
/* Select a random address to map at. */
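/* NOTE: Larger block alignments are tried first, with the destination sharing the source's */
/* offset within the alignment, so that large block mappings can be mirrored when possible. */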
KProcessAddress dst_addr = Null<KProcessAddress>;
for (s32 block_type = KPageTable::GetMaxBlockType(); block_type >= 0; block_type--) {
const size_t alignment = KPageTable::GetBlockSize(static_cast<KPageTable::BlockType>(block_type));
const size_t offset = GetInteger(aligned_src_start) & (alignment - 1);
dst_addr = this->FindFreeArea(region_start, region_size / PageSize, aligned_src_size / PageSize, alignment, offset, this->GetNumGuardPages());
if (dst_addr != Null<KProcessAddress>) {
break;
}
}
R_UNLESS(dst_addr != Null<KProcessAddress>, svc::ResultOutOfAddressSpace());
/* Check that we can perform the operation we're about to perform. */
MESOSPHERE_ASSERT(this->CanContain(dst_addr, aligned_src_size, dst_state));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Reserve space for any partial pages we allocate. */
const size_t unmapped_size = aligned_src_size - mapping_src_size;
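/* NOTE: This is at most two pages: one partial page at each end of the range. */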
KScopedResourceReservation memory_reservation(m_resource_limit, ams::svc::LimitableResource_PhysicalMemoryMax, unmapped_size);
R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
/* Ensure that we manage page references correctly. */
KPhysicalAddress start_partial_page = Null<KPhysicalAddress>;
KPhysicalAddress end_partial_page = Null<KPhysicalAddress>;
KProcessAddress cur_mapped_addr = dst_addr;
/* If the partial pages are mapped, an extra reference will have been opened. Otherwise, they'll free on scope exit. */
ON_SCOPE_EXIT {
if (start_partial_page != Null<KPhysicalAddress>) {
Kernel::GetMemoryManager().Close(start_partial_page, 1);
}
if (end_partial_page != Null<KPhysicalAddress>) {
Kernel::GetMemoryManager().Close(end_partial_page, 1);
}
};
ON_RESULT_FAILURE {
if (cur_mapped_addr != dst_addr) {
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), dst_addr, (cur_mapped_addr - dst_addr) / PageSize, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
}
};
/* Allocate the start page as needed. */
if (aligned_src_start < mapping_src_start) {
start_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
R_UNLESS(start_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
}
/* Allocate the end page as needed. */
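/* NOTE: No end page is needed when the entire range lies within the single start partial page. */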
if (mapping_src_end < aligned_src_end && (aligned_src_start < mapping_src_end || aligned_src_start == mapping_src_start)) {
end_partial_page = Kernel::GetMemoryManager().AllocateAndOpenContinuous(1, 1, m_allocate_option);
R_UNLESS(end_partial_page != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
}
/* Get the implementation. */
auto &src_impl = src_page_table.GetImpl();
/* Get the fill value for partial pages. */
const auto fill_val = m_ipc_fill_value;
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool traverse_valid = src_impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), aligned_src_start);
MESOSPHERE_ASSERT(traverse_valid);
MESOSPHERE_UNUSED(traverse_valid);
/* Prepare tracking variables. */
KPhysicalAddress cur_block_addr = next_entry.phys_addr;
size_t cur_block_size = next_entry.block_size - (GetInteger(cur_block_addr) & (next_entry.block_size - 1));
size_t tot_block_size = cur_block_size;
/* Map the start page, if we have one. */
if (start_partial_page != Null<KPhysicalAddress>) {
/* Ensure the page holds correct data. */
const KVirtualAddress start_partial_virt = GetHeapVirtualAddress(start_partial_page);
if (send) {
const size_t partial_offset = src_start - aligned_src_start;
size_t copy_size, clear_size;
if (src_end < mapping_src_start) {
copy_size = size;
clear_size = mapping_src_start - src_end;
} else {
copy_size = mapping_src_start - src_start;
clear_size = 0;
}
std::memset(GetVoidPointer(start_partial_virt), fill_val, partial_offset);
std::memcpy(GetVoidPointer(start_partial_virt + partial_offset), GetVoidPointer(GetHeapVirtualAddress(cur_block_addr) + partial_offset), copy_size);
if (clear_size > 0) {
std::memset(GetVoidPointer(start_partial_virt + partial_offset + copy_size), fill_val, clear_size);
}
} else {
std::memset(GetVoidPointer(start_partial_virt), fill_val, PageSize);
}
/* Map the page. */
const KPageProperties start_map_properties = { test_perm, false, false, DisableMergeAttribute_DisableHead };
R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, start_partial_page, true, start_map_properties, OperationType_Map, false));
/* Update tracking extents. */
cur_mapped_addr += PageSize;
cur_block_addr += PageSize;
cur_block_size -= PageSize;
/* If the block's size was one page, we may need to continue traversal. */
if (cur_block_size == 0 && aligned_src_size > PageSize) {
traverse_valid = src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
cur_block_addr = next_entry.phys_addr;
cur_block_size = next_entry.block_size;
tot_block_size += next_entry.block_size;
}
}
/* Map the remaining pages. */
while (aligned_src_start + tot_block_size < mapping_src_end) {
/* Continue the traversal. */
traverse_valid = src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
/* Process the block. */
if (next_entry.phys_addr != cur_block_addr + cur_block_size) {
/* Map the block we've been processing so far. */
const KPageProperties map_properties = { test_perm, false, false, (cur_mapped_addr == dst_addr) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, cur_block_size / PageSize, cur_block_addr, true, map_properties, OperationType_Map, false));
/* Update tracking extents. */
cur_mapped_addr += cur_block_size;
cur_block_addr = next_entry.phys_addr;
cur_block_size = next_entry.block_size;
} else {
cur_block_size += next_entry.block_size;
}
tot_block_size += next_entry.block_size;
}
/* Handle the last direct-mapped page. */
if (const KProcessAddress mapped_block_end = aligned_src_start + tot_block_size - cur_block_size; mapped_block_end < mapping_src_end) {
const size_t last_block_size = mapping_src_end - mapped_block_end;
/* Map the last block. */
const KPageProperties map_properties = { test_perm, false, false, (cur_mapped_addr == dst_addr) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, last_block_size / PageSize, cur_block_addr, true, map_properties, OperationType_Map, false));
/* Update tracking extents. */
cur_mapped_addr += last_block_size;
cur_block_addr += last_block_size;
if (mapped_block_end + cur_block_size < aligned_src_end && cur_block_size == last_block_size) {
traverse_valid = src_impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
MESOSPHERE_ASSERT(traverse_valid);
cur_block_addr = next_entry.phys_addr;
}
}
/* Map the end page, if we have one. */
if (end_partial_page != Null<KPhysicalAddress>) {
/* Ensure the page holds correct data. */
const KVirtualAddress end_partial_virt = GetHeapVirtualAddress(end_partial_page);
if (send) {
const size_t copy_size = src_end - mapping_src_end;
std::memcpy(GetVoidPointer(end_partial_virt), GetVoidPointer(GetHeapVirtualAddress(cur_block_addr)), copy_size);
std::memset(GetVoidPointer(end_partial_virt + copy_size), fill_val, PageSize - copy_size);
} else {
std::memset(GetVoidPointer(end_partial_virt), fill_val, PageSize);
}
/* Map the page. */
const KPageProperties map_properties = { test_perm, false, false, (cur_mapped_addr == dst_addr) ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), cur_mapped_addr, 1, end_partial_page, true, map_properties, OperationType_Map, false));
}
/* Update memory blocks to reflect our changes. */
m_memory_block_manager.Update(std::addressof(allocator), dst_addr, aligned_src_size / PageSize, dst_state, test_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* Set the output address. */
*out_addr = dst_addr + (src_start - aligned_src_start);
/* We succeeded. */
memory_reservation.Commit();
R_SUCCEED();
}
Result KPageTableBase::SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KPageTableBase &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
/* For convenience, alias this. */
KPageTableBase &dst_page_table = *this;
/* Acquire the table locks. */
KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(std::addressof(src_page_table));
/* Perform client setup. */
size_t num_allocator_blocks;
R_TRY(src_page_table.SetupForIpcClient(updater.GetPageList(), std::addressof(num_allocator_blocks), src_addr, size, test_perm, dst_state));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), src_page_table.m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* Get the mapped extents. */
const KProcessAddress src_map_start = util::AlignUp(GetInteger(src_addr), PageSize);
const KProcessAddress src_map_end = util::AlignDown(GetInteger(src_addr) + size, PageSize);
const size_t src_map_size = src_map_end - src_map_start;
/* Ensure that we clean up appropriately if we fail after this. */
const auto src_perm = static_cast<KMemoryPermission>((test_perm == KMemoryPermission_UserReadWrite) ? (KMemoryPermission_KernelReadWrite | KMemoryPermission_NotMapped) : KMemoryPermission_UserRead);
ON_RESULT_FAILURE {
if (src_map_end > src_map_start) {
src_page_table.CleanupForIpcClientOnServerSetupFailure(updater.GetPageList(), src_map_start, src_map_size, src_perm);
}
};
/* Perform server setup. */
R_TRY(dst_page_table.SetupForIpcServer(out_dst_addr, size, src_addr, test_perm, dst_state, src_page_table, send));
/* If anything was mapped, ipc-lock the pages. */
if (src_map_start < src_map_end) {
/* Update the memory blocks to lock them for ipc, tracking the pre-lock permission. */
src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, (src_map_end - src_map_start) / PageSize, &KMemoryBlock::LockForIpc, src_perm);
}
R_SUCCEED();
}
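/* CleanupForIpcServer unmaps a completed ipc mapping from the server address space, and */
/* releases the partially-mapped head/tail pages back to the process's physical memory */
/* resource limit. */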
Result KPageTableBase::CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) {
/* Validate the address. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Validate the memory state. */
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, dst_state, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_All, KMemoryAttribute_None));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Get aligned extents. */
const KProcessAddress aligned_start = util::AlignDown(GetInteger(address), PageSize);
const KProcessAddress aligned_end = util::AlignUp(GetInteger(address) + size, PageSize);
const size_t aligned_size = aligned_end - aligned_start;
const size_t aligned_num_pages = aligned_size / PageSize;
/* Unmap the pages. */
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), aligned_start, aligned_num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
/* Update memory blocks. */
m_memory_block_manager.Update(std::addressof(allocator), aligned_start, aligned_num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
/* Release from the resource limit as relevant. */
const KProcessAddress mapping_start = util::AlignUp(GetInteger(address), PageSize);
const KProcessAddress mapping_end = util::AlignDown(GetInteger(address) + size, PageSize);
const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, aligned_size - mapping_size);
R_SUCCEED();
}
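/* CleanupForIpcClient restores the client's original permissions once the server is done */
/* with the mapping. Adjacent blocks whose reprotection parameters match are coalesced */
/* into a single Operate() call, and the ON_RESULT_FAILURE handler re-applies the */
/* ipc-locked permissions to any range already processed if a later reprotection fails. */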
Result KPageTableBase::CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
/* Validate the address. */
R_UNLESS(this->Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Get aligned source extents. */
const KProcessAddress mapping_start = util::AlignUp(GetInteger(address), PageSize);
const KProcessAddress mapping_end = util::AlignDown(GetInteger(address) + size, PageSize);
const KProcessAddress mapping_last = mapping_end - 1;
const size_t mapping_size = (mapping_start < mapping_end) ? (mapping_end - mapping_start) : 0;
/* If nothing was mapped, we're actually done immediately. */
R_SUCCEED_IF(mapping_size == 0);
/* Get the test state and attribute mask. */
u32 test_state;
u32 test_attr_mask;
switch (dst_state) {
case KMemoryState_Ipc:
test_state = KMemoryState_FlagCanUseIpc;
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked;
break;
case KMemoryState_NonSecureIpc:
test_state = KMemoryState_FlagCanUseNonSecureIpc;
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
break;
case KMemoryState_NonDeviceIpc:
test_state = KMemoryState_FlagCanUseNonDeviceIpc;
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
break;
default:
R_THROW(svc::ResultInvalidCombination());
}
/* Lock the table. */
/* NOTE: Nintendo does this *after* creating the updater below, but this does not follow convention elsewhere in KPageTableBase. */
KScopedLightLock lk(m_general_lock);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Ensure that on failure, we roll back appropriately. */
size_t mapped_size = 0;
ON_RESULT_FAILURE {
if (mapped_size > 0) {
/* Determine where the mapping ends. */
const auto mapped_end = GetInteger(mapping_start) + mapped_size;
const auto mapped_last = mapped_end - 1;
/* Get current and next iterators. */
KMemoryBlockManager::const_iterator cur_it = m_memory_block_manager.FindIterator(mapping_start);
KMemoryBlockManager::const_iterator next_it = cur_it;
++next_it;
/* Create tracking variables. */
KProcessAddress cur_address = cur_it->GetAddress();
size_t cur_size = cur_it->GetSize();
bool cur_perm_eq = cur_it->GetPermission() == cur_it->GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_it->GetIpcLockCount() == 1;
bool first = cur_it->GetIpcDisableMergeCount() == 1 && (cur_it->GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;
while ((GetInteger(cur_address) + cur_size - 1) < mapped_last) {
/* Check that we have a next block. */
MESOSPHERE_ABORT_UNLESS(next_it != m_memory_block_manager.end());
/* Check if we can consolidate the next block's permission set with the current one. */
const bool next_perm_eq = next_it->GetPermission() == next_it->GetOriginalPermission();
const bool next_needs_set_perm = !next_perm_eq && next_it->GetIpcLockCount() == 1;
if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_it->GetOriginalPermission() == next_it->GetOriginalPermission()) {
/* We can consolidate the reprotection for the current and next block into a single call. */
cur_size += next_it->GetSize();
} else {
/* We have to operate on the current block. */
if ((cur_needs_set_perm || first) && !cur_perm_eq) {
const KPageProperties properties = { cur_it->GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
}
/* Advance. */
cur_address = next_it->GetAddress();
cur_size = next_it->GetSize();
first = false;
}
/* Advance. */
cur_perm_eq = next_perm_eq;
cur_needs_set_perm = next_needs_set_perm;
cur_it = next_it++;
}
/* Process the last block. */
if ((first || cur_needs_set_perm) && !cur_perm_eq) {
const KPageProperties properties = { cur_it->GetPermission(), false, false, first ? DisableMergeAttribute_EnableAndMergeHeadBodyTail : DisableMergeAttribute_None };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
}
}
};
/* Iterate, reprotecting as needed. */
{
/* Get current and next iterators. */
KMemoryBlockManager::const_iterator cur_it = m_memory_block_manager.FindIterator(mapping_start);
KMemoryBlockManager::const_iterator next_it = cur_it;
++next_it;
/* Validate the current block. */
MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(cur_it, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));
/* Create tracking variables. */
KProcessAddress cur_address = cur_it->GetAddress();
size_t cur_size = cur_it->GetSize();
bool cur_perm_eq = cur_it->GetPermission() == cur_it->GetOriginalPermission();
bool cur_needs_set_perm = !cur_perm_eq && cur_it->GetIpcLockCount() == 1;
bool first = cur_it->GetIpcDisableMergeCount() == 1 && (cur_it->GetDisableMergeAttribute() & KMemoryBlockDisableMergeAttribute_Locked) == 0;
while ((cur_address + cur_size - 1) < mapping_last) {
/* Check that we have a next block. */
MESOSPHERE_ABORT_UNLESS(next_it != m_memory_block_manager.end());
/* Validate the next block. */
MESOSPHERE_R_ABORT_UNLESS(this->CheckMemoryState(next_it, test_state, test_state, KMemoryPermission_None, KMemoryPermission_None, test_attr_mask | KMemoryAttribute_IpcLocked, KMemoryAttribute_IpcLocked));
/* Check if we can consolidate the next block's permission set with the current one. */
const bool next_perm_eq = next_it->GetPermission() == next_it->GetOriginalPermission();
const bool next_needs_set_perm = !next_perm_eq && next_it->GetIpcLockCount() == 1;
if (cur_perm_eq == next_perm_eq && cur_needs_set_perm == next_needs_set_perm && cur_it->GetOriginalPermission() == next_it->GetOriginalPermission()) {
/* We can consolidate the reprotection for the current and next block into a single call. */
cur_size += next_it->GetSize();
} else {
/* We have to operate on the current block. */
if ((cur_needs_set_perm || first) && !cur_perm_eq) {
const KPageProperties properties = { cur_needs_set_perm ? cur_it->GetOriginalPermission() : cur_it->GetPermission(), false, false, first ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
}
/* Mark that we mapped the block. */
mapped_size += cur_size;
/* Advance. */
cur_address = next_it->GetAddress();
cur_size = next_it->GetSize();
first = false;
}
/* Advance. */
cur_perm_eq = next_perm_eq;
cur_needs_set_perm = next_needs_set_perm;
cur_it = next_it++;
}
/* Process the last block. */
const auto lock_count = cur_it->GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
if ((first || cur_needs_set_perm || (lock_count == 1)) && !cur_perm_eq) {
const DisableMergeAttribute head_body_attr = first ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None;
const DisableMergeAttribute tail_attr = lock_count == 1 ? DisableMergeAttribute_EnableTail : DisableMergeAttribute_None;
const KPageProperties properties = { cur_needs_set_perm ? cur_it->GetOriginalPermission() : cur_it->GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
R_TRY(this->Operate(updater.GetPageList(), cur_address, cur_size / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, false));
}
}
/* Create an update allocator. */
/* NOTE: Guaranteed zero blocks needed here. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, 0);
R_TRY(allocator_result);
/* Unlock the pages. */
m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, KMemoryPermission_None);
R_SUCCEED();
}
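/* CleanupForIpcClientOnServerSetupFailure is the rollback path for SetupForIpc: it walks */
/* the client blocks that were reprotected for the failed server mapping and restores the */
/* tracked permissions, recomputing merge-disable attributes at the head and tail of the */
/* range. */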
void KPageTableBase::CleanupForIpcClientOnServerSetupFailure(PageLinkedList *page_list, KProcessAddress address, size_t size, KMemoryPermission prot_perm) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));
/* Get the mapped extents. */
const KProcessAddress src_map_start = address;
const KProcessAddress src_map_end = address + size;
const KProcessAddress src_map_last = src_map_end - 1;
/* This function is only invoked when there's something to do. */
MESOSPHERE_ASSERT(src_map_end > src_map_start);
/* Iterate over blocks, fixing permissions. */
KMemoryBlockManager::const_iterator it = m_memory_block_manager.FindIterator(address);
while (true) {
const auto cur_start = it->GetAddress() >= GetInteger(src_map_start) ? it->GetAddress() : GetInteger(src_map_start);
const auto cur_end = src_map_last <= it->GetLastAddress() ? src_map_end : it->GetEndAddress();
/* If we can, fix the protections on the block. */
if ((it->GetIpcLockCount() == 0 && (it->GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) ||
(it->GetIpcLockCount() != 0 && (it->GetOriginalPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm))
{
/* Check if we actually need to fix the protections on the block. */
if (cur_end == src_map_end || it->GetAddress() <= GetInteger(src_map_start) || (it->GetPermission() & KMemoryPermission_IpcLockChangeMask) != prot_perm) {
const bool start_nc = (it->GetAddress() == GetInteger(src_map_start)) ? ((it->GetDisableMergeAttribute() & (KMemoryBlockDisableMergeAttribute_Locked | KMemoryBlockDisableMergeAttribute_IpcLeft)) == 0) : it->GetAddress() <= GetInteger(src_map_start);
const DisableMergeAttribute head_body_attr = start_nc ? DisableMergeAttribute_EnableHeadAndBody : DisableMergeAttribute_None;
DisableMergeAttribute tail_attr;
if (cur_end == src_map_end && it->GetEndAddress() == src_map_end) {
auto next_it = it;
++next_it;
const auto lock_count = it->GetIpcLockCount() + (next_it != m_memory_block_manager.end() ? (next_it->GetIpcDisableMergeCount() - next_it->GetIpcLockCount()) : 0);
tail_attr = lock_count == 0 ? DisableMergeAttribute_EnableTail : DisableMergeAttribute_None;
} else {
tail_attr = DisableMergeAttribute_None;
}
const KPageProperties properties = { it->GetPermission(), false, false, static_cast<DisableMergeAttribute>(head_body_attr | tail_attr) };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(page_list, cur_start, (cur_end - cur_start) / PageSize, Null<KPhysicalAddress>, false, properties, OperationType_ChangePermissions, true));
}
}
/* If we're past the end of the region, we're done. */
if (src_map_last <= it->GetLastAddress()) {
break;
}
/* Advance. */
++it;
MESOSPHERE_ABORT_UNLESS(it != m_memory_block_manager.end());
}
}
#pragma GCC pop_options
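/* MapPhysicalMemory backs every unmapped page in [address, address + size) with newly */
/* allocated heap pages. The mapped-size check is performed optimistically without the */
/* allocation in hand; after allocating, the range is re-validated under the lock, and */
/* the entire operation retries if another thread changed the mappings in the interim. */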
Result KPageTableBase::MapPhysicalMemory(KProcessAddress address, size_t size) {
/* Lock the physical memory lock. */
KScopedLightLock phys_lk(m_map_physical_memory_lock);
/* Calculate the last address for convenience. */
const KProcessAddress last_address = address + size - 1;
/* Define iteration variables. */
KProcessAddress cur_address;
size_t mapped_size;
/* The entire mapping process can be retried. */
while (true) {
/* Check if the memory is already mapped. */
{
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Iterate over the memory. */
cur_address = address;
mapped_size = 0;
auto it = m_memory_block_manager.FindIterator(cur_address);
while (true) {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
/* Check if we're done. */
if (last_address <= it->GetLastAddress()) {
if (it->GetState() != KMemoryState_Free) {
mapped_size += (last_address + 1 - cur_address);
}
break;
}
/* Track the memory if it's mapped. */
if (it->GetState() != KMemoryState_Free) {
mapped_size += it->GetEndAddress() - cur_address;
}
/* Advance. */
cur_address = it->GetEndAddress();
++it;
}
/* If the size mapped is the size requested, we've nothing to do. */
R_SUCCEED_IF(size == mapped_size);
}
/* Allocate and map the memory. */
{
/* Reserve the memory from the process resource limit. */
KScopedResourceReservation memory_reservation(m_resource_limit, ams::svc::LimitableResource_PhysicalMemoryMax, size - mapped_size);
R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
/* Allocate pages for the new memory. */
KPageGroup pg(m_block_info_manager);
R_TRY(Kernel::GetMemoryManager().AllocateForProcess(std::addressof(pg), (size - mapped_size) / PageSize, m_allocate_option, GetCurrentProcess().GetId(), m_heap_fill_value));
/* If we fail in the next bit (or retry), we need to cleanup the pages. */
auto pg_guard = SCOPE_GUARD {
pg.OpenFirst();
pg.Close();
};
/* Map the memory. */
{
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
size_t num_allocator_blocks = 0;
/* Verify that nobody has mapped memory since we first checked. */
{
/* Iterate over the memory. */
size_t checked_mapped_size = 0;
cur_address = address;
auto it = m_memory_block_manager.FindIterator(cur_address);
while (true) {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
const bool is_free = it->GetState() == KMemoryState_Free;
if (is_free) {
if (it->GetAddress() < GetInteger(address)) {
++num_allocator_blocks;
}
if (last_address < it->GetLastAddress()) {
++num_allocator_blocks;
}
}
/* Check if we're done. */
if (last_address <= it->GetLastAddress()) {
if (!is_free) {
checked_mapped_size += (last_address + 1 - cur_address);
}
break;
}
/* Track the memory if it's mapped. */
if (!is_free) {
checked_mapped_size += it->GetEndAddress() - cur_address;
}
/* Advance. */
cur_address = it->GetEndAddress();
++it;
}
/* If the size now isn't what it was before, somebody mapped or unmapped concurrently. */
/* If this happened, retry. */
if (mapped_size != checked_mapped_size) {
continue;
}
}
/* Create an update allocator. */
MESOSPHERE_ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Prepare to iterate over the memory. */
auto pg_it = pg.begin();
KPhysicalAddress pg_phys_addr = pg_it->GetAddress();
size_t pg_pages = pg_it->GetNumPages();
/* Reset the current tracking address, and make sure we clean up on failure. */
pg_guard.Cancel();
cur_address = address;
ON_RESULT_FAILURE {
if (cur_address > address) {
const KProcessAddress last_unmap_address = cur_address - 1;
/* Iterate, unmapping the pages. */
cur_address = address;
auto it = m_memory_block_manager.FindIterator(cur_address);
while (true) {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
/* If the memory state is free, we mapped it and need to unmap it. */
if (it->GetState() == KMemoryState_Free) {
/* Determine the range to unmap. */
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
const size_t cur_pages = std::min(it->GetEndAddress() - cur_address, last_unmap_address + 1 - cur_address) / PageSize;
/* Unmap. */
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
}
/* Check if we're done. */
if (last_unmap_address <= it->GetLastAddress()) {
break;
}
/* Advance. */
cur_address = it->GetEndAddress();
++it;
}
}
/* Release any remaining unmapped memory. */
Kernel::GetMemoryManager().OpenFirst(pg_phys_addr, pg_pages);
Kernel::GetMemoryManager().Close(pg_phys_addr, pg_pages);
for (++pg_it; pg_it != pg.end(); ++pg_it) {
Kernel::GetMemoryManager().OpenFirst(pg_it->GetAddress(), pg_it->GetNumPages());
Kernel::GetMemoryManager().Close(pg_it->GetAddress(), pg_it->GetNumPages());
}
};
auto it = m_memory_block_manager.FindIterator(cur_address);
while (true) {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
/* If it's unmapped, we need to map it. */
if (it->GetState() == KMemoryState_Free) {
/* Determine the range to map. */
const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, cur_address == this->GetAliasRegionStart() ? DisableMergeAttribute_DisableHead : DisableMergeAttribute_None };
size_t map_pages = std::min(it->GetEndAddress() - cur_address, last_address + 1 - cur_address) / PageSize;
/* While we have pages to map, map them. */
{
/* Create a page group for the current mapping range. */
KPageGroup cur_pg(m_block_info_manager);
{
ON_RESULT_FAILURE {
cur_pg.OpenFirst();
cur_pg.Close();
};
size_t remain_pages = map_pages;
while (remain_pages > 0) {
/* Check if we're at the end of the physical block. */
if (pg_pages == 0) {
/* Ensure there are more pages to map. */
MESOSPHERE_ASSERT(pg_it != pg.end());
/* Advance our physical block. */
++pg_it;
pg_phys_addr = pg_it->GetAddress();
pg_pages = pg_it->GetNumPages();
}
/* Add whatever we can to the current block. */
const size_t cur_pages = std::min(pg_pages, remain_pages);
R_TRY(cur_pg.AddBlock(pg_phys_addr + ((pg_pages - cur_pages) * PageSize), cur_pages));
/* Advance. */
remain_pages -= cur_pages;
pg_pages -= cur_pages;
}
}
/* Map the pages. */
R_TRY(this->Operate(updater.GetPageList(), cur_address, map_pages, cur_pg, map_properties, OperationType_MapFirstGroup, false));
}
}
/* Check if we're done. */
if (last_address <= it->GetLastAddress()) {
break;
}
/* Advance. */
cur_address = it->GetEndAddress();
++it;
}
/* We succeeded, so commit the memory reservation. */
memory_reservation.Commit();
/* Increase our tracked mapped size. */
m_mapped_physical_memory_size += (size - mapped_size);
/* Update the relevant memory blocks. */
m_memory_block_manager.UpdateIfMatch(std::addressof(allocator), address, size / PageSize,
KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None,
KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None,
address == this->GetAliasRegionStart() ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None,
KMemoryBlockDisableMergeAttribute_None);
R_SUCCEED();
}
}
}
}
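/* UnmapPhysicalMemory is the inverse: it verifies the range contains only Normal (heap) */
/* or Free memory, separates the mapping at the range boundaries, unmaps the Normal */
/* portions, and returns the unmapped size to the physical memory resource limit. */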
Result KPageTableBase::UnmapPhysicalMemory(KProcessAddress address, size_t size) {
/* Lock the physical memory lock. */
KScopedLightLock phys_lk(m_map_physical_memory_lock);
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Calculate the last address for convenience. */
const KProcessAddress last_address = address + size - 1;
/* Define iteration variables. */
KProcessAddress map_start_address = Null<KProcessAddress>;
KProcessAddress map_last_address = Null<KProcessAddress>;
KProcessAddress cur_address;
size_t mapped_size;
size_t num_allocator_blocks = 0;
/* Check if the memory is mapped. */
{
/* Iterate over the memory. */
cur_address = address;
mapped_size = 0;
auto it = m_memory_block_manager.FindIterator(cur_address);
while (true) {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
/* Verify the memory's state. */
const bool is_normal = it->GetState() == KMemoryState_Normal && it->GetAttribute() == 0;
const bool is_free = it->GetState() == KMemoryState_Free;
R_UNLESS(is_normal || is_free, svc::ResultInvalidCurrentMemory());
if (is_normal) {
R_UNLESS(it->GetAttribute() == KMemoryAttribute_None, svc::ResultInvalidCurrentMemory());
if (map_start_address == Null<KProcessAddress>) {
map_start_address = cur_address;
}
map_last_address = (last_address >= it->GetLastAddress()) ? it->GetLastAddress() : last_address;
if (it->GetAddress() < GetInteger(address)) {
++num_allocator_blocks;
}
if (last_address < it->GetLastAddress()) {
++num_allocator_blocks;
}
mapped_size += (map_last_address + 1 - cur_address);
}
/* Check if we're done. */
if (last_address <= it->GetLastAddress()) {
break;
}
/* Advance. */
cur_address = it->GetEndAddress();
++it;
}
/* If there's nothing mapped, we've nothing to do. */
R_SUCCEED_IF(mapped_size == 0);
}
/* Create an update allocator. */
MESOSPHERE_ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Separate the mapping. */
const KPageProperties sep_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), map_start_address, (map_last_address + 1 - map_start_address) / PageSize, Null<KPhysicalAddress>, false, sep_properties, OperationType_Separate, false));
/* Reset the current tracking address, and make sure we clean up on failure. */
cur_address = address;
/* Iterate over the memory, unmapping as we go. */
auto it = m_memory_block_manager.FindIterator(cur_address);
const auto clear_merge_attr = (it->GetState() == KMemoryState_Normal && it->GetAddress() == this->GetAliasRegionStart() && it->GetAddress() == address) ? KMemoryBlockDisableMergeAttribute_Normal : KMemoryBlockDisableMergeAttribute_None;
while (true) {
/* Check that the iterator is valid. */
MESOSPHERE_ASSERT(it != m_memory_block_manager.end());
/* If the memory state is normal, we need to unmap it. */
if (it->GetState() == KMemoryState_Normal) {
/* Determine the range to unmap. */
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
const size_t cur_pages = std::min(it->GetEndAddress() - cur_address, last_address + 1 - cur_address) / PageSize;
/* Unmap. */
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), cur_address, cur_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
}
/* Check if we're done. */
if (last_address <= it->GetLastAddress()) {
break;
}
/* Advance. */
cur_address = it->GetEndAddress();
++it;
}
/* Release the memory resource. */
m_mapped_physical_memory_size -= mapped_size;
m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, mapped_size);
/* Update memory blocks. */
m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, clear_merge_attr);
/* We succeeded. */
R_SUCCEED();
}
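/* MapPhysicalMemoryUnsafe allocates from the Unsafe pool, accounted against the global */
/* unsafe memory budget rather than the process resource limit, clears the pages with the */
/* heap fill value, and maps them user-read-write. */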
Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
/* Try to reserve the unsafe memory. */
R_UNLESS(Kernel::GetUnsafeMemory().TryReserve(size), svc::ResultLimitReached());
/* Ensure we release our reservation on failure. */
ON_RESULT_FAILURE { Kernel::GetUnsafeMemory().Release(size); };
/* Create a page group for the new memory. */
KPageGroup pg(m_block_info_manager);
/* Allocate the new memory. */
const size_t num_pages = size / PageSize;
R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(pg), num_pages, 1, KMemoryManager::EncodeOption(KMemoryManager::Pool_Unsafe, KMemoryManager::Direction_FromFront)));
/* Close the page group when we're done with it. */
ON_SCOPE_EXIT { pg.Close(); };
/* Clear the new memory. */
for (const auto &block : pg) {
std::memset(GetVoidPointer(GetHeapVirtualAddress(block.GetAddress())), m_heap_fill_value, block.GetSize());
}
/* Map the new memory. */
{
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check the memory state. */
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Free, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryAttribute_None));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Map the pages. */
const KPageProperties map_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_DisableHead };
R_TRY(this->Operate(updater.GetPageList(), address, num_pages, pg, map_properties, OperationType_MapGroup, false));
/* Apply the memory block update. */
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* Update our mapped unsafe size. */
m_mapped_unsafe_physical_memory += size;
/* We succeeded. */
R_SUCCEED();
}
}
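/* UnmapPhysicalMemoryUnsafe undoes the above, releasing the unsafe memory reservation */
/* once the pages are unmapped. */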
Result KPageTableBase::UnmapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
/* Lock the table. */
KScopedLightLock lk(m_general_lock);
/* Check whether we can unmap this much unsafe physical memory. */
R_UNLESS(size <= m_mapped_unsafe_physical_memory, svc::ResultInvalidCurrentMemory());
/* Check the memory state. */
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, KMemoryState_All, KMemoryState_Normal, KMemoryPermission_All, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None));
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Unmap the memory. */
const size_t num_pages = size / PageSize;
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), address, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
/* Apply the memory block update. */
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
/* Release the unsafe memory from the limit. */
Kernel::GetUnsafeMemory().Release(size);
/* Update our mapped unsafe size. */
m_mapped_unsafe_physical_memory -= size;
R_SUCCEED();
}
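/* UnmapProcessMemory removes a SharedCode alias from this table. Before unmapping, it */
/* verifies block-by-block that the source and destination ranges resolve to the same */
/* physical pages, using a traversal helper that coalesces physically-contiguous runs. */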
Result KPageTableBase::UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase &src_page_table, KProcessAddress src_address) {
/* We need to lock both this table, and the current process's table, so set up an alias. */
KPageTableBase &dst_page_table = *this;
/* Acquire the table locks. */
KScopedLightLockPair lk(src_page_table.m_general_lock, dst_page_table.m_general_lock);
/* Check that the memory is mapped in the destination process. */
size_t num_allocator_blocks;
R_TRY(dst_page_table.CheckMemoryState(std::addressof(num_allocator_blocks), dst_address, size, KMemoryState_All, KMemoryState_SharedCode, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_All, KMemoryAttribute_None));
/* Check that the memory is mapped in the source process. */
R_TRY(src_page_table.CheckMemoryState(src_address, size, KMemoryState_FlagCanMapProcess, KMemoryState_FlagCanMapProcess, KMemoryPermission_None, KMemoryPermission_None, KMemoryAttribute_All, KMemoryAttribute_None));
/* Validate that the memory ranges are compatible. */
{
/* Define a helper type. */
struct ContiguousRangeInfo {
public:
KPageTableBase &m_pt;
TraversalContext m_context;
TraversalEntry m_entry;
KPhysicalAddress m_phys_addr;
size_t m_cur_size;
size_t m_remaining_size;
public:
ContiguousRangeInfo(KPageTableBase &pt, KProcessAddress address, size_t size) : m_pt(pt), m_remaining_size(size) {
/* Begin a traversal. */
MESOSPHERE_ABORT_UNLESS(m_pt.GetImpl().BeginTraversal(std::addressof(m_entry), std::addressof(m_context), address));
/* Setup tracking fields. */
m_phys_addr = m_entry.phys_addr;
m_cur_size = std::min<size_t>(m_remaining_size, m_entry.block_size - (GetInteger(m_phys_addr) & (m_entry.block_size - 1)));
/* Consume the whole contiguous block. */
this->DetermineContiguousBlockExtents();
}
void ContinueTraversal() {
/* Update our remaining size. */
m_remaining_size = m_remaining_size - m_cur_size;
/* Update our tracking fields. */
if (m_remaining_size > 0) {
m_phys_addr = m_entry.phys_addr;
m_cur_size = std::min<size_t>(m_remaining_size, m_entry.block_size);
/* Consume the whole contiguous block. */
this->DetermineContiguousBlockExtents();
}
}
private:
void DetermineContiguousBlockExtents() {
/* Continue traversing until we're not contiguous, or we have enough. */
while (m_cur_size < m_remaining_size) {
MESOSPHERE_ABORT_UNLESS(m_pt.GetImpl().ContinueTraversal(std::addressof(m_entry), std::addressof(m_context)));
/* If we're not contiguous, we're done. */
if (m_entry.phys_addr != m_phys_addr + m_cur_size) {
break;
}
/* Update our current size. */
m_cur_size = std::min(m_remaining_size, m_cur_size + m_entry.block_size);
}
}
};
/* Create ranges for both tables. */
ContiguousRangeInfo src_range(src_page_table, src_address, size);
ContiguousRangeInfo dst_range(dst_page_table, dst_address, size);
/* Validate the ranges. */
while (src_range.m_remaining_size > 0 && dst_range.m_remaining_size > 0) {
R_UNLESS(src_range.m_phys_addr == dst_range.m_phys_addr, svc::ResultInvalidMemoryRegion());
R_UNLESS(src_range.m_cur_size == dst_range.m_cur_size, svc::ResultInvalidMemoryRegion());
src_range.ContinueTraversal();
dst_range.ContinueTraversal();
}
}
/* We no longer need to hold our lock on the source page table. */
lk.TryUnlockHalf(src_page_table.m_general_lock);
/* Create an update allocator. */
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result), m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
/* We're going to perform an update, so create a helper. */
KScopedPageTableUpdater updater(this);
/* Unmap the memory. */
const size_t num_pages = size / PageSize;
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, false));
/* Apply the memory block update. */
m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
R_SUCCEED();
}
}
| 258,396 | C++ | .cpp | 3,990 | 50.730075 | 375 | 0.606797 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,935 | kern_k_memory_layout.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_memory_layout.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
class KMemoryRegionAllocator {
NON_COPYABLE(KMemoryRegionAllocator);
NON_MOVEABLE(KMemoryRegionAllocator);
public:
static constexpr size_t MaxMemoryRegions = 200;
private:
KMemoryRegion m_region_heap[MaxMemoryRegions];
size_t m_num_regions;
public:
constexpr ALWAYS_INLINE KMemoryRegionAllocator() : m_region_heap(), m_num_regions() { /* ... */ }
public:
template<typename... Args>
ALWAYS_INLINE KMemoryRegion *Allocate(Args&&... args) {
/* Ensure we stay within the bounds of our heap. */
MESOSPHERE_INIT_ABORT_UNLESS(m_num_regions < MaxMemoryRegions);
/* Create the new region. */
KMemoryRegion *region = std::addressof(m_region_heap[m_num_regions++]);
std::construct_at(region, std::forward<Args>(args)...);
return region;
}
};
constinit KMemoryRegionAllocator g_memory_region_allocator;
template<typename... Args>
ALWAYS_INLINE KMemoryRegion *AllocateRegion(Args&&... args) {
return g_memory_region_allocator.Allocate(std::forward<Args>(args)...);
}
}
void KMemoryRegionTree::InsertDirectly(uintptr_t address, uintptr_t last_address, u32 attr, u32 type_id) {
this->insert(*AllocateRegion(address, last_address, attr, type_id));
}
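/* Insert carves a derived region of the given type out of an existing region: the old */
/* region object is reused where possible, shrunk to cover any prefix, and followed by a */
/* freshly-allocated region for any suffix, with pair addresses rebased accordingly. */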
bool KMemoryRegionTree::Insert(uintptr_t address, size_t size, u32 type_id, u32 new_attr, u32 old_attr) {
/* Locate the memory region that contains the address. */
KMemoryRegion *found = this->FindModifiable(address);
/* We require that the old attr is correct. */
if (found->GetAttributes() != old_attr) {
return false;
}
/* We further require that the region can be split from the old region. */
const uintptr_t inserted_region_end = address + size;
const uintptr_t inserted_region_last = inserted_region_end - 1;
if (found->GetLastAddress() < inserted_region_last) {
return false;
}
/* Further, we require that the type id is a valid transformation. */
if (!found->CanDerive(type_id)) {
return false;
}
/* Cache information from the region before we remove it. */
const uintptr_t old_address = found->GetAddress();
const uintptr_t old_last = found->GetLastAddress();
const uintptr_t old_pair = found->GetPairAddress();
const u32 old_type = found->GetType();
/* Erase the existing region from the tree. */
this->erase(this->iterator_to(*found));
/* Insert the new region into the tree. */
if (old_address == address) {
/* Reuse the old object for the new region, if we can. */
found->Reset(address, inserted_region_last, old_pair, new_attr, type_id);
this->insert(*found);
} else {
/* If we can't re-use, adjust the old region. */
found->Reset(old_address, address - 1, old_pair, old_attr, old_type);
this->insert(*found);
/* Insert a new region for the split. */
const uintptr_t new_pair = (old_pair != std::numeric_limits<uintptr_t>::max()) ? old_pair + (address - old_address) : old_pair;
this->insert(*AllocateRegion(address, inserted_region_last, new_pair, new_attr, type_id));
}
/* If we need to insert a region after the inserted region, do so. */
if (old_last != inserted_region_last) {
const uintptr_t after_pair = (old_pair != std::numeric_limits<uintptr_t>::max()) ? old_pair + (inserted_region_end - old_address) : old_pair;
this->insert(*AllocateRegion(inserted_region_end, old_last, after_pair, old_attr, old_type));
}
return true;
}
void KMemoryLayout::InitializeLinearMemoryRegionTrees() {
/* Initialize linear trees. */
for (auto ®ion : GetPhysicalMemoryRegionTree()) {
if (region.HasTypeAttribute(KMemoryRegionAttr_LinearMapped)) {
GetPhysicalLinearMemoryRegionTree().InsertDirectly(region.GetAddress(), region.GetLastAddress(), region.GetAttributes(), region.GetType());
}
}
for (auto ®ion : GetVirtualMemoryRegionTree()) {
if (region.IsDerivedFrom(KMemoryRegionType_Dram)) {
GetVirtualLinearMemoryRegionTree().InsertDirectly(region.GetAddress(), region.GetLastAddress(), region.GetAttributes(), region.GetType());
}
}
}
size_t KMemoryLayout::GetResourceRegionSizeForInit(bool use_extra_resource) {
return KernelResourceSize + KSystemControl::SecureAppletMemorySize + (use_extra_resource ? KernelSlabHeapAdditionalSize + KernelPageBufferAdditionalSize : 0);
}
}
| 5,680 | C++ | .cpp | 109 | 42.211009 | 166 | 0.63007 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,936 | kern_k_thread_queue.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_thread_queue.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
void KThreadQueue::NotifyAvailable(KThread *waiting_thread, KSynchronizationObject *signaled_object, Result wait_result) {
MESOSPHERE_UNUSED(waiting_thread, signaled_object, wait_result);
MESOSPHERE_PANIC("KThreadQueue::NotifyAvailable\n");
}
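/* EndWait and CancelWait both wake the thread with the given wait result; CancelWait */
/* additionally lets the caller choose whether the thread's timer task is cancelled. */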
void KThreadQueue::EndWait(KThread *waiting_thread, Result wait_result) {
/* Set the thread's wait result. */
waiting_thread->SetWaitResult(wait_result);
/* Set the thread as runnable. */
waiting_thread->SetState(KThread::ThreadState_Runnable);
/* Clear the thread's wait queue. */
waiting_thread->ClearWaitQueue();
/* Cancel the thread task. */
if (m_hardware_timer != nullptr) {
m_hardware_timer->CancelTask(waiting_thread);
}
}
void KThreadQueue::CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) {
/* Set the thread's wait result. */
waiting_thread->SetWaitResult(wait_result);
/* Set the thread as runnable. */
waiting_thread->SetState(KThread::ThreadState_Runnable);
/* Clear the thread's wait queue. */
waiting_thread->ClearWaitQueue();
/* Cancel the thread task. */
if (cancel_timer_task && m_hardware_timer != nullptr) {
m_hardware_timer->CancelTask(waiting_thread);
}
}
void KThreadQueueWithoutEndWait::EndWait(KThread *waiting_thread, Result wait_result) {
MESOSPHERE_UNUSED(waiting_thread, wait_result);
MESOSPHERE_PANIC("KThreadQueueWithoutEndWait::EndWait\n");
}
}
| 2,279 | C++ | .cpp | 50 | 39.42 | 126 | 0.692828 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,937 | kern_k_interrupt_task_manager.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_interrupt_task_manager.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
void KInterruptTaskManager::TaskQueue::Enqueue(KInterruptTask *task) {
MESOSPHERE_ASSERT(task != m_head);
MESOSPHERE_ASSERT(task != m_tail);
MESOSPHERE_AUDIT(task->GetNextTask() == nullptr);
/* Insert the task into the queue. */
if (m_tail != nullptr) {
m_tail->SetNextTask(task);
} else {
m_head = task;
}
m_tail = task;
/* Set the next task for auditing. */
#if defined (MESOSPHERE_BUILD_FOR_AUDITING)
task->SetNextTask(GetDummyInterruptTask());
#endif
}
void KInterruptTaskManager::TaskQueue::Dequeue() {
MESOSPHERE_ASSERT(m_head != nullptr);
MESOSPHERE_ASSERT(m_tail != nullptr);
MESOSPHERE_AUDIT(m_tail->GetNextTask() == GetDummyInterruptTask());
/* Pop the task from the front of the queue. */
KInterruptTask *old_head = m_head;
if (m_head == m_tail) {
m_head = nullptr;
m_tail = nullptr;
} else {
m_head = m_head->GetNextTask();
}
#if defined (MESOSPHERE_BUILD_FOR_AUDITING)
old_head->SetNextTask(nullptr);
#else
AMS_UNUSED(old_head);
#endif
}
void KInterruptTaskManager::EnqueueTask(KInterruptTask *task) {
MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled());
/* Enqueue the task and signal the scheduler. */
m_task_queue.Enqueue(task);
Kernel::GetScheduler().SetInterruptTaskRunnable();
}
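/* DoTasks drains the queue: each task is dequeued with interrupts disabled, then run */
/* with interrupts temporarily enabled so that task execution does not block further */
/* interrupts. The elapsed ticks are accumulated into the manager's cpu time. */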
void KInterruptTaskManager::DoTasks() {
/* Execute pending tasks. */
const s64 start_time = KHardwareTimer::GetTick();
for (KInterruptTask *task = m_task_queue.GetHead(); task != nullptr; task = m_task_queue.GetHead()) {
/* Dequeue the task. */
m_task_queue.Dequeue();
/* Do the task with interrupts temporarily enabled. */
{
KScopedInterruptEnable ei;
task->DoTask();
}
}
const s64 end_time = KHardwareTimer::GetTick();
/* Increment the time we've spent executing. */
m_cpu_time += end_time - start_time;
}
}
| 2,889 | C++ | .cpp | 74 | 31.256757 | 109 | 0.625223 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,938 | kern_k_class_token.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_class_token.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
/* Ensure that we generate correct class tokens for all types. */
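/* As the assertions below verify, the low byte of a token accumulates one bit per base */
/* class in the derivation chain, while each final class contributes a distinct pattern */
/* in the high byte; a token therefore contains all bits of each of its bases' tokens. */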
/* Ensure that the absolute token values are correct. */
static_assert(ClassToken<KAutoObject> == 0b00000000'00000000);
static_assert(ClassToken<KSynchronizationObject> == 0b00000000'00000001);
static_assert(ClassToken<KReadableEvent> == 0b00000000'00000011);
static_assert(ClassToken<KInterruptEvent> == 0b00000111'00000011);
static_assert(ClassToken<KDebug> == 0b00001011'00000001);
static_assert(ClassToken<KThread> == 0b00010011'00000001);
static_assert(ClassToken<KServerPort> == 0b00100011'00000001);
static_assert(ClassToken<KServerSession> == 0b01000011'00000001);
static_assert(ClassToken<KClientPort> == 0b10000011'00000001);
static_assert(ClassToken<KClientSession> == 0b00001101'00000000);
static_assert(ClassToken<KProcess> == 0b00010101'00000001);
static_assert(ClassToken<KResourceLimit> == 0b00100101'00000000);
static_assert(ClassToken<KLightSession> == 0b01000101'00000000);
static_assert(ClassToken<KPort> == 0b10000101'00000000);
static_assert(ClassToken<KSession> == 0b00011001'00000000);
static_assert(ClassToken<KSharedMemory> == 0b00101001'00000000);
static_assert(ClassToken<KEvent> == 0b01001001'00000000);
static_assert(ClassToken<KLightClientSession> == 0b10001001'00000000);
static_assert(ClassToken<KLightServerSession> == 0b00110001'00000000);
static_assert(ClassToken<KTransferMemory> == 0b01010001'00000000);
static_assert(ClassToken<KDeviceAddressSpace> == 0b10010001'00000000);
static_assert(ClassToken<KSessionRequest> == 0b01100001'00000000);
static_assert(ClassToken<KCodeMemory> == 0b10100001'00000000);
static_assert(ClassToken<KIoPool> == 0b11000001'00000000);
static_assert(ClassToken<KIoRegion> == 0b00001110'00000000);
/* 0b00010110'00000000 */
/* 0b00100110'00000000 */
static_assert(ClassToken<KSystemResource> == 0b01000110'00000000);
/* Ensure that the token hierarchy is correct. */
/* Base classes */
static_assert(ClassToken<KAutoObject> == (0b00000000));
static_assert(ClassToken<KSynchronizationObject> == (0b00000001 | ClassToken<KAutoObject>));
static_assert(ClassToken<KReadableEvent> == (0b00000010 | ClassToken<KSynchronizationObject>));
/* Final classes */
static_assert(ClassToken<KInterruptEvent> == ((0b00000111 << 8) | ClassToken<KReadableEvent>));
static_assert(ClassToken<KDebug> == ((0b00001011 << 8) | ClassToken<KSynchronizationObject>));
static_assert(ClassToken<KThread> == ((0b00010011 << 8) | ClassToken<KSynchronizationObject>));
static_assert(ClassToken<KServerPort> == ((0b00100011 << 8) | ClassToken<KSynchronizationObject>));
static_assert(ClassToken<KServerSession> == ((0b01000011 << 8) | ClassToken<KSynchronizationObject>));
static_assert(ClassToken<KClientPort> == ((0b10000011 << 8) | ClassToken<KSynchronizationObject>));
static_assert(ClassToken<KClientSession> == ((0b00001101 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KProcess> == ((0b00010101 << 8) | ClassToken<KSynchronizationObject>));
static_assert(ClassToken<KResourceLimit> == ((0b00100101 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KLightSession> == ((0b01000101 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KPort> == ((0b10000101 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KSession> == ((0b00011001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KSharedMemory> == ((0b00101001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KEvent> == ((0b01001001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KLightClientSession> == ((0b10001001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KLightServerSession> == ((0b00110001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KTransferMemory> == ((0b01010001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KDeviceAddressSpace> == ((0b10010001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KSessionRequest> == ((0b01100001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KCodeMemory> == ((0b10100001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KIoPool> == ((0b11000001 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KIoRegion> == ((0b00001110 << 8) | ClassToken<KAutoObject>));
static_assert(ClassToken<KSystemResource> == ((0b01000110 << 8) | ClassToken<KAutoObject>));
/* Ensure that the token hierarchy reflects the class hierarchy. */
/* Base classes. */
static_assert(!std::is_final<KSynchronizationObject>::value && std::is_base_of<KAutoObject, KSynchronizationObject>::value);
static_assert(!std::is_final<KReadableEvent>::value && std::is_base_of<KSynchronizationObject, KReadableEvent>::value);
/* Final classes */
static_assert(std::is_final<KInterruptEvent>::value && std::is_base_of<KReadableEvent, KInterruptEvent>::value);
static_assert(std::is_final<KDebug>::value && std::is_base_of<KSynchronizationObject, KDebug>::value);
static_assert(std::is_final<KThread>::value && std::is_base_of<KSynchronizationObject, KThread>::value);
static_assert(std::is_final<KServerPort>::value && std::is_base_of<KSynchronizationObject, KServerPort>::value);
static_assert(std::is_final<KServerSession>::value && std::is_base_of<KSynchronizationObject, KServerSession>::value);
static_assert(std::is_final<KClientPort>::value && std::is_base_of<KSynchronizationObject, KClientPort>::value);
static_assert(std::is_final<KClientSession>::value && std::is_base_of<KAutoObject, KClientSession>::value);
static_assert(std::is_final<KProcess>::value && std::is_base_of<KSynchronizationObject, KProcess>::value);
static_assert(std::is_final<KResourceLimit>::value && std::is_base_of<KAutoObject, KResourceLimit>::value);
static_assert(std::is_final<KLightSession>::value && std::is_base_of<KAutoObject, KLightSession>::value);
static_assert(std::is_final<KPort>::value && std::is_base_of<KAutoObject, KPort>::value);
static_assert(std::is_final<KSession>::value && std::is_base_of<KAutoObject, KSession>::value);
static_assert(std::is_final<KSharedMemory>::value && std::is_base_of<KAutoObject, KSharedMemory>::value);
static_assert(std::is_final<KEvent>::value && std::is_base_of<KAutoObject, KEvent>::value);
static_assert(std::is_final<KLightClientSession>::value && std::is_base_of<KAutoObject, KLightClientSession>::value);
static_assert(std::is_final<KLightServerSession>::value && std::is_base_of<KAutoObject, KLightServerSession>::value);
static_assert(std::is_final<KTransferMemory>::value && std::is_base_of<KAutoObject, KTransferMemory>::value);
static_assert(std::is_final<KDeviceAddressSpace>::value && std::is_base_of<KAutoObject, KDeviceAddressSpace>::value);
static_assert(std::is_final<KSessionRequest>::value && std::is_base_of<KAutoObject, KSessionRequest>::value);
static_assert(std::is_final<KCodeMemory>::value && std::is_base_of<KAutoObject, KCodeMemory>::value);
static_assert(std::is_final<KIoPool>::value && std::is_base_of<KAutoObject, KIoPool>::value);
static_assert(std::is_final<KIoRegion>::value && std::is_base_of<KAutoObject, KIoRegion>::value);
static_assert(std::is_base_of<KAutoObject, KSystemResource>::value);
}
| 9,142 | C++ | .cpp | 105 | 82.485714 | 133 | 0.656988 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,939 | kern_k_memory_manager.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_memory_manager.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
if ((type | KMemoryRegionType_DramApplicationPool) == type) {
return KMemoryManager::Pool_Application;
} else if ((type | KMemoryRegionType_DramAppletPool) == type) {
return KMemoryManager::Pool_Applet;
} else if ((type | KMemoryRegionType_DramSystemPool) == type) {
return KMemoryManager::Pool_System;
} else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
return KMemoryManager::Pool_SystemNonSecure;
} else {
MESOSPHERE_PANIC("InvalidMemoryRegionType for conversion to Pool");
}
}
}
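/* Initialize proceeds in three passes: create one manager per DramUserPool region and */
/* link the managers into per-pool lists, free each region to its manager's heap while */
/* keeping the initial process binary reserved, and record per-pool minimum allocation */
/* alignments from the provided shifts. */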
void KMemoryManager::Initialize(KVirtualAddress management_region, size_t management_region_size, const u32 *min_align_shifts) {
/* Clear the management region to zero. */
const KVirtualAddress management_region_end = management_region + management_region_size;
std::memset(GetVoidPointer(management_region), 0, management_region_size);
/* Reset our manager count. */
m_num_managers = 0;
/* Traverse the virtual memory layout tree, initializing each manager as appropriate. */
while (m_num_managers != MaxManagerCount) {
/* Locate the region that should initialize the current manager. */
KPhysicalAddress region_address = Null<KPhysicalAddress>;
size_t region_size = 0;
Pool region_pool = Pool_Count;
for (const auto &it : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
/* We only care about regions that we need to create managers for. */
if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
continue;
}
/* We want to initialize the managers in order. */
if (it.GetAttributes() != m_num_managers) {
continue;
}
const KPhysicalAddress cur_start = it.GetAddress();
const KPhysicalAddress cur_end = it.GetEndAddress();
/* Validate the region. */
MESOSPHERE_ABORT_UNLESS(cur_end != Null<KPhysicalAddress>);
MESOSPHERE_ASSERT(cur_start != Null<KPhysicalAddress>);
MESOSPHERE_ASSERT(it.GetSize() > 0);
/* Update the region's extents. */
if (region_address == Null<KPhysicalAddress>) {
region_address = cur_start;
region_size = it.GetSize();
region_pool = GetPoolFromMemoryRegionType(it.GetType());
} else {
MESOSPHERE_ASSERT(cur_start == region_address + region_size);
/* Update the size. */
region_size = cur_end - region_address;
MESOSPHERE_ABORT_UNLESS(GetPoolFromMemoryRegionType(it.GetType()) == region_pool);
}
}
/* If we didn't find a region, we're done. */
if (region_size == 0) {
break;
}
/* Initialize a new manager for the region. */
Impl *manager = std::addressof(m_managers[m_num_managers++]);
MESOSPHERE_ABORT_UNLESS(m_num_managers <= util::size(m_managers));
const size_t cur_size = manager->Initialize(region_address, region_size, management_region, management_region_end, region_pool);
management_region += cur_size;
MESOSPHERE_ABORT_UNLESS(management_region <= management_region_end);
/* Insert the manager into the pool list. */
if (m_pool_managers_tail[region_pool] == nullptr) {
m_pool_managers_head[region_pool] = manager;
} else {
m_pool_managers_tail[region_pool]->SetNext(manager);
manager->SetPrev(m_pool_managers_tail[region_pool]);
}
m_pool_managers_tail[region_pool] = manager;
}
/* Free each region to its corresponding heap. */
size_t reserved_sizes[MaxManagerCount] = {};
const KPhysicalAddress ini_start = GetInitialProcessBinaryPhysicalAddress();
const size_t ini_size = GetInitialProcessBinarySize();
const KPhysicalAddress ini_end = ini_start + ini_size;
const KPhysicalAddress ini_last = ini_end - 1;
for (const auto &it : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
/* Get the manager for the region. */
auto &manager = m_managers[it.GetAttributes()];
const KPhysicalAddress cur_start = it.GetAddress();
const KPhysicalAddress cur_last = it.GetLastAddress();
const KPhysicalAddress cur_end = it.GetEndAddress();
if (cur_start <= ini_start && ini_last <= cur_last) {
/* Free memory before the ini to the heap. */
if (cur_start != ini_start) {
manager.Free(cur_start, (ini_start - cur_start) / PageSize);
}
/* Open/reserve the ini memory. */
manager.OpenFirst(ini_start, ini_size / PageSize);
reserved_sizes[it.GetAttributes()] += ini_size;
/* Free memory after the ini to the heap. */
if (ini_last != cur_last) {
MESOSPHERE_ABORT_UNLESS(cur_end != Null<KPhysicalAddress>);
manager.Free(ini_end, (cur_end - ini_end) / PageSize);
}
} else {
/* Ensure there's no partial overlap with the ini image. */
if (cur_start <= ini_last) {
MESOSPHERE_ABORT_UNLESS(cur_last < ini_start);
} else {
/* Otherwise, check the region for general validity. */
MESOSPHERE_ABORT_UNLESS(cur_end != Null<KPhysicalAddress>);
}
/* Free the memory to the heap. */
manager.Free(cur_start, it.GetSize() / PageSize);
}
}
}
/* Update the used size for all managers. */
for (size_t i = 0; i < m_num_managers; ++i) {
m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
}
/* Determine the min heap size for all pools. */
for (size_t i = 0; i < Pool_Count; ++i) {
/* Determine the min alignment for the pool in pages. */
const size_t min_align_pages = 1 << min_align_shifts[i];
/* Determine a heap index. */
if (const auto heap_index = KPageHeap::GetAlignedBlockIndex(min_align_pages, min_align_pages); heap_index >= 0) {
m_min_heap_indexes[i] = heap_index;
}
}
}
Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
/* Lock the pool. */
KScopedLightLock lk(m_pool_locks[pool]);
/* Check that we don't already have an optimized process. */
R_UNLESS(!m_has_optimized_process[pool], svc::ResultBusy());
/* Set the optimized process id. */
m_optimized_process_ids[pool] = process_id;
m_has_optimized_process[pool] = true;
/* Clear the management area for the optimized process. */
for (auto *manager = this->GetFirstManager(pool, Direction_FromFront); manager != nullptr; manager = this->GetNextManager(manager, Direction_FromFront)) {
manager->InitializeOptimizedMemory();
}
R_SUCCEED();
}
void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
/* Lock the pool. */
KScopedLightLock lk(m_pool_locks[pool]);
/* If the process was optimized, clear it. */
if (m_has_optimized_process[pool] && m_optimized_process_ids[pool] == process_id) {
m_has_optimized_process[pool] = false;
}
}
KPhysicalAddress KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
/* Early return if we're allocating no pages. */
if (num_pages == 0) {
return Null<KPhysicalAddress>;
}
/* Determine the pool and direction we're allocating from. */
const auto [pool, dir] = DecodeOption(option);
/* Check that we're allocating a correctly aligned number of pages. */
const size_t min_align_pages = KPageHeap::GetBlockNumPages(m_min_heap_indexes[pool]);
if (!util::IsAligned(num_pages, min_align_pages)) {
return Null<KPhysicalAddress>;
}
/* Update our alignment. */
align_pages = std::max(align_pages, min_align_pages);
/* Lock the pool that we're allocating from. */
KScopedLightLock lk(m_pool_locks[pool]);
/* Choose a heap based on our page size request. */
const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);
/* Loop, trying to iterate from each block. */
Impl *chosen_manager = nullptr;
KPhysicalAddress allocated_block = Null<KPhysicalAddress>;
for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr; chosen_manager = this->GetNextManager(chosen_manager, dir)) {
allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
if (allocated_block != Null<KPhysicalAddress>) {
break;
}
}
/* If we failed to allocate, quit now. */
if (allocated_block == Null<KPhysicalAddress>) {
return Null<KPhysicalAddress>;
}
/* Maintain the optimized memory bitmap, if we should. */
if (m_has_optimized_process[pool]) {
chosen_manager->TrackUnoptimizedAllocation(allocated_block, num_pages);
}
/* Open the first reference to the pages. */
chosen_manager->OpenFirst(allocated_block, num_pages);
return allocated_block;
}
Result KMemoryManager::AllocatePageGroupImpl(KPageGroup *out, size_t num_pages, Pool pool, Direction dir, bool unoptimized, bool random, s32 min_heap_index) {
/* Check that we're allocating a correctly aligned number of pages. */
const size_t min_align_pages = KPageHeap::GetBlockNumPages(m_min_heap_indexes[pool]);
R_UNLESS(util::IsAligned(num_pages, min_align_pages), svc::ResultInvalidSize());
/* Adjust our min heap index to the pool minimum if needed. */
min_heap_index = std::max(min_heap_index, m_min_heap_indexes[pool]);
/* Choose a heap based on our page size request. */
const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());
/* Ensure that we don't leave anything un-freed. */
ON_RESULT_FAILURE {
for (const auto &it : *out) {
auto &manager = this->GetManager(it.GetAddress());
const size_t num_pages = std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
manager.Free(it.GetAddress(), num_pages);
}
out->Finalize();
};
/* Keep allocating until we've allocated all our pages. */
for (s32 index = heap_index; index >= min_heap_index && num_pages > 0; index--) {
const size_t pages_per_alloc = KPageHeap::GetBlockNumPages(index);
for (Impl *cur_manager = this->GetFirstManager(pool, dir); cur_manager != nullptr; cur_manager = this->GetNextManager(cur_manager, dir)) {
while (num_pages >= pages_per_alloc) {
/* Allocate a block. */
KPhysicalAddress allocated_block = cur_manager->AllocateBlock(index, random);
if (allocated_block == Null<KPhysicalAddress>) {
break;
}
/* Ensure we don't leak the block if we fail. */
ON_RESULT_FAILURE { cur_manager->Free(allocated_block, pages_per_alloc); };
/* Add the block to our group. */
R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
/* Maintain the optimized memory bitmap, if we should. */
if (unoptimized) {
cur_manager->TrackUnoptimizedAllocation(allocated_block, pages_per_alloc);
}
num_pages -= pages_per_alloc;
}
}
}
/* Only succeed if we allocated as many pages as we wanted. */
R_UNLESS(num_pages == 0, svc::ResultOutOfMemory());
/* We succeeded! */
R_SUCCEED();
}
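    /* Editorial sketch (hedged; not part of the original source): the loop above decomposes */
    /* a request greedily, serving it from the largest usable block size downward. Worked */
    /* illustration with hypothetical block sizes of 16, 4, and 1 pages covering 23 pages: */
    namespace editorial_greedy_decomposition_sketch {
        constexpr unsigned Remaining(unsigned num_pages, unsigned block_pages) {
            /* Pages left after taking as many whole blocks of this size as fit. */
            return num_pages % block_pages;
        }
        static_assert(Remaining(23, 16) == 7, "one 16-page block leaves 7 pages");
        static_assert(Remaining( 7,  4) == 3, "one 4-page block leaves 3 pages");
        static_assert(Remaining( 3,  1) == 0, "1-page blocks finish the request");
    }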
Result KMemoryManager::AllocateAndOpen(KPageGroup *out, size_t num_pages, size_t align_pages, u32 option) {
MESOSPHERE_ASSERT(out != nullptr);
MESOSPHERE_ASSERT(out->GetNumPages() == 0);
/* Early return if we're allocating no pages. */
R_SUCCEED_IF(num_pages == 0);
/* Lock the pool that we're allocating from. */
const auto [pool, dir] = DecodeOption(option);
KScopedLightLock lk(m_pool_locks[pool]);
/* Choose a heap based on our alignment size request. */
const s32 heap_index = KPageHeap::GetAlignedBlockIndex(align_pages, align_pages);
/* Allocate the page group. */
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, m_has_optimized_process[pool], true, heap_index));
/* Open the first reference to the pages. */
for (const auto &block : *out) {
KPhysicalAddress cur_address = block.GetAddress();
size_t remaining_pages = block.GetNumPages();
while (remaining_pages > 0) {
/* Get the manager for the current address. */
auto &manager = this->GetManager(cur_address);
/* Process part or all of the block. */
const size_t cur_pages = std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
manager.OpenFirst(cur_address, cur_pages);
/* Advance. */
cur_address += cur_pages * PageSize;
remaining_pages -= cur_pages;
}
}
R_SUCCEED();
}
Result KMemoryManager::AllocateForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern) {
MESOSPHERE_ASSERT(out != nullptr);
MESOSPHERE_ASSERT(out->GetNumPages() == 0);
/* Decode the option. */
const auto [pool, dir] = DecodeOption(option);
/* Allocate the memory. */
bool optimized;
{
/* Lock the pool that we're allocating from. */
KScopedLightLock lk(m_pool_locks[pool]);
/* Check if we have an optimized process. */
const bool has_optimized = m_has_optimized_process[pool];
const bool is_optimized = m_optimized_process_ids[pool] == process_id;
/* Always use the minimum alignment size. */
const s32 heap_index = 0;
/* Allocate the page group. */
R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized, false, heap_index));
/* Set whether we should optimize. */
optimized = has_optimized && is_optimized;
}
/* Perform optimized memory tracking, if we should. */
if (optimized) {
/* Iterate over the allocated blocks. */
for (const auto &block : *out) {
/* Get the block extents. */
const KPhysicalAddress block_address = block.GetAddress();
const size_t block_pages = block.GetNumPages();
/* If it has no pages, we don't need to do anything. */
if (block_pages == 0) {
continue;
}
/* Fill all the pages that we need to fill. */
bool any_new = false;
{
KPhysicalAddress cur_address = block_address;
size_t remaining_pages = block_pages;
while (remaining_pages > 0) {
/* Get the manager for the current address. */
auto &manager = this->GetManager(cur_address);
/* Process part or all of the block. */
const size_t cur_pages = std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
any_new = manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern);
/* Advance. */
cur_address += cur_pages * PageSize;
remaining_pages -= cur_pages;
}
}
/* If there are new pages, update tracking for the allocation. */
if (any_new) {
/* Update tracking for the allocation. */
KPhysicalAddress cur_address = block_address;
size_t remaining_pages = block_pages;
while (remaining_pages > 0) {
/* Get the manager for the current address. */
auto &manager = this->GetManager(cur_address);
/* Lock the pool for the manager. */
KScopedLightLock lk(m_pool_locks[manager.GetPool()]);
/* Track some or all of the current pages. */
const size_t cur_pages = std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
manager.TrackOptimizedAllocation(cur_address, cur_pages);
/* Advance. */
cur_address += cur_pages * PageSize;
remaining_pages -= cur_pages;
}
}
}
} else {
/* Set all the allocated memory. */
for (const auto &block : *out) {
std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block.GetAddress())), fill_pattern, block.GetSize());
}
}
R_SUCCEED();
}
size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p) {
/* Calculate management sizes. */
const size_t ref_count_size = (size / PageSize) * sizeof(u16);
const size_t optimize_map_size = CalculateOptimizedProcessOverheadSize(size);
const size_t manager_size = util::AlignUp(optimize_map_size + ref_count_size, PageSize);
const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(size);
const size_t total_management_size = manager_size + page_heap_size;
MESOSPHERE_ABORT_UNLESS(manager_size <= total_management_size);
MESOSPHERE_ABORT_UNLESS(management + total_management_size <= management_end);
MESOSPHERE_ABORT_UNLESS(util::IsAligned(total_management_size, PageSize));
/* Setup region. */
m_pool = p;
m_management_region = management;
m_page_reference_counts = GetPointer<RefCount>(management + optimize_map_size);
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(m_management_region), PageSize));
/* Initialize the manager's KPageHeap. */
m_heap.Initialize(address, size, management + manager_size, page_heap_size);
return total_management_size;
}
void KMemoryManager::Impl::TrackUnoptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
/* Get the range we're tracking. */
size_t offset = this->GetPageOffset(block);
const size_t last = offset + num_pages - 1;
/* Track. */
u64 *optimize_map = GetPointer<u64>(m_management_region);
while (offset <= last) {
/* Mark the page as not being optimized-allocated. */
optimize_map[offset / BITSIZEOF(u64)] &= ~(u64(1) << (offset % BITSIZEOF(u64)));
offset++;
}
}
void KMemoryManager::Impl::TrackOptimizedAllocation(KPhysicalAddress block, size_t num_pages) {
/* Get the range we're tracking. */
size_t offset = this->GetPageOffset(block);
const size_t last = offset + num_pages - 1;
/* Track. */
u64 *optimize_map = GetPointer<u64>(m_management_region);
while (offset <= last) {
/* Mark the page as being optimized-allocated. */
optimize_map[offset / BITSIZEOF(u64)] |= (u64(1) << (offset % BITSIZEOF(u64)));
offset++;
}
}
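    /* Editorial sketch (hedged; not part of the original source): the optimize map above is */
    /* a flat bitmap with one bit per page; page offset N lives in 64-bit word N / 64 at bit */
    /* N % 64. Standalone check of that indexing arithmetic: */
    namespace editorial_bitmap_index_sketch {
        constexpr unsigned           WordFor(unsigned offset) { return offset / 64; }
        constexpr unsigned long long BitFor(unsigned offset)  { return 1ull << (offset % 64); }
        static_assert(WordFor(130) == 2 && BitFor(130) == (1ull << 2), "page offset 130 -> word 2, bit 2");
    }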
bool KMemoryManager::Impl::ProcessOptimizedAllocation(KPhysicalAddress block, size_t num_pages, u8 fill_pattern) {
/* We want to return whether any pages were newly allocated. */
bool any_new = false;
/* Get the range we're processing. */
size_t offset = this->GetPageOffset(block);
const size_t last = offset + num_pages - 1;
/* Process. */
u64 *optimize_map = GetPointer<u64>(m_management_region);
while (offset <= last) {
/* Check if the page has been optimized-allocated before. */
if ((optimize_map[offset / BITSIZEOF(u64)] & (u64(1) << (offset % BITSIZEOF(u64)))) == 0) {
/* If not, it's new. */
any_new = true;
/* Fill the page. */
std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(m_heap.GetAddress()) + offset * PageSize), fill_pattern, PageSize);
}
offset++;
}
/* Return whether any pages were newly allocated. */
return any_new;
}
size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
const size_t optimize_map_size = (util::AlignUp((region_size / PageSize), BITSIZEOF(u64)) / BITSIZEOF(u64)) * sizeof(u64);
const size_t manager_meta_size = util::AlignUp(optimize_map_size + ref_count_size, PageSize);
const size_t page_heap_size = KPageHeap::CalculateManagementOverheadSize(region_size);
return manager_meta_size + page_heap_size;
}
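    /* Editorial sketch (hedged; not part of the original source): worked example of the */
    /* manager_meta_size arithmetic above (the KPageHeap overhead is computed separately), */
    /* assuming a hypothetical 64 MiB region, 4 KiB pages, u16 refcounts, u64 bitmap words: */
    namespace editorial_overhead_sketch {
        constexpr unsigned long long AlignUp(unsigned long long v, unsigned long long a) { return ((v + a - 1) / a) * a; }
        constexpr unsigned long long NumPages    = (64ull << 20) / 4096;             /* 16384 pages */
        constexpr unsigned long long RefCounts   = NumPages * 2;                     /* 32768 bytes */
        constexpr unsigned long long OptimizeMap = (AlignUp(NumPages, 64) / 64) * 8; /*  2048 bytes */
        static_assert(AlignUp(OptimizeMap + RefCounts, 4096) == 36864, "meta rounds up to nine 4 KiB pages");
    }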
}
| 23,321
|
C++
|
.cpp
| 424
| 42.037736
| 162
| 0.583648
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,940
|
kern_k_object_name.cpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_object_name.cpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
constinit KLightLock g_object_list_lock;
constinit KObjectName::List g_object_list;
}
void KObjectName::Initialize(KAutoObject *obj, const char *name) {
/* Set member variables. */
m_object = obj;
std::strncpy(m_name, name, sizeof(m_name));
m_name[sizeof(m_name) - 1] = '\x00';
/* Open a reference to the object we hold. */
m_object->Open();
}
bool KObjectName::MatchesName(const char *name) const {
return std::strncmp(m_name, name, sizeof(m_name)) == 0;
}
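    /* Editorial sketch (hedged; not part of the original source): Initialize/MatchesName */
    /* above use a fixed-size name buffer -- strncpy truncates to sizeof(m_name), the last */
    /* byte is forced to NUL, and strncmp compares at most that many bytes. Standalone */
    /* illustration of the bounded comparison, with a hypothetical 4-byte buffer: */
    namespace editorial_fixed_name_sketch {
        constexpr bool Matches(const char (&a)[4], const char (&b)[4]) {
            for (int i = 0; i < 4; ++i) {
                if (a[i] != b[i]) { return false; }
                if (a[i] == '\0') { return true; }
            }
            return true;
        }
        static_assert(Matches("abc", "abc"), "identical short names match");
    }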
Result KObjectName::NewFromName(KAutoObject *obj, const char *name) {
/* Create a new object name. */
KObjectName *new_name = KObjectName::Allocate();
R_UNLESS(new_name != nullptr, svc::ResultOutOfResource());
/* Initialize the new name. */
new_name->Initialize(obj, name);
/* Check if there's an existing name. */
{
/* Ensure we have exclusive access to the global list. */
KScopedLightLock lk(g_object_list_lock);
/* If the object doesn't exist, put it into the list. */
KScopedAutoObject existing_object = FindImpl(name);
if (existing_object.IsNull()) {
g_object_list.push_back(*new_name);
R_SUCCEED();
}
}
/* The object already exists, which is an error condition. Perform cleanup. */
obj->Close();
KObjectName::Free(new_name);
R_THROW(svc::ResultInvalidState());
}
Result KObjectName::Delete(KAutoObject *obj, const char *compare_name) {
/* Ensure we have exclusive access to the global list. */
KScopedLightLock lk(g_object_list_lock);
/* Find a matching entry in the list, and delete it. */
for (auto &name : g_object_list) {
if (name.MatchesName(compare_name) && obj == name.GetObject()) {
/* We found a match, clean up its resources. */
obj->Close();
g_object_list.erase(g_object_list.iterator_to(name));
KObjectName::Free(std::addressof(name));
R_SUCCEED();
}
}
/* We didn't find the object in the list. */
R_THROW(svc::ResultNotFound());
}
KScopedAutoObject<KAutoObject> KObjectName::Find(const char *name) {
/* Ensure we have exclusive access to the global list. */
KScopedLightLock lk(g_object_list_lock);
return FindImpl(name);
}
KScopedAutoObject<KAutoObject> KObjectName::FindImpl(const char *compare_name) {
/* Try to find a matching object in the global list. */
for (const auto &name : g_object_list) {
if (name.MatchesName(compare_name)) {
return name.GetObject();
}
}
/* There's no matching entry in the list. */
return nullptr;
}
}
| 3,619
|
C++
|
.cpp
| 86
| 33.802326
| 86
| 0.617706
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,941
|
kern_k_system_control_base.cpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_system_control_base.cpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#if defined(ATMOSPHERE_ARCH_ARM64)
#include <mesosphere/arch/arm64/kern_secure_monitor_base.hpp>
#endif
namespace ams::kern {
namespace init {
/* TODO: Is this function name architecture specific? */
void StartOtherCore(const ams::kern::init::KInitArguments *init_args);
}
/* Initialization. */
size_t KSystemControlBase::Init::GetRealMemorySize() {
return ams::kern::MainMemorySize;
}
size_t KSystemControlBase::Init::GetIntendedMemorySize() {
return ams::kern::MainMemorySize;
}
KPhysicalAddress KSystemControlBase::Init::GetKernelPhysicalBaseAddress(KPhysicalAddress base_address) {
const size_t real_dram_size = KSystemControl::Init::GetRealMemorySize();
const size_t intended_dram_size = KSystemControl::Init::GetIntendedMemorySize();
if (intended_dram_size * 2 <= real_dram_size) {
return base_address;
} else {
return base_address + ((real_dram_size - intended_dram_size) / 2);
}
}
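    /* Editorial sketch (hedged; not part of the original source): worked examples of the */
    /* centering rule above with hypothetical sizes. When intended * 2 <= real the base is */
    /* unchanged; otherwise the kernel is shifted up by half of the surplus DRAM: */
    namespace editorial_base_address_sketch {
        constexpr unsigned long long Center(unsigned long long base, unsigned long long real, unsigned long long intended) {
            return (intended * 2 <= real) ? base : base + ((real - intended) / 2);
        }
        static_assert(Center(0x80000000ull, 8ull << 30, 4ull << 30) == 0x80000000ull,                "4 GiB of 8 GiB: unchanged");
        static_assert(Center(0x80000000ull, 6ull << 30, 4ull << 30) == 0x80000000ull + (1ull << 30), "4 GiB of 6 GiB: +1 GiB");
    }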
void KSystemControlBase::Init::GetInitialProcessBinaryLayout(InitialProcessBinaryLayout *out, KPhysicalAddress kern_base_address) {
*out = {
.address = GetInteger(KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress)) + KSystemControl::Init::GetIntendedMemorySize() - InitialProcessBinarySizeMax,
._08 = 0,
.kern_address = GetInteger(kern_base_address),
};
}
bool KSystemControlBase::Init::ShouldIncreaseThreadResourceLimit() {
return true;
}
size_t KSystemControlBase::Init::GetApplicationPoolSize() {
return 0;
}
size_t KSystemControlBase::Init::GetAppletPoolSize() {
return 0;
}
size_t KSystemControlBase::Init::GetMinimumNonSecureSystemPoolSize() {
return 0;
}
u8 KSystemControlBase::Init::GetDebugLogUartPort() {
return 0;
}
void KSystemControlBase::Init::CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
#if defined(ATMOSPHERE_ARCH_ARM64)
MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<0>(core_id, entrypoint, arg)) == 0);
#else
AMS_INFINITE_LOOP();
#endif
}
void KSystemControlBase::Init::TurnOnCpu(u64 core_id, const ams::kern::init::KInitArguments *args) {
/* Get entrypoint. */
KPhysicalAddress entrypoint = Null<KPhysicalAddress>;
while (!cpu::GetPhysicalAddressReadable(std::addressof(entrypoint), reinterpret_cast<uintptr_t>(::ams::kern::init::StartOtherCore), true)) { /* ... */ }
/* Get arguments. */
KPhysicalAddress args_addr = Null<KPhysicalAddress>;
while (!cpu::GetPhysicalAddressReadable(std::addressof(args_addr), reinterpret_cast<uintptr_t>(args), true)) { /* ... */ }
/* Ensure cache is correct for the initial arguments. */
cpu::StoreDataCacheForInitArguments(args, sizeof(*args));
/* Turn on the cpu. */
KSystemControl::Init::CpuOnImpl(core_id, GetInteger(entrypoint), GetInteger(args_addr));
}
/* Randomness for Initialization. */
void KSystemControlBase::Init::GenerateRandom(u64 *dst, size_t count) {
if (AMS_UNLIKELY(!s_initialized_random_generator)) {
const u64 seed = KHardwareTimer::GetTick();
s_random_generator.Initialize(reinterpret_cast<const u32*>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
s_initialized_random_generator = true;
}
for (size_t i = 0; i < count; ++i) {
dst[i] = s_random_generator.GenerateRandomU64();
}
}
u64 KSystemControlBase::Init::GenerateRandomRange(u64 min, u64 max) {
if (AMS_UNLIKELY(!s_initialized_random_generator)) {
const u64 seed = KHardwareTimer::GetTick();
s_random_generator.Initialize(reinterpret_cast<const u32*>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
s_initialized_random_generator = true;
}
return KSystemControlBase::GenerateUniformRange(min, max, []() ALWAYS_INLINE_LAMBDA -> u64 { return s_random_generator.GenerateRandomU64(); });
}
/* System Initialization. */
void KSystemControlBase::ConfigureKTargetSystem() {
/* By default, use the default config set in the KTargetSystem header. */
}
void KSystemControlBase::InitializePhase1() {
/* Enable KTargetSystem. */
{
KTargetSystem::SetInitialized();
}
/* Initialize random and resource limit. */
KSystemControlBase::InitializePhase1Base(KHardwareTimer::GetTick());
}
void KSystemControlBase::InitializePhase1Base(u64 seed) {
/* Initialize the rng, if we somehow haven't already. */
if (AMS_UNLIKELY(!s_initialized_random_generator)) {
s_random_generator.Initialize(reinterpret_cast<const u32*>(std::addressof(seed)), sizeof(seed) / sizeof(u32));
s_initialized_random_generator = true;
}
/* Initialize debug logging. */
KDebugLog::Initialize();
/* System ResourceLimit initialization. */
{
/* Construct the resource limit object. */
KResourceLimit &sys_res_limit = Kernel::GetSystemResourceLimit();
KAutoObject::Create<KResourceLimit>(std::addressof(sys_res_limit));
sys_res_limit.Initialize();
/* Set the initial limits. */
const auto [total_memory_size, kernel_memory_size] = KMemoryLayout::GetTotalAndKernelMemorySizes();
/* Update 39-bit address space infos. */
{
/* Heap should be equal to the total memory size (aligned up to 1 GB), clamped to a minimum of 8 GB and a maximum of 32 GB. */
/* Alias should be equal to 8 * heap size, maximum 128 GB. */
const size_t heap_size = std::max(std::min(util::AlignUp(total_memory_size, 1_GB), 32_GB), 8_GB);
const size_t alias_size = std::min(heap_size * 8, 128_GB);
/* Set the address space sizes. */
KAddressSpaceInfo::SetAddressSpaceSize(39, KAddressSpaceInfo::Type_Heap, heap_size);
KAddressSpaceInfo::SetAddressSpaceSize(39, KAddressSpaceInfo::Type_Alias, alias_size);
}
const auto &slab_counts = init::GetSlabResourceCounts();
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_PhysicalMemoryMax, total_memory_size));
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_ThreadCountMax, slab_counts.num_KThread));
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_EventCountMax, slab_counts.num_KEvent));
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_TransferMemoryCountMax, slab_counts.num_KTransferMemory));
MESOSPHERE_R_ABORT_UNLESS(sys_res_limit.SetLimitValue(ams::svc::LimitableResource_SessionCountMax, slab_counts.num_KSession));
/* Reserve system memory. */
MESOSPHERE_ABORT_UNLESS(sys_res_limit.Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, kernel_memory_size));
}
}
void KSystemControlBase::InitializePhase2() {
/* Initialize KTrace. */
if constexpr (IsKTraceEnabled) {
const auto &ktrace = KMemoryLayout::GetKernelTraceBufferRegion();
KTrace::Initialize(ktrace.GetAddress(), ktrace.GetSize());
}
}
u32 KSystemControlBase::GetCreateProcessMemoryPool() {
return KMemoryManager::Pool_System;
}
/* Privileged Access. */
void KSystemControlBase::ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
/* TODO */
MESOSPHERE_UNUSED(out, address, mask, value);
MESOSPHERE_UNIMPLEMENTED();
}
Result KSystemControlBase::ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
MESOSPHERE_UNUSED(out, address, mask, value);
R_THROW(svc::ResultNotImplemented());
}
/* Randomness. */
void KSystemControlBase::GenerateRandom(u64 *dst, size_t count) {
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(s_random_lock);
for (size_t i = 0; i < count; ++i) {
dst[i] = s_random_generator.GenerateRandomU64();
}
}
u64 KSystemControlBase::GenerateRandomRange(u64 min, u64 max) {
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(s_random_lock);
return KSystemControlBase::GenerateUniformRange(min, max, []() ALWAYS_INLINE_LAMBDA -> u64 { return s_random_generator.GenerateRandomU64(); });
}
u64 KSystemControlBase::GenerateRandomU64() {
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(s_random_lock);
return s_random_generator.GenerateRandomU64();
}
void KSystemControlBase::SleepSystem() {
MESOSPHERE_LOG("SleepSystem() was called\n");
}
void KSystemControlBase::StopSystem(void *) {
MESOSPHERE_LOG("KSystemControlBase::StopSystem\n");
AMS_INFINITE_LOOP();
}
/* User access. */
#if defined(ATMOSPHERE_ARCH_ARM64)
void KSystemControlBase::CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
/* Get the function id for the current call. */
u64 function_id = args->r[0];
/* We'll need to map in pages if arguments are pointers. Prepare page groups to do so. */
auto &page_table = GetCurrentProcess().GetPageTable();
auto *bim = page_table.GetBlockInfoManager();
constexpr size_t MaxMappedRegisters = 7;
std::array<KPageGroup, MaxMappedRegisters> page_groups = { KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), KPageGroup(bim), };
for (size_t i = 0; i < MaxMappedRegisters; i++) {
const size_t reg_id = i + 1;
if (function_id & (1ul << (8 + reg_id))) {
/* Create and open a new page group for the address. */
KVirtualAddress virt_addr = args->r[reg_id];
if (R_SUCCEEDED(page_table.MakeAndOpenPageGroup(std::addressof(page_groups[i]), util::AlignDown(GetInteger(virt_addr), PageSize), 1, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserReadWrite, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryAttribute_None))) {
/* Translate the virtual address to a physical address. */
const auto it = page_groups[i].begin();
MESOSPHERE_ASSERT(it != page_groups[i].end());
MESOSPHERE_ASSERT(it->GetNumPages() == 1);
args->r[reg_id] = GetInteger(it->GetAddress()) | (GetInteger(virt_addr) & (PageSize - 1));
} else {
/* If we couldn't map, we should clear the address. */
args->r[reg_id] = 0;
}
}
}
/* Invoke the secure monitor. */
KSystemControl::CallSecureMonitorFromUserImpl(args);
/* Make sure that we close any pages that we opened. */
for (size_t i = 0; i < MaxMappedRegisters; i++) {
page_groups[i].Close();
}
}
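    /* Editorial sketch (hedged; not part of the original source): in the mapping loop above, */
    /* bit (8 + reg_id) of the function id marks register reg_id (1..7) as a pointer argument */
    /* that must be translated. Standalone check of that flag test with a hypothetical id: */
    namespace editorial_smc_flag_sketch {
        constexpr bool IsPointerRegister(unsigned long long function_id, unsigned reg_id) {
            return (function_id & (1ull << (8 + reg_id))) != 0;
        }
        static_assert( IsPointerRegister(0x200ull, 1), "bit 9 set: r1 carries a pointer");
        static_assert(!IsPointerRegister(0x200ull, 2), "bit 10 clear: r2 carries a value");
    }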
void KSystemControlBase::CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args) {
/* By default, we don't actually support secure monitor, so just set args to a failure code. */
args->r[0] = 1;
}
#endif
/* Secure Memory. */
size_t KSystemControlBase::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
MESOSPHERE_UNUSED(pool);
return size;
}
Result KSystemControlBase::AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool) {
/* Ensure the size is aligned. */
constexpr size_t Alignment = PageSize;
R_UNLESS(util::IsAligned(size, Alignment), svc::ResultInvalidSize());
/* Allocate the memory. */
const size_t num_pages = size / PageSize;
const KPhysicalAddress paddr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, Alignment / PageSize, KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool), KMemoryManager::Direction_FromFront));
R_UNLESS(paddr != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
*out = KPageTable::GetHeapVirtualAddress(paddr);
R_SUCCEED();
}
void KSystemControlBase::FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool) {
/* Ensure the size is aligned. */
constexpr size_t Alignment = PageSize;
MESOSPHERE_UNUSED(pool);
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(address), Alignment));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, Alignment));
/* Close the secure region's pages. */
Kernel::GetMemoryManager().Close(KPageTable::GetHeapPhysicalAddress(address), size / PageSize);
}
/* Insecure Memory. */
KResourceLimit *KSystemControlBase::GetInsecureMemoryResourceLimit() {
return std::addressof(Kernel::GetSystemResourceLimit());
}
u32 KSystemControlBase::GetInsecureMemoryPool() {
return KMemoryManager::Pool_SystemNonSecure;
}
}
| 14,038
|
C++
|
.cpp
| 265
| 44.188679
| 302
| 0.661949
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,942
|
kern_k_light_server_session.cpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_light_server_session.cpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
constexpr u64 InvalidThreadId = -1ull;
class ThreadQueueImplForKLightServerSessionRequest final : public KThreadQueue {
private:
KThread::WaiterList *m_wait_list;
public:
constexpr ThreadQueueImplForKLightServerSessionRequest(KThread::WaiterList *wl) : KThreadQueue(), m_wait_list(wl) { /* ... */ }
virtual void EndWait(KThread *waiting_thread, Result wait_result) override {
/* Remove the thread from our wait list. */
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
/* Invoke the base end wait handler. */
KThreadQueue::EndWait(waiting_thread, wait_result);
}
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Remove the thread from our wait list. */
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};
class ThreadQueueImplForKLightServerSessionReceive final : public KThreadQueue {
private:
KThread **m_server_thread;
public:
constexpr ThreadQueueImplForKLightServerSessionReceive(KThread **st) : KThreadQueue(), m_server_thread(st) { /* ... */ }
virtual void EndWait(KThread *waiting_thread, Result wait_result) override {
/* Clear the server thread. */
*m_server_thread = nullptr;
/* Set the waiting thread as not cancelable. */
waiting_thread->ClearCancellable();
/* Invoke the base end wait handler. */
KThreadQueue::EndWait(waiting_thread, wait_result);
}
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Clear the server thread. */
*m_server_thread = nullptr;
/* Set the waiting thread as not cancelable. */
waiting_thread->ClearCancellable();
/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};
}
void KLightServerSession::Destroy() {
MESOSPHERE_ASSERT_THIS();
this->CleanupRequests();
m_parent->OnServerClosed();
}
void KLightServerSession::OnClientClosed() {
MESOSPHERE_ASSERT_THIS();
this->CleanupRequests();
}
Result KLightServerSession::OnRequest(KThread *request_thread) {
MESOSPHERE_ASSERT_THIS();
ThreadQueueImplForKLightServerSessionRequest wait_queue(std::addressof(m_request_list));
/* Send the request. */
{
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* Check that the server isn't closed. */
R_UNLESS(!m_parent->IsServerClosed(), svc::ResultSessionClosed());
/* Check that the request thread isn't terminating. */
R_UNLESS(!request_thread->IsTerminationRequested(), svc::ResultTerminationRequested());
/* Add the request thread to our list. */
m_request_list.push_back(*request_thread);
/* Begin waiting on the request. */
request_thread->BeginWait(std::addressof(wait_queue));
/* If we have a server thread, end its wait. */
if (m_server_thread != nullptr) {
m_server_thread->EndWait(ResultSuccess());
}
}
/* NOTE: Nintendo returns GetCurrentThread().GetWaitResult() here. */
/* This is technically incorrect, although it doesn't cause problems in practice */
/* because this is only ever called with request_thread = GetCurrentThreadPointer(). */
R_RETURN(request_thread->GetWaitResult());
}
Result KLightServerSession::ReplyAndReceive(u32 *data) {
MESOSPHERE_ASSERT_THIS();
/* Set the server context. */
GetCurrentThread().SetLightSessionData(data);
/* Reply, if we need to. */
if (data[0] & KLightSession::ReplyFlag) {
KScopedSchedulerLock sl;
/* Check that we're open. */
R_UNLESS(!m_parent->IsClientClosed(), svc::ResultSessionClosed());
R_UNLESS(!m_parent->IsServerClosed(), svc::ResultSessionClosed());
/* Check that we have a request to reply to. */
R_UNLESS(m_current_request != nullptr, svc::ResultInvalidState());
/* Check that the server thread id is correct. */
R_UNLESS(m_server_thread_id == GetCurrentThread().GetId(), svc::ResultInvalidState());
/* If we can reply, do so. */
if (!m_current_request->IsTerminationRequested()) {
std::memcpy(m_current_request->GetLightSessionData(), GetCurrentThread().GetLightSessionData(), KLightSession::DataSize);
m_current_request->EndWait(ResultSuccess());
}
/* Close our current request. */
m_current_request->Close();
/* Clear our current request. */
m_current_request = nullptr;
m_server_thread_id = InvalidThreadId;
}
/* Close any pending objects before we wait. */
GetCurrentThread().DestroyClosedObjects();
/* Create the wait queue for our receive. */
ThreadQueueImplForKLightServerSessionReceive wait_queue(std::addressof(m_server_thread));
/* Receive. */
while (true) {
/* Try to receive a request. */
{
KScopedSchedulerLock sl;
/* Check that we aren't already receiving. */
R_UNLESS(m_server_thread == nullptr, svc::ResultInvalidState());
R_UNLESS(m_server_thread_id == InvalidThreadId, svc::ResultInvalidState());
/* Check that we're open. */
R_UNLESS(!m_parent->IsClientClosed(), svc::ResultSessionClosed());
R_UNLESS(!m_parent->IsServerClosed(), svc::ResultSessionClosed());
/* Check that we're not terminating. */
R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());
/* If we have a request available, use it. */
if (auto head = m_request_list.begin(); head != m_request_list.end()) {
/* Set our current request. */
m_current_request = std::addressof(*head);
m_current_request->Open();
/* Set our server thread id. */
m_server_thread_id = GetCurrentThread().GetId();
/* Copy the client request data. */
std::memcpy(GetCurrentThread().GetLightSessionData(), m_current_request->GetLightSessionData(), KLightSession::DataSize);
/* We successfully received. */
R_SUCCEED();
}
/* We need to wait for a request to come in. */
/* Check if we were cancelled. */
if (GetCurrentThread().IsWaitCancelled()) {
GetCurrentThread().ClearWaitCancelled();
R_THROW(svc::ResultCancelled());
}
/* Mark ourselves as cancellable. */
GetCurrentThread().SetCancellable();
/* Wait for a request to come in. */
m_server_thread = GetCurrentThreadPointer();
GetCurrentThread().BeginWait(std::addressof(wait_queue));
}
/* We waited to receive a request; if our wait failed, return the failing result. */
R_TRY(GetCurrentThread().GetWaitResult());
}
}
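    /* Editorial sketch (hedged; not part of the original source): ReplyAndReceive above */
    /* treats KLightSession::ReplyFlag as a bit in the first data word selecting */
    /* reply-then-receive versus plain receive. Standalone check of that dispatch; the */
    /* flag's actual bit position is not shown in this file, so the value is hypothetical: */
    namespace editorial_light_reply_sketch {
        constexpr unsigned HypotheticalReplyFlag = 1u << 0; /* assumed bit position, for illustration only */
        constexpr bool ShouldReply(unsigned first_word) { return (first_word & HypotheticalReplyFlag) != 0; }
        static_assert( ShouldReply(0x1u), "flag set: reply before receiving");
        static_assert(!ShouldReply(0x0u), "flag clear: receive only");
    }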
void KLightServerSession::CleanupRequests() {
/* Cleanup all pending requests. */
{
KScopedSchedulerLock sl;
/* Handle the current request. */
if (m_current_request != nullptr) {
/* Reply to the current request. */
if (!m_current_request->IsTerminationRequested()) {
m_current_request->EndWait(svc::ResultSessionClosed());
}
/* Clear our current request. */
m_current_request->Close();
m_current_request = nullptr;
m_server_thread_id = InvalidThreadId;
}
/* Reply to all other requests. */
for (auto &thread : m_request_list) {
thread.EndWait(svc::ResultSessionClosed());
}
/* Wake up our server thread, if we have one. */
if (m_server_thread != nullptr) {
m_server_thread->EndWait(svc::ResultSessionClosed());
}
}
}
}
| 9,844
|
C++
|
.cpp
| 190
| 38.505263
| 143
| 0.579112
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,943
|
kern_k_client_session.cpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_client_session.cpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
void KClientSession::Destroy() {
MESOSPHERE_ASSERT_THIS();
m_parent->OnClientClosed();
m_parent->Close();
}
void KClientSession::OnServerClosed() {
MESOSPHERE_ASSERT_THIS();
}
Result KClientSession::SendSyncRequest(uintptr_t address, size_t size) {
MESOSPHERE_ASSERT_THIS();
/* Create a session request. */
KSessionRequest *request = KSessionRequest::Create();
R_UNLESS(request != nullptr, svc::ResultOutOfResource());
ON_SCOPE_EXIT { request->Close(); };
/* Initialize the request. */
request->Initialize(nullptr, address, size);
/* Send the request. */
R_RETURN(m_parent->OnRequest(request));
}
Result KClientSession::SendAsyncRequest(KEvent *event, uintptr_t address, size_t size) {
MESOSPHERE_ASSERT_THIS();
/* Create a session request. */
KSessionRequest *request = KSessionRequest::Create();
R_UNLESS(request != nullptr, svc::ResultOutOfResource());
ON_SCOPE_EXIT { request->Close(); };
/* Initialize the request. */
request->Initialize(event, address, size);
/* Send the request. */
R_RETURN(m_parent->OnRequest(request));
}
}
| 1,938
|
C++
|
.cpp
| 48
| 34.479167
| 92
| 0.671817
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,944
|
kern_k_code_memory.cpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_code_memory.cpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
Result KCodeMemory::Initialize(KProcessAddress addr, size_t size) {
MESOSPHERE_ASSERT_THIS();
/* Set members. */
m_owner = GetCurrentProcessPointer();
/* Get the owner page table. */
auto &page_table = m_owner->GetPageTable();
/* Construct the page group, guarding to make sure our state is valid on exit. */
auto pg_guard = util::ConstructAtGuarded(m_page_group, page_table.GetBlockInfoManager());
/* Lock the memory. */
R_TRY(page_table.LockForCodeMemory(GetPointer(m_page_group), addr, size));
/* Clear the memory. */
for (const auto &block : GetReference(m_page_group)) {
/* Clear and store cache. */
void * const block_address = GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block.GetAddress()));
std::memset(block_address, 0xFF, block.GetSize());
cpu::StoreDataCache(block_address, block.GetSize());
}
/* Set remaining tracking members. */
m_owner->Open();
m_address = addr;
m_is_initialized = true;
m_is_owner_mapped = false;
m_is_mapped = false;
/* We succeeded. */
pg_guard.Cancel();
R_SUCCEED();
}
void KCodeMemory::Finalize() {
MESOSPHERE_ASSERT_THIS();
/* Unlock. */
if (!m_is_mapped && !m_is_owner_mapped) {
const size_t size = GetReference(m_page_group).GetNumPages() * PageSize;
MESOSPHERE_R_ABORT_UNLESS(m_owner->GetPageTable().UnlockForCodeMemory(m_address, size, GetReference(m_page_group)));
}
/* Close the page group. */
GetReference(m_page_group).Close();
GetReference(m_page_group).Finalize();
/* Close our reference to our owner. */
m_owner->Close();
}
Result KCodeMemory::Map(KProcessAddress address, size_t size) {
MESOSPHERE_ASSERT_THIS();
/* Validate the size. */
R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Ensure we're not already mapped. */
R_UNLESS(!m_is_mapped, svc::ResultInvalidState());
/* Map the memory. */
R_TRY(GetCurrentProcess().GetPageTable().MapPageGroup(address, GetReference(m_page_group), KMemoryState_CodeOut, KMemoryPermission_UserReadWrite));
/* Mark ourselves as mapped. */
m_is_mapped = true;
R_SUCCEED();
}
Result KCodeMemory::Unmap(KProcessAddress address, size_t size) {
MESOSPHERE_ASSERT_THIS();
/* Validate the size. */
R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Unmap the memory. */
R_TRY(GetCurrentProcess().GetPageTable().UnmapPageGroup(address, GetReference(m_page_group), KMemoryState_CodeOut));
/* Mark ourselves as unmapped. */
MESOSPHERE_ASSERT(m_is_mapped);
m_is_mapped = false;
R_SUCCEED();
}
Result KCodeMemory::MapToOwner(KProcessAddress address, size_t size, ams::svc::MemoryPermission perm) {
MESOSPHERE_ASSERT_THIS();
/* Validate the size. */
R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Ensure we're not already mapped. */
R_UNLESS(!m_is_owner_mapped, svc::ResultInvalidState());
/* Convert the memory permission. */
KMemoryPermission k_perm;
switch (perm) {
case ams::svc::MemoryPermission_Read: k_perm = KMemoryPermission_UserRead; break;
case ams::svc::MemoryPermission_ReadExecute: k_perm = KMemoryPermission_UserReadExecute; break;
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
/* Map the memory. */
R_TRY(m_owner->GetPageTable().MapPageGroup(address, GetReference(m_page_group), KMemoryState_GeneratedCode, k_perm));
/* Mark ourselves as mapped. */
m_is_owner_mapped = true;
R_SUCCEED();
}
Result KCodeMemory::UnmapFromOwner(KProcessAddress address, size_t size) {
MESOSPHERE_ASSERT_THIS();
/* Validate the size. */
R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Unmap the memory. */
R_TRY(m_owner->GetPageTable().UnmapPageGroup(address, GetReference(m_page_group), KMemoryState_GeneratedCode));
/* Mark ourselves as unmapped. */
MESOSPHERE_ASSERT(m_is_owner_mapped);
m_is_owner_mapped = false;
R_SUCCEED();
}
}
| 5,627
|
C++
|
.cpp
| 119
| 39.134454
| 155
| 0.640388
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,945
|
kern_k_shared_memory.cpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_shared_memory.cpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
Result KSharedMemory::Initialize(KProcess *owner, size_t size, ams::svc::MemoryPermission own_perm, ams::svc::MemoryPermission rem_perm) {
MESOSPHERE_ASSERT_THIS();
/* Set members. */
m_owner_process_id = owner->GetId();
m_owner_perm = own_perm;
m_remote_perm = rem_perm;
/* Get the number of pages. */
const size_t num_pages = util::DivideUp(size, PageSize);
MESOSPHERE_ASSERT(num_pages > 0);
/* Get the resource limit. */
KResourceLimit *reslimit = owner->GetResourceLimit();
/* Reserve memory for ourselves. */
KScopedResourceReservation memory_reservation(reslimit, ams::svc::LimitableResource_PhysicalMemoryMax, size);
R_UNLESS(memory_reservation.Succeeded(), svc::ResultLimitReached());
/* Allocate the memory. */
R_TRY(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(m_page_group), num_pages, 1, owner->GetAllocateOption()));
/* Commit our reservation. */
memory_reservation.Commit();
/* Set our resource limit. */
m_resource_limit = reslimit;
m_resource_limit->Open();
/* Mark initialized. */
m_is_initialized = true;
/* Clear all pages in the memory. */
for (const auto &block : m_page_group) {
std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block.GetAddress())), 0, block.GetSize());
}
R_SUCCEED();
}
void KSharedMemory::Finalize() {
MESOSPHERE_ASSERT_THIS();
/* Get the number of pages. */
const size_t num_pages = m_page_group.GetNumPages();
const size_t size = num_pages * PageSize;
/* Close and finalize the page group. */
m_page_group.Close();
m_page_group.Finalize();
/* Release the memory reservation. */
m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, size);
m_resource_limit->Close();
}
Result KSharedMemory::Map(KProcessPageTable *table, KProcessAddress address, size_t size, KProcess *process, ams::svc::MemoryPermission map_perm) {
MESOSPHERE_ASSERT_THIS();
/* Validate the size. */
R_UNLESS(m_page_group.GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
/* Validate the permission. */
const ams::svc::MemoryPermission test_perm = (process->GetId() == m_owner_process_id) ? m_owner_perm : m_remote_perm;
if (test_perm == ams::svc::MemoryPermission_DontCare) {
MESOSPHERE_ASSERT(map_perm == ams::svc::MemoryPermission_Read || map_perm == ams::svc::MemoryPermission_ReadWrite);
} else {
R_UNLESS(map_perm == test_perm, svc::ResultInvalidNewMemoryPermission());
}
/* Map the memory. */
R_RETURN(table->MapPageGroup(address, m_page_group, KMemoryState_Shared, ConvertToKMemoryPermission(map_perm)));
}
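    /* Editorial sketch (hedged; not part of the original source): the permission check in */
    /* Map above selects owner vs remote permission by process id; a DontCare setting lets */
    /* the mapper choose Read or ReadWrite, while any fixed setting must match exactly. */
    /* Standalone illustration of that rule: */
    namespace editorial_shmem_perm_sketch {
        enum class Perm { DontCare, Read, ReadWrite };
        constexpr bool MayMap(Perm required, Perm requested) {
            return (required == Perm::DontCare) ? (requested == Perm::Read || requested == Perm::ReadWrite)
                                                : (requested == required);
        }
        static_assert( MayMap(Perm::DontCare, Perm::Read),      "DontCare admits Read");
        static_assert(!MayMap(Perm::Read,     Perm::ReadWrite), "fixed permission must match exactly");
    }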
Result KSharedMemory::Unmap(KProcessPageTable *table, KProcessAddress address, size_t size, KProcess *process) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_UNUSED(process);
/* Validate the size. */
R_UNLESS(m_page_group.GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
/* Unmap the memory. */
R_RETURN(table->UnmapPageGroup(address, m_page_group, KMemoryState_Shared));
}
}
| 4,102
|
C++
|
.cpp
| 81
| 43.197531
| 151
| 0.660996
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,946
|
kern_k_condition_variable.cpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_condition_variable.cpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
ALWAYS_INLINE bool ReadFromUser(u32 *out, KProcessAddress address) {
return UserspaceAccess::CopyMemoryFromUserSize32Bit(out, GetVoidPointer(address));
}
ALWAYS_INLINE bool WriteToUser(KProcessAddress address, const u32 *p) {
return UserspaceAccess::CopyMemoryToUserSize32Bit(GetVoidPointer(address), p);
}
ALWAYS_INLINE bool UpdateLockAtomic(u32 *out, KProcessAddress address, u32 if_zero, u32 new_orr_mask) {
return UserspaceAccess::UpdateLockAtomic(out, GetPointer<u32>(address), if_zero, new_orr_mask);
}
class ThreadQueueImplForKConditionVariableWaitForAddress final : public KThreadQueue {
public:
constexpr ThreadQueueImplForKConditionVariableWaitForAddress() : KThreadQueue() { /* ... */ }
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Remove the thread as a waiter from its owner. */
waiting_thread->GetLockOwner()->RemoveWaiter(waiting_thread);
/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};
class ThreadQueueImplForKConditionVariableWaitConditionVariable final : public KThreadQueue {
private:
KConditionVariable::ThreadTree *m_tree;
public:
constexpr ThreadQueueImplForKConditionVariableWaitConditionVariable(KConditionVariable::ThreadTree *t) : KThreadQueue(), m_tree(t) { /* ... */ }
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Remove the thread as a waiter from its owner. */
if (KThread *owner = waiting_thread->GetLockOwner(); owner != nullptr) {
owner->RemoveWaiter(waiting_thread);
}
/* If the thread is waiting on a condvar, remove it from the tree. */
if (waiting_thread->IsWaitingForConditionVariable()) {
m_tree->erase(m_tree->iterator_to(*waiting_thread));
waiting_thread->ClearConditionVariable();
}
/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};
}
Result KConditionVariable::SignalToAddress(KProcessAddress addr) {
KThread *owner_thread = GetCurrentThreadPointer();
/* Signal the address. */
{
KScopedSchedulerLock sl;
/* Remove waiter thread. */
bool has_waiters;
KThread * const next_owner_thread = owner_thread->RemoveWaiterByKey(std::addressof(has_waiters), addr);
/* Determine the next tag. */
u32 next_value = 0;
if (next_owner_thread != nullptr) {
next_value = next_owner_thread->GetAddressKeyValue();
if (has_waiters) {
next_value |= ams::svc::HandleWaitMask;
}
}
/* Synchronize memory before proceeding. */
cpu::DataMemoryBarrierInnerShareable();
/* Write the value to userspace. */
Result result;
if (AMS_LIKELY(WriteToUser(addr, std::addressof(next_value)))) {
result = ResultSuccess();
} else {
result = svc::ResultInvalidCurrentMemory();
}
/* If necessary, signal the next owner thread. */
if (next_owner_thread != nullptr) {
next_owner_thread->EndWait(result);
}
R_RETURN(result);
}
}
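    /* Editorial sketch (hedged; not part of the original source): the next tag written in */
    /* SignalToAddress above packs the new owner's address-key value with */
    /* ams::svc::HandleWaitMask when further waiters remain. Standalone illustration, */
    /* assuming (for illustration only) the conventional mask value of bit 30: */
    namespace editorial_tag_sketch {
        constexpr unsigned HypotheticalHandleWaitMask = 1u << 30; /* assumed value */
        constexpr unsigned NextTag(unsigned owner_key, bool has_waiters) {
            return has_waiters ? (owner_key | HypotheticalHandleWaitMask) : owner_key;
        }
        static_assert(NextTag(0x1234u, true)  == (0x1234u | (1u << 30)), "waiters set the mask bit");
        static_assert(NextTag(0x1234u, false) ==  0x1234u,               "no waiters: bare key");
    }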
Result KConditionVariable::WaitForAddress(ams::svc::Handle handle, KProcessAddress addr, u32 value) {
KThread *cur_thread = GetCurrentThreadPointer();
ThreadQueueImplForKConditionVariableWaitForAddress wait_queue;
/* Wait for the address. */
KThread *owner_thread;
{
KScopedSchedulerLock sl;
/* Check if the thread should terminate. */
R_UNLESS(!cur_thread->IsTerminationRequested(), svc::ResultTerminationRequested());
/* Read the tag from userspace. */
u32 test_tag;
R_UNLESS(ReadFromUser(std::addressof(test_tag), addr), svc::ResultInvalidCurrentMemory());
/* If the tag isn't the handle (with wait mask), we're done. */
R_SUCCEED_IF(test_tag != (handle | ams::svc::HandleWaitMask));
/* Get the lock owner thread. */
owner_thread = GetCurrentProcess().GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(handle).ReleasePointerUnsafe();
R_UNLESS(owner_thread != nullptr, svc::ResultInvalidHandle());
/* Update the lock. */
cur_thread->SetAddressKey(addr, value);
owner_thread->AddWaiter(cur_thread);
/* Begin waiting. */
cur_thread->BeginWait(std::addressof(wait_queue));
}
/* Close our reference to the owner thread, now that the wait is over. */
owner_thread->Close();
/* Get the wait result. */
R_RETURN(cur_thread->GetWaitResult());
}
void KConditionVariable::SignalImpl(KThread *thread) {
/* Check pre-conditions. */
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Update the tag. */
KProcessAddress address = thread->GetAddressKey();
u32 own_tag = thread->GetAddressKeyValue();
u32 prev_tag;
bool can_access;
{
/* NOTE: If scheduler lock is not held here, interrupt disable is required. */
/* KScopedInterruptDisable di; */
can_access = cpu::CanAccessAtomic(address);
if (AMS_LIKELY(can_access)) {
can_access = UpdateLockAtomic(std::addressof(prev_tag), address, own_tag, ams::svc::HandleWaitMask);
}
}
if (AMS_LIKELY(can_access)) {
if (prev_tag == ams::svc::InvalidHandle) {
/* If nobody held the lock previously, we're all good. */
thread->EndWait(ResultSuccess());
} else {
/* Get the previous owner. */
KThread *owner_thread = GetCurrentProcess().GetHandleTable().GetObjectWithoutPseudoHandle<KThread>(static_cast<ams::svc::Handle>(prev_tag & ~ams::svc::HandleWaitMask))
.ReleasePointerUnsafe();
if (AMS_LIKELY(owner_thread != nullptr)) {
/* Add the thread as a waiter on the owner. */
owner_thread->AddWaiter(thread);
owner_thread->Close();
} else {
/* The lock was tagged with a thread that doesn't exist. */
thread->EndWait(svc::ResultInvalidState());
}
}
} else {
/* If the address wasn't accessible, note so. */
thread->EndWait(svc::ResultInvalidCurrentMemory());
}
}
void KConditionVariable::Signal(uintptr_t cv_key, s32 count) {
/* Perform signaling. */
int num_waiters = 0;
{
KScopedSchedulerLock sl;
auto it = m_tree.nfind_key({ cv_key, -1 });
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetConditionVariableKey() == cv_key)) {
KThread *target_thread = std::addressof(*it);
it = m_tree.erase(it);
target_thread->ClearConditionVariable();
this->SignalImpl(target_thread);
++num_waiters;
}
/* If we have no waiters, clear the has waiter flag. */
if (it == m_tree.end() || it->GetConditionVariableKey() != cv_key) {
const u32 has_waiter_flag = 0;
WriteToUser(cv_key, std::addressof(has_waiter_flag));
}
}
}
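    /* Editorial sketch (hedged; not part of the original source): in Signal above, a */
    /* count <= 0 means "wake every matching waiter", while a positive count caps how many */
    /* are woken. Standalone check of that loop condition: */
    namespace editorial_signal_count_sketch {
        constexpr bool KeepSignaling(int count, int num_waiters) {
            return count <= 0 || num_waiters < count;
        }
        static_assert( KeepSignaling(-1, 1000), "non-positive count never stops the loop");
        static_assert(!KeepSignaling( 2,    2), "positive count stops after that many wakes");
    }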
Result KConditionVariable::Wait(KProcessAddress addr, uintptr_t key, u32 value, s64 timeout) {
/* Prepare to wait. */
KThread *cur_thread = GetCurrentThreadPointer();
KHardwareTimer *timer;
ThreadQueueImplForKConditionVariableWaitConditionVariable wait_queue(std::addressof(m_tree));
{
KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout);
/* Check that the thread isn't terminating. */
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
R_THROW(svc::ResultTerminationRequested());
}
/* Update the value and process for the next owner. */
{
/* Remove waiter thread. */
bool has_waiters;
KThread *next_owner_thread = cur_thread->RemoveWaiterByKey(std::addressof(has_waiters), GetInteger(addr));
/* Update for the next owner thread. */
u32 next_value = 0;
if (next_owner_thread != nullptr) {
/* Get the next tag value. */
next_value = next_owner_thread->GetAddressKeyValue();
if (has_waiters) {
next_value |= ams::svc::HandleWaitMask;
}
/* Wake up the next owner. */
next_owner_thread->EndWait(ResultSuccess());
}
/* Write to the cv key. */
{
const u32 has_waiter_flag = 1;
WriteToUser(key, std::addressof(has_waiter_flag));
cpu::DataMemoryBarrierInnerShareable();
}
/* Write the value to userspace. */
if (!WriteToUser(addr, std::addressof(next_value))) {
slp.CancelSleep();
R_THROW(svc::ResultInvalidCurrentMemory());
}
}
/* If timeout is zero, time out. */
R_UNLESS(timeout != 0, svc::ResultTimedOut());
/* Update condition variable tracking. */
cur_thread->SetConditionVariable(std::addressof(m_tree), addr, key, value);
m_tree.insert(*cur_thread);
/* Begin waiting. */
wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
}
/* Get the wait result. */
R_RETURN(cur_thread->GetWaitResult());
}
}
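A minimal host-side sketch of the lock-word protocol that SignalImpl and the wait path above operate on: the userspace tag stores the tentative owner's handle, and a mask bit records that other threads are waiting. Everything below is an illustrative assumption (std::atomic in place of the kernel's userspace atomics, the helper names, the mask value); in particular, the kernel's UpdateLockAtomic merges the wait bit atomically in a way this sketch only approximates.

#include <atomic>
#include <cstdint>
#include <cstdio>

namespace {

    constexpr std::uint32_t InvalidHandle  = 0;        /* Mirrors ams::svc::InvalidHandle. */
    constexpr std::uint32_t HandleWaitMask = 1u << 30; /* Stand-in for ams::svc::HandleWaitMask. */

    /* Hypothetical stand-in for the userspace lock word the kernel CASes. */
    std::atomic<std::uint32_t> g_lock_word{InvalidHandle};

    /* Acquire path sketch: claim the word if free, otherwise publish that we wait. */
    bool try_lock_word(std::uint32_t self_handle) {
        std::uint32_t expected = InvalidHandle;
        if (g_lock_word.compare_exchange_strong(expected, self_handle)) {
            return true;                          /* We now own the lock. */
        }
        g_lock_word.fetch_or(HandleWaitMask);     /* Owner must release through the kernel. */
        return false;                             /* Caller would now issue the wait SVC. */
    }

    /* Signal path sketch: hand the word to the woken thread, keeping the wait bit. */
    void signal_lock_word(std::uint32_t next_owner_handle) {
        g_lock_word.store(next_owner_handle | HandleWaitMask);
    }

}

int main() {
    const std::uint32_t handle_a = 0x1234, handle_b = 0x5678;
    std::printf("A acquires: %d\n", try_lock_word(handle_a));     /* 1: word was free. */
    std::printf("B acquires: %d\n", try_lock_word(handle_b));     /* 0: wait bit now set. */
    signal_lock_word(handle_b);
    std::printf("word = 0x%08x\n", (unsigned)g_lock_word.load()); /* B's handle | wait bit. */
    return 0;
}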
| 11,457 | C++ | .cpp | 228 | 36.894737 | 183 | 0.573743 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 6,947 | kern_k_address_arbiter.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_address_arbiter.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
ALWAYS_INLINE bool ReadFromUser(s32 *out, KProcessAddress address) {
return UserspaceAccess::CopyMemoryFromUserSize32Bit(out, GetVoidPointer(address));
}
ALWAYS_INLINE bool ReadFromUser(s64 *out, KProcessAddress address) {
return UserspaceAccess::CopyMemoryFromUserSize64Bit(out, GetVoidPointer(address));
}
ALWAYS_INLINE bool DecrementIfLessThan(s32 *out, KProcessAddress address, s32 value) {
/* NOTE: If scheduler lock is not held here, interrupt disable is required. */
/* KScopedInterruptDisable di; */
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
if (!cpu::CanAccessAtomic(address)) {
return false;
}
return UserspaceAccess::DecrementIfLessThanAtomic(out, GetPointer<s32>(address), value);
}
ALWAYS_INLINE bool UpdateIfEqual(s32 *out, KProcessAddress address, s32 value, s32 new_value) {
/* NOTE: If scheduler lock is not held here, interrupt disable is required. */
/* KScopedInterruptDisable di; */
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
if (!cpu::CanAccessAtomic(address)) {
return false;
}
return UserspaceAccess::UpdateIfEqualAtomic(out, GetPointer<s32>(address), value, new_value);
}
class ThreadQueueImplForKAddressArbiter final : public KThreadQueue {
private:
KAddressArbiter::ThreadTree *m_tree;
public:
constexpr ThreadQueueImplForKAddressArbiter(KAddressArbiter::ThreadTree *t) : KThreadQueue(), m_tree(t) { /* ... */ }
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* If the thread is waiting on an address arbiter, remove it from the tree. */
if (waiting_thread->IsWaitingForAddressArbiter()) {
m_tree->erase(m_tree->iterator_to(*waiting_thread));
waiting_thread->ClearAddressArbiter();
}
/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};
}
Result KAddressArbiter::Signal(uintptr_t addr, s32 count) {
/* Perform signaling. */
s32 num_waiters = 0;
{
KScopedSchedulerLock sl;
auto it = m_tree.nfind_key({ addr, -1 });
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
/* End the thread's wait. */
KThread *target_thread = std::addressof(*it);
target_thread->EndWait(ResultSuccess());
MESOSPHERE_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();
it = m_tree.erase(it);
++num_waiters;
}
}
R_SUCCEED();
}
Result KAddressArbiter::SignalAndIncrementIfEqual(uintptr_t addr, s32 value, s32 count) {
/* Perform signaling. */
s32 num_waiters = 0;
{
KScopedSchedulerLock sl;
/* Check the userspace value. */
s32 user_value;
R_UNLESS(UpdateIfEqual(std::addressof(user_value), addr, value, value + 1), svc::ResultInvalidCurrentMemory());
R_UNLESS(user_value == value, svc::ResultInvalidState());
auto it = m_tree.nfind_key({ addr, -1 });
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
/* End the thread's wait. */
KThread *target_thread = std::addressof(*it);
target_thread->EndWait(ResultSuccess());
MESOSPHERE_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();
it = m_tree.erase(it);
++num_waiters;
}
}
R_SUCCEED();
}
Result KAddressArbiter::SignalAndModifyByWaitingCountIfEqual(uintptr_t addr, s32 value, s32 count) {
/* Perform signaling. */
s32 num_waiters = 0;
{
KScopedSchedulerLock sl;
auto it = m_tree.nfind_key({ addr, -1 });
/* Determine the updated value. */
s32 new_value;
if (count <= 0) {
if ((it != m_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
new_value = value - 1;
} else {
new_value = value + 1;
}
} else {
if ((it != m_tree.end()) && (it->GetAddressArbiterKey() == addr)) {
auto tmp_it = it;
s32 tmp_num_waiters = 0;
while ((++tmp_it != m_tree.end()) && (tmp_it->GetAddressArbiterKey() == addr)) {
if ((++tmp_num_waiters) >= count) {
break;
}
}
if (tmp_num_waiters < count) {
new_value = value - 1;
} else {
new_value = value;
}
} else {
new_value = value + 1;
}
}
/* Check the userspace value. */
s32 user_value;
bool succeeded;
if (value != new_value) {
succeeded = UpdateIfEqual(std::addressof(user_value), addr, value, new_value);
} else {
succeeded = ReadFromUser(std::addressof(user_value), addr);
}
R_UNLESS(succeeded, svc::ResultInvalidCurrentMemory());
R_UNLESS(user_value == value, svc::ResultInvalidState());
while ((it != m_tree.end()) && (count <= 0 || num_waiters < count) && (it->GetAddressArbiterKey() == addr)) {
/* End the thread's wait. */
KThread *target_thread = std::addressof(*it);
target_thread->EndWait(ResultSuccess());
MESOSPHERE_ASSERT(target_thread->IsWaitingForAddressArbiter());
target_thread->ClearAddressArbiter();
it = m_tree.erase(it);
++num_waiters;
}
}
R_SUCCEED();
}
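    /* Worked example for the new_value selection above (illustrative, not upstream */
    /* text): with count = 2 and three waiters queued on addr, the probe loop finds */
    /* two additional waiters beyond the first (tmp_num_waiters == 2 == count), so */
    /* waiters survive the wake and new_value stays equal to value. With count = 2 */
    /* and only two waiters, one additional waiter is found (1 < 2): the wake drains */
    /* the queue, so new_value = value - 1. With count <= 0 the queue always drains, */
    /* hence value - 1 if anyone waits and value + 1 if the queue is already empty. */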
Result KAddressArbiter::WaitIfLessThan(uintptr_t addr, s32 value, bool decrement, s64 timeout) {
/* Prepare to wait. */
KThread *cur_thread = GetCurrentThreadPointer();
KHardwareTimer *timer;
ThreadQueueImplForKAddressArbiter wait_queue(std::addressof(m_tree));
{
KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout);
/* Check that the thread isn't terminating. */
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
R_THROW(svc::ResultTerminationRequested());
}
/* Read the value from userspace. */
s32 user_value;
bool succeeded;
if (decrement) {
succeeded = DecrementIfLessThan(std::addressof(user_value), addr, value);
} else {
succeeded = ReadFromUser(std::addressof(user_value), addr);
}
if (!succeeded) {
slp.CancelSleep();
R_THROW(svc::ResultInvalidCurrentMemory());
}
/* Check that the value is less than the specified one. */
if (user_value >= value) {
slp.CancelSleep();
R_THROW(svc::ResultInvalidState());
}
/* Check that the timeout is non-zero. */
if (timeout == 0) {
slp.CancelSleep();
R_THROW(svc::ResultTimedOut());
}
/* Set the arbiter. */
cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
m_tree.insert(*cur_thread);
/* Wait for the thread to finish. */
wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
}
/* Get the wait result. */
R_RETURN(cur_thread->GetWaitResult());
}
Result KAddressArbiter::WaitIfEqual(uintptr_t addr, s32 value, s64 timeout) {
/* Prepare to wait. */
KThread *cur_thread = GetCurrentThreadPointer();
KHardwareTimer *timer;
ThreadQueueImplForKAddressArbiter wait_queue(std::addressof(m_tree));
{
KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout);
/* Check that the thread isn't terminating. */
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
R_THROW(svc::ResultTerminationRequested());
}
/* Read the value from userspace. */
s32 user_value;
if (!ReadFromUser(std::addressof(user_value), addr)) {
slp.CancelSleep();
R_THROW(svc::ResultInvalidCurrentMemory());
}
/* Check that the value is equal. */
if (value != user_value) {
slp.CancelSleep();
R_THROW(svc::ResultInvalidState());
}
/* Check that the timeout is non-zero. */
if (timeout == 0) {
slp.CancelSleep();
R_THROW(svc::ResultTimedOut());
}
/* Set the arbiter. */
cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
m_tree.insert(*cur_thread);
/* Wait for the thread to finish. */
wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
}
/* Get the wait result. */
R_RETURN(cur_thread->GetWaitResult());
}
Result KAddressArbiter::WaitIfEqual64(uintptr_t addr, s64 value, s64 timeout) {
/* Prepare to wait. */
KThread *cur_thread = GetCurrentThreadPointer();
KHardwareTimer *timer;
ThreadQueueImplForKAddressArbiter wait_queue(std::addressof(m_tree));
{
KScopedSchedulerLockAndSleep slp(std::addressof(timer), cur_thread, timeout);
/* Check that the thread isn't terminating. */
if (cur_thread->IsTerminationRequested()) {
slp.CancelSleep();
R_THROW(svc::ResultTerminationRequested());
}
/* Read the value from userspace. */
s64 user_value;
if (!ReadFromUser(std::addressof(user_value), addr)) {
slp.CancelSleep();
R_THROW(svc::ResultInvalidCurrentMemory());
}
/* Check that the value is equal. */
if (value != user_value) {
slp.CancelSleep();
R_THROW(svc::ResultInvalidState());
}
/* Check that the timeout is non-zero. */
if (timeout == 0) {
slp.CancelSleep();
R_THROW(svc::ResultTimedOut());
}
/* Set the arbiter. */
cur_thread->SetAddressArbiter(std::addressof(m_tree), addr);
m_tree.insert(*cur_thread);
/* Wait for the thread to finish. */
wait_queue.SetHardwareTimer(timer);
cur_thread->BeginWait(std::addressof(wait_queue));
}
/* Get the wait result. */
R_RETURN(cur_thread->GetWaitResult());
}
}
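The conditional read-modify-write helpers at the top of this file carry the arbiter's userspace contract. Below is a self-contained sketch of their semantics, assuming plain C++17 with std::atomic standing in for UserspaceAccess (so no fault handling; the kernel versions return false when the address cannot be accessed atomically).

#include <atomic>
#include <cstdint>
#include <cstdio>

/* Decrement the word only while it is below `bound`; always report the observed value. */
static bool DecrementIfLessThanSketch(std::int32_t *out, std::atomic<std::int32_t> &word, std::int32_t bound) {
    std::int32_t cur = word.load(std::memory_order_relaxed);
    while (cur < bound) {
        if (word.compare_exchange_weak(cur, cur - 1, std::memory_order_relaxed)) {
            *out = cur;        /* Decrement happened; cur is the pre-decrement value. */
            return true;
        }
        /* The failed CAS reloaded cur; the loop re-checks the bound. */
    }
    *out = cur;                /* Value was too large; observe it without storing. */
    return true;
}

/* Store new_value only if the word currently equals `expected`; report the observed value. */
static bool UpdateIfEqualSketch(std::int32_t *out, std::atomic<std::int32_t> &word, std::int32_t expected, std::int32_t new_value) {
    std::int32_t cur = expected;
    word.compare_exchange_strong(cur, new_value, std::memory_order_relaxed);
    *out = cur;                /* On success this is `expected`; on failure, the real value. */
    return true;
}

int main() {
    std::atomic<std::int32_t> word{3};
    std::int32_t seen;
    DecrementIfLessThanSketch(&seen, word, 5);   /* 3 < 5: word becomes 2, seen == 3. */
    std::printf("seen=%d word=%d\n", seen, word.load());
    UpdateIfEqualSketch(&seen, word, 2, 10);     /* CAS 2 -> 10 succeeds, seen == 2. */
    std::printf("seen=%d word=%d\n", seen, word.load());
    return 0;
}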
| 12,510 | C++ | .cpp | 273 | 32.81685 | 133 | 0.54759 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 6,948 | kern_k_light_lock.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_light_lock.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
class ThreadQueueImplForKLightLock final : public KThreadQueue {
public:
constexpr ThreadQueueImplForKLightLock() : KThreadQueue() { /* ... */ }
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Do nothing, waiting to acquire a light lock cannot be canceled. */
MESOSPHERE_UNUSED(waiting_thread, wait_result, cancel_timer_task);
}
};
}
bool KLightLock::LockSlowPath(uintptr_t _owner, uintptr_t _cur_thread) {
KThread *cur_thread = reinterpret_cast<KThread *>(_cur_thread);
ThreadQueueImplForKLightLock wait_queue;
/* Pend the current thread waiting on the owner thread. */
{
KScopedSchedulerLock sl;
/* Ensure we actually have locking to do. */
if (m_tag.Load<std::memory_order_relaxed>() != _owner) {
return false;
}
/* Add the current thread as a waiter on the owner. */
KThread *owner_thread = reinterpret_cast<KThread *>(_owner & ~1ul);
cur_thread->SetAddressKey(reinterpret_cast<uintptr_t>(std::addressof(m_tag)));
owner_thread->AddWaiter(cur_thread);
/* Begin waiting to hold the lock. */
cur_thread->BeginWait(std::addressof(wait_queue));
if (owner_thread->IsSuspended()) {
owner_thread->ContinueIfHasKernelWaiters();
}
}
return true;
}
void KLightLock::UnlockSlowPath(uintptr_t _cur_thread) {
KThread *owner_thread = reinterpret_cast<KThread *>(_cur_thread);
/* Unlock. */
{
KScopedSchedulerLock sl;
/* Get the next owner. */
bool has_waiters;
KThread *next_owner = owner_thread->RemoveWaiterByKey(std::addressof(has_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_tag)));
/* Pass the lock to the next owner. */
uintptr_t next_tag = 0;
if (next_owner != nullptr) {
next_tag = reinterpret_cast<uintptr_t>(next_owner) | static_cast<uintptr_t>(has_waiters);
next_owner->EndWait(ResultSuccess());
if (next_owner->IsSuspended()) {
next_owner->ContinueIfHasKernelWaiters();
}
}
/* We may have unsuspended in the process of acquiring the lock, so we'll re-suspend now if so. */
if (owner_thread->IsSuspended()) {
owner_thread->TrySuspend();
}
/* Write the new tag value. */
m_tag.Store<std::memory_order_release>(next_tag);
}
}
}
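A compact single-process sketch of the tag scheme the slow paths above maintain: the lock word stores the owner KThread pointer, with bit 0 doubling as a has-waiters flag (hence the `& ~1ul` when recovering the owner and the `| has_waiters` when passing the lock on). LightLockSketch and MarkWaiter are illustrative assumptions, not the kernel's implementation; MarkWaiter stands in for the bookkeeping LockSlowPath performs under the scheduler lock.

#include <atomic>
#include <cstdint>
#include <cstdio>

class LightLockSketch {
    private:
        std::atomic<std::uintptr_t> m_tag{0};
    public:
        /* Fast acquire: CAS 0 -> current thread pointer, as KLightLock::Lock would. */
        bool TryLock(std::uintptr_t cur_thread) {
            std::uintptr_t expected = 0;
            return m_tag.compare_exchange_strong(expected, cur_thread, std::memory_order_acquire);
        }
        /* Fast release: only succeeds while no waiter has set bit 0. */
        bool Unlock(std::uintptr_t cur_thread) {
            std::uintptr_t expected = cur_thread;
            return m_tag.compare_exchange_strong(expected, 0, std::memory_order_release);
        }
        /* Stand-in for LockSlowPath's effect on the tag: record a waiter in bit 0. */
        void MarkWaiter() { m_tag.fetch_or(1); }
};

int main() {
    LightLockSketch lock;
    static int thread_a, thread_b; /* Stand-ins for KThread objects; 4-byte aligned, so bit 0 is free. */
    const auto a = reinterpret_cast<std::uintptr_t>(&thread_a);
    const auto b = reinterpret_cast<std::uintptr_t>(&thread_b);
    std::printf("A locks: %d\n", lock.TryLock(a));      /* 1 */
    std::printf("B locks: %d\n", lock.TryLock(b));      /* 0: B would enter LockSlowPath. */
    lock.MarkWaiter();
    std::printf("A fast unlock: %d\n", lock.Unlock(a)); /* 0: waiter bit forces UnlockSlowPath. */
    return 0;
}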
| 3,463 | C++ | .cpp | 75 | 36.013333 | 147 | 0.608554 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 6,949 | kern_k_server_session.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_server_session.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#pragma GCC push_options
#pragma GCC optimize ("-O3")
namespace ams::kern {
namespace ipc {
using MessageBuffer = ams::svc::ipc::MessageBuffer;
}
namespace {
constexpr inline size_t PointerTransferBufferAlignment = 0x10;
class ThreadQueueImplForKServerSessionRequest final : public KThreadQueue { /* ... */ };
class ReceiveList {
private:
u32 m_data[ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountMax * ipc::MessageBuffer::ReceiveListEntry::GetDataSize() / sizeof(u32)];
s32 m_recv_list_count;
uintptr_t m_msg_buffer_end;
uintptr_t m_msg_buffer_space_end;
public:
static constexpr ALWAYS_INLINE int GetEntryCount(const ipc::MessageBuffer::MessageHeader &header) {
const auto count = header.GetReceiveListCount();
switch (count) {
case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_None:
return 0;
case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer:
return 0;
case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToSingleBuffer:
return 1;
default:
return count - ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset;
}
}
public:
ReceiveList(const u32 *dst_msg, uintptr_t dst_address, const KProcessPageTable &dst_page_table, const ipc::MessageBuffer::MessageHeader &dst_header, const ipc::MessageBuffer::SpecialHeader &dst_special_header, size_t msg_size, size_t out_offset, s32 dst_recv_list_idx, bool is_tls) {
m_recv_list_count = dst_header.GetReceiveListCount();
m_msg_buffer_end = dst_address + sizeof(u32) * out_offset;
m_msg_buffer_space_end = dst_address + msg_size;
/* NOTE: Nintendo calculates the receive list index here using the special header. */
/* We pre-calculate it in the caller, and pass it as a parameter. */
MESOSPHERE_UNUSED(dst_special_header);
const u32 *recv_list = dst_msg + dst_recv_list_idx;
const auto entry_count = GetEntryCount(dst_header);
if (is_tls) {
__builtin_memcpy(m_data, recv_list, entry_count * ipc::MessageBuffer::ReceiveListEntry::GetDataSize());
} else {
uintptr_t page_addr = util::AlignDown(dst_address, PageSize);
uintptr_t cur_addr = dst_address + dst_recv_list_idx * sizeof(u32);
for (size_t i = 0; i < entry_count * ipc::MessageBuffer::ReceiveListEntry::GetDataSize() / sizeof(u32); ++i) {
if (page_addr != util::AlignDown(cur_addr, PageSize)) {
KPhysicalAddress phys_addr;
dst_page_table.GetPhysicalAddress(std::addressof(phys_addr), KProcessAddress(cur_addr));
recv_list = GetPointer<u32>(KPageTable::GetHeapVirtualAddress(phys_addr));
page_addr = util::AlignDown(cur_addr, PageSize);
}
m_data[i] = *(recv_list++);
cur_addr += sizeof(u32);
}
}
}
constexpr ALWAYS_INLINE bool IsIndex() const {
return m_recv_list_count > ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset;
}
constexpr ALWAYS_INLINE bool IsToMessageBuffer() const {
return m_recv_list_count == ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer;
}
void GetBuffer(uintptr_t &out, size_t size, int &key) const {
switch (m_recv_list_count) {
case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_None:
{
out = 0;
}
break;
case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer:
{
const uintptr_t buf = util::AlignUp(m_msg_buffer_end + key, PointerTransferBufferAlignment);
if ((buf < buf + size) && (buf + size <= m_msg_buffer_space_end)) {
out = buf;
key = buf + size - m_msg_buffer_end;
} else {
out = 0;
}
}
break;
case ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToSingleBuffer:
{
const ipc::MessageBuffer::ReceiveListEntry entry(m_data[0], m_data[1]);
const uintptr_t buf = util::AlignUp(entry.GetAddress() + key, PointerTransferBufferAlignment);
const uintptr_t entry_addr = entry.GetAddress();
const size_t entry_size = entry.GetSize();
if ((buf < buf + size) && (entry_addr < entry_addr + entry_size) && (buf + size <= entry_addr + entry_size)) {
out = buf;
key = buf + size - entry_addr;
} else {
out = 0;
}
}
break;
default:
{
if (key < m_recv_list_count - ipc::MessageBuffer::MessageHeader::ReceiveListCountType_CountOffset) {
const ipc::MessageBuffer::ReceiveListEntry entry(m_data[2 * key + 0], m_data[2 * key + 1]);
const uintptr_t entry_addr = entry.GetAddress();
const size_t entry_size = entry.GetSize();
if ((entry_addr < entry_addr + entry_size) && (entry_size >= size)) {
out = entry_addr;
}
} else {
out = 0;
}
}
break;
}
}
};
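        /* Worked example for GetBuffer's ToMessageBuffer case (illustrative): with */
        /* the message end at 0x200, key = 0 and a 0x18-byte pointer buffer, the */
        /* buffer lands at AlignUp(0x200 + 0, 0x10) = 0x200 and key becomes */
        /* 0x200 + 0x18 - 0x200 = 0x18, so the next buffer is placed at */
        /* AlignUp(0x200 + 0x18, 0x10) = 0x220. */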
template<bool MoveHandleAllowed>
ALWAYS_INLINE Result ProcessMessageSpecialData(int &offset, KProcess &dst_process, KProcess &src_process, KThread &src_thread, const ipc::MessageBuffer &dst_msg, const ipc::MessageBuffer &src_msg, const ipc::MessageBuffer::SpecialHeader &src_special_header) {
/* Copy the special header to the destination. */
offset = dst_msg.Set(src_special_header);
/* Copy the process ID. */
if (src_special_header.GetHasProcessId()) {
/* NOTE: Atmosphere extends the official kernel here to enable the mitm api. */
/* If building the kernel without this support, just set the following to false. */
constexpr bool EnableProcessIdPassthroughForAtmosphere = true;
if constexpr (EnableProcessIdPassthroughForAtmosphere) {
constexpr u64 PassthroughProcessIdMask = UINT64_C(0xFFFF000000000000);
constexpr u64 PassthroughProcessIdValue = UINT64_C(0xFFFE000000000000);
static_assert((PassthroughProcessIdMask & PassthroughProcessIdValue) == PassthroughProcessIdValue);
const u64 src_process_id_value = src_msg.GetProcessId(offset);
const bool is_passthrough = (src_process_id_value & PassthroughProcessIdMask) == PassthroughProcessIdValue;
offset = dst_msg.SetProcessId(offset, is_passthrough ? (src_process_id_value & ~PassthroughProcessIdMask) : src_process.GetId());
} else {
offset = dst_msg.SetProcessId(offset, src_process.GetId());
}
}
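            /* Worked example of the passthrough encoding above (illustrative): a mitm */
            /* client forwarding process id 0x2A sends 0xFFFE00000000002A; the top bits */
            /* match PassthroughProcessIdValue, so the server receives 0x2A. An ordinary */
            /* value (top sixteen bits not 0xFFFE) is replaced with the sender's real */
            /* process id instead. */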
/* Prepare to process handles. */
auto &dst_handle_table = dst_process.GetHandleTable();
auto &src_handle_table = src_process.GetHandleTable();
Result result = ResultSuccess();
/* Process copy handles. */
for (auto i = 0; i < src_special_header.GetCopyHandleCount(); ++i) {
/* Get the handles. */
const ams::svc::Handle src_handle = src_msg.GetHandle(offset);
ams::svc::Handle dst_handle = ams::svc::InvalidHandle;
/* If we're in a success state, try to move the handle to the new table. */
if (R_SUCCEEDED(result) && src_handle != ams::svc::InvalidHandle) {
KScopedAutoObject obj = src_handle_table.GetObjectForIpc(src_handle, std::addressof(src_thread));
if (obj.IsNotNull()) {
Result add_result = dst_handle_table.Add(std::addressof(dst_handle), obj.GetPointerUnsafe());
if (R_FAILED(add_result)) {
result = add_result;
dst_handle = ams::svc::InvalidHandle;
}
} else {
result = svc::ResultInvalidHandle();
}
}
/* Set the handle. */
offset = dst_msg.SetHandle(offset, dst_handle);
}
/* Process move handles. */
if constexpr (MoveHandleAllowed) {
for (auto i = 0; i < src_special_header.GetMoveHandleCount(); ++i) {
/* Get the handles. */
const ams::svc::Handle src_handle = src_msg.GetHandle(offset);
ams::svc::Handle dst_handle = ams::svc::InvalidHandle;
/* Whether or not we've succeeded, we need to remove the handles from the source table. */
if (src_handle != ams::svc::InvalidHandle) {
if (R_SUCCEEDED(result)) {
KScopedAutoObject obj = src_handle_table.GetObjectForIpcWithoutPseudoHandle(src_handle);
if (obj.IsNotNull()) {
Result add_result = dst_handle_table.Add(std::addressof(dst_handle), obj.GetPointerUnsafe());
src_handle_table.Remove(src_handle);
if (R_FAILED(add_result)) {
result = add_result;
dst_handle = ams::svc::InvalidHandle;
}
} else {
result = svc::ResultInvalidHandle();
}
} else {
src_handle_table.Remove(src_handle);
}
}
/* Set the handle. */
offset = dst_msg.SetHandle(offset, dst_handle);
}
}
R_RETURN(result);
}
ALWAYS_INLINE Result ProcessReceiveMessagePointerDescriptors(int &offset, int &pointer_key, KProcessPageTable &dst_page_table, KProcessPageTable &src_page_table, const ipc::MessageBuffer &dst_msg, const ipc::MessageBuffer &src_msg, const ReceiveList &dst_recv_list, bool dst_user) {
/* Get the offset at the start of processing. */
const int cur_offset = offset;
/* Get the pointer desc. */
ipc::MessageBuffer::PointerDescriptor src_desc(src_msg, cur_offset);
offset += ipc::MessageBuffer::PointerDescriptor::GetDataSize() / sizeof(u32);
/* Extract address/size. */
const uintptr_t src_pointer = src_desc.GetAddress();
const size_t recv_size = src_desc.GetSize();
uintptr_t recv_pointer = 0;
/* Process the buffer, if it has a size. */
if (recv_size > 0) {
/* If using indexing, set index. */
if (dst_recv_list.IsIndex()) {
pointer_key = src_desc.GetIndex();
}
/* Get the buffer. */
dst_recv_list.GetBuffer(recv_pointer, recv_size, pointer_key);
R_UNLESS(recv_pointer != 0, svc::ResultOutOfResource());
/* Perform the pointer data copy. */
if (dst_user) {
R_TRY(src_page_table.CopyMemoryFromHeapToHeapWithoutCheckDestination(dst_page_table, recv_pointer, recv_size,
KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite),
KMemoryAttribute_Uncached | KMemoryAttribute_Locked, KMemoryAttribute_Locked,
src_pointer,
KMemoryState_FlagLinearMapped, KMemoryState_FlagLinearMapped,
KMemoryPermission_UserRead,
KMemoryAttribute_Uncached, KMemoryAttribute_None));
} else {
R_TRY(src_page_table.CopyMemoryFromLinearToUser(recv_pointer, recv_size, src_pointer,
KMemoryState_FlagLinearMapped, KMemoryState_FlagLinearMapped,
KMemoryPermission_UserRead,
KMemoryAttribute_Uncached, KMemoryAttribute_None));
}
}
/* Set the output descriptor. */
dst_msg.Set(cur_offset, ipc::MessageBuffer::PointerDescriptor(reinterpret_cast<void *>(recv_pointer), recv_size, src_desc.GetIndex()));
R_SUCCEED();
}
constexpr ALWAYS_INLINE Result GetMapAliasMemoryState(KMemoryState &out, ipc::MessageBuffer::MapAliasDescriptor::Attribute attr) {
switch (attr) {
case ipc::MessageBuffer::MapAliasDescriptor::Attribute_Ipc: out = KMemoryState_Ipc; break;
case ipc::MessageBuffer::MapAliasDescriptor::Attribute_NonSecureIpc: out = KMemoryState_NonSecureIpc; break;
case ipc::MessageBuffer::MapAliasDescriptor::Attribute_NonDeviceIpc: out = KMemoryState_NonDeviceIpc; break;
default: R_THROW(svc::ResultInvalidCombination());
}
R_SUCCEED();
}
constexpr ALWAYS_INLINE Result GetMapAliasTestStateAndAttributeMask(u32 &out_state, u32 &out_attr_mask, KMemoryState state) {
switch (state) {
case KMemoryState_Ipc:
out_state = KMemoryState_FlagCanUseIpc;
out_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_DeviceShared | KMemoryAttribute_Locked;
break;
case KMemoryState_NonSecureIpc:
out_state = KMemoryState_FlagCanUseNonSecureIpc;
out_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
break;
case KMemoryState_NonDeviceIpc:
out_state = KMemoryState_FlagCanUseNonDeviceIpc;
out_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
break;
default:
R_THROW(svc::ResultInvalidCombination());
}
R_SUCCEED();
}
ALWAYS_INLINE void CleanupSpecialData(KProcess &dst_process, u32 *dst_msg_ptr, size_t dst_buffer_size) {
/* Parse the message. */
const ipc::MessageBuffer dst_msg(dst_msg_ptr, dst_buffer_size);
const ipc::MessageBuffer::MessageHeader dst_header(dst_msg);
const ipc::MessageBuffer::SpecialHeader dst_special_header(dst_msg, dst_header);
/* Check that the size is big enough. */
if (ipc::MessageBuffer::GetMessageBufferSize(dst_header, dst_special_header) > dst_buffer_size) {
return;
}
/* Set the special header. */
int offset = dst_msg.Set(dst_special_header);
/* Clear the process id, if needed. */
if (dst_special_header.GetHasProcessId()) {
offset = dst_msg.SetProcessId(offset, 0);
}
/* Clear handles, as relevant. */
auto &dst_handle_table = dst_process.GetHandleTable();
for (auto i = 0; i < (dst_special_header.GetCopyHandleCount() + dst_special_header.GetMoveHandleCount()); ++i) {
const ams::svc::Handle handle = dst_msg.GetHandle(offset);
if (handle != ams::svc::InvalidHandle) {
dst_handle_table.Remove(handle);
}
offset = dst_msg.SetHandle(offset, ams::svc::InvalidHandle);
}
}
ALWAYS_INLINE Result CleanupServerHandles(uintptr_t message, size_t buffer_size, KPhysicalAddress message_paddr) {
/* Server is assumed to be current thread. */
const KThread &thread = GetCurrentThread();
/* Get the linear message pointer. */
u32 *msg_ptr;
if (message) {
msg_ptr = GetPointer<u32>(KPageTable::GetHeapVirtualAddress(message_paddr));
} else {
msg_ptr = static_cast<ams::svc::ThreadLocalRegion *>(thread.GetThreadLocalRegionHeapAddress())->message_buffer;
buffer_size = sizeof(ams::svc::ThreadLocalRegion{}.message_buffer);
message = GetInteger(thread.GetThreadLocalRegionAddress());
}
/* Parse the message. */
const ipc::MessageBuffer msg(msg_ptr, buffer_size);
const ipc::MessageBuffer::MessageHeader header(msg);
const ipc::MessageBuffer::SpecialHeader special_header(msg, header);
/* Check that the size is big enough. */
R_UNLESS(ipc::MessageBuffer::GetMessageBufferSize(header, special_header) <= buffer_size, svc::ResultInvalidCombination());
/* If there's a special header, there may be move handles we need to close. */
if (header.GetHasSpecialHeader()) {
/* Determine the offset to the start of handles. */
auto offset = msg.GetSpecialDataIndex(header, special_header);
if (special_header.GetHasProcessId()) {
offset += sizeof(u64) / sizeof(u32);
}
if (auto copy_count = special_header.GetCopyHandleCount(); copy_count > 0) {
offset += (sizeof(ams::svc::Handle) * copy_count) / sizeof(u32);
}
/* Get the handle table. */
auto &handle_table = thread.GetOwnerProcess()->GetHandleTable();
/* Close the handles. */
for (auto i = 0; i < special_header.GetMoveHandleCount(); ++i) {
handle_table.Remove(msg.GetHandle(offset));
offset += sizeof(ams::svc::Handle) / sizeof(u32);
}
}
R_SUCCEED();
}
ALWAYS_INLINE Result CleanupServerMap(KSessionRequest *request, KProcess *server_process) {
/* If there's no server process, there's nothing to clean up. */
R_SUCCEED_IF(server_process == nullptr);
/* Get the page table. */
auto &server_page_table = server_process->GetPageTable();
/* Cleanup Send mappings. */
for (size_t i = 0; i < request->GetSendCount(); ++i) {
R_TRY(server_page_table.CleanupForIpcServer(request->GetSendServerAddress(i), request->GetSendSize(i), request->GetSendMemoryState(i)));
}
/* Cleanup Receive mappings. */
for (size_t i = 0; i < request->GetReceiveCount(); ++i) {
R_TRY(server_page_table.CleanupForIpcServer(request->GetReceiveServerAddress(i), request->GetReceiveSize(i), request->GetReceiveMemoryState(i)));
}
/* Cleanup Exchange mappings. */
for (size_t i = 0; i < request->GetExchangeCount(); ++i) {
R_TRY(server_page_table.CleanupForIpcServer(request->GetExchangeServerAddress(i), request->GetExchangeSize(i), request->GetExchangeMemoryState(i)));
}
R_SUCCEED();
}
ALWAYS_INLINE Result CleanupClientMap(KSessionRequest *request, KProcessPageTable *client_page_table) {
/* If there's no client page table, there's nothing to clean up. */
R_SUCCEED_IF(client_page_table == nullptr);
/* Cleanup Send mappings. */
for (size_t i = 0; i < request->GetSendCount(); ++i) {
R_TRY(client_page_table->CleanupForIpcClient(request->GetSendClientAddress(i), request->GetSendSize(i), request->GetSendMemoryState(i)));
}
/* Cleanup Receive mappings. */
for (size_t i = 0; i < request->GetReceiveCount(); ++i) {
R_TRY(client_page_table->CleanupForIpcClient(request->GetReceiveClientAddress(i), request->GetReceiveSize(i), request->GetReceiveMemoryState(i)));
}
/* Cleanup Exchange mappings. */
for (size_t i = 0; i < request->GetExchangeCount(); ++i) {
R_TRY(client_page_table->CleanupForIpcClient(request->GetExchangeClientAddress(i), request->GetExchangeSize(i), request->GetExchangeMemoryState(i)));
}
R_SUCCEED();
}
ALWAYS_INLINE Result CleanupMap(KSessionRequest *request, KProcess *server_process, KProcessPageTable *client_page_table) {
/* Cleanup the server map. */
R_TRY(CleanupServerMap(request, server_process));
/* Cleanup the client map. */
R_TRY(CleanupClientMap(request, client_page_table));
R_SUCCEED();
}
ALWAYS_INLINE Result ProcessReceiveMessageMapAliasDescriptors(int &offset, KProcessPageTable &dst_page_table, KProcessPageTable &src_page_table, const ipc::MessageBuffer &dst_msg, const ipc::MessageBuffer &src_msg, KSessionRequest *request, KMemoryPermission perm, bool send) {
/* Get the offset at the start of processing. */
const int cur_offset = offset;
/* Get the map alias descriptor. */
ipc::MessageBuffer::MapAliasDescriptor src_desc(src_msg, cur_offset);
offset += ipc::MessageBuffer::MapAliasDescriptor::GetDataSize() / sizeof(u32);
/* Extract address/size. */
const KProcessAddress src_address = src_desc.GetAddress();
const size_t size = src_desc.GetSize();
KProcessAddress dst_address = 0;
/* Determine the result memory state. */
KMemoryState dst_state;
R_TRY(GetMapAliasMemoryState(dst_state, src_desc.GetAttribute()));
/* Process the buffer, if it has a size. */
if (size > 0) {
/* Set up the source pages for ipc. */
R_TRY(dst_page_table.SetupForIpc(std::addressof(dst_address), size, src_address, src_page_table, perm, dst_state, send));
/* Ensure that we clean up on failure. */
ON_RESULT_FAILURE {
dst_page_table.CleanupForIpcServer(dst_address, size, dst_state);
src_page_table.CleanupForIpcClient(src_address, size, dst_state);
};
/* Push the appropriate mapping. */
if (perm == KMemoryPermission_UserRead) {
R_TRY(request->PushSend(src_address, dst_address, size, dst_state));
} else if (send) {
R_TRY(request->PushExchange(src_address, dst_address, size, dst_state));
} else {
R_TRY(request->PushReceive(src_address, dst_address, size, dst_state));
}
}
/* Set the output descriptor. */
dst_msg.Set(cur_offset, ipc::MessageBuffer::MapAliasDescriptor(GetVoidPointer(dst_address), size, src_desc.GetAttribute()));
R_SUCCEED();
}
ALWAYS_INLINE Result ReceiveMessage(bool &recv_list_broken, uintptr_t dst_message_buffer, size_t dst_buffer_size, KPhysicalAddress dst_message_paddr, KThread &src_thread, uintptr_t src_message_buffer, size_t src_buffer_size, KServerSession *session, KSessionRequest *request) {
/* Prepare variables for receive. */
const KThread &dst_thread = GetCurrentThread();
KProcess &dst_process = *(dst_thread.GetOwnerProcess());
KProcess &src_process = *(src_thread.GetOwnerProcess());
auto &dst_page_table = dst_process.GetPageTable();
auto &src_page_table = src_process.GetPageTable();
/* NOTE: Session is used only for debugging, and so may go unused. */
MESOSPHERE_UNUSED(session);
/* The receive list is initially not broken. */
recv_list_broken = false;
/* Set the server process for the request. */
request->SetServerProcess(std::addressof(dst_process));
/* Determine the message buffers. */
u32 *dst_msg_ptr, *src_msg_ptr;
bool dst_user, src_user;
if (dst_message_buffer) {
dst_msg_ptr = GetPointer<u32>(KPageTable::GetHeapVirtualAddress(dst_message_paddr));
dst_user = true;
} else {
dst_msg_ptr = static_cast<ams::svc::ThreadLocalRegion *>(dst_thread.GetThreadLocalRegionHeapAddress())->message_buffer;
dst_buffer_size = sizeof(ams::svc::ThreadLocalRegion{}.message_buffer);
dst_message_buffer = GetInteger(dst_thread.GetThreadLocalRegionAddress());
dst_user = false;
}
if (src_message_buffer) {
/* NOTE: Nintendo does not check the result of this GetPhysicalAddress call. */
KPhysicalAddress src_message_paddr;
src_page_table.GetPhysicalAddress(std::addressof(src_message_paddr), src_message_buffer);
src_msg_ptr = GetPointer<u32>(KPageTable::GetHeapVirtualAddress(src_message_paddr));
src_user = true;
} else {
src_msg_ptr = static_cast<ams::svc::ThreadLocalRegion *>(src_thread.GetThreadLocalRegionHeapAddress())->message_buffer;
src_buffer_size = sizeof(ams::svc::ThreadLocalRegion{}.message_buffer);
src_message_buffer = GetInteger(src_thread.GetThreadLocalRegionAddress());
src_user = false;
}
/* Parse the headers. */
const ipc::MessageBuffer dst_msg(dst_msg_ptr, dst_buffer_size);
const ipc::MessageBuffer src_msg(src_msg_ptr, src_buffer_size);
const ipc::MessageBuffer::MessageHeader dst_header(dst_msg);
const ipc::MessageBuffer::MessageHeader src_header(src_msg);
const ipc::MessageBuffer::SpecialHeader dst_special_header(dst_msg, dst_header);
const ipc::MessageBuffer::SpecialHeader src_special_header(src_msg, src_header);
/* Get the end of the source message. */
const size_t src_end_offset = ipc::MessageBuffer::GetRawDataIndex(src_header, src_special_header) + src_header.GetRawCount();
/* Ensure that the headers fit. */
R_UNLESS(ipc::MessageBuffer::GetMessageBufferSize(dst_header, dst_special_header) <= dst_buffer_size, svc::ResultInvalidCombination());
R_UNLESS(ipc::MessageBuffer::GetMessageBufferSize(src_header, src_special_header) <= src_buffer_size, svc::ResultInvalidCombination());
/* Ensure the receive list offset is after the end of raw data. */
if (dst_header.GetReceiveListOffset()) {
R_UNLESS(dst_header.GetReceiveListOffset() >= ipc::MessageBuffer::GetRawDataIndex(dst_header, dst_special_header) + dst_header.GetRawCount(), svc::ResultInvalidCombination());
}
/* Ensure that the destination buffer is big enough to receive the source. */
R_UNLESS(dst_buffer_size >= src_end_offset * sizeof(u32), svc::ResultMessageTooLarge());
/* Get the receive list. */
const s32 dst_recv_list_idx = ipc::MessageBuffer::GetReceiveListIndex(dst_header, dst_special_header);
ReceiveList dst_recv_list(dst_msg_ptr, dst_message_buffer, dst_page_table, dst_header, dst_special_header, dst_buffer_size, src_end_offset, dst_recv_list_idx, !dst_user);
/* Ensure that the source special header isn't invalid. */
const bool src_has_special_header = src_header.GetHasSpecialHeader();
if (src_has_special_header) {
/* Sending move handles from client -> server is not allowed. */
R_UNLESS(src_special_header.GetMoveHandleCount() == 0, svc::ResultInvalidCombination());
}
/* Prepare for further processing. */
int pointer_key = 0;
int offset = dst_msg.Set(src_header);
/* Set up a guard to make sure that we end up in a clean state on error. */
ON_RESULT_FAILURE {
/* Cleanup mappings. */
CleanupMap(request, std::addressof(dst_process), std::addressof(src_page_table));
/* Cleanup special data. */
if (src_header.GetHasSpecialHeader()) {
CleanupSpecialData(dst_process, dst_msg_ptr, dst_buffer_size);
}
/* Cleanup the header if the receive list isn't broken. */
if (!recv_list_broken) {
dst_msg.Set(dst_header);
if (dst_header.GetHasSpecialHeader()) {
dst_msg.Set(dst_special_header);
}
}
};
/* Process any special data. */
if (src_header.GetHasSpecialHeader()) {
/* After we process, make sure we track whether the receive list is broken. */
ON_SCOPE_EXIT { if (offset > dst_recv_list_idx) { recv_list_broken = true; } };
/* Process special data. */
R_TRY(ProcessMessageSpecialData<false>(offset, dst_process, src_process, src_thread, dst_msg, src_msg, src_special_header));
}
/* Process any pointer buffers. */
for (auto i = 0; i < src_header.GetPointerCount(); ++i) {
/* After we process, make sure we track whether the receive list is broken. */
ON_SCOPE_EXIT { if (offset > dst_recv_list_idx) { recv_list_broken = true; } };
R_TRY(ProcessReceiveMessagePointerDescriptors(offset, pointer_key, dst_page_table, src_page_table, dst_msg, src_msg, dst_recv_list, dst_user && dst_header.GetReceiveListCount() == ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer));
}
/* Process any map alias buffers. */
for (auto i = 0; i < src_header.GetMapAliasCount(); ++i) {
/* After we process, make sure we track whether the receive list is broken. */
ON_SCOPE_EXIT { if (offset > dst_recv_list_idx) { recv_list_broken = true; } };
/* We process in order send, recv, exch. Buffers after send (recv/exch) are ReadWrite. */
const KMemoryPermission perm = (i >= src_header.GetSendCount()) ? KMemoryPermission_UserReadWrite : KMemoryPermission_UserRead;
/* Buffer is send if it is send or exch. */
const bool send = (i < src_header.GetSendCount()) || (i >= src_header.GetSendCount() + src_header.GetReceiveCount());
R_TRY(ProcessReceiveMessageMapAliasDescriptors(offset, dst_page_table, src_page_table, dst_msg, src_msg, request, perm, send));
}
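            /* Worked example for the flags above (illustrative): with 1 send, 2 receive */
            /* and 1 exchange descriptor, index 0 is mapped read-only with send = true, */
            /* indices 1 and 2 are mapped read-write with send = false, and index 3 */
            /* (the exchange) is mapped read-write with send = true. */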
/* Process any raw data. */
if (const auto raw_count = src_header.GetRawCount(); raw_count != 0) {
/* After we process, make sure we track whether the receive list is broken. */
ON_SCOPE_EXIT { if (offset + raw_count > dst_recv_list_idx) { recv_list_broken = true; } };
/* Get the offset and size. */
const size_t offset_words = offset * sizeof(u32);
const size_t raw_size = raw_count * sizeof(u32);
/* Fast case is TLS -> TLS, do raw memcpy if we can. */
if (!dst_user && !src_user) {
std::memcpy(dst_msg_ptr + offset, src_msg_ptr + offset, raw_size);
} else if (dst_user) {
/* Determine how much fast size we can copy. */
const size_t max_fast_size = std::min<size_t>(offset_words + raw_size, PageSize);
const size_t fast_size = max_fast_size - offset_words;
/* Determine source state; if user buffer, we require heap, and otherwise only linear mapped (to enable tls use). */
const auto src_state = src_user ? KMemoryState_FlagReferenceCounted : KMemoryState_FlagLinearMapped;
/* Determine the source permission. User buffer should be unmapped + read, TLS should be user readable. */
const KMemoryPermission src_perm = static_cast<KMemoryPermission>(src_user ? (KMemoryPermission_NotMapped | KMemoryPermission_KernelRead) : KMemoryPermission_UserRead);
/* Perform the fast part of the copy. */
R_TRY(src_page_table.CopyMemoryFromLinearToKernel(reinterpret_cast<uintptr_t>(dst_msg_ptr) + offset_words, fast_size, src_message_buffer + offset_words,
src_state, src_state,
src_perm,
KMemoryAttribute_Uncached, KMemoryAttribute_None));
/* If the fast part of the copy didn't get everything, perform the slow part of the copy. */
if (fast_size < raw_size) {
R_TRY(src_page_table.CopyMemoryFromHeapToHeap(dst_page_table, dst_message_buffer + max_fast_size, raw_size - fast_size,
KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite),
KMemoryAttribute_Uncached | KMemoryAttribute_Locked, KMemoryAttribute_Locked,
src_message_buffer + max_fast_size,
src_state, src_state,
src_perm,
KMemoryAttribute_Uncached, KMemoryAttribute_None));
}
} else /* if (src_user) */ {
/* The source is a user buffer, so it should be unmapped + readable. */
constexpr KMemoryPermission SourcePermission = static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelRead);
/* Copy the memory. */
R_TRY(src_page_table.CopyMemoryFromLinearToUser(dst_message_buffer + offset_words, raw_size, src_message_buffer + offset_words,
KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
SourcePermission,
KMemoryAttribute_Uncached, KMemoryAttribute_None));
}
}
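            /* Worked example of the fast/slow split above (illustrative, 4 KB pages): */
            /* with offset_words = 0x90 and raw_size = 0x2000, max_fast_size = */
            /* min(0x2090, 0x1000) = 0x1000, so fast_size = 0xF70 is copied through the */
            /* linearly-mapped first page and the remaining 0x1090 bytes take the */
            /* heap-to-heap path. */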
/* We succeeded! */
R_SUCCEED();
}
ALWAYS_INLINE Result ProcessSendMessageReceiveMapping(KProcessPageTable &dst_page_table, KProcessAddress client_address, KProcessAddress server_address, size_t size, KMemoryState src_state) {
/* If the size is zero, there's nothing to process. */
R_SUCCEED_IF(size == 0);
/* Get the memory state and attribute mask to test. */
u32 test_state;
u32 test_attr_mask;
R_TRY(GetMapAliasTestStateAndAttributeMask(test_state, test_attr_mask, src_state));
/* Determine buffer extents. */
KProcessAddress aligned_dst_start = util::AlignDown(GetInteger(client_address), PageSize);
KProcessAddress aligned_dst_end = util::AlignUp(GetInteger(client_address) + size, PageSize);
KProcessAddress mapping_dst_start = util::AlignUp(GetInteger(client_address), PageSize);
KProcessAddress mapping_dst_end = util::AlignDown(GetInteger(client_address) + size, PageSize);
KProcessAddress mapping_src_end = util::AlignDown(GetInteger(server_address) + size, PageSize);
/* If the start of the buffer is unaligned, handle that. */
if (aligned_dst_start != mapping_dst_start) {
MESOSPHERE_ASSERT(client_address < mapping_dst_start);
const size_t copy_size = std::min<size_t>(size, mapping_dst_start - client_address);
R_TRY(dst_page_table.CopyMemoryFromUserToLinear(client_address, copy_size,
test_state, test_state,
KMemoryPermission_UserReadWrite,
test_attr_mask, KMemoryAttribute_None,
server_address));
}
/* If the end of the buffer is unaligned, handle that. */
if (mapping_dst_end < aligned_dst_end && (aligned_dst_start == mapping_dst_start || aligned_dst_start < mapping_dst_end)) {
const size_t copy_size = client_address + size - mapping_dst_end;
R_TRY(dst_page_table.CopyMemoryFromUserToLinear(mapping_dst_end, copy_size,
test_state, test_state,
KMemoryPermission_UserReadWrite,
test_attr_mask, KMemoryAttribute_None,
mapping_src_end));
}
R_SUCCEED();
}
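        /* Worked example for the unaligned-edge copies above (illustrative, 4 KB */
        /* pages): with client_address = 0x1234 and size = 0x3000, only the aligned */
        /* middle [0x2000, 0x4000) was remapped for the server, so the head copy */
        /* moves 0xDCC bytes up to 0x2000 and the tail copy moves the final 0x234 */
        /* bytes covering [0x4000, 0x4234) by hand. */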
ALWAYS_INLINE Result ProcessSendMessagePointerDescriptors(int &offset, int &pointer_key, KProcessPageTable &dst_page_table, const ipc::MessageBuffer &dst_msg, const ipc::MessageBuffer &src_msg, const ReceiveList &dst_recv_list, bool dst_user) {
/* Get the offset at the start of processing. */
const int cur_offset = offset;
/* Get the pointer desc. */
ipc::MessageBuffer::PointerDescriptor src_desc(src_msg, cur_offset);
offset += ipc::MessageBuffer::PointerDescriptor::GetDataSize() / sizeof(u32);
/* Extract address/size. */
const uintptr_t src_pointer = src_desc.GetAddress();
const size_t recv_size = src_desc.GetSize();
uintptr_t recv_pointer = 0;
/* Process the buffer, if it has a size. */
if (recv_size > 0) {
/* If using indexing, set index. */
if (dst_recv_list.IsIndex()) {
pointer_key = src_desc.GetIndex();
}
/* Get the buffer. */
dst_recv_list.GetBuffer(recv_pointer, recv_size, pointer_key);
R_UNLESS(recv_pointer != 0, svc::ResultOutOfResource());
/* Perform the pointer data copy. */
const bool dst_heap = dst_user && dst_recv_list.IsToMessageBuffer();
const auto dst_state = dst_heap ? KMemoryState_FlagReferenceCounted : KMemoryState_FlagLinearMapped;
const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_heap ? (KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite) : KMemoryPermission_UserReadWrite);
R_TRY(dst_page_table.CopyMemoryFromUserToLinear(recv_pointer, recv_size,
dst_state, dst_state,
dst_perm,
KMemoryAttribute_Uncached, KMemoryAttribute_None,
src_pointer));
}
/* Set the output descriptor. */
dst_msg.Set(cur_offset, ipc::MessageBuffer::PointerDescriptor(reinterpret_cast<void *>(recv_pointer), recv_size, src_desc.GetIndex()));
R_SUCCEED();
}
ALWAYS_INLINE Result SendMessage(uintptr_t src_message_buffer, size_t src_buffer_size, KPhysicalAddress src_message_paddr, KThread &dst_thread, uintptr_t dst_message_buffer, size_t dst_buffer_size, KServerSession *session, KSessionRequest *request) {
/* Prepare variables for send. */
KThread &src_thread = GetCurrentThread();
KProcess &dst_process = *(dst_thread.GetOwnerProcess());
KProcess &src_process = *(src_thread.GetOwnerProcess());
auto &dst_page_table = dst_process.GetPageTable();
auto &src_page_table = src_process.GetPageTable();
/* NOTE: Session is used only for debugging, and so may go unused. */
MESOSPHERE_UNUSED(session);
/* NOTE: Source page table is not used, and so may go unused. */
MESOSPHERE_UNUSED(src_page_table);
/* Determine the message buffers. */
u32 *dst_msg_ptr, *src_msg_ptr;
bool dst_user, src_user;
if (dst_message_buffer) {
/* NOTE: Nintendo does not check the result of this GetPhysicalAddress call. */
KPhysicalAddress dst_message_paddr;
dst_page_table.GetPhysicalAddress(std::addressof(dst_message_paddr), dst_message_buffer);
dst_msg_ptr = GetPointer<u32>(KPageTable::GetHeapVirtualAddress(dst_message_paddr));
dst_user = true;
} else {
dst_msg_ptr = static_cast<ams::svc::ThreadLocalRegion *>(dst_thread.GetThreadLocalRegionHeapAddress())->message_buffer;
dst_buffer_size = sizeof(ams::svc::ThreadLocalRegion{}.message_buffer);
dst_message_buffer = GetInteger(dst_thread.GetThreadLocalRegionAddress());
dst_user = false;
}
if (src_message_buffer) {
src_msg_ptr = GetPointer<u32>(KPageTable::GetHeapVirtualAddress(src_message_paddr));
src_user = true;
} else {
src_msg_ptr = static_cast<ams::svc::ThreadLocalRegion *>(src_thread.GetThreadLocalRegionHeapAddress())->message_buffer;
src_buffer_size = sizeof(ams::svc::ThreadLocalRegion{}.message_buffer);
src_message_buffer = GetInteger(src_thread.GetThreadLocalRegionAddress());
src_user = false;
}
/* Parse the headers. */
const ipc::MessageBuffer dst_msg(dst_msg_ptr, dst_buffer_size);
const ipc::MessageBuffer src_msg(src_msg_ptr, src_buffer_size);
const ipc::MessageBuffer::MessageHeader dst_header(dst_msg);
const ipc::MessageBuffer::MessageHeader src_header(src_msg);
const ipc::MessageBuffer::SpecialHeader dst_special_header(dst_msg, dst_header);
const ipc::MessageBuffer::SpecialHeader src_special_header(src_msg, src_header);
/* Get the end of the source message. */
const size_t src_end_offset = ipc::MessageBuffer::GetRawDataIndex(src_header, src_special_header) + src_header.GetRawCount();
/* Declare variables for processing. */
int offset = 0;
int pointer_key = 0;
bool processed_special_data = false;
/* Send the message. */
{
/* Make sure that we end up in a clean state on error. */
ON_RESULT_FAILURE {
/* Cleanup special data. */
if (processed_special_data) {
if (src_header.GetHasSpecialHeader()) {
CleanupSpecialData(dst_process, dst_msg_ptr, dst_buffer_size);
}
} else {
CleanupServerHandles(src_user ? src_message_buffer : 0, src_buffer_size, src_message_paddr);
}
/* Cleanup mappings. */
CleanupMap(request, std::addressof(src_process), std::addressof(dst_page_table));
};
/* Ensure that the headers fit. */
R_UNLESS(ipc::MessageBuffer::GetMessageBufferSize(src_header, src_special_header) <= src_buffer_size, svc::ResultInvalidCombination());
R_UNLESS(ipc::MessageBuffer::GetMessageBufferSize(dst_header, dst_special_header) <= dst_buffer_size, svc::ResultInvalidCombination());
/* Ensure the receive list offset is after the end of raw data. */
if (dst_header.GetReceiveListOffset()) {
R_UNLESS(dst_header.GetReceiveListOffset() >= ipc::MessageBuffer::GetRawDataIndex(dst_header, dst_special_header) + dst_header.GetRawCount(), svc::ResultInvalidCombination());
}
/* Ensure that the destination buffer is big enough to receive the source. */
R_UNLESS(dst_buffer_size >= src_end_offset * sizeof(u32), svc::ResultMessageTooLarge());
/* Replies must have no buffers. */
R_UNLESS(src_header.GetSendCount() == 0, svc::ResultInvalidCombination());
R_UNLESS(src_header.GetReceiveCount() == 0, svc::ResultInvalidCombination());
R_UNLESS(src_header.GetExchangeCount() == 0, svc::ResultInvalidCombination());
/* Get the receive list. */
const s32 dst_recv_list_idx = ipc::MessageBuffer::GetReceiveListIndex(dst_header, dst_special_header);
ReceiveList dst_recv_list(dst_msg_ptr, dst_message_buffer, dst_page_table, dst_header, dst_special_header, dst_buffer_size, src_end_offset, dst_recv_list_idx, !dst_user);
/* Handle any receive buffers. */
for (size_t i = 0; i < request->GetReceiveCount(); ++i) {
R_TRY(ProcessSendMessageReceiveMapping(dst_page_table, request->GetReceiveClientAddress(i), request->GetReceiveServerAddress(i), request->GetReceiveSize(i), request->GetReceiveMemoryState(i)));
}
/* Handle any exchange buffers. */
for (size_t i = 0; i < request->GetExchangeCount(); ++i) {
R_TRY(ProcessSendMessageReceiveMapping(dst_page_table, request->GetExchangeClientAddress(i), request->GetExchangeServerAddress(i), request->GetExchangeSize(i), request->GetExchangeMemoryState(i)));
}
/* Set the header. */
offset = dst_msg.Set(src_header);
/* Process any special data. */
MESOSPHERE_ASSERT(GetCurrentThreadPointer() == std::addressof(src_thread));
processed_special_data = true;
if (src_header.GetHasSpecialHeader()) {
R_TRY(ProcessMessageSpecialData<true>(offset, dst_process, src_process, src_thread, dst_msg, src_msg, src_special_header));
}
/* Process any pointer buffers. */
for (auto i = 0; i < src_header.GetPointerCount(); ++i) {
R_TRY(ProcessSendMessagePointerDescriptors(offset, pointer_key, dst_page_table, dst_msg, src_msg, dst_recv_list, dst_user && dst_header.GetReceiveListCount() == ipc::MessageBuffer::MessageHeader::ReceiveListCountType_ToMessageBuffer));
}
/* Clear any map alias buffers. */
for (auto i = 0; i < src_header.GetMapAliasCount(); ++i) {
offset = dst_msg.Set(offset, ipc::MessageBuffer::MapAliasDescriptor());
}
/* Process any raw data. */
if (const auto raw_count = src_header.GetRawCount(); raw_count != 0) {
/* Get the offset and size. */
const size_t offset_words = offset * sizeof(u32);
const size_t raw_size = raw_count * sizeof(u32);
/* Fast case is TLS -> TLS, do raw memcpy if we can. */
if (!dst_user && !src_user) {
std::memcpy(dst_msg_ptr + offset, src_msg_ptr + offset, raw_size);
} else if (src_user) {
/* Determine how much fast size we can copy. */
const size_t max_fast_size = std::min<size_t>(offset_words + raw_size, PageSize);
const size_t fast_size = max_fast_size - offset_words;
/* Determine dst state; if user buffer, we require heap, and otherwise only linear mapped (to enable tls use). */
const auto dst_state = dst_user ? KMemoryState_FlagReferenceCounted : KMemoryState_FlagLinearMapped;
/* Determine the dst permission. User buffer should be unmapped + read, TLS should be user readable. */
const KMemoryPermission dst_perm = static_cast<KMemoryPermission>(dst_user ? (KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite) : KMemoryPermission_UserReadWrite);
/* Perform the fast part of the copy. */
R_TRY(dst_page_table.CopyMemoryFromKernelToLinear(dst_message_buffer + offset_words, fast_size,
dst_state, dst_state,
dst_perm,
KMemoryAttribute_Uncached, KMemoryAttribute_None,
reinterpret_cast<uintptr_t>(src_msg_ptr) + offset_words));
/* If the fast part of the copy didn't get everything, perform the slow part of the copy. */
if (fast_size < raw_size) {
R_TRY(dst_page_table.CopyMemoryFromHeapToHeap(dst_page_table, dst_message_buffer + max_fast_size, raw_size - fast_size,
dst_state, dst_state,
dst_perm,
KMemoryAttribute_Uncached, KMemoryAttribute_None,
src_message_buffer + max_fast_size,
KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelRead),
KMemoryAttribute_Uncached | KMemoryAttribute_Locked, KMemoryAttribute_Locked));
}
} else /* if (dst_user) */ {
/* The destination is a user buffer, so it should be unmapped + readable. */
constexpr KMemoryPermission DestinationPermission = static_cast<KMemoryPermission>(KMemoryPermission_NotMapped | KMemoryPermission_KernelReadWrite);
/* Copy the memory. */
R_TRY(dst_page_table.CopyMemoryFromUserToLinear(dst_message_buffer + offset_words, raw_size,
KMemoryState_FlagReferenceCounted, KMemoryState_FlagReferenceCounted,
DestinationPermission,
KMemoryAttribute_Uncached, KMemoryAttribute_None,
src_message_buffer + offset_words));
}
}
}
/* Perform (and validate) any remaining cleanup. */
R_RETURN(CleanupMap(request, std::addressof(src_process), std::addressof(dst_page_table)));
}
ALWAYS_INLINE void ReplyAsyncError(KProcess *to_process, uintptr_t to_msg_buf, size_t to_msg_buf_size, Result result) {
/* Convert the buffer to a physical address. */
KPhysicalAddress phys_addr;
to_process->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), KProcessAddress(to_msg_buf));
/* Convert the physical address to a linear pointer. */
u32 *to_msg = GetPointer<u32>(KPageTable::GetHeapVirtualAddress(phys_addr));
/* Set the error. */
ipc::MessageBuffer msg(to_msg, to_msg_buf_size);
msg.SetAsyncResult(result);
}
}
void KServerSession::Destroy() {
MESOSPHERE_ASSERT_THIS();
m_parent->OnServerClosed();
this->CleanupRequests();
m_parent->Close();
}
Result KServerSession::ReceiveRequest(uintptr_t server_message, uintptr_t server_buffer_size, KPhysicalAddress server_message_paddr) {
MESOSPHERE_ASSERT_THIS();
/* Lock the session. */
KScopedLightLock lk(m_lock);
/* Get the request and client thread. */
KSessionRequest *request;
KThread *client_thread;
{
KScopedSchedulerLock sl;
/* Ensure that we can service the request. */
R_UNLESS(!m_parent->IsClientClosed(), svc::ResultSessionClosed());
/* Ensure we aren't already servicing a request. */
R_UNLESS(m_current_request == nullptr, svc::ResultNotFound());
/* Ensure we have a request to service. */
R_UNLESS(!m_request_list.empty(), svc::ResultNotFound());
/* Pop the first request from the list. */
request = std::addressof(m_request_list.front());
m_request_list.pop_front();
/* Get the thread for the request. */
client_thread = request->GetThread();
R_UNLESS(client_thread != nullptr, svc::ResultSessionClosed());
/* Open the client thread. */
client_thread->Open();
}
ON_SCOPE_EXIT { client_thread->Close(); };
/* Set the request as our current. */
m_current_request = request;
/* Get the client address. */
uintptr_t client_message = request->GetAddress();
size_t client_buffer_size = request->GetSize();
bool recv_list_broken = false;
/* Receive the message. */
Result result = ReceiveMessage(recv_list_broken, server_message, server_buffer_size, server_message_paddr, *client_thread, client_message, client_buffer_size, this, request);
/* Handle cleanup on receive failure. */
if (R_FAILED(result)) {
/* Cache the result to return it to the client. */
const Result result_for_client = result;
/* Clear the current request. */
{
KScopedSchedulerLock sl;
MESOSPHERE_ASSERT(m_current_request == request);
m_current_request = nullptr;
if (!m_request_list.empty()) {
this->NotifyAvailable();
}
}
/* Reply to the client. */
{
/* After we reply, close our reference to the request. */
ON_SCOPE_EXIT { request->Close(); };
/* Get the event to check whether the request is async. */
if (KEvent *event = request->GetEvent(); event != nullptr) {
/* The client sent an async request. */
KProcess *client = client_thread->GetOwnerProcess();
auto &client_pt = client->GetPageTable();
/* Send the async result. */
if (R_FAILED(result_for_client)) {
ReplyAsyncError(client, client_message, client_buffer_size, result_for_client);
}
/* Unlock the client buffer. */
/* NOTE: Nintendo does not check the result of this. */
client_pt.UnlockForIpcUserBuffer(client_message, client_buffer_size);
/* Signal the event. */
event->Signal();
} else {
/* End the client thread's wait. */
KScopedSchedulerLock sl;
if (!client_thread->IsTerminationRequested()) {
client_thread->EndWait(result_for_client);
}
}
}
/* Set the server result. */
if (recv_list_broken) {
result = svc::ResultReceiveListBroken();
} else {
result = svc::ResultNotFound();
}
}
R_RETURN(result);
}
Result KServerSession::SendReply(uintptr_t server_message, uintptr_t server_buffer_size, KPhysicalAddress server_message_paddr) {
MESOSPHERE_ASSERT_THIS();
/* Lock the session. */
KScopedLightLock lk(m_lock);
/* Get the request. */
KSessionRequest *request;
{
KScopedSchedulerLock sl;
/* Get the current request. */
request = m_current_request;
R_UNLESS(request != nullptr, svc::ResultInvalidState());
/* Clear the current request, since we're processing it. */
m_current_request = nullptr;
if (!m_request_list.empty()) {
this->NotifyAvailable();
}
}
/* Close reference to the request once we're done processing it. */
ON_SCOPE_EXIT { request->Close(); };
/* Extract relevant information from the request. */
const uintptr_t client_message = request->GetAddress();
const size_t client_buffer_size = request->GetSize();
KThread *client_thread = request->GetThread();
KEvent *event = request->GetEvent();
/* Check whether we're closed. */
const bool closed = (client_thread == nullptr || m_parent->IsClientClosed());
Result result;
if (!closed) {
/* If we're not closed, send the reply. */
result = SendMessage(server_message, server_buffer_size, server_message_paddr, *client_thread, client_message, client_buffer_size, this, request);
} else {
/* Otherwise, we'll need to do some cleanup. */
KProcess *server_process = request->GetServerProcess();
KProcess *client_process = (client_thread != nullptr) ? client_thread->GetOwnerProcess() : nullptr;
KProcessPageTable *client_page_table = (client_process != nullptr) ? std::addressof(client_process->GetPageTable()) : nullptr;
/* Cleanup server handles. */
result = CleanupServerHandles(server_message, server_buffer_size, server_message_paddr);
/* Cleanup mappings. */
Result cleanup_map_result = CleanupMap(request, server_process, client_page_table);
/* If we successfully cleaned up handles, use the map cleanup result as our result. */
if (R_SUCCEEDED(result)) {
result = cleanup_map_result;
}
}
/* Select a result for the client. */
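/* NOTE: Unless the session closed cleanly, the server observes success here; any transfer failure from SendMessage is reported to the client alone. */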
Result client_result = result;
if (closed && R_SUCCEEDED(result)) {
result = svc::ResultSessionClosed();
client_result = svc::ResultSessionClosed();
} else {
result = ResultSuccess();
}
/* If there's a client thread, update it. */
if (client_thread != nullptr) {
if (event != nullptr) {
/* Get the client process/page table. */
KProcess *client_process = client_thread->GetOwnerProcess();
KProcessPageTable *client_page_table = std::addressof(client_process->GetPageTable());
/* If we need to, reply with an async error. */
if (R_FAILED(client_result)) {
ReplyAsyncError(client_process, client_message, client_buffer_size, client_result);
}
/* Unlock the client buffer. */
/* NOTE: Nintendo does not check the result of this. */
client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size);
/* Signal the event. */
event->Signal();
} else {
/* End the client thread's wait. */
KScopedSchedulerLock sl;
if (!client_thread->IsTerminationRequested()) {
client_thread->EndWait(client_result);
}
}
}
R_RETURN(result);
}
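/* Editor's sketch (not part of the original source): a hypothetical in-kernel caller */
/* would pair the two entry points above roughly as follows; ProcessMessage is an */
/* assumed placeholder, not a real function: */
/* */
/*     Result ServeOnce(KServerSession *session, uintptr_t buf, uintptr_t buf_size, KPhysicalAddress buf_paddr) { */
/*         R_TRY(session->ReceiveRequest(buf, buf_size, buf_paddr)); */
/*         ProcessMessage(buf, buf_size); */
/*         R_RETURN(session->SendReply(buf, buf_size, buf_paddr)); */
/*     } */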
Result KServerSession::OnRequest(KSessionRequest *request) {
MESOSPHERE_ASSERT_THIS();
/* Create the wait queue. */
ThreadQueueImplForKServerSessionRequest wait_queue;
/* Handle the request. */
{
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* Ensure that we can handle new requests. */
R_UNLESS(!m_parent->IsServerClosed(), svc::ResultSessionClosed());
/* Check that we're not terminating. */
R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());
/* Get whether we're empty. */
const bool was_empty = m_request_list.empty();
/* Add the request to the list. */
request->Open();
m_request_list.push_back(*request);
/* If we were empty, signal. */
if (was_empty) {
this->NotifyAvailable();
}
/* If we have a request, this is asynchronous, and we don't need to wait. */
R_SUCCEED_IF(request->GetEvent() != nullptr);
/* This is a synchronous request, so we should wait for our request to complete. */
GetCurrentThread().BeginWait(std::addressof(wait_queue));
}
R_RETURN(GetCurrentThread().GetWaitResult());
}
bool KServerSession::IsSignaledImpl() const {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* If the client is closed, we're always signaled. */
if (m_parent->IsClientClosed()) {
return true;
}
/* Otherwise, we're signaled if we have a request and aren't handling one. */
return !m_request_list.empty() && m_current_request == nullptr;
}
bool KServerSession::IsSignaled() const {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
return this->IsSignaledImpl();
}
void KServerSession::CleanupRequests() {
MESOSPHERE_ASSERT_THIS();
KScopedLightLock lk(m_lock);
/* Clean up any pending requests. */
while (true) {
/* Get the next request. */
KSessionRequest *request = nullptr;
{
KScopedSchedulerLock sl;
if (m_current_request) {
/* Choose the current request if we have one. */
request = m_current_request;
m_current_request = nullptr;
} else if (!m_request_list.empty()) {
/* Pop the request from the front of the list. */
request = std::addressof(m_request_list.front());
m_request_list.pop_front();
}
}
/* If there's no request, we're done. */
if (request == nullptr) {
break;
}
/* Close a reference to the request once it's cleaned up. */
ON_SCOPE_EXIT { request->Close(); };
/* Extract relevant information from the request. */
const uintptr_t client_message = request->GetAddress();
const size_t client_buffer_size = request->GetSize();
KThread *client_thread = request->GetThread();
KEvent *event = request->GetEvent();
KProcess *server_process = request->GetServerProcess();
KProcess *client_process = (client_thread != nullptr) ? client_thread->GetOwnerProcess() : nullptr;
KProcessPageTable *client_page_table = (client_process != nullptr) ? std::addressof(client_process->GetPageTable()) : nullptr;
/* Cleanup the mappings. */
Result result = CleanupMap(request, server_process, client_page_table);
/* If there's a client thread, update it. */
if (client_thread != nullptr) {
if (event != nullptr) {
/* We need to reply async. */
ReplyAsyncError(client_process, client_message, client_buffer_size, (R_SUCCEEDED(result) ? svc::ResultSessionClosed() : result));
/* Unlock the client buffer. */
/* NOTE: Nintendo does not check the result of this. */
client_page_table->UnlockForIpcUserBuffer(client_message, client_buffer_size);
/* Signal the event. */
event->Signal();
} else {
/* End the client thread's wait. */
KScopedSchedulerLock sl;
if (!client_thread->IsTerminationRequested()) {
client_thread->EndWait(R_SUCCEEDED(result) ? svc::ResultSessionClosed() : result);
}
}
}
}
}
void KServerSession::OnClientClosed() {
MESOSPHERE_ASSERT_THIS();
KScopedLightLock lk(m_lock);
/* Handle any pending requests. */
KSessionRequest *prev_request = nullptr;
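/* NOTE: prev_request guards against re-processing the in-flight request: it stays set as m_current_request across iterations, so we visit it at most once. */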
while (true) {
/* Declare variables for processing the request. */
KSessionRequest *request = nullptr;
KEvent *event = nullptr;
KThread *thread = nullptr;
bool cur_request = false;
bool terminate = false;
/* Get the next request. */
{
KScopedSchedulerLock sl;
if (m_current_request != nullptr && m_current_request != prev_request) {
/* Set the request, open a reference as we process it. */
request = m_current_request;
request->Open();
cur_request = true;
/* Get thread and event for the request. */
thread = request->GetThread();
event = request->GetEvent();
/* If the thread is terminating, handle that. */
if (thread->IsTerminationRequested()) {
request->ClearThread();
request->ClearEvent();
terminate = true;
}
prev_request = request;
} else if (!m_request_list.empty()) {
/* Pop the request from the front of the list. */
request = std::addressof(m_request_list.front());
m_request_list.pop_front();
/* Get thread and event for the request. */
thread = request->GetThread();
event = request->GetEvent();
}
}
/* If there are no requests, we're done. */
if (request == nullptr) {
break;
}
/* All requests must have threads. */
MESOSPHERE_ASSERT(thread != nullptr);
/* Ensure that we close the request when done. */
ON_SCOPE_EXIT { request->Close(); };
/* If we're terminating, close a reference to the thread and event. */
if (terminate) {
thread->Close();
if (event != nullptr) {
event->Close();
}
}
/* If we need to, reply. */
if (event != nullptr && !cur_request) {
/* There must be no mappings. */
MESOSPHERE_ASSERT(request->GetSendCount() == 0);
MESOSPHERE_ASSERT(request->GetReceiveCount() == 0);
MESOSPHERE_ASSERT(request->GetExchangeCount() == 0);
/* Get the process and page table. */
KProcess *client_process = thread->GetOwnerProcess();
auto &client_pt = client_process->GetPageTable();
/* Reply to the request. */
ReplyAsyncError(client_process, request->GetAddress(), request->GetSize(), svc::ResultSessionClosed());
/* Unlock the buffer. */
/* NOTE: Nintendo does not check the result of this. */
client_pt.UnlockForIpcUserBuffer(request->GetAddress(), request->GetSize());
/* Signal the event. */
event->Signal();
}
}
/* Notify. */
this->NotifyAvailable(svc::ResultSessionClosed());
}
void KServerSession::Dump() {
MESOSPHERE_ASSERT_THIS();
KScopedLightLock lk(m_lock);
{
KScopedSchedulerLock sl;
MESOSPHERE_RELEASE_LOG("Dump Session %p\n", this);
/* Dump current request. */
bool has_request = false;
if (m_current_request != nullptr) {
KThread *thread = m_current_request->GetThread();
const s32 thread_id = thread != nullptr ? static_cast<s32>(thread->GetId()) : -1;
MESOSPHERE_RELEASE_LOG(" CurrentReq %p Thread=%p ID=%d\n", m_current_request, thread, thread_id);
has_request = true;
}
/* Dump all requests in list. */
for (auto it = m_request_list.begin(); it != m_request_list.end(); ++it) {
KThread *thread = it->GetThread();
const s32 thread_id = thread != nullptr ? static_cast<s32>(thread->GetId()) : -1;
MESOSPHERE_RELEASE_LOG(" Req %p Thread=%p ID=%d\n", m_current_request, thread, thread_id);
has_request = true;
}
/* If we didn't have any requests, print so. */
if (!has_request) {
MESOSPHERE_RELEASE_LOG(" None\n");
}
}
}
}
#pragma GCC pop_options
| 73,763 | C++ | .cpp | 1,159 | 44.946506 | 299 | 0.544102 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,950 | kern_k_dump_object.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_dump_object.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::KDumpObject {
namespace {
constexpr const char * const ThreadStates[] = {
[KThread::ThreadState_Initialized] = "Initialized",
[KThread::ThreadState_Waiting] = "Waiting",
[KThread::ThreadState_Runnable] = "Runnable",
[KThread::ThreadState_Terminated] = "Terminated",
};
void DumpThread(KThread *thread) {
if (KProcess *process = thread->GetOwnerProcess(); process != nullptr) {
MESOSPHERE_RELEASE_LOG("Thread ID=%5lu pid=%3lu %-11s Pri=%2d %-11s KernelStack=%4zu/%4zu Run=%d Ideal=%d (%d) Affinity=%016lx (%016lx)\n",
thread->GetId(), process->GetId(), process->GetName(), thread->GetPriority(), ThreadStates[thread->GetState()],
thread->GetKernelStackUsage(), PageSize, thread->GetActiveCore(), thread->GetIdealVirtualCore(), thread->GetIdealPhysicalCore(),
thread->GetVirtualAffinityMask(), thread->GetAffinityMask().GetAffinityMask());
MESOSPHERE_RELEASE_LOG(" State: 0x%04x Suspend: 0x%04x Dpc: 0x%x\n", thread->GetRawState(), thread->GetSuspendFlags(), thread->GetDpc());
MESOSPHERE_RELEASE_LOG(" TLS: %p (%p)\n", GetVoidPointer(thread->GetThreadLocalRegionAddress()), thread->GetThreadLocalRegionHeapAddress());
} else {
MESOSPHERE_RELEASE_LOG("Thread ID=%5lu pid=%3d %-11s Pri=%2d %-11s KernelStack=%4zu/%4zu Run=%d Ideal=%d (%d) Affinity=%016lx (%016lx)\n",
thread->GetId(), -1, "(kernel)", thread->GetPriority(), ThreadStates[thread->GetState()],
thread->GetKernelStackUsage(), PageSize, thread->GetActiveCore(), thread->GetIdealVirtualCore(), thread->GetIdealPhysicalCore(),
thread->GetVirtualAffinityMask(), thread->GetAffinityMask().GetAffinityMask());
}
}
void DumpThreadCallStack(KThread *thread) {
if (KProcess *process = thread->GetOwnerProcess(); process != nullptr) {
MESOSPHERE_RELEASE_LOG("Thread ID=%5lu pid=%3lu %-11s Pri=%2d %-11s KernelStack=%4zu/%4zu\n",
thread->GetId(), process->GetId(), process->GetName(), thread->GetPriority(), ThreadStates[thread->GetState()], thread->GetKernelStackUsage(), PageSize);
KDebug::PrintRegister(thread);
KDebug::PrintBacktrace(thread);
} else {
MESOSPHERE_RELEASE_LOG("Thread ID=%5lu pid=%3d %-11s Pri=%2d %-11s KernelStack=%4zu/%4zu\n",
thread->GetId(), -1, "(kernel)", thread->GetPriority(), ThreadStates[thread->GetState()], thread->GetKernelStackUsage(), PageSize);
}
}
void DumpHandle(const KProcess::ListAccessor &accessor, KProcess *process) {
MESOSPHERE_RELEASE_LOG("Process ID=%lu (%s)\n", process->GetId(), process->GetName());
const auto end = accessor.end();
const auto &handle_table = process->GetHandleTable();
const size_t max_handles = handle_table.GetTableSize();
for (size_t i = 0; i < max_handles; ++i) {
/* Get the object + handle. */
ams::svc::Handle handle = ams::svc::InvalidHandle;
KScopedAutoObject obj = handle_table.GetObjectByIndex(std::addressof(handle), i);
if (obj.IsNotNull()) {
if (auto *target = obj->DynamicCast<KServerSession *>(); target != nullptr) {
MESOSPHERE_RELEASE_LOG("Handle %08x Obj=%p Ref=%d Type=%s Client=%p\n", handle, obj.GetPointerUnsafe(), obj->GetReferenceCount() - 1, obj->GetTypeName(), std::addressof(target->GetParent()->GetClientSession()));
target->Dump();
} else if (auto *target = obj->DynamicCast<KClientSession *>(); target != nullptr) {
MESOSPHERE_RELEASE_LOG("Handle %08x Obj=%p Ref=%d Type=%s Server=%p\n", handle, obj.GetPointerUnsafe(), obj->GetReferenceCount() - 1, obj->GetTypeName(), std::addressof(target->GetParent()->GetServerSession()));
} else if (auto *target = obj->DynamicCast<KThread *>(); target != nullptr) {
KProcess *target_owner = target->GetOwnerProcess();
const s32 owner_pid = target_owner != nullptr ? static_cast<s32>(target_owner->GetId()) : -1;
MESOSPHERE_RELEASE_LOG("Handle %08x Obj=%p Ref=%d Type=%s ID=%d PID=%d\n", handle, obj.GetPointerUnsafe(), obj->GetReferenceCount() - 1, obj->GetTypeName(), static_cast<s32>(target->GetId()), owner_pid);
} else if (auto *target = obj->DynamicCast<KProcess *>(); target != nullptr) {
MESOSPHERE_RELEASE_LOG("Handle %08x Obj=%p Ref=%d Type=%s ID=%d\n", handle, obj.GetPointerUnsafe(), obj->GetReferenceCount() - 1, obj->GetTypeName(), static_cast<s32>(target->GetId()));
} else if (auto *target = obj->DynamicCast<KSharedMemory *>(); target != nullptr) {
/* Find the owner. */
KProcess *target_owner = nullptr;
for (auto it = accessor.begin(); it != end; ++it) {
if (static_cast<KProcess *>(std::addressof(*it))->GetId() == target->GetOwnerProcessId()) {
target_owner = static_cast<KProcess *>(std::addressof(*it));
break;
}
}
MESOSPHERE_ASSERT(target_owner != nullptr);
MESOSPHERE_RELEASE_LOG("Handle %08x Obj=%p Ref=%d Type=%s Size=%zu KB OwnerPID=%d (%s)\n", handle, obj.GetPointerUnsafe(), obj->GetReferenceCount() - 1, obj->GetTypeName(), target->GetSize() / 1_KB, static_cast<s32>(target_owner->GetId()), target_owner->GetName());
} else if (auto *target = obj->DynamicCast<KTransferMemory *>(); target != nullptr) {
KProcess *target_owner = target->GetOwner();
MESOSPHERE_RELEASE_LOG("Handle %08x Obj=%p Ref=%d Type=%s OwnerPID=%d (%s) OwnerAddress=%lx Size=%zu KB\n", handle, obj.GetPointerUnsafe(), obj->GetReferenceCount() - 1, obj->GetTypeName(), static_cast<s32>(target_owner->GetId()), target_owner->GetName(), GetInteger(target->GetSourceAddress()), target->GetSize() / 1_KB);
} else if (auto *target = obj->DynamicCast<KCodeMemory *>(); target != nullptr) {
KProcess *target_owner = target->GetOwner();
MESOSPHERE_RELEASE_LOG("Handle %08x Obj=%p Ref=%d Type=%s OwnerPID=%d (%s) OwnerAddress=%lx Size=%zu KB\n", handle, obj.GetPointerUnsafe(), obj->GetReferenceCount() - 1, obj->GetTypeName(), static_cast<s32>(target_owner->GetId()), target_owner->GetName(), GetInteger(target->GetSourceAddress()), target->GetSize() / 1_KB);
} else if (auto *target = obj->DynamicCast<KInterruptEvent *>(); target != nullptr) {
MESOSPHERE_RELEASE_LOG("Handle %08x Obj=%p Ref=%d Type=%s irq=%d\n", handle, obj.GetPointerUnsafe(), obj->GetReferenceCount() - 1, obj->GetTypeName(), target->GetInterruptId());
} else if (auto *target = obj->DynamicCast<KEvent *>(); target != nullptr) {
MESOSPHERE_RELEASE_LOG("Handle %08x Obj=%p Ref=%d Type=%s\n", handle, obj.GetPointerUnsafe(), obj->GetReferenceCount() - 1, obj->GetTypeName());
} else if (auto *target = obj->DynamicCast<KReadableEvent *>(); target != nullptr) {
if (KEvent *event = target->GetParent(); event != nullptr) {
MESOSPHERE_RELEASE_LOG("Handle %08x Obj=%p Ref=%d Type=%s Parent=%p\n", handle, obj.GetPointerUnsafe(), obj->GetReferenceCount() - 1, obj->GetTypeName(), event);
} else {
MESOSPHERE_RELEASE_LOG("Handle %08x Obj=%p Ref=%d Type=%s\n", handle, obj.GetPointerUnsafe(), obj->GetReferenceCount() - 1, obj->GetTypeName());
}
} else {
MESOSPHERE_RELEASE_LOG("Handle %08x Obj=%p Ref=%d Type=%s\n", handle, obj.GetPointerUnsafe(), obj->GetReferenceCount() - 1, obj->GetTypeName());
}
if (auto *sync = obj->DynamicCast<KSynchronizationObject *>(); sync != nullptr) {
sync->DumpWaiters();
}
}
}
MESOSPHERE_RELEASE_LOG("%zu(max %zu)/%zu used.\n", handle_table.GetCount(), max_handles, handle_table.GetTableSize());
MESOSPHERE_RELEASE_LOG("\n\n");
}
void DumpMemory(KProcess *process) {
const auto process_id = process->GetId();
MESOSPHERE_RELEASE_LOG("Process ID=%3lu (%s)\n", process_id, process->GetName());
/* Dump the memory blocks. */
process->GetPageTable().DumpMemoryBlocks();
/* Collect information about memory totals. */
const size_t code = process->GetPageTable().GetCodeSize();
const size_t code_data = process->GetPageTable().GetCodeDataSize();
const size_t alias_code = process->GetPageTable().GetAliasCodeSize();
const size_t alias_code_data = process->GetPageTable().GetAliasCodeDataSize();
const size_t normal = process->GetPageTable().GetNormalMemorySize();
const size_t main_stack = process->GetMainStackSize();
size_t shared = 0;
{
KSharedMemory::ListAccessor accessor;
const auto end = accessor.end();
for (auto it = accessor.begin(); it != end; ++it) {
KSharedMemory *shared_mem = static_cast<KSharedMemory *>(std::addressof(*it));
if (shared_mem->GetOwnerProcessId() == process_id) {
shared += shared_mem->GetSize();
}
}
}
/* Dump the totals. */
MESOSPHERE_RELEASE_LOG("---\n");
MESOSPHERE_RELEASE_LOG("Code %8zu KB\n", code / 1_KB);
MESOSPHERE_RELEASE_LOG("CodeData %8zu KB\n", code_data / 1_KB);
MESOSPHERE_RELEASE_LOG("AliasCode %8zu KB\n", alias_code / 1_KB);
MESOSPHERE_RELEASE_LOG("AliasCodeData %8zu KB\n", alias_code_data / 1_KB);
MESOSPHERE_RELEASE_LOG("Heap %8zu KB\n", normal / 1_KB);
MESOSPHERE_RELEASE_LOG("SharedMemory %8zu KB\n", shared / 1_KB);
MESOSPHERE_RELEASE_LOG("InitialStack %8zu KB\n", main_stack / 1_KB);
MESOSPHERE_RELEASE_LOG("---\n");
MESOSPHERE_RELEASE_LOG("TOTAL %8zu KB\n", (code + code_data + alias_code + alias_code_data + normal + main_stack + shared) / 1_KB);
MESOSPHERE_RELEASE_LOG("\n\n");
}
void DumpPageTable(KProcess *process) {
MESOSPHERE_RELEASE_LOG("Process ID=%3lu (%s)\n", process->GetId(), process->GetName());
process->GetPageTable().DumpPageTable();
MESOSPHERE_RELEASE_LOG("\n\n");
}
void DumpProcess(KProcess *process) {
MESOSPHERE_RELEASE_LOG("Process ID=%3lu index=%3zu State=%d (%s)\n", process->GetId(), process->GetSlabIndex(), process->GetState(), process->GetName());
}
void DumpPort(const KProcess::ListAccessor &accessor, KProcess *process) {
MESOSPHERE_RELEASE_LOG("Dump Port Process ID=%lu (%s)\n", process->GetId(), process->GetName());
const auto end = accessor.end();
const auto &handle_table = process->GetHandleTable();
const size_t max_handles = handle_table.GetTableSize();
for (size_t i = 0; i < max_handles; ++i) {
/* Get the object + handle. */
ams::svc::Handle handle = ams::svc::InvalidHandle;
KScopedAutoObject obj = handle_table.GetObjectByIndex(std::addressof(handle), i);
if (obj.IsNull()) {
continue;
}
/* Process the object as a port. */
if (auto *server = obj->DynamicCast<KServerPort *>(); server != nullptr) {
const KClientPort *client = std::addressof(server->GetParent()->GetClientPort());
const uintptr_t port_name = server->GetParent()->GetName();
/* Get the port name. */
char name[9] = {};
{
/* Find the client port process. */
KProcess *client_port_process = nullptr;
ON_SCOPE_EXIT { if (client_port_process != nullptr) { client_port_process->Close(); } };
{
for (auto it = accessor.begin(); it != end && client_port_process == nullptr; ++it) {
KProcess *cur = static_cast<KProcess *>(std::addressof(*it));
for (size_t j = 0; j < cur->GetHandleTable().GetTableSize(); ++j) {
ams::svc::Handle cur_h = ams::svc::InvalidHandle;
KScopedAutoObject cur_o = cur->GetHandleTable().GetObjectByIndex(std::addressof(cur_h), j);
if (cur_o.IsNotNull()) {
if (cur_o.GetPointerUnsafe() == client) {
client_port_process = cur;
client_port_process->Open();
break;
}
}
}
}
}
/* Read the port name. */
if (client_port_process != nullptr) {
if (R_FAILED(client_port_process->GetPageTable().CopyMemoryFromLinearToKernel(KProcessAddress(name), 8, port_name, KMemoryState_None, KMemoryState_None, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None))) {
std::memset(name, 0, sizeof(name));
}
for (size_t i = 0; i < 8 && name[i] != 0; i++) {
if (name[i] > 0x7F) {
std::memset(name, 0, sizeof(name));
break;
}
}
}
}
MESOSPHERE_RELEASE_LOG("%-9s: Handle %08x Obj=%p Cur=%3d Peak=%3d Max=%3d\n", name, handle, obj.GetPointerUnsafe(), client->GetNumSessions(), client->GetPeakSessions(), client->GetMaxSessions());
/* Identify any sessions. */
{
for (auto it = accessor.begin(); it != end; ++it) {
KProcess *cur = static_cast<KProcess *>(std::addressof(*it));
for (size_t j = 0; j < cur->GetHandleTable().GetTableSize(); ++j) {
ams::svc::Handle cur_h = ams::svc::InvalidHandle;
KScopedAutoObject cur_o = cur->GetHandleTable().GetObjectByIndex(std::addressof(cur_h), j);
if (cur_o.IsNull()) {
continue;
}
if (auto *session = cur_o->DynamicCast<KClientSession *>(); session != nullptr && session->GetParent()->GetParent() == client) {
MESOSPHERE_RELEASE_LOG(" Client %p Server %p %-12s: PID=%3lu\n", session, std::addressof(session->GetParent()->GetServerSession()), cur->GetName(), cur->GetId());
}
}
}
}
}
}
}
ALWAYS_INLINE s64 GetTickOrdered() {
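/* NOTE: The empty asm statements below are compiler barriers only; they keep the tick read from being reordered relative to surrounding accesses, without emitting any CPU barrier instruction. */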
__asm__ __volatile__("" ::: "memory");
const s64 tick = KHardwareTimer::GetTick();
__asm__ __volatile__("" ::: "memory");
return tick;
}
}
void DumpThread() {
MESOSPHERE_RELEASE_LOG("Dump Thread\n");
{
/* Lock the list. */
KThread::ListAccessor accessor;
const auto end = accessor.end();
/* Dump each thread. */
for (auto it = accessor.begin(); it != end; ++it) {
DumpThread(static_cast<KThread *>(std::addressof(*it)));
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpThread(u64 thread_id) {
MESOSPHERE_RELEASE_LOG("Dump Thread\n");
{
/* Find and dump the target thread. */
if (KThread *thread = KThread::GetThreadFromId(thread_id); thread != nullptr) {
ON_SCOPE_EXIT { thread->Close(); };
DumpThread(thread);
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpThreadCallStack() {
MESOSPHERE_RELEASE_LOG("Dump Thread\n");
{
/* Lock the list. */
KThread::ListAccessor accessor;
const auto end = accessor.end();
/* Dump each thread. */
for (auto it = accessor.begin(); it != end; ++it) {
DumpThreadCallStack(static_cast<KThread *>(std::addressof(*it)));
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpThreadCallStack(u64 thread_id) {
MESOSPHERE_RELEASE_LOG("Dump Thread\n");
{
/* Find and dump the target thread. */
if (KThread *thread = KThread::GetThreadFromId(thread_id); thread != nullptr) {
ON_SCOPE_EXIT { thread->Close(); };
DumpThreadCallStack(thread);
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpKernelObject() {
MESOSPHERE_LOG("Dump Kernel Object\n");
{
/* Static slab heaps. */
{
#define DUMP_KSLABOBJ(__OBJECT__) \
MESOSPHERE_RELEASE_LOG(#__OBJECT__ "\n"); \
MESOSPHERE_RELEASE_LOG(" Cur=%3zu Peak=%3zu Max=%3zu\n", __OBJECT__::GetSlabHeapSize() - __OBJECT__::GetNumRemaining(), __OBJECT__::GetPeakIndex(), __OBJECT__::GetSlabHeapSize())
DUMP_KSLABOBJ(KEvent);
DUMP_KSLABOBJ(KInterruptEvent);
DUMP_KSLABOBJ(KProcess);
DUMP_KSLABOBJ(KThread);
DUMP_KSLABOBJ(KPort);
DUMP_KSLABOBJ(KSharedMemory);
DUMP_KSLABOBJ(KTransferMemory);
DUMP_KSLABOBJ(KDeviceAddressSpace);
DUMP_KSLABOBJ(KDebug);
DUMP_KSLABOBJ(KSession);
DUMP_KSLABOBJ(KLightSession);
DUMP_KSLABOBJ(KThreadLocalPage);
DUMP_KSLABOBJ(KObjectName);
DUMP_KSLABOBJ(KEventInfo);
DUMP_KSLABOBJ(KSessionRequest);
DUMP_KSLABOBJ(KResourceLimit);
DUMP_KSLABOBJ(KIoPool);
DUMP_KSLABOBJ(KIoRegion);
#undef DUMP_KSLABOBJ
}
MESOSPHERE_RELEASE_LOG("\n");
/* Dynamic slab heaps. */
{
/* Memory block slabs. */
{
MESOSPHERE_RELEASE_LOG("App Memory Block\n");
auto &app = Kernel::GetApplicationSystemResource().GetMemoryBlockSlabManager();
MESOSPHERE_RELEASE_LOG(" Cur=%6zu Peak=%6zu Max=%6zu\n", app.GetUsed(), app.GetPeak(), app.GetCount());
MESOSPHERE_RELEASE_LOG("Sys Memory Block\n");
auto &sys = Kernel::GetSystemSystemResource().GetMemoryBlockSlabManager();
MESOSPHERE_RELEASE_LOG(" Cur=%6zu Peak=%6zu Max=%6zu\n", sys.GetUsed(), sys.GetPeak(), sys.GetCount());
}
/* KBlockInfo slab. */
{
MESOSPHERE_RELEASE_LOG("KBlockInfo\n");
auto &manager = Kernel::GetSystemSystemResource().GetBlockInfoManager();
MESOSPHERE_RELEASE_LOG(" Cur=%6zu Peak=%6zu Max=%6zu\n", manager.GetUsed(), manager.GetPeak(), manager.GetCount());
}
/* Page Table slab. */
{
MESOSPHERE_RELEASE_LOG("Page Table\n");
auto &manager = Kernel::GetSystemSystemResource().GetPageTableManager();
MESOSPHERE_RELEASE_LOG(" Cur=%6zu Peak=%6zu Max=%6zu\n", manager.GetUsed(), manager.GetPeak(), manager.GetCount());
}
}
MESOSPHERE_RELEASE_LOG("\n");
/* Process resources. */
{
KProcess::ListAccessor accessor;
size_t process_pts = 0;
const auto end = accessor.end();
for (auto it = accessor.begin(); it != end; ++it) {
KProcess *process = static_cast<KProcess *>(std::addressof(*it));
/* Count the number of threads. */
int threads = 0;
{
KThread::ListAccessor thr_accessor;
const auto thr_end = thr_accessor.end();
for (auto thr_it = thr_accessor.begin(); thr_it != thr_end; ++thr_it) {
KThread *thread = static_cast<KThread *>(std::addressof(*thr_it));
if (thread->GetOwnerProcess() == process) {
++threads;
}
}
}
/* Count the number of events. */
int events = 0;
{
KEvent::ListAccessor ev_accessor;
const auto ev_end = ev_accessor.end();
for (auto ev_it = ev_accessor.begin(); ev_it != ev_end; ++ev_it) {
KEvent *event = static_cast<KEvent *>(std::addressof(*ev_it));
if (event->GetOwner() == process) {
++events;
}
}
}
size_t pts = process->GetPageTable().CountPageTables();
process_pts += pts;
MESOSPHERE_RELEASE_LOG("%-12s: PID=%3lu Thread %4d / Event %4d / PageTable %5zu\n", process->GetName(), process->GetId(), threads, events, pts);
if (const auto &system_resource = process->GetSystemResource(); system_resource.IsSecureResource()) {
const auto &secure_resource = static_cast<const KSecureSystemResource &>(system_resource);
MESOSPHERE_RELEASE_LOG(" System Resource\n");
MESOSPHERE_RELEASE_LOG(" Cur=%6zu Peak=%6zu Max=%6zu\n", secure_resource.GetDynamicPageManager().GetUsed(), secure_resource.GetDynamicPageManager().GetPeak(), secure_resource.GetDynamicPageManager().GetCount());
MESOSPHERE_RELEASE_LOG(" Memory Block\n");
MESOSPHERE_RELEASE_LOG(" Cur=%6zu Peak=%6zu Max=%6zu\n", secure_resource.GetMemoryBlockSlabManager().GetUsed(), secure_resource.GetMemoryBlockSlabManager().GetPeak(), secure_resource.GetMemoryBlockSlabManager().GetCount());
MESOSPHERE_RELEASE_LOG(" Page Table\n");
MESOSPHERE_RELEASE_LOG(" Cur=%6zu Peak=%6zu Max=%6zu\n", secure_resource.GetPageTableManager().GetUsed(), secure_resource.GetPageTableManager().GetPeak(), secure_resource.GetPageTableManager().GetCount());
MESOSPHERE_RELEASE_LOG(" Block Info\n");
MESOSPHERE_RELEASE_LOG(" Cur=%6zu Peak=%6zu Max=%6zu\n", secure_resource.GetBlockInfoManager().GetUsed(), secure_resource.GetBlockInfoManager().GetPeak(), secure_resource.GetBlockInfoManager().GetCount());
}
}
MESOSPHERE_RELEASE_LOG("Process Page Table %zu\n", process_pts);
MESOSPHERE_RELEASE_LOG("Kernel Page Table %zu\n", Kernel::GetKernelPageTable().CountPageTables());
}
MESOSPHERE_RELEASE_LOG("\n");
/* Resource limits. */
{
auto &sys_rl = Kernel::GetSystemResourceLimit();
u64 cur = sys_rl.GetCurrentValue(ams::svc::LimitableResource_PhysicalMemoryMax);
u64 lim = sys_rl.GetLimitValue(ams::svc::LimitableResource_PhysicalMemoryMax);
MESOSPHERE_RELEASE_LOG("System ResourceLimit PhysicalMemory 0x%01x_%08x / 0x%01x_%08x\n", static_cast<u32>(cur >> 32), static_cast<u32>(cur), static_cast<u32>(lim >> 32), static_cast<u32>(lim));
cur = sys_rl.GetCurrentValue(ams::svc::LimitableResource_ThreadCountMax);
lim = sys_rl.GetLimitValue(ams::svc::LimitableResource_ThreadCountMax);
MESOSPHERE_RELEASE_LOG("System ResourceLimit Thread %4lu / %4lu\n", cur, lim);
cur = sys_rl.GetCurrentValue(ams::svc::LimitableResource_EventCountMax);
lim = sys_rl.GetLimitValue(ams::svc::LimitableResource_EventCountMax);
MESOSPHERE_RELEASE_LOG("System ResourceLimit Event %4lu / %4lu\n", cur, lim);
cur = sys_rl.GetCurrentValue(ams::svc::LimitableResource_TransferMemoryCountMax);
lim = sys_rl.GetLimitValue(ams::svc::LimitableResource_TransferMemoryCountMax);
MESOSPHERE_RELEASE_LOG("System ResourceLimit TransferMemory %4lu / %4lu\n", cur, lim);
cur = sys_rl.GetCurrentValue(ams::svc::LimitableResource_SessionCountMax);
lim = sys_rl.GetLimitValue(ams::svc::LimitableResource_SessionCountMax);
MESOSPHERE_RELEASE_LOG("System ResourceLimit Session %4lu / %4lu\n", cur, lim);
{
KResourceLimit::ListAccessor accessor;
const auto end = accessor.end();
for (auto it = accessor.begin(); it != end; ++it) {
KResourceLimit *rl = static_cast<KResourceLimit *>(std::addressof(*it));
cur = rl->GetCurrentValue(ams::svc::LimitableResource_PhysicalMemoryMax);
lim = rl->GetLimitValue(ams::svc::LimitableResource_PhysicalMemoryMax);
MESOSPHERE_RELEASE_LOG("ResourceLimit %zu PhysicalMemory 0x%01x_%08x / 0x%01x_%08x\n", rl->GetSlabIndex(), static_cast<u32>(cur >> 32), static_cast<u32>(cur), static_cast<u32>(lim >> 32), static_cast<u32>(lim));
}
}
}
MESOSPHERE_RELEASE_LOG("\n");
/* Memory Manager. */
{
auto &mm = Kernel::GetMemoryManager();
u64 max = mm.GetSize();
u64 cur = max - mm.GetFreeSize();
MESOSPHERE_RELEASE_LOG("Kernel Heap Size 0x%01x_%08x / 0x%01x_%08x\n", static_cast<u32>(cur >> 32), static_cast<u32>(cur), static_cast<u32>(max >> 32), static_cast<u32>(max));
MESOSPHERE_RELEASE_LOG("\n");
max = mm.GetSize(KMemoryManager::Pool_Application);
cur = max - mm.GetFreeSize(KMemoryManager::Pool_Application);
MESOSPHERE_RELEASE_LOG("Application 0x%01x_%08x / 0x%01x_%08x\n", static_cast<u32>(cur >> 32), static_cast<u32>(cur), static_cast<u32>(max >> 32), static_cast<u32>(max));
mm.DumpFreeList(KMemoryManager::Pool_Application);
MESOSPHERE_RELEASE_LOG("\n");
max = mm.GetSize(KMemoryManager::Pool_Applet);
cur = max - mm.GetFreeSize(KMemoryManager::Pool_Applet);
MESOSPHERE_RELEASE_LOG("Applet 0x%01x_%08x / 0x%01x_%08x\n", static_cast<u32>(cur >> 32), static_cast<u32>(cur), static_cast<u32>(max >> 32), static_cast<u32>(max));
mm.DumpFreeList(KMemoryManager::Pool_Applet);
MESOSPHERE_RELEASE_LOG("\n");
max = mm.GetSize(KMemoryManager::Pool_System);
cur = max - mm.GetFreeSize(KMemoryManager::Pool_System);
MESOSPHERE_RELEASE_LOG("System 0x%01x_%08x / 0x%01x_%08x\n", static_cast<u32>(cur >> 32), static_cast<u32>(cur), static_cast<u32>(max >> 32), static_cast<u32>(max));
mm.DumpFreeList(KMemoryManager::Pool_System);
MESOSPHERE_RELEASE_LOG("\n");
max = mm.GetSize(KMemoryManager::Pool_SystemNonSecure);
cur = max - mm.GetFreeSize(KMemoryManager::Pool_SystemNonSecure);
MESOSPHERE_RELEASE_LOG("SystemNonSecure 0x%01x_%08x / 0x%01x_%08x\n", static_cast<u32>(cur >> 32), static_cast<u32>(cur), static_cast<u32>(max >> 32), static_cast<u32>(max));
mm.DumpFreeList(KMemoryManager::Pool_SystemNonSecure);
MESOSPHERE_RELEASE_LOG("\n");
}
MESOSPHERE_RELEASE_LOG("\n");
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpHandle() {
MESOSPHERE_RELEASE_LOG("Dump Handle\n");
{
/* Lock the list. */
KProcess::ListAccessor accessor;
const auto end = accessor.end();
/* Dump each process. */
for (auto it = accessor.begin(); it != end; ++it) {
DumpHandle(accessor, static_cast<KProcess *>(std::addressof(*it)));
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpHandle(u64 process_id) {
MESOSPHERE_RELEASE_LOG("Dump Handle\n");
{
/* Find and dump the target process. */
if (KProcess *process = KProcess::GetProcessFromId(process_id); process != nullptr) {
ON_SCOPE_EXIT { process->Close(); };
/* Lock the list. */
KProcess::ListAccessor accessor;
DumpHandle(accessor, process);
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpKernelMemory() {
MESOSPHERE_RELEASE_LOG("Dump Kernel Memory Info\n");
{
Kernel::GetKernelPageTable().DumpMemoryBlocks();
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpMemory() {
MESOSPHERE_RELEASE_LOG("Dump Memory Info\n");
{
/* Lock the list. */
KProcess::ListAccessor accessor;
const auto end = accessor.end();
/* Dump each process. */
for (auto it = accessor.begin(); it != end; ++it) {
DumpMemory(static_cast<KProcess *>(std::addressof(*it)));
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpMemory(u64 process_id) {
MESOSPHERE_RELEASE_LOG("Dump Memory Info\n");
{
/* Find and dump the target process. */
if (KProcess *process = KProcess::GetProcessFromId(process_id); process != nullptr) {
ON_SCOPE_EXIT { process->Close(); };
DumpMemory(process);
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpProcess() {
MESOSPHERE_RELEASE_LOG("Dump Process\n");
{
/* Lock the list. */
KProcess::ListAccessor accessor;
const auto end = accessor.end();
/* Dump each process. */
for (auto it = accessor.begin(); it != end; ++it) {
DumpProcess(static_cast<KProcess *>(std::addressof(*it)));
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpKernelPageTable() {
MESOSPHERE_RELEASE_LOG("Dump Kernel PageTable\n");
{
Kernel::GetKernelPageTable().DumpPageTable();
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpPageTable() {
MESOSPHERE_RELEASE_LOG("Dump Process\n");
{
/* Lock the list. */
KProcess::ListAccessor accessor;
const auto end = accessor.end();
/* Dump each process. */
for (auto it = accessor.begin(); it != end; ++it) {
DumpPageTable(static_cast<KProcess *>(std::addressof(*it)));
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpPageTable(u64 process_id) {
MESOSPHERE_RELEASE_LOG("Dump PageTable\n");
{
/* Find and dump the target process. */
if (KProcess *process = KProcess::GetProcessFromId(process_id); process != nullptr) {
ON_SCOPE_EXIT { process->Close(); };
DumpPageTable(process);
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpKernelCpuUtilization() {
MESOSPHERE_RELEASE_LOG("Dump Kernel Cpu Utilization\n");
constexpr size_t MaxObjects = 64;
{
/* Create tracking arrays. */
KAutoObject *objects[MaxObjects];
u32 cpu_time[MaxObjects];
s64 start_tick;
size_t i, n;
KDpcManager::Sync();
{
/* Lock the thread list. */
KThread::ListAccessor accessor;
/* Begin tracking. */
start_tick = GetTickOrdered();
/* Iterate, finding kernel threads. */
const auto end = accessor.end();
i = 0;
for (auto it = accessor.begin(); it != end; ++it) {
KThread *thread = static_cast<KThread *>(std::addressof(*it));
if (KProcess *process = thread->GetOwnerProcess(); process == nullptr) {
if (AMS_LIKELY(i < MaxObjects)) {
if (AMS_LIKELY(thread->Open())) {
cpu_time[i] = thread->GetCpuTime();
objects[i] = thread;
++i;
}
}
}
}
/* Keep track of how many kernel threads we found. */
n = i;
}
/* Wait one second. */
const s64 timeout = KHardwareTimer::GetTick() + ams::svc::Tick(TimeSpan::FromSeconds(1));
GetCurrentThread().Sleep(timeout);
KDpcManager::Sync();
/* Update our metrics. */
for (i = 0; i < n; ++i) {
KThread *thread = static_cast<KThread *>(objects[i]);
cpu_time[i] = thread->GetCpuTime() - cpu_time[i];
}
/* End tracking. */
const s64 end_tick = GetTickOrdered();
/* Log thread utilization. */
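/* NOTE: cpu_time and the tick delta are in the same units, so t is utilization in tenths of a percent over the sampled second. */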
for (i = 0; i < n; ++i) {
KThread *thread = static_cast<KThread *>(objects[i]);
const s64 t = static_cast<u64>(cpu_time[i]) * 1000 / (end_tick - start_tick);
MESOSPHERE_RELEASE_LOG("tid=%3lu (kernel) %3lu.%lu%% pri=%2d af=%lx\n", thread->GetId(), t / 10, t % 10, thread->GetPriority(), thread->GetAffinityMask().GetAffinityMask());
}
/* Close all objects. */
for (i = 0; i < n; ++i) {
objects[i]->Close();
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpCpuUtilization() {
MESOSPHERE_RELEASE_LOG("Dump Cpu Utilization\n");
/* NOTE: Nintendo uses 0x40 as maximum here, but the KProcess slabheap has 0x50 entries. */
/* We have the stack space, so there's no reason not to allow logging all processes. */
constexpr size_t MaxObjects = 0x50;
{
/* Create tracking arrays. */
KAutoObject *objects[MaxObjects];
u32 cpu_time[MaxObjects];
s64 start_tick;
size_t i, n;
KDpcManager::Sync();
{
/* Lock the process list. */
KProcess::ListAccessor accessor;
/* Begin tracking. */
start_tick = GetTickOrdered();
/* Iterate, finding processes. */
const auto end = accessor.end();
i = 0;
for (auto it = accessor.begin(); it != end; ++it) {
KProcess *process = static_cast<KProcess *>(std::addressof(*it));
if (AMS_LIKELY(i < MaxObjects)) {
if (AMS_LIKELY(process->Open())) {
cpu_time[i] = process->GetCpuTime();
objects[i] = process;
++i;
}
}
}
/* Keep track of how many processes we found. */
n = i;
}
/* Wait one second. */
const s64 timeout = KHardwareTimer::GetTick() + ams::svc::Tick(TimeSpan::FromSeconds(1));
GetCurrentThread().Sleep(timeout);
KDpcManager::Sync();
/* Update our metrics. */
for (i = 0; i < n; ++i) {
KProcess *process = static_cast<KProcess *>(objects[i]);
cpu_time[i] = process->GetCpuTime() - cpu_time[i];
}
/* End tracking. */
const s64 end_tick = GetTickOrdered();
/* Log process utilization. */
for (i = 0; i < n; ++i) {
KProcess *process = static_cast<KProcess *>(objects[i]);
const s64 t = static_cast<u64>(cpu_time[i]) * 1000 / (end_tick - start_tick);
MESOSPHERE_RELEASE_LOG("pid=%3lu %-11s %3lu.%lu%%\n", process->GetId(), process->GetName(), t / 10, t % 10);
}
/* Close all objects. */
for (i = 0; i < n; ++i) {
objects[i]->Close();
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpCpuUtilization(u64 process_id) {
MESOSPHERE_RELEASE_LOG("Dump Cpu Utilization\n");
constexpr size_t MaxObjects = 64;
{
/* Create tracking arrays. */
KAutoObject *objects[MaxObjects];
u32 cpu_time[MaxObjects];
s64 start_tick;
size_t i, n;
KDpcManager::Sync();
{
/* Lock the thread list. */
KThread::ListAccessor accessor;
/* Begin tracking. */
start_tick = GetTickOrdered();
/* Iterate, finding process threads. */
const auto end = accessor.end();
i = 0;
for (auto it = accessor.begin(); it != end; ++it) {
KThread *thread = static_cast<KThread *>(std::addressof(*it));
if (KProcess *process = thread->GetOwnerProcess(); process != nullptr && process->GetId() == process_id) {
if (AMS_LIKELY(i < MaxObjects)) {
if (AMS_LIKELY(thread->Open())) {
cpu_time[i] = thread->GetCpuTime();
objects[i] = thread;
++i;
}
}
}
}
/* Keep track of how many process threads we found. */
n = i;
}
/* Wait one second. */
const s64 timeout = KHardwareTimer::GetTick() + ams::svc::Tick(TimeSpan::FromSeconds(1));
GetCurrentThread().Sleep(timeout);
KDpcManager::Sync();
/* Update our metrics. */
for (i = 0; i < n; ++i) {
KThread *thread = static_cast<KThread *>(objects[i]);
cpu_time[i] = thread->GetCpuTime() - cpu_time[i];
}
/* End tracking. */
const s64 end_tick = GetTickOrdered();
/* Log thread utilization. */
for (i = 0; i < n; ++i) {
KThread *thread = static_cast<KThread *>(objects[i]);
KProcess *process = thread->GetOwnerProcess();
const s64 t = static_cast<u64>(cpu_time[i]) * 1000 / (end_tick - start_tick);
MESOSPHERE_RELEASE_LOG("tid=%3lu pid=%3lu %-11s %3lu.%lu%% pri=%2d af=%lx\n", thread->GetId(), process->GetId(), process->GetName(), t / 10, t % 10, thread->GetPriority(), thread->GetAffinityMask().GetAffinityMask());
}
/* Close all objects. */
for (i = 0; i < n; ++i) {
objects[i]->Close();
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpProcess(u64 process_id) {
MESOSPHERE_RELEASE_LOG("Dump Process\n");
{
/* Find and dump the target process. */
if (KProcess *process = KProcess::GetProcessFromId(process_id); process != nullptr) {
ON_SCOPE_EXIT { process->Close(); };
DumpProcess(process);
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpPort() {
MESOSPHERE_RELEASE_LOG("Dump Port\n");
{
/* Lock the list. */
KProcess::ListAccessor accessor;
const auto end = accessor.end();
/* Dump each process. */
for (auto it = accessor.begin(); it != end; ++it) {
DumpPort(accessor, static_cast<KProcess *>(std::addressof(*it)));
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
void DumpPort(u64 process_id) {
MESOSPHERE_RELEASE_LOG("Dump Port\n");
{
/* Find and dump the target process. */
if (KProcess *process = KProcess::GetProcessFromId(process_id); process != nullptr) {
ON_SCOPE_EXIT { process->Close(); };
/* Lock the list. */
KProcess::ListAccessor accessor;
DumpPort(accessor, process);
}
}
MESOSPHERE_RELEASE_LOG("\n");
}
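/* Editor's sketch (not part of the original source): callers of this namespace are */
/* assumed to be a kernel debug interface; e.g., hypothetically: */
/* */
/*     KDumpObject::DumpProcess();          every process */
/*     KDumpObject::DumpHandle(pid);        one process's handle table */
/*     KDumpObject::DumpCpuUtilization();   sample all processes for one second */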
}
| 43,339 | C++ | .cpp | 756 | 40.912698 | 346 | 0.516783 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,951 | kern_k_page_group.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_page_group.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
void KPageGroup::Finalize() {
KBlockInfo *cur = m_first_block;
while (cur != nullptr) {
KBlockInfo *next = cur->GetNext();
m_manager->Free(cur);
cur = next;
}
m_first_block = nullptr;
m_last_block = nullptr;
}
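/* NOTE: CloseAndReset below differs from Finalize in that it also closes the memory manager's references on each block's pages before freeing the block infos. */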
void KPageGroup::CloseAndReset() {
auto &mm = Kernel::GetMemoryManager();
KBlockInfo *cur = m_first_block;
while (cur != nullptr) {
KBlockInfo *next = cur->GetNext();
mm.Close(cur->GetAddress(), cur->GetNumPages());
m_manager->Free(cur);
cur = next;
}
m_first_block = nullptr;
m_last_block = nullptr;
}
size_t KPageGroup::GetNumPages() const {
size_t num_pages = 0;
for (const auto &it : *this) {
num_pages += it.GetNumPages();
}
return num_pages;
}
Result KPageGroup::AddBlock(KPhysicalAddress addr, size_t num_pages) {
/* Succeed immediately if we're adding no pages. */
R_SUCCEED_IF(num_pages == 0);
/* Check for overflow. */
MESOSPHERE_ASSERT(addr < addr + num_pages * PageSize);
/* Try to just append to the last block. */
if (m_last_block != nullptr) {
R_SUCCEED_IF(m_last_block->TryConcatenate(addr, num_pages));
}
/* Allocate a new block. */
KBlockInfo *new_block = m_manager->Allocate();
R_UNLESS(new_block != nullptr, svc::ResultOutOfResource());
/* Initialize the block. */
new_block->Initialize(addr, num_pages);
/* Add the block to our list. */
if (m_last_block != nullptr) {
m_last_block->SetNext(new_block);
} else {
m_first_block = new_block;
}
m_last_block = new_block;
R_SUCCEED();
}
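/* Editor's sketch (not part of the original source): successive AddBlock calls over */
/* physically contiguous ranges coalesce via TryConcatenate; e.g., for some already- */
/* constructed group pg and page-aligned address addr: */
/* */
/*     R_TRY(pg.AddBlock(addr, 4)); */
/*     R_TRY(pg.AddBlock(addr + 4 * PageSize, 4));   still a single 8-page block */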
Result KPageGroup::CopyRangeTo(KPageGroup &out, size_t range_offset, size_t range_size) const {
/* Get the previous last block for the group. */
KBlockInfo * const out_last = out.m_last_block;
const auto out_last_addr = out_last != nullptr ? out_last->GetAddress() : Null<KPhysicalAddress>;
const auto out_last_np = out_last != nullptr ? out_last->GetNumPages() : 0;
/* Ensure we cleanup the group on failure. */
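/* NOTE: AddBlock below may concatenate pages onto out_last, so the rollback must restore its original extents as well as free any newly allocated blocks. */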
ON_RESULT_FAILURE {
KBlockInfo *cur = out_last != nullptr ? out_last->GetNext() : out.m_first_block;
while (cur != nullptr) {
KBlockInfo *next = cur->GetNext();
out.m_manager->Free(cur);
cur = next;
}
if (out_last != nullptr) {
out_last->Initialize(out_last_addr, out_last_np);
out_last->SetNext(nullptr);
} else {
out.m_first_block = nullptr;
}
out.m_last_block = out_last;
};
/* Find the pages within the requested range. */
size_t cur_offset = 0, remaining_size = range_size;
for (auto it = this->begin(); it != this->end() && remaining_size > 0; ++it) {
/* Get the current size. */
const size_t cur_size = it->GetSize();
/* Determine if the offset is in range. */
const size_t rel_diff = range_offset - cur_offset;
const bool is_before = cur_offset <= range_offset;
cur_offset += cur_size;
if (is_before && range_offset < cur_offset) {
/* It is, so add the block. */
const size_t block_size = std::min<size_t>(cur_size - rel_diff, remaining_size);
R_TRY(out.AddBlock(it->GetAddress() + rel_diff, block_size / PageSize));
/* Advance. */
cur_offset = range_offset + block_size;
remaining_size -= block_size;
range_offset += block_size;
}
}
/* Check that we successfully copied the range. */
MESOSPHERE_ABORT_UNLESS(remaining_size == 0);
R_SUCCEED();
}
void KPageGroup::Open() const {
auto &mm = Kernel::GetMemoryManager();
for (const auto &it : *this) {
mm.Open(it.GetAddress(), it.GetNumPages());
}
}
void KPageGroup::OpenFirst() const {
auto &mm = Kernel::GetMemoryManager();
for (const auto &it : *this) {
mm.OpenFirst(it.GetAddress(), it.GetNumPages());
}
}
void KPageGroup::Close() const {
auto &mm = Kernel::GetMemoryManager();
for (const auto &it : *this) {
mm.Close(it.GetAddress(), it.GetNumPages());
}
}
bool KPageGroup::IsEquivalentTo(const KPageGroup &rhs) const {
auto lit = this->begin();
auto rit = rhs.begin();
auto lend = this->end();
auto rend = rhs.end();
while (lit != lend && rit != rend) {
if (*lit != *rit) {
return false;
}
++lit;
++rit;
}
return lit == lend && rit == rend;
}
}
| 5,759 | C++ | .cpp | 146 | 29.917808 | 106 | 0.555934 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,952 | kern_k_memory_layout.board.nintendo_nx.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_memory_layout.board.nintendo_nx.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
constexpr size_t ReservedEarlyDramSize = 0x60000;
constexpr size_t CarveoutAlignment = 0x20000;
constexpr size_t CarveoutSizeMax = 512_MB - CarveoutAlignment;
template<typename... T> requires (std::same_as<T, KMemoryRegionAttr> && ...)
constexpr ALWAYS_INLINE KMemoryRegionType GetMemoryRegionType(KMemoryRegionType base, T... attr) {
return util::FromUnderlying<KMemoryRegionType>(util::ToUnderlying(base) | (util::ToUnderlying<T>(attr) | ...));
}
ALWAYS_INLINE bool SetupUartPhysicalMemoryRegion() {
#if defined(MESOSPHERE_DEBUG_LOG_USE_UART)
switch (KSystemControl::Init::GetDebugLogUartPort()) {
case 0: return KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x70006000, 0x40, GetMemoryRegionType(KMemoryRegionType_Uart, KMemoryRegionAttr_ShouldKernelMap));
case 1: return KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x70006040, 0x40, GetMemoryRegionType(KMemoryRegionType_Uart, KMemoryRegionAttr_ShouldKernelMap));
case 2: return KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x70006200, 0x100, GetMemoryRegionType(KMemoryRegionType_Uart, KMemoryRegionAttr_ShouldKernelMap));
case 3: return KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x70006300, 0x100, GetMemoryRegionType(KMemoryRegionType_Uart, KMemoryRegionAttr_ShouldKernelMap));
default: return false;
}
#elif defined(MESOSPHERE_DEBUG_LOG_USE_IRAM_RINGBUFFER)
return true;
#else
#error "Unknown Debug UART device!"
#endif
}
ALWAYS_INLINE bool SetupPowerManagementControllerMemoryRegion() {
/* For backwards compatibility, the PMC must remain mappable on < 2.0.0. */
const KMemoryRegionAttr rtc_restrict_attr = GetTargetFirmware() >= TargetFirmware_2_0_0 ? KMemoryRegionAttr_NoUserMap : static_cast<KMemoryRegionAttr>(0);
const KMemoryRegionAttr pmc_restrict_attr = GetTargetFirmware() >= TargetFirmware_2_0_0 ? KMemoryRegionAttr_NoUserMap : KMemoryRegionAttr_ShouldKernelMap;
return KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x7000E000, 0x400, GetMemoryRegionType(KMemoryRegionType_None, rtc_restrict_attr)) &&
KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x7000E400, 0xC00, GetMemoryRegionType(KMemoryRegionType_PowerManagementController, pmc_restrict_attr));
}
void InsertPoolPartitionRegionIntoBothTrees(size_t start, size_t size, KMemoryRegionType phys_type, KMemoryRegionType virt_type, u32 &cur_attr) {
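/* NOTE: The shared attribute value ties each physical partition to its virtual counterpart; FindByTypeAndAttribute below relies on it to locate the freshly inserted physical region. */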
const u32 attr = cur_attr++;
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(start, size, phys_type, attr));
const KMemoryRegion *phys = KMemoryLayout::GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(phys_type, attr);
MESOSPHERE_INIT_ABORT_UNLESS(phys != nullptr);
MESOSPHERE_INIT_ABORT_UNLESS(phys->GetEndAddress() != 0);
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(phys->GetPairAddress(), size, virt_type, attr));
}
}
namespace init {
void SetupDevicePhysicalMemoryRegions() {
/* TODO: Give these hardcoded device addresses constexpr defines somewhere? */
MESOSPHERE_INIT_ABORT_UNLESS(SetupUartPhysicalMemoryRegion());
MESOSPHERE_INIT_ABORT_UNLESS(SetupPowerManagementControllerMemoryRegion());
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x70019000, 0x1000, GetMemoryRegionType(KMemoryRegionType_MemoryController, KMemoryRegionAttr_NoUserMap)));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x7001C000, 0x1000, GetMemoryRegionType(KMemoryRegionType_MemoryController0, KMemoryRegionAttr_NoUserMap)));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x7001D000, 0x1000, GetMemoryRegionType(KMemoryRegionType_MemoryController1, KMemoryRegionAttr_NoUserMap)));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x50040000, 0x1000, GetMemoryRegionType(KMemoryRegionType_None, KMemoryRegionAttr_NoUserMap)));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x50041000, 0x1000, GetMemoryRegionType(KMemoryRegionType_InterruptDistributor, KMemoryRegionAttr_ShouldKernelMap)));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x50042000, 0x1000, GetMemoryRegionType(KMemoryRegionType_InterruptCpuInterface, KMemoryRegionAttr_ShouldKernelMap)));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x50043000, 0x1D000, GetMemoryRegionType(KMemoryRegionType_None, KMemoryRegionAttr_NoUserMap)));
/* Map IRAM unconditionally, to support debug-logging-to-iram build config. */
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x40000000, 0x40000, GetMemoryRegionType(KMemoryRegionType_LegacyLpsIram, KMemoryRegionAttr_ShouldKernelMap)));
if (GetTargetFirmware() >= TargetFirmware_2_0_0) {
/* Prevent mapping the bpmp exception vectors or the ipatch region. */
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x6000F000, 0x1000, GetMemoryRegionType(KMemoryRegionType_None, KMemoryRegionAttr_NoUserMap)));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x6001DC00, 0x400, GetMemoryRegionType(KMemoryRegionType_None, KMemoryRegionAttr_NoUserMap)));
} else {
/* Map devices required for legacy lps driver. */
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x6000F000, 0x1000, GetMemoryRegionType(KMemoryRegionType_LegacyLpsExceptionVectors, KMemoryRegionAttr_ShouldKernelMap)));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x60007000, 0x1000, GetMemoryRegionType(KMemoryRegionType_LegacyLpsFlowController, KMemoryRegionAttr_ShouldKernelMap)));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x60004000, 0x1000, GetMemoryRegionType(KMemoryRegionType_LegacyLpsPrimaryICtlr, KMemoryRegionAttr_ShouldKernelMap)));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x60001000, 0x1000, GetMemoryRegionType(KMemoryRegionType_LegacyLpsSemaphore, KMemoryRegionAttr_ShouldKernelMap)));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x70016000, 0x1000, GetMemoryRegionType(KMemoryRegionType_LegacyLpsAtomics, KMemoryRegionAttr_ShouldKernelMap)));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x60006000, 0x1000, GetMemoryRegionType(KMemoryRegionType_LegacyLpsClkRst, KMemoryRegionAttr_ShouldKernelMap)));
}
}
void SetupDramPhysicalMemoryRegions() {
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress);
/* Insert blocks into the tree. */
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));
}
void SetupPoolPartitionMemoryRegions() {
/* Start by identifying the extents of the DRAM memory region. */
const auto dram_extents = KMemoryLayout::GetMainMemoryPhysicalExtents();
MESOSPHERE_INIT_ABORT_UNLESS(dram_extents.GetEndAddress() != 0);
/* Find the pool partitions region. */
const KMemoryRegion *pool_partitions_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(KMemoryRegionType_DramPoolPartition, 0);
MESOSPHERE_INIT_ABORT_UNLESS(pool_partitions_region != nullptr);
const uintptr_t pool_partitions_start = pool_partitions_region->GetAddress();
/* Determine the end of the pool region. */
const uintptr_t pool_end = pool_partitions_region->GetEndAddress();
MESOSPHERE_INIT_ABORT_UNLESS(pool_end == dram_extents.GetEndAddress());
/* Find the start of the kernel DRAM region. */
const KMemoryRegion *kernel_dram_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramKernelBase);
MESOSPHERE_INIT_ABORT_UNLESS(kernel_dram_region != nullptr);
const uintptr_t kernel_dram_start = kernel_dram_region->GetAddress();
MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(kernel_dram_start, CarveoutAlignment));
/* Setup the pool partition layouts. */
if (GetTargetFirmware() >= TargetFirmware_5_0_0) {
/* On 5.0.0+, setup modern 4-pool-partition layout. */
/* Get Application and Applet pool sizes. */
const size_t application_pool_size = KSystemControl::Init::GetApplicationPoolSize();
const size_t applet_pool_size = KSystemControl::Init::GetAppletPoolSize();
const size_t unsafe_system_pool_min_size = KSystemControl::Init::GetMinimumNonSecureSystemPoolSize();
/* Decide on starting addresses for our pools. */
const uintptr_t application_pool_start = pool_end - application_pool_size;
const uintptr_t applet_pool_start = application_pool_start - applet_pool_size;
const uintptr_t unsafe_system_pool_start = std::min(kernel_dram_start + CarveoutSizeMax, util::AlignDown(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment));
const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start;
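/* NOTE: Clamping the start to kernel_dram_start + CarveoutSizeMax presumably keeps the secure region below the unsafe pool within the maximum protectable carveout size; this reading is inferred from the constants above. */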
/* We want to arrange the application pool depending on where the middle of dram is. */
const uintptr_t dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2;
u32 cur_pool_attr = 0;
size_t total_overhead_size = 0;
if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
InsertPoolPartitionRegionIntoBothTrees(application_pool_start, application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(application_pool_size);
} else {
const size_t first_application_pool_size = dram_midpoint - application_pool_start;
const size_t second_application_pool_size = application_pool_start + application_pool_size - dram_midpoint;
InsertPoolPartitionRegionIntoBothTrees(application_pool_start, first_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
InsertPoolPartitionRegionIntoBothTrees(dram_midpoint, second_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
}
/* Insert the applet pool. */
InsertPoolPartitionRegionIntoBothTrees(applet_pool_start, applet_pool_size, KMemoryRegionType_DramAppletPool, KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(applet_pool_size);
/* Insert the nonsecure system pool. */
InsertPoolPartitionRegionIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);
/* Determine final total overhead size. */
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
/* NOTE: Nintendo's kernel has layout [System, Management] but we have [Management, System]. This ensures the four UserPool regions are contiguous. */
/* Insert the system pool. */
const uintptr_t system_pool_start = pool_partitions_start + total_overhead_size;
const size_t system_pool_size = unsafe_system_pool_start - system_pool_start;
InsertPoolPartitionRegionIntoBothTrees(system_pool_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
/* Insert the pool management region. */
const uintptr_t pool_management_start = pool_partitions_start;
const size_t pool_management_size = total_overhead_size;
u32 pool_management_attr = 0;
InsertPoolPartitionRegionIntoBothTrees(pool_management_start, pool_management_size, KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, pool_management_attr);
} else {
/* On < 5.0.0, setup a legacy 2-pool layout for backwards compatibility. */
static_assert(KMemoryManager::Pool_Count == 4);
static_assert(KMemoryManager::Pool_Unsafe == KMemoryManager::Pool_Application);
static_assert(KMemoryManager::Pool_Secure == KMemoryManager::Pool_System);
/* Get Secure pool size. */
const size_t secure_pool_size = [](auto target_firmware) ALWAYS_INLINE_LAMBDA -> size_t {
constexpr size_t LegacySecureKernelSize = 8_MB; /* KPageBuffer pages, other small kernel allocations. */
constexpr size_t LegacySecureMiscSize = 1_MB; /* Miscellaneous pages for secure process mapping. */
constexpr size_t LegacySecureHeapSize = 24_MB; /* Heap pages for secure process mapping (fs). */
constexpr size_t LegacySecureEsSize = 1_MB + 232_KB; /* Size for additional secure process (es, 4.0.0+). */
/* The baseline size for the secure region is enough to cover any allocations the kernel might make. */
size_t size = LegacySecureKernelSize;
/* If on 2.0.0+, initial processes will fall within the secure region. */
if (target_firmware >= TargetFirmware_2_0_0) {
/* Account for memory used directly for the processes. */
size += GetInitialProcessesSecureMemorySize();
/* Account for heap and transient memory used by the processes. */
size += LegacySecureHeapSize + LegacySecureMiscSize;
}
/* If on 4.0.0+, any process may use secure memory via a create process flag. */
/* In practice, this is used for es alone, and the secure pool's size should be */
/* increased to accommodate es's binary. */
if (target_firmware >= TargetFirmware_4_0_0) {
size += LegacySecureEsSize;
}
return size;
}(GetTargetFirmware());
/* Calculate the overhead for the secure and (defunct) applet/non-secure-system pools. */
size_t total_overhead_size = KMemoryManager::CalculateManagementOverheadSize(secure_pool_size);
/* Calculate the overhead for (an amount larger than) the unsafe pool. */
const size_t approximate_total_overhead_size = total_overhead_size + KMemoryManager::CalculateManagementOverheadSize((pool_end - pool_partitions_start) - secure_pool_size - total_overhead_size) + 2 * PageSize;
/* Determine the start of the unsafe region. */
const uintptr_t unsafe_memory_start = util::AlignUp(pool_partitions_start + secure_pool_size + approximate_total_overhead_size, CarveoutAlignment);
/* Determine the start of the pool regions. */
const uintptr_t application_pool_start = unsafe_memory_start;
/* Determine the pool sizes. */
const size_t application_pool_size = pool_end - application_pool_start;
/* We want to arrange the application pool depending on where the middle of DRAM is. */
const uintptr_t dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2;
u32 cur_pool_attr = 0;
if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
InsertPoolPartitionRegionIntoBothTrees(application_pool_start, application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(application_pool_size);
} else {
const size_t first_application_pool_size = dram_midpoint - application_pool_start;
const size_t second_application_pool_size = application_pool_start + application_pool_size - dram_midpoint;
InsertPoolPartitionRegionIntoBothTrees(application_pool_start, first_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
InsertPoolPartitionRegionIntoBothTrees(dram_midpoint, second_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
}
/* Validate the true overhead size. */
MESOSPHERE_INIT_ABORT_UNLESS(total_overhead_size <= approximate_total_overhead_size);
/* NOTE: Nintendo's kernel has layout [System, Management] but we have [Management, System]. This ensures the UserPool regions are contiguous. */
/* Insert the secure pool. */
const uintptr_t secure_pool_start = unsafe_memory_start - secure_pool_size;
InsertPoolPartitionRegionIntoBothTrees(secure_pool_start, secure_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
/* Insert the pool management region. */
const uintptr_t pool_management_start = pool_partitions_start;
const size_t pool_management_size = secure_pool_start - pool_management_start;
MESOSPHERE_INIT_ABORT_UNLESS(total_overhead_size <= pool_management_size);
u32 pool_management_attr = 0;
InsertPoolPartitionRegionIntoBothTrees(pool_management_start, pool_management_size, KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, pool_management_attr);
}
}
}
}
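/* An illustrative aside: the 5.0.0+ carving above walks backwards from the end of DRAM and
 * clamps the non-secure system pool against the kernel carveout. The standalone sketch below
 * shows just that arithmetic; every constant and size in it is a hypothetical placeholder,
 * not a value the kernel actually uses. Compile it on its own, not as part of this file. */
#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
    constexpr uint64_t CarveoutAlignment = 0x20000;   /* assumed for illustration */
    constexpr uint64_t CarveoutSizeMax   = 0x6000000; /* assumed for illustration */

    const uint64_t kernel_dram_start = 0x80060000;    /* hypothetical */
    const uint64_t pool_end          = 0x180000000;   /* hypothetical end of DRAM */

    const uint64_t application_pool_size       = 0xCD500000; /* hypothetical */
    const uint64_t applet_pool_size            = 0x20460000; /* hypothetical */
    const uint64_t unsafe_system_pool_min_size = 0x2F980000; /* hypothetical */

    const auto align_down = [](uint64_t v, uint64_t a) { return v & ~(a - 1); };

    /* Carve pools backwards from the end of DRAM, as in SetupPoolPartitionMemoryRegions. */
    const uint64_t application_pool_start   = pool_end - application_pool_size;
    const uint64_t applet_pool_start        = application_pool_start - applet_pool_size;
    const uint64_t unsafe_system_pool_start = std::min(kernel_dram_start + CarveoutSizeMax,
                                                       align_down(applet_pool_start - unsafe_system_pool_min_size, CarveoutAlignment));

    std::printf("application pool starts at %#llx\n", static_cast<unsigned long long>(application_pool_start));
    std::printf("applet pool starts at      %#llx\n", static_cast<unsigned long long>(applet_pool_start));
    std::printf("unsafe system pool at      %#llx\n", static_cast<unsigned long long>(unsafe_system_pool_start));
    return 0;
}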
kern_k_initial_process_reader.cpp
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_initial_process_reader.cpp
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
struct BlzSegmentFlags {
using Offset = util::BitPack16::Field<0, 12, u32>;
using Size = util::BitPack16::Field<Offset::Next, 4, u32>;
};
NOINLINE void BlzUncompress(void *_end) {
/* Parse the footer, endian agnostic. */
static_assert(sizeof(u32) == 4);
static_assert(sizeof(u16) == 2);
static_assert(sizeof(u8) == 1);
u8 *end = static_cast<u8 *>(_end);
const u32 total_size = (end[-12] << 0) | (end[-11] << 8) | (end[-10] << 16) | (end[- 9] << 24);
const u32 footer_size = (end[- 8] << 0) | (end[- 7] << 8) | (end[- 6] << 16) | (end[- 5] << 24);
const u32 additional_size = (end[- 4] << 0) | (end[- 3] << 8) | (end[- 2] << 16) | (end[- 1] << 24);
/* Prepare to decompress. */
u8 *cmp_start = end - total_size;
u32 cmp_ofs = total_size - footer_size;
u32 out_ofs = total_size + additional_size;
/* Decompress. */
while (out_ofs) {
u8 control = cmp_start[--cmp_ofs];
/* Each bit in the control byte is a flag indicating compressed or not compressed. */
for (size_t i = 0; i < 8 && out_ofs; ++i, control <<= 1) {
if (control & 0x80) {
/* NOTE: Nintendo does not check if it's possible to decompress. */
/* As such, we will leave the following as a debug assertion, and not a release assertion. */
MESOSPHERE_AUDIT(cmp_ofs >= sizeof(u16));
cmp_ofs -= sizeof(u16);
/* Extract segment bounds. */
const util::BitPack16 seg_flags{static_cast<u16>((cmp_start[cmp_ofs] << 0) | (cmp_start[cmp_ofs + 1] << 8))};
const u32 seg_ofs = seg_flags.Get<BlzSegmentFlags::Offset>() + 3;
const u32 seg_size = std::min(seg_flags.Get<BlzSegmentFlags::Size>() + 3, out_ofs);
MESOSPHERE_AUDIT(out_ofs + seg_ofs <= total_size + additional_size);
/* Copy the data. */
out_ofs -= seg_size;
for (size_t j = 0; j < seg_size; j++) {
cmp_start[out_ofs + j] = cmp_start[out_ofs + seg_ofs + j];
}
} else {
/* NOTE: Nintendo does not check if it's possible to copy. */
/* As such, we will leave the following as a debug assertion, and not a release assertion. */
MESOSPHERE_AUDIT(cmp_ofs >= sizeof(u8));
cmp_start[--out_ofs] = cmp_start[--cmp_ofs];
}
}
}
}
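/* An illustrative aside: the footer parse above reads three little-endian u32s from the last
 * 12 bytes of the image, byte by byte so that it works regardless of host endianness. Below
 * is a standalone sketch of that layout with a hypothetical footer; compile it separately. */
#include <cstdint>
#include <cstdio>

static uint32_t ReadU32Le(const uint8_t *p) {
    /* Endian-agnostic little-endian read, as in BlzUncompress. */
    return (uint32_t(p[0]) << 0) | (uint32_t(p[1]) << 8) | (uint32_t(p[2]) << 16) | (uint32_t(p[3]) << 24);
}

int main() {
    const uint8_t footer[12] = {
        0x00, 0x10, 0x00, 0x00,  /* total_size      = 0x1000 */
        0x0C, 0x00, 0x00, 0x00,  /* footer_size     = 0x0C   */
        0x00, 0x04, 0x00, 0x00,  /* additional_size = 0x0400 */
    };
    const uint8_t *end = footer + sizeof(footer);
    std::printf("total=%#x footer=%#x additional=%#x\n",
                ReadU32Le(end - 12), ReadU32Le(end - 8), ReadU32Le(end - 4));
    return 0;
}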
NOINLINE void LoadInitialProcessSegment(const KPageGroup &pg, size_t seg_offset, size_t seg_size, size_t binary_size, KVirtualAddress data, bool compressed) {
/* Save the original binary extents, for later use. */
const KPhysicalAddress binary_phys = KMemoryLayout::GetLinearPhysicalAddress(data);
/* Create a page group representing the segment. */
KPageGroup segment_pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
MESOSPHERE_R_ABORT_UNLESS(pg.CopyRangeTo(segment_pg, seg_offset, util::AlignUp(seg_size, PageSize)));
/* Setup the new page group's memory so that we can load the segment. */
{
KVirtualAddress last_block = Null<KVirtualAddress>;
KVirtualAddress last_data = Null<KVirtualAddress>;
size_t last_copy_size = 0;
size_t last_clear_size = 0;
size_t remaining_copy_size = binary_size;
for (const auto &block : segment_pg) {
/* Get the current block extents. */
const auto block_addr = block.GetAddress();
const size_t block_size = block.GetSize();
if (remaining_copy_size > 0) {
/* Determine if we need to copy anything. */
const size_t cur_size = std::min<size_t>(block_size, remaining_copy_size);
/* NOTE: The first block may potentially overlap the binary we want to copy to. */
/* Consider e.g. the case where the overall compressed image has size 0x40000, seg_offset is 0x30000, and binary_size is > 0x20000. */
/* Suppose too that data points, say, 0x18000 into the compressed image. */
/* Suppose finally that we simply naively copy in order. */
/* The first iteration of this loop will perform an 0x10000 copy from image+0x18000 to image + 0x30000 (as there is no overlap). */
/* The second iteration will perform a copy from image+0x28000 to <allocated pages>. */
/* However, the first copy will have trashed the data in the second copy. */
/* Thus, we must copy the first block after-the-fact to avoid potentially trashing data in the overlap case. */
/* It is guaranteed by pre-condition that only the very first block can overlap with the physical binary, so we can simply memmove it at the end. */
if (last_block != Null<KVirtualAddress>) {
/* This is guaranteed by pre-condition, but for ease of debugging, check for no overlap. */
MESOSPHERE_ASSERT(!util::HasOverlap(GetInteger(binary_phys), binary_size, GetInteger(block_addr), cur_size));
MESOSPHERE_UNUSED(binary_phys);
/* We need to copy. */
std::memcpy(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block_addr)), GetVoidPointer(data), cur_size);
/* If we need to, clear past where we're copying. */
if (cur_size != block_size) {
std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block_addr + cur_size)), 0, block_size - cur_size);
}
/* Advance. */
remaining_copy_size -= cur_size;
data += cur_size;
} else {
/* Save the first block, which may potentially overlap, so that we can copy it later. */
last_block = KMemoryLayout::GetLinearVirtualAddress(block_addr);
last_data = data;
last_copy_size = cur_size;
last_clear_size = block_size - cur_size;
/* Advance. */
remaining_copy_size -= cur_size;
data += cur_size;
}
} else {
/* We don't have data to copy, so we should just clear the pages. */
std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block_addr)), 0, block_size);
}
}
/* Handle a last block. */
if (last_copy_size != 0) {
if (last_block != last_data) {
std::memmove(GetVoidPointer(last_block), GetVoidPointer(last_data), last_copy_size);
}
if (last_clear_size != 0) {
std::memset(GetVoidPointer(last_block + last_copy_size), 0, last_clear_size);
}
}
}
/* If compressed, uncompress the data. */
if (compressed) {
/* Get the temporary region. */
const auto &temp_region = KMemoryLayout::GetTempRegion();
MESOSPHERE_ABORT_UNLESS(temp_region.GetEndAddress() != 0);
/* Map the process's memory into the temporary region. */
KProcessAddress temp_address = Null<KProcessAddress>;
MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().MapPageGroup(std::addressof(temp_address), segment_pg, temp_region.GetAddress(), temp_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));
ON_SCOPE_EXIT { MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPageGroup(temp_address, segment_pg, KMemoryState_Kernel)); };
/* Uncompress the data. */
BlzUncompress(GetVoidPointer(temp_address + binary_size));
}
}
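/* An illustrative aside: the block-copy loop above defers the first destination block, because
 * only that block may overlap the source binary, and memmoves it at the end. A standalone
 * sketch of the same pattern follows; the chunk layout and sizes are hypothetical. */
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <cstring>

static void CopyDeferFirst(unsigned char **chunks, const size_t *chunk_sizes, size_t num_chunks,
                           const unsigned char *src, size_t src_size) {
    unsigned char *first_dst = nullptr;
    const unsigned char *first_src = nullptr;
    size_t first_size = 0, remaining = src_size;
    for (size_t i = 0; i < num_chunks && remaining > 0; ++i) {
        const size_t cur = std::min(chunk_sizes[i], remaining);
        if (i == 0) {
            /* Save the possibly-overlapping first block so it is copied last. */
            first_dst = chunks[i]; first_src = src; first_size = cur;
        } else {
            std::memcpy(chunks[i], src, cur); /* guaranteed non-overlapping */
        }
        src += cur; remaining -= cur;
    }
    if (first_size != 0 && first_dst != first_src) {
        std::memmove(first_dst, first_src, first_size); /* overlap-safe copy */
    }
}

int main() {
    /* The first chunk overlaps the source; naive in-order copying would trash bytes 4..5. */
    unsigned char buf[8] = { 'a', 'b', 'c', 'd', 'e', 'f', 0, 0 };
    unsigned char *chunks[] = { buf + 2, buf + 6 };
    const size_t sizes[] = { 4, 2 };
    CopyDeferFirst(chunks, sizes, 2, buf, 6);
    for (unsigned char c : buf) { std::printf("%c", c ? c : '.'); } /* prints "ababcdef" */
    std::printf("\n");
    return 0;
}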
}
Result KInitialProcessReader::MakeCreateProcessParameter(ams::svc::CreateProcessParameter *out, bool enable_aslr) const {
/* Get and validate addresses/sizes. */
const uintptr_t rx_address = m_kip_header.GetRxAddress();
const size_t rx_size = m_kip_header.GetRxSize();
const uintptr_t ro_address = m_kip_header.GetRoAddress();
const size_t ro_size = m_kip_header.GetRoSize();
const uintptr_t rw_address = m_kip_header.GetRwAddress();
const size_t rw_size = m_kip_header.GetRwSize();
const uintptr_t bss_address = m_kip_header.GetBssAddress();
const size_t bss_size = m_kip_header.GetBssSize();
R_UNLESS(util::IsAligned(rx_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(ro_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(rw_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(rx_address <= rx_address + util::AlignUp(rx_size, PageSize), svc::ResultInvalidAddress());
R_UNLESS(ro_address <= ro_address + util::AlignUp(ro_size, PageSize), svc::ResultInvalidAddress());
R_UNLESS(rw_address <= rw_address + util::AlignUp(rw_size, PageSize), svc::ResultInvalidAddress());
R_UNLESS(bss_address <= bss_address + util::AlignUp(bss_size, PageSize), svc::ResultInvalidAddress());
R_UNLESS(rx_address + util::AlignUp(rx_size, PageSize) <= ro_address, svc::ResultInvalidAddress());
R_UNLESS(ro_address + util::AlignUp(ro_size, PageSize) <= rw_address, svc::ResultInvalidAddress());
R_UNLESS(rw_address + rw_size <= bss_address, svc::ResultInvalidAddress());
/* Validate the address space. */
if (this->Is64BitAddressSpace()) {
R_UNLESS(this->Is64Bit(), svc::ResultInvalidCombination());
}
const uintptr_t start_address = rx_address;
const uintptr_t end_address = bss_size > 0 ? bss_address + bss_size : rw_address + rw_size;
MESOSPHERE_ABORT_UNLESS(start_address == 0);
/* Default fields in parameter to zero. */
*out = {};
/* Set fields in parameter. */
out->code_address = 0;
out->code_num_pages = util::AlignUp(end_address - start_address, PageSize) / PageSize;
out->program_id = m_kip_header.GetProgramId();
out->version = m_kip_header.GetVersion();
out->flags = 0;
out->reslimit = ams::svc::InvalidHandle;
out->system_resource_num_pages = 0;
/* Copy name field. */
m_kip_header.GetName(out->name, sizeof(out->name));
/* Apply other flags. */
if (this->Is64Bit()) {
out->flags |= ams::svc::CreateProcessFlag_Is64Bit;
}
if (this->Is64BitAddressSpace()) {
out->flags |= (GetTargetFirmware() >= TargetFirmware_2_0_0) ? ams::svc::CreateProcessFlag_AddressSpace64Bit : ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated;
} else {
out->flags |= ams::svc::CreateProcessFlag_AddressSpace32Bit;
}
if (enable_aslr) {
out->flags |= ams::svc::CreateProcessFlag_EnableAslr;
}
/* All initial processes should disable device address space merge. */
out->flags |= ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge;
/* Set and check code address. */
using ASType = KAddressSpaceInfo::Type;
const ASType as_type = this->Is64BitAddressSpace() ? ((GetTargetFirmware() >= TargetFirmware_2_0_0) ? KAddressSpaceInfo::Type_Map39Bit : KAddressSpaceInfo::Type_MapSmall) : KAddressSpaceInfo::Type_MapSmall;
const uintptr_t map_start = KAddressSpaceInfo::GetAddressSpaceStart(static_cast<ams::svc::CreateProcessFlag>(out->flags), as_type);
const size_t map_size = KAddressSpaceInfo::GetAddressSpaceSize(static_cast<ams::svc::CreateProcessFlag>(out->flags), as_type);
const uintptr_t map_end = map_start + map_size;
out->code_address = map_start + start_address;
MESOSPHERE_ABORT_UNLESS((out->code_address / PageSize) + out->code_num_pages <= (map_end / PageSize));
/* Apply ASLR, if needed. */
if (enable_aslr) {
const size_t choices = (map_end / KernelAslrAlignment) - (util::AlignUp(out->code_address + out->code_num_pages * PageSize, KernelAslrAlignment) / KernelAslrAlignment);
out->code_address += KSystemControl::GenerateRandomRange(0, choices) * KernelAslrAlignment;
}
R_SUCCEED();
}
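/* An illustrative aside: the ASLR step above counts how many aligned "slots" the image can
 * slide into below map_end, then picks one at random. A standalone sketch with hypothetical
 * numbers follows; the alignment value and the inclusive range of GenerateRandomRange are
 * assumptions made for illustration only. */
#include <cstdint>
#include <cstdio>
#include <random>

static uint64_t AlignUpU64(uint64_t v, uint64_t a) { return (v + a - 1) & ~(a - 1); }

int main() {
    constexpr uint64_t PageSize  = 0x1000;
    constexpr uint64_t AslrAlign = 0x200000;      /* assumed for illustration */
    const uint64_t map_end        = 0x7800000000; /* hypothetical */
    const uint64_t code_address   = 0x8000000;    /* hypothetical, image at map_start */
    const uint64_t code_num_pages = 0x2000;       /* hypothetical */

    /* Slots left between the end of the image and the end of the mappable region. */
    const uint64_t choices = (map_end / AslrAlign)
                           - (AlignUpU64(code_address + code_num_pages * PageSize, AslrAlign) / AslrAlign);

    std::mt19937_64 rng(0); /* deterministic stand-in for the kernel's RNG */
    const uint64_t slid = code_address + (rng() % (choices + 1)) * AslrAlign;
    std::printf("choices=%llu, slid code_address=%#llx\n",
                static_cast<unsigned long long>(choices), static_cast<unsigned long long>(slid));
    return 0;
}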
void KInitialProcessReader::Load(const KPageGroup &pg, KVirtualAddress data) const {
/* Prepare to layout the data. */
const KVirtualAddress rx_data = data;
const KVirtualAddress ro_data = rx_data + m_kip_header.GetRxCompressedSize();
const KVirtualAddress rw_data = ro_data + m_kip_header.GetRoCompressedSize();
const size_t rx_size = m_kip_header.GetRxSize();
const size_t ro_size = m_kip_header.GetRoSize();
const size_t rw_size = m_kip_header.GetRwSize();
/* If necessary, setup bss. */
if (const size_t bss_size = m_kip_header.GetBssSize(); bss_size > 0) {
/* Determine how many additional pages are needed for bss. */
const u64 rw_end = util::AlignUp<u64>(m_kip_header.GetRwAddress() + m_kip_header.GetRwSize(), PageSize);
const u64 bss_end = util::AlignUp<u64>(m_kip_header.GetBssAddress() + m_kip_header.GetBssSize(), PageSize);
if (rw_end != bss_end) {
/* Find the pages corresponding to bss. */
size_t cur_offset = 0;
size_t remaining_size = bss_end - rw_end;
size_t bss_offset = rw_end - m_kip_header.GetRxAddress();
for (auto it = pg.begin(); it != pg.end() && remaining_size > 0; ++it) {
/* Get the current size. */
const size_t cur_size = it->GetSize();
/* Determine if the offset is in range. */
const size_t rel_diff = bss_offset - cur_offset;
const bool is_before = cur_offset <= bss_offset;
cur_offset += cur_size;
if (is_before && bss_offset < cur_offset) {
/* It is, so clear the bss range. */
const size_t block_size = std::min<size_t>(cur_size - rel_diff, remaining_size);
std::memset(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(it->GetAddress() + rel_diff)), 0, block_size);
/* Advance. */
cur_offset = bss_offset + block_size;
remaining_size -= block_size;
bss_offset += block_size;
}
}
}
}
/* Load .rwdata. */
LoadInitialProcessSegment(pg, m_kip_header.GetRwAddress() - m_kip_header.GetRxAddress(), rw_size, m_kip_header.GetRwCompressedSize(), rw_data, m_kip_header.IsRwCompressed());
/* Load .rodata. */
LoadInitialProcessSegment(pg, m_kip_header.GetRoAddress() - m_kip_header.GetRxAddress(), ro_size, m_kip_header.GetRoCompressedSize(), ro_data, m_kip_header.IsRoCompressed());
/* Load .text. */
LoadInitialProcessSegment(pg, m_kip_header.GetRxAddress() - m_kip_header.GetRxAddress(), rx_size, m_kip_header.GetRxCompressedSize(), rx_data, m_kip_header.IsRxCompressed());
}
Result KInitialProcessReader::SetMemoryPermissions(KProcessPageTable &page_table, const ams::svc::CreateProcessParameter ¶ms) const {
const size_t rx_size = m_kip_header.GetRxSize();
const size_t ro_size = m_kip_header.GetRoSize();
const size_t rw_size = m_kip_header.GetRwSize();
const size_t bss_size = m_kip_header.GetBssSize();
/* Set R-X pages. */
if (rx_size) {
const uintptr_t start = m_kip_header.GetRxAddress() + params.code_address;
R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(rx_size, PageSize), ams::svc::MemoryPermission_ReadExecute));
}
/* Set R-- pages. */
if (ro_size) {
const uintptr_t start = m_kip_header.GetRoAddress() + params.code_address;
R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(ro_size, PageSize), ams::svc::MemoryPermission_Read));
}
/* Set RW- pages. */
if (rw_size || bss_size) {
const uintptr_t start = (rw_size ? m_kip_header.GetRwAddress() : m_kip_header.GetBssAddress()) + params.code_address;
const uintptr_t end = (bss_size ? m_kip_header.GetBssAddress() + bss_size : m_kip_header.GetRwAddress() + rw_size) + params.code_address;
R_TRY(page_table.SetProcessMemoryPermission(start, util::AlignUp(end - start, PageSize), ams::svc::MemoryPermission_ReadWrite));
}
R_SUCCEED();
}
}
kern_k_trace.cpp
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_trace.cpp
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
/* Static initializations. */
constinit bool KTrace::s_is_active = false;
namespace {
constinit KSpinLock g_ktrace_lock;
constinit KVirtualAddress g_ktrace_buffer_address = Null<KVirtualAddress>;
constinit size_t g_ktrace_buffer_size = 0;
constinit u64 g_type_filter = 0;
struct KTraceHeader {
u32 magic;
u32 offset;
u32 index;
u32 count;
static constexpr u32 Magic = util::FourCC<'K','T','R','0'>::Code;
};
static_assert(util::is_pod<KTraceHeader>::value);
struct KTraceRecord {
u8 core_id;
u8 type;
u16 process_id;
u32 thread_id;
u64 tick;
u64 data[6];
};
static_assert(util::is_pod<KTraceRecord>::value);
static_assert(sizeof(KTraceRecord) == 0x40);
ALWAYS_INLINE bool IsTypeFiltered(u8 type) {
return (g_type_filter & (UINT64_C(1) << (type & (BITSIZEOF(u64) - 1)))) != 0;
}
}
void KTrace::Initialize(KVirtualAddress address, size_t size) {
/* Only perform tracing when on development hardware. */
if (KTargetSystem::IsDebugMode()) {
const size_t offset = util::AlignUp(sizeof(KTraceHeader), sizeof(KTraceRecord));
if (offset < size) {
/* Clear the trace buffer. */
std::memset(GetVoidPointer(address), 0, size);
/* Initialize the KTrace header. */
KTraceHeader *header = GetPointer<KTraceHeader>(address);
header->magic = KTraceHeader::Magic;
header->offset = offset;
header->index = 0;
header->count = (size - offset) / sizeof(KTraceRecord);
/* Set the global data. */
g_ktrace_buffer_address = address;
g_ktrace_buffer_size = size;
/* Set the filters to defaults. */
g_type_filter = ~(UINT64_C(0));
}
}
}
void KTrace::Start() {
if (g_ktrace_buffer_address != Null<KVirtualAddress>) {
/* Get exclusive access to the trace buffer. */
KScopedInterruptDisable di;
KScopedSpinLock lk(g_ktrace_lock);
/* Reset the header. */
KTraceHeader *header = GetPointer<KTraceHeader>(g_ktrace_buffer_address);
header->index = 0;
/* Reset the records. */
KTraceRecord *records = GetPointer<KTraceRecord>(g_ktrace_buffer_address + header->offset);
std::memset(records, 0, sizeof(*records) * header->count);
/* Note that we're active. */
s_is_active = true;
}
}
void KTrace::Stop() {
if (g_ktrace_buffer_address != Null<KVirtualAddress>) {
/* Get exclusive access to the trace buffer. */
KScopedInterruptDisable di;
KScopedSpinLock lk(g_ktrace_lock);
/* Note that we're paused. */
s_is_active = false;
}
}
void KTrace::PushRecord(u8 type, u64 param0, u64 param1, u64 param2, u64 param3, u64 param4, u64 param5) {
/* Get exclusive access to the trace buffer. */
KScopedInterruptDisable di;
KScopedSpinLock lk(g_ktrace_lock);
/* Check whether we should push the record to the trace buffer. */
if (s_is_active && IsTypeFiltered(type)) {
/* Get the current thread and process. */
KThread &cur_thread = GetCurrentThread();
KProcess *cur_process = GetCurrentProcessPointer();
/* Get the current record index from the header. */
KTraceHeader *header = GetPointer<KTraceHeader>(g_ktrace_buffer_address);
u32 index = header->index;
/* Get the current record. */
KTraceRecord *record = GetPointer<KTraceRecord>(g_ktrace_buffer_address + header->offset + index * sizeof(KTraceRecord));
/* Set the record's data. */
*record = {
.core_id = static_cast<u8>(GetCurrentCoreId()),
.type = type,
.process_id = static_cast<u16>(cur_process != nullptr ? cur_process->GetId() : ~0),
.thread_id = static_cast<u32>(cur_thread.GetId()),
.tick = static_cast<u64>(KHardwareTimer::GetTick()),
.data = { param0, param1, param2, param3, param4, param5 },
};
/* Advance the current index. */
if ((++index) >= header->count) {
index = 0;
}
/* Set the next index. */
header->index = index;
}
}
}
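/* An illustrative aside: PushRecord above writes into a fixed ring of records and wraps the
 * header index past count. A simplified standalone sketch of that shape follows; the record
 * layout and capacity here are arbitrary, not KTrace's. */
#include <cstdint>
#include <cstdio>

struct Record { uint8_t type; uint64_t tick; uint64_t data; };

struct Ring {
    static constexpr uint32_t Count = 8;
    Record records[Count] = {};
    uint32_t index = 0;

    void Push(uint8_t type, uint64_t tick, uint64_t data) {
        records[index] = { type, tick, data };
        if ((++index) >= Count) { index = 0; } /* same wraparound as PushRecord */
    }
};

int main() {
    Ring ring;
    for (uint64_t i = 0; i < 10; ++i) { ring.Push(1, i, i * i); }
    std::printf("next write index after 10 pushes: %u\n", ring.index); /* prints 2 */
    return 0;
}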
kern_k_event.cpp
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_event.cpp
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
void KEvent::Initialize() {
MESOSPHERE_ASSERT_THIS();
/* Create our readable event. */
KAutoObject::Create<KReadableEvent>(std::addressof(m_readable_event));
/* Initialize our readable event. */
m_readable_event.Initialize(this);
/* Set our owner process. */
m_owner = GetCurrentProcessPointer();
m_owner->Open();
/* Mark initialized. */
m_initialized = true;
}
void KEvent::Finalize() {
MESOSPHERE_ASSERT_THIS();
}
Result KEvent::Signal() {
KScopedSchedulerLock sl;
R_SUCCEED_IF(m_readable_event_destroyed);
R_RETURN(m_readable_event.Signal());
}
Result KEvent::Clear() {
KScopedSchedulerLock sl;
R_SUCCEED_IF(m_readable_event_destroyed);
R_RETURN(m_readable_event.Clear());
}
void KEvent::PostDestroy(uintptr_t arg) {
/* Release the event count resource the owner process holds. */
KProcess *owner = reinterpret_cast<KProcess *>(arg);
owner->ReleaseResource(ams::svc::LimitableResource_EventCountMax, 1);
owner->Close();
}
}
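/* An illustrative aside: KEvent::Initialize takes a reference on its owner with Open(), and
 * KEvent::PostDestroy drops it with Close(). A heavily simplified standalone sketch of that
 * open/close discipline follows; it is not the kernel's KAutoObject implementation. */
#include <cstdio>

class RefCounted {
    int m_count = 1; /* the creator holds the initial reference */
public:
    void Open()  { ++m_count; }
    void Close() { if (--m_count == 0) { std::printf("object destroyed\n"); } }
};

int main() {
    RefCounted owner;
    owner.Open();  /* analogous to m_owner->Open() in KEvent::Initialize */
    owner.Close(); /* analogous to owner->Close() in KEvent::PostDestroy */
    owner.Close(); /* creator's reference; destruction happens here */
    return 0;
}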
kern_k_handle_table.cpp
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_handle_table.cpp
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
Result KHandleTable::Finalize() {
MESOSPHERE_ASSERT_THIS();
/* Get the table and clear our record of it. */
u16 saved_table_size = 0;
{
KScopedDisableDispatch dd;
KScopedSpinLock lk(m_lock);
std::swap(m_table_size, saved_table_size);
}
/* Close and free all entries. */
for (size_t i = 0; i < saved_table_size; i++) {
if (KAutoObject *obj = m_objects[i]; obj != nullptr) {
obj->Close();
}
}
R_SUCCEED();
}
bool KHandleTable::Remove(ams::svc::Handle handle) {
MESOSPHERE_ASSERT_THIS();
/* Don't allow removal of a pseudo-handle. */
if (AMS_UNLIKELY(ams::svc::IsPseudoHandle(handle))) {
return false;
}
/* Handles must not have reserved bits set. */
const auto handle_pack = GetHandleBitPack(handle);
if (AMS_UNLIKELY(handle_pack.Get<HandleReserved>() != 0)) {
return false;
}
/* Find the object and free the entry. */
KAutoObject *obj = nullptr;
{
KScopedDisableDispatch dd;
KScopedSpinLock lk(m_lock);
if (AMS_LIKELY(this->IsValidHandle(handle))) {
const auto index = handle_pack.Get<HandleIndex>();
obj = m_objects[index];
this->FreeEntry(index);
} else {
return false;
}
}
/* Close the object. */
obj->Close();
return true;
}
Result KHandleTable::Add(ams::svc::Handle *out_handle, KAutoObject *obj) {
MESOSPHERE_ASSERT_THIS();
KScopedDisableDispatch dd;
KScopedSpinLock lk(m_lock);
/* Never exceed our capacity. */
R_UNLESS(m_count < m_table_size, svc::ResultOutOfHandles());
/* Allocate entry, set output handle. */
{
const auto linear_id = this->AllocateLinearId();
const auto index = this->AllocateEntry();
m_entry_infos[index].linear_id = linear_id;
m_objects[index] = obj;
obj->Open();
*out_handle = EncodeHandle(index, linear_id);
}
R_SUCCEED();
}
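/* An illustrative aside: handles pack an entry index, a rotating linear id, and reserved bits
 * into 32 bits; Add encodes them and Remove/Register unpack them. The field widths in this
 * standalone sketch (15-bit index, 15-bit linear id, 2 reserved bits) are assumptions made
 * for illustration, not a statement of the kernel's exact layout. */
#include <cstdint>
#include <cstdio>

static uint32_t EncodeHandleExample(uint32_t index, uint32_t linear_id) {
    return (index & 0x7FFF) | ((linear_id & 0x7FFF) << 15);
}
static uint32_t HandleIndexOf(uint32_t h)    { return h & 0x7FFF; }
static uint32_t HandleLinearIdOf(uint32_t h) { return (h >> 15) & 0x7FFF; }
static uint32_t HandleReservedOf(uint32_t h) { return h >> 30; }

int main() {
    const uint32_t h = EncodeHandleExample(3, 42);
    /* A valid handle has reserved == 0 and linear_id != 0, matching the assertions above. */
    std::printf("handle=%#x index=%u linear_id=%u reserved=%u\n",
                h, HandleIndexOf(h), HandleLinearIdOf(h), HandleReservedOf(h));
    return 0;
}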
Result KHandleTable::Reserve(ams::svc::Handle *out_handle) {
MESOSPHERE_ASSERT_THIS();
KScopedDisableDispatch dd;
KScopedSpinLock lk(m_lock);
/* Never exceed our capacity. */
R_UNLESS(m_count < m_table_size, svc::ResultOutOfHandles());
*out_handle = EncodeHandle(this->AllocateEntry(), this->AllocateLinearId());
R_SUCCEED();
}
void KHandleTable::Unreserve(ams::svc::Handle handle) {
MESOSPHERE_ASSERT_THIS();
KScopedDisableDispatch dd;
KScopedSpinLock lk(m_lock);
/* Unpack the handle. */
const auto handle_pack = GetHandleBitPack(handle);
const auto index = handle_pack.Get<HandleIndex>();
const auto linear_id = handle_pack.Get<HandleLinearId>();
const auto reserved = handle_pack.Get<HandleReserved>();
MESOSPHERE_ASSERT(reserved == 0);
MESOSPHERE_ASSERT(linear_id != 0);
MESOSPHERE_UNUSED(linear_id, reserved);
if (AMS_LIKELY(index < m_table_size)) {
/* NOTE: This code does not check the linear id. */
MESOSPHERE_ASSERT(m_objects[index] == nullptr);
this->FreeEntry(index);
}
}
void KHandleTable::Register(ams::svc::Handle handle, KAutoObject *obj) {
MESOSPHERE_ASSERT_THIS();
KScopedDisableDispatch dd;
KScopedSpinLock lk(m_lock);
/* Unpack the handle. */
const auto handle_pack = GetHandleBitPack(handle);
const auto index = handle_pack.Get<HandleIndex>();
const auto linear_id = handle_pack.Get<HandleLinearId>();
const auto reserved = handle_pack.Get<HandleReserved>();
MESOSPHERE_ASSERT(reserved == 0);
MESOSPHERE_ASSERT(linear_id != 0);
MESOSPHERE_UNUSED(reserved);
if (AMS_LIKELY(index < m_table_size)) {
/* Set the entry. */
MESOSPHERE_ASSERT(m_objects[index] == nullptr);
m_entry_infos[index].linear_id = linear_id;
m_objects[index] = obj;
obj->Open();
}
}
}
kern_k_unused_slab_memory.cpp
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_unused_slab_memory.cpp
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
class KUnusedSlabMemory : public util::IntrusiveRedBlackTreeBaseNode<KUnusedSlabMemory> {
NON_COPYABLE(KUnusedSlabMemory);
NON_MOVEABLE(KUnusedSlabMemory);
private:
size_t m_size;
public:
struct RedBlackKeyType {
size_t m_size;
constexpr ALWAYS_INLINE size_t GetSize() const {
return m_size;
}
};
template<typename T> requires (std::same_as<T, KUnusedSlabMemory> || std::same_as<T, RedBlackKeyType>)
static constexpr ALWAYS_INLINE int Compare(const T &lhs, const KUnusedSlabMemory &rhs) {
if (lhs.GetSize() < rhs.GetSize()) {
return -1;
} else {
return 1;
}
}
public:
KUnusedSlabMemory(size_t size) : m_size(size) { /* ... */ }
constexpr ALWAYS_INLINE KVirtualAddress GetAddress() const { return reinterpret_cast<uintptr_t>(this); }
constexpr ALWAYS_INLINE size_t GetSize() const { return m_size; }
};
static_assert(std::is_trivially_destructible<KUnusedSlabMemory>::value);
using KUnusedSlabMemoryTree = util::IntrusiveRedBlackTreeBaseTraits<KUnusedSlabMemory>::TreeType<KUnusedSlabMemory>;
constinit KLightLock g_unused_slab_memory_lock;
constinit KUnusedSlabMemoryTree g_unused_slab_memory_tree;
}
KVirtualAddress AllocateUnusedSlabMemory(size_t size, size_t alignment) {
/* Acquire exclusive access to the memory tree. */
KScopedLightLock lk(g_unused_slab_memory_lock);
/* Adjust size and alignment. */
size = std::max(size, sizeof(KUnusedSlabMemory));
alignment = std::max(alignment, alignof(KUnusedSlabMemory));
/* Find the smallest block which fits our allocation. */
KUnusedSlabMemory *best_fit = std::addressof(*g_unused_slab_memory_tree.nfind_key({ size - 1 }));
/* Ensure that the chunk is valid. */
size_t prefix_waste;
KVirtualAddress alloc_start;
KVirtualAddress alloc_last;
KVirtualAddress alloc_end;
KVirtualAddress chunk_last;
KVirtualAddress chunk_end;
while (true) {
/* Check that we still have a chunk satisfying our size requirement. */
if (AMS_UNLIKELY(best_fit == nullptr)) {
return Null<KVirtualAddress>;
}
/* Determine where the actual allocation would start. */
alloc_start = util::AlignUp(GetInteger(best_fit->GetAddress()), alignment);
if (AMS_LIKELY(alloc_start >= best_fit->GetAddress())) {
prefix_waste = alloc_start - best_fit->GetAddress();
alloc_end = alloc_start + size;
alloc_last = alloc_end - 1;
/* Check that the allocation remains in bounds. */
if (alloc_start <= alloc_last) {
chunk_end = best_fit->GetAddress() + best_fit->GetSize();
chunk_last = chunk_end - 1;
if (AMS_LIKELY(alloc_last <= chunk_last)) {
break;
}
}
}
/* Check the next smallest block. */
best_fit = best_fit->GetNext();
}
/* Remove the chunk we selected from the tree. */
g_unused_slab_memory_tree.erase(g_unused_slab_memory_tree.iterator_to(*best_fit));
std::destroy_at(best_fit);
/* If there's enough prefix waste due to alignment for a new chunk, insert it into the tree. */
if (prefix_waste >= sizeof(KUnusedSlabMemory)) {
std::construct_at(best_fit, prefix_waste);
g_unused_slab_memory_tree.insert(*best_fit);
}
/* If there's enough suffix waste after the allocation for a new chunk, insert it into the tree. */
if (alloc_last < alloc_end + sizeof(KUnusedSlabMemory) - 1 && alloc_end + sizeof(KUnusedSlabMemory) - 1 <= chunk_last) {
KUnusedSlabMemory *suffix_chunk = GetPointer<KUnusedSlabMemory>(alloc_end);
std::construct_at(suffix_chunk, chunk_end - alloc_end);
g_unused_slab_memory_tree.insert(*suffix_chunk);
}
/* Return the allocated memory. */
return alloc_start;
}
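/* An illustrative aside: the allocator above takes the smallest chunk that fits, then
 * re-inserts any alignment prefix and tail suffix still large enough to hold a chunk header.
 * A standalone sketch of that split arithmetic follows; the header size and all addresses
 * are hypothetical. */
#include <cstdint>
#include <cstdio>

static uint64_t AlignUpU64(uint64_t v, uint64_t a) { return (v + a - 1) & ~(a - 1); }

int main() {
    constexpr uint64_t HeaderSize = 16; /* stands in for sizeof(KUnusedSlabMemory) */
    const uint64_t chunk_addr = 0x1008, chunk_size = 0x300;
    const uint64_t size = 0x100, alignment = 0x100;

    const uint64_t alloc_start  = AlignUpU64(chunk_addr, alignment);
    const uint64_t prefix_waste = alloc_start - chunk_addr;
    const uint64_t alloc_end    = alloc_start + size;
    const uint64_t chunk_end    = chunk_addr + chunk_size;

    std::printf("allocation at %#llx\n", static_cast<unsigned long long>(alloc_start));
    if (prefix_waste >= HeaderSize) { /* prefix big enough to become a new free chunk */
        std::printf("prefix chunk at %#llx, size %#llx\n",
                    static_cast<unsigned long long>(chunk_addr), static_cast<unsigned long long>(prefix_waste));
    }
    if (alloc_end + HeaderSize <= chunk_end) { /* suffix big enough to become a new free chunk */
        std::printf("suffix chunk at %#llx, size %#llx\n",
                    static_cast<unsigned long long>(alloc_end), static_cast<unsigned long long>(chunk_end - alloc_end));
    }
    return 0;
}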
void FreeUnusedSlabMemory(KVirtualAddress address, size_t size) {
/* NOTE: This is called only during initialization, so we don't need exclusive access. */
/* Nintendo doesn't acquire the lock here, either. */
/* Check that there's anything at all for us to free. */
if (AMS_UNLIKELY(size == 0)) {
return;
}
/* Determine the start of the block. */
const KVirtualAddress block_start = util::AlignUp(GetInteger(address), alignof(KUnusedSlabMemory));
/* Check that there's space for a KUnusedSlabMemory to exist. */
if (AMS_UNLIKELY(std::numeric_limits<uintptr_t>::max() - sizeof(KUnusedSlabMemory) < GetInteger(block_start))) {
return;
}
/* Determine the end of the block region. */
const KVirtualAddress block_end = util::AlignDown(GetInteger(address) + size, alignof(KUnusedSlabMemory));
/* Check that the block remains within bounds. */
if (AMS_UNLIKELY(block_start + sizeof(KUnusedSlabMemory) - 1 > block_end - 1)) {
return;
}
/* Create the block. */
KUnusedSlabMemory *block = GetPointer<KUnusedSlabMemory>(block_start);
std::construct_at(block, GetInteger(block_end) - GetInteger(block_start));
/* Insert the block into the tree. */
g_unused_slab_memory_tree.insert(*block);
}
}
kern_k_page_heap.cpp
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_page_heap.cpp
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
void KPageHeap::Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management_address, size_t management_size, const size_t *block_shifts, size_t num_block_shifts) {
/* Check our assumptions. */
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
MESOSPHERE_ASSERT(util::IsAligned(size, PageSize));
MESOSPHERE_ASSERT(0 < num_block_shifts && num_block_shifts <= NumMemoryBlockPageShifts);
const KVirtualAddress management_end = management_address + management_size;
/* Set our members. */
m_heap_address = address;
m_heap_size = size;
m_num_blocks = num_block_shifts;
/* Setup bitmaps. */
u64 *cur_bitmap_storage = GetPointer<u64>(management_address);
for (size_t i = 0; i < num_block_shifts; i++) {
const size_t cur_block_shift = block_shifts[i];
const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
cur_bitmap_storage = m_blocks[i].Initialize(m_heap_address, m_heap_size, cur_block_shift, next_block_shift, cur_bitmap_storage);
}
/* Ensure we didn't overextend our bounds. */
MESOSPHERE_ABORT_UNLESS(KVirtualAddress(cur_bitmap_storage) <= management_end);
}
size_t KPageHeap::GetNumFreePages() const {
size_t num_free = 0;
for (size_t i = 0; i < m_num_blocks; i++) {
num_free += m_blocks[i].GetNumFreePages();
}
return num_free;
}
KPhysicalAddress KPageHeap::AllocateByLinearSearch(s32 index) {
const size_t needed_size = m_blocks[index].GetSize();
for (s32 i = index; i < static_cast<s32>(m_num_blocks); i++) {
if (const KPhysicalAddress addr = m_blocks[i].PopBlock(false); addr != Null<KPhysicalAddress>) {
if (const size_t allocated_size = m_blocks[i].GetSize(); allocated_size > needed_size) {
this->Free(addr + needed_size, (allocated_size - needed_size) / PageSize);
}
return addr;
}
}
return Null<KPhysicalAddress>;
}
KPhysicalAddress KPageHeap::AllocateByRandom(s32 index, size_t num_pages, size_t align_pages) {
/* Get the size and required alignment. */
const size_t needed_size = num_pages * PageSize;
const size_t align_size = align_pages * PageSize;
/* Determine meta-alignment of our desired alignment size. */
const size_t align_shift = util::CountTrailingZeros(align_size);
/* Decide on a block to allocate from. */
constexpr size_t MinimumPossibleAlignmentsForRandomAllocation = 4;
{
/* By default, we'll want to look at all blocks larger than our current one. */
s32 max_blocks = static_cast<s32>(m_num_blocks);
/* Determine the maximum block we should try to allocate from. */
size_t possible_alignments = 0;
for (s32 i = index; i < max_blocks; ++i) {
/* Add the possible alignments from blocks at the current size. */
possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * m_blocks[i].GetNumFreeBlocks();
/* If there are enough possible alignments, we don't need to look at larger blocks. */
if (possible_alignments >= MinimumPossibleAlignmentsForRandomAllocation) {
max_blocks = i + 1;
break;
}
}
/* If we have any possible alignments which require a larger block, we need to pick one. */
if (possible_alignments > 0 && index + 1 < max_blocks) {
/* Select a random alignment from the possibilities. */
const size_t rnd = m_rng.GenerateRandom(possible_alignments);
/* Determine which block corresponds to the random alignment we chose. */
possible_alignments = 0;
for (s32 i = index; i < max_blocks; ++i) {
/* Add the possible alignments from blocks at the current size. */
possible_alignments += (1 + ((m_blocks[i].GetSize() - needed_size) >> align_shift)) * m_blocks[i].GetNumFreeBlocks();
/* If the current block gets us to our random choice, use the current block. */
if (rnd < possible_alignments) {
index = i;
break;
}
}
}
}
/* Pop a block from the index we selected. */
if (KPhysicalAddress addr = m_blocks[index].PopBlock(true); addr != Null<KPhysicalAddress>) {
/* Determine how much size we have left over. */
if (const size_t leftover_size = m_blocks[index].GetSize() - needed_size; leftover_size > 0) {
/* Determine how many valid alignments we can have. */
const size_t possible_alignments = 1 + (leftover_size >> align_shift);
/* Select a random valid alignment. */
const size_t random_offset = m_rng.GenerateRandom(possible_alignments) << align_shift;
/* Free memory before the random offset. */
if (random_offset != 0) {
this->Free(addr, random_offset / PageSize);
}
/* Advance our block by the random offset. */
addr += random_offset;
/* Free memory after our allocated block. */
if (random_offset != leftover_size) {
this->Free(addr + needed_size, (leftover_size - random_offset) / PageSize);
}
}
/* Return the block we allocated. */
return addr;
}
return Null<KPhysicalAddress>;
}
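/* An illustrative aside: a free block of block_size can host a needed_size allocation at
 * 1 + (block_size - needed_size) / align_size distinct aligned offsets, which is the count
 * AllocateByRandom accumulates per block above. A standalone worked example with
 * hypothetical sizes: */
#include <cstdint>
#include <cstdio>

int main() {
    const uint64_t needed_size = 0x4000;      /* 4 pages */
    const unsigned align_shift = 13;          /* log2 of a 2-page (0x2000) alignment */
    const uint64_t block_size      = 0x10000; /* 16-page free blocks */
    const uint64_t num_free_blocks = 3;

    const uint64_t per_block = 1 + ((block_size - needed_size) >> align_shift);
    std::printf("alignments per block = %llu, total = %llu\n",
                static_cast<unsigned long long>(per_block),
                static_cast<unsigned long long>(per_block * num_free_blocks));
    /* (0x10000 - 0x4000) >> 13 == 6, so 7 placements per block, 21 in total. */
    return 0;
}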
void KPageHeap::FreeBlock(KPhysicalAddress block, s32 index) {
do {
block = m_blocks[index++].PushBlock(block);
} while (block != Null<KPhysicalAddress>);
}
void KPageHeap::Free(KPhysicalAddress addr, size_t num_pages) {
/* Freeing no pages is a no-op. */
if (num_pages == 0) {
return;
}
/* Find the largest block size that we can free, and free as many as possible. */
s32 big_index = static_cast<s32>(m_num_blocks) - 1;
const KPhysicalAddress start = addr;
const KPhysicalAddress end = addr + num_pages * PageSize;
KPhysicalAddress before_start = start;
KPhysicalAddress before_end = start;
KPhysicalAddress after_start = end;
KPhysicalAddress after_end = end;
while (big_index >= 0) {
const size_t block_size = m_blocks[big_index].GetSize();
const KPhysicalAddress big_start = util::AlignUp(GetInteger(start), block_size);
const KPhysicalAddress big_end = util::AlignDown(GetInteger(end), block_size);
if (big_start < big_end) {
/* Free as many big blocks as we can. */
for (auto block = big_start; block < big_end; block += block_size) {
this->FreeBlock(block, big_index);
}
before_end = big_start;
after_start = big_end;
break;
}
big_index--;
}
MESOSPHERE_ASSERT(big_index >= 0);
/* Free space before the big blocks. */
for (s32 i = big_index - 1; i >= 0; i--) {
const size_t block_size = m_blocks[i].GetSize();
while (before_start + block_size <= before_end) {
before_end -= block_size;
this->FreeBlock(before_end, i);
}
}
/* Free space after the big blocks. */
for (s32 i = big_index - 1; i >= 0; i--) {
const size_t block_size = m_blocks[i].GetSize();
while (after_start + block_size <= after_end) {
this->FreeBlock(after_start, i);
after_start += block_size;
}
}
}
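/* An illustrative aside: Free above carves [start, end) into a big-block-aligned middle
 * region, then peels smaller blocks off both ends. A standalone sketch with two hypothetical
 * block sizes shows the same carving: */
#include <cstdint>
#include <cstdio>

static uint64_t AlignUpU64(uint64_t v, uint64_t a)   { return (v + a - 1) & ~(a - 1); }
static uint64_t AlignDownU64(uint64_t v, uint64_t a) { return v & ~(a - 1); }

int main() {
    const uint64_t start = 0x3000, end = 0x13000; /* 16 pages of 0x1000 */
    const uint64_t small = 0x1000, big = 0x4000;  /* hypothetical block sizes */

    const uint64_t big_start = AlignUpU64(start, big);  /* 0x4000  */
    const uint64_t big_end   = AlignDownU64(end, big);  /* 0x10000 */
    for (uint64_t b = big_start; b < big_end; b += big) {
        std::printf("free big   block at %#llx\n", static_cast<unsigned long long>(b));
    }
    for (uint64_t b = big_start; b >= start + small; ) { /* space before the big blocks */
        b -= small;
        std::printf("free small block at %#llx (before)\n", static_cast<unsigned long long>(b));
    }
    for (uint64_t b = big_end; b + small <= end; b += small) { /* space after */
        std::printf("free small block at %#llx (after)\n", static_cast<unsigned long long>(b));
    }
    return 0;
}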
size_t KPageHeap::CalculateManagementOverheadSize(size_t region_size, const size_t *block_shifts, size_t num_block_shifts) {
size_t overhead_size = 0;
for (size_t i = 0; i < num_block_shifts; i++) {
const size_t cur_block_shift = block_shifts[i];
const size_t next_block_shift = (i != num_block_shifts - 1) ? block_shifts[i + 1] : 0;
overhead_size += KPageHeap::Block::CalculateManagementOverheadSize(region_size, cur_block_shift, next_block_shift);
}
return util::AlignUp(overhead_size, PageSize);
}
void KPageHeap::DumpFreeList() const {
MESOSPHERE_RELEASE_LOG("KPageHeap::DumpFreeList %p\n", this);
for (size_t i = 0; i < m_num_blocks; ++i) {
const size_t block_size = m_blocks[i].GetSize();
const char *suffix;
size_t size;
if (block_size >= 1_GB) {
suffix = "GiB";
size = block_size / 1_GB;
} else if (block_size >= 1_MB) {
suffix = "MiB";
size = block_size / 1_MB;
} else if (block_size >= 1_KB) {
suffix = "KiB";
size = block_size / 1_KB;
} else {
suffix = "B";
size = block_size;
}
MESOSPHERE_RELEASE_LOG(" %4zu %s block x %zu\n", size, suffix, m_blocks[i].GetNumFreeBlocks());
}
}
}
kern_k_scheduler.cpp
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_scheduler.cpp
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#pragma GCC push_options
#pragma GCC optimize ("-O3")
namespace ams::kern {
bool KScheduler::s_scheduler_update_needed;
KScheduler::LockType KScheduler::s_scheduler_lock;
KSchedulerPriorityQueue KScheduler::s_priority_queue;
namespace {
class KSchedulerInterruptHandler : public KInterruptHandler {
public:
constexpr KSchedulerInterruptHandler() : KInterruptHandler() { /* ... */ }
virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override {
MESOSPHERE_UNUSED(interrupt_id);
return GetDummyInterruptTask();
}
};
ALWAYS_INLINE void IncrementScheduledCount(KThread *thread) {
if (KProcess *parent = thread->GetOwnerProcess(); parent != nullptr) {
parent->IncrementScheduledCount();
}
}
KSchedulerInterruptHandler g_scheduler_interrupt_handler;
ALWAYS_INLINE auto *GetSchedulerInterruptHandler() {
return std::addressof(g_scheduler_interrupt_handler);
}
}
void KScheduler::Initialize(KThread *idle_thread) {
/* Set core ID/idle thread/interrupt task manager. */
m_core_id = GetCurrentCoreId();
m_idle_thread = idle_thread;
m_state.idle_thread_stack = m_idle_thread->GetStackTop();
m_state.interrupt_task_manager = std::addressof(Kernel::GetInterruptTaskManager());
/* Insert the main thread into the priority queue. */
{
KScopedSchedulerLock lk;
GetPriorityQueue().PushBack(GetCurrentThreadPointer());
SetSchedulerUpdateNeeded();
}
/* Bind interrupt handler. */
Kernel::GetInterruptManager().BindHandler(GetSchedulerInterruptHandler(), KInterruptName_Scheduler, m_core_id, KInterruptController::PriorityLevel_Scheduler, false, false);
/* Set the current thread. */
m_current_thread = GetCurrentThreadPointer();
}
void KScheduler::Activate() {
MESOSPHERE_ASSERT(GetCurrentThread().GetDisableDispatchCount() == 1);
m_state.should_count_idle = KTargetSystem::IsDebugMode();
m_is_active = true;
RescheduleCurrentCore();
}
u64 KScheduler::UpdateHighestPriorityThread(KThread *highest_thread) {
if (KThread *prev_highest_thread = m_state.highest_priority_thread; AMS_LIKELY(prev_highest_thread != highest_thread)) {
if (AMS_LIKELY(prev_highest_thread != nullptr)) {
IncrementScheduledCount(prev_highest_thread);
prev_highest_thread->SetLastScheduledTick(KHardwareTimer::GetTick());
}
if (m_state.should_count_idle) {
if (AMS_LIKELY(highest_thread != nullptr)) {
if (KProcess *process = highest_thread->GetOwnerProcess(); process != nullptr) {
/* Set running thread (and increment switch count). */
process->SetRunningThread(m_core_id, highest_thread, m_state.idle_count, ++m_state.switch_count);
}
} else {
/* Set idle count and switch count to switch count + 1. */
m_state.idle_count = ++m_state.switch_count;
}
}
MESOSPHERE_KTRACE_SCHEDULE_UPDATE(m_core_id, (prev_highest_thread != nullptr ? prev_highest_thread : m_idle_thread), (highest_thread != nullptr ? highest_thread : m_idle_thread));
m_state.highest_priority_thread = highest_thread;
m_state.needs_scheduling = true;
return (1ul << m_core_id);
} else {
return 0;
}
}
u64 KScheduler::UpdateHighestPriorityThreadsImpl() {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
/* Clear that we need to update. */
ClearSchedulerUpdateNeeded();
u64 cores_needing_scheduling = 0, idle_cores = 0;
KThread *top_threads[cpu::NumCores];
auto &priority_queue = GetPriorityQueue();
/* We want to go over all cores, finding the highest priority thread and determining if scheduling is needed for that core. */
for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) {
KThread *top_thread = priority_queue.GetScheduledFront(core_id);
if (top_thread != nullptr) {
/* We need to check if the thread's process has a pinned thread. */
if (KProcess *parent = top_thread->GetOwnerProcess(); parent != nullptr) {
/* Check that there's a pinned thread other than the current top thread. */
if (KThread *pinned = parent->GetPinnedThread(core_id); pinned != nullptr && pinned != top_thread) {
/* We need to prefer threads with kernel waiters to the pinned thread. */
if (top_thread->GetNumKernelWaiters() == 0 && top_thread != parent->GetExceptionThread()) {
/* If the pinned thread is runnable, use it. */
if (pinned->GetRawState() == KThread::ThreadState_Runnable) {
top_thread = pinned;
} else {
top_thread = nullptr;
}
}
}
}
} else {
idle_cores |= (1ul << core_id);
}
top_threads[core_id] = top_thread;
cores_needing_scheduling |= Kernel::GetScheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
}
/* Idle cores are bad. We're going to try to migrate threads to each idle core in turn. */
while (idle_cores != 0) {
s32 core_id = __builtin_ctzll(idle_cores);
if (KThread *suggested = priority_queue.GetSuggestedFront(core_id); suggested != nullptr) {
s32 migration_candidates[cpu::NumCores];
size_t num_candidates = 0;
/* While we have a suggested thread, try to migrate it! */
while (suggested != nullptr) {
/* Check if the suggested thread is the top thread on its core. */
const s32 suggested_core = suggested->GetActiveCore();
if (KThread *top_thread = (suggested_core >= 0) ? top_threads[suggested_core] : nullptr; top_thread != suggested) {
/* Make sure we're not dealing with threads too high priority for migration. */
if (top_thread != nullptr && top_thread->GetPriority() < HighestCoreMigrationAllowedPriority) {
break;
}
/* The suggested thread isn't bound to its core, so we can migrate it! */
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested);
MESOSPHERE_KTRACE_CORE_MIGRATION(suggested->GetId(), suggested_core, core_id, 1);
top_threads[core_id] = suggested;
cores_needing_scheduling |= Kernel::GetScheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
break;
}
/* Note this core as a candidate for migration. */
MESOSPHERE_ASSERT(num_candidates < cpu::NumCores);
migration_candidates[num_candidates++] = suggested_core;
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
}
/* If suggested is nullptr, we failed to migrate a specific thread. So let's try all our candidate cores' top threads. */
if (suggested == nullptr) {
for (size_t i = 0; i < num_candidates; i++) {
/* Check if there's some other thread that can run on the candidate core. */
const s32 candidate_core = migration_candidates[i];
suggested = top_threads[candidate_core];
if (KThread *next_on_candidate_core = priority_queue.GetScheduledNext(candidate_core, suggested); next_on_candidate_core != nullptr) {
/* The candidate core can run some other thread! We'll migrate its current top thread to us. */
top_threads[candidate_core] = next_on_candidate_core;
cores_needing_scheduling |= Kernel::GetScheduler(candidate_core).UpdateHighestPriorityThread(top_threads[candidate_core]);
/* Perform the migration. */
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(candidate_core, suggested);
MESOSPHERE_KTRACE_CORE_MIGRATION(suggested->GetId(), candidate_core, core_id, 2);
top_threads[core_id] = suggested;
cores_needing_scheduling |= Kernel::GetScheduler(core_id).UpdateHighestPriorityThread(top_threads[core_id]);
break;
}
}
}
}
idle_cores &= ~(1ul << core_id);
}
return cores_needing_scheduling;
}
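/* An illustrative aside: the idle-core loop above walks a 64-bit mask of cores
 * lowest-set-bit first, clearing each bit once handled. A standalone sketch of that
 * iteration shape (the mask value is arbitrary; __builtin_ctzll is the GCC/Clang
 * count-trailing-zeros builtin used above): */
#include <cstdint>
#include <cstdio>

int main() {
    uint64_t idle_cores = 0b1011; /* hypothetical: cores 0, 1, and 3 are idle */
    while (idle_cores != 0) {
        const int core_id = __builtin_ctzll(idle_cores); /* lowest set bit */
        std::printf("migrating work to idle core %d\n", core_id);
        idle_cores &= ~(UINT64_C(1) << core_id);
    }
    return 0;
}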
void KScheduler::SwitchThread(KThread *next_thread) {
KProcess * const cur_process = GetCurrentProcessPointer();
KThread * const cur_thread = GetCurrentThreadPointer();
/* We never want to schedule a null thread, so use the idle thread if we don't have a next. */
if (next_thread == nullptr) {
next_thread = m_idle_thread;
}
if (next_thread->GetCurrentCore() != m_core_id) {
next_thread->SetCurrentCore(m_core_id);
}
/* If we're not actually switching thread, there's nothing to do. */
if (next_thread == cur_thread) {
return;
}
/* Next thread is now known not to be nullptr, and must not be dispatchable. */
MESOSPHERE_ASSERT(next_thread->GetDisableDispatchCount() == 1);
/* Update the CPU time tracking variables. */
const s64 prev_tick = m_last_context_switch_time;
const s64 cur_tick = KHardwareTimer::GetTick();
const s64 tick_diff = cur_tick - prev_tick;
cur_thread->AddCpuTime(m_core_id, tick_diff);
if (cur_process != nullptr) {
cur_process->AddCpuTime(tick_diff);
}
m_last_context_switch_time = cur_tick;
/* Update our previous thread. */
if (cur_process != nullptr) {
/* NOTE: Combining this into AMS_LIKELY(!... && ...) triggers an internal compiler error: Segmentation fault in GCC 9.2.0. */
if (AMS_LIKELY(!cur_thread->IsTerminationRequested()) && AMS_LIKELY(cur_thread->GetActiveCore() == m_core_id)) {
m_state.prev_thread = cur_thread;
} else {
m_state.prev_thread = nullptr;
}
}
MESOSPHERE_KTRACE_THREAD_SWITCH(next_thread);
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
/* Ensure the single-step bit in mdscr reflects the correct single-step state for the new thread. */
/* NOTE: Per ARM docs, changing the single-step bit requires a "context synchronization event" to */
/* be sure that our new configuration takes. However, there are three types of synchronization event: */
/* Taking an exception, returning from an exception, and ISB. The single-step bit change only matters */
/* in EL0...which implies a return-from-exception has occurred since we set the bit. Thus, forcing */
/* an ISB is unnecessary, and we can modify the register safely and be confident it will affect the next */
/* userland instruction executed. */
cpu::MonitorDebugSystemControlRegisterAccessor().SetSoftwareStep(next_thread->IsHardwareSingleStep()).Store();
#endif
/* Switch the current process, if we're switching processes. */
if (KProcess *next_process = next_thread->GetOwnerProcess(); next_process != cur_process) {
KProcess::Switch(cur_process, next_process);
}
/* Set the new thread. */
SetCurrentThread(next_thread);
m_current_thread = next_thread;
/* Set the new Thread Local region. */
cpu::SwitchThreadLocalRegion(GetInteger(next_thread->GetThreadLocalRegionAddress()));
}
void KScheduler::ClearPreviousThread(KThread *thread) {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
for (size_t i = 0; i < cpu::NumCores; ++i) {
/* Get an atomic reference to the core scheduler's previous thread. */
const util::AtomicRef<KThread *> prev_thread(Kernel::GetScheduler(static_cast<s32>(i)).m_state.prev_thread);
/* Atomically clear the previous thread if it's our target. */
KThread *compare = thread;
prev_thread.CompareExchangeStrong(compare, nullptr);
}
}
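/* Annotation (added, not from the original source): CompareExchangeStrong(expected, desired) */
/* stores `desired` only if the slot still holds `expected`. Each core's prev_thread is thus  */
/* cleared only where it still points at `thread`, leaving other cores' entries untouched.    */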
void KScheduler::OnThreadStateChanged(KThread *thread, KThread::ThreadState old_state) {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
/* Check if the state has changed, because if it hasn't there's nothing to do. */
const KThread::ThreadState cur_state = thread->GetRawState();
if (cur_state == old_state) {
return;
}
/* Update the priority queues. */
if (old_state == KThread::ThreadState_Runnable) {
/* If we were previously runnable, then we're not runnable now, and we should remove. */
GetPriorityQueue().Remove(thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded();
} else if (cur_state == KThread::ThreadState_Runnable) {
/* If we're now runnable, then we weren't previously, and we should add. */
GetPriorityQueue().PushBack(thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded();
}
}
void KScheduler::OnThreadPriorityChanged(KThread *thread, s32 old_priority) {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
/* If the thread is runnable, we want to change its priority in the queue. */
if (thread->GetRawState() == KThread::ThreadState_Runnable) {
GetPriorityQueue().ChangePriority(old_priority, thread == GetCurrentThreadPointer(), thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded();
}
}
void KScheduler::OnThreadAffinityMaskChanged(KThread *thread, const KAffinityMask &old_affinity, s32 old_core) {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
/* If the thread is runnable, we want to change its affinity in the queue. */
if (thread->GetRawState() == KThread::ThreadState_Runnable) {
GetPriorityQueue().ChangeAffinityMask(old_core, old_affinity, thread);
IncrementScheduledCount(thread);
SetSchedulerUpdateNeeded();
}
}
void KScheduler::RotateScheduledQueue(s32 core_id, s32 priority) {
MESOSPHERE_ASSERT(IsSchedulerLockedByCurrentThread());
/* Get a reference to the priority queue. */
auto &priority_queue = GetPriorityQueue();
/* Rotate the front of the queue to the end. */
KThread *top_thread = priority_queue.GetScheduledFront(core_id, priority);
KThread *next_thread = nullptr;
if (top_thread != nullptr) {
next_thread = priority_queue.MoveToScheduledBack(top_thread);
if (next_thread != top_thread) {
IncrementScheduledCount(top_thread);
IncrementScheduledCount(next_thread);
}
}
/* While we have a suggested thread, try to migrate it! */
{
KThread *suggested = priority_queue.GetSuggestedFront(core_id, priority);
while (suggested != nullptr) {
/* Check if the suggested thread is the top thread on its core. */
const s32 suggested_core = suggested->GetActiveCore();
if (KThread *top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) {
/* If the next thread is a new thread that has been waiting longer than our suggestion, we prefer it to our suggestion. */
if (top_thread != next_thread && next_thread != nullptr && next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()) {
suggested = nullptr;
break;
}
/* If we're allowed to do a migration, do one. */
/* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion to the front of the queue. */
if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested, true);
IncrementScheduledCount(suggested);
break;
}
}
/* Get the next suggestion. */
suggested = priority_queue.GetSamePriorityNext(core_id, suggested);
}
}
/* Now that we might have migrated a thread with the same priority, check if we can do better. */
{
KThread *best_thread = priority_queue.GetScheduledFront(core_id);
if (best_thread == GetCurrentThreadPointer()) {
best_thread = priority_queue.GetScheduledNext(core_id, best_thread);
}
/* If the best thread we can choose has a priority the same or worse than ours, try to migrate a higher priority thread. */
if (best_thread != nullptr && best_thread->GetPriority() >= priority) {
KThread *suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
/* If the suggestion's priority is the same as ours, don't bother. */
if (suggested->GetPriority() >= best_thread->GetPriority()) {
break;
}
/* Check if the suggested thread is the top thread on its core. */
const s32 suggested_core = suggested->GetActiveCore();
if (KThread *top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) {
/* If we're allowed to do a migration, do one. */
/* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion to the front of the queue. */
if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested, true);
IncrementScheduledCount(suggested);
break;
}
}
/* Get the next suggestion. */
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
}
}
}
/* After a rotation, we need a scheduler update. */
SetSchedulerUpdateNeeded();
}
void KScheduler::YieldWithoutCoreMigration() {
/* Validate preconditions. */
MESOSPHERE_ASSERT(CanSchedule());
MESOSPHERE_ASSERT(GetCurrentProcessPointer() != nullptr);
/* Get the current thread and process. */
KThread &cur_thread = GetCurrentThread();
KProcess &cur_process = GetCurrentProcess();
/* If the thread's yield count matches, there's nothing for us to do. */
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
return;
}
/* Get a reference to the priority queue. */
auto &priority_queue = GetPriorityQueue();
/* Perform the yield. */
{
KScopedSchedulerLock sl;
const auto cur_state = cur_thread.GetRawState();
if (cur_state == KThread::ThreadState_Runnable) {
/* Put the current thread at the back of the queue. */
KThread *next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
IncrementScheduledCount(std::addressof(cur_thread));
/* If the next thread is different, we have an update to perform. */
if (next_thread != std::addressof(cur_thread)) {
SetSchedulerUpdateNeeded();
} else {
/* Otherwise, set the thread's yield count so that we won't waste work until the process is scheduled again. */
cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
}
}
}
}
void KScheduler::YieldWithCoreMigration() {
/* Validate preconditions. */
MESOSPHERE_ASSERT(CanSchedule());
MESOSPHERE_ASSERT(GetCurrentProcessPointer() != nullptr);
/* Get the current thread and process. */
KThread &cur_thread = GetCurrentThread();
KProcess &cur_process = GetCurrentProcess();
/* If the thread's yield count matches, there's nothing for us to do. */
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
return;
}
/* Get a reference to the priority queue. */
auto &priority_queue = GetPriorityQueue();
/* Perform the yield. */
{
KScopedSchedulerLock sl;
const auto cur_state = cur_thread.GetRawState();
if (cur_state == KThread::ThreadState_Runnable) {
/* Get the current active core. */
const s32 core_id = cur_thread.GetActiveCore();
/* Put the current thread at the back of the queue. */
KThread *next_thread = priority_queue.MoveToScheduledBack(std::addressof(cur_thread));
IncrementScheduledCount(std::addressof(cur_thread));
/* While we have a suggested thread, try to migrate it! */
bool recheck = false;
KThread *suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
/* Check if the suggested thread is the thread running on its core. */
const s32 suggested_core = suggested->GetActiveCore();
if (KThread *running_on_suggested_core = (suggested_core >= 0) ? Kernel::GetScheduler(suggested_core).m_state.highest_priority_thread : nullptr; running_on_suggested_core != suggested) {
/* If the current thread's priority is higher than our suggestion's we prefer the next thread to the suggestion. */
/* We also prefer the next thread when the current thread's priority is equal to the suggestion's, but the next thread has been waiting longer. */
if ((suggested->GetPriority() > cur_thread.GetPriority()) ||
(suggested->GetPriority() == cur_thread.GetPriority() && next_thread != std::addressof(cur_thread) && next_thread->GetLastScheduledTick() < suggested->GetLastScheduledTick()))
{
suggested = nullptr;
break;
}
/* If we're allowed to do a migration, do one. */
/* NOTE: Unlike migrations in UpdateHighestPriorityThread, this moves the suggestion to the front of the queue. */
if (running_on_suggested_core == nullptr || running_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested, true);
MESOSPHERE_KTRACE_CORE_MIGRATION(suggested->GetId(), suggested_core, core_id, 3);
IncrementScheduledCount(suggested);
break;
} else {
/* We couldn't perform a migration, but we should check again on a future yield. */
recheck = true;
}
}
/* Get the next suggestion. */
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
}
/* If we still have a suggestion or the next thread is different, we have an update to perform. */
if (suggested != nullptr || next_thread != std::addressof(cur_thread)) {
SetSchedulerUpdateNeeded();
} else if (!recheck) {
/* Otherwise if we don't need to re-check, set the thread's yield count so that we won't waste work until the process is scheduled again. */
cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
}
}
}
}
void KScheduler::YieldToAnyThread() {
/* Validate preconditions. */
MESOSPHERE_ASSERT(CanSchedule());
MESOSPHERE_ASSERT(GetCurrentProcessPointer() != nullptr);
/* Get the current thread and process. */
KThread &cur_thread = GetCurrentThread();
KProcess &cur_process = GetCurrentProcess();
/* If the thread's yield count matches, there's nothing for us to do. */
if (cur_thread.GetYieldScheduleCount() == cur_process.GetScheduledCount()) {
return;
}
/* Get a reference to the priority queue. */
auto &priority_queue = GetPriorityQueue();
/* Perform the yield. */
{
KScopedSchedulerLock sl;
const auto cur_state = cur_thread.GetRawState();
if (cur_state == KThread::ThreadState_Runnable) {
/* Get the current active core. */
const s32 core_id = cur_thread.GetActiveCore();
/* Migrate the current thread to core -1. */
cur_thread.SetActiveCore(-1);
priority_queue.ChangeCore(core_id, std::addressof(cur_thread));
MESOSPHERE_KTRACE_CORE_MIGRATION(cur_thread.GetId(), core_id, -1, 4);
IncrementScheduledCount(std::addressof(cur_thread));
/* If there's nothing scheduled, we can try to perform a migration. */
if (priority_queue.GetScheduledFront(core_id) == nullptr) {
/* While we have a suggested thread, try to migrate it! */
KThread *suggested = priority_queue.GetSuggestedFront(core_id);
while (suggested != nullptr) {
/* Check if the suggested thread is the top thread on its core. */
const s32 suggested_core = suggested->GetActiveCore();
if (KThread *top_on_suggested_core = (suggested_core >= 0) ? priority_queue.GetScheduledFront(suggested_core) : nullptr; top_on_suggested_core != suggested) {
/* If we're allowed to do a migration, do one. */
if (top_on_suggested_core == nullptr || top_on_suggested_core->GetPriority() >= HighestCoreMigrationAllowedPriority) {
suggested->SetActiveCore(core_id);
priority_queue.ChangeCore(suggested_core, suggested);
MESOSPHERE_KTRACE_CORE_MIGRATION(suggested->GetId(), suggested_core, core_id, 5);
IncrementScheduledCount(suggested);
}
/* Regardless of whether we migrated, we had a candidate, so we're done. */
break;
}
/* Get the next suggestion. */
suggested = priority_queue.GetSuggestedNext(core_id, suggested);
}
/* If the suggestion is different from the current thread, we need to perform an update. */
if (suggested != std::addressof(cur_thread)) {
SetSchedulerUpdateNeeded();
} else {
/* Otherwise, set the thread's yield count so that we won't waste work until the process is scheduled again. */
cur_thread.SetYieldScheduleCount(cur_process.GetScheduledCount());
}
} else {
/* Otherwise, we have an update to perform. */
SetSchedulerUpdateNeeded();
}
}
}
}
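/* Illustrative sketch (added): these three yields correspond to the sentinel timeout values */
/* that userland passes to svcSleepThread. The enumerator names below are the ams::svc yield */
/* types and are an assumption here, not taken from this file:                               */
/*                                                                                           */
/*     if (timeout == ams::svc::YieldType_WithoutCoreMigration) {        // 0                */
/*         KScheduler::YieldWithoutCoreMigration();                                          */
/*     } else if (timeout == ams::svc::YieldType_WithCoreMigration) {    // -1               */
/*         KScheduler::YieldWithCoreMigration();                                             */
/*     } else if (timeout == ams::svc::YieldType_ToAnyThread) {          // -2               */
/*         KScheduler::YieldToAnyThread();                                                   */
/*     }                                                                                     */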
}
#pragma GCC pop_options

/* ==== File: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_resource_limit.cpp ==== */

/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
constexpr s64 DefaultTimeout = ams::svc::Tick(TimeSpan::FromSeconds(10));
}
void KResourceLimit::Initialize() {
m_waiter_count = 0;
std::memset(m_limit_values, 0, sizeof(m_limit_values));
std::memset(m_current_values, 0, sizeof(m_current_values));
std::memset(m_current_hints, 0, sizeof(m_current_hints));
std::memset(m_peak_values, 0, sizeof(m_peak_values));
}
void KResourceLimit::Finalize() {
/* ... */
}
s64 KResourceLimit::GetLimitValue(ams::svc::LimitableResource which) const {
MESOSPHERE_ASSERT_THIS();
s64 value;
{
KScopedLightLock lk(m_lock);
value = m_limit_values[which];
MESOSPHERE_ASSERT(value >= 0);
MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]);
MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]);
MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
}
return value;
}
s64 KResourceLimit::GetCurrentValue(ams::svc::LimitableResource which) const {
MESOSPHERE_ASSERT_THIS();
s64 value;
{
KScopedLightLock lk(m_lock);
value = m_current_values[which];
MESOSPHERE_ASSERT(value >= 0);
MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]);
MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]);
MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
}
return value;
}
s64 KResourceLimit::GetPeakValue(ams::svc::LimitableResource which) const {
MESOSPHERE_ASSERT_THIS();
s64 value;
{
KScopedLightLock lk(m_lock);
value = m_peak_values[which];
MESOSPHERE_ASSERT(value >= 0);
MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]);
MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]);
MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
}
return value;
}
s64 KResourceLimit::GetFreeValue(ams::svc::LimitableResource which) const {
MESOSPHERE_ASSERT_THIS();
s64 value;
{
KScopedLightLock lk(m_lock);
MESOSPHERE_ASSERT(m_current_values[which] >= 0);
MESOSPHERE_ASSERT(m_current_values[which] <= m_peak_values[which]);
MESOSPHERE_ASSERT(m_peak_values[which] <= m_limit_values[which]);
MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
value = m_limit_values[which] - m_current_values[which];
}
return value;
}
Result KResourceLimit::SetLimitValue(ams::svc::LimitableResource which, s64 value) {
MESOSPHERE_ASSERT_THIS();
KScopedLightLock lk(m_lock);
R_UNLESS(m_current_values[which] <= value, svc::ResultInvalidState());
m_limit_values[which] = value;
m_peak_values[which] = m_current_values[which];
R_SUCCEED();
}
void KResourceLimit::Add(ams::svc::LimitableResource which, s64 value) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KTargetSystem::IsDynamicResourceLimitsEnabled());
KScopedLightLock lk(m_lock);
/* Check that this is a true increase. */
MESOSPHERE_ABORT_UNLESS(value > 0);
/* Check that we can perform an increase. */
MESOSPHERE_ABORT_UNLESS(m_current_values[which] <= m_peak_values[which]);
MESOSPHERE_ABORT_UNLESS(m_peak_values[which] <= m_limit_values[which]);
MESOSPHERE_ABORT_UNLESS(m_current_hints[which] <= m_current_values[which]);
/* Check that the increase doesn't cause an overflow. */
const auto increased_limit = m_limit_values[which] + value;
const auto increased_current = m_current_values[which] + value;
const auto increased_hint = m_current_hints[which] + value;
MESOSPHERE_ABORT_UNLESS(m_limit_values[which] < increased_limit);
MESOSPHERE_ABORT_UNLESS(m_current_values[which] < increased_current);
MESOSPHERE_ABORT_UNLESS(m_current_hints[which] < increased_hint);
/* Add the value. */
m_limit_values[which] = increased_limit;
m_current_values[which] = increased_current;
m_current_hints[which] = increased_hint;
/* Update our peak. */
m_peak_values[which] = std::max(m_peak_values[which], increased_current);
}
bool KResourceLimit::Reserve(ams::svc::LimitableResource which, s64 value) {
return this->Reserve(which, value, KHardwareTimer::GetTick() + DefaultTimeout);
}
bool KResourceLimit::Reserve(ams::svc::LimitableResource which, s64 value, s64 timeout) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(value >= 0);
KScopedLightLock lk(m_lock);
MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
if (m_current_hints[which] >= m_limit_values[which]) {
return false;
}
/* Loop until we reserve or run out of time. */
while (true) {
MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]);
MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
/* If the addition would overflow, don't allow the reservation to succeed. */
if (m_current_values[which] + value <= m_current_values[which]) {
break;
}
if (m_current_values[which] + value <= m_limit_values[which]) {
m_current_values[which] += value;
m_current_hints[which] += value;
m_peak_values[which] = std::max(m_peak_values[which], m_current_values[which]);
return true;
}
if (m_current_hints[which] + value <= m_limit_values[which] && (timeout < 0 || KHardwareTimer::GetTick() < timeout)) {
m_waiter_count++;
m_cond_var.Wait(std::addressof(m_lock), timeout, false);
m_waiter_count--;
if (GetCurrentThread().IsTerminationRequested()) {
return false;
}
} else {
break;
}
}
return false;
}
void KResourceLimit::Release(ams::svc::LimitableResource which, s64 value) {
this->Release(which, value, value);
}
void KResourceLimit::Release(ams::svc::LimitableResource which, s64 value, s64 hint) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(value >= 0);
MESOSPHERE_ASSERT(hint >= 0);
KScopedLightLock lk(m_lock);
MESOSPHERE_ASSERT(m_current_values[which] <= m_limit_values[which]);
MESOSPHERE_ASSERT(m_current_hints[which] <= m_current_values[which]);
MESOSPHERE_ASSERT(value <= m_current_values[which]);
MESOSPHERE_ASSERT(hint <= m_current_hints[which]);
m_current_values[which] -= value;
m_current_hints[which] -= hint;
if (m_waiter_count != 0) {
m_cond_var.Broadcast();
}
}
}
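/* Illustrative usage sketch (added; hypothetical caller, not code from this file): a caller */
/* reserves before allocating and releases on failure, typically via the scoped helper       */
/* KScopedResourceReservation:                                                               */
/*                                                                                           */
/*     if (rl->Reserve(ams::svc::LimitableResource_ThreadCountMax, 1)) {                     */
/*         // ... create the object; if creation fails, undo the reservation:                */
/*         rl->Release(ams::svc::LimitableResource_ThreadCountMax, 1);                       */
/*     }                                                                                     */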

/* ==== File: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_scoped_disable_dispatch.cpp ==== */

/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
KScopedDisableDispatch::~KScopedDisableDispatch() {
if (GetCurrentThread().GetDisableDispatchCount() <= 1) {
Kernel::GetScheduler().RescheduleCurrentCore();
} else {
GetCurrentThread().EnableDispatch();
}
}
}
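/* Illustrative usage sketch (added): the guard is used as a stack-scoped RAII object; the   */
/* matching disable happens in the constructor (defined in the header, not shown here).      */
/*                                                                                           */
/*     {                                                                                     */
/*         KScopedDisableDispatch dd;                                                        */
/*         // ... work that must not be preempted on this core ...                           */
/*     }   // destructor: reschedules if this was the outermost disable                      */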

/* ==== File: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_memory_block_manager.cpp ==== */

/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
constexpr const std::pair<KMemoryState, const char *> MemoryStateNames[] = {
{KMemoryState_Free , "----- Free -----"},
{KMemoryState_IoMemory , "IoMemory "},
{KMemoryState_IoRegister , "IoRegister "},
{KMemoryState_Static , "Static "},
{KMemoryState_Code , "Code "},
{KMemoryState_CodeData , "CodeData "},
{KMemoryState_Normal , "Normal "},
{KMemoryState_Shared , "Shared "},
{KMemoryState_AliasCode , "AliasCode "},
{KMemoryState_AliasCodeData , "AliasCodeData "},
{KMemoryState_Ipc , "Ipc "},
{KMemoryState_Stack , "Stack "},
{KMemoryState_ThreadLocal , "ThreadLocal "},
{KMemoryState_Transfered , "Transfered "},
{KMemoryState_SharedTransfered , "SharedTransfered"},
{KMemoryState_SharedCode , "SharedCode "},
{KMemoryState_Inaccessible , "Inaccessible "},
{KMemoryState_NonSecureIpc , "NonSecureIpc "},
{KMemoryState_NonDeviceIpc , "NonDeviceIpc "},
{KMemoryState_Kernel , "Kernel "},
{KMemoryState_GeneratedCode , "GeneratedCode "},
{KMemoryState_CodeOut , "CodeOut "},
{KMemoryState_Coverage , "Coverage "},
};
constexpr const char *GetMemoryStateName(KMemoryState state) {
for (size_t i = 0; i < util::size(MemoryStateNames); i++) {
if (std::get<0>(MemoryStateNames[i]) == state) {
return std::get<1>(MemoryStateNames[i]);
}
}
return "Unknown ";
}
constexpr const char *GetMemoryPermissionString(const KMemoryBlock &block) {
if (block.GetState() == KMemoryState_Free) {
return " ";
} else {
switch (block.GetPermission()) {
case KMemoryPermission_UserReadExecute:
return "r-x";
case KMemoryPermission_UserRead:
return "r--";
case KMemoryPermission_UserReadWrite:
return "rw-";
default:
return "---";
}
}
}
void DumpMemoryBlock(const KMemoryBlock &block) {
const char *state = GetMemoryStateName(block.GetState());
const char *perm = GetMemoryPermissionString(block);
const uintptr_t start = GetInteger(block.GetAddress());
const uintptr_t end = GetInteger(block.GetLastAddress());
const size_t kb = block.GetSize() / 1_KB;
const char l = (block.GetAttribute() & KMemoryAttribute_Locked) ? 'L' : '-';
const char i = (block.GetAttribute() & KMemoryAttribute_IpcLocked) ? 'I' : '-';
const char d = (block.GetAttribute() & KMemoryAttribute_DeviceShared) ? 'D' : '-';
const char u = (block.GetAttribute() & KMemoryAttribute_Uncached) ? 'U' : '-';
MESOSPHERE_LOG("0x%10lx - 0x%10lx (%9zu KB) %s %s %c%c%c%c [%d, %d]\n", start, end, kb, perm, state, l, i, d, u, block.GetIpcLockCount(), block.GetDeviceUseCount());
}
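/* Example of a line this produces (added; illustrative values, spacing approximate):        */
/*     0x  20000000 - 0x  20003fff (       16 KB) rw- Stack            ---- [0, 0]           */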
}
Result KMemoryBlockManager::Initialize(KProcessAddress st, KProcessAddress nd, KMemoryBlockSlabManager *slab_manager) {
/* Allocate a block to encapsulate the address space, insert it into the tree. */
KMemoryBlock *start_block = slab_manager->Allocate();
R_UNLESS(start_block != nullptr, svc::ResultOutOfResource());
/* Set our start and end. */
m_start_address = st;
m_end_address = nd;
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(m_start_address), PageSize));
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(m_end_address), PageSize));
/* Initialize and insert the block. */
start_block->Initialize(m_start_address, (m_end_address - m_start_address) / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None);
m_memory_block_tree.insert(*start_block);
R_SUCCEED();
}
void KMemoryBlockManager::Finalize(KMemoryBlockSlabManager *slab_manager) {
/* Erase every block until we have none left. */
auto it = m_memory_block_tree.begin();
while (it != m_memory_block_tree.end()) {
KMemoryBlock *block = std::addressof(*it);
it = m_memory_block_tree.erase(it);
slab_manager->Free(block);
}
MESOSPHERE_ASSERT(m_memory_block_tree.empty());
}
KProcessAddress KMemoryBlockManager::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const {
if (num_pages > 0) {
const KProcessAddress region_end = region_start + region_num_pages * PageSize;
const KProcessAddress region_last = region_end - 1;
for (const_iterator it = this->FindIterator(region_start); it != m_memory_block_tree.cend(); it++) {
if (region_last < it->GetAddress()) {
break;
}
if (it->GetState() != KMemoryState_Free) {
continue;
}
KProcessAddress area = (it->GetAddress() <= GetInteger(region_start)) ? region_start : it->GetAddress();
area += guard_pages * PageSize;
const KProcessAddress offset_area = util::AlignDown(GetInteger(area), alignment) + offset;
area = (area <= offset_area) ? offset_area : offset_area + alignment;
const KProcessAddress area_end = area + num_pages * PageSize + guard_pages * PageSize;
const KProcessAddress area_last = area_end - 1;
if (GetInteger(it->GetAddress()) <= GetInteger(area) && area < area_last && area_last <= region_last && GetInteger(area_last) <= GetInteger(it->GetLastAddress())) {
return area;
}
}
}
return Null<KProcessAddress>;
}
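/* Worked example (added) for the alignment step above: with area = 0x13000,                 */
/* alignment = 0x10000, offset = 0x1000:                                                     */
/*     offset_area = AlignDown(0x13000, 0x10000) + 0x1000 = 0x11000                          */
/*     offset_area < area, so the candidate becomes 0x11000 + 0x10000 = 0x21000,             */
/* the lowest address >= area that is congruent to offset modulo alignment.                  */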
void KMemoryBlockManager::CoalesceForUpdate(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages) {
/* Find the iterator now that we've updated. */
iterator it = this->FindIterator(address);
if (address != m_start_address) {
it--;
}
/* Coalesce blocks that we can. */
while (true) {
iterator prev = it++;
if (it == m_memory_block_tree.end()) {
break;
}
if (prev->CanMergeWith(*it)) {
KMemoryBlock *block = std::addressof(*it);
m_memory_block_tree.erase(it);
prev->Add(*block);
allocator->Free(block);
it = prev;
}
if (address + num_pages * PageSize < it->GetEndAddress()) {
break;
}
}
}
void KMemoryBlockManager::Update(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr, KMemoryBlockDisableMergeAttribute clear_disable_attr) {
/* Ensure for auditing that we never end up with an invalid tree. */
KScopedMemoryBlockManagerAuditor auditor(this);
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
MESOSPHERE_ASSERT((attr & (KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared)) == 0);
KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
if (it->HasProperties(state, perm, attr)) {
/* If we already have the right properties, just advance. */
if (cur_address + remaining_size < it->GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages = (cur_address + remaining_size - it->GetEndAddress()) / PageSize;
cur_address = it->GetEndAddress();
}
} else {
/* If we need to, create a new block before and insert it. */
if (it->GetAddress() != GetInteger(cur_address)) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_address = it->GetAddress();
}
/* If we need to, create a new block after and insert it. */
if (it->GetSize() > remaining_size) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
}
/* Update block state. */
it->Update(state, perm, attr, it->GetAddress() == address, set_disable_attr, clear_disable_attr);
cur_address += it->GetSize();
remaining_pages -= it->GetNumPages();
}
it++;
}
this->CoalesceForUpdate(allocator, address, num_pages);
}
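/* Annotation (added): the split-before/split-after pattern above turns one block into up    */
/* to three, so only the exact [address, address + num_pages * PageSize) range is updated:   */
/*                                                                                           */
/*     before:  [A..............................D)                                           */
/*     after :  [A.....B)[B...updated...C)[C....D)    with B = address,                      */
/*                                                    C = address + num_pages * PageSize     */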
void KMemoryBlockManager::UpdateIfMatch(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, KMemoryState test_state, KMemoryPermission test_perm, KMemoryAttribute test_attr, KMemoryState state, KMemoryPermission perm, KMemoryAttribute attr, KMemoryBlockDisableMergeAttribute set_disable_attr, KMemoryBlockDisableMergeAttribute clear_disable_attr) {
/* Ensure for auditing that we never end up with an invalid tree. */
KScopedMemoryBlockManagerAuditor auditor(this);
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
MESOSPHERE_ASSERT((attr & (KMemoryAttribute_IpcLocked | KMemoryAttribute_DeviceShared)) == 0);
KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
if (it->HasProperties(test_state, test_perm, test_attr) && !it->HasProperties(state, perm, attr)) {
/* If we need to, create a new block before and insert it. */
if (it->GetAddress() != GetInteger(cur_address)) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_address = it->GetAddress();
}
/* If we need to, create a new block after and insert it. */
if (it->GetSize() > remaining_size) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
}
/* Update block state. */
it->Update(state, perm, attr, it->GetAddress() == address, set_disable_attr, clear_disable_attr);
cur_address += it->GetSize();
remaining_pages -= it->GetNumPages();
} else {
/* If we already have the right properties, just advance. */
if (cur_address + remaining_size < it->GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages = (cur_address + remaining_size - it->GetEndAddress()) / PageSize;
cur_address = it->GetEndAddress();
}
}
it++;
}
this->CoalesceForUpdate(allocator, address, num_pages);
}
void KMemoryBlockManager::UpdateLock(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, MemoryBlockLockFunction lock_func, KMemoryPermission perm) {
/* Ensure for auditing that we never end up with an invalid tree. */
KScopedMemoryBlockManagerAuditor auditor(this);
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
const KProcessAddress end_address = address + (num_pages * PageSize);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
/* If we need to, create a new block before and insert it. */
if (it->GetAddress() != cur_address) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_address = it->GetAddress();
}
if (it->GetSize() > remaining_size) {
/* If we need to, create a new block after and insert it. */
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
}
/* Call the locked update function. */
(std::addressof(*it)->*lock_func)(perm, it->GetAddress() == address, it->GetEndAddress() == end_address);
cur_address += it->GetSize();
remaining_pages -= it->GetNumPages();
it++;
}
this->CoalesceForUpdate(allocator, address, num_pages);
}
void KMemoryBlockManager::UpdateAttribute(KMemoryBlockManagerUpdateAllocator *allocator, KProcessAddress address, size_t num_pages, u32 mask, u32 attr) {
/* Ensure for auditing that we never end up with an invalid tree. */
KScopedMemoryBlockManagerAuditor auditor(this);
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(address), PageSize));
KProcessAddress cur_address = address;
size_t remaining_pages = num_pages;
iterator it = this->FindIterator(address);
while (remaining_pages > 0) {
const size_t remaining_size = remaining_pages * PageSize;
if ((it->GetAttribute() & mask) != attr) {
/* If we need to, create a new block before and insert it. */
if (it->GetAddress() != GetInteger(cur_address)) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address);
it = m_memory_block_tree.insert(*new_block);
it++;
cur_address = it->GetAddress();
}
/* If we need to, create a new block after and insert it. */
if (it->GetSize() > remaining_size) {
KMemoryBlock *new_block = allocator->Allocate();
it->Split(new_block, cur_address + remaining_size);
it = m_memory_block_tree.insert(*new_block);
}
/* Update block state. */
it->UpdateAttribute(mask, attr);
cur_address += it->GetSize();
remaining_pages -= it->GetNumPages();
} else {
/* If we already have the right attributes, just advance. */
if (cur_address + remaining_size < it->GetEndAddress()) {
remaining_pages = 0;
cur_address += remaining_size;
} else {
remaining_pages = (cur_address + remaining_size - it->GetEndAddress()) / PageSize;
cur_address = it->GetEndAddress();
}
}
it++;
}
this->CoalesceForUpdate(allocator, address, num_pages);
}
/* Debug. */
bool KMemoryBlockManager::CheckState() const {
/* If we fail, we should dump blocks. */
auto dump_guard = SCOPE_GUARD { this->DumpBlocks(); };
/* Loop over every block, ensuring that we are sorted and coalesced. */
auto it = m_memory_block_tree.cbegin();
auto prev = it++;
while (it != m_memory_block_tree.cend()) {
/* Sequential blocks which can be merged should be merged. */
if (prev->CanMergeWith(*it)) {
return false;
}
/* Sequential blocks should be sequential. */
if (prev->GetEndAddress() != it->GetAddress()) {
return false;
}
/* If the block is ipc locked, it must have a count. */
if ((it->GetAttribute() & KMemoryAttribute_IpcLocked) != 0 && it->GetIpcLockCount() == 0) {
return false;
}
/* If the block is device shared, it must have a count. */
if ((it->GetAttribute() & KMemoryAttribute_DeviceShared) != 0 && it->GetDeviceUseCount() == 0) {
return false;
}
/* Advance the iterator. */
prev = it++;
}
/* Our loop may not have checked the last block, so check it explicitly. */
if (prev != m_memory_block_tree.cend()) {
/* If the block is ipc locked, it must have a count. */
if ((prev->GetAttribute() & KMemoryAttribute_IpcLocked) != 0 && prev->GetIpcLockCount() == 0) {
return false;
}
/* If the block is device shared, it must have a count. */
if ((prev->GetAttribute() & KMemoryAttribute_DeviceShared) != 0 && prev->GetDeviceUseCount() == 0) {
return false;
}
}
/* We're valid, so no need to print. */
dump_guard.Cancel();
return true;
}
void KMemoryBlockManager::DumpBlocks() const {
/* Dump each block. */
for (const auto &block : m_memory_block_tree) {
DumpMemoryBlock(block);
}
}
}

/* ==== File: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_io_region.cpp ==== */

/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
Result KIoRegion::Initialize(KIoPool *pool, KPhysicalAddress phys_addr, size_t size, ams::svc::MemoryMapping mapping, ams::svc::MemoryPermission perm) {
MESOSPHERE_ASSERT_THIS();
/* Set fields. */
m_physical_address = phys_addr;
m_size = size;
m_mapping = mapping;
m_permission = perm;
m_pool = pool;
m_is_mapped = false;
/* Add ourselves to our pool. */
R_TRY(m_pool->AddIoRegion(this));
/* Open a reference to our pool. */
m_pool->Open();
/* Mark ourselves as initialized. */
m_is_initialized = true;
R_SUCCEED();
}
void KIoRegion::Finalize() {
/* Remove ourselves from our pool. */
m_pool->RemoveIoRegion(this);
/* Close our reference to our pool. */
m_pool->Close();
}
Result KIoRegion::Map(KProcessAddress address, size_t size, ams::svc::MemoryPermission map_perm) {
MESOSPHERE_ASSERT_THIS();
/* Check that the desired perm is allowable. */
R_UNLESS((m_permission | map_perm) == m_permission, svc::ResultInvalidNewMemoryPermission());
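/* Annotation (added): (m_permission | map_perm) == m_permission holds exactly when map_perm */
/* is a subset of the allowed bits; e.g. allowed rw- with requested r-- passes, while        */
/* allowed rw- with requested r-x fails because x is not in m_permission.                    */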
/* Check that the size is correct. */
R_UNLESS(size == m_size, svc::ResultInvalidSize());
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Check that we're not already mapped. */
R_UNLESS(!m_is_mapped, svc::ResultInvalidState());
/* Map ourselves. */
R_TRY(GetCurrentProcess().GetPageTable().MapIoRegion(address, m_physical_address, size, m_mapping, map_perm));
/* Add ourselves to the current process. */
GetCurrentProcess().AddIoRegion(this);
/* Note that we're mapped. */
m_is_mapped = true;
R_SUCCEED();
}
Result KIoRegion::Unmap(KProcessAddress address, size_t size) {
MESOSPHERE_ASSERT_THIS();
/* Check that the size is correct. */
R_UNLESS(size == m_size, svc::ResultInvalidSize());
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Unmap ourselves. */
R_TRY(GetCurrentProcess().GetPageTable().UnmapIoRegion(address, m_physical_address, size, m_mapping));
/* Remove ourselves from the current process. */
GetCurrentProcess().RemoveIoRegion(this);
/* Note that we're unmapped. */
m_is_mapped = false;
R_SUCCEED();
}
}

/* ==== File: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_synchronization_object.cpp ==== */

/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
class ThreadQueueImplForKSynchronizationObjectWait final : public KThreadQueueWithoutEndWait {
private:
using ThreadListNode = KSynchronizationObject::ThreadListNode;
private:
KSynchronizationObject **m_objects;
ThreadListNode *m_nodes;
s32 m_count;
public:
constexpr ThreadQueueImplForKSynchronizationObjectWait(KSynchronizationObject **o, ThreadListNode *n, s32 c) : m_objects(o), m_nodes(n), m_count(c) { /* ... */ }
virtual void NotifyAvailable(KThread *waiting_thread, KSynchronizationObject *signaled_object, Result wait_result) override {
/* Determine the sync index, and unlink all nodes. */
s32 sync_index = -1;
for (auto i = 0; i < m_count; ++i) {
/* Check if this is the signaled object. */
if (m_objects[i] == signaled_object && sync_index == -1) {
sync_index = i;
}
/* Unlink the current node from the current object. */
m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
}
/* Set the waiting thread's sync index. */
waiting_thread->SetSyncedIndex(sync_index);
/* Set the waiting thread as not cancellable. */
waiting_thread->ClearCancellable();
/* Invoke the base end wait handler. */
KThreadQueue::EndWait(waiting_thread, wait_result);
}
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Remove all nodes from our list. */
for (auto i = 0; i < m_count; ++i) {
m_objects[i]->UnlinkNode(std::addressof(m_nodes[i]));
}
/* Set the waiting thread as not cancellable. */
waiting_thread->ClearCancellable();
/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};
}
void KSynchronizationObject::Finalize() {
MESOSPHERE_ASSERT_THIS();
/* If auditing, ensure that the object has no waiters. */
#if defined(MESOSPHERE_BUILD_FOR_AUDITING)
{
KScopedSchedulerLock sl;
for (auto *cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
KThread *thread = cur_node->thread;
MESOSPHERE_LOG("KSynchronizationObject::Finalize(%p) with %p (id=%ld) waiting.\n", this, thread, thread->GetId());
}
}
#endif
/* NOTE: In Nintendo's kernel, the following is virtual and called here. */
/* this->OnFinalizeSynchronizationObject(); */
}
Result KSynchronizationObject::Wait(s32 *out_index, KSynchronizationObject **objects, const s32 num_objects, s64 timeout) {
/* Allocate space on stack for thread nodes. */
ThreadListNode *thread_nodes = static_cast<ThreadListNode *>(__builtin_alloca(sizeof(ThreadListNode) * num_objects));
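/* Annotation (added): the object count is bounded by the svc wait-synchronization handle    */
/* limit (0x40 in official kernels), so this stack allocation stays small.                   */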
/* Prepare for wait. */
KThread *thread = GetCurrentThreadPointer();
KHardwareTimer *timer;
ThreadQueueImplForKSynchronizationObjectWait wait_queue(objects, thread_nodes, num_objects);
{
/* Setup the scheduling lock and sleep. */
KScopedSchedulerLockAndSleep slp(std::addressof(timer), thread, timeout);
/* Check if the thread should terminate. */
if (thread->IsTerminationRequested()) {
slp.CancelSleep();
R_THROW(svc::ResultTerminationRequested());
}
/* Check if any of the objects are already signaled. */
for (auto i = 0; i < num_objects; ++i) {
MESOSPHERE_ASSERT(objects[i] != nullptr);
if (objects[i]->IsSignaled()) {
*out_index = i;
slp.CancelSleep();
R_SUCCEED();
}
}
/* Check if the timeout is zero. */
if (timeout == 0) {
slp.CancelSleep();
R_THROW(svc::ResultTimedOut());
}
/* Check if waiting was canceled. */
if (thread->IsWaitCancelled()) {
slp.CancelSleep();
thread->ClearWaitCancelled();
R_THROW(svc::ResultCancelled());
}
/* Add the waiters. */
for (auto i = 0; i < num_objects; ++i) {
thread_nodes[i].thread = thread;
thread_nodes[i].next = nullptr;
objects[i]->LinkNode(std::addressof(thread_nodes[i]));
}
/* Mark the thread as cancellable. */
thread->SetCancellable();
/* Clear the thread's synced index. */
thread->SetSyncedIndex(-1);
/* Wait for an object to be signaled. */
wait_queue.SetHardwareTimer(timer);
thread->BeginWait(std::addressof(wait_queue));
}
/* Set the output index. */
*out_index = thread->GetSyncedIndex();
/* Get the wait result. */
R_RETURN(thread->GetWaitResult());
}
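/* Illustrative caller sketch (added; simplified, derived only from the signature above):    */
/*                                                                                           */
/*     s32 index = -1;                                                                       */
/*     KSynchronizationObject *objs[] = { object_a, object_b };                              */
/*     const Result result = KSynchronizationObject::Wait(std::addressof(index), objs, 2, timeout_tick); */
/*     // on success, index identifies which of the two objects was signaled                 */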
void KSynchronizationObject::NotifyAvailable(Result result) {
MESOSPHERE_ASSERT_THIS();
KScopedSchedulerLock sl;
/* If we're not signaled, we've nothing to notify. */
if (!this->IsSignaled()) {
return;
}
/* Iterate over each thread. */
for (auto *cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
cur_node->thread->NotifyAvailable(this, result);
}
}
void KSynchronizationObject::DumpWaiters() {
MESOSPHERE_ASSERT_THIS();
/* If debugging, dump the list of waiters. */
#if defined(MESOSPHERE_BUILD_FOR_DEBUGGING)
{
KScopedSchedulerLock sl;
MESOSPHERE_RELEASE_LOG("Threads waiting on %p:\n", this);
for (auto *cur_node = m_thread_list_head; cur_node != nullptr; cur_node = cur_node->next) {
KThread *thread = cur_node->thread;
if (KProcess *process = thread->GetOwnerProcess(); process != nullptr) {
MESOSPHERE_RELEASE_LOG(" %p tid=%ld pid=%ld (%s)\n", thread, thread->GetId(), process->GetId(), process->GetName());
} else {
MESOSPHERE_RELEASE_LOG(" %p tid=%ld (Kernel)\n", thread, thread->GetId());
}
}
/* If we didn't have any waiters, say so. */
if (m_thread_list_head == nullptr) {
MESOSPHERE_RELEASE_LOG(" None\n");
}
}
#endif
}
}

/* ==== File: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_server_port.cpp ==== */

/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
void KServerPort::Initialize(KPort *parent) {
/* Set member variables. */
m_parent = parent;
}
bool KServerPort::IsLight() const {
return this->GetParent()->IsLight();
}
void KServerPort::CleanupSessions() {
/* Ensure our preconditions are met. */
if (this->IsLight()) {
MESOSPHERE_ASSERT(m_session_list.empty());
} else {
MESOSPHERE_ASSERT(m_light_session_list.empty());
}
/* Cleanup the session list. */
while (true) {
/* Get the last session in the list. */
KServerSession *session = nullptr;
{
KScopedSchedulerLock sl;
if (!m_session_list.empty()) {
session = std::addressof(m_session_list.front());
m_session_list.pop_front();
}
}
/* Close the session. */
if (session != nullptr) {
session->Close();
} else {
break;
}
}
/* Cleanup the light session list. */
while (true) {
/* Get the last session in the list. */
KLightServerSession *session = nullptr;
{
KScopedSchedulerLock sl;
if (!m_light_session_list.empty()) {
session = std::addressof(m_light_session_list.front());
m_light_session_list.pop_front();
}
}
/* Close the session. */
if (session != nullptr) {
session->Close();
} else {
break;
}
}
}
void KServerPort::Destroy() {
/* Note with our parent that we're closed. */
m_parent->OnServerClosed();
/* Perform necessary cleanup of our session lists. */
this->CleanupSessions();
/* Close our reference to our parent. */
m_parent->Close();
}
bool KServerPort::IsSignaled() const {
MESOSPHERE_ASSERT_THIS();
if (this->IsLight()) {
return !m_light_session_list.empty();
} else {
return !m_session_list.empty();
}
}
void KServerPort::EnqueueSession(KServerSession *session) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(!this->IsLight());
KScopedSchedulerLock sl;
/* Add the session to our queue. */
m_session_list.push_back(*session);
if (m_session_list.size() == 1) {
this->NotifyAvailable();
}
}
void KServerPort::EnqueueSession(KLightServerSession *session) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this->IsLight());
KScopedSchedulerLock sl;
/* Add the session to our queue. */
m_light_session_list.push_back(*session);
if (m_light_session_list.size() == 1) {
this->NotifyAvailable();
}
}
KServerSession *KServerPort::AcceptSession() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(!this->IsLight());
KScopedSchedulerLock sl;
/* Return the first session in the list. */
if (m_session_list.empty()) {
return nullptr;
}
KServerSession *session = std::addressof(m_session_list.front());
m_session_list.pop_front();
return session;
}
KLightServerSession *KServerPort::AcceptLightSession() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this->IsLight());
KScopedSchedulerLock sl;
/* Return the first session in the list. */
if (m_light_session_list.empty()) {
return nullptr;
}
KLightServerSession *session = std::addressof(m_light_session_list.front());
m_light_session_list.pop_front();
return session;
}
}
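/* Illustrative server-loop sketch (added): the port is signaled while sessions are queued   */
/* (see IsSignaled above), so a server typically waits on the port and then accepts.         */
/* Simplified pseudocode, not code from this file:                                           */
/*                                                                                           */
/*     while (running) {                                                                     */
/*         // ... wait for the port to become signaled ...                                   */
/*         if (KServerSession *session = port->AcceptSession(); session != nullptr) {        */
/*             // ... service requests on the session, then session->Close();                */
/*         }                                                                                 */
/*     }                                                                                     */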

/* ==== File: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_debug_log_impl.board.nintendo_nx.cpp ==== */

/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#include "kern_debug_log_impl.hpp"
namespace ams::kern {
#if defined(MESOSPHERE_DEBUG_LOG_USE_UART)
namespace {
constexpr bool DoSaveAndRestore = false;
enum UartRegister {
UartRegister_THR = 0,
UartRegister_IER = 1,
UartRegister_FCR = 2,
UartRegister_LCR = 3,
UartRegister_LSR = 5,
UartRegister_IRDA_CSR = 8,
UartRegister_DLL = 0,
UartRegister_DLH = 1,
};
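/* Annotation (added): LSR bit meanings below are assumed from the Tegra UART documentation: */
/* bit 8 (0x100) = TX FIFO full (polled in PutChar), bit 6 (0x40) = transmitter shift        */
/* register empty / TMTY (polled in Flush).                                                  */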
KVirtualAddress g_uart_address = 0;
[[maybe_unused]] constinit u32 g_saved_registers[5];
ALWAYS_INLINE u32 ReadUartRegister(UartRegister which) {
return GetPointer<volatile u32>(g_uart_address)[which];
}
ALWAYS_INLINE void WriteUartRegister(UartRegister which, u32 value) {
GetPointer<volatile u32>(g_uart_address)[which] = value;
}
}
bool KDebugLogImpl::Initialize() {
/* Get the uart memory region. */
const KMemoryRegion *uart_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_Uart);
if (uart_region == nullptr) {
return false;
}
/* Set the uart register base address. */
g_uart_address = uart_region->GetPairAddress();
if (g_uart_address == Null<KVirtualAddress>) {
return false;
}
/* NOTE: We assume here that UART init/config has been done by the Secure Monitor. */
/* As such, we only need to disable interrupts. */
WriteUartRegister(UartRegister_IER, 0x00);
return true;
}
void KDebugLogImpl::PutChar(char c) {
while (ReadUartRegister(UartRegister_LSR) & 0x100) {
/* While the FIFO is full, yield. */
cpu::Yield();
}
WriteUartRegister(UartRegister_THR, c);
cpu::DataSynchronizationBarrier();
}
void KDebugLogImpl::Flush() {
while ((ReadUartRegister(UartRegister_LSR) & 0x40) == 0) {
/* Wait for the TMTY bit to be one (transmit empty). */
}
}
void KDebugLogImpl::Save() {
if constexpr (DoSaveAndRestore) {
/* Save LCR, IER, FCR. */
g_saved_registers[0] = ReadUartRegister(UartRegister_LCR);
g_saved_registers[1] = ReadUartRegister(UartRegister_IER);
g_saved_registers[2] = ReadUartRegister(UartRegister_FCR);
/* Set Divisor Latch Access bit, to allow access to DLL/DLH */
WriteUartRegister(UartRegister_LCR, 0x80);
ReadUartRegister(UartRegister_LCR);
/* Save DLL/DLH. */
g_saved_registers[3] = ReadUartRegister(UartRegister_DLL);
g_saved_registers[4] = ReadUartRegister(UartRegister_DLH);
/* Restore Divisor Latch Access bit. */
WriteUartRegister(UartRegister_LCR, g_saved_registers[0]);
ReadUartRegister(UartRegister_LCR);
}
}
void KDebugLogImpl::Restore() {
if constexpr (DoSaveAndRestore) {
/* Set Divisor Latch Access bit, to allow access to DLL/DLH */
WriteUartRegister(UartRegister_LCR, 0x80);
ReadUartRegister(UartRegister_LCR);
/* Restore DLL/DLH. */
WriteUartRegister(UartRegister_DLL, g_saved_registers[3]);
WriteUartRegister(UartRegister_DLH, g_saved_registers[4]);
ReadUartRegister(UartRegister_DLH);
/* Restore Divisor Latch Access bit. */
WriteUartRegister(UartRegister_LCR, g_saved_registers[0]);
ReadUartRegister(UartRegister_LCR);
/* Restore IER and FCR. */
WriteUartRegister(UartRegister_IER, g_saved_registers[1]);
WriteUartRegister(UartRegister_FCR, g_saved_registers[2] | 2);
WriteUartRegister(UartRegister_IRDA_CSR, 0x02);
ReadUartRegister(UartRegister_FCR);
}
}
#elif defined(MESOSPHERE_DEBUG_LOG_USE_IRAM_RINGBUFFER)
namespace {
constinit KVirtualAddress g_debug_iram_address = 0;
constexpr size_t RingBufferSize = 0x5000;
constinit uintptr_t g_offset = 0;
constinit u8 g_saved_buffer[RingBufferSize];
}
bool KDebugLogImpl::Initialize() {
/* Set the base address. */
g_debug_iram_address = KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_LegacyLpsIram) + 0x38000;
std::memset(GetVoidPointer(g_debug_iram_address), 0xFF, RingBufferSize);
return true;
}
void KDebugLogImpl::PutChar(char c) {
GetPointer<char>(g_debug_iram_address)[g_offset++] = c;
if (g_offset == RingBufferSize) {
g_offset = 0;
}
}
void KDebugLogImpl::Flush() {
/* ... */
}
void KDebugLogImpl::Save() {
std::memcpy(g_saved_buffer, GetVoidPointer(g_debug_iram_address), RingBufferSize);
}
void KDebugLogImpl::Restore() {
std::memcpy(GetVoidPointer(g_debug_iram_address), g_saved_buffer, RingBufferSize);
}
#else
#error "Unknown Debug UART device!"
#endif
}

/* ==== File: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_client_port.cpp ==== */

/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
void KClientPort::Initialize(KPort *parent, s32 max_sessions) {
/* Set member variables. */
m_num_sessions = 0;
m_peak_sessions = 0;
m_parent = parent;
m_max_sessions = max_sessions;
}
void KClientPort::OnSessionFinalized() {
KScopedSchedulerLock sl;
if (const auto prev = m_num_sessions--; prev == m_max_sessions) {
this->NotifyAvailable();
}
}
void KClientPort::OnServerClosed() {
MESOSPHERE_ASSERT_THIS();
}
bool KClientPort::IsLight() const {
return this->GetParent()->IsLight();
}
bool KClientPort::IsServerClosed() const {
return this->GetParent()->IsServerClosed();
}
void KClientPort::Destroy() {
/* Note with our parent that we're closed. */
m_parent->OnClientClosed();
/* Close our reference to our parent. */
m_parent->Close();
}
bool KClientPort::IsSignaled() const {
MESOSPHERE_ASSERT_THIS();
return m_num_sessions.Load() < m_max_sessions;
}
Result KClientPort::CreateSession(KClientSession **out) {
MESOSPHERE_ASSERT_THIS();
/* Declare the session we're going to allocate. */
KSession *session;
/* Reserve a new session from the resource limit. */
KScopedResourceReservation session_reservation(GetCurrentProcessPointer(), ams::svc::LimitableResource_SessionCountMax);
if (session_reservation.Succeeded()) {
/* Allocate a session normally. */
session = KSession::Create();
} else {
/* We couldn't reserve a session. Check that we support dynamically expanding the resource limit. */
R_UNLESS(GetCurrentProcess().GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached());
R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached());
/* Try to allocate a session from unused slab memory. */
session = KSession::CreateFromUnusedSlabMemory();
R_UNLESS(session != nullptr, svc::ResultLimitReached());
ON_RESULT_FAILURE { session->Close(); };
/* We want to add two KSessionRequests to the heap, to prevent request exhaustion. */
for (size_t i = 0; i < 2; ++i) {
KSessionRequest *request = KSessionRequest::CreateFromUnusedSlabMemory();
R_UNLESS(request != nullptr, svc::ResultLimitReached());
request->Close();
}
/* We successfully allocated a session, so add the object we allocated to the resource limit. */
Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_SessionCountMax, 1);
}
/* Check that we successfully created a session. */
R_UNLESS(session != nullptr, svc::ResultOutOfResource());
/* Update the session counts. */
{
ON_RESULT_FAILURE { session->Close(); };
/* Atomically increment the number of sessions. */
s32 new_sessions;
{
const auto max = m_max_sessions;
auto cur_sessions = m_num_sessions.Load();
do {
R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions());
new_sessions = cur_sessions + 1;
} while (!m_num_sessions.CompareExchangeWeak<std::memory_order_relaxed>(cur_sessions, new_sessions));
}
/* Atomically update the peak session tracking. */
{
auto peak = m_peak_sessions.Load();
do {
if (peak >= new_sessions) {
break;
}
} while (!m_peak_sessions.CompareExchangeWeak<std::memory_order_relaxed>(peak, new_sessions));
}
}
/* Initialize the session. */
session->Initialize(this, m_parent->GetName());
/* Commit the session reservation. */
session_reservation.Commit();
/* Register the session. */
KSession::Register(session);
ON_RESULT_FAILURE {
session->GetClientSession().Close();
session->GetServerSession().Close();
};
/* Enqueue the session with our parent. */
R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession())));
/* We succeeded, so set the output. */
*out = std::addressof(session->GetClientSession());
R_SUCCEED();
}
Result KClientPort::CreateLightSession(KLightClientSession **out) {
MESOSPHERE_ASSERT_THIS();
/* Declare the session we're going to allocate. */
KLightSession *session;
/* Reserve a new session from the resource limit. */
KScopedResourceReservation session_reservation(GetCurrentProcessPointer(), ams::svc::LimitableResource_SessionCountMax);
if (session_reservation.Succeeded()) {
/* Allocate a session normally. */
session = KLightSession::Create();
} else {
/* We couldn't reserve a session. Check that we support dynamically expanding the resource limit. */
R_UNLESS(GetCurrentProcess().GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached());
R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached());
/* Try to allocate a session from unused slab memory. */
session = KLightSession::CreateFromUnusedSlabMemory();
R_UNLESS(session != nullptr, svc::ResultLimitReached());
/* We successfully allocated a session, so add the object we allocated to the resource limit. */
Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_SessionCountMax, 1);
}
/* Check that we successfully created a session. */
R_UNLESS(session != nullptr, svc::ResultOutOfResource());
/* Update the session counts. */
{
ON_RESULT_FAILURE { session->Close(); };
/* Atomically increment the number of sessions. */
s32 new_sessions;
{
const auto max = m_max_sessions;
auto cur_sessions = m_num_sessions.Load();
do {
R_UNLESS(cur_sessions < max, svc::ResultOutOfSessions());
new_sessions = cur_sessions + 1;
} while (!m_num_sessions.CompareExchangeWeak<std::memory_order_relaxed>(cur_sessions, new_sessions));
}
/* Atomically update the peak session tracking. */
{
auto peak = m_peak_sessions.Load();
do {
if (peak >= new_sessions) {
break;
}
} while (!m_peak_sessions.CompareExchangeWeak<std::memory_order_relaxed>(peak, new_sessions));
}
}
/* Initialize the session. */
session->Initialize(this, m_parent->GetName());
/* Commit the session reservation. */
session_reservation.Commit();
/* Register the session. */
KLightSession::Register(session);
ON_RESULT_FAILURE {
session->GetClientSession().Close();
session->GetServerSession().Close();
};
/* Enqueue the session with our parent. */
R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession())));
/* We succeeded, so set the output. */
*out = std::addressof(session->GetClientSession());
R_SUCCEED();
}
}
| 8,453 | C++ | .cpp | 177 | 37.158192 | 140 | 0.59915 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,968 | kern_k_memory_layout.board.qemu_virt.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_memory_layout.board.qemu_virt.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
constexpr size_t ReservedEarlyDramSize = 0x00080000;
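        /* Combine a base memory region type with one or more attribute flags into a single region type value. */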
template<typename... T> requires (std::same_as<T, KMemoryRegionAttr> && ...)
constexpr ALWAYS_INLINE KMemoryRegionType GetMemoryRegionType(KMemoryRegionType base, T... attr) {
return util::FromUnderlying<KMemoryRegionType>(util::ToUnderlying(base) | (util::ToUnderlying<T>(attr) | ...));
}
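        /* Insert a pool partition into the physical region tree, then mirror it into the virtual tree at the */
        /* region's pair address, tagging both entries with the same attribute so they can be matched up later. */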
void InsertPoolPartitionRegionIntoBothTrees(size_t start, size_t size, KMemoryRegionType phys_type, KMemoryRegionType virt_type, u32 &cur_attr) {
const u32 attr = cur_attr++;
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(start, size, phys_type, attr));
const KMemoryRegion *phys = KMemoryLayout::GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(phys_type, attr);
MESOSPHERE_INIT_ABORT_UNLESS(phys != nullptr);
MESOSPHERE_INIT_ABORT_UNLESS(phys->GetEndAddress() != 0);
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetVirtualMemoryRegionTree().Insert(phys->GetPairAddress(), size, virt_type, attr));
}
}
namespace init {
void SetupDevicePhysicalMemoryRegions() {
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x08000000, 0x10000, GetMemoryRegionType(KMemoryRegionType_InterruptDistributor, KMemoryRegionAttr_ShouldKernelMap)));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(0x08010000, 0x10000, GetMemoryRegionType(KMemoryRegionType_InterruptCpuInterface, KMemoryRegionAttr_ShouldKernelMap)));
}
void SetupDramPhysicalMemoryRegions() {
const size_t intended_memory_size = KSystemControl::Init::GetIntendedMemorySize();
const KPhysicalAddress physical_memory_base_address = KSystemControl::Init::GetKernelPhysicalBaseAddress(ams::kern::MainMemoryAddress);
/* Insert blocks into the tree. */
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), intended_memory_size, KMemoryRegionType_Dram));
MESOSPHERE_INIT_ABORT_UNLESS(KMemoryLayout::GetPhysicalMemoryRegionTree().Insert(GetInteger(physical_memory_base_address), ReservedEarlyDramSize, KMemoryRegionType_DramReservedEarly));
}
void SetupPoolPartitionMemoryRegions() {
/* Start by identifying the extents of the DRAM memory region. */
const auto dram_extents = KMemoryLayout::GetMainMemoryPhysicalExtents();
MESOSPHERE_INIT_ABORT_UNLESS(dram_extents.GetEndAddress() != 0);
/* Find the pool partitions region. */
const KMemoryRegion *pool_partitions_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindByTypeAndAttribute(KMemoryRegionType_DramPoolPartition, 0);
MESOSPHERE_INIT_ABORT_UNLESS(pool_partitions_region != nullptr);
const uintptr_t pool_partitions_start = pool_partitions_region->GetAddress();
/* Determine the end of the pool region. */
const uintptr_t pool_end = pool_partitions_region->GetEndAddress();
MESOSPHERE_INIT_ABORT_UNLESS(pool_end == dram_extents.GetEndAddress());
/* Find the start of the kernel DRAM region. */
const KMemoryRegion *kernel_dram_region = KMemoryLayout::GetPhysicalMemoryRegionTree().FindFirstDerived(KMemoryRegionType_DramKernelBase);
MESOSPHERE_INIT_ABORT_UNLESS(kernel_dram_region != nullptr);
/* Setup the pool partition layouts. */
/* Get Application and Applet pool sizes. */
const size_t application_pool_size = KSystemControl::Init::GetApplicationPoolSize();
const size_t applet_pool_size = KSystemControl::Init::GetAppletPoolSize();
const size_t unsafe_system_pool_min_size = KSystemControl::Init::GetMinimumNonSecureSystemPoolSize();
/* Decide on starting addresses for our pools. */
const uintptr_t application_pool_start = pool_end - application_pool_size;
const uintptr_t applet_pool_start = application_pool_start - applet_pool_size;
const uintptr_t unsafe_system_pool_start = util::AlignDown(applet_pool_start - unsafe_system_pool_min_size, PageSize);
const size_t unsafe_system_pool_size = applet_pool_start - unsafe_system_pool_start;
            /* We want to arrange the application pool depending on where the middle of DRAM is. */
const uintptr_t dram_midpoint = (dram_extents.GetAddress() + dram_extents.GetEndAddress()) / 2;
u32 cur_pool_attr = 0;
size_t total_overhead_size = 0;
/* Insert the application pool. */
if (application_pool_size > 0) {
if (dram_extents.GetEndAddress() <= dram_midpoint || dram_midpoint <= application_pool_start) {
InsertPoolPartitionRegionIntoBothTrees(application_pool_start, application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(application_pool_size);
} else {
const size_t first_application_pool_size = dram_midpoint - application_pool_start;
const size_t second_application_pool_size = application_pool_start + application_pool_size - dram_midpoint;
InsertPoolPartitionRegionIntoBothTrees(application_pool_start, first_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
InsertPoolPartitionRegionIntoBothTrees(dram_midpoint, second_application_pool_size, KMemoryRegionType_DramApplicationPool, KMemoryRegionType_VirtualDramApplicationPool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(first_application_pool_size);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(second_application_pool_size);
}
}
/* Insert the applet pool. */
if (applet_pool_size > 0) {
InsertPoolPartitionRegionIntoBothTrees(applet_pool_start, applet_pool_size, KMemoryRegionType_DramAppletPool, KMemoryRegionType_VirtualDramAppletPool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(applet_pool_size);
}
/* Insert the nonsecure system pool. */
if (unsafe_system_pool_size > 0) {
InsertPoolPartitionRegionIntoBothTrees(unsafe_system_pool_start, unsafe_system_pool_size, KMemoryRegionType_DramSystemNonSecurePool, KMemoryRegionType_VirtualDramSystemNonSecurePool, cur_pool_attr);
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize(unsafe_system_pool_size);
}
/* Determine final total overhead size. */
total_overhead_size += KMemoryManager::CalculateManagementOverheadSize((unsafe_system_pool_start - pool_partitions_start) - total_overhead_size);
/* NOTE: Nintendo's kernel has layout [System, Management] but we have [Management, System]. This ensures the four UserPool regions are contiguous. */
/* Insert the system pool. */
const uintptr_t system_pool_start = pool_partitions_start + total_overhead_size;
const size_t system_pool_size = unsafe_system_pool_start - system_pool_start;
InsertPoolPartitionRegionIntoBothTrees(system_pool_start, system_pool_size, KMemoryRegionType_DramSystemPool, KMemoryRegionType_VirtualDramSystemPool, cur_pool_attr);
/* Insert the pool management region. */
const uintptr_t pool_management_start = pool_partitions_start;
const size_t pool_management_size = total_overhead_size;
u32 pool_management_attr = 0;
InsertPoolPartitionRegionIntoBothTrees(pool_management_start, pool_management_size, KMemoryRegionType_DramPoolManagement, KMemoryRegionType_VirtualDramPoolManagement, pool_management_attr);
}
}
}
| 9,017 | C++ | .cpp | 111 | 69.702703 | 214 | 0.707062 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,969 | kern_k_worker_task_manager.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_worker_task_manager.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
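        /* Thread queue on which the worker thread sleeps while no tasks are queued; EndWait clears the */
        /* manager's waiting-thread pointer so that AddTask knows there is no longer a thread to wake. */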
class ThreadQueueImplForKWorkerTaskManager final : public KThreadQueue {
private:
KThread **m_waiting_thread;
public:
constexpr ThreadQueueImplForKWorkerTaskManager(KThread **t) : KThreadQueue(), m_waiting_thread(t) { /* ... */ }
virtual void EndWait(KThread *waiting_thread, Result wait_result) override {
/* Clear our waiting thread. */
*m_waiting_thread = nullptr;
/* Invoke the base end wait handler. */
KThreadQueue::EndWait(waiting_thread, wait_result);
}
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
MESOSPHERE_UNUSED(waiting_thread, wait_result, cancel_timer_task);
MESOSPHERE_PANIC("ThreadQueueImplForKWorkerTaskManager::CancelWait\n");
}
};
}
void KWorkerTask::DoWorkerTask() {
if (auto * const thread = this->DynamicCast<KThread *>(); thread != nullptr) {
return thread->DoWorkerTaskImpl();
} else {
auto * const process = this->DynamicCast<KProcess *>();
MESOSPHERE_ABORT_UNLESS(process != nullptr);
return process->DoWorkerTaskImpl();
}
}
void KWorkerTaskManager::Initialize(s32 priority) {
/* Reserve a thread from the system limit. */
MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1));
/* Create a new thread. */
KThread *thread = KThread::Create();
MESOSPHERE_ABORT_UNLESS(thread != nullptr);
/* Launch the new thread. */
MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(thread, ThreadFunction, reinterpret_cast<uintptr_t>(this), priority, cpu::NumCores - 1));
/* Register the new thread. */
KThread::Register(thread);
/* Run the thread. */
thread->Run();
}
void KWorkerTaskManager::AddTask(WorkerType type, KWorkerTask *task) {
MESOSPHERE_ASSERT(type <= WorkerType_Count);
Kernel::GetWorkerTaskManager(type).AddTask(task);
}
void KWorkerTaskManager::ThreadFunction(uintptr_t arg) {
reinterpret_cast<KWorkerTaskManager *>(arg)->ThreadFunctionImpl();
}
void KWorkerTaskManager::ThreadFunctionImpl() {
/* Create wait queue. */
ThreadQueueImplForKWorkerTaskManager wait_queue(std::addressof(m_waiting_thread));
while (true) {
KWorkerTask *task;
/* Get a worker task. */
{
KScopedSchedulerLock sl;
task = this->GetTask();
if (task == nullptr) {
/* Wait to have a task. */
m_waiting_thread = GetCurrentThreadPointer();
GetCurrentThread().BeginWait(std::addressof(wait_queue));
continue;
}
}
/* Do the task. */
task->DoWorkerTask();
/* Destroy any objects we may need to close. */
GetCurrentThread().DestroyClosedObjects();
}
}
KWorkerTask *KWorkerTaskManager::GetTask() {
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
KWorkerTask *next = m_head_task;
if (next != nullptr) {
/* Advance the list. */
if (m_head_task == m_tail_task) {
m_head_task = nullptr;
m_tail_task = nullptr;
} else {
m_head_task = m_head_task->GetNextTask();
}
            /* Clear the dequeued task's next pointer. */
next->SetNextTask(nullptr);
}
return next;
}
void KWorkerTaskManager::AddTask(KWorkerTask *task) {
KScopedSchedulerLock sl;
MESOSPHERE_ASSERT(task->GetNextTask() == nullptr);
/* Insert the task. */
if (m_tail_task) {
m_tail_task->SetNextTask(task);
m_tail_task = task;
} else {
m_head_task = task;
m_tail_task = task;
/* Make ourselves active if we need to. */
if (m_waiting_thread != nullptr) {
m_waiting_thread->EndWait(ResultSuccess());
}
}
}
}
| 5,087 | C++ | .cpp | 119 | 32.252101 | 155 | 0.594611 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,970 | kern_initial_process.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_initial_process.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
struct InitialProcessInfo {
KProcess *process;
size_t stack_size;
s32 priority;
};
constinit KPhysicalAddress g_initial_process_binary_phys_addr = Null<KPhysicalAddress>;
constinit KVirtualAddress g_initial_process_binary_address = Null<KVirtualAddress>;
constinit size_t g_initial_process_binary_size = 0;
constinit InitialProcessBinaryHeader g_initial_process_binary_header = {};
constinit size_t g_initial_process_secure_memory_size = 0;
constinit u64 g_initial_process_id_min = std::numeric_limits<u64>::max();
constinit u64 g_initial_process_id_max = std::numeric_limits<u64>::min();
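        /* Parse and validate the initial process binary header on first use, and compute the total */
        /* secure memory needed by any KIPs that request it. */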
void LoadInitialProcessBinaryHeader() {
if (g_initial_process_binary_header.magic != InitialProcessBinaryMagic) {
/* Get the virtual address. */
MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_phys_addr != Null<KPhysicalAddress>);
const KVirtualAddress virt_addr = KMemoryLayout::GetLinearVirtualAddress(g_initial_process_binary_phys_addr);
/* Copy and validate the header. */
g_initial_process_binary_header = *GetPointer<InitialProcessBinaryHeader>(virt_addr);
MESOSPHERE_ABORT_UNLESS(g_initial_process_binary_header.magic == InitialProcessBinaryMagic);
MESOSPHERE_ABORT_UNLESS(g_initial_process_binary_header.num_processes <= init::GetSlabResourceCounts().num_KProcess);
/* Set the image address. */
g_initial_process_binary_address = virt_addr;
/* Process/calculate the secure memory size. */
KVirtualAddress current = g_initial_process_binary_address + sizeof(InitialProcessBinaryHeader);
const KVirtualAddress end = g_initial_process_binary_address + g_initial_process_binary_header.size;
const size_t num_processes = g_initial_process_binary_header.num_processes;
for (size_t i = 0; i < num_processes; ++i) {
/* Validate that we can read the current KIP. */
MESOSPHERE_ABORT_UNLESS(current <= end - sizeof(KInitialProcessHeader));
/* Attach to the current KIP. */
KInitialProcessReader reader;
KVirtualAddress data = reader.Attach(current);
MESOSPHERE_ABORT_UNLESS(data != Null<KVirtualAddress>);
/* If the process uses secure memory, account for that. */
if (reader.UsesSecureMemory()) {
g_initial_process_secure_memory_size += reader.GetSize() + util::AlignUp(reader.GetStackSize(), PageSize);
}
/* Advance to the next KIP. */
current = data + reader.GetBinarySize();
}
}
}
void CreateProcesses(InitialProcessInfo *infos) {
/* Determine process image extents. */
KVirtualAddress current = g_initial_process_binary_address + sizeof(InitialProcessBinaryHeader);
KVirtualAddress end = g_initial_process_binary_address + g_initial_process_binary_header.size;
/* Decide on pools to use. */
const auto unsafe_pool = static_cast<KMemoryManager::Pool>(KSystemControl::GetCreateProcessMemoryPool());
const auto secure_pool = (GetTargetFirmware() >= TargetFirmware_2_0_0) ? KMemoryManager::Pool_Secure : unsafe_pool;
const size_t num_processes = g_initial_process_binary_header.num_processes;
for (size_t i = 0; i < num_processes; ++i) {
/* Validate that we can read the current KIP header. */
MESOSPHERE_ABORT_UNLESS(current <= end - sizeof(KInitialProcessHeader));
/* Attach to the current kip. */
KInitialProcessReader reader;
KVirtualAddress data = reader.Attach(current);
MESOSPHERE_ABORT_UNLESS(data != Null<KVirtualAddress>);
/* Ensure that the remainder of our parse is page aligned. */
if (!util::IsAligned(GetInteger(data), PageSize)) {
const KVirtualAddress aligned_data = util::AlignDown(GetInteger(data), PageSize);
std::memmove(GetVoidPointer(aligned_data), GetVoidPointer(data), end - data);
data = aligned_data;
end -= (data - aligned_data);
}
/* If we crossed a page boundary, free the pages we're done using. */
if (KVirtualAddress aligned_current = util::AlignDown(GetInteger(current), PageSize); aligned_current != data) {
const size_t freed_size = data - aligned_current;
Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(aligned_current), freed_size / PageSize);
Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, freed_size);
}
/* Parse process parameters. */
ams::svc::CreateProcessParameter params;
MESOSPHERE_R_ABORT_UNLESS(reader.MakeCreateProcessParameter(std::addressof(params), true));
/* Get the binary size for the kip. */
const size_t binary_size = reader.GetBinarySize();
const size_t binary_pages = binary_size / PageSize;
/* Get the pool for both the current (compressed) image, and the decompressed process. */
const auto src_pool = Kernel::GetMemoryManager().GetPool(KMemoryLayout::GetLinearPhysicalAddress(data));
const auto dst_pool = reader.UsesSecureMemory() ? secure_pool : unsafe_pool;
/* Determine the process size, and how much memory isn't already reserved. */
const size_t process_size = params.code_num_pages * PageSize;
const size_t unreserved_size = process_size - (src_pool == dst_pool ? util::AlignDown(binary_size, PageSize) : 0);
/* Reserve however much memory we need to reserve. */
MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, unreserved_size));
/* Create the process. */
KProcess *new_process = nullptr;
{
/* Make page groups to represent the data. */
KPageGroup pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
KPageGroup workaround_pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
/* Populate the page group to represent the data. */
{
/* Allocate the previously unreserved pages. */
KPageGroup unreserve_pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(unreserve_pg), unreserved_size / PageSize, 1, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
/* Add the previously reserved pages. */
if (src_pool == dst_pool && binary_pages != 0) {
/* NOTE: Nintendo does not check the result of this operation. */
pg.AddBlock(KMemoryLayout::GetLinearPhysicalAddress(data), binary_pages);
}
/* Add the previously unreserved pages. */
for (const auto &block : unreserve_pg) {
/* NOTE: Nintendo does not check the result of this operation. */
pg.AddBlock(block.GetAddress(), block.GetNumPages());
}
}
MESOSPHERE_ABORT_UNLESS(pg.GetNumPages() == static_cast<size_t>(params.code_num_pages));
/* Ensure that we do not leak pages. */
KPageGroup *process_pg = std::addressof(pg);
ON_SCOPE_EXIT { process_pg->Close(); };
/* Load the process. */
reader.Load(pg, data);
/* If necessary, close/release the aligned part of the data we just loaded. */
if (const size_t aligned_bin_size = util::AlignDown(binary_size, PageSize); aligned_bin_size != 0 && src_pool != dst_pool) {
Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(data), aligned_bin_size / PageSize);
Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, aligned_bin_size);
}
/* Create a KProcess object. */
new_process = KProcess::Create();
MESOSPHERE_ABORT_UNLESS(new_process != nullptr);
/* Ensure the page group is usable for the process. */
/* If the pool is the same, we need to use the workaround page group. */
if (src_pool == dst_pool) {
/* Allocate a new, usable group for the process. */
MESOSPHERE_R_ABORT_UNLESS(Kernel::GetMemoryManager().AllocateAndOpen(std::addressof(workaround_pg), static_cast<size_t>(params.code_num_pages), 1, KMemoryManager::EncodeOption(dst_pool, KMemoryManager::Direction_FromFront)));
/* Copy data from the working page group to the usable one. */
auto work_it = pg.begin();
MESOSPHERE_ABORT_UNLESS(work_it != pg.end());
{
auto work_address = work_it->GetAddress();
auto work_remaining = work_it->GetNumPages();
for (const auto &block : workaround_pg) {
auto block_address = block.GetAddress();
auto block_remaining = block.GetNumPages();
while (block_remaining > 0) {
if (work_remaining == 0) {
++work_it;
work_address = work_it->GetAddress();
work_remaining = work_it->GetNumPages();
}
const size_t cur_pages = std::min(block_remaining, work_remaining);
const size_t cur_size = cur_pages * PageSize;
std::memcpy(GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(block_address)), GetVoidPointer(KMemoryLayout::GetLinearVirtualAddress(work_address)), cur_size);
block_address += cur_size;
work_address += cur_size;
block_remaining -= cur_pages;
work_remaining -= cur_pages;
}
}
++work_it;
}
MESOSPHERE_ABORT_UNLESS(work_it == pg.end());
/* We want to use the new page group. */
process_pg = std::addressof(workaround_pg);
pg.Close();
}
/* Initialize the process. */
MESOSPHERE_R_ABORT_UNLESS(new_process->Initialize(params, *process_pg, reader.GetCapabilities(), reader.GetNumCapabilities(), std::addressof(Kernel::GetSystemResourceLimit()), dst_pool, reader.IsImmortal()));
}
/* Set the process's memory permissions. */
MESOSPHERE_R_ABORT_UNLESS(reader.SetMemoryPermissions(new_process->GetPageTable(), params));
/* Register the process. */
KProcess::Register(new_process);
/* Set the ideal core id. */
new_process->SetIdealCoreId(reader.GetIdealCoreId());
/* Save the process info. */
infos[i].process = new_process;
infos[i].stack_size = reader.GetStackSize();
infos[i].priority = reader.GetPriority();
/* Advance the reader. */
current = data + binary_size;
}
/* Release remaining memory used by the image. */
{
const size_t remaining_size = util::AlignUp(GetInteger(g_initial_process_binary_address) + g_initial_process_binary_header.size, PageSize) - util::AlignDown(GetInteger(current), PageSize);
const size_t remaining_pages = remaining_size / PageSize;
Kernel::GetMemoryManager().Close(KMemoryLayout::GetLinearPhysicalAddress(util::AlignDown(GetInteger(current), PageSize)), remaining_pages);
Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_PhysicalMemoryMax, remaining_size);
}
}
}
void SetInitialProcessBinaryPhysicalAddress(KPhysicalAddress phys_addr, size_t size) {
MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_phys_addr == Null<KPhysicalAddress>);
g_initial_process_binary_phys_addr = phys_addr;
g_initial_process_binary_size = size;
}
KPhysicalAddress GetInitialProcessBinaryPhysicalAddress() {
MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_phys_addr != Null<KPhysicalAddress>);
return g_initial_process_binary_phys_addr;
}
size_t GetInitialProcessBinarySize() {
MESOSPHERE_INIT_ABORT_UNLESS(g_initial_process_binary_phys_addr != Null<KPhysicalAddress>);
return g_initial_process_binary_size;
}
u64 GetInitialProcessIdMin() {
return g_initial_process_id_min;
}
u64 GetInitialProcessIdMax() {
return g_initial_process_id_max;
}
size_t GetInitialProcessesSecureMemorySize() {
LoadInitialProcessBinaryHeader();
return g_initial_process_secure_memory_size;
}
size_t CopyInitialProcessBinaryToKernelMemory() {
LoadInitialProcessBinaryHeader();
if (g_initial_process_binary_header.num_processes > 0) {
/* Ensure that we have a non-zero size. */
const size_t expected_size = g_initial_process_binary_size;
MESOSPHERE_INIT_ABORT_UNLESS(expected_size != 0);
/* Ensure that the size we need to reserve is as we expect it to be. */
const size_t total_size = util::AlignUp(g_initial_process_binary_header.size, PageSize);
MESOSPHERE_ABORT_UNLESS(total_size == expected_size);
MESOSPHERE_ABORT_UNLESS(total_size <= InitialProcessBinarySizeMax);
/* Reserve pages for the initial process binary from the system resource limit. */
MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_PhysicalMemoryMax, total_size));
return total_size;
} else {
return 0;
}
}
void CreateAndRunInitialProcesses() {
/* Allocate space for the processes. */
InitialProcessInfo *infos = static_cast<InitialProcessInfo *>(__builtin_alloca(sizeof(InitialProcessInfo) * g_initial_process_binary_header.num_processes));
/* Create the processes. */
CreateProcesses(infos);
/* Determine the initial process id range. */
for (size_t i = 0; i < g_initial_process_binary_header.num_processes; i++) {
const auto pid = infos[i].process->GetId();
g_initial_process_id_min = std::min(g_initial_process_id_min, pid);
g_initial_process_id_max = std::max(g_initial_process_id_max, pid);
}
/* Run the processes. */
for (size_t i = 0; i < g_initial_process_binary_header.num_processes; i++) {
MESOSPHERE_R_ABORT_UNLESS(infos[i].process->Run(infos[i].priority, infos[i].stack_size));
infos[i].process->Close();
}
}
}
| 16,998 | C++ | .cpp | 258 | 49.209302 | 249 | 0.59014 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,971 | kern_k_session.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_session.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
void KSession::Initialize(KClientPort *client_port, uintptr_t name) {
MESOSPHERE_ASSERT_THIS();
/* Increment reference count. */
/* Because reference count is one on creation, this will result */
/* in a reference count of two. Thus, when both server and client are closed */
/* this object will be destroyed. */
this->Open();
/* Create our sub sessions. */
KAutoObject::Create<KServerSession>(std::addressof(m_server));
KAutoObject::Create<KClientSession>(std::addressof(m_client));
/* Initialize our sub sessions. */
m_server.Initialize(this);
m_client.Initialize(this);
/* Set state and name. */
this->SetState(State::Normal);
m_name = name;
/* Set our owner process. */
m_process = GetCurrentProcessPointer();
m_process->Open();
/* Set our port. */
m_port = client_port;
if (m_port != nullptr) {
m_port->Open();
}
/* Mark initialized. */
m_initialized = true;
}
void KSession::Finalize() {
if (m_port != nullptr) {
m_port->OnSessionFinalized();
m_port->Close();
}
}
void KSession::OnServerClosed() {
MESOSPHERE_ASSERT_THIS();
if (this->GetState() == State::Normal) {
this->SetState(State::ServerClosed);
m_client.OnServerClosed();
}
}
void KSession::OnClientClosed() {
MESOSPHERE_ASSERT_THIS();
if (this->GetState() == State::Normal) {
this->SetState(State::ClientClosed);
m_server.OnClientClosed();
}
}
void KSession::PostDestroy(uintptr_t arg) {
/* Release the session count resource the owner process holds. */
KProcess *owner = reinterpret_cast<KProcess *>(arg);
owner->ReleaseResource(ams::svc::LimitableResource_SessionCountMax, 1);
owner->Close();
}
}
| 2,675 | C++ | .cpp | 71 | 30.450704 | 87 | 0.62558 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,972 | kern_k_light_session.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_light_session.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
void KLightSession::Initialize(KClientPort *client_port, uintptr_t name) {
MESOSPHERE_ASSERT_THIS();
/* Increment reference count. */
/* Because reference count is one on creation, this will result */
/* in a reference count of two. Thus, when both server and client are closed */
/* this object will be destroyed. */
this->Open();
/* Create our sub sessions. */
KAutoObject::Create<KLightServerSession>(std::addressof(m_server));
KAutoObject::Create<KLightClientSession>(std::addressof(m_client));
/* Initialize our sub sessions. */
m_server.Initialize(this);
m_client.Initialize(this);
/* Set state and name. */
m_state = State::Normal;
m_name = name;
/* Set our owner process. */
m_process = GetCurrentProcessPointer();
m_process->Open();
/* Set our port. */
m_port = client_port;
if (m_port != nullptr) {
m_port->Open();
}
/* Mark initialized. */
m_initialized = true;
}
void KLightSession::Finalize() {
if (m_port != nullptr) {
m_port->OnSessionFinalized();
m_port->Close();
}
}
void KLightSession::OnServerClosed() {
MESOSPHERE_ASSERT_THIS();
if (m_state == State::Normal) {
m_state = State::ServerClosed;
m_client.OnServerClosed();
}
this->Close();
}
void KLightSession::OnClientClosed() {
MESOSPHERE_ASSERT_THIS();
if (m_state == State::Normal) {
m_state = State::ClientClosed;
m_server.OnClientClosed();
}
this->Close();
}
void KLightSession::PostDestroy(uintptr_t arg) {
/* Release the session count resource the owner process holds. */
KProcess *owner = reinterpret_cast<KProcess *>(arg);
owner->ReleaseResource(ams::svc::LimitableResource_SessionCountMax, 1);
owner->Close();
}
}
| 2,722 | C++ | .cpp | 73 | 29.986301 | 87 | 0.624097 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,973 | kern_k_device_address_space.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_device_address_space.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
/* Static initializer. */
void KDeviceAddressSpace::Initialize() {
/* This just forwards to the device page table manager. */
KDevicePageTable::Initialize();
}
/* Member functions. */
Result KDeviceAddressSpace::Initialize(u64 address, u64 size) {
MESOSPHERE_ASSERT_THIS();
/* Initialize the device page table. */
R_TRY(m_table.Initialize(address, size));
/* Set member variables. */
m_space_address = address;
m_space_size = size;
m_is_initialized = true;
R_SUCCEED();
}
void KDeviceAddressSpace::Finalize() {
MESOSPHERE_ASSERT_THIS();
/* Finalize the table. */
m_table.Finalize();
}
Result KDeviceAddressSpace::Attach(ams::svc::DeviceName device_name) {
/* Lock the address space. */
KScopedLightLock lk(m_lock);
/* Attach. */
R_RETURN(m_table.Attach(device_name, m_space_address, m_space_size));
}
Result KDeviceAddressSpace::Detach(ams::svc::DeviceName device_name) {
/* Lock the address space. */
KScopedLightLock lk(m_lock);
/* Detach. */
R_RETURN(m_table.Detach(device_name));
}
Result KDeviceAddressSpace::Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, u32 option, bool is_aligned) {
/* Check that the address falls within the space. */
R_UNLESS((m_space_address <= device_address && device_address + size - 1 <= m_space_address + m_space_size - 1), svc::ResultInvalidCurrentMemory());
/* Decode the option. */
const util::BitPack32 option_pack = { option };
const auto device_perm = option_pack.Get<ams::svc::MapDeviceAddressSpaceOption::Permission>();
const auto flags = option_pack.Get<ams::svc::MapDeviceAddressSpaceOption::Flags>();
const auto reserved = option_pack.Get<ams::svc::MapDeviceAddressSpaceOption::Reserved>();
/* Validate the option. */
/* TODO: It is likely that this check for flags == none is only on NX board. */
R_UNLESS(flags == ams::svc::MapDeviceAddressSpaceFlag_None, svc::ResultInvalidEnumValue());
R_UNLESS(reserved == 0, svc::ResultInvalidEnumValue());
/* Lock the address space. */
KScopedLightLock lk(m_lock);
/* Lock the page table to prevent concurrent device mapping operations. */
KScopedLightLock pt_lk = page_table->AcquireDeviceMapLock();
/* Lock the pages. */
bool is_io{};
R_TRY(page_table->LockForMapDeviceAddressSpace(std::addressof(is_io), process_address, size, ConvertToKMemoryPermission(device_perm), is_aligned, true));
/* Ensure that if we fail, we don't keep unmapped pages locked. */
ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpace(process_address, size)); };
/* Check that the io status is allowable. */
if (is_io) {
R_UNLESS((flags & ams::svc::MapDeviceAddressSpaceFlag_NotIoRegister) == 0, svc::ResultInvalidCombination());
}
/* Map the pages. */
{
/* Perform the mapping. */
R_TRY(m_table.Map(page_table, process_address, size, device_address, device_perm, is_aligned, is_io));
/* Ensure that we unmap the pages if we fail to update the protections. */
/* NOTE: Nintendo does not check the result of this unmap call. */
ON_RESULT_FAILURE { m_table.Unmap(device_address, size); };
/* Update the protections in accordance with how much we mapped. */
R_TRY(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size));
}
/* We succeeded. */
R_SUCCEED();
}
Result KDeviceAddressSpace::Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address) {
/* Check that the address falls within the space. */
R_UNLESS((m_space_address <= device_address && device_address + size - 1 <= m_space_address + m_space_size - 1), svc::ResultInvalidCurrentMemory());
/* Lock the address space. */
KScopedLightLock lk(m_lock);
/* Lock the page table to prevent concurrent device mapping operations. */
KScopedLightLock pt_lk = page_table->AcquireDeviceMapLock();
/* Lock the pages. */
R_TRY(page_table->LockForUnmapDeviceAddressSpace(process_address, size, true));
/* Unmap the pages. */
{
/* If we fail to unmap, we want to do a partial unlock. */
ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size)); };
/* Perform the unmap. */
R_TRY(m_table.Unmap(page_table, process_address, size, device_address));
}
/* Unlock the pages. */
MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpace(process_address, size));
R_SUCCEED();
}
}
| 5,771 | C++ | .cpp | 109 | 44.981651 | 163 | 0.652869 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,974 | kern_k_address_space_info.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_address_space_info.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
constexpr uintptr_t Invalid = std::numeric_limits<uintptr_t>::max();
constinit KAddressSpaceInfo AddressSpaceInfos[] = {
{ 32, ams::svc::AddressSmallMap32Start, ams::svc::AddressSmallMap32Size, KAddressSpaceInfo::Type_MapSmall, },
{ 32, ams::svc::AddressLargeMap32Start, ams::svc::AddressLargeMap32Size, KAddressSpaceInfo::Type_MapLarge, },
{ 32, Invalid, ams::svc::AddressMemoryRegionHeap32Size, KAddressSpaceInfo::Type_Heap, },
{ 32, Invalid, ams::svc::AddressMemoryRegionAlias32Size, KAddressSpaceInfo::Type_Alias, },
{ 36, ams::svc::AddressSmallMap36Start, ams::svc::AddressSmallMap36Size, KAddressSpaceInfo::Type_MapSmall, },
{ 36, ams::svc::AddressLargeMap36Start, ams::svc::AddressLargeMap36Size, KAddressSpaceInfo::Type_MapLarge, },
{ 36, Invalid, ams::svc::AddressMemoryRegionHeap36Size, KAddressSpaceInfo::Type_Heap, },
{ 36, Invalid, ams::svc::AddressMemoryRegionAlias36Size, KAddressSpaceInfo::Type_Alias, },
{ 39, ams::svc::AddressMap39Start, ams::svc::AddressMap39Size, KAddressSpaceInfo::Type_Map39Bit, },
{ 39, Invalid, ams::svc::AddressMemoryRegionSmall39Size, KAddressSpaceInfo::Type_MapSmall, },
{ 39, Invalid, ams::svc::AddressMemoryRegionHeap39Size, KAddressSpaceInfo::Type_Heap, },
{ 39, Invalid, ams::svc::AddressMemoryRegionAlias39Size, KAddressSpaceInfo::Type_Alias, },
{ 39, Invalid, ams::svc::AddressMemoryRegionStack39Size, KAddressSpaceInfo::Type_Stack, },
};
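        /* Address space width, indexed by the CreateProcessFlag address space field (see static_asserts below). */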
constexpr u8 FlagsToAddressSpaceWidthTable[4] = {
32, 36, 32, 39
};
constexpr size_t GetAddressSpaceWidth(ams::svc::CreateProcessFlag flags) {
/* Convert the input flags to an array index. */
const size_t idx = (flags & ams::svc::CreateProcessFlag_AddressSpaceMask) >> ams::svc::CreateProcessFlag_AddressSpaceShift;
MESOSPHERE_ABORT_UNLESS(idx < sizeof(FlagsToAddressSpaceWidthTable));
/* Return the width. */
return FlagsToAddressSpaceWidthTable[idx];
}
static_assert(GetAddressSpaceWidth(ams::svc::CreateProcessFlag_AddressSpace32Bit) == 32);
static_assert(GetAddressSpaceWidth(ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated) == 36);
static_assert(GetAddressSpaceWidth(ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias) == 32);
static_assert(GetAddressSpaceWidth(ams::svc::CreateProcessFlag_AddressSpace64Bit) == 39);
KAddressSpaceInfo &GetAddressSpaceInfo(size_t width, KAddressSpaceInfo::Type type) {
for (auto &info : AddressSpaceInfos) {
if (info.GetWidth() == width && info.GetType() == type) {
return info;
}
}
MESOSPHERE_PANIC("Could not find AddressSpaceInfo");
}
}
uintptr_t KAddressSpaceInfo::GetAddressSpaceStart(ams::svc::CreateProcessFlag flags, KAddressSpaceInfo::Type type) {
return GetAddressSpaceInfo(GetAddressSpaceWidth(flags), type).GetAddress();
}
size_t KAddressSpaceInfo::GetAddressSpaceSize(ams::svc::CreateProcessFlag flags, KAddressSpaceInfo::Type type) {
return GetAddressSpaceInfo(GetAddressSpaceWidth(flags), type).GetSize();
}
void KAddressSpaceInfo::SetAddressSpaceSize(size_t width, Type type, size_t size) {
GetAddressSpaceInfo(width, type).SetSize(size);
}
}
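/* Illustrative sketch, not part of the original file: for a process created with */
/* CreateProcessFlag_AddressSpace64Bit (width 39), a lookup such as */
/*   KAddressSpaceInfo::GetAddressSpaceSize(flags, KAddressSpaceInfo::Type_Heap) */
/* resolves against the 39-bit table entries above and yields ams::svc::AddressMemoryRegionHeap39Size. */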
| 4,468 | C++ | .cpp | 67 | 58.164179 | 135 | 0.654889 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,975 | kern_k_interrupt_event.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_interrupt_event.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
Result KInterruptEvent::Initialize(int32_t interrupt_name, ams::svc::InterruptType type) {
MESOSPHERE_ASSERT_THIS();
/* Verify the interrupt is defined and global. */
R_UNLESS(Kernel::GetInterruptManager().IsInterruptDefined(interrupt_name), svc::ResultOutOfRange());
R_UNLESS(Kernel::GetInterruptManager().IsGlobal(interrupt_name), svc::ResultOutOfRange());
/* Set interrupt id. */
m_interrupt_id = interrupt_name;
/* Set core id. */
m_core_id = GetCurrentCoreId();
/* Initialize readable event base. */
KReadableEvent::Initialize(nullptr);
/* Bind ourselves as the handler for our interrupt id. */
R_TRY(Kernel::GetInterruptManager().BindHandler(this, m_interrupt_id, m_core_id, KInterruptController::PriorityLevel_High, true, type == ams::svc::InterruptType_Level));
/* Mark initialized. */
m_is_initialized = true;
R_SUCCEED();
}
void KInterruptEvent::Finalize() {
MESOSPHERE_ASSERT_THIS();
/* Unbind ourselves as the handler for our interrupt id. */
Kernel::GetInterruptManager().UnbindHandler(m_interrupt_id, m_core_id);
/* Synchronize the unbind on all cores, before proceeding. */
KDpcManager::Sync();
/* Perform inherited finalization. */
KReadableEvent::Finalize();
}
Result KInterruptEvent::Reset() {
MESOSPHERE_ASSERT_THIS();
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* Clear the event. */
R_TRY(KReadableEvent::Reset());
/* Clear the interrupt. */
Kernel::GetInterruptManager().ClearInterrupt(m_interrupt_id, m_core_id);
R_SUCCEED();
}
KInterruptTask *KInterruptEvent::OnInterrupt(s32 interrupt_id) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_UNUSED(interrupt_id);
return this;
}
void KInterruptEvent::DoTask() {
MESOSPHERE_ASSERT_THIS();
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* Signal. */
this->Signal();
}
}
| 2,800 | C++ | .cpp | 66 | 35.681818 | 177 | 0.664947 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,976 | kern_panic.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_panic.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
extern "C" void _start();
namespace ams::result::impl {
NORETURN void OnResultAssertion(Result result) {
MESOSPHERE_PANIC("OnResultAssertion(2%03d-%04d)", result.GetModule(), result.GetDescription());
}
}
namespace ams::kern {
/* NOTE: This is not exposed via a header, but is referenced via assembly. */
/* NOTE: Nintendo does not save register contents on panic; we use this */
/* to generate an atmosphere fatal report on panic. */
constinit KExceptionContext g_panic_exception_contexts[cpu::NumCores];
namespace {
constexpr std::array<s32, cpu::NumCores> NegativeArray = [] {
std::array<s32, cpu::NumCores> arr = {};
for (size_t i = 0; i < arr.size(); i++) {
arr[i] = -1;
}
return arr;
}();
constinit util::Atomic<s32> g_next_ticket = 0;
constinit util::Atomic<s32> g_current_ticket = 0;
constinit std::array<s32, cpu::NumCores> g_core_tickets = NegativeArray;
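        /* Panic output is serialized with a ticket lock: each panicking core is assigned an even ticket */
        /* 2*n in arrival order, acquires its turn by advancing g_current_ticket from its ticket to the */
        /* odd value ticket+1, and releases by advancing to ticket+2, handing the turn to the next core. */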
s32 GetCoreTicket() {
const s32 core_id = GetCurrentCoreId();
if (g_core_tickets[core_id] == -1) {
g_core_tickets[core_id] = 2 * (g_next_ticket++);
}
return g_core_tickets[core_id];
}
void WaitCoreTicket() {
const s32 expected = GetCoreTicket();
const s32 desired = expected + 1;
s32 compare = g_current_ticket.Load<std::memory_order_relaxed>();
do {
if (compare == desired) {
break;
}
compare = expected;
} while (!g_current_ticket.CompareExchangeWeak(compare, desired));
}
void ReleaseCoreTicket() {
const s32 expected = GetCoreTicket() + 1;
const s32 desired = expected + 1;
s32 compare = expected;
g_current_ticket.CompareExchangeStrong(compare, desired);
}
ALWAYS_INLINE KExceptionContext *GetPanicExceptionContext(int core_id) {
#if defined(MESOSPHERE_ENABLE_PANIC_REGISTER_DUMP)
return std::addressof(g_panic_exception_contexts[core_id]);
#else
return nullptr;
#endif
}
[[gnu::unused]] void PrintCurrentState() {
/* Wait for it to be our turn to print. */
WaitCoreTicket();
/* Get the current exception context. */
const s32 core_id = GetCurrentCoreId();
const auto *core_ctx = GetPanicExceptionContext(core_id);
/* Print the state. */
MESOSPHERE_RELEASE_LOG("Core[%d] Current State:\n", core_id);
/* Print kernel state. */
#ifdef ATMOSPHERE_ARCH_ARM64
MESOSPHERE_RELEASE_LOG("Kernel Registers:\n");
for (size_t i = 0; i < 31; i++) {
MESOSPHERE_RELEASE_LOG(" X[%02zu] = %016lx\n", i, core_ctx->x[i]);
}
MESOSPHERE_RELEASE_LOG(" SP = %016lx\n", core_ctx->sp);
/* Print kernel backtrace. */
MESOSPHERE_RELEASE_LOG("Kernel Backtrace:\n");
uintptr_t fp = core_ctx != nullptr ? core_ctx->x[29] : reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
for (size_t i = 0; i < 32 && fp && util::IsAligned(fp, 0x10) && cpu::GetPhysicalAddressWritable(nullptr, fp, true); i++) {
struct {
uintptr_t fp;
uintptr_t lr;
} *stack_frame = reinterpret_cast<decltype(stack_frame)>(fp);
MESOSPHERE_RELEASE_LOG(" [%02zx]: %p\n", i, reinterpret_cast<void *>(stack_frame->lr));
fp = stack_frame->fp;
}
#endif
/* Print registers and user backtrace. */
KDebug::PrintRegister();
KDebug::PrintBacktrace();
MESOSPHERE_RELEASE_LOG("\n");
/* Allow the next core to print. */
ReleaseCoreTicket();
}
NORETURN void StopSystem() {
#ifdef MESOSPHERE_BUILD_FOR_DEBUGGING
/* Print the current core. */
PrintCurrentState();
#endif
KSystemControl::StopSystem(GetPanicExceptionContext(GetCurrentCoreId()));
}
}
NORETURN WEAK_SYMBOL void PanicImpl(const char *file, int line, const char *format, ...) {
#ifdef MESOSPHERE_BUILD_FOR_DEBUGGING
/* Wait for it to be our turn to print. */
WaitCoreTicket();
::std::va_list vl;
va_start(vl, format);
MESOSPHERE_RELEASE_LOG("Core[%d]: Kernel Panic at %s:%d\n", GetCurrentCoreId(), file, line);
if (KProcess *cur_process = GetCurrentProcessPointer(); cur_process != nullptr) {
MESOSPHERE_RELEASE_LOG("Core[%d]: Current Process: %s\n", GetCurrentCoreId(), cur_process->GetName());
}
MESOSPHERE_RELEASE_VLOG(format, vl);
MESOSPHERE_RELEASE_LOG("\n");
va_end(vl);
#else
MESOSPHERE_UNUSED(file, line, format);
#endif
StopSystem();
}
NORETURN WEAK_SYMBOL void PanicImpl() {
StopSystem();
}
}
| 5,981 | C++ | .cpp | 133 | 33.827068 | 138 | 0.569538 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,977 | kern_main.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_main.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
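        /* Invoke f on each core in core-id order, synchronizing all cores before and after each invocation. */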
template<typename F>
ALWAYS_INLINE void DoOnEachCoreInOrder(s32 core_id, F f) {
cpu::SynchronizeAllCores();
for (size_t i = 0; i < cpu::NumCores; i++) {
if (static_cast<s32>(i) == core_id) {
f();
}
cpu::SynchronizeAllCores();
}
}
}
NORETURN void HorizonKernelMain(s32 core_id) {
/* Setup the Core Local Region, and note that we're initializing. */
Kernel::InitializeCoreLocalRegion(core_id);
Kernel::SetState(Kernel::State::Initializing);
/* Ensure that all cores get to this point before proceeding. */
cpu::SynchronizeAllCores();
/* Initialize the main and idle thread for each core. */
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
Kernel::InitializeMainAndIdleThreads(core_id);
});
if (core_id == 0) {
/* Initialize the carveout and the system resource limit. */
KSystemControl::InitializePhase1();
/* Synchronize all cores before proceeding, to ensure access to the global rng is consistent. */
cpu::SynchronizeAllCores();
/* Initialize the memory manager and the KPageBuffer slabheap. */
{
const auto &management_region = KMemoryLayout::GetPoolManagementRegion();
MESOSPHERE_ABORT_UNLESS(management_region.GetEndAddress() != 0);
static_assert(util::size(MinimumMemoryManagerAlignmentShifts) == KMemoryManager::Pool_Count);
Kernel::GetMemoryManager().Initialize(management_region.GetAddress(), management_region.GetSize(), MinimumMemoryManagerAlignmentShifts);
}
/* Copy the Initial Process Binary to safe memory. */
CopyInitialProcessBinaryToKernelMemory();
/* Print out information about the kernel. */
Kernel::PrintLayout();
/* Initialize the KObject Slab Heaps. */
init::InitializeSlabHeaps();
/* Initialize the Dynamic Slab Heaps. */
{
const auto &pt_heap_region = KMemoryLayout::GetPageTableHeapRegion();
MESOSPHERE_ABORT_UNLESS(pt_heap_region.GetEndAddress() != 0);
Kernel::InitializeResourceManagers(pt_heap_region.GetAddress(), pt_heap_region.GetSize());
}
} else {
/* Synchronize all cores before proceeding, to ensure access to the global rng is consistent. */
cpu::SynchronizeAllCores();
}
/* Initialize the supervisor page table for each core. */
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
KPageTable::Initialize(core_id);
Kernel::GetKernelPageTable().Initialize(core_id);
});
/* Activate the supervisor page table for each core. */
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
Kernel::GetKernelPageTable().ActivateForInit();
});
/* NOTE: Kernel calls on each core a nullsub here on retail kernel. */
/* Register the main/idle threads and initialize the interrupt task manager. */
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
KThread::Register(std::addressof(Kernel::GetMainThread(core_id)));
KThread::Register(std::addressof(Kernel::GetIdleThread(core_id)));
});
/* Activate the scheduler and enable interrupts. */
DoOnEachCoreInOrder(core_id, [=]() ALWAYS_INLINE_LAMBDA {
Kernel::GetScheduler().Activate();
KInterruptManager::EnableInterrupts();
});
/* Initialize cpu interrupt threads. */
cpu::InitializeInterruptThreads(core_id);
/* Initialize the DPC manager. */
KDpcManager::Initialize();
cpu::SynchronizeAllCores();
/* Perform more core-0 specific initialization. */
if (core_id == 0) {
/* Initialize the exit worker managers, so that threads and processes may exit cleanly. */
Kernel::GetWorkerTaskManager(KWorkerTaskManager::WorkerType_ExitThread).Initialize(KWorkerTaskManager::ExitWorkerPriority);
Kernel::GetWorkerTaskManager(KWorkerTaskManager::WorkerType_ExitProcess).Initialize(KWorkerTaskManager::ExitWorkerPriority);
/* Setup so that we may sleep later, and reserve memory for secure applets. */
KSystemControl::InitializePhase2();
/* Initialize the SMMU. */
KDeviceAddressSpace::Initialize();
/* Load the initial processes. */
CreateAndRunInitialProcesses();
/* We're done initializing! */
Kernel::SetState(Kernel::State::Initialized);
/* Resume all threads suspended while we initialized. */
KThread::ResumeThreadsSuspendedForInit();
/* Validate that all reserved dram blocks are valid. */
for (const auto ®ion : KMemoryLayout::GetPhysicalMemoryRegionTree()) {
if (region.IsDerivedFrom(KMemoryRegionType_DramReservedBase)) {
MESOSPHERE_ABORT_UNLESS(region.GetEndAddress() != 0);
}
}
}
cpu::SynchronizeAllCores();
/* Set the current thread priority to idle. */
GetCurrentThread().SetPriorityToIdle();
/* Exit the main thread. */
{
auto &main_thread = Kernel::GetMainThread(core_id);
main_thread.Open();
main_thread.Exit();
}
/* Main() is done, and we should never get to this point. */
MESOSPHERE_PANIC("Main Thread continued after exit.");
AMS_INFINITE_LOOP();
}
}
/* ==== File: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_kernel.cpp ==== */
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
KDynamicPageManager g_resource_manager_page_manager;
template<typename T>
ALWAYS_INLINE void PrintMemoryRegion(const char *prefix, const T &extents) {
static_assert(std::is_same<decltype(extents.GetAddress()), uintptr_t>::value);
static_assert(std::is_same<decltype(extents.GetLastAddress()), uintptr_t>::value);
if constexpr (std::is_same<uintptr_t, unsigned int>::value) {
MESOSPHERE_LOG("%-24s0x%08x - 0x%08x\n", prefix, extents.GetAddress(), extents.GetLastAddress());
} else if constexpr (std::is_same<uintptr_t, unsigned long>::value) {
MESOSPHERE_LOG("%-24s0x%016lx - 0x%016lx\n", prefix, extents.GetAddress(), extents.GetLastAddress());
} else if constexpr (std::is_same<uintptr_t, unsigned long long>::value) {
MESOSPHERE_LOG("%-24s0x%016llx - 0x%016llx\n", prefix, extents.GetAddress(), extents.GetLastAddress());
} else {
static_assert(!std::is_same<T, T>::value, "Unknown uintptr_t width!");
}
}
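        /* NOTE (illustrative): the if-constexpr chain above picks a printf format matching */
        /* the platform's uintptr_t width at compile time; the dependent static_assert in the */
        /* final else only fires if the template is instantiated with an unhandled width. */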
}
void Kernel::InitializeCoreLocalRegion(s32 core_id) {
/* The core local region no longer exists, so just clear the current thread. */
AMS_UNUSED(core_id);
SetCurrentThread(nullptr);
}
void Kernel::InitializeMainAndIdleThreads(s32 core_id) {
        /* This function sets up the main thread and the idle thread for this core. */
KThread *main_thread = std::addressof(Kernel::GetMainThread(core_id));
void *main_thread_stack = GetVoidPointer(KMemoryLayout::GetMainStackTopAddress(core_id));
KThread *idle_thread = std::addressof(Kernel::GetIdleThread(core_id));
void *idle_thread_stack = GetVoidPointer(KMemoryLayout::GetIdleStackTopAddress(core_id));
KAutoObject::Create<KThread>(main_thread);
KAutoObject::Create<KThread>(idle_thread);
main_thread->Initialize(nullptr, 0, main_thread_stack, 0, KThread::MainThreadPriority, core_id, nullptr, KThread::ThreadType_Main);
idle_thread->Initialize(nullptr, 0, idle_thread_stack, 0, KThread::IdleThreadPriority, core_id, nullptr, KThread::ThreadType_Main);
/* Set the current thread to be the main thread, and we have no processes running yet. */
SetCurrentThread(main_thread);
/* Initialize the interrupt manager, hardware timer, and scheduler */
GetInterruptManager().Initialize(core_id);
GetHardwareTimer().Initialize();
GetScheduler().Initialize(idle_thread);
}
void Kernel::InitializeResourceManagers(KVirtualAddress address, size_t size) {
/* Ensure that the buffer is suitable for our use. */
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(address), PageSize));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
/* Ensure that we have space for our reference counts. */
const size_t rc_size = util::AlignUp(KPageTableSlabHeap::CalculateReferenceCountSize(size), PageSize);
MESOSPHERE_ABORT_UNLESS(rc_size < size);
size -= rc_size;
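        /* NOTE (illustrative): the reference counts are carved off the tail of the buffer. */
        /* For example, assuming 4KB pages, a 64MB buffer gives up rc_size (already */
        /* page-aligned) bytes at the end -- presumably one KPageTableManager::RefCount entry */
        /* per page -- so the shrunken [address, address + size) range below becomes the page */
        /* heap, while the counts themselves live at address + size; see the GetPointer call */
        /* further down. */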
/* Initialize the resource managers' shared page manager. */
g_resource_manager_page_manager.Initialize(address, size, std::max<size_t>(PageSize, KPageBufferSlabHeap::BufferSize));
/* Initialize the KPageBuffer slab heap. */
KPageBuffer::InitializeSlabHeap(g_resource_manager_page_manager);
/* Initialize the fixed-size slabheaps. */
s_app_memory_block_heap.Initialize(std::addressof(g_resource_manager_page_manager), ApplicationMemoryBlockSlabHeapSize);
s_sys_memory_block_heap.Initialize(std::addressof(g_resource_manager_page_manager), SystemMemoryBlockSlabHeapSize);
s_block_info_heap.Initialize(std::addressof(g_resource_manager_page_manager), BlockInfoSlabHeapSize);
/* Reserve all but a fixed number of remaining pages for the page table heap. */
const size_t num_pt_pages = g_resource_manager_page_manager.GetCount() - g_resource_manager_page_manager.GetUsed() - ReservedDynamicPageCount;
s_page_table_heap.Initialize(std::addressof(g_resource_manager_page_manager), num_pt_pages, GetPointer<KPageTableManager::RefCount>(address + size));
/* Setup the slab managers. */
KDynamicPageManager * const app_dynamic_page_manager = nullptr;
KDynamicPageManager * const sys_dynamic_page_manager = KTargetSystem::IsDynamicResourceLimitsEnabled() ? std::addressof(g_resource_manager_page_manager) : nullptr;
s_app_memory_block_manager.Initialize(app_dynamic_page_manager, std::addressof(s_app_memory_block_heap));
s_sys_memory_block_manager.Initialize(sys_dynamic_page_manager, std::addressof(s_sys_memory_block_heap));
s_app_block_info_manager.Initialize(app_dynamic_page_manager, std::addressof(s_block_info_heap));
s_sys_block_info_manager.Initialize(sys_dynamic_page_manager, std::addressof(s_block_info_heap));
s_app_page_table_manager.Initialize(app_dynamic_page_manager, std::addressof(s_page_table_heap));
s_sys_page_table_manager.Initialize(sys_dynamic_page_manager, std::addressof(s_page_table_heap));
/* Check that we have the correct number of dynamic pages available. */
MESOSPHERE_ABORT_UNLESS(g_resource_manager_page_manager.GetCount() - g_resource_manager_page_manager.GetUsed() == ReservedDynamicPageCount);
/* Create the system page table managers. */
KAutoObject::Create<KSystemResource>(std::addressof(s_app_system_resource));
KAutoObject::Create<KSystemResource>(std::addressof(s_sys_system_resource));
/* Set the managers for the system resources. */
s_app_system_resource.SetManagers(s_app_memory_block_manager, s_app_block_info_manager, s_app_page_table_manager);
s_sys_system_resource.SetManagers(s_sys_memory_block_manager, s_sys_block_info_manager, s_sys_page_table_manager);
}
void Kernel::PrintLayout() {
const auto target_fw = kern::GetTargetFirmware();
/* Print out the kernel version. */
MESOSPHERE_LOG("Horizon Kernel (Mesosphere)\n");
MESOSPHERE_LOG("Built: %s %s\n", __DATE__, __TIME__);
MESOSPHERE_LOG("Atmosphere version: %d.%d.%d-%s\n", ATMOSPHERE_RELEASE_VERSION, ATMOSPHERE_GIT_REVISION);
MESOSPHERE_LOG("Target Firmware: %d.%d.%d\n", (target_fw >> 24) & 0xFF, (target_fw >> 16) & 0xFF, (target_fw >> 8) & 0xFF);
MESOSPHERE_LOG("Supported OS version: %d.%d.%d\n", ATMOSPHERE_SUPPORTED_HOS_VERSION_MAJOR, ATMOSPHERE_SUPPORTED_HOS_VERSION_MINOR, ATMOSPHERE_SUPPORTED_HOS_VERSION_MICRO);
MESOSPHERE_LOG("\n");
/* Print relative memory usage. */
const auto [total, kernel] = KMemoryLayout::GetTotalAndKernelMemorySizes();
MESOSPHERE_LOG("Kernel Memory Usage: %zu/%zu MB\n", util::AlignUp(kernel, 1_MB) / 1_MB, util::AlignUp(total, 1_MB) / 1_MB);
MESOSPHERE_LOG("\n");
/* Print out important memory layout regions. */
MESOSPHERE_LOG("Virtual Memory Layout\n");
PrintMemoryRegion(" KernelRegion", KMemoryLayout::GetKernelRegionExtents());
PrintMemoryRegion(" Code", KMemoryLayout::GetKernelCodeRegionExtents());
PrintMemoryRegion(" Stack", KMemoryLayout::GetKernelStackRegionExtents());
PrintMemoryRegion(" Misc", KMemoryLayout::GetKernelMiscRegionExtents());
PrintMemoryRegion(" Slab", KMemoryLayout::GetKernelSlabRegionExtents());
PrintMemoryRegion(" LinearRegion", KMemoryLayout::GetLinearRegionVirtualExtents());
MESOSPHERE_LOG("\n");
MESOSPHERE_LOG("Physical Memory Layout\n");
PrintMemoryRegion(" LinearRegion", KMemoryLayout::GetLinearRegionPhysicalExtents());
PrintMemoryRegion(" CarveoutRegion", KMemoryLayout::GetCarveoutRegionExtents());
MESOSPHERE_LOG("\n");
PrintMemoryRegion(" KernelRegion", KMemoryLayout::GetKernelRegionPhysicalExtents());
PrintMemoryRegion(" Code", KMemoryLayout::GetKernelCodeRegionPhysicalExtents());
PrintMemoryRegion(" Slab", KMemoryLayout::GetKernelSlabRegionPhysicalExtents());
if constexpr (KSystemControl::SecureAppletMemorySize > 0) {
PrintMemoryRegion(" SecureApplet", KMemoryLayout::GetKernelSecureAppletMemoryRegionPhysicalExtents());
}
PrintMemoryRegion(" PageTableHeap", KMemoryLayout::GetKernelPageTableHeapRegionPhysicalExtents());
PrintMemoryRegion(" InitPageTable", KMemoryLayout::GetKernelInitPageTableRegionPhysicalExtents());
if constexpr (IsKTraceEnabled) {
MESOSPHERE_LOG(" DebugRegion\n");
PrintMemoryRegion(" Trace Buffer", KMemoryLayout::GetKernelTraceBufferRegionPhysicalExtents());
}
PrintMemoryRegion(" MemoryPoolRegion", KMemoryLayout::GetKernelPoolPartitionRegionPhysicalExtents());
if (GetTargetFirmware() >= TargetFirmware_5_0_0) {
PrintMemoryRegion(" Management", KMemoryLayout::GetKernelPoolManagementRegionPhysicalExtents());
PrintMemoryRegion(" System", KMemoryLayout::GetKernelSystemPoolRegionPhysicalExtents());
if (KMemoryLayout::HasKernelSystemNonSecurePoolRegion()) {
PrintMemoryRegion(" SystemUnsafe", KMemoryLayout::GetKernelSystemNonSecurePoolRegionPhysicalExtents());
}
if (KMemoryLayout::HasKernelAppletPoolRegion()) {
PrintMemoryRegion(" Applet", KMemoryLayout::GetKernelAppletPoolRegionPhysicalExtents());
}
if (KMemoryLayout::HasKernelApplicationPoolRegion()) {
PrintMemoryRegion(" Application", KMemoryLayout::GetKernelApplicationPoolRegionPhysicalExtents());
}
} else {
PrintMemoryRegion(" Management", KMemoryLayout::GetKernelPoolManagementRegionPhysicalExtents());
PrintMemoryRegion(" Secure", KMemoryLayout::GetKernelSystemPoolRegionPhysicalExtents());
PrintMemoryRegion(" Unsafe", KMemoryLayout::GetKernelApplicationPoolRegionPhysicalExtents());
}
MESOSPHERE_LOG("\n");
}
}
/* ==== File: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_debug_log_impl.board.qemu_virt.cpp ==== */
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#include "kern_debug_log_impl.hpp"
namespace ams::kern {
#if defined(MESOSPHERE_DEBUG_LOG_USE_SEMIHOSTING)
bool KDebugLogImpl::Initialize() {
return true;
}
void KDebugLogImpl::PutChar(char c) {
/* TODO */
AMS_UNUSED(c);
}
void KDebugLogImpl::Flush() {
/* ... */
}
void KDebugLogImpl::Save() {
/* ... */
}
void KDebugLogImpl::Restore() {
/* ... */
}
#else
#error "Unknown Debug device!"
#endif
}
/* ==== File: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_session_request.cpp ==== */
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
Result KSessionRequest::SessionMappings::PushMap(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state, size_t index) {
/* At most 15 buffers of each type (4-bit descriptor counts). */
MESOSPHERE_ASSERT(index < NumMappings);
/* Get the mapping. */
Mapping *mapping;
if (index < NumStaticMappings) {
mapping = std::addressof(m_static_mappings[index]);
} else {
/* Allocate dynamic mappings as necessary. */
if (m_dynamic_mappings == nullptr) {
m_dynamic_mappings = DynamicMappings::Allocate();
R_UNLESS(m_dynamic_mappings != nullptr, svc::ResultOutOfMemory());
}
mapping = std::addressof(m_dynamic_mappings->Get(index - NumStaticMappings));
}
/* Set the mapping. */
mapping->Set(client, server, size, state);
R_SUCCEED();
}
Result KSessionRequest::SessionMappings::PushSend(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
MESOSPHERE_ASSERT(m_num_recv == 0);
MESOSPHERE_ASSERT(m_num_exch == 0);
R_RETURN(this->PushMap(client, server, size, state, m_num_send++));
}
Result KSessionRequest::SessionMappings::PushReceive(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
MESOSPHERE_ASSERT(m_num_exch == 0);
R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv++));
}
Result KSessionRequest::SessionMappings::PushExchange(KProcessAddress client, KProcessAddress server, size_t size, KMemoryState state) {
R_RETURN(this->PushMap(client, server, size, state, m_num_send + m_num_recv + m_num_exch++));
}
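    /* NOTE (illustrative): mappings are stored contiguously by type -- send descriptors */
    /* first, then receive, then exchange. With 2 sends, 1 receive, and 1 exchange, the */
    /* indices are send -> 0,1; receive -> 2; exchange -> 3. The asserts in PushSend and */
    /* PushReceive enforce that pushes arrive in exactly that order, keeping the layout valid. */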
void KSessionRequest::SessionMappings::Finalize() {
if (m_dynamic_mappings) {
DynamicMappings::Free(m_dynamic_mappings);
m_dynamic_mappings = nullptr;
}
}
}
/* ==== File: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_transfer_memory.cpp ==== */
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
Result KTransferMemory::Initialize(KProcessAddress addr, size_t size, ams::svc::MemoryPermission own_perm) {
MESOSPHERE_ASSERT_THIS();
/* Set members. */
m_owner = GetCurrentProcessPointer();
/* Get the owner page table. */
auto &page_table = m_owner->GetPageTable();
/* Construct the page group, guarding to make sure our state is valid on exit. */
auto pg_guard = util::ConstructAtGuarded(m_page_group, page_table.GetBlockInfoManager());
/* Lock the memory. */
R_TRY(page_table.LockForTransferMemory(GetPointer(m_page_group), addr, size, ConvertToKMemoryPermission(own_perm)));
/* Set remaining tracking members. */
m_owner->Open();
m_owner_perm = own_perm;
m_address = addr;
m_is_initialized = true;
m_is_mapped = false;
/* We succeeded. */
pg_guard.Cancel();
R_SUCCEED();
}
void KTransferMemory::Finalize() {
MESOSPHERE_ASSERT_THIS();
/* Unlock. */
if (!m_is_mapped) {
const size_t size = GetReference(m_page_group).GetNumPages() * PageSize;
MESOSPHERE_R_ABORT_UNLESS(m_owner->GetPageTable().UnlockForTransferMemory(m_address, size, GetReference(m_page_group)));
}
/* Close the page group. */
GetReference(m_page_group).Close();
GetReference(m_page_group).Finalize();
}
void KTransferMemory::PostDestroy(uintptr_t arg) {
KProcess *owner = reinterpret_cast<KProcess *>(arg);
owner->ReleaseResource(ams::svc::LimitableResource_TransferMemoryCountMax, 1);
owner->Close();
}
Result KTransferMemory::Map(KProcessAddress address, size_t size, ams::svc::MemoryPermission map_perm) {
MESOSPHERE_ASSERT_THIS();
/* Validate the size. */
R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
/* Validate the permission. */
R_UNLESS(m_owner_perm == map_perm, svc::ResultInvalidState());
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Ensure we're not already mapped. */
R_UNLESS(!m_is_mapped, svc::ResultInvalidState());
/* Map the memory. */
const KMemoryState state = (m_owner_perm == ams::svc::MemoryPermission_None) ? KMemoryState_Transfered : KMemoryState_SharedTransfered;
R_TRY(GetCurrentProcess().GetPageTable().MapPageGroup(address, GetReference(m_page_group), state, KMemoryPermission_UserReadWrite));
/* Mark ourselves as mapped. */
m_is_mapped = true;
R_SUCCEED();
}
Result KTransferMemory::Unmap(KProcessAddress address, size_t size) {
MESOSPHERE_ASSERT_THIS();
/* Validate the size. */
R_UNLESS(GetReference(m_page_group).GetNumPages() == util::DivideUp(size, PageSize), svc::ResultInvalidSize());
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Unmap the memory. */
const KMemoryState state = (m_owner_perm == ams::svc::MemoryPermission_None) ? KMemoryState_Transfered : KMemoryState_SharedTransfered;
R_TRY(GetCurrentProcess().GetPageTable().UnmapPageGroup(address, GetReference(m_page_group), state));
/* Mark ourselves as unmapped. */
MESOSPHERE_ASSERT(m_is_mapped);
m_is_mapped = false;
R_SUCCEED();
}
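    /* NOTE (illustrative): Map and Unmap derive the memory state the same way from the */
    /* owner's permission -- MemoryPermission_None means the owner gave the memory up */
    /* entirely (KMemoryState_Transfered), anything else leaves it shared */
    /* (KMemoryState_SharedTransfered) -- so the unmap always finds the state the map set. */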
}
/* ==== File: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_io_pool.cpp ==== */
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
constinit KLightLock g_io_pool_lock;
constinit bool g_pool_used[ams::svc::IoPoolType_Count];
struct IoRegionExtents {
KPhysicalAddress address;
size_t size;
};
#if defined(ATMOSPHERE_BOARD_NINTENDO_NX)
#include "board/nintendo/nx/kern_k_io_pool.board.nintendo_nx.inc"
#elif defined(AMS_SVC_IO_POOL_NOT_SUPPORTED)
#include "kern_k_io_pool.unsupported.inc"
#else
#error "Unknown context for IoPoolType!"
#endif
constexpr bool IsValidIoRegionImpl(ams::svc::IoPoolType pool_type, KPhysicalAddress address, size_t size) {
            /* NOTE: It seems likely this depends on pool type, but this has not been confirmed. */
MESOSPHERE_UNUSED(pool_type);
/* Check if the address/size falls within any allowable extents. */
for (const auto &extents : g_io_region_extents) {
if (extents.size != 0 && extents.address <= address && address + size - 1 <= extents.address + extents.size - 1) {
return true;
}
}
return false;
}
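        /* NOTE (illustrative): the bound check above compares last addresses */
        /* (address + size - 1 vs extents.address + extents.size - 1) rather than */
        /* one-past-the-end sums, presumably so an extent reaching the very top of the */
        /* physical address space does not overflow and wrap to zero. */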
}
bool KIoPool::IsValidIoPoolType(ams::svc::IoPoolType pool_type) {
return IsValidIoPoolTypeImpl(pool_type);
}
Result KIoPool::Initialize(ams::svc::IoPoolType pool_type) {
MESOSPHERE_ASSERT_THIS();
/* Register the pool type. */
{
/* Lock the pool used table. */
KScopedLightLock lk(g_io_pool_lock);
/* Check that the pool isn't already used. */
R_UNLESS(!g_pool_used[pool_type], svc::ResultBusy());
/* Set the pool as used. */
g_pool_used[pool_type] = true;
}
/* Set our fields. */
m_pool_type = pool_type;
m_is_initialized = true;
R_SUCCEED();
}
void KIoPool::Finalize() {
MESOSPHERE_ASSERT_THIS();
/* Lock the pool used table. */
KScopedLightLock lk(g_io_pool_lock);
/* Check that the pool is used. */
MESOSPHERE_ASSERT(g_pool_used[m_pool_type]);
/* Set the pool as unused. */
g_pool_used[m_pool_type] = false;
}
Result KIoPool::AddIoRegion(KIoRegion *new_region) {
MESOSPHERE_ASSERT_THIS();
/* Check that the region is allowed. */
R_UNLESS(IsValidIoRegionImpl(m_pool_type, new_region->GetAddress(), new_region->GetSize()), svc::ResultInvalidMemoryRegion());
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Check that the desired range isn't already in our pool. */
{
/* Get the lowest region with address >= the new region that's already in our tree. */
auto lowest_after = m_io_region_tree.nfind_key(new_region->GetAddress());
if (lowest_after != m_io_region_tree.end()) {
R_UNLESS(!lowest_after->Overlaps(new_region->GetAddress(), new_region->GetSize()), svc::ResultBusy());
}
            /* Whether or not such a region exists, we also need to check the highest region */
            /* with address < the new region that's already in our tree. */
if (lowest_after != m_io_region_tree.begin()) {
auto highest_before = --lowest_after;
R_UNLESS(!highest_before->Overlaps(new_region->GetAddress(), new_region->GetSize()), svc::ResultBusy());
}
}
/* Add the region to our pool. */
m_io_region_tree.insert(*new_region);
R_SUCCEED();
}
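    /* NOTE (illustrative): checking only the successor from nfind_key and its immediate */
    /* predecessor suffices because this function keeps the tree's regions pairwise */
    /* non-overlapping -- any region overlapping the new range must be its nearest */
    /* neighbor on one side or the other. */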
void KIoPool::RemoveIoRegion(KIoRegion *region) {
MESOSPHERE_ASSERT_THIS();
/* Lock ourselves. */
KScopedLightLock lk(m_lock);
/* Remove the region from our tree. */
m_io_region_tree.erase(m_io_region_tree.iterator_to(*region));
}
}
/* ==== File: Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_thread.cpp ==== */
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
constexpr inline s32 TerminatingThreadPriority = ams::svc::SystemThreadPriorityHighest - 1;
constinit util::Atomic<u64> g_thread_id = 0;
constexpr ALWAYS_INLINE bool IsKernelAddressKey(KProcessAddress key) {
const uintptr_t key_uptr = GetInteger(key);
return KernelVirtualAddressSpaceBase <= key_uptr && key_uptr <= KernelVirtualAddressSpaceLast && (key_uptr & 1) == 0;
}
void InitializeKernelStack(uintptr_t stack_top) {
#if defined(MESOSPHERE_ENABLE_KERNEL_STACK_USAGE)
const uintptr_t stack_bottom = stack_top - PageSize;
std::memset(reinterpret_cast<void *>(stack_bottom), 0xCC, PageSize - sizeof(KThread::StackParameters));
#else
MESOSPHERE_UNUSED(stack_top);
#endif
}
void CleanupKernelStack(uintptr_t stack_top) {
const uintptr_t stack_bottom = stack_top - PageSize;
KPhysicalAddress stack_paddr = Null<KPhysicalAddress>;
MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(std::addressof(stack_paddr), stack_bottom));
MESOSPHERE_R_ABORT_UNLESS(Kernel::GetKernelPageTable().UnmapPages(stack_bottom, 1, KMemoryState_Kernel));
/* Free the stack page. */
KPageBuffer::FreeChecked<PageSize>(KPageBuffer::FromPhysicalAddress(stack_paddr));
}
class ThreadQueueImplForKThreadSleep final : public KThreadQueueWithoutEndWait { /* ... */ };
class ThreadQueueImplForKThreadSetProperty final : public KThreadQueue {
private:
KThread::WaiterList *m_wait_list;
public:
constexpr ThreadQueueImplForKThreadSetProperty(KThread::WaiterList *wl) : m_wait_list(wl) { /* ... */ }
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* Remove the thread from the wait list. */
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};
}
ALWAYS_INLINE void KThread::SetPinnedSvcPermissions() {
/* Get our stack parameters. */
auto &sp = this->GetStackParameters();
/* Get our parent's svc permissions. */
MESOSPHERE_ASSERT(m_parent != nullptr);
const auto &svc_permissions = m_parent->GetSvcPermissions();
/* Get whether we have access to return from exception. */
const bool return_from_exception = sp.svc_access_flags[svc::SvcId_ReturnFromException];
/* Clear all permissions. */
sp.svc_access_flags.Reset();
/* Set SynchronizePreemptionState if allowed. */
if (svc_permissions[svc::SvcId_SynchronizePreemptionState]) {
sp.svc_access_flags[svc::SvcId_SynchronizePreemptionState] = true;
}
/* If we previously had ReturnFromException, potentially grant it and GetInfo. */
if (return_from_exception) {
/* Set ReturnFromException (guaranteed allowed, if we're here). */
sp.svc_access_flags[svc::SvcId_ReturnFromException] = true;
/* Set GetInfo if allowed. */
if (svc_permissions[svc::SvcId_GetInfo]) {
sp.svc_access_flags[svc::SvcId_GetInfo] = true;
}
}
}
ALWAYS_INLINE void KThread::SetUnpinnedSvcPermissions() {
/* Get our stack parameters. */
auto &sp = this->GetStackParameters();
/* Get our parent's svc permissions. */
MESOSPHERE_ASSERT(m_parent != nullptr);
const auto &svc_permissions = m_parent->GetSvcPermissions();
/* Get whether we have access to return from exception. */
const bool return_from_exception = sp.svc_access_flags[svc::SvcId_ReturnFromException];
/* Copy permissions. */
sp.svc_access_flags = svc_permissions;
/* Clear specific SVCs based on our state. */
sp.svc_access_flags[svc::SvcId_SynchronizePreemptionState] = false;
if (!return_from_exception) {
sp.svc_access_flags[svc::SvcId_ReturnFromException] = false;
}
}
ALWAYS_INLINE void KThread::SetUsermodeExceptionSvcPermissions() {
/* Get our stack parameters. */
auto &sp = this->GetStackParameters();
/* Get our parent's svc permissions. */
MESOSPHERE_ASSERT(m_parent != nullptr);
const auto &svc_permissions = m_parent->GetSvcPermissions();
/* Set ReturnFromException if allowed. */
if (svc_permissions[svc::SvcId_ReturnFromException]) {
sp.svc_access_flags[svc::SvcId_ReturnFromException] = true;
}
/* Set GetInfo if allowed. */
if (svc_permissions[svc::SvcId_GetInfo]) {
sp.svc_access_flags[svc::SvcId_GetInfo] = true;
}
}
ALWAYS_INLINE void KThread::ClearUsermodeExceptionSvcPermissions() {
/* Get our stack parameters. */
auto &sp = this->GetStackParameters();
/* Clear ReturnFromException. */
sp.svc_access_flags[svc::SvcId_ReturnFromException] = false;
/* If pinned, clear GetInfo. */
if (sp.is_pinned) {
sp.svc_access_flags[svc::SvcId_GetInfo] = false;
}
}
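    /* NOTE (illustrative): taken together, the helpers above implement the pinned-thread */
    /* SVC policy -- while pinned, the table shrinks to SynchronizePreemptionState (plus */
    /* ReturnFromException/GetInfo when the thread was already in a usermode exception */
    /* handler); unpinning restores the parent's full table, minus SynchronizePreemptionState */
    /* and minus ReturnFromException unless the thread already held it. */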
Result KThread::Initialize(KThreadFunction func, uintptr_t arg, void *kern_stack_top, KProcessAddress user_stack_top, s32 prio, s32 virt_core, KProcess *owner, ThreadType type) {
/* Assert parameters are valid. */
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(kern_stack_top != nullptr);
MESOSPHERE_ASSERT((type == ThreadType_Main) || (ams::svc::HighestThreadPriority <= prio && prio <= ams::svc::LowestThreadPriority));
MESOSPHERE_ASSERT((owner != nullptr) || (type != ThreadType_User));
MESOSPHERE_ASSERT(0 <= virt_core && virt_core < static_cast<s32>(BITSIZEOF(u64)));
/* Convert the virtual core to a physical core. */
const s32 phys_core = cpu::VirtualToPhysicalCoreMap[virt_core];
MESOSPHERE_ASSERT(0 <= phys_core && phys_core < static_cast<s32>(cpu::NumCores));
/* First, clear the TLS address. */
m_tls_address = Null<KProcessAddress>;
const uintptr_t kern_stack_top_address = reinterpret_cast<uintptr_t>(kern_stack_top);
MESOSPHERE_UNUSED(kern_stack_top_address);
/* Next, assert things based on the type. */
switch (type) {
case ThreadType_Main:
{
MESOSPHERE_ASSERT(arg == 0);
}
[[fallthrough]];
case ThreadType_HighPriority:
if (type != ThreadType_Main) {
MESOSPHERE_ASSERT(phys_core == GetCurrentCoreId());
}
[[fallthrough]];
case ThreadType_Kernel:
{
MESOSPHERE_ASSERT(user_stack_top == 0);
MESOSPHERE_ASSERT(util::IsAligned(kern_stack_top_address, PageSize));
}
[[fallthrough]];
case ThreadType_User:
{
MESOSPHERE_ASSERT(((owner == nullptr) || (owner->GetCoreMask() | (1ul << virt_core)) == owner->GetCoreMask()));
MESOSPHERE_ASSERT(((owner == nullptr) || (owner->GetPriorityMask() | (1ul << prio)) == owner->GetPriorityMask()));
}
break;
default:
MESOSPHERE_PANIC("KThread::Initialize: Unknown ThreadType %u", static_cast<u32>(type));
break;
}
/* Set the ideal core ID and affinity mask. */
m_virtual_ideal_core_id = virt_core;
m_physical_ideal_core_id = phys_core;
m_virtual_affinity_mask = (static_cast<u64>(1) << virt_core);
m_physical_affinity_mask.SetAffinity(phys_core, true);
/* Set the thread state. */
m_thread_state = (type == ThreadType_Main) ? ThreadState_Runnable : ThreadState_Initialized;
/* Set TLS address and TLS heap address. */
/* NOTE: Nintendo wrote TLS address above already, but official code really does write tls address twice. */
m_tls_address = 0;
m_tls_heap_address = 0;
/* Set parent and condvar tree. */
m_parent = nullptr;
m_condvar_tree = nullptr;
m_condvar_key = 0;
/* Set sync booleans. */
m_signaled = false;
m_termination_requested = false;
m_wait_cancelled = false;
m_cancellable = false;
/* Set core ID and wait result. */
m_core_id = phys_core;
m_wait_result = svc::ResultNoSynchronizationObject();
/* Set the stack top. */
m_kernel_stack_top = kern_stack_top;
/* Set priorities. */
m_priority = prio;
m_base_priority = prio;
/* Initialize wait queue/sync index. */
m_synced_index = -1;
m_wait_queue = nullptr;
/* Set suspend flags. */
m_suspend_request_flags = 0;
m_suspend_allowed_flags = ThreadState_SuspendFlagMask;
/* We're neither debug attached, nor are we nesting our priority inheritance. */
m_debug_attached = false;
m_priority_inheritance_count = 0;
/* We haven't been scheduled, and we have done no light IPC. */
m_schedule_count = -1;
m_last_scheduled_tick = 0;
m_light_ipc_data = nullptr;
/* We're not waiting for a lock, and we haven't disabled migration. */
m_waiting_lock_info = nullptr;
m_num_core_migration_disables = 0;
/* We have no waiters, and no closed objects. */
m_num_kernel_waiters = 0;
m_closed_object = nullptr;
/* Set our current core id. */
m_current_core_id = phys_core;
/* We haven't released our resource limit hint, and we've spent no time on the cpu. */
m_resource_limit_release_hint = false;
m_cpu_time = 0;
/* Setup our kernel stack. */
if (type != ThreadType_Main) {
InitializeKernelStack(reinterpret_cast<uintptr_t>(kern_stack_top));
}
/* Clear our stack parameters. */
std::memset(static_cast<void *>(std::addressof(this->GetStackParameters())), 0, sizeof(StackParameters));
/* Setup the TLS, if needed. */
if (type == ThreadType_User) {
R_TRY(owner->CreateThreadLocalRegion(std::addressof(m_tls_address)));
m_tls_heap_address = owner->GetThreadLocalRegionPointer(m_tls_address);
std::memset(m_tls_heap_address, 0, ams::svc::ThreadLocalRegionSize);
}
/* Set parent, if relevant. */
if (owner != nullptr) {
m_parent = owner;
m_parent->Open();
}
/* Initialize thread context. */
constexpr bool IsDefault64Bit = sizeof(uintptr_t) == sizeof(u64);
const bool is_64_bit = m_parent ? m_parent->Is64Bit() : IsDefault64Bit;
const bool is_user = (type == ThreadType_User);
const bool is_main = (type == ThreadType_Main);
this->GetContext().Initialize(reinterpret_cast<uintptr_t>(func), reinterpret_cast<uintptr_t>(this->GetStackTop()), GetInteger(user_stack_top), arg, is_user, is_64_bit, is_main);
/* Setup the stack parameters. */
StackParameters &sp = this->GetStackParameters();
if (m_parent != nullptr) {
this->SetUnpinnedSvcPermissions();
this->ClearUsermodeExceptionSvcPermissions();
}
sp.caller_save_fpu_registers = std::addressof(m_caller_save_fpu_registers);
sp.cur_thread = this;
sp.disable_count = 1;
this->SetInExceptionHandler();
if (m_parent != nullptr && is_64_bit) {
this->SetFpu64Bit();
}
/* Set thread ID. */
m_thread_id = g_thread_id++;
/* We initialized! */
m_initialized = true;
/* Register ourselves with our parent process. */
if (m_parent != nullptr) {
m_parent->RegisterThread(this);
if (m_parent->IsSuspended()) {
this->RequestSuspend(SuspendType_Process);
}
}
R_SUCCEED();
}
Result KThread::InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type) {
/* Get stack region for the thread. */
const auto &stack_region = KMemoryLayout::GetKernelStackRegion();
MESOSPHERE_ABORT_UNLESS(stack_region.GetEndAddress() != 0);
/* Allocate a page to use as the thread. */
KPageBuffer *page = KPageBuffer::AllocateChecked<PageSize>();
R_UNLESS(page != nullptr, svc::ResultOutOfResource());
/* Map the stack page. */
KProcessAddress stack_top = Null<KProcessAddress>;
{
/* If we fail to map, avoid leaking the page. */
ON_RESULT_FAILURE { KPageBuffer::Free(page); };
/* Perform the mapping. */
KProcessAddress stack_bottom = Null<KProcessAddress>;
R_TRY(Kernel::GetKernelPageTable().MapPages(std::addressof(stack_bottom), 1, PageSize, page->GetPhysicalAddress(), stack_region.GetAddress(),
stack_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));
/* Calculate top of the stack. */
stack_top = stack_bottom + PageSize;
}
/* If we fail, cleanup the stack we mapped. */
ON_RESULT_FAILURE { CleanupKernelStack(GetInteger(stack_top)); };
/* Initialize the thread. */
R_RETURN(thread->Initialize(func, arg, GetVoidPointer(stack_top), user_stack_top, prio, core, owner, type));
}
void KThread::PostDestroy(uintptr_t arg) {
KProcess *owner = reinterpret_cast<KProcess *>(arg & ~1ul);
const bool resource_limit_release_hint = (arg & 1);
const s64 hint_value = (resource_limit_release_hint ? 0 : 1);
if (owner != nullptr) {
owner->ReleaseResource(ams::svc::LimitableResource_ThreadCountMax, 1, hint_value);
owner->Close();
} else {
Kernel::GetSystemResourceLimit().Release(ams::svc::LimitableResource_ThreadCountMax, 1, hint_value);
}
}
void KThread::ResumeThreadsSuspendedForInit() {
KThread::ListAccessor list_accessor;
{
KScopedSchedulerLock sl;
for (auto &thread : list_accessor) {
static_cast<KThread &>(thread).Resume(SuspendType_Init);
}
}
}
void KThread::Finalize() {
MESOSPHERE_ASSERT_THIS();
/* If the thread has an owner process, unregister it. */
if (m_parent != nullptr) {
m_parent->UnregisterThread(this);
}
/* If the thread has a local region, delete it. */
if (m_tls_address != Null<KProcessAddress>) {
MESOSPHERE_R_ABORT_UNLESS(m_parent->DeleteThreadLocalRegion(m_tls_address));
}
/* Release any waiters. */
{
MESOSPHERE_ASSERT(m_waiting_lock_info == nullptr);
KScopedSchedulerLock sl;
/* Check that we have no kernel waiters. */
MESOSPHERE_ABORT_UNLESS(m_num_kernel_waiters == 0);
auto it = m_held_lock_info_list.begin();
while (it != m_held_lock_info_list.end()) {
/* Get the lock info. */
auto * const lock_info = std::addressof(*it);
/* The lock shouldn't have a kernel waiter. */
MESOSPHERE_ASSERT(!IsKernelAddressKey(lock_info->GetAddressKey()));
/* Remove all waiters. */
while (lock_info->GetWaiterCount() != 0) {
/* Get the front waiter. */
KThread * const waiter = lock_info->GetHighestPriorityWaiter();
/* Remove it from the lock. */
if (lock_info->RemoveWaiter(waiter)) {
MESOSPHERE_ASSERT(lock_info->GetWaiterCount() == 0);
}
/* Cancel the thread's wait. */
waiter->CancelWait(svc::ResultInvalidState(), true);
}
/* Remove the held lock from our list. */
it = m_held_lock_info_list.erase(it);
/* Free the lock info. */
LockWithPriorityInheritanceInfo::Free(lock_info);
}
}
/* Cleanup the kernel stack. */
if (m_kernel_stack_top != nullptr) {
CleanupKernelStack(reinterpret_cast<uintptr_t>(m_kernel_stack_top));
}
/* Perform inherited finalization. */
KSynchronizationObject::Finalize();
}
bool KThread::IsSignaled() const {
return m_signaled;
}
void KThread::OnTimer() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* If we're waiting, cancel the wait. */
if (this->GetState() == ThreadState_Waiting) {
m_wait_queue->CancelWait(this, svc::ResultTimedOut(), false);
}
}
void KThread::StartTermination() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Release user exception and unpin, if relevant. */
if (m_parent != nullptr) {
m_parent->ReleaseUserException(this);
if (m_parent->GetPinnedThread(GetCurrentCoreId()) == this) {
m_parent->UnpinCurrentThread();
}
}
/* Set state to terminated. */
this->SetState(KThread::ThreadState_Terminated);
/* Clear the thread's status as running in parent. */
if (m_parent != nullptr) {
m_parent->ClearRunningThread(this);
}
/* Call the on thread termination handler. */
KThreadContext::OnThreadTerminating(this);
/* Clear previous thread in KScheduler. */
KScheduler::ClearPreviousThread(this);
/* Register terminated dpc flag. */
this->RegisterDpc(DpcFlag_Terminated);
}
void KThread::FinishTermination() {
MESOSPHERE_ASSERT_THIS();
/* Ensure that the thread is not executing on any core. */
if (m_parent != nullptr) {
/* Wait for the thread to not be current on any core. */
for (size_t i = 0; i < cpu::NumCores; ++i) {
KThread *core_thread;
do {
core_thread = Kernel::GetScheduler(i).GetSchedulerCurrentThread();
} while (core_thread == this);
}
/* Ensure that all cores are synchronized at this point. */
cpu::SynchronizeCores(m_parent->GetPhysicalCoreMask());
}
/* Acquire the scheduler lock. */
KScopedSchedulerLock sl;
/* Signal. */
m_signaled = true;
KSynchronizationObject::NotifyAvailable();
/* Close the thread. */
this->Close();
}
void KThread::DoWorkerTaskImpl() {
/* Finish the termination that was begun by Exit(). */
this->FinishTermination();
}
void KThread::OnEnterUsermodeException() {
this->SetUsermodeExceptionSvcPermissions();
this->SetInUsermodeExceptionHandler();
}
void KThread::OnLeaveUsermodeException() {
this->ClearUsermodeExceptionSvcPermissions();
/* NOTE: InUsermodeExceptionHandler will be cleared by RestoreContext. */
}
void KThread::Pin() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Set ourselves as pinned. */
this->GetStackParameters().is_pinned = true;
/* Disable core migration. */
MESOSPHERE_ASSERT(m_num_core_migration_disables == 0);
{
++m_num_core_migration_disables;
/* Save our ideal state to restore when we're unpinned. */
m_original_physical_ideal_core_id = m_physical_ideal_core_id;
m_original_physical_affinity_mask = m_physical_affinity_mask;
/* Bind ourselves to this core. */
const s32 active_core = this->GetActiveCore();
const s32 current_core = GetCurrentCoreId();
this->SetActiveCore(current_core);
m_physical_ideal_core_id = current_core;
m_physical_affinity_mask.SetAffinityMask(1ul << current_core);
if (active_core != current_core || m_physical_affinity_mask.GetAffinityMask() != m_original_physical_affinity_mask.GetAffinityMask()) {
KScheduler::OnThreadAffinityMaskChanged(this, m_original_physical_affinity_mask, active_core);
}
/* Set base priority-on-unpin. */
const s32 old_base_priority = m_base_priority;
m_base_priority_on_unpin = old_base_priority;
/* Set base priority to higher than any possible process priority. */
m_base_priority = std::min<s32>(old_base_priority, __builtin_ctzll(this->GetOwnerProcess()->GetPriorityMask()) - 1);
RestorePriority(this);
}
/* Disallow performing thread suspension. */
{
/* Update our allow flags. */
m_suspend_allowed_flags &= ~(1 << (util::ToUnderlying(SuspendType_Thread) + util::ToUnderlying(ThreadState_SuspendShift)));
/* Update our state. */
this->UpdateState();
}
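        /* NOTE (illustrative): suspend requests live in the upper bits of the thread state */
        /* word. Clearing bit (ThreadState_SuspendShift + SuspendType_Thread) in the allowed */
        /* mask means a pending thread-suspend request stops taking effect while pinned; */
        /* UpdateState() then recomputes the visible state, presumably from the request flags */
        /* filtered through this allowed mask. */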
/* Update our SVC access permissions. */
this->SetPinnedSvcPermissions();
}
void KThread::Unpin() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Set ourselves as unpinned. */
this->GetStackParameters().is_pinned = false;
/* Enable core migration. */
MESOSPHERE_ASSERT(m_num_core_migration_disables == 1);
{
--m_num_core_migration_disables;
/* Restore our original state. */
const KAffinityMask old_mask = m_physical_affinity_mask;
m_physical_ideal_core_id = m_original_physical_ideal_core_id;
m_physical_affinity_mask = m_original_physical_affinity_mask;
if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
const s32 active_core = this->GetActiveCore();
if (!m_physical_affinity_mask.GetAffinity(active_core)) {
if (m_physical_ideal_core_id >= 0) {
this->SetActiveCore(m_physical_ideal_core_id);
} else {
this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(m_physical_affinity_mask.GetAffinityMask()));
}
}
KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
}
m_base_priority = m_base_priority_on_unpin;
RestorePriority(this);
}
/* Allow performing thread suspension (if termination hasn't been requested). */
if (!this->IsTerminationRequested()) {
/* Update our allow flags. */
m_suspend_allowed_flags |= (1 << (util::ToUnderlying(SuspendType_Thread) + util::ToUnderlying(ThreadState_SuspendShift)));
/* Update our state. */
this->UpdateState();
/* Update our SVC access permissions. */
MESOSPHERE_ASSERT(m_parent != nullptr);
this->SetUnpinnedSvcPermissions();
}
/* Resume any threads that began waiting on us while we were pinned. */
for (auto it = m_pinned_waiter_list.begin(); it != m_pinned_waiter_list.end(); it = m_pinned_waiter_list.erase(it)) {
it->EndWait(ResultSuccess());
}
}
void KThread::DisableCoreMigration() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());
KScopedSchedulerLock sl;
MESOSPHERE_ASSERT(m_num_core_migration_disables >= 0);
if ((m_num_core_migration_disables++) == 0) {
/* Save our ideal state to restore when we can migrate again. */
m_original_physical_ideal_core_id = m_physical_ideal_core_id;
m_original_physical_affinity_mask = m_physical_affinity_mask;
/* Bind ourselves to this core. */
const s32 active_core = this->GetActiveCore();
m_physical_ideal_core_id = active_core;
m_physical_affinity_mask.SetAffinityMask(1ul << active_core);
if (m_physical_affinity_mask.GetAffinityMask() != m_original_physical_affinity_mask.GetAffinityMask()) {
KScheduler::OnThreadAffinityMaskChanged(this, m_original_physical_affinity_mask, active_core);
}
}
}
void KThread::EnableCoreMigration() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());
KScopedSchedulerLock sl;
MESOSPHERE_ASSERT(m_num_core_migration_disables > 0);
if ((--m_num_core_migration_disables) == 0) {
const KAffinityMask old_mask = m_physical_affinity_mask;
/* Restore our ideals. */
m_physical_ideal_core_id = m_original_physical_ideal_core_id;
m_physical_affinity_mask = m_original_physical_affinity_mask;
if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
const s32 active_core = this->GetActiveCore();
if (!m_physical_affinity_mask.GetAffinity(active_core)) {
if (m_physical_ideal_core_id >= 0) {
this->SetActiveCore(m_physical_ideal_core_id);
} else {
this->SetActiveCore(BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(m_physical_affinity_mask.GetAffinityMask()));
}
}
KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
}
}
}
Result KThread::GetCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask) {
MESOSPHERE_ASSERT_THIS();
{
KScopedSchedulerLock sl;
/* Get the virtual mask. */
*out_ideal_core = m_virtual_ideal_core_id;
*out_affinity_mask = m_virtual_affinity_mask;
}
R_SUCCEED();
}
Result KThread::GetPhysicalCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask) {
MESOSPHERE_ASSERT_THIS();
{
KScopedSchedulerLock sl;
MESOSPHERE_ASSERT(m_num_core_migration_disables >= 0);
/* Select between core mask and original core mask. */
if (m_num_core_migration_disables == 0) {
*out_ideal_core = m_physical_ideal_core_id;
*out_affinity_mask = m_physical_affinity_mask.GetAffinityMask();
} else {
*out_ideal_core = m_original_physical_ideal_core_id;
*out_affinity_mask = m_original_physical_affinity_mask.GetAffinityMask();
}
}
R_SUCCEED();
}
Result KThread::SetCoreMask(int32_t core_id, u64 v_affinity_mask) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(m_parent != nullptr);
MESOSPHERE_ASSERT(v_affinity_mask != 0);
KScopedLightLock lk(m_activity_pause_lock);
/* Set the core mask. */
u64 p_affinity_mask = 0;
{
KScopedSchedulerLock sl;
MESOSPHERE_ASSERT(m_num_core_migration_disables >= 0);
/* If we're updating, set our ideal virtual core. */
if (core_id != ams::svc::IdealCoreNoUpdate) {
m_virtual_ideal_core_id = core_id;
} else {
/* Preserve our ideal core id. */
core_id = m_virtual_ideal_core_id;
R_UNLESS(((1ul << core_id) & v_affinity_mask) != 0, svc::ResultInvalidCombination());
}
/* Set our affinity mask. */
m_virtual_affinity_mask = v_affinity_mask;
/* Translate the virtual core to a physical core. */
if (core_id >= 0) {
core_id = cpu::VirtualToPhysicalCoreMap[core_id];
}
/* Translate the virtual affinity mask to a physical one. */
p_affinity_mask = cpu::ConvertVirtualCoreMaskToPhysical(v_affinity_mask);
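            /* NOTE (illustrative): userland only ever sees virtual core ids; both the ideal */
            /* core and the affinity mask are remapped through the virtual-to-physical tables */
            /* (presumably the identity mapping on a standard NX configuration) before they */
            /* touch any scheduler-visible state. */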
/* If we haven't disabled migration, perform an affinity change. */
if (m_num_core_migration_disables == 0) {
const KAffinityMask old_mask = m_physical_affinity_mask;
/* Set our new ideals. */
m_physical_ideal_core_id = core_id;
m_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
if (m_physical_affinity_mask.GetAffinityMask() != old_mask.GetAffinityMask()) {
const s32 active_core = this->GetActiveCore();
if (active_core >= 0 && !m_physical_affinity_mask.GetAffinity(active_core)) {
const s32 new_core = m_physical_ideal_core_id >= 0 ? m_physical_ideal_core_id : BITSIZEOF(unsigned long long) - 1 - __builtin_clzll(m_physical_affinity_mask.GetAffinityMask());
this->SetActiveCore(new_core);
}
KScheduler::OnThreadAffinityMaskChanged(this, old_mask, active_core);
}
} else {
/* Otherwise, we edit the original affinity for restoration later. */
m_original_physical_ideal_core_id = core_id;
m_original_physical_affinity_mask.SetAffinityMask(p_affinity_mask);
}
}
/* Update the pinned waiter list. */
ThreadQueueImplForKThreadSetProperty wait_queue(std::addressof(m_pinned_waiter_list));
{
bool retry_update;
do {
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* Don't do any further management if our termination has been requested. */
R_SUCCEED_IF(this->IsTerminationRequested());
/* By default, we won't need to retry. */
retry_update = false;
/* Check if the thread is currently running. */
bool thread_is_current = false;
s32 thread_core;
for (thread_core = 0; thread_core < static_cast<s32>(cpu::NumCores); ++thread_core) {
if (Kernel::GetScheduler(thread_core).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
}
/* If the thread is currently running, check whether it's no longer allowed under the new mask. */
if (thread_is_current && ((1ul << thread_core) & p_affinity_mask) == 0) {
/* If the thread is pinned, we want to wait until it's not pinned. */
if (this->GetStackParameters().is_pinned) {
/* Verify that the current thread isn't terminating. */
R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());
/* Wait until the thread isn't pinned any more. */
m_pinned_waiter_list.push_back(GetCurrentThread());
GetCurrentThread().BeginWait(std::addressof(wait_queue));
} else {
/* If the thread isn't pinned, release the scheduler lock and retry until it's not current. */
retry_update = true;
}
}
} while (retry_update);
}
R_SUCCEED();
}
void KThread::SetBasePriority(s32 priority) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(ams::svc::HighestThreadPriority <= priority && priority <= ams::svc::LowestThreadPriority);
KScopedSchedulerLock sl;
/* Determine the priority value to use. */
const s32 target_priority = m_termination_requested.Load() && priority >= TerminatingThreadPriority ? TerminatingThreadPriority : priority;
/* Change our base priority. */
if (this->GetStackParameters().is_pinned) {
m_base_priority_on_unpin = target_priority;
} else {
m_base_priority = target_priority;
}
/* Perform a priority restoration. */
RestorePriority(this);
}
void KThread::IncreaseBasePriority(s32 priority) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(ams::svc::HighestThreadPriority <= priority && priority <= ams::svc::LowestThreadPriority);
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
MESOSPHERE_ASSERT(!this->GetStackParameters().is_pinned);
/* Set our base priority. */
if (m_base_priority > priority) {
m_base_priority = priority;
/* Perform a priority restoration. */
RestorePriority(this);
}
}
Result KThread::SetPriorityToIdle() {
MESOSPHERE_ASSERT_THIS();
KScopedSchedulerLock sl;
/* Change both our priorities to the idle thread priority. */
const s32 old_priority = m_priority;
m_priority = IdleThreadPriority;
m_base_priority = IdleThreadPriority;
KScheduler::OnThreadPriorityChanged(this, old_priority);
R_SUCCEED();
}
void KThread::RequestSuspend(SuspendType type) {
MESOSPHERE_ASSERT_THIS();
KScopedSchedulerLock lk;
/* Note the request in our flags. */
m_suspend_request_flags |= (1u << (util::ToUnderlying(ThreadState_SuspendShift) + util::ToUnderlying(type)));
/* Try to perform the suspend. */
this->TrySuspend();
}
void KThread::Resume(SuspendType type) {
MESOSPHERE_ASSERT_THIS();
KScopedSchedulerLock sl;
/* Clear the request in our flags. */
m_suspend_request_flags &= ~(1u << (util::ToUnderlying(ThreadState_SuspendShift) + util::ToUnderlying(type)));
/* Update our state. */
this->UpdateState();
}
void KThread::WaitCancel() {
MESOSPHERE_ASSERT_THIS();
KScopedSchedulerLock sl;
/* Check if we're waiting and cancellable. */
if (this->GetState() == ThreadState_Waiting && m_cancellable) {
m_wait_cancelled = false;
m_wait_queue->CancelWait(this, svc::ResultCancelled(), true);
} else {
/* Otherwise, note that we cancelled a wait. */
m_wait_cancelled = true;
}
}
void KThread::TrySuspend() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
MESOSPHERE_ASSERT(this->IsSuspendRequested());
/* Ensure that we have no waiters. */
if (this->GetNumKernelWaiters() > 0) {
return;
}
MESOSPHERE_ABORT_UNLESS(this->GetNumKernelWaiters() == 0);
/* Perform the suspend. */
this->UpdateState();
}
void KThread::UpdateState() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Set our suspend flags in state. */
const auto old_state = m_thread_state;
const auto new_state = static_cast<ThreadState>(this->GetSuspendFlags() | (old_state & ThreadState_Mask));
m_thread_state = new_state;
/* Note the state change in scheduler. */
if (new_state != old_state) {
KScheduler::OnThreadStateChanged(this, old_state);
}
}
void KThread::Continue() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Clear our suspend flags in state. */
const auto old_state = m_thread_state;
m_thread_state = static_cast<ThreadState>(old_state & ThreadState_Mask);
/* Note the state change in scheduler. */
KScheduler::OnThreadStateChanged(this, old_state);
}
size_t KThread::GetKernelStackUsage() const {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(m_kernel_stack_top != nullptr);
#if defined(MESOSPHERE_ENABLE_KERNEL_STACK_USAGE)
const u8 *stack = static_cast<const u8 *>(m_kernel_stack_top) - PageSize;
size_t i;
for (i = 0; i < PageSize; ++i) {
if (stack[i] != 0xCC) {
break;
}
}
return PageSize - i;
#else
return 0;
#endif
}
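    /* NOTE (illustrative): this is a stack watermark -- InitializeKernelStack fills the */
    /* page with 0xCC, and usage is measured up to the first byte (scanning from the bottom) */
    /* that no longer holds the fill pattern. The StackParameters block at the top is never */
    /* filled, so it always counts toward the reported usage. */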
Result KThread::SetActivity(ams::svc::ThreadActivity activity) {
/* Lock ourselves. */
KScopedLightLock lk(m_activity_pause_lock);
/* Set the activity. */
{
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* Verify our state. */
const auto cur_state = this->GetState();
R_UNLESS((cur_state == ThreadState_Waiting || cur_state == ThreadState_Runnable), svc::ResultInvalidState());
/* Either pause or resume. */
if (activity == ams::svc::ThreadActivity_Paused) {
/* Verify that we're not suspended. */
R_UNLESS(!this->IsSuspendRequested(SuspendType_Thread), svc::ResultInvalidState());
/* Suspend. */
this->RequestSuspend(SuspendType_Thread);
} else {
MESOSPHERE_ASSERT(activity == ams::svc::ThreadActivity_Runnable);
/* Verify that we're suspended. */
R_UNLESS(this->IsSuspendRequested(SuspendType_Thread), svc::ResultInvalidState());
/* Resume. */
this->Resume(SuspendType_Thread);
}
}
/* If the thread is now paused, update the pinned waiter list. */
if (activity == ams::svc::ThreadActivity_Paused) {
ThreadQueueImplForKThreadSetProperty wait_queue(std::addressof(m_pinned_waiter_list));
bool thread_is_current;
do {
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* Don't do any further management if our termination has been requested. */
R_SUCCEED_IF(this->IsTerminationRequested());
/* By default, treat the thread as not current. */
thread_is_current = false;
/* Check whether the thread is pinned. */
if (this->GetStackParameters().is_pinned) {
/* Verify that the current thread isn't terminating. */
R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());
/* Wait until the thread isn't pinned any more. */
m_pinned_waiter_list.push_back(GetCurrentThread());
GetCurrentThread().BeginWait(std::addressof(wait_queue));
} else {
/* Check if the thread is currently running. */
/* If it is, we'll need to retry. */
for (auto i = 0; i < static_cast<s32>(cpu::NumCores); ++i) {
if (Kernel::GetScheduler(i).GetSchedulerCurrentThread() == this) {
thread_is_current = true;
break;
}
}
}
} while (thread_is_current);
}
R_SUCCEED();
}
Result KThread::GetThreadContext3(ams::svc::ThreadContext *out) {
/* Lock ourselves. */
KScopedLightLock lk(m_activity_pause_lock);
/* Get the context. */
{
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* Verify that we're suspended. */
R_UNLESS(this->IsSuspendRequested(SuspendType_Thread), svc::ResultInvalidState());
/* If we're not terminating, get the thread's user context. */
if (!this->IsTerminationRequested()) {
GetUserContext(out, this);
}
}
R_SUCCEED();
}
void KThread::AddHeldLock(LockWithPriorityInheritanceInfo *lock_info) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Set ourselves as the lock's owner. */
lock_info->SetOwner(this);
/* Add the lock to our held list. */
m_held_lock_info_list.push_front(*lock_info);
}
KThread::LockWithPriorityInheritanceInfo *KThread::FindHeldLock(KProcessAddress address_key) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Try to find an existing held lock. */
for (auto &held_lock : m_held_lock_info_list) {
if (held_lock.GetAddressKey() == address_key) {
return std::addressof(held_lock);
}
}
return nullptr;
}
void KThread::AddWaiterImpl(KThread *thread) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
MESOSPHERE_ASSERT(thread->GetConditionVariableTree() == nullptr);
/* Get the thread's address key. */
const auto address_key = thread->GetAddressKey();
/* Keep track of how many kernel waiters we have. */
if (IsKernelAddressKey(address_key)) {
MESOSPHERE_ABORT_UNLESS((m_num_kernel_waiters++) >= 0);
KScheduler::SetSchedulerUpdateNeeded();
}
/* Get the relevant lock info. */
auto *lock_info = this->FindHeldLock(address_key);
if (lock_info == nullptr) {
/* Create a new lock for the address key. */
lock_info = LockWithPriorityInheritanceInfo::Create(address_key);
/* Add the new lock to our list. */
this->AddHeldLock(lock_info);
}
/* Add the thread as waiter to the lock info. */
lock_info->AddWaiter(thread);
}
void KThread::RemoveWaiterImpl(KThread *thread) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Keep track of how many kernel waiters we have. */
if (IsKernelAddressKey(thread->GetAddressKey())) {
MESOSPHERE_ABORT_UNLESS((m_num_kernel_waiters--) > 0);
KScheduler::SetSchedulerUpdateNeeded();
}
/* Get the info for the lock the thread is waiting on. */
auto *lock_info = thread->GetWaitingLockInfo();
MESOSPHERE_ASSERT(lock_info->GetOwner() == this);
/* Remove the waiter. */
if (lock_info->RemoveWaiter(thread)) {
m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info));
LockWithPriorityInheritanceInfo::Free(lock_info);
}
}
void KThread::RestorePriority(KThread *thread) {
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
while (thread != nullptr) {
/* We want to inherit priority where possible. */
s32 new_priority = thread->GetBasePriority();
for (const auto &held_lock : thread->m_held_lock_info_list) {
new_priority = std::min(new_priority, held_lock.GetHighestPriorityWaiter()->GetPriority());
}
/* If the priority we would inherit is not different from ours, don't do anything. */
if (new_priority == thread->GetPriority()) {
return;
}
/* Get the owner of whatever lock this thread is waiting on. */
KThread * const lock_owner = thread->GetLockOwner();
/* If the thread is waiting on some lock, remove it as a waiter to prevent violating red black tree invariants. */
if (lock_owner != nullptr) {
lock_owner->RemoveWaiterImpl(thread);
}
/* Ensure we don't violate condition variable red black tree invariants. */
if (auto *cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
BeforeUpdatePriority(cv_tree, thread);
}
/* Change the priority. */
const s32 old_priority = thread->GetPriority();
thread->SetPriority(new_priority);
/* Restore the condition variable, if relevant. */
if (auto *cv_tree = thread->GetConditionVariableTree(); cv_tree != nullptr) {
AfterUpdatePriority(cv_tree, thread);
}
/* If we removed the thread from some lock's waiting list, add it back. */
if (lock_owner != nullptr) {
lock_owner->AddWaiterImpl(thread);
}
/* Update the scheduler. */
KScheduler::OnThreadPriorityChanged(thread, old_priority);
/* Continue inheriting priority. */
thread = lock_owner;
}
}
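/* NOTE (illustrative sketch, not part of the original sources; thread names are hypothetical): */
/* priority inheritance propagates along the chain of lock owners. Suppose thread A */
/* (priority 40) holds lock L1, and thread B (priority 30) holds lock L2 while waiting */
/* on L1. If thread C (priority 20) then blocks on L2: */
/* */
/*     B->AddWaiter(C);   // C's priority (20) beats B's (30), so RestorePriority(B) runs. */
/*                        // B inherits 20; B is waiting on L1 owned by A, so the loop */
/*                        // continues with thread = A, and A inherits 20 as well. */
/* */
/* Lower values are higher priorities, which is why inheritance takes the minimum above. */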
void KThread::AddWaiter(KThread *thread) {
MESOSPHERE_ASSERT_THIS();
this->AddWaiterImpl(thread);
/* If the thread has a higher priority than us, we should inherit. */
if (thread->GetPriority() < this->GetPriority()) {
RestorePriority(this);
}
}
void KThread::RemoveWaiter(KThread *thread) {
MESOSPHERE_ASSERT_THIS();
this->RemoveWaiterImpl(thread);
/* If our priority is the same as the thread's (and we've inherited), we may need to restore to lower priority. */
if (this->GetPriority() == thread->GetPriority() && this->GetPriority() < this->GetBasePriority()) {
RestorePriority(this);
}
}
KThread *KThread::RemoveWaiterByKey(bool *out_has_waiters, KProcessAddress key) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
/* Get the relevant lock info. */
auto *lock_info = this->FindHeldLock(key);
if (lock_info == nullptr) {
*out_has_waiters = false;
return nullptr;
}
/* Remove the lock info from our held list. */
m_held_lock_info_list.erase(m_held_lock_info_list.iterator_to(*lock_info));
/* Keep track of how many kernel waiters we have. */
if (IsKernelAddressKey(lock_info->GetAddressKey())) {
m_num_kernel_waiters -= lock_info->GetWaiterCount();
MESOSPHERE_ABORT_UNLESS(m_num_kernel_waiters >= 0);
KScheduler::SetSchedulerUpdateNeeded();
}
MESOSPHERE_ASSERT(lock_info->GetWaiterCount() > 0);
/* Remove the highest priority waiter from the lock to be the next owner. */
KThread *next_lock_owner = lock_info->GetHighestPriorityWaiter();
if (lock_info->RemoveWaiter(next_lock_owner)) {
/* The new owner was the only waiter. */
*out_has_waiters = false;
/* Free the lock info, since it has no waiters. */
LockWithPriorityInheritanceInfo::Free(lock_info);
} else {
/* There are additional waiters on the lock. */
*out_has_waiters = true;
/* Add the lock to the new owner's held list. */
next_lock_owner->AddHeldLock(lock_info);
/* Keep track of any kernel waiters for the new owner. */
if (IsKernelAddressKey(lock_info->GetAddressKey())) {
next_lock_owner->m_num_kernel_waiters += lock_info->GetWaiterCount();
MESOSPHERE_ABORT_UNLESS(next_lock_owner->m_num_kernel_waiters > 0);
/* NOTE: No need to set scheduler update needed, because we will have already done so when removing earlier. */
}
}
/* If our priority is the same as the next owner's (and we've inherited), we may need to restore to lower priority. */
if (this->GetPriority() == next_lock_owner->GetPriority() && this->GetPriority() < this->GetBasePriority()) {
RestorePriority(this);
/* NOTE: No need to restore priority on the next lock owner, because it was already the highest priority waiter on the lock. */
}
/* Return the next lock owner. */
return next_lock_owner;
}
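/* NOTE (illustrative sketch, not part of the original sources): a mutex-unlock path */
/* might use RemoveWaiterByKey to hand the lock to the best waiter, roughly: */
/* */
/*     bool has_waiters; */
/*     if (KThread *next = owner->RemoveWaiterByKey(std::addressof(has_waiters), key); next != nullptr) { */
/*         next->EndWait(ResultSuccess());   // wake the new owner */
/*     } */
/* */
/* Here has_waiters tells the caller whether the lock should still be treated as contended. */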
Result KThread::Run() {
MESOSPHERE_ASSERT_THIS();
/* If the kernel hasn't finished initializing, then we should suspend. */
if (Kernel::GetState() != Kernel::State::Initialized) {
this->RequestSuspend(SuspendType_Init);
}
while (true) {
KScopedSchedulerLock lk;
/* If either this thread or the current thread is requesting termination, note it. */
R_UNLESS(!this->IsTerminationRequested(), svc::ResultTerminationRequested());
R_UNLESS(!GetCurrentThread().IsTerminationRequested(), svc::ResultTerminationRequested());
/* Ensure our thread state is correct. */
R_UNLESS(this->GetState() == ThreadState_Initialized, svc::ResultInvalidState());
/* If the current thread has been asked to suspend, suspend it and retry. */
if (GetCurrentThread().IsSuspended()) {
GetCurrentThread().UpdateState();
continue;
}
/* If we're not a kernel thread and we've been asked to suspend, suspend ourselves. */
if (KProcess *parent = this->GetOwnerProcess(); parent != nullptr) {
if (this->IsSuspended()) {
this->UpdateState();
}
parent->IncrementRunningThreadCount();
}
/* Open a reference, now that we're running. */
this->Open();
/* Set our state and finish. */
this->SetState(KThread::ThreadState_Runnable);
R_SUCCEED();
}
}
void KThread::Exit() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());
/* Call the debug callback. */
KDebug::OnExitThread(this);
/* Release the thread resource hint and running thread count from our parent. */
if (m_parent != nullptr) {
m_parent->ReleaseResource(ams::svc::LimitableResource_ThreadCountMax, 0, 1);
m_resource_limit_release_hint = true;
m_parent->DecrementRunningThreadCount();
}
/* Destroy any dependent objects. */
this->DestroyClosedObjects();
/* Perform termination. */
{
KScopedSchedulerLock sl;
/* Disallow all suspension. */
m_suspend_allowed_flags = 0;
this->UpdateState();
/* Start termination. */
this->StartTermination();
/* Register the thread as a work task. */
KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_ExitThread, this);
}
MESOSPHERE_PANIC("KThread::Exit() would return");
}
Result KThread::Terminate() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this != GetCurrentThreadPointer());
/* Request the thread terminate if it hasn't already. */
if (const auto new_state = this->RequestTerminate(); new_state != ThreadState_Terminated) {
/* If the thread isn't terminated, wait for it to terminate. */
s32 index;
KSynchronizationObject *objects[] = { this };
R_TRY(KSynchronizationObject::Wait(std::addressof(index), objects, 1, ams::svc::WaitInfinite));
}
R_SUCCEED();
}
KThread::ThreadState KThread::RequestTerminate() {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this != GetCurrentThreadPointer());
KScopedSchedulerLock sl;
/* Determine if this is the first termination request. */
const bool first_request = [&]() ALWAYS_INLINE_LAMBDA -> bool {
/* Perform an atomic compare-and-swap from false to true. */
bool expected = false;
return m_termination_requested.CompareExchangeStrong(expected, true);
}();
/* If this is the first request, start termination procedure. */
if (first_request) {
/* If the thread is in initialized state, just change state to terminated. */
if (this->GetState() == ThreadState_Initialized) {
m_thread_state = ThreadState_Terminated;
return ThreadState_Terminated;
}
/* Register the terminating dpc. */
this->RegisterDpc(DpcFlag_Terminating);
/* If the thread is pinned, unpin it. */
if (this->GetStackParameters().is_pinned) {
this->GetOwnerProcess()->UnpinThread(this);
}
/* If the thread is suspended, continue it. */
if (this->IsSuspended()) {
m_suspend_allowed_flags = 0;
this->UpdateState();
}
/* Change the thread's priority to be higher than any system thread's. */
this->IncreaseBasePriority(TerminatingThreadPriority);
/* If the thread is runnable, send a termination interrupt to other cores. */
if (this->GetState() == ThreadState_Runnable) {
if (const u64 core_mask = m_physical_affinity_mask.GetAffinityMask() & ~(1ul << GetCurrentCoreId()); core_mask != 0) {
cpu::DataSynchronizationBarrierInnerShareable();
Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_ThreadTerminate, core_mask);
}
}
/* Wake up the thread. */
if (this->GetState() == ThreadState_Waiting) {
m_wait_queue->CancelWait(this, svc::ResultTerminationRequested(), true);
}
}
return this->GetState();
}
Result KThread::Sleep(s64 timeout) {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(!KScheduler::IsSchedulerLockedByCurrentThread());
MESOSPHERE_ASSERT(this == GetCurrentThreadPointer());
MESOSPHERE_ASSERT(timeout > 0);
ThreadQueueImplForKThreadSleep wait_queue;
KHardwareTimer *timer;
{
/* Setup the scheduling lock and sleep. */
KScopedSchedulerLockAndSleep slp(std::addressof(timer), this, timeout);
/* Check if the thread should terminate. */
if (this->IsTerminationRequested()) {
slp.CancelSleep();
R_THROW(svc::ResultTerminationRequested());
}
/* Wait for the sleep to end. */
wait_queue.SetHardwareTimer(timer);
this->BeginWait(std::addressof(wait_queue));
}
R_SUCCEED();
}
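/* NOTE (illustrative, not part of the original sources): KScopedSchedulerLockAndSleep */
/* defers the actual sleep to its destructor, which registers the timer task and drops */
/* the scheduler lock, triggering the context switch. A minimal sketch of the idiom: */
/* */
/*     KHardwareTimer *timer; */
/*     { */
/*         KScopedSchedulerLockAndSleep slp(std::addressof(timer), thread, timeout); */
/*         if (must_cancel) { slp.CancelSleep(); return; }   // no wait occurs */
/*         queue.SetHardwareTimer(timer); */
/*         thread->BeginWait(std::addressof(queue));         // sleep happens at scope exit */
/*     } */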
void KThread::BeginWait(KThreadQueue *queue) {
/* Set our state as waiting. */
this->SetState(ThreadState_Waiting);
/* Set our wait queue. */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdangling-pointer"
m_wait_queue = queue;
#pragma GCC diagnostic pop
}
void KThread::NotifyAvailable(KSynchronizationObject *signaled_object, Result wait_result) {
MESOSPHERE_ASSERT_THIS();
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* If we're waiting, notify our queue that we're available. */
if (this->GetState() == ThreadState_Waiting) {
m_wait_queue->NotifyAvailable(this, signaled_object, wait_result);
}
}
void KThread::EndWait(Result wait_result) {
MESOSPHERE_ASSERT_THIS();
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* If we're waiting, tell our queue to end the wait. */
if (this->GetState() == ThreadState_Waiting) {
m_wait_queue->EndWait(this, wait_result);
}
}
void KThread::CancelWait(Result wait_result, bool cancel_timer_task) {
MESOSPHERE_ASSERT_THIS();
/* Lock the scheduler. */
KScopedSchedulerLock sl;
/* If we're waiting, tell our queue to cancel the wait. */
if (this->GetState() == ThreadState_Waiting) {
m_wait_queue->CancelWait(this, wait_result, cancel_timer_task);
}
}
void KThread::SetState(ThreadState state) {
MESOSPHERE_ASSERT_THIS();
KScopedSchedulerLock sl;
const ThreadState old_state = m_thread_state;
m_thread_state = static_cast<ThreadState>((old_state & ~ThreadState_Mask) | (state & ThreadState_Mask));
if (m_thread_state != old_state) {
KScheduler::OnThreadStateChanged(this, old_state);
}
}
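/* NOTE (illustrative, not part of the original sources): the masked update above */
/* changes only the low state bits, preserving any flag bits stored alongside them. */
/* Assuming a suspend flag lives above ThreadState_Mask: */
/* */
/*     m_thread_state = Flag_Suspended | ThreadState_Waiting; */
/*     SetState(ThreadState_Runnable); */
/*     // m_thread_state == Flag_Suspended | ThreadState_Runnable */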
KThread *KThread::GetThreadFromId(u64 thread_id) {
/* Lock the list. */
KThread::ListAccessor accessor;
const auto end = accessor.end();
/* Find the object with the right id. */
if (const auto it = accessor.find_key(thread_id); it != end) {
/* Try to open the thread. */
if (KThread *thread = static_cast<KThread *>(std::addressof(*it)); AMS_LIKELY(thread->Open())) {
MESOSPHERE_ASSERT(thread->GetId() == thread_id);
return thread;
}
}
/* We failed to find or couldn't open the thread. */
return nullptr;
}
Result KThread::GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer<u64 *> out_thread_ids, s32 max_out_count) {
/* Lock the list. */
KThread::ListAccessor accessor;
const auto end = accessor.end();
/* Iterate over the list. */
s32 count = 0;
for (auto it = accessor.begin(); it != end; ++it) {
/* If we're within array bounds, write the id. */
if (count < max_out_count) {
/* Get the thread id. */
KThread *thread = static_cast<KThread *>(std::addressof(*it));
const u64 id = thread->GetId();
/* Copy the id to userland. */
R_TRY(out_thread_ids.CopyArrayElementFrom(std::addressof(id), count));
}
/* Increment the count. */
++count;
}
/* We successfully iterated the list. */
*out_num_threads = count;
R_SUCCEED();
}
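/* NOTE (illustrative, not part of the original sources): the loop never fails on */
/* overflow; it copies at most max_out_count ids but still reports the full count, */
/* so a caller can size a retry buffer: */
/* */
/*     s32 total; */
/*     R_TRY(KThread::GetThreadList(std::addressof(total), out_ids, max)); */
/*     const s32 written = std::min(total, max);   // ids actually copied */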
}
| 59,839 | C++ | .cpp | 1,213 | 37.788129 | 200 | 0.588089 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,984 | kern_k_light_condition_variable.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_light_condition_variable.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
class ThreadQueueImplForKLightConditionVariable final : public KThreadQueue {
private:
KThread::WaiterList *m_wait_list;
bool m_allow_terminating_thread;
public:
constexpr ThreadQueueImplForKLightConditionVariable(KThread::WaiterList *wl, bool term) : KThreadQueue(), m_wait_list(wl), m_allow_terminating_thread(term) { /* ... */ }
virtual void CancelWait(KThread *waiting_thread, Result wait_result, bool cancel_timer_task) override {
/* If the wait is being canceled due to termination and terminating threads are allowed to keep waiting, do nothing. */
if (svc::ResultTerminationRequested::Includes(wait_result) && m_allow_terminating_thread) {
return;
}
/* Remove the waiting thread from the light condition variable. */
m_wait_list->erase(m_wait_list->iterator_to(*waiting_thread));
/* Invoke the base cancel wait handler. */
KThreadQueue::CancelWait(waiting_thread, wait_result, cancel_timer_task);
}
};
}
void KLightConditionVariable::Wait(KLightLock *lock, s64 timeout, bool allow_terminating_thread) {
/* Create thread queue. */
KThread *owner = GetCurrentThreadPointer();
KHardwareTimer *timer;
ThreadQueueImplForKLightConditionVariable wait_queue(std::addressof(m_wait_list), allow_terminating_thread);
/* Sleep the thread. */
{
KScopedSchedulerLockAndSleep lk(std::addressof(timer), owner, timeout);
if (!allow_terminating_thread && owner->IsTerminationRequested()) {
lk.CancelSleep();
return;
}
lock->Unlock();
/* Add the thread to the queue. */
m_wait_list.push_back(*owner);
/* Begin waiting. */
wait_queue.SetHardwareTimer(timer);
owner->BeginWait(std::addressof(wait_queue));
}
/* Re-acquire the lock. */
lock->Lock();
}
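/* NOTE (illustrative, not part of the original sources): expected usage follows the */
/* classic monitor pattern; the lock guards the predicate and is re-acquired before */
/* Wait() returns: */
/* */
/*     lock.Lock(); */
/*     while (!predicate()) { */
/*         cv.Wait(std::addressof(lock), -1ll, true); */
/*     } */
/*     lock.Unlock(); */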
void KLightConditionVariable::Broadcast() {
KScopedSchedulerLock lk;
/* Signal all threads. */
for (auto it = m_wait_list.begin(); it != m_wait_list.end(); it = m_wait_list.erase(it)) {
it->EndWait(ResultSuccess());
}
}
}
| 3,079 | C++ | .cpp | 66 | 36.621212 | 185 | 0.622037 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,985 | kern_k_readable_event.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_readable_event.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
void KReadableEvent::Initialize(KEvent *parent) {
MESOSPHERE_ASSERT_THIS();
m_is_signaled = false;
m_parent = parent;
if (m_parent != nullptr) {
m_parent->Open();
}
}
bool KReadableEvent::IsSignaled() const {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
return m_is_signaled;
}
void KReadableEvent::Destroy() {
MESOSPHERE_ASSERT_THIS();
if (m_parent) {
{
KScopedSchedulerLock sl;
m_parent->OnReadableEventDestroyed();
}
m_parent->Close();
}
}
Result KReadableEvent::Signal() {
MESOSPHERE_ASSERT_THIS();
KScopedSchedulerLock lk;
if (!m_is_signaled) {
m_is_signaled = true;
this->NotifyAvailable();
}
R_SUCCEED();
}
Result KReadableEvent::Reset() {
MESOSPHERE_ASSERT_THIS();
KScopedSchedulerLock lk;
R_UNLESS(m_is_signaled, svc::ResultInvalidState());
m_is_signaled = false;
R_SUCCEED();
}
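/* NOTE (illustrative, not part of the original sources): Signal/Reset implement a */
/* latching flag under the scheduler lock. A hypothetical sequence: */
/* */
/*     event.Signal();   // m_is_signaled: false -> true, waiters are notified */
/*     event.Signal();   // already signaled, no further notification */
/*     event.Reset();    // true -> false, succeeds */
/*     event.Reset();    // fails with svc::ResultInvalidState() */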
}
| 1,856 | C++ | .cpp | 57 | 25.561404 | 76 | 0.628716 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,986 | kern_k_dpc_manager.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/kern_k_dpc_manager.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern {
namespace {
class KDpcTask {
private:
static constinit inline KLightLock s_req_lock;
static constinit inline KLightLock s_lock;
static constinit inline KLightConditionVariable s_cond_var{util::ConstantInitialize};
static constinit inline u64 s_core_mask;
static constinit inline KDpcTask *s_task;
private:
static bool HasRequest(s32 core_id) {
return (s_core_mask & (1ull << core_id)) != 0;
}
static void SetRequest(s32 core_id) {
s_core_mask |= (1ull << core_id);
}
static void ClearRequest(s32 core_id) {
s_core_mask &= ~(1ull << core_id);
}
public:
virtual void DoTask() { /* ... */ }
static void Request(KDpcTask *task) {
KScopedLightLock rlk(s_req_lock);
/* Acquire the requested task. */
MESOSPHERE_ABORT_UNLESS(s_task == nullptr);
s_task = task;
{
KScopedLightLock lk(s_lock);
MESOSPHERE_ABORT_UNLESS(s_core_mask == 0);
for (auto core = 0; core < static_cast<s32>(cpu::NumCores); ++core) {
SetRequest(core);
}
s_cond_var.Broadcast();
while (s_core_mask != 0) {
s_cond_var.Wait(std::addressof(s_lock), -1ll);
}
}
s_task = nullptr;
}
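/* NOTE (illustrative, not part of the original sources): Request() is a broadcast */
/* rendezvous. A hypothetical timeline on a two-core system: */
/* */
/*     requester: s_task = task; s_core_mask = 0b11; s_cond_var.Broadcast() */
/*     core 0:    WaitForRequest() returns; HandleRequest() clears bit 0 */
/*     core 1:    WaitForRequest() returns; HandleRequest() clears bit 1; */
/*                the mask is now 0, so it broadcasts and the requester wakes */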
static void WaitForRequest() {
/* Wait for a request to come in. */
const auto core_id = GetCurrentCoreId();
KScopedLightLock lk(s_lock);
while (!HasRequest(core_id)) {
s_cond_var.Wait(std::addressof(s_lock), -1ll);
}
}
static bool TimedWaitForRequest(s64 timeout) {
/* Wait for a request to come in. */
const auto core_id = GetCurrentCoreId();
KScopedLightLock lk(s_lock);
while (!HasRequest(core_id)) {
s_cond_var.Wait(std::addressof(s_lock), timeout);
if (KHardwareTimer::GetTick() >= timeout) {
return false;
}
}
return true;
}
static void HandleRequest() {
/* Perform the request. */
s_task->DoTask();
/* Clear the request. */
const auto core_id = GetCurrentCoreId();
KScopedLightLock lk(s_lock);
ClearRequest(core_id);
if (s_core_mask == 0) {
s_cond_var.Broadcast();
}
}
};
/* Convenience definitions. */
constexpr s32 DpcManagerThreadPriority = 3;
constexpr s64 DpcManagerTimeout = ams::svc::Tick(TimeSpan::FromMilliSeconds(10));
/* Globals. */
s64 g_preemption_priorities[cpu::NumCores];
/* Manager thread functions. */
void DpcManagerNormalThreadFunction(uintptr_t arg) {
/* Input argument goes unused. */
MESOSPHERE_UNUSED(arg);
/* Forever wait and service requests. */
while (true) {
KDpcTask::WaitForRequest();
KDpcTask::HandleRequest();
}
}
void DpcManagerPreemptionThreadFunction(uintptr_t arg) {
/* Input argument goes unused. */
MESOSPHERE_UNUSED(arg);
/* Forever wait and service requests, rotating the scheduled queue every 10 ms. */
s64 timeout = KHardwareTimer::GetTick() + DpcManagerTimeout;
while (true) {
if (KDpcTask::TimedWaitForRequest(timeout)) {
KDpcTask::HandleRequest();
} else {
/* Rotate the scheduler queue for each core. */
KScopedSchedulerLock lk;
for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) {
if (const s32 priority = g_preemption_priorities[core_id]; priority > DpcManagerThreadPriority) {
KScheduler::RotateScheduledQueue(static_cast<s32>(core_id), priority);
}
}
/* Update our next timeout. */
timeout = KHardwareTimer::GetTick() + DpcManagerTimeout;
}
}
}
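/* NOTE (illustrative, not part of the original sources): the deadline arithmetic */
/* above yields a 10 ms rotation period: */
/* */
/*     timeout = KHardwareTimer::GetTick() + DpcManagerTimeout;   // absolute deadline */
/*     TimedWaitForRequest(timeout)   // returns false once the deadline passes, */
/*                                    // at which point each core's queue is rotated */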
}
void KDpcManager::Initialize(s32 core_id, s32 priority) {
/* Reserve a thread from the system limit. */
MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1));
/* Create a new thread. */
KThread *new_thread = KThread::Create();
MESOSPHERE_ABORT_UNLESS(new_thread != nullptr);
/* Launch the new thread. */
g_preemption_priorities[core_id] = priority;
if (core_id == cpu::NumCores - 1) {
MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, DpcManagerPreemptionThreadFunction, 0, DpcManagerThreadPriority, core_id));
} else {
MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, DpcManagerNormalThreadFunction, 0, DpcManagerThreadPriority, core_id));
}
/* Register the new thread. */
KThread::Register(new_thread);
/* Run the thread. */
new_thread->Run();
}
void KDpcManager::HandleDpc() {
MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled());
MESOSPHERE_ASSERT(!KScheduler::IsSchedulerLockedByCurrentThread());
/* Get reference to the current thread. */
KThread &cur_thread = GetCurrentThread();
/* Enable interrupts, temporarily. */
KScopedInterruptEnable ei;
/* If the thread is scheduled for termination, exit the thread. */
if (cur_thread.IsTerminationRequested()) {
cur_thread.Exit();
__builtin_unreachable();
}
/* We may also need to destroy any closed objects. */
cur_thread.DestroyClosedObjects();
}
void KDpcManager::Sync() {
MESOSPHERE_ASSERT(!KScheduler::IsSchedulerLockedByCurrentThread());
KDpcTask dummy_task;
KDpcTask::Request(std::addressof(dummy_task));
}
}
| 7,489 | C++ | .cpp | 163 | 31.527607 | 157 | 0.536425 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,987 | kern_lps_driver.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/board/nintendo/nx/kern_lps_driver.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#include "kern_lps_driver.hpp"
#include "kern_k_sleep_manager.hpp"
#include "kern_bpmp_api.hpp"
#include "kern_atomics_registers.hpp"
#include "kern_ictlr_registers.hpp"
#include "kern_sema_registers.hpp"
namespace ams::kern::board::nintendo::nx::lps {
namespace {
constexpr inline int ChannelCount = 12;
constexpr inline TimeSpan ChannelTimeout = TimeSpan::FromSeconds(1);
constinit bool g_lps_init_done = false;
constinit bool g_bpmp_connected = false;
constinit bool g_bpmp_mail_initialized = false;
constinit KSpinLock g_bpmp_mrq_lock;
constinit KVirtualAddress g_evp_address = Null<KVirtualAddress>;
constinit KVirtualAddress g_flow_address = Null<KVirtualAddress>;
constinit KVirtualAddress g_prictlr_address = Null<KVirtualAddress>;
constinit KVirtualAddress g_sema_address = Null<KVirtualAddress>;
constinit KVirtualAddress g_atomics_address = Null<KVirtualAddress>;
constinit KVirtualAddress g_clkrst_address = Null<KVirtualAddress>;
constinit KVirtualAddress g_pmc_address = Null<KVirtualAddress>;
constinit ChannelData g_channel_area[ChannelCount] = {};
constinit u32 g_csite_clk_source = 0;
ALWAYS_INLINE u32 Read(KVirtualAddress address) {
return *GetPointer<volatile u32>(address);
}
ALWAYS_INLINE void Write(KVirtualAddress address, u32 value) {
*GetPointer<volatile u32>(address) = value;
}
void InitializeDeviceVirtualAddresses() {
/* Retrieve randomized mappings. */
g_evp_address = KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_LegacyLpsExceptionVectors);
g_flow_address = KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_LegacyLpsFlowController);
g_prictlr_address = KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_LegacyLpsPrimaryICtlr);
g_sema_address = KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_LegacyLpsSemaphore);
g_atomics_address = KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_LegacyLpsAtomics);
g_clkrst_address = KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_LegacyLpsClkRst);
g_pmc_address = KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_PowerManagementController);
}
/* NOTE: linux "do_cc4_init" */
void ConfigureCc3AndCc4() {
/* Configure CC4/CC3 as enabled with time threshold as 2 microseconds. */
Write(g_flow_address + FLOW_CTLR_CC4_HVC_CONTROL, (0x2 << 3) | 0x1);
/* Configure Retention with threshold 2 microseconds. */
Write(g_flow_address + FLOW_CTLR_CC4_RETENTION_CONTROL, (0x2 << 3));
/* Configure CC4/CC3 retry threshold as 2 microseconds. */
Write(g_flow_address + FLOW_CTLR_CC4_HVC_RETRY, (0x2 << 3));
/* Read the retry register to ensure writes take. */
Read(g_flow_address + FLOW_CTLR_CC4_HVC_RETRY);
}
constexpr bool IsValidMessageDataSize(int size) {
return 0 <= size && size < MessageDataSizeMax;
}
/* NOTE: linux "bpmp_valid_txfer" */
constexpr bool IsTransferValid(const void *ob, int ob_size, void *ib, int ib_size) {
return IsValidMessageDataSize(ob_size) && IsValidMessageDataSize(ib_size) && (ob_size == 0 || ob != nullptr) && (ib_size == 0 || ib != nullptr);
}
/* NOTE: linux "bpmp_ob_channel" */
int BpmpGetOutboundChannel() {
return GetCurrentCoreId();
}
/* NOTE: linux "bpmp_ch_sta" */
u32 BpmpGetChannelState(int channel) {
cpu::DataSynchronizationBarrier();
return Read(g_sema_address + RES_SEMA_SHRD_SMP_STA) & CH_MASK(channel);
}
/* NOTE: linux "bpmp_master_free" */
bool BpmpIsMasterFree(int channel) {
return BpmpGetChannelState(channel) == MA_FREE(channel);
}
/* NOTE: linux "bpmp_master_acked" */
bool BpmpIsMasterAcked(int channel) {
return BpmpGetChannelState(channel) == MA_ACKD(channel);
}
/* NOTE: linux "bpmp_signal_slave" */
void BpmpSignalSlave(int channel) {
Write(g_sema_address + RES_SEMA_SHRD_SMP_CLR, CH_MASK(channel));
cpu::DataSynchronizationBarrier();
}
/* NOTE: linux "bpmp_free_master" */
void BpmpFreeMaster(int channel) {
/* Transition state from ack'd to free. */
Write(g_sema_address + RES_SEMA_SHRD_SMP_CLR, ((MA_ACKD(channel)) ^ (MA_FREE(channel))));
cpu::DataSynchronizationBarrier();
}
/* NOTE: linux "bpmp_ring_doorbell" */
void BpmpRingDoorbell() {
Write(g_prictlr_address + ICTLR_FIR_SET(INT_SHR_SEM_OUTBOX_IBF), FIR_BIT(INT_SHR_SEM_OUTBOX_IBF));
cpu::DataSynchronizationBarrier();
}
/* NOTE: linux "bpmp_wait_master_free" */
int BpmpWaitMasterFree(int channel) {
/* Check if the master is already freed. */
if (BpmpIsMasterFree(channel)) {
return 0;
}
/* Spin-poll for the master to be freed until timeout occurs. */
const auto start_tick = KHardwareTimer::GetTick();
const auto timeout = ams::svc::Tick(ChannelTimeout);
do {
if (BpmpIsMasterFree(channel)) {
return 0;
}
} while ((KHardwareTimer::GetTick() - start_tick) < timeout);
/* The master didn't become free. */
return -1;
}
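/* NOTE (illustrative, not part of the original sources): BpmpWaitMasterFree and */
/* BpmpWaitAck share the same spin-until-deadline shape; the generic idiom is: */
/* */
/*     const s64 start = KHardwareTimer::GetTick(); */
/*     const s64 limit = ams::svc::Tick(ChannelTimeout); */
/*     do { */
/*         if (condition()) { return 0; } */
/*     } while ((KHardwareTimer::GetTick() - start) < limit); */
/*     return -1; */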
/* NOTE: linux "bpmp_wait_ack" */
int BpmpWaitAck(int channel) {
/* Check if the master is already ACK'd. */
if (BpmpIsMasterAcked(channel)) {
return 0;
}
/* Spin-poll for the master to be ACK'd until timeout occurs. */
const auto start_tick = KHardwareTimer::GetTick();
const auto timeout = ams::svc::Tick(ChannelTimeout);
do {
if (BpmpIsMasterAcked(channel)) {
return 0;
}
} while ((KHardwareTimer::GetTick() - start_tick) < timeout);
/* The master didn't get ACK'd. */
return -1;
}
/* NOTE: linux "bpmp_write_ch" */
int BpmpWriteChannel(int channel, int mrq, int flags, const void *data, size_t data_size) {
/* Wait to be able to master the mailbox. */
if (int res = BpmpWaitMasterFree(channel); res != 0) {
return res;
}
/* Prepare the message. */
MailboxData *mb = g_channel_area[channel].ob;
mb->code = mrq;
mb->flags = flags;
if (data != nullptr) {
std::memcpy(mb->data, data, data_size);
}
/* Signal to slave that message is available. */
BpmpSignalSlave(channel);
return 0;
}
/* NOTE: linux "__bpmp_read_ch" */
int BpmpReadChannel(int channel, void *data, size_t data_size) {
/* Get the message. */
MailboxData *mb = g_channel_area[channel].ib;
/* Copy any return data. */
if (data != nullptr) {
std::memcpy(data, mb->data, data_size);
}
/* Free the channel. */
BpmpFreeMaster(channel);
/* Return result. */
return mb->code;
}
/* NOTE: linux "tegra_bpmp_send_receive_atomic" or "tegra_bpmp_send_receive". */
int BpmpSendAndReceive(int mrq, const void *ob, int ob_size, void *ib, int ib_size) {
/* Validate that the data transfer is valid. */
if (!IsTransferValid(ob, ob_size, ib, ib_size)) {
return -1;
}
/* Validate that the bpmp is connected. */
if (!g_bpmp_connected) {
return -1;
}
/* Disable interrupts. */
KScopedInterruptDisable di;
/* Acquire exclusive access to send mrqs. */
KScopedSpinLock lk(g_bpmp_mrq_lock);
/* Send the message. */
int channel = BpmpGetOutboundChannel();
if (int res = BpmpWriteChannel(channel, mrq, BPMP_MSG_DO_ACK, ob, ob_size); res != 0) {
return res;
}
/* Send "doorbell" irq to the bpmp firmware. */
BpmpRingDoorbell();
/* Wait for the bpmp firmware to acknowledge our request. */
if (int res = BpmpWaitAck(channel); res != 0) {
return res;
}
/* Read the data the bpmp sent back. */
return BpmpReadChannel(channel, ib, ib_size);
}
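/* NOTE (illustrative, not part of the original sources): the full MRQ transaction */
/* performed above, in order: */
/* */
/*     BpmpWriteChannel(ch, mrq, BPMP_MSG_DO_ACK, ob, ob_size);   // fill the free slot */
/*     BpmpRingDoorbell();                                        // irq to the bpmp */
/*     BpmpWaitAck(ch);                                           // poll for MA_ACKD state */
/*     BpmpReadChannel(ch, ib, ib_size);                          // copy reply, free slot */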
/* NOTE: linux "tegra_bpmp_send" */
int BpmpSend(int mrq, const void *ob, int ob_size) {
/* Validate that the data transfer is valid. */
if (!IsTransferValid(ob, ob_size, nullptr, 0)) {
return -1;
}
/* Validate that the bpmp is connected. */
if (!g_bpmp_connected) {
return -1;
}
/* Disable interrupts. */
KScopedInterruptDisable di;
/* Acquire exclusive access to send mrqs. */
KScopedSpinLock lk(g_bpmp_mrq_lock);
/* Send the message. */
int channel = BpmpGetOutboundChannel();
if (int res = BpmpWriteChannel(channel, mrq, 0, ob, ob_size); res != 0) {
return res;
}
/* Send "doorbell" irq to the bpmp firmware. */
BpmpRingDoorbell();
return 0;
}
/* NOTE: modified linux "tegra_bpmp_enable_suspend" */
int BpmpEnableSuspend(int mode, int flags) {
/* Prepare data for bpmp. */
const s32 data[] = { mode, flags };
/* Send the data. */
return BpmpSend(MRQ_ENABLE_SUSPEND, data, sizeof(data));
}
/* NOTE: linux "__bpmp_connect" */
int ConnectToBpmp() {
/* Check if we've already connected. */
if (g_bpmp_connected) {
return 0;
}
/* Verify that the resource semaphore state is set. */
if (Read(g_sema_address + RES_SEMA_SHRD_SMP_STA) == 0) {
return -1;
}
/* Get the channels, which the bpmp firmware has configured in advance. */
{
const KVirtualAddress iram_virt_addr = KMemoryLayout::GetDeviceVirtualAddress (KMemoryRegionType_LegacyLpsIram);
const KPhysicalAddress iram_phys_addr = KMemoryLayout::GetDevicePhysicalAddress(KMemoryRegionType_LegacyLpsIram);
for (auto i = 0; i < ChannelCount; ++i) {
/* Trigger a get command for the desired channel. */
Write(g_atomics_address + ATOMICS_AP0_TRIGGER, TRIGGER_CMD_GET | (i << 16));
/* Retrieve the channel phys-addr-in-iram, and convert it to a kernel address. */
auto *ch = GetPointer<MailboxData>(iram_virt_addr + (Read(g_atomics_address + ATOMICS_AP0_RESULT(i)) - GetInteger(iram_phys_addr)));
/* Verify the channel isn't null. */
/* NOTE: This is an utterly nonsensical check, as it would require the bpmp firmware to specify */
/* a phys-to-virt diff as an address. On 1.0.0, which had no ASLR, this was 0x8028C000. */
/* However, Nintendo has the check, and we'll preserve it to be faithful. */
if (ch == nullptr) {
return -1;
}
/* Set the channel in the channel area. */
g_channel_area[i].ib = ch;
g_channel_area[i].ob = ch;
}
}
/* Mark driver as connected to bpmp. */
g_bpmp_connected = true;
return 0;
}
/* NOTE: Modified linux "bpmp_mail_init" */
int InitializeBpmpMail() {
/* Check if we've already initialized. */
if (g_bpmp_mail_initialized) {
return 0;
}
/* Mark function as having been called. */
g_bpmp_mail_initialized = true;
/* Declare result/reply variables. */
int res, request = 0, reply = 0;
/* Try to connect to the bpmp. */
if (res = ConnectToBpmp(); res != 0) {
MESOSPHERE_LOG("bpmp: connect error returns %d\n", res);
return res;
}
/* Ensure that we can successfully ping the bpmp. */
request = 1;
if (res = BpmpSendAndReceive(MRQ_PING, std::addressof(request), sizeof(request), std::addressof(reply), sizeof(reply)); res != 0) {
MESOSPHERE_LOG("bpmp: MRQ_PING error returns %d with reply %d\n", res, reply);
return res;
}
/* Configure the PMIC. */
request = 1;
if (res = BpmpSendAndReceive(MRQ_CPU_PMIC_SELECT, std::addressof(request), sizeof(request), std::addressof(reply), sizeof(reply)); res != 0) {
MESOSPHERE_LOG("bpmp: MRQ_CPU_PMIC_SELECT for MAX77621 error returns %d with reply %d\n", res, reply);
return res;
}
return 0;
}
}
void Initialize() {
if (!g_lps_init_done) {
/* Get the addresses of the devices the driver needs. */
InitializeDeviceVirtualAddresses();
/* Configure CC3/CC4. */
ConfigureCc3AndCc4();
/* Initialize ccplex <-> bpmp mail. */
/* NOTE: Nintendo does not check that this call succeeds. */
InitializeBpmpMail();
g_lps_init_done = true;
}
}
Result EnableSuspend(bool enable) {
/* If we're not on core 0, there's nothing to do. */
R_SUCCEED_IF(GetCurrentCoreId() != 0);
/* If we're not enabling suspend, there's nothing to do. */
R_SUCCEED_IF(!enable);
/* Instruct BPMP to enable suspend-to-sc7. */
R_UNLESS(BpmpEnableSuspend(TEGRA_BPMP_PM_SC7, 0) == 0, svc::ResultInvalidState());
R_SUCCEED();
}
void InvokeCpuSleepHandler(uintptr_t arg, uintptr_t entry, uintptr_t entry_arg) {
/* Verify that we're allowed to perform suspension. */
MESOSPHERE_ABORT_UNLESS(g_lps_init_done);
MESOSPHERE_ABORT_UNLESS(GetCurrentCoreId() == 0);
/* Save the CSITE clock source. */
g_csite_clk_source = Read(g_clkrst_address + CLK_RST_CONTROLLER_CLK_SOURCE_CSITE);
/* Configure CSITE clock source as CLK_M. */
Write(g_clkrst_address + CLK_RST_CONTROLLER_CLK_SOURCE_CSITE, (0x6 << 29));
/* Clear the top bit of PMC_SCRATCH4. */
Write(g_pmc_address + APBDEV_PMC_SCRATCH4, Read(g_pmc_address + APBDEV_PMC_SCRATCH4) & 0x7FFFFFFF);
/* Write 1 to PMC_SCRATCH0. This will cause the bootrom to use the warmboot code-path. */
Write(g_pmc_address + APBDEV_PMC_SCRATCH0, 1);
/* Read PMC_SCRATCH0 to be sure our write takes. */
Read(g_pmc_address + APBDEV_PMC_SCRATCH0);
/* Invoke the sleep handler. */
KSleepManager::CpuSleepHandler(arg, entry, entry_arg);
/* Disable deep power down. */
Write(g_pmc_address + APBDEV_PMC_DPD_ENABLE, 0);
/* Restore the saved CSITE clock source. */
Write(g_clkrst_address + CLK_RST_CONTROLLER_CLK_SOURCE_CSITE, g_csite_clk_source);
/* Read the CSITE clock source to ensure our configuration takes. */
Read(g_clkrst_address + CLK_RST_CONTROLLER_CLK_SOURCE_CSITE);
/* Configure CC3/CC4. */
ConfigureCc3AndCc4();
}
void ResumeBpmpFirmware() {
/* Halt the bpmp. */
Write(g_flow_address + FLOW_CTLR_HALT_COP_EVENTS, (0x2 << 29));
/* Hold the bpmp in reset. */
Write(g_clkrst_address + CLK_RST_CONTROLLER_RST_DEV_L_SET, 0x2);
/* Read the saved bpmp entrypoint, and write it to the relevant exception vector. */
const u32 bpmp_entry = Read(g_pmc_address + APBDEV_PMC_SCRATCH39);
Write(g_evp_address + EVP_COP_RESET_VECTOR, bpmp_entry);
/* Verify that we can read back the address we wrote. */
while (Read(g_evp_address + EVP_COP_RESET_VECTOR) != bpmp_entry) {
/* ... */
}
/* Spin for 40 ticks, to give enough time for the bpmp to be reset. */
const auto start_tick = KHardwareTimer::GetTick();
do {
__asm__ __volatile__("" ::: "memory");
} while ((KHardwareTimer::GetTick() - start_tick) < 40);
/* Take the bpmp out of reset. */
Write(g_clkrst_address + CLK_RST_CONTROLLER_RST_DEV_L_CLR, 0x2);
/* Resume the bpmp. */
Write(g_flow_address + FLOW_CTLR_HALT_COP_EVENTS, (0x0 << 29));
}
}
| 17,876 | C++ | .cpp | 362 | 37.801105 | 156 | 0.579361 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,988 | kern_k_sleep_manager.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/board/nintendo/nx/kern_k_sleep_manager.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#include "kern_k_sleep_manager.hpp"
#include "kern_secure_monitor.hpp"
#include "kern_lps_driver.hpp"
namespace ams::kern::init {
void StartOtherCore(const ams::kern::init::KInitArguments *init_args);
}
namespace ams::kern::board::nintendo::nx {
namespace {
/* Struct representing registers saved on wake/sleep. */
class SavedSystemRegisters {
private:
u64 elr_el1;
u64 sp_el0;
u64 spsr_el1;
u64 daif;
u64 cpacr_el1;
u64 vbar_el1;
u64 csselr_el1;
u64 cntp_ctl_el0;
u64 cntp_cval_el0;
u64 cntkctl_el1;
u64 tpidr_el0;
u64 tpidrro_el0;
u64 mdscr_el1;
u64 contextidr_el1;
u64 dbgwcrN_el1[16];
u64 dbgwvrN_el1[16];
u64 dbgbcrN_el1[16];
u64 dbgbvrN_el1[16];
u64 pmccfiltr_el0;
u64 pmccntr_el0;
u64 pmcntenset_el0;
u64 pmcr_el0;
u64 pmevcntrN_el0[31];
u64 pmevtyperN_el0[31];
u64 pmintenset_el1;
u64 pmovsset_el0;
u64 pmselr_el0;
u64 pmuserenr_el0;
public:
void Save();
void Restore() const;
};
constexpr s32 SleepManagerThreadPriority = 2;
/* Globals for sleep/wake. */
constinit u64 g_sleep_target_cores;
constinit KLightLock g_request_lock;
constinit KLightLock g_cv_lock;
constinit KLightConditionVariable g_cv{util::ConstantInitialize};
alignas(1_KB) constinit u64 g_sleep_buffers[cpu::NumCores][1_KB / sizeof(u64)];
constinit ams::kern::init::KInitArguments g_sleep_init_arguments[cpu::NumCores];
constinit SavedSystemRegisters g_sleep_system_registers[cpu::NumCores] = {};
void WaitOtherCpuPowerOff() {
constexpr u64 PmcPhysicalAddress = 0x7000E400;
constexpr u32 PWRGATE_STATUS_CE123_MASK = ((1u << 3) - 1) << 9;
u32 value;
do {
bool res = smc::ReadWriteRegister(std::addressof(value), PmcPhysicalAddress + APBDEV_PMC_PWRGATE_STATUS, 0, 0);
MESOSPHERE_ASSERT(res);
MESOSPHERE_UNUSED(res);
} while ((value & PWRGATE_STATUS_CE123_MASK) != 0);
}
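/* NOTE (illustrative, not part of the original sources): the mask above selects the */
/* three CE1-CE3 power-gate status bits: */
/* */
/*     ((1u << 3) - 1) << 9 == 0b111 << 9 == 0x00000E00 */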
void SavedSystemRegisters::Save() {
/* Save system registers. */
this->tpidr_el0 = cpu::GetTpidrEl0();
this->elr_el1 = cpu::GetElrEl1();
this->sp_el0 = cpu::GetSpEl0();
this->spsr_el1 = cpu::GetSpsrEl1();
this->daif = cpu::GetDaif();
this->cpacr_el1 = cpu::GetCpacrEl1();
this->vbar_el1 = cpu::GetVbarEl1();
this->csselr_el1 = cpu::GetCsselrEl1();
this->cntp_ctl_el0 = cpu::GetCntpCtlEl0();
this->cntp_cval_el0 = cpu::GetCntpCvalEl0();
this->cntkctl_el1 = cpu::GetCntkCtlEl1();
this->tpidrro_el0 = cpu::GetTpidrRoEl0();
/* Save pmu registers. */
{
/* Get and clear pmcr_el0 */
this->pmcr_el0 = cpu::GetPmcrEl0();
cpu::SetPmcrEl0(0);
cpu::EnsureInstructionConsistency();
/* Save other pmu registers. */
this->pmuserenr_el0 = cpu::GetPmUserEnrEl0();
this->pmselr_el0 = cpu::GetPmSelrEl0();
this->pmccfiltr_el0 = cpu::GetPmcCfiltrEl0();
this->pmcntenset_el0 = cpu::GetPmCntEnSetEl0();
this->pmintenset_el1 = cpu::GetPmIntEnSetEl1();
this->pmovsset_el0 = cpu::GetPmOvsSetEl0();
this->pmccntr_el0 = cpu::GetPmcCntrEl0();
switch (cpu::PerformanceMonitorsControlRegisterAccessor(this->pmcr_el0).GetN()) {
#define HANDLE_PMU_CASE(N) \
case (N+1): \
this->pmevcntrN_el0 [ N ] = cpu::GetPmevCntr##N##El0(); \
this->pmevtyperN_el0[ N ] = cpu::GetPmevTyper##N##El0(); \
[[fallthrough]]
HANDLE_PMU_CASE(30);
HANDLE_PMU_CASE(29);
HANDLE_PMU_CASE(28);
HANDLE_PMU_CASE(27);
HANDLE_PMU_CASE(26);
HANDLE_PMU_CASE(25);
HANDLE_PMU_CASE(24);
HANDLE_PMU_CASE(23);
HANDLE_PMU_CASE(22);
HANDLE_PMU_CASE(21);
HANDLE_PMU_CASE(20);
HANDLE_PMU_CASE(19);
HANDLE_PMU_CASE(18);
HANDLE_PMU_CASE(17);
HANDLE_PMU_CASE(16);
HANDLE_PMU_CASE(15);
HANDLE_PMU_CASE(14);
HANDLE_PMU_CASE(13);
HANDLE_PMU_CASE(12);
HANDLE_PMU_CASE(11);
HANDLE_PMU_CASE(10);
HANDLE_PMU_CASE( 9);
HANDLE_PMU_CASE( 8);
HANDLE_PMU_CASE( 7);
HANDLE_PMU_CASE( 6);
HANDLE_PMU_CASE( 5);
HANDLE_PMU_CASE( 4);
HANDLE_PMU_CASE( 3);
HANDLE_PMU_CASE( 2);
HANDLE_PMU_CASE( 1);
HANDLE_PMU_CASE( 0);
#undef HANDLE_PMU_CASE
case 0:
default:
break;
}
}
/* Save debug registers. */
const u64 dfr0 = cpu::GetIdAa64Dfr0El1();
this->mdscr_el1 = cpu::GetMdscrEl1();
this->contextidr_el1 = cpu::GetContextidrEl1();
/* Save watchpoints. */
switch (cpu::DebugFeatureRegisterAccessor(dfr0).GetNumWatchpoints()) {
#define HANDLE_DBG_CASE(N) \
case N: \
this->dbgwcrN_el1[ N ] = cpu::GetDbgWcr##N##El1(); \
this->dbgwvrN_el1[ N ] = cpu::GetDbgWvr##N##El1(); \
[[fallthrough]]
HANDLE_DBG_CASE(15);
HANDLE_DBG_CASE(14);
HANDLE_DBG_CASE(13);
HANDLE_DBG_CASE(12);
HANDLE_DBG_CASE(11);
HANDLE_DBG_CASE(10);
HANDLE_DBG_CASE( 9);
HANDLE_DBG_CASE( 8);
HANDLE_DBG_CASE( 7);
HANDLE_DBG_CASE( 6);
HANDLE_DBG_CASE( 5);
HANDLE_DBG_CASE( 4);
HANDLE_DBG_CASE( 3);
HANDLE_DBG_CASE( 2);
#undef HANDLE_DBG_CASE
case 1:
this->dbgwcrN_el1[1] = cpu::GetDbgWcr1El1();
this->dbgwvrN_el1[1] = cpu::GetDbgWvr1El1();
this->dbgwcrN_el1[0] = cpu::GetDbgWcr0El1();
this->dbgwvrN_el1[0] = cpu::GetDbgWvr0El1();
[[fallthrough]];
default:
break;
}
/* Save breakpoints. */
switch (cpu::DebugFeatureRegisterAccessor(dfr0).GetNumBreakpoints()) {
#define HANDLE_DBG_CASE(N) \
case N: \
this->dbgbcrN_el1[ N ] = cpu::GetDbgBcr##N##El1(); \
this->dbgbvrN_el1[ N ] = cpu::GetDbgBvr##N##El1(); \
[[fallthrough]]
HANDLE_DBG_CASE(15);
HANDLE_DBG_CASE(14);
HANDLE_DBG_CASE(13);
HANDLE_DBG_CASE(12);
HANDLE_DBG_CASE(11);
HANDLE_DBG_CASE(10);
HANDLE_DBG_CASE( 9);
HANDLE_DBG_CASE( 8);
HANDLE_DBG_CASE( 7);
HANDLE_DBG_CASE( 6);
HANDLE_DBG_CASE( 5);
HANDLE_DBG_CASE( 4);
HANDLE_DBG_CASE( 3);
HANDLE_DBG_CASE( 2);
#undef HANDLE_DBG_CASE
case 1:
this->dbgbcrN_el1[1] = cpu::GetDbgBcr1El1();
this->dbgbvrN_el1[1] = cpu::GetDbgBvr1El1();
[[fallthrough]];
default:
break;
}
this->dbgbcrN_el1[0] = cpu::GetDbgBcr0El1();
this->dbgbvrN_el1[0] = cpu::GetDbgBvr0El1();
cpu::EnsureInstructionConsistency();
/* Clear mdscr_el1. */
cpu::SetMdscrEl1(0);
cpu::EnsureInstructionConsistency();
}
void SavedSystemRegisters::Restore() const {
/* Restore debug registers. */
const u64 dfr0 = cpu::GetIdAa64Dfr0El1();
cpu::EnsureInstructionConsistency();
cpu::SetMdscrEl1(0);
cpu::EnsureInstructionConsistency();
cpu::SetOslarEl1(0);
cpu::EnsureInstructionConsistency();
/* Restore watchpoints. */
switch (cpu::DebugFeatureRegisterAccessor(dfr0).GetNumWatchpoints()) {
#define HANDLE_DBG_CASE(N) \
case N: \
cpu::SetDbgWcr##N##El1(this->dbgwcrN_el1[ N ]); \
cpu::SetDbgWvr##N##El1(this->dbgwvrN_el1[ N ]); \
[[fallthrough]]
HANDLE_DBG_CASE(15);
HANDLE_DBG_CASE(14);
HANDLE_DBG_CASE(13);
HANDLE_DBG_CASE(12);
HANDLE_DBG_CASE(11);
HANDLE_DBG_CASE(10);
HANDLE_DBG_CASE( 9);
HANDLE_DBG_CASE( 8);
HANDLE_DBG_CASE( 7);
HANDLE_DBG_CASE( 6);
HANDLE_DBG_CASE( 5);
HANDLE_DBG_CASE( 4);
HANDLE_DBG_CASE( 3);
HANDLE_DBG_CASE( 2);
#undef HANDLE_DBG_CASE
case 1:
cpu::SetDbgWcr1El1(this->dbgwcrN_el1[1]);
cpu::SetDbgWvr1El1(this->dbgwvrN_el1[1]);
cpu::SetDbgWcr0El1(this->dbgwcrN_el1[0]);
cpu::SetDbgWvr0El1(this->dbgwvrN_el1[0]);
[[fallthrough]];
default:
break;
}
/* Restore breakpoints. */
switch (cpu::DebugFeatureRegisterAccessor(dfr0).GetNumBreakpoints()) {
#define HANDLE_DBG_CASE(N) \
case N: \
cpu::SetDbgBcr##N##El1(this->dbgbcrN_el1[ N ]); \
cpu::SetDbgBvr##N##El1(this->dbgbvrN_el1[ N ]); \
[[fallthrough]]
HANDLE_DBG_CASE(15);
HANDLE_DBG_CASE(14);
HANDLE_DBG_CASE(13);
HANDLE_DBG_CASE(12);
HANDLE_DBG_CASE(11);
HANDLE_DBG_CASE(10);
HANDLE_DBG_CASE( 9);
HANDLE_DBG_CASE( 8);
HANDLE_DBG_CASE( 7);
HANDLE_DBG_CASE( 6);
HANDLE_DBG_CASE( 5);
HANDLE_DBG_CASE( 4);
HANDLE_DBG_CASE( 3);
HANDLE_DBG_CASE( 2);
#undef HANDLE_DBG_CASE
case 1:
cpu::SetDbgBcr1El1(this->dbgbcrN_el1[1]);
cpu::SetDbgBvr1El1(this->dbgbvrN_el1[1]);
[[fallthrough]];
default:
break;
}
cpu::SetDbgBcr0El1(this->dbgbcrN_el1[0]);
cpu::SetDbgBvr0El1(this->dbgbvrN_el1[0]);
cpu::EnsureInstructionConsistency();
cpu::SetContextidrEl1(this->contextidr_el1);
cpu::EnsureInstructionConsistency();
cpu::SetMdscrEl1(this->mdscr_el1);
cpu::EnsureInstructionConsistency();
/* Restore pmu registers. */
cpu::SetPmUserEnrEl0(0);
cpu::PerformanceMonitorsControlRegisterAccessor(0).SetEventCounterReset(true).SetCycleCounterReset(true).Store();
cpu::EnsureInstructionConsistency();
cpu::SetPmOvsClrEl0(static_cast<u64>(static_cast<u32>(~u32())));
cpu::SetPmIntEnClrEl1(static_cast<u64>(static_cast<u32>(~u32())));
cpu::SetPmCntEnClrEl0(static_cast<u64>(static_cast<u32>(~u32())));
switch (cpu::PerformanceMonitorsControlRegisterAccessor(this->pmcr_el0).GetN()) {
#define HANDLE_PMU_CASE(N) \
case (N+1): \
cpu::SetPmevCntr##N##El0 (this->pmevcntrN_el0 [ N ]); \
cpu::SetPmevTyper##N##El0(this->pmevtyperN_el0[ N ]); \
[[fallthrough]]
HANDLE_PMU_CASE(30);
HANDLE_PMU_CASE(29);
HANDLE_PMU_CASE(28);
HANDLE_PMU_CASE(27);
HANDLE_PMU_CASE(26);
HANDLE_PMU_CASE(25);
HANDLE_PMU_CASE(24);
HANDLE_PMU_CASE(23);
HANDLE_PMU_CASE(22);
HANDLE_PMU_CASE(21);
HANDLE_PMU_CASE(20);
HANDLE_PMU_CASE(19);
HANDLE_PMU_CASE(18);
HANDLE_PMU_CASE(17);
HANDLE_PMU_CASE(16);
HANDLE_PMU_CASE(15);
HANDLE_PMU_CASE(14);
HANDLE_PMU_CASE(13);
HANDLE_PMU_CASE(12);
HANDLE_PMU_CASE(11);
HANDLE_PMU_CASE(10);
HANDLE_PMU_CASE( 9);
HANDLE_PMU_CASE( 8);
HANDLE_PMU_CASE( 7);
HANDLE_PMU_CASE( 6);
HANDLE_PMU_CASE( 5);
HANDLE_PMU_CASE( 4);
HANDLE_PMU_CASE( 3);
HANDLE_PMU_CASE( 2);
HANDLE_PMU_CASE( 1);
HANDLE_PMU_CASE( 0);
#undef HANDLE_PMU_CASE
case 0:
default:
break;
}
cpu::SetPmUserEnrEl0 (this->pmuserenr_el0);
cpu::SetPmSelrEl0 (this->pmselr_el0);
cpu::SetPmcCfiltrEl0 (this->pmccfiltr_el0);
cpu::SetPmCntEnSetEl0(this->pmcntenset_el0);
cpu::SetPmIntEnSetEl1(this->pmintenset_el1);
cpu::SetPmOvsSetEl0 (this->pmovsset_el0);
cpu::SetPmcCntrEl0 (this->pmccntr_el0);
cpu::EnsureInstructionConsistency();
cpu::SetPmcrEl0(this->pmcr_el0);
cpu::EnsureInstructionConsistency();
/* Restore system registers. */
cpu::SetTtbr0El1 (KPageTable::GetKernelTtbr0());
cpu::SetTpidrEl0 (this->tpidr_el0);
cpu::SetElrEl1 (this->elr_el1);
cpu::SetSpEl0 (this->sp_el0);
cpu::SetSpsrEl1 (this->spsr_el1);
cpu::SetDaif (this->daif);
cpu::SetCpacrEl1 (this->cpacr_el1);
cpu::SetVbarEl1 (this->vbar_el1);
cpu::SetCsselrEl1 (this->csselr_el1);
cpu::SetCntpCtlEl0 (this->cntp_ctl_el0);
cpu::SetCntpCvalEl0(this->cntp_cval_el0);
cpu::SetCntkCtlEl1 (this->cntkctl_el1);
cpu::SetTpidrRoEl0 (this->tpidrro_el0);
cpu::EnsureInstructionConsistency();
/* Invalidate the entire tlb. */
cpu::InvalidateEntireTlb();
}
}
void KSleepManager::Initialize() {
/* Create a sleep manager thread for each core. */
for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) {
/* Reserve a thread from the system limit. */
MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1));
/* Create a new thread. */
KThread *new_thread = KThread::Create();
MESOSPHERE_ABORT_UNLESS(new_thread != nullptr);
/* Launch the new thread. */
MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, KSleepManager::ProcessRequests, reinterpret_cast<uintptr_t>(g_sleep_buffers[core_id]), SleepManagerThreadPriority, static_cast<s32>(core_id)));
/* Register the new thread. */
KThread::Register(new_thread);
/* Run the thread. */
new_thread->Run();
}
}
void KSleepManager::SleepSystem() {
/* Ensure device mappings are not modified during sleep. */
KDevicePageTable::Lock();
ON_SCOPE_EXIT { KDevicePageTable::Unlock(); };
/* Request that the system sleep. */
{
KScopedLightLock lk(g_request_lock);
/* Signal the manager to sleep on all cores. */
{
KScopedLightLock lk(g_cv_lock);
MESOSPHERE_ABORT_UNLESS(g_sleep_target_cores == 0);
g_sleep_target_cores = (1ul << cpu::NumCores) - 1;
g_cv.Broadcast();
while (g_sleep_target_cores != 0) {
g_cv.Wait(std::addressof(g_cv_lock));
}
}
}
}
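/* NOTE (illustrative, not part of the original sources): the bitmask handshake with */
/* the per-core manager threads, for a four-core system: */
/* */
/*     g_sleep_target_cores = 0b1111;   // request sleep on every core */
/*     g_cv.Broadcast();                // wake each ProcessRequests() thread */
/*     // each core clears its own bit after waking from sleep; the last one */
/*     // broadcasts again, releasing this waiter once the mask reaches zero */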
void KSleepManager::ProcessRequests(uintptr_t sleep_buffer) {
const auto target_fw = GetTargetFirmware();
const s32 core_id = GetCurrentCoreId();
ams::kern::init::KInitArguments * const init_args = g_sleep_init_arguments + core_id;
KPhysicalAddress start_core_phys_addr = Null<KPhysicalAddress>;
KPhysicalAddress init_args_phys_addr = Null<KPhysicalAddress>;
/* Get the physical addresses we'll need. */
{
MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(std::addressof(start_core_phys_addr), KProcessAddress(&::ams::kern::init::StartOtherCore)));
MESOSPHERE_ABORT_UNLESS(Kernel::GetKernelPageTable().GetPhysicalAddress(std::addressof(init_args_phys_addr), KProcessAddress(init_args)));
}
const u64 target_core_mask = (1ul << core_id);
const bool use_legacy_lps_driver = target_fw < TargetFirmware_2_0_0;
/* Loop, processing sleep when requested. */
while (true) {
/* Wait for a request. */
{
KScopedLightLock lk(g_cv_lock);
while ((g_sleep_target_cores & target_core_mask) == 0) {
g_cv.Wait(std::addressof(g_cv_lock));
}
}
/* If on core 0, ensure the legacy lps driver is initialized. */
if (use_legacy_lps_driver && core_id == 0) {
lps::Initialize();
}
/* Perform Sleep/Wake sequence. */
{
/* Disable interrupts. */
KScopedInterruptDisable di;
/* Save the system registers for the current core. */
g_sleep_system_registers[core_id].Save();
/* Invalidate the entire tlb. */
cpu::InvalidateEntireTlb();
/* Ensure that all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
/* If on core 0, put the device page tables to sleep. */
if (core_id == 0) {
KDevicePageTable::Sleep();
}
/* Ensure that all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
/* Save the interrupt manager's state. */
Kernel::GetInterruptManager().Save(core_id);
/* Setup the initial arguments. */
{
/* Determine whether we're running on a cortex-a53 or a-57. */
cpu::MainIdRegisterAccessor midr_el1;
const auto implementer = midr_el1.GetImplementer();
const auto primary_part = midr_el1.GetPrimaryPartNumber();
const bool needs_cpu_ctlr = (implementer == cpu::MainIdRegisterAccessor::Implementer::ArmLimited) && (primary_part == cpu::MainIdRegisterAccessor::PrimaryPartNumber::CortexA57 || primary_part == cpu::MainIdRegisterAccessor::PrimaryPartNumber::CortexA53);
init_args->cpuactlr = needs_cpu_ctlr ? cpu::GetCpuActlrEl1() : 0;
init_args->cpuectlr = needs_cpu_ctlr ? cpu::GetCpuEctlrEl1() : 0;
init_args->sp = 0;
init_args->entrypoint = reinterpret_cast<uintptr_t>(::ams::kern::board::nintendo::nx::KSleepManager::ResumeEntry);
init_args->argument = sleep_buffer;
}
/* Ensure that all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
/* Log that the core is going to sleep. */
MESOSPHERE_LOG("Core[%d]: Going to sleep, buffer = %010lx\n", core_id, sleep_buffer);
/* If we're on a core other than zero, we can just invoke the sleep handler. */
if (core_id != 0) {
CpuSleepHandler(sleep_buffer, GetInteger(start_core_phys_addr), GetInteger(init_args_phys_addr));
} else {
/* Wait for all other cores to be powered off. */
WaitOtherCpuPowerOff();
/* If we're using the legacy lps driver, enable suspend. */
if (use_legacy_lps_driver) {
MESOSPHERE_R_ABORT_UNLESS(lps::EnableSuspend(true));
}
/* Log that we're about to enter SC7. */
MESOSPHERE_LOG("Entering SC7\n");
/* Save the debug log state. */
KDebugLog::Save();
/* Invoke the sleep handler. */
if (!use_legacy_lps_driver) {
/* When not using the legacy driver, invoke directly. */
CpuSleepHandler(sleep_buffer, GetInteger(start_core_phys_addr), GetInteger(init_args_phys_addr));
} else {
lps::InvokeCpuSleepHandler(sleep_buffer, GetInteger(start_core_phys_addr), GetInteger(init_args_phys_addr));
}
/* Restore the debug log state. */
KDebugLog::Restore();
/* Log that we're about to exit SC7. */
MESOSPHERE_LOG("Exiting SC7\n");
/* Wake up the other cores. */
cpu::MultiprocessorAffinityRegisterAccessor mpidr;
const auto arg = mpidr.GetCpuOnArgument();
for (s32 i = 1; i < static_cast<s32>(cpu::NumCores); ++i) {
KSystemControl::Init::TurnOnCpu(arg | i, g_sleep_init_arguments + i);
}
}
/* Log that the core is waking from sleep. */
MESOSPHERE_LOG("Core[%d]: Woke from sleep.\n", core_id);
/* Ensure that all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
/* Restore the interrupt manager's state. */
Kernel::GetInterruptManager().Restore(core_id);
/* Ensure that all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
/* If on core 0, wake up the device page tables. */
if (core_id == 0) {
KDevicePageTable::Wakeup();
/* If we're using the legacy driver, resume the bpmp firmware. */
if (use_legacy_lps_driver) {
lps::ResumeBpmpFirmware();
}
}
/* Ensure that all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
/* Restore the system registers for the current core. */
g_sleep_system_registers[core_id].Restore();
}
/* Signal request completed. */
{
KScopedLightLock lk(g_cv_lock);
g_sleep_target_cores &= ~target_core_mask;
if (g_sleep_target_cores == 0) {
g_cv.Broadcast();
}
}
}
}
}
| 25,567 | C++ | .cpp | 533 | 32.04878 | 274 | 0.493022 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,989 | kern_secure_monitor.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/board/nintendo/nx/kern_secure_monitor.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#include "kern_secure_monitor.hpp"
namespace ams::kern::board::nintendo::nx::smc {
namespace {
enum UserFunctionId : u32 {
UserFunctionId_SetConfig = 0xC3000401,
UserFunctionId_GetConfigUser = 0xC3000002,
UserFunctionId_GetResult = 0xC3000003,
UserFunctionId_GetResultData = 0xC3000404,
UserFunctionId_ModularExponentiate = 0xC3000E05,
UserFunctionId_GenerateRandomBytes = 0xC3000006,
UserFunctionId_GenerateAesKek = 0xC3000007,
UserFunctionId_LoadAesKey = 0xC3000008,
UserFunctionId_ComputeAes = 0xC3000009,
UserFunctionId_GenerateSpecificAesKey = 0xC300000A,
UserFunctionId_ComputeCmac = 0xC300040B,
UserFunctionId_ReencryptDeviceUniqueData = 0xC300D60C,
UserFunctionId_DecryptDeviceUniqueData = 0xC300100D,
UserFunctionId_ModularExponentiateByStorageKey = 0xC300060F,
UserFunctionId_PrepareEsDeviceUniqueKey = 0xC3000610,
UserFunctionId_LoadPreparedAesKey = 0xC3000011,
UserFunctionId_PrepareEsCommonTitleKey = 0xC3000012,
};
enum FunctionId : u32 {
FunctionId_GetConfig = 0xC3000004,
FunctionId_GenerateRandomBytes = 0xC3000005,
FunctionId_ShowError = 0xC3000006,
FunctionId_ConfigureCarveout = 0xC3000007,
FunctionId_ReadWriteRegister = 0xC3000008,
/* NOTE: Atmosphere extension for mesosphere. This ID is subject to change at any time. */
FunctionId_SetConfig = 0xC3000409,
};
constexpr size_t GenerateRandomBytesSizeMax = sizeof(::ams::svc::lp64::SecureMonitorArguments) - sizeof(::ams::svc::lp64::SecureMonitorArguments{}.r[0]);
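/* NOTE: This is seven 64-bit words: r[0] carries the function id on entry and the SmcResult on exit, */
/* so at most r[1]..r[7] can return random data in a single call. */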
/* Global lock for generate random bytes. */
constinit KSpinLock g_generate_random_lock;
bool TryGetConfigImpl(u64 *out, size_t num_qwords, ConfigItem config_item) {
/* Create the arguments. */
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GetConfig, static_cast<u32>(config_item) } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* If successful, copy the output. */
const bool success = static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
if (AMS_LIKELY(success)) {
for (size_t i = 0; i < num_qwords && i < 7; i++) {
out[i] = args.r[1 + i];
}
}
return success;
}
bool SetConfigImpl(ConfigItem config_item, u64 value) {
/* Create the arguments. */
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_SetConfig, static_cast<u32>(config_item), 0, value } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* Return whether the call was successful. */
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
bool ReadWriteRegisterImpl(u32 *out, u64 address, u32 mask, u32 value) {
/* Create the arguments. */
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ReadWriteRegister, address, mask, value } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* Unconditionally write the output. */
*out = static_cast<u32>(args.r[1]);
/* Return whether the call was successful. */
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
bool GenerateRandomBytesImpl(void *dst, size_t size) {
/* Create the arguments. */
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_GenerateRandomBytes, size } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* If successful, copy the output. */
const bool success = static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
if (AMS_LIKELY(success)) {
std::memcpy(dst, std::addressof(args.r[1]), size);
}
return success;
}
bool ConfigureCarveoutImpl(size_t which, uintptr_t address, size_t size) {
/* Create the arguments. */
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ConfigureCarveout, static_cast<u64>(which), static_cast<u64>(address), static_cast<u64>(size) } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* Return whether the call was successful. */
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
bool ShowErrorImpl(u32 color) {
/* Create the arguments. */
ams::svc::lp64::SecureMonitorArguments args = { { FunctionId_ShowError, color } };
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_Supervisor>(args.r);
/* Return whether the call was successful. */
return static_cast<SmcResult>(args.r[0]) == SmcResult::Success;
}
void CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args) {
/* Call into the secure monitor. */
::ams::kern::arch::arm64::smc::SecureMonitorCall<SmcId_User>(args->r);
}
}
/* SMC functionality needed for init. */
namespace init {
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
/* Ensure we successfully get the config. */
MESOSPHERE_INIT_ABORT_UNLESS(TryGetConfigImpl(out, num_qwords, config_item));
}
void GenerateRandomBytes(void *dst, size_t size) {
/* Check that the size is valid. */
MESOSPHERE_INIT_ABORT_UNLESS(0 < size && size <= GenerateRandomBytesSizeMax);
/* Ensure we successfully generate the random bytes. */
MESOSPHERE_INIT_ABORT_UNLESS(GenerateRandomBytesImpl(dst, size));
}
void ReadWriteRegister(u32 *out, u64 address, u32 mask, u32 value) {
/* Ensure we successfully access the register. */
MESOSPHERE_INIT_ABORT_UNLESS(ReadWriteRegisterImpl(out, address, mask, value));
}
}
bool TryGetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
/* Disable interrupts. */
KScopedInterruptDisable di;
/* Get the config. */
return TryGetConfigImpl(out, num_qwords, config_item);
}
void GetConfig(u64 *out, size_t num_qwords, ConfigItem config_item) {
/* Ensure we successfully get the config. */
MESOSPHERE_ABORT_UNLESS(TryGetConfig(out, num_qwords, config_item));
}
bool SetConfig(ConfigItem config_item, u64 value) {
/* Disable interrupts. */
KScopedInterruptDisable di;
/* Set the config. */
return SetConfigImpl(config_item, value);
}
bool ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
/* Disable interrupts. */
KScopedInterruptDisable di;
/* Access the register. */
return ReadWriteRegisterImpl(out, address, mask, value);
}
void ConfigureCarveout(size_t which, uintptr_t address, size_t size) {
/* Disable interrupts. */
KScopedInterruptDisable di;
/* Ensure that we successfully configure the carveout. */
MESOSPHERE_ABORT_UNLESS(ConfigureCarveoutImpl(which, address, size));
}
void GenerateRandomBytes(void *dst, size_t size) {
/* Check that the size is valid. */
MESOSPHERE_ABORT_UNLESS(0 < size && size <= GenerateRandomBytesSizeMax);
/* Disable interrupts. */
KScopedInterruptDisable di;
/* Acquire the exclusive right to generate random bytes. */
KScopedSpinLock lk(g_generate_random_lock);
/* Ensure we successfully generate the random bytes. */
MESOSPHERE_ABORT_UNLESS(GenerateRandomBytesImpl(dst, size));
}
void ShowError(u32 color) {
/* Disable interrupts. */
KScopedInterruptDisable di;
/* Ensure we successfully show the error. */
MESOSPHERE_ABORT_UNLESS(ShowErrorImpl(color));
}
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
/* Disable interrupts. */
KScopedInterruptDisable di;
/* Perform the call. */
CallSecureMonitorFromUserImpl(args);
}
}
| 9,735 | C++ | .cpp | 183 | 43.218579 | 171 | 0.621855 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,990 | kern_k_system_control.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/board/nintendo/nx/kern_k_system_control.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#include "kern_secure_monitor.hpp"
#include "kern_k_sleep_manager.hpp"
namespace ams::kern::board::nintendo::nx {
namespace {
constexpr size_t SecureAlignment = 128_KB;
constexpr size_t SecureSizeMax = util::AlignDown(512_MB - 1, SecureAlignment);
/* Global variables for panic. */
constinit const volatile bool g_call_smc_on_panic = false;
/* Global variables for secure memory. */
constinit KSpinLock g_secure_applet_lock;
constinit bool g_secure_applet_memory_used = false;
constinit KVirtualAddress g_secure_applet_memory_address = Null<KVirtualAddress>;
constinit KSpinLock g_secure_region_lock;
constinit bool g_secure_region_used = false;
constinit KPhysicalAddress g_secure_region_phys_addr = Null<KPhysicalAddress>;
constinit size_t g_secure_region_size = 0;
ALWAYS_INLINE util::BitPack32 GetKernelConfigurationForInit() {
u64 value = 0;
smc::init::GetConfig(&value, 1, smc::ConfigItem::KernelConfiguration);
return util::BitPack32{static_cast<u32>(value)};
}
ALWAYS_INLINE u32 GetMemoryModeForInit() {
u64 value = 0;
smc::init::GetConfig(&value, 1, smc::ConfigItem::MemoryMode);
return static_cast<u32>(value);
}
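/* NOTE: The mode nibbles appear to encode (size class << 4) | variant: high nibble 0/1/2 selects */
/* 4GB/6GB/8GB, and the low nibble distinguishes retail from applet/system dev arrangements. */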
ALWAYS_INLINE smc::MemoryArrangement GetMemoryArrangeForInit() {
switch (GetMemoryModeForInit() & 0x3F) {
case 0x01:
default:
return smc::MemoryArrangement_4GB;
case 0x02:
return smc::MemoryArrangement_4GBForAppletDev;
case 0x03:
return smc::MemoryArrangement_4GBForSystemDev;
case 0x11:
return smc::MemoryArrangement_6GB;
case 0x12:
return smc::MemoryArrangement_6GBForAppletDev;
case 0x21:
return smc::MemoryArrangement_8GB;
}
}
ALWAYS_INLINE u64 GenerateRandomU64ForInit() {
u64 value;
smc::init::GenerateRandomBytes(std::addressof(value), sizeof(value));
return value;
}
ALWAYS_INLINE u64 GenerateRandomU64FromSmc() {
u64 value;
smc::GenerateRandomBytes(std::addressof(value), sizeof(value));
return value;
}
ALWAYS_INLINE u64 GetConfigU64(smc::ConfigItem which) {
u64 value;
smc::GetConfig(&value, 1, which);
return value;
}
ALWAYS_INLINE u32 GetConfigU32(smc::ConfigItem which) {
return static_cast<u32>(GetConfigU64(which));
}
ALWAYS_INLINE bool GetConfigBool(smc::ConfigItem which) {
return GetConfigU64(which) != 0;
}
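/* The whitelists below are bitmaps over 4-byte register slots: for a byte offset into the region, */
/* bit ((offset / 4) % 8) of table byte ((offset / 4) / 8) grants access. For example, offset 0x50 */
/* is register index 0x14, i.e. bit 4 of table byte 2. */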
ALWAYS_INLINE bool CheckRegisterAllowedTable(const u8 *table, const size_t offset) {
return (table[(offset / sizeof(u32)) / BITSIZEOF(u8)] & (1u << ((offset / sizeof(u32)) % BITSIZEOF(u8)))) != 0;
}
/* TODO: Generate this from a list of register names (see similar logic in exosphere)? */
constexpr inline const u8 McKernelRegisterWhitelist[(PageSize / sizeof(u32)) / BITSIZEOF(u8)] = {
0x9F, 0x31, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xC0, 0x73, 0x3E, 0x6F, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xE4, 0xFF, 0xFF, 0x01,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
/* TODO: Generate this from a list of register names (see similar logic in exosphere)? */
constexpr inline const u8 McUserRegisterWhitelist[(PageSize / sizeof(u32)) / BITSIZEOF(u8)] = {
0x00, 0x00, 0x20, 0x00, 0xF0, 0xFF, 0xF7, 0x01,
0xCD, 0xFE, 0xC0, 0xFE, 0x00, 0x00, 0x00, 0x00,
0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6E,
0x30, 0x05, 0x06, 0xB0, 0x71, 0xC8, 0x43, 0x04,
0x80, 0xFF, 0x08, 0x80, 0x03, 0x38, 0x8E, 0x1F,
0xC8, 0xFF, 0xFF, 0x00, 0x0E, 0x00, 0x00, 0x00,
0xF0, 0x1F, 0x00, 0x30, 0xF0, 0x03, 0x03, 0x30,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x31, 0x00, 0x00, 0x00, 0x00,
0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x0C, 0x00, 0xFE, 0x0F,
0x01, 0x00, 0x80, 0x00, 0x00, 0x08, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};
bool IsRegisterAccessibleToPrivileged(ams::svc::PhysicalAddress address) {
/* Find the region for the address. */
const KMemoryRegion *region = KMemoryLayout::Find(KPhysicalAddress(address));
if (AMS_LIKELY(region != nullptr)) {
if (AMS_LIKELY(region->IsDerivedFrom(KMemoryRegionType_MemoryController))) {
/* Check the region is valid. */
MESOSPHERE_ABORT_UNLESS(region->GetEndAddress() != 0);
/* Get the offset within the region. */
const size_t offset = address - region->GetAddress();
MESOSPHERE_ABORT_UNLESS(offset < region->GetSize());
/* Check the whitelist. */
if (AMS_LIKELY(CheckRegisterAllowedTable(McKernelRegisterWhitelist, offset))) {
return true;
}
}
}
return false;
}
bool IsRegisterAccessibleToUser(ams::svc::PhysicalAddress address) {
/* Find the region for the address. */
const KMemoryRegion *region = KMemoryLayout::Find(KPhysicalAddress(address));
if (AMS_LIKELY(region != nullptr)) {
/* The PMC is always allowed. */
if (region->IsDerivedFrom(KMemoryRegionType_PowerManagementController)) {
return true;
}
/* Memory controller is allowed if the register is whitelisted. */
if (region->IsDerivedFrom(KMemoryRegionType_MemoryController ) ||
region->IsDerivedFrom(KMemoryRegionType_MemoryController0) ||
region->IsDerivedFrom(KMemoryRegionType_MemoryController1))
{
/* Check the region is valid. */
MESOSPHERE_ABORT_UNLESS(region->GetEndAddress() != 0);
/* Get the offset within the region. */
const size_t offset = address - region->GetAddress();
MESOSPHERE_ABORT_UNLESS(offset < region->GetSize());
/* Check the whitelist. */
if (AMS_LIKELY(CheckRegisterAllowedTable(McUserRegisterWhitelist, offset))) {
return true;
}
}
}
return false;
}
bool SetSecureRegion(KPhysicalAddress phys_addr, size_t size) {
/* Ensure size is valid. */
if (size > SecureSizeMax) {
return false;
}
/* Ensure address and size are aligned. */
if (!util::IsAligned(GetInteger(phys_addr), SecureAlignment)) {
return false;
}
if (!util::IsAligned(size, SecureAlignment)) {
return false;
}
/* Disable interrupts and acquire the secure region lock. */
KScopedInterruptDisable di;
KScopedSpinLock lk(g_secure_region_lock);
/* If size is non-zero, we're allocating the secure region. Otherwise, we're freeing it. */
if (size != 0) {
/* Verify that the secure region is free. */
if (g_secure_region_used) {
return false;
}
/* Set the secure region. */
g_secure_region_used = true;
g_secure_region_phys_addr = phys_addr;
g_secure_region_size = size;
} else {
/* Verify that the secure region is in use. */
if (!g_secure_region_used) {
return false;
}
/* Verify that the address being freed is the secure region. */
if (phys_addr != g_secure_region_phys_addr) {
return false;
}
/* Clear the secure region. */
g_secure_region_used = false;
g_secure_region_phys_addr = Null<KPhysicalAddress>;
g_secure_region_size = 0;
}
/* Configure the carveout with the secure monitor. */
smc::ConfigureCarveout(1, GetInteger(phys_addr), size);
return true;
}
Result AllocateSecureMemoryForApplet(KVirtualAddress *out, size_t size) {
/* Verify that the size is valid. */
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size <= KSystemControl::SecureAppletMemorySize, svc::ResultOutOfMemory());
/* Disable interrupts and acquire the secure applet lock. */
KScopedInterruptDisable di;
KScopedSpinLock lk(g_secure_applet_lock);
/* Check that memory is reserved for secure applet use. */
MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_address != Null<KVirtualAddress>);
/* Verify that the secure applet memory isn't already being used. */
R_UNLESS(!g_secure_applet_memory_used, svc::ResultOutOfMemory());
/* Return the secure applet memory. */
g_secure_applet_memory_used = true;
*out = g_secure_applet_memory_address;
R_SUCCEED();
}
void FreeSecureMemoryForApplet(KVirtualAddress address, size_t size) {
/* Disable interrupts and acquire the secure applet lock. */
KScopedInterruptDisable di;
KScopedSpinLock lk(g_secure_applet_lock);
/* Verify that the memory being freed is correct. */
MESOSPHERE_ABORT_UNLESS(address == g_secure_applet_memory_address);
MESOSPHERE_ABORT_UNLESS(size <= KSystemControl::SecureAppletMemorySize);
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, PageSize));
MESOSPHERE_ABORT_UNLESS(g_secure_applet_memory_used);
/* Release the secure applet memory. */
g_secure_applet_memory_used = false;
}
u32 GetVersionIdentifier() {
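/* Pack the version as 'M' | major | minor | micro; e.g., a hypothetical release 1.2.3 would encode as 0x4D010203. */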
u32 value = 0;
value |= static_cast<u64>(ATMOSPHERE_RELEASE_VERSION_MICRO) << 0;
value |= static_cast<u64>(ATMOSPHERE_RELEASE_VERSION_MINOR) << 8;
value |= static_cast<u64>(ATMOSPHERE_RELEASE_VERSION_MAJOR) << 16;
value |= static_cast<u64>('M') << 24;
return value;
}
}
/* Initialization. */
size_t KSystemControl::Init::GetRealMemorySize() {
/* TODO: Move this into a header for the MC in general. */
constexpr u32 MemoryControllerConfigurationRegister = 0x70019050;
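/* NOTE: The low 14 bits of this register (EMEM_CFG on the Tegra memory controller) hold the */
/* external memory size in megabytes, hence the mask and the shift by 20 below. */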
u32 config_value;
smc::init::ReadWriteRegister(std::addressof(config_value), MemoryControllerConfigurationRegister, 0, 0);
return static_cast<size_t>(config_value & 0x3FFF) << 20;
}
size_t KSystemControl::Init::GetIntendedMemorySize() {
switch (GetKernelConfigurationForInit().Get<smc::KernelConfiguration::MemorySize>()) {
case smc::MemorySize_4GB:
default: /* All invalid modes should go to 4GB. */
return 4_GB;
case smc::MemorySize_6GB:
return 6_GB;
case smc::MemorySize_8GB:
return 8_GB;
}
}
bool KSystemControl::Init::ShouldIncreaseThreadResourceLimit() {
return GetKernelConfigurationForInit().Get<smc::KernelConfiguration::IncreaseThreadResourceLimit>();
}
size_t KSystemControl::Init::GetApplicationPoolSize() {
/* Get the base pool size. */
const size_t base_pool_size = []() ALWAYS_INLINE_LAMBDA -> size_t {
switch (GetMemoryArrangeForInit()) {
case smc::MemoryArrangement_4GB:
default:
return 3285_MB;
case smc::MemoryArrangement_4GBForAppletDev:
return 2048_MB;
case smc::MemoryArrangement_4GBForSystemDev:
return 3285_MB;
case smc::MemoryArrangement_6GB:
return 4916_MB;
case smc::MemoryArrangement_6GBForAppletDev:
return 3285_MB;
case smc::MemoryArrangement_8GB:
return 6964_MB;
}
}();
/* Return (possibly) adjusted size. */
return base_pool_size;
}
size_t KSystemControl::Init::GetAppletPoolSize() {
/* Get the base pool size. */
const size_t base_pool_size = []() ALWAYS_INLINE_LAMBDA -> size_t {
switch (GetMemoryArrangeForInit()) {
case smc::MemoryArrangement_4GB:
default:
return 507_MB;
case smc::MemoryArrangement_4GBForAppletDev:
return 1554_MB;
case smc::MemoryArrangement_4GBForSystemDev:
return 448_MB;
case smc::MemoryArrangement_6GB:
return 562_MB;
case smc::MemoryArrangement_6GBForAppletDev:
return 2193_MB;
case smc::MemoryArrangement_8GB:
return 562_MB;
}
}();
/* Return (possibly) adjusted size. */
constexpr size_t ExtraSystemMemoryForAtmosphere = 40_MB;
return base_pool_size - ExtraSystemMemoryForAtmosphere - KTraceBufferSize;
}
size_t KSystemControl::Init::GetMinimumNonSecureSystemPoolSize() {
/* Verify that our minimum is at least as large as Nintendo's. */
constexpr size_t MinimumSizeWithFatal = ::ams::svc::RequiredNonSecureSystemMemorySizeWithFatal;
static_assert(MinimumSizeWithFatal >= 0x2C04000);
constexpr size_t MinimumSizeWithoutFatal = ::ams::svc::RequiredNonSecureSystemMemorySize;
static_assert(MinimumSizeWithoutFatal >= 0x2A00000);
/* Include fatal in non-secure size on 16.0.0+. */
return kern::GetTargetFirmware() >= ams::TargetFirmware_16_0_0 ? MinimumSizeWithFatal : MinimumSizeWithoutFatal;
}
u8 KSystemControl::Init::GetDebugLogUartPort() {
/* Get the log configuration. */
u64 value = 0;
smc::init::GetConfig(std::addressof(value), 1, smc::ConfigItem::ExosphereLogConfiguration);
/* Extract the port. */
return static_cast<u8>((value >> 32) & 0xFF);
}
void KSystemControl::Init::CpuOnImpl(u64 core_id, uintptr_t entrypoint, uintptr_t arg) {
MESOSPHERE_INIT_ABORT_UNLESS((::ams::kern::arch::arm64::smc::CpuOn<smc::SmcId_Supervisor>(core_id, entrypoint, arg)) == 0);
}
/* Randomness for Initialization. */
void KSystemControl::Init::GenerateRandom(u64 *dst, size_t count) {
MESOSPHERE_INIT_ABORT_UNLESS(count <= 7);
smc::init::GenerateRandomBytes(dst, count * sizeof(u64));
}
u64 KSystemControl::Init::GenerateRandomRange(u64 min, u64 max) {
return KSystemControlBase::GenerateUniformRange(min, max, GenerateRandomU64ForInit);
}
/* System Initialization. */
void KSystemControl::ConfigureKTargetSystem() {
/* Configure KTargetSystem. */
volatile auto *ts = const_cast<volatile KTargetSystem::KTargetSystemData *>(std::addressof(KTargetSystem::s_data));
{
/* Set IsDebugMode. */
{
ts->is_debug_mode = GetConfigBool(smc::ConfigItem::IsDebugMode);
/* If debug mode, we want to initialize uart logging. */
ts->enable_debug_logging = ts->is_debug_mode;
}
/* Set Kernel Configuration. */
{
const auto kernel_config = util::BitPack32{GetConfigU32(smc::ConfigItem::KernelConfiguration)};
ts->enable_debug_memory_fill = kernel_config.Get<smc::KernelConfiguration::DebugFillMemory>();
ts->enable_user_exception_handlers = kernel_config.Get<smc::KernelConfiguration::EnableUserExceptionHandlers>();
ts->enable_dynamic_resource_limits = !kernel_config.Get<smc::KernelConfiguration::DisableDynamicResourceLimits>();
ts->enable_user_pmu_access = kernel_config.Get<smc::KernelConfiguration::EnableUserPmuAccess>();
/* Configure call smc on panic. */
*const_cast<volatile bool *>(std::addressof(g_call_smc_on_panic)) = kernel_config.Get<smc::KernelConfiguration::UseSecureMonitorPanicCall>();
}
/* Set Kernel Debugging. */
{
/* NOTE: This is used to restrict access to SvcKernelDebug/SvcChangeKernelTraceState. */
/* Mesosphere may wish to not require this, as we'd ideally keep ProgramVerification enabled for userland. */
ts->enable_kernel_debugging = GetConfigBool(smc::ConfigItem::DisableProgramVerification);
}
}
}
void KSystemControl::InitializePhase1() {
/* Enable KTargetSystem. */
KTargetSystem::SetInitialized();
/* Check KTargetSystem was configured correctly. */
{
/* Check IsDebugMode. */
{
MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsDebugMode() == GetConfigBool(smc::ConfigItem::IsDebugMode));
MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsDebugLoggingEnabled() == GetConfigBool(smc::ConfigItem::IsDebugMode));
}
/* Check Kernel Configuration. */
{
const auto kernel_config = util::BitPack32{GetConfigU32(smc::ConfigItem::KernelConfiguration)};
MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsDebugMemoryFillEnabled() == kernel_config.Get<smc::KernelConfiguration::DebugFillMemory>());
MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsUserExceptionHandlersEnabled() == kernel_config.Get<smc::KernelConfiguration::EnableUserExceptionHandlers>());
MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled() == !kernel_config.Get<smc::KernelConfiguration::DisableDynamicResourceLimits>());
MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsUserPmuAccessEnabled() == kernel_config.Get<smc::KernelConfiguration::EnableUserPmuAccess>());
MESOSPHERE_ABORT_UNLESS(g_call_smc_on_panic == kernel_config.Get<smc::KernelConfiguration::UseSecureMonitorPanicCall>());
}
/* Check Kernel Debugging. */
{
MESOSPHERE_ABORT_UNLESS(KTargetSystem::IsKernelDebuggingEnabled() == GetConfigBool(smc::ConfigItem::DisableProgramVerification));
}
}
/* Initialize random and resource limit. */
{
u64 seed;
smc::GenerateRandomBytes(std::addressof(seed), sizeof(seed));
KSystemControlBase::InitializePhase1Base(seed);
}
/* Configure the Kernel Carveout region. */
{
const auto carveout = KMemoryLayout::GetCarveoutRegionExtents();
MESOSPHERE_ABORT_UNLESS(carveout.GetEndAddress() != 0);
smc::ConfigureCarveout(0, carveout.GetAddress(), carveout.GetSize());
}
}
void KSystemControl::InitializePhase2() {
/* Initialize the sleep manager. */
KSleepManager::Initialize();
/* Get the secure applet memory. */
const auto &secure_applet_memory = KMemoryLayout::GetSecureAppletMemoryRegion();
MESOSPHERE_INIT_ABORT_UNLESS(secure_applet_memory.GetSize() == SecureAppletMemorySize);
g_secure_applet_memory_address = secure_applet_memory.GetAddress();
/* Initialize KTrace (and potentially other init). */
KSystemControlBase::InitializePhase2();
}
u32 KSystemControl::GetCreateProcessMemoryPool() {
return KMemoryManager::Pool_Unsafe;
}
/* Privileged Access. */
void KSystemControl::ReadWriteRegisterPrivileged(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
MESOSPHERE_ABORT_UNLESS(util::IsAligned(address, sizeof(u32)));
MESOSPHERE_ABORT_UNLESS(IsRegisterAccessibleToPrivileged(address));
MESOSPHERE_ABORT_UNLESS(smc::ReadWriteRegister(out, address, mask, value));
}
Result KSystemControl::ReadWriteRegister(u32 *out, ams::svc::PhysicalAddress address, u32 mask, u32 value) {
R_UNLESS(AMS_LIKELY(util::IsAligned(address, sizeof(u32))), svc::ResultInvalidAddress());
R_UNLESS(AMS_LIKELY(IsRegisterAccessibleToUser(address)), svc::ResultInvalidAddress());
R_UNLESS(AMS_LIKELY(smc::ReadWriteRegister(out, address, mask, value)), svc::ResultInvalidAddress());
R_SUCCEED();
}
/* Randomness. */
void KSystemControl::GenerateRandom(u64 *dst, size_t count) {
MESOSPHERE_INIT_ABORT_UNLESS(count <= 7);
smc::GenerateRandomBytes(dst, count * sizeof(u64));
}
u64 KSystemControl::GenerateRandomRange(u64 min, u64 max) {
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(s_random_lock);
if (AMS_LIKELY(s_initialized_random_generator)) {
return KSystemControlBase::GenerateUniformRange(min, max, []() ALWAYS_INLINE_LAMBDA -> u64 { return s_random_generator.GenerateRandomU64(); });
} else {
return KSystemControlBase::GenerateUniformRange(min, max, GenerateRandomU64FromSmc);
}
}
u64 KSystemControl::GenerateRandomU64() {
KScopedInterruptDisable intr_disable;
KScopedSpinLock lk(s_random_lock);
if (AMS_LIKELY(s_initialized_random_generator)) {
return s_random_generator.GenerateRandomU64();
} else {
return GenerateRandomU64FromSmc();
}
}
void KSystemControl::SleepSystem() {
MESOSPHERE_LOG("SleepSystem() was called\n");
KSleepManager::SleepSystem();
}
void KSystemControl::StopSystem(void *arg) {
if (arg != nullptr) {
/* Get the address of the legacy IRAM region. */
const KVirtualAddress iram_address = KMemoryLayout::GetDeviceVirtualAddress(KMemoryRegionType_LegacyLpsIram) + 64_KB;
constexpr size_t RebootPayloadSize = 0x24000;
/* NOTE: Atmosphere extension; if we received an exception context from Panic(), */
/* generate a fatal error report using it. */
const KExceptionContext *e_ctx = static_cast<const KExceptionContext *>(arg);
auto *f_ctx = GetPointer<::ams::impl::FatalErrorContext>(iram_address + 0x2E000);
/* Clear the fatal context. */
std::memset(f_ctx, 0xCC, sizeof(*f_ctx));
/* Set metadata. */
f_ctx->magic = ::ams::impl::FatalErrorContext::Magic;
f_ctx->error_desc = ::ams::impl::FatalErrorContext::KernelPanicDesc;
f_ctx->program_id = (static_cast<u64>(util::FourCC<'M', 'E', 'S', 'O'>::Code) << 0) | (static_cast<u64>(util::FourCC<'S', 'P', 'H', 'R'>::Code) << 32);
/* Set identifier. */
f_ctx->report_identifier = KHardwareTimer::GetTick();
/* Set module base. */
f_ctx->module_base = KMemoryLayout::GetKernelCodeRegionExtents().GetAddress();
/* Set afsr0/afsr1. */
f_ctx->afsr0 = GetVersionIdentifier();
f_ctx->afsr1 = static_cast<u32>(kern::GetTargetFirmware());
/* Set esr/far. */
f_ctx->far = cpu::GetFarEl1();
f_ctx->esr = cpu::GetEsrEl1();
/* Copy registers. */
for (size_t i = 0; i < util::size(e_ctx->x); ++i) {
f_ctx->gprs[i] = e_ctx->x[i];
}
f_ctx->sp = e_ctx->sp;
f_ctx->pc = cpu::GetElrEl1();
/* Dump stack trace. */
{
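/* Walk the AArch64 frame-pointer chain; each frame record is a { previous fp, return lr } pair, */
/* stopping on a null, unaligned, or unmapped frame pointer. */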
uintptr_t fp = e_ctx->x[29];
for (f_ctx->stack_trace_size = 0; f_ctx->stack_trace_size < ::ams::impl::FatalErrorContext::MaxStackTrace && fp != 0 && util::IsAligned(fp, 0x10) && cpu::GetPhysicalAddressWritable(nullptr, fp, true); ++(f_ctx->stack_trace_size)) {
struct {
uintptr_t fp;
uintptr_t lr;
} *stack_frame = reinterpret_cast<decltype(stack_frame)>(fp);
f_ctx->stack_trace[f_ctx->stack_trace_size] = stack_frame->lr;
fp = stack_frame->fp;
}
}
/* Dump stack. */
{
uintptr_t sp = e_ctx->sp;
for (f_ctx->stack_dump_size = 0; f_ctx->stack_dump_size < ::ams::impl::FatalErrorContext::MaxStackDumpSize && cpu::GetPhysicalAddressWritable(nullptr, sp + f_ctx->stack_dump_size, true); f_ctx->stack_dump_size += sizeof(u64)) {
*reinterpret_cast<u64 *>(f_ctx->stack_dump + f_ctx->stack_dump_size) = *reinterpret_cast<u64 *>(sp + f_ctx->stack_dump_size);
}
}
/* Try to get a payload address. */
const KMemoryRegion *cached_region = nullptr;
u64 reboot_payload_paddr = 0;
if (smc::TryGetConfig(std::addressof(reboot_payload_paddr), 1, smc::ConfigItem::ExospherePayloadAddress) && KMemoryLayout::IsLinearMappedPhysicalAddress(cached_region, reboot_payload_paddr, RebootPayloadSize)) {
/* If we have a payload, reboot to it. */
const KVirtualAddress reboot_payload = KMemoryLayout::GetLinearVirtualAddress(KPhysicalAddress(reboot_payload_paddr));
/* Clear IRAM. */
std::memset(GetVoidPointer(iram_address), 0xCC, RebootPayloadSize);
/* Copy the payload to iram. */
for (size_t i = 0; i < RebootPayloadSize / sizeof(u32); ++i) {
GetPointer<volatile u32>(iram_address)[i] = GetPointer<volatile u32>(reboot_payload)[i];
}
}
smc::SetConfig(smc::ConfigItem::ExosphereNeedsReboot, smc::UserRebootType_ToFatalError);
}
if (g_call_smc_on_panic) {
/* If we should, instruct the secure monitor to display a panic screen. */
smc::ShowError(0xF00);
}
AMS_INFINITE_LOOP();
}
/* User access. */
void KSystemControl::CallSecureMonitorFromUserImpl(ams::svc::lp64::SecureMonitorArguments *args) {
/* Invoke the secure monitor. */
return smc::CallSecureMonitorFromUser(args);
}
/* Secure Memory. */
size_t KSystemControl::CalculateRequiredSecureMemorySize(size_t size, u32 pool) {
if (pool == KMemoryManager::Pool_Applet) {
return 0;
} else {
return KSystemControlBase::CalculateRequiredSecureMemorySize(size, pool);
}
}
Result KSystemControl::AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool) {
/* Applet secure memory is handled separately. */
if (pool == KMemoryManager::Pool_Applet) {
R_RETURN(AllocateSecureMemoryForApplet(out, size));
}
/* Ensure the size is aligned. */
const size_t alignment = (pool == KMemoryManager::Pool_System ? PageSize : SecureAlignment);
R_UNLESS(util::IsAligned(size, alignment), svc::ResultInvalidSize());
/* Allocate the memory. */
const size_t num_pages = size / PageSize;
const KPhysicalAddress paddr = Kernel::GetMemoryManager().AllocateAndOpenContinuous(num_pages, alignment / PageSize, KMemoryManager::EncodeOption(static_cast<KMemoryManager::Pool>(pool), KMemoryManager::Direction_FromFront));
R_UNLESS(paddr != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
/* Ensure we don't leak references to the memory on error. */
ON_RESULT_FAILURE { Kernel::GetMemoryManager().Close(paddr, num_pages); };
/* If the memory isn't already secure, set it as secure. */
if (pool != KMemoryManager::Pool_System) {
/* Set the secure region. */
R_UNLESS(SetSecureRegion(paddr, size), svc::ResultOutOfMemory());
}
/* We succeeded. */
*out = KPageTable::GetHeapVirtualAddress(paddr);
R_SUCCEED();
}
void KSystemControl::FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool) {
/* Applet secure memory is handled separately. */
if (pool == KMemoryManager::Pool_Applet) {
return FreeSecureMemoryForApplet(address, size);
}
/* Ensure the size is aligned. */
const size_t alignment = (pool == KMemoryManager::Pool_System ? PageSize : SecureAlignment);
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(address), alignment));
MESOSPHERE_ABORT_UNLESS(util::IsAligned(size, alignment));
/* If the memory isn't secure system, reset the secure region. */
if (pool != KMemoryManager::Pool_System) {
/* Check that the size being freed is the current secure region size. */
MESOSPHERE_ABORT_UNLESS(g_secure_region_size == size);
/* Get the physical address. */
const KPhysicalAddress paddr = KPageTable::GetHeapPhysicalAddress(address);
MESOSPHERE_ABORT_UNLESS(paddr != Null<KPhysicalAddress>);
/* Check that the memory being freed is the current secure region. */
MESOSPHERE_ABORT_UNLESS(paddr == g_secure_region_phys_addr);
/* Free the secure region. */
MESOSPHERE_ABORT_UNLESS(SetSecureRegion(paddr, 0));
}
/* Close the secure region's pages. */
Kernel::GetMemoryManager().Close(KPageTable::GetHeapPhysicalAddress(address), size / PageSize);
}
}
| 31,644 | C++ | .cpp | 589 | 41.529711 | 247 | 0.605444 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,991 | kern_k_device_page_table.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/board/nintendo/nx/kern_k_device_page_table.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#if defined(MESOSPHERE_BUILD_FOR_DEBUGGING) || defined(MESOSPHERE_BUILD_FOR_AUDITING)
#define MESOSPHERE_ENABLE_MEMORY_CONTROLLER_INTERRUPT
#endif
namespace ams::kern::board::nintendo::nx {
namespace {
/* Definitions. */
constexpr size_t PageDirectorySize = KPageTableManager::PageTableSize;
constexpr size_t PageTableSize = KPageTableManager::PageTableSize;
static_assert(PageDirectorySize == PageSize);
constexpr size_t AsidCount = 0x80;
constexpr size_t PhysicalAddressBits = 34;
constexpr size_t PhysicalAddressMask = (1ul << PhysicalAddressBits) - 1ul;
constexpr size_t DeviceVirtualAddressBits = 34;
constexpr size_t DeviceVirtualAddressMask = (1ul << DeviceVirtualAddressBits) - 1ul;
constexpr size_t DevicePageBits = 12;
constexpr size_t DevicePageSize = (1ul << DevicePageBits);
static_assert(DevicePageSize == PageSize);
constexpr size_t DeviceLargePageBits = 22;
constexpr size_t DeviceLargePageSize = (1ul << DeviceLargePageBits);
static_assert(DeviceLargePageSize % DevicePageSize == 0);
constexpr size_t DeviceRegionBits = 32;
constexpr size_t DeviceRegionSize = (1ul << DeviceRegionBits);
static_assert(DeviceRegionSize % DeviceLargePageSize == 0);
constexpr size_t DeviceAsidRegisterOffsets[] = {
[ams::svc::DeviceName_Afi] = MC_SMMU_AFI_ASID,
[ams::svc::DeviceName_Avpc] = MC_SMMU_AVPC_ASID,
[ams::svc::DeviceName_Dc] = MC_SMMU_DC_ASID,
[ams::svc::DeviceName_Dcb] = MC_SMMU_DCB_ASID,
[ams::svc::DeviceName_Hc] = MC_SMMU_HC_ASID,
[ams::svc::DeviceName_Hda] = MC_SMMU_HDA_ASID,
[ams::svc::DeviceName_Isp2] = MC_SMMU_ISP2_ASID,
[ams::svc::DeviceName_MsencNvenc] = MC_SMMU_MSENC_NVENC_ASID,
[ams::svc::DeviceName_Nv] = MC_SMMU_NV_ASID,
[ams::svc::DeviceName_Nv2] = MC_SMMU_NV2_ASID,
[ams::svc::DeviceName_Ppcs] = MC_SMMU_PPCS_ASID,
[ams::svc::DeviceName_Sata] = MC_SMMU_SATA_ASID,
[ams::svc::DeviceName_Vi] = MC_SMMU_VI_ASID,
[ams::svc::DeviceName_Vic] = MC_SMMU_VIC_ASID,
[ams::svc::DeviceName_XusbHost] = MC_SMMU_XUSB_HOST_ASID,
[ams::svc::DeviceName_XusbDev] = MC_SMMU_XUSB_DEV_ASID,
[ams::svc::DeviceName_Tsec] = MC_SMMU_TSEC_ASID,
[ams::svc::DeviceName_Ppcs1] = MC_SMMU_PPCS1_ASID,
[ams::svc::DeviceName_Dc1] = MC_SMMU_DC1_ASID,
[ams::svc::DeviceName_Sdmmc1a] = MC_SMMU_SDMMC1A_ASID,
[ams::svc::DeviceName_Sdmmc2a] = MC_SMMU_SDMMC2A_ASID,
[ams::svc::DeviceName_Sdmmc3a] = MC_SMMU_SDMMC3A_ASID,
[ams::svc::DeviceName_Sdmmc4a] = MC_SMMU_SDMMC4A_ASID,
[ams::svc::DeviceName_Isp2b] = MC_SMMU_ISP2B_ASID,
[ams::svc::DeviceName_Gpu] = MC_SMMU_GPU_ASID,
[ams::svc::DeviceName_Gpub] = MC_SMMU_GPUB_ASID,
[ams::svc::DeviceName_Ppcs2] = MC_SMMU_PPCS2_ASID,
[ams::svc::DeviceName_Nvdec] = MC_SMMU_NVDEC_ASID,
[ams::svc::DeviceName_Ape] = MC_SMMU_APE_ASID,
[ams::svc::DeviceName_Se] = MC_SMMU_SE_ASID,
[ams::svc::DeviceName_Nvjpg] = MC_SMMU_NVJPG_ASID,
[ams::svc::DeviceName_Hc1] = MC_SMMU_HC1_ASID,
[ams::svc::DeviceName_Se1] = MC_SMMU_SE1_ASID,
[ams::svc::DeviceName_Axiap] = MC_SMMU_AXIAP_ASID,
[ams::svc::DeviceName_Etr] = MC_SMMU_ETR_ASID,
[ams::svc::DeviceName_Tsecb] = MC_SMMU_TSECB_ASID,
[ams::svc::DeviceName_Tsec1] = MC_SMMU_TSEC1_ASID,
[ams::svc::DeviceName_Tsecb1] = MC_SMMU_TSECB1_ASID,
[ams::svc::DeviceName_Nvdec1] = MC_SMMU_NVDEC1_ASID,
};
static_assert(util::size(DeviceAsidRegisterOffsets) == ams::svc::DeviceName_Count);
constexpr bool DeviceAsidRegistersValid = [] {
for (size_t i = 0; i < ams::svc::DeviceName_Count; i++) {
if (DeviceAsidRegisterOffsets[i] == 0 || !util::IsAligned(DeviceAsidRegisterOffsets[i], sizeof(u32))) {
return false;
}
}
return true;
}();
static_assert(DeviceAsidRegistersValid);
constexpr ALWAYS_INLINE int GetDeviceAsidRegisterOffset(ams::svc::DeviceName dev) {
if (dev < ams::svc::DeviceName_Count) {
return DeviceAsidRegisterOffsets[dev];
} else {
return -1;
}
}
constexpr ams::svc::DeviceName HsDevices[] = {
ams::svc::DeviceName_Afi,
ams::svc::DeviceName_Dc,
ams::svc::DeviceName_Dcb,
ams::svc::DeviceName_Hda,
ams::svc::DeviceName_Isp2,
ams::svc::DeviceName_Sata,
ams::svc::DeviceName_Vi,
ams::svc::DeviceName_XusbHost,
ams::svc::DeviceName_XusbDev,
ams::svc::DeviceName_Tsec,
ams::svc::DeviceName_Dc1,
ams::svc::DeviceName_Sdmmc1a,
ams::svc::DeviceName_Sdmmc2a,
ams::svc::DeviceName_Sdmmc3a,
ams::svc::DeviceName_Sdmmc4a,
ams::svc::DeviceName_Isp2b,
ams::svc::DeviceName_Gpu,
ams::svc::DeviceName_Gpub,
ams::svc::DeviceName_Axiap,
ams::svc::DeviceName_Etr,
ams::svc::DeviceName_Tsecb,
ams::svc::DeviceName_Tsec1,
ams::svc::DeviceName_Tsecb1,
};
constexpr size_t NumHsDevices = util::size(HsDevices);
constexpr u64 HsDeviceMask = [] {
u64 mask = 0;
for (size_t i = 0; i < NumHsDevices; i++) {
mask |= 1ul << HsDevices[i];
}
return mask;
}();
constexpr ALWAYS_INLINE bool IsHsSupported(ams::svc::DeviceName dv) {
return (HsDeviceMask & (1ul << dv)) != 0;
}
constexpr ALWAYS_INLINE bool IsValidPhysicalAddress(KPhysicalAddress addr) {
return (static_cast<u64>(GetInteger(addr)) & ~PhysicalAddressMask) == 0;
}
constexpr struct { u64 start; u64 end; } SmmuSupportedRanges[] = {
[ams::svc::DeviceName_Afi] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Avpc] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_Dc] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Dcb] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Hc] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_Hda] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Isp2] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_MsencNvenc] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_Nv] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_Nv2] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_Ppcs] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_Sata] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Vi] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Vic] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_XusbHost] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_XusbDev] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Tsec] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Ppcs1] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_Dc1] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Sdmmc1a] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Sdmmc2a] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Sdmmc3a] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Sdmmc4a] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Isp2b] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Gpu] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Gpub] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Ppcs2] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_Nvdec] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_Ape] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_Se] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_Nvjpg] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_Hc1] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_Se1] = { 0x00000000ul, 0x0FFFFFFFFul },
[ams::svc::DeviceName_Axiap] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Etr] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Tsecb] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Tsec1] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Tsecb1] = { 0x00000000ul, 0x3FFFFFFFFul },
[ams::svc::DeviceName_Nvdec1] = { 0x00000000ul, 0x0FFFFFFFFul },
};
static_assert(util::size(SmmuSupportedRanges) == ams::svc::DeviceName_Count);
constexpr bool IsAttachable(ams::svc::DeviceName device_name, u64 space_address, u64 space_size) {
if (0 <= device_name && device_name < ams::svc::DeviceName_Count) {
const auto &range = SmmuSupportedRanges[device_name];
return range.start <= space_address && (space_address + space_size - 1) <= range.end;
}
return false;
}
/* Types. */
class EntryBase {
protected:
enum Bit : u32 {
Bit_Table = 28,
Bit_NonSecure = 29,
Bit_Writeable = 30,
Bit_Readable = 31,
};
private:
u32 m_value;
protected:
constexpr ALWAYS_INLINE u32 SelectBit(Bit n) const {
return (m_value & (1u << n));
}
template<Bit... Bits>
constexpr ALWAYS_INLINE u32 SelectBits() const {
constexpr u32 Mask = ((1u << Bits) | ...);
return m_value & Mask;
}
constexpr ALWAYS_INLINE bool GetBit(Bit n) const {
return this->SelectBit(n) != 0;
}
static constexpr ALWAYS_INLINE u32 EncodeBit(Bit n, bool en) {
return en ? (1u << n) : 0;
}
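/* Entry layout: bits 31/30/29 are readable/writeable/non-secure, bit 28 marks a next-level table, */
/* and the low 22 bits hold the 34-bit physical address shifted down by the 12 page bits. */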
static constexpr ALWAYS_INLINE u32 EncodeValue(bool r, bool w, bool ns, KPhysicalAddress addr, bool t) {
return EncodeBit(Bit_Readable, r) | EncodeBit(Bit_Writeable, w) | EncodeBit(Bit_NonSecure, ns) | EncodeBit(Bit_Table, t) | static_cast<u32>(addr >> DevicePageBits);
}
ALWAYS_INLINE void SetValue(u32 v) {
/* Prevent re-ordering around entry modifications. */
__asm__ __volatile__("" ::: "memory");
m_value = v;
__asm__ __volatile__("" ::: "memory");
}
public:
static constexpr ALWAYS_INLINE u32 EncodePtbDataValue(KPhysicalAddress addr) {
return EncodeValue(true, true, true, addr, false);
}
public:
constexpr ALWAYS_INLINE bool IsNonSecure() const { return this->GetBit(Bit_NonSecure); }
constexpr ALWAYS_INLINE bool IsWriteable() const { return this->GetBit(Bit_Writeable); }
constexpr ALWAYS_INLINE bool IsReadable() const { return this->GetBit(Bit_Readable); }
constexpr ALWAYS_INLINE bool IsValid() const { return this->SelectBits<Bit_Readable, Bit_Writeable>(); }
constexpr ALWAYS_INLINE u32 GetAttributes() const { return this->SelectBits<Bit_Readable, Bit_Writeable, Bit_NonSecure>(); }
constexpr ALWAYS_INLINE KPhysicalAddress GetPhysicalAddress() const { return (static_cast<u64>(m_value) << DevicePageBits) & PhysicalAddressMask; }
ALWAYS_INLINE void InvalidateAttributes() { this->SetValue(m_value & ~(0xCu << 28)); }
ALWAYS_INLINE void Invalidate() { this->SetValue(0); }
};
class PageDirectoryEntry : public EntryBase {
public:
constexpr ALWAYS_INLINE bool IsTable() const { return this->GetBit(Bit_Table); }
ALWAYS_INLINE void SetTable(bool r, bool w, bool ns, KPhysicalAddress addr) {
MESOSPHERE_ASSERT(IsValidPhysicalAddress(addr));
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), DevicePageSize));
this->SetValue(EncodeValue(r, w, ns, addr, true));
}
ALWAYS_INLINE void SetLargePage(bool r, bool w, bool ns, KPhysicalAddress addr) {
MESOSPHERE_ASSERT(IsValidPhysicalAddress(addr));
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), DeviceLargePageSize));
this->SetValue(EncodeValue(r, w, ns, addr, false));
}
};
class PageTableEntry : public EntryBase {
public:
ALWAYS_INLINE void SetPage(bool r, bool w, bool ns, KPhysicalAddress addr) {
MESOSPHERE_ASSERT(IsValidPhysicalAddress(addr));
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(addr), DevicePageSize));
this->SetValue(EncodeValue(r, w, ns, addr, true));
}
};
class KDeviceAsidManager {
private:
using WordType = u32;
static constexpr u8 ReservedAsids[] = { 0, 1, 2, 3 };
static constexpr size_t NumReservedAsids = util::size(ReservedAsids);
static constexpr size_t BitsPerWord = BITSIZEOF(WordType);
static constexpr size_t NumWords = AsidCount / BitsPerWord;
static constexpr WordType FullWord = ~WordType(0u);
private:
WordType m_state[NumWords];
KLightLock m_lock;
private:
constexpr void ReserveImpl(u8 asid) {
m_state[asid / BitsPerWord] |= (1u << (asid % BitsPerWord));
}
constexpr void ReleaseImpl(u8 asid) {
m_state[asid / BitsPerWord] &= ~(1u << (asid % BitsPerWord));
}
static constexpr ALWAYS_INLINE WordType ClearLeadingZero(WordType value) {
return __builtin_clzll(value) - (BITSIZEOF(unsigned long long) - BITSIZEOF(WordType));
}
public:
constexpr KDeviceAsidManager() : m_state(), m_lock() {
for (size_t i = 0; i < NumReservedAsids; i++) {
this->ReserveImpl(ReservedAsids[i]);
}
}
Result Reserve(u8 *out, size_t num_desired) {
KScopedLightLock lk(m_lock);
MESOSPHERE_ASSERT(num_desired > 0);
size_t num_reserved = 0;
for (size_t i = 0; i < NumWords; i++) {
while (m_state[i] != FullWord) {
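/* (state + 1) ^ state isolates the lowest clear bit together with all bits below it; */
/* or-ing that mask in sets the lowest free asid bit (the lower bits are already set), */
/* and the mask's highest set bit is that asid's index within the word. */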
const WordType clear_bit = (m_state[i] + 1) ^ (m_state[i]);
m_state[i] |= clear_bit;
out[num_reserved++] = static_cast<u8>(BitsPerWord * i + BitsPerWord - 1 - ClearLeadingZero(clear_bit));
R_SUCCEED_IF(num_reserved == num_desired);
}
}
/* We failed, so free what we reserved. */
for (size_t i = 0; i < num_reserved; i++) {
this->ReleaseImpl(out[i]);
}
R_THROW(svc::ResultOutOfResource());
}
void Release(u8 asid) {
KScopedLightLock lk(m_lock);
this->ReleaseImpl(asid);
}
};
/* Globals. */
constinit KLightLock g_lock;
constinit u8 g_reserved_asid;
constinit KPhysicalAddress g_memory_controller_address{Null<KPhysicalAddress>};
constinit KPhysicalAddress g_reserved_table_phys_addr{Null<KPhysicalAddress>};
constinit KDeviceAsidManager g_asid_manager;
constinit u32 g_saved_page_tables[AsidCount];
constinit u32 g_saved_asid_registers[ams::svc::DeviceName_Count];
/* Memory controller access functionality. */
void WriteMcRegister(size_t offset, u32 value) {
KSystemControl::WriteRegisterPrivileged(GetInteger(g_memory_controller_address) + offset, value);
}
u32 ReadMcRegister(size_t offset) {
return KSystemControl::ReadRegisterPrivileged(GetInteger(g_memory_controller_address) + offset);
}
/* Memory controller interrupt functionality. */
constexpr const char * const MemoryControllerClientNames[138] = {
[ 0] = "csr_ptcr (ptc)",
[ 1] = "csr_display0a (dc)",
[ 2] = "csr_display0ab (dcb)",
[ 3] = "csr_display0b (dc)",
[ 4] = "csr_display0bb (dcb)",
[ 5] = "csr_display0c (dc)",
[ 6] = "csr_display0cb (dcb)",
[ 7] = "Unknown Client",
[ 8] = "Unknown Client",
[ 9] = "Unknown Client",
[ 10] = "Unknown Client",
[ 11] = "Unknown Client",
[ 12] = "Unknown Client",
[ 13] = "Unknown Client",
[ 14] = "csr_afir (afi)",
[ 15] = "csr_avpcarm7r (avpc)",
[ 16] = "csr_displayhc (dc)",
[ 17] = "csr_displayhcb (dcb)",
[ 18] = "Unknown Client",
[ 19] = "Unknown Client",
[ 20] = "Unknown Client",
[ 21] = "csr_hdar (hda)",
[ 22] = "csr_host1xdmar (hc)",
[ 23] = "csr_host1xr (hc)",
[ 24] = "Unknown Client",
[ 25] = "Unknown Client",
[ 26] = "Unknown Client",
[ 27] = "Unknown Client",
[ 28] = "csr_nvencsrd (nvenc)",
[ 29] = "csr_ppcsahbdmar (ppcs)",
[ 30] = "csr_ppcsahbslvr (ppcs)",
[ 31] = "csr_satar (sata)",
[ 32] = "Unknown Client",
[ 33] = "Unknown Client",
[ 34] = "Unknown Client",
[ 35] = "Unknown Client",
[ 36] = "Unknown Client",
[ 37] = "Unknown Client",
[ 38] = "Unknown Client",
[ 39] = "csr_mpcorer (cpu)",
[ 40] = "Unknown Client",
[ 41] = "Unknown Client",
[ 42] = "Unknown Client",
[ 43] = "csw_nvencswr (nvenc)",
[ 44] = "Unknown Client",
[ 45] = "Unknown Client",
[ 46] = "Unknown Client",
[ 47] = "Unknown Client",
[ 48] = "Unknown Client",
[ 49] = "csw_afiw (afi)",
[ 50] = "csw_avpcarm7w (avpc)",
[ 51] = "Unknown Client",
[ 52] = "Unknown Client",
[ 53] = "csw_hdaw (hda)",
[ 54] = "csw_host1xw (hc)",
[ 55] = "Unknown Client",
[ 56] = "Unknown Client",
[ 57] = "csw_mpcorew (cpu)",
[ 58] = "Unknown Client",
[ 59] = "csw_ppcsahbdmaw (ppcs)",
[ 60] = "csw_ppcsahbslvw (ppcs)",
[ 61] = "csw_sataw (sata)",
[ 62] = "Unknown Client",
[ 63] = "Unknown Client",
[ 64] = "Unknown Client",
[ 65] = "Unknown Client",
[ 66] = "Unknown Client",
[ 67] = "Unknown Client",
[ 68] = "csr_ispra (isp2)",
[ 69] = "Unknown Client",
[ 70] = "csw_ispwa (isp2)",
[ 71] = "csw_ispwb (isp2)",
[ 72] = "Unknown Client",
[ 73] = "Unknown Client",
[ 74] = "csr_xusb_hostr (xusb_host)",
[ 75] = "csw_xusb_hostw (xusb_host)",
[ 76] = "csr_xusb_devr (xusb_dev)",
[ 77] = "csw_xusb_devw (xusb_dev)",
[ 78] = "csr_isprab (isp2b)",
[ 79] = "Unknown Client",
[ 80] = "csw_ispwab (isp2b)",
[ 81] = "csw_ispwbb (isp2b)",
[ 82] = "Unknown Client",
[ 83] = "Unknown Client",
[ 84] = "csr_tsecsrd (tsec)",
[ 85] = "csw_tsecswr (tsec)",
[ 86] = "csr_a9avpscr (a9avp)",
[ 87] = "csw_a9avpscw (a9avp)",
[ 88] = "csr_gpusrd (gpu)",
[ 89] = "csw_gpuswr (gpu)",
[ 90] = "csr_displayt (dc)",
[ 91] = "Unknown Client",
[ 92] = "Unknown Client",
[ 93] = "Unknown Client",
[ 94] = "Unknown Client",
[ 95] = "Unknown Client",
[ 96] = "csr_sdmmcra (sdmmc1a)",
[ 97] = "csr_sdmmcraa (sdmmc2a)",
[ 98] = "csr_sdmmcr (sdmmc3a)",
[ 99] = "csr_sdmmcrab (sdmmc4a)",
[100] = "csw_sdmmcwa (sdmmc1a)",
[101] = "csw_sdmmcwaa (sdmmc2a)",
[102] = "csw_sdmmcw (sdmmc3a)",
[103] = "csw_sdmmcwab (sdmmc4a)",
[104] = "Unknown Client",
[105] = "Unknown Client",
[106] = "Unknown Client",
[107] = "Unknown Client",
[108] = "csr_vicsrd (vic)",
[109] = "csw_vicswr (vic)",
[110] = "Unknown Client",
[111] = "Unknown Client",
[112] = "Unknown Client",
[113] = "Unknown Client",
[114] = "csw_viw (vi)",
[115] = "csr_displayd (dc)",
[116] = "Unknown Client",
[117] = "Unknown Client",
[118] = "Unknown Client",
[119] = "Unknown Client",
[120] = "csr_nvdecsrd (nvdec)",
[121] = "csw_nvdecswr (nvdec)",
[122] = "csr_aper (ape)",
[123] = "csw_apew (ape)",
[124] = "Unknown Client",
[125] = "Unknown Client",
[126] = "csr_nvjpgsrd (nvjpg)",
[127] = "csw_nvjpgswr (nvjpg)",
[128] = "csr_sesrd (se)",
[129] = "csw_seswr (se)",
[130] = "csr_axiapr (axiap)",
[131] = "csw_axiapw (axiap)",
[132] = "csr_etrr (etr)",
[133] = "csw_etrw (etr)",
[134] = "csr_tsecsrdb (tsecb)",
[135] = "csw_tsecswrb (tsecb)",
[136] = "csr_gpusrd2 (gpu)",
[137] = "csw_gpuswr2 (gpu)",
};
constexpr const char * GetMemoryControllerClientName(size_t i) {
if (i < util::size(MemoryControllerClientNames)) {
return MemoryControllerClientNames[i];
}
return "Unknown Client";
}
constexpr const char * const MemoryControllerErrorTypes[8] = {
"RSVD",
"Unknown",
"DECERR_EMEM",
"SECURITY_TRUSTZONE",
"SECURITY_CARVEOUT",
"Unknown",
"INVALID_SMMU_PAGE",
"Unknown",
};
class KMemoryControllerInterruptTask : public KInterruptTask {
public:
constexpr KMemoryControllerInterruptTask() : KInterruptTask() { /* ... */ }
virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override {
MESOSPHERE_UNUSED(interrupt_id);
return this;
}
virtual void DoTask() override {
#if defined(MESOSPHERE_ENABLE_MEMORY_CONTROLLER_INTERRUPT)
{
/* Clear the interrupt when we're done. */
ON_SCOPE_EXIT { Kernel::GetInterruptManager().ClearInterrupt(KInterruptName_MemoryController, GetCurrentCoreId()); };
/* Get and clear the interrupt status. */
u32 int_status, err_status, err_adr;
{
int_status = ReadMcRegister(MC_INTSTATUS);
err_status = ReadMcRegister(MC_ERR_STATUS);
err_adr = ReadMcRegister(MC_ERR_ADR);
WriteMcRegister(MC_INTSTATUS, int_status);
}
/* Print the interrupt. */
{
constexpr auto GetBits = [](u32 value, size_t ofs, size_t count) ALWAYS_INLINE_LAMBDA {
return (value >> ofs) & ((1u << count) - 1);
};
constexpr auto GetBit = [GetBits](u32 value, size_t ofs) ALWAYS_INLINE_LAMBDA {
return GetBits(value, ofs, 1);
};
MESOSPHERE_RELEASE_LOG("sMMU error interrupt\n");
MESOSPHERE_RELEASE_LOG(" MC_INTSTATUS=%08x\n", int_status);
MESOSPHERE_RELEASE_LOG(" DECERR_GENERALIZED_CARVEOUT=%d\n", GetBit(int_status, 17));
MESOSPHERE_RELEASE_LOG(" DECERR_MTS=%d\n", GetBit(int_status, 16));
MESOSPHERE_RELEASE_LOG(" SECERR_SEC=%d\n", GetBit(int_status, 13));
MESOSPHERE_RELEASE_LOG(" DECERR_VPR=%d\n", GetBit(int_status, 12));
MESOSPHERE_RELEASE_LOG(" INVALID_APB_ASID_UPDATE=%d\n", GetBit(int_status, 11));
MESOSPHERE_RELEASE_LOG(" INVALID_SMMU_PAGE=%d\n", GetBit(int_status, 10));
MESOSPHERE_RELEASE_LOG(" ARBITRATION_EMEM=%d\n", GetBit(int_status, 9));
MESOSPHERE_RELEASE_LOG(" SECURITY_VIOLATION=%d\n", GetBit(int_status, 8));
MESOSPHERE_RELEASE_LOG(" DECERR_EMEM=%d\n", GetBit(int_status, 6));
MESOSPHERE_RELEASE_LOG(" MC_ERRSTATUS=%08x\n", err_status);
MESOSPHERE_RELEASE_LOG(" ERR_TYPE=%d (%s)\n", GetBits(err_status, 28, 3), MemoryControllerErrorTypes[GetBits(err_status, 28, 3)]);
MESOSPHERE_RELEASE_LOG(" ERR_INVALID_SMMU_PAGE_READABLE=%d\n", GetBit (err_status, 27));
MESOSPHERE_RELEASE_LOG(" ERR_INVALID_SMMU_PAGE_WRITABLE=%d\n", GetBit (err_status, 26));
MESOSPHERE_RELEASE_LOG(" ERR_INVALID_SMMU_NONSECURE=%d\n", GetBit (err_status, 25));
MESOSPHERE_RELEASE_LOG(" ERR_ADR_HI=%x\n", GetBits(err_status, 20, 2));
MESOSPHERE_RELEASE_LOG(" ERR_SWAP=%d\n", GetBit (err_status, 18));
MESOSPHERE_RELEASE_LOG(" ERR_SECURITY=%d %s\n", GetBit (err_status, 17), GetBit(err_status, 17) ? "SECURE" : "NONSECURE");
MESOSPHERE_RELEASE_LOG(" ERR_RW=%d %s\n", GetBit (err_status, 16), GetBit(err_status, 16) ? "WRITE" : "READ");
MESOSPHERE_RELEASE_LOG(" ERR_ADR1=%x\n", GetBits(err_status, 12, 3));
MESOSPHERE_RELEASE_LOG(" ERR_ID=%d %s\n", GetBits(err_status, 0, 8), GetMemoryControllerClientName(GetBits(err_status, 0, 8)));
MESOSPHERE_RELEASE_LOG(" MC_ERRADR=%08x\n", err_adr);
MESOSPHERE_RELEASE_LOG(" ERR_ADR=%lx\n", (static_cast<u64>(GetBits(err_status, 20, 2)) << 32) | static_cast<u64>(err_adr));
MESOSPHERE_RELEASE_LOG("\n");
}
}
#endif
}
};
/* Interrupt task global. */
constinit KMemoryControllerInterruptTask g_mc_interrupt_task;
/* Memory controller utilities. */
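/* NOTE: Reading back MC_SMMU_CONFIG is presumably what forces prior SMMU register writes to */
/* complete before execution continues; the read-back itself acts as the barrier. */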
ALWAYS_INLINE void SmmuSynchronizationBarrier() {
ReadMcRegister(MC_SMMU_CONFIG);
}
ALWAYS_INLINE void InvalidatePtc() {
WriteMcRegister(MC_SMMU_PTC_FLUSH_0, 0);
}
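/* Flush a single page table cache entry: the high physical address bits go to PTC_FLUSH_1, the */
/* 16-byte-aligned low bits to PTC_FLUSH_0, with bit 0 presumably selecting an address-matched */
/* (rather than full) flush. */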
ALWAYS_INLINE void InvalidatePtc(KPhysicalAddress address) {
WriteMcRegister(MC_SMMU_PTC_FLUSH_1, (static_cast<u64>(GetInteger(address)) >> 32));
WriteMcRegister(MC_SMMU_PTC_FLUSH_0, (GetInteger(address) & 0xFFFFFFF0u) | 1u);
}
enum TlbFlushVaMatch : u32 {
TlbFlushVaMatch_All = 0,
TlbFlushVaMatch_Section = 2,
TlbFlushVaMatch_Group = 3,
};
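/* Flush value layout: bit 31 enables asid matching, bits 30:24 hold the asid, bits 19:10 the */
/* 4MB-aligned virtual address (bits 31:22 shifted down by the page bits), and the low bits the match mode. */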
static constexpr ALWAYS_INLINE u32 EncodeTlbFlushValue(bool match_asid, u8 asid, KDeviceVirtualAddress address, TlbFlushVaMatch match) {
return ((match_asid ? 1u : 0u) << 31) | ((asid & 0x7F) << 24) | (((address & 0xFFC00000u) >> DevicePageBits)) | (match);
}
ALWAYS_INLINE void InvalidateTlb() {
return WriteMcRegister(MC_SMMU_TLB_FLUSH, EncodeTlbFlushValue(false, 0, 0, TlbFlushVaMatch_All));
}
ALWAYS_INLINE void InvalidateTlb(u8 asid) {
return WriteMcRegister(MC_SMMU_TLB_FLUSH, EncodeTlbFlushValue(true, asid, 0, TlbFlushVaMatch_All));
}
ALWAYS_INLINE void InvalidateTlbSection(u8 asid, KDeviceVirtualAddress address) {
return WriteMcRegister(MC_SMMU_TLB_FLUSH, EncodeTlbFlushValue(true, asid, address, TlbFlushVaMatch_Section));
}
void SetTable(u8 asid, KPhysicalAddress address) {
/* Write the table address. */
{
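/* NOTE: MC_SMMU_PTB_DATA appears to be banked by asid: writing PTB_ASID selects which asid's */
/* table the PTB_DATA access targets, which is why the register pair is written under the lock. */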
KScopedLightLock lk(g_lock);
WriteMcRegister(MC_SMMU_PTB_ASID, asid);
WriteMcRegister(MC_SMMU_PTB_DATA, EntryBase::EncodePtbDataValue(address));
SmmuSynchronizationBarrier();
}
/* Ensure consistency. */
InvalidatePtc();
InvalidateTlb(asid);
SmmuSynchronizationBarrier();
}
}
void KDevicePageTable::Initialize() {
/* Set the memory controller register address. */
g_memory_controller_address = KMemoryLayout::GetDevicePhysicalAddress(KMemoryRegionType_MemoryController);
/* Allocate a page to use as a reserved/no device table. */
auto &ptm = Kernel::GetSystemSystemResource().GetPageTableManager();
const KVirtualAddress table_virt_addr = ptm.Allocate();
MESOSPHERE_ABORT_UNLESS(table_virt_addr != Null<KVirtualAddress>);
const KPhysicalAddress table_phys_addr = GetPageTablePhysicalAddress(table_virt_addr);
MESOSPHERE_ASSERT(IsValidPhysicalAddress(table_phys_addr));
ptm.Open(table_virt_addr, 1);
/* Save the page. Note that it is a pre-condition that the page is cleared when allocated from the system page table manager. */
/* NOTE: Nintendo does not check the result of StoreDataCache. */
cpu::StoreDataCache(GetVoidPointer(table_virt_addr), PageDirectorySize);
g_reserved_table_phys_addr = table_phys_addr;
/* Reserve an asid to correspond to no device. */
MESOSPHERE_R_ABORT_UNLESS(g_asid_manager.Reserve(std::addressof(g_reserved_asid), 1));
/* Set all asids to the reserved table. */
static_assert(AsidCount <= std::numeric_limits<u8>::max());
for (size_t i = 0; i < AsidCount; i++) {
SetTable(static_cast<u8>(i), g_reserved_table_phys_addr);
}
/* Set all devices to the reserved asid. */
for (size_t i = 0; i < ams::svc::DeviceName_Count; i++) {
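/* Bit 31 enables translation for the device; devices whose supported range covers the full */
/* 34-bit space take one asid per 4GB region (packed one per byte), while all others take a */
/* single asid in the low byte. */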
u32 value = 0x80000000u;
if (IsHsSupported(static_cast<ams::svc::DeviceName>(i))) {
for (size_t t = 0; t < TableCount; t++) {
value |= (g_reserved_asid << (BITSIZEOF(u8) * t));
}
} else {
value |= g_reserved_asid;
}
WriteMcRegister(GetDeviceAsidRegisterOffset(static_cast<ams::svc::DeviceName>(i)), value);
SmmuSynchronizationBarrier();
}
/* Ensure consistency. */
InvalidatePtc();
InvalidateTlb();
SmmuSynchronizationBarrier();
/* Clear int status. */
WriteMcRegister(MC_INTSTATUS, ReadMcRegister(MC_INTSTATUS));
/* If we're setting an interrupt handler, unmask all interrupts. */
#if defined(MESOSPHERE_ENABLE_MEMORY_CONTROLLER_INTERRUPT)
{
WriteMcRegister(MC_INTMASK, 0x33D40);
}
#endif
/* Enable the SMMU. */
WriteMcRegister(MC_SMMU_CONFIG, 1);
SmmuSynchronizationBarrier();
/* Install interrupt handler. */
#if defined(MESOSPHERE_ENABLE_MEMORY_CONTROLLER_INTERRUPT)
{
Kernel::GetInterruptManager().BindHandler(std::addressof(g_mc_interrupt_task), KInterruptName_MemoryController, GetCurrentCoreId(), KInterruptController::PriorityLevel_High, true, true);
}
#endif
}
void KDevicePageTable::Lock() {
g_lock.Lock();
}
void KDevicePageTable::Unlock() {
g_lock.Unlock();
}
void KDevicePageTable::Sleep() {
/* Save all page tables. */
for (size_t i = 0; i < AsidCount; ++i) {
WriteMcRegister(MC_SMMU_PTB_ASID, i);
SmmuSynchronizationBarrier();
g_saved_page_tables[i] = ReadMcRegister(MC_SMMU_PTB_DATA);
}
/* Save all asid registers. */
for (size_t i = 0; i < ams::svc::DeviceName_Count; ++i) {
g_saved_asid_registers[i] = ReadMcRegister(GetDeviceAsidRegisterOffset(static_cast<ams::svc::DeviceName>(i)));
}
}
void KDevicePageTable::Wakeup() {
/* Synchronize. */
InvalidatePtc();
InvalidateTlb();
SmmuSynchronizationBarrier();
/* Disable the SMMU. */
WriteMcRegister(MC_SMMU_CONFIG, 0);
/* Restore the page tables. */
for (size_t i = 0; i < AsidCount; ++i) {
WriteMcRegister(MC_SMMU_PTB_ASID, i);
SmmuSynchronizationBarrier();
WriteMcRegister(MC_SMMU_PTB_DATA, g_saved_page_tables[i]);
}
SmmuSynchronizationBarrier();
/* Restore the asid registers. */
for (size_t i = 0; i < ams::svc::DeviceName_Count; ++i) {
WriteMcRegister(GetDeviceAsidRegisterOffset(static_cast<ams::svc::DeviceName>(i)), g_saved_asid_registers[i]);
SmmuSynchronizationBarrier();
}
/* Synchronize. */
InvalidatePtc();
InvalidateTlb();
SmmuSynchronizationBarrier();
/* Enable the SMMU. */
WriteMcRegister(MC_SMMU_CONFIG, 1);
SmmuSynchronizationBarrier();
}
/* Member functions. */
Result KDevicePageTable::Initialize(u64 space_address, u64 space_size) {
/* Ensure space is valid. */
R_UNLESS(((space_address + space_size - 1) & ~DeviceVirtualAddressMask) == 0, svc::ResultInvalidMemoryRegion());
/* Determine extents. */
const size_t start_index = space_address / DeviceRegionSize;
const size_t end_index = (space_address + space_size - 1) / DeviceRegionSize;
/* Get the page table manager. */
auto &ptm = Kernel::GetSystemSystemResource().GetPageTableManager();
/* Clear the tables. */
static_assert(TableCount == (1ul << DeviceVirtualAddressBits) / DeviceRegionSize);
for (size_t i = 0; i < TableCount; ++i) {
m_tables[i] = Null<KVirtualAddress>;
}
/* Ensure that we clean up the tables on failure. */
ON_RESULT_FAILURE {
for (size_t i = start_index; i <= end_index; ++i) {
if (m_tables[i] != Null<KVirtualAddress> && ptm.Close(m_tables[i], 1)) {
ptm.Free(m_tables[i]);
}
}
};
/* Allocate a table for all required indices. */
for (size_t i = start_index; i <= end_index; ++i) {
const KVirtualAddress table_vaddr = ptm.Allocate();
R_UNLESS(table_vaddr != Null<KVirtualAddress>, svc::ResultOutOfMemory());
MESOSPHERE_ASSERT(IsValidPhysicalAddress(GetPageTablePhysicalAddress(table_vaddr)));
ptm.Open(table_vaddr, 1);
cpu::StoreDataCache(GetVoidPointer(table_vaddr), PageDirectorySize);
m_tables[i] = table_vaddr;
}
/* Clear asids. */
for (size_t i = 0; i < TableCount; ++i) {
m_table_asids[i] = g_reserved_asid;
}
/* Reserve asids for the tables. */
R_TRY(g_asid_manager.Reserve(std::addressof(m_table_asids[start_index]), end_index - start_index + 1));
/* Associate tables with asids. */
for (size_t i = start_index; i <= end_index; ++i) {
SetTable(m_table_asids[i], GetPageTablePhysicalAddress(m_tables[i]));
}
/* Set member variables. */
m_attached_device = 0;
m_attached_value = (1u << 31) | m_table_asids[0];
m_detached_value = (1u << 31) | g_reserved_asid;
m_hs_attached_value = (1u << 31);
m_hs_detached_value = (1u << 31);
for (size_t i = 0; i < TableCount; ++i) {
m_hs_attached_value |= (m_table_asids[i] << (i * BITSIZEOF(u8)));
m_hs_detached_value |= (g_reserved_asid << (i * BITSIZEOF(u8)));
}
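/* For multi-table (HS) devices the asid register presumably holds one asid per byte, */
/* one byte per region table, while bit 31 appears to act as a translation-enable bit; */
/* both readings are inferred from the packing above rather than from documentation. */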
/* We succeeded. */
R_SUCCEED();
}
void KDevicePageTable::Finalize() {
/* Get the page table manager. */
auto &ptm = Kernel::GetSystemSystemResource().GetPageTableManager();
/* Detach from all devices. */
{
KScopedLightLock lk(g_lock);
for (size_t i = 0; i < ams::svc::DeviceName_Count; ++i) {
const auto device_name = static_cast<ams::svc::DeviceName>(i);
if ((m_attached_device & (1ul << device_name)) != 0) {
WriteMcRegister(GetDeviceAsidRegisterOffset(device_name), IsHsSupported(device_name) ? m_hs_detached_value : m_detached_value);
SmmuSynchronizationBarrier();
}
}
}
/* Forcibly unmap all pages. */
this->UnmapImpl(0, (1ul << DeviceVirtualAddressBits), false);
/* Release all asids. */
for (size_t i = 0; i < TableCount; ++i) {
if (m_table_asids[i] != g_reserved_asid) {
/* Set the table to the reserved table. */
SetTable(m_table_asids[i], g_reserved_table_phys_addr);
/* Close the table. */
const KVirtualAddress table_vaddr = m_tables[i];
MESOSPHERE_ASSERT(ptm.GetRefCount(table_vaddr) == 1);
MESOSPHERE_ABORT_UNLESS(ptm.Close(table_vaddr, 1));
/* Free the table. */
ptm.Free(table_vaddr);
/* Release the asid. */
g_asid_manager.Release(m_table_asids[i]);
}
}
}
Result KDevicePageTable::Attach(ams::svc::DeviceName device_name, u64 space_address, u64 space_size) {
/* Validate the device name. */
R_UNLESS(0 <= device_name, svc::ResultNotFound());
R_UNLESS(device_name < ams::svc::DeviceName_Count, svc::ResultNotFound());
/* Check that the device isn't already attached. */
R_UNLESS((m_attached_device & (1ul << device_name)) == 0, svc::ResultBusy());
/* Validate that the space is allowed for the device. */
const size_t end_index = (space_address + space_size - 1) / DeviceRegionSize;
R_UNLESS(end_index == 0 || IsHsSupported(device_name), svc::ResultInvalidCombination());
/* Validate that the device can be attached. */
R_UNLESS(IsAttachable(device_name, space_address, space_size), svc::ResultInvalidCombination());
/* Get the device asid register offset. */
const int reg_offset = GetDeviceAsidRegisterOffset(device_name);
R_UNLESS(reg_offset >= 0, svc::ResultNotFound());
/* Determine the old/new values. */
const u32 old_val = IsHsSupported(device_name) ? m_hs_detached_value : m_detached_value;
const u32 new_val = IsHsSupported(device_name) ? m_hs_attached_value : m_attached_value;
/* Attach the device. */
{
KScopedLightLock lk(g_lock);
/* Validate that the device is unclaimed. */
R_UNLESS((ReadMcRegister(reg_offset) | (1u << 31)) == (old_val | (1u << 31)), svc::ResultBusy());
/* Claim the device. */
WriteMcRegister(reg_offset, new_val);
SmmuSynchronizationBarrier();
/* Ensure that we claimed it successfully. */
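/* (An assumption: the write can be silently ignored if the register is held elsewhere, */
/* e.g. by the secure monitor, which is why we read back and roll back on mismatch.) */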
if (ReadMcRegister(reg_offset) != new_val) {
WriteMcRegister(reg_offset, old_val);
SmmuSynchronizationBarrier();
R_THROW(svc::ResultNotFound());
}
}
/* Mark the device as attached. */
m_attached_device |= (1ul << device_name);
R_SUCCEED();
}
Result KDevicePageTable::Detach(ams::svc::DeviceName device_name) {
/* Validate the device name. */
R_UNLESS(0 <= device_name, svc::ResultNotFound());
R_UNLESS(device_name < ams::svc::DeviceName_Count, svc::ResultNotFound());
/* Check that the device is already attached. */
R_UNLESS((m_attached_device & (1ul << device_name)) != 0, svc::ResultInvalidState());
/* Get the device asid register offset. */
const int reg_offset = GetDeviceAsidRegisterOffset(device_name);
R_UNLESS(reg_offset >= 0, svc::ResultNotFound());
/* Determine the old/new values. */
const u32 old_val = IsHsSupported(device_name) ? m_hs_attached_value : m_attached_value;
const u32 new_val = IsHsSupported(device_name) ? m_hs_detached_value : m_detached_value;
/* When not building for debug, the old value might be unused. */
AMS_UNUSED(old_val);
/* Detach the device. */
{
KScopedLightLock lk(g_lock);
/* Check that the device is attached. */
MESOSPHERE_ASSERT(ReadMcRegister(reg_offset) == old_val);
/* Release the device. */
WriteMcRegister(reg_offset, new_val);
SmmuSynchronizationBarrier();
/* Check that the device was released. */
MESOSPHERE_ASSERT((ReadMcRegister(reg_offset) | (1u << 31)) == (new_val | 1u << 31));
}
/* Mark the device as detached. */
m_attached_device &= ~(1ul << device_name);
R_SUCCEED();
}
bool KDevicePageTable::IsFree(KDeviceVirtualAddress address, u64 size) const {
MESOSPHERE_ASSERT((address & ~DeviceVirtualAddressMask) == 0);
MESOSPHERE_ASSERT(((address + size - 1) & ~DeviceVirtualAddressMask) == 0);
/* Walk the directory, looking for entries. */
u64 remaining = size;
while (remaining > 0) {
const size_t l0_index = (address / DeviceRegionSize);
const size_t l1_index = (address % DeviceRegionSize) / DeviceLargePageSize;
const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize;
const PageDirectoryEntry *l1 = GetPointer<PageDirectoryEntry>(m_tables[l0_index]);
if (l1 == nullptr || !l1[l1_index].IsValid()) {
const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index;
const size_t map_count = std::min<size_t>(remaining_in_entry, remaining / DevicePageSize);
address += DevicePageSize * map_count;
remaining -= DevicePageSize * map_count;
} else if (l1[l1_index].IsTable()) {
const PageTableEntry *l2 = GetPointer<PageTableEntry>(GetPageTableVirtualAddress(l1[l1_index].GetPhysicalAddress()));
const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index;
const size_t map_count = std::min<size_t>(remaining_in_entry, remaining / DevicePageSize);
for (size_t i = 0; i < map_count; ++i) {
if (l2[l2_index + i].IsValid()) {
return false;
}
}
address += DevicePageSize * map_count;
remaining -= DevicePageSize * map_count;
} else {
/* If we have an entry, we're not free. */
return false;
}
}
return true;
}
Result KDevicePageTable::MapDevicePage(KPhysicalAddress phys_addr, u64 size, KDeviceVirtualAddress address, ams::svc::MemoryPermission device_perm) {
/* Ensure that the physical address is valid. */
R_UNLESS(IsValidPhysicalAddress(static_cast<u64>(GetInteger(phys_addr)) + size - 1), svc::ResultInvalidCurrentMemory());
MESOSPHERE_ASSERT((address & ~DeviceVirtualAddressMask) == 0);
MESOSPHERE_ASSERT(((address + size - 1) & ~DeviceVirtualAddressMask) == 0);
/* Get the memory manager and page table manager. */
KMemoryManager &mm = Kernel::GetMemoryManager();
KPageTableManager &ptm = Kernel::GetSystemSystemResource().GetPageTableManager();
/* Cache permissions. */
const bool read = (device_perm & ams::svc::MemoryPermission_Read) != 0;
const bool write = (device_perm & ams::svc::MemoryPermission_Write) != 0;
/* Walk the directory. */
u64 remaining = size;
while (remaining > 0) {
const size_t l0_index = (address / DeviceRegionSize);
const size_t l1_index = (address % DeviceRegionSize) / DeviceLargePageSize;
const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize;
/* Get and validate l1. */
PageDirectoryEntry *l1 = GetPointer<PageDirectoryEntry>(m_tables[l0_index]);
MESOSPHERE_ASSERT(l1 != nullptr);
/* Setup an l1 table/entry, if needed. */
if (!l1[l1_index].IsTable()) {
/* Check that an entry doesn't already exist. */
MESOSPHERE_ASSERT(!l1[l1_index].IsValid());
/* If we can make an l1 entry, do so. */
if (l2_index == 0 && util::IsAligned(GetInteger(phys_addr), DeviceLargePageSize) && remaining >= DeviceLargePageSize) {
/* Set the large page. */
l1[l1_index].SetLargePage(read, write, true, phys_addr);
cpu::StoreDataCache(std::addressof(l1[l1_index]), sizeof(PageDirectoryEntry));
/* Synchronize. */
InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index]))));
InvalidateTlbSection(m_table_asids[l0_index], address);
SmmuSynchronizationBarrier();
/* Open references to the pages. */
mm.Open(phys_addr, DeviceLargePageSize / PageSize);
/* Advance. */
phys_addr += DeviceLargePageSize;
address += DeviceLargePageSize;
remaining -= DeviceLargePageSize;
continue;
} else {
/* Make an l1 table. */
const KVirtualAddress table_vaddr = ptm.Allocate();
R_UNLESS(table_vaddr != Null<KVirtualAddress>, svc::ResultOutOfMemory());
MESOSPHERE_ASSERT(IsValidPhysicalAddress(GetPageTablePhysicalAddress(table_vaddr)));
cpu::StoreDataCache(GetVoidPointer(table_vaddr), PageTableSize);
/* Set the l1 table. */
l1[l1_index].SetTable(true, true, true, GetPageTablePhysicalAddress(table_vaddr));
cpu::StoreDataCache(std::addressof(l1[l1_index]), sizeof(PageDirectoryEntry));
/* Synchronize. */
InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index]))));
InvalidateTlbSection(m_table_asids[l0_index], address);
SmmuSynchronizationBarrier();
}
}
/* If we get to this point, l1 must be a table. */
MESOSPHERE_ASSERT(l1[l1_index].IsTable());
/* Map l2 entries. */
{
PageTableEntry *l2 = GetPointer<PageTableEntry>(GetPageTableVirtualAddress(l1[l1_index].GetPhysicalAddress()));
const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index;
const size_t map_count = std::min<size_t>(remaining_in_entry, remaining / DevicePageSize);
/* Set the entries. */
for (size_t i = 0; i < map_count; ++i) {
MESOSPHERE_ASSERT(!l2[l2_index + i].IsValid());
l2[l2_index + i].SetPage(read, write, true, phys_addr + DevicePageSize * i);
/* Add a reference to the l2 page (from the l2 entry page). */
ptm.Open(KVirtualAddress(l2), 1);
}
cpu::StoreDataCache(std::addressof(l2[l2_index]), map_count * sizeof(PageTableEntry));
/* Invalidate the page table cache. */
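/* The AlignDown(..., 4) stride presumably reflects a 16-byte PTC line, i.e. one */
/* maintenance operation covers four 4-byte entries. */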
for (size_t i = util::AlignDown(l2_index, 4); i <= util::AlignDown(l2_index + map_count - 1, 4); i += 4) {
InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l2[i]))));
}
/* Synchronize. */
InvalidateTlbSection(m_table_asids[l0_index], address);
SmmuSynchronizationBarrier();
/* Open references to the pages. */
mm.Open(phys_addr, (map_count * DevicePageSize) / PageSize);
/* Advance. */
phys_addr += map_count * DevicePageSize;
address += map_count * DevicePageSize;
remaining -= map_count * DevicePageSize;
}
}
R_SUCCEED();
}
Result KDevicePageTable::MapImpl(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
/* Ensure that the region we're mapping to is free. */
R_UNLESS(this->IsFree(device_address, size), svc::ResultInvalidCurrentMemory());
/* Ensure that if we fail, we unmap anything we mapped. */
ON_RESULT_FAILURE { this->UnmapImpl(device_address, size, false); };
/* Iterate, mapping device pages. */
KDeviceVirtualAddress cur_addr = device_address;
size_t mapped_size = 0;
while (mapped_size < size) {
/* Map the next contiguous range. */
size_t cur_size;
{
/* Get the current contiguous range. */
KPageTableBase::MemoryRange contig_range;
R_TRY(page_table->OpenMemoryRangeForMapDeviceAddressSpace(std::addressof(contig_range), process_address + mapped_size, size - mapped_size, ConvertToKMemoryPermission(device_perm), is_aligned));
/* Ensure we close the range when we're done. */
ON_SCOPE_EXIT { contig_range.Close(); };
/* Get the current size. */
cur_size = contig_range.GetSize();
/* Map the device page. */
R_TRY(this->MapDevicePage(contig_range.GetAddress(), cur_size, cur_addr, device_perm));
}
/* Advance. */
cur_addr += cur_size;
mapped_size += cur_size;
}
R_SUCCEED();
}
void KDevicePageTable::UnmapImpl(KDeviceVirtualAddress address, u64 size, bool force) {
MESOSPHERE_UNUSED(force);
MESOSPHERE_ASSERT((address & ~DeviceVirtualAddressMask) == 0);
MESOSPHERE_ASSERT(((address + size - 1) & ~DeviceVirtualAddressMask) == 0);
/* Get the memory manager and page table manager. */
KMemoryManager &mm = Kernel::GetMemoryManager();
KPageTableManager &ptm = Kernel::GetSystemSystemResource().GetPageTableManager();
/* Make a page group for the pages we're closing. */
KPageGroup pg(Kernel::GetSystemSystemResource().GetBlockInfoManagerPointer());
/* Walk the directory. */
u64 remaining = size;
while (remaining > 0) {
const size_t l0_index = (address / DeviceRegionSize);
const size_t l1_index = (address % DeviceRegionSize) / DeviceLargePageSize;
const size_t l2_index = (address % DeviceLargePageSize) / DevicePageSize;
/* Get and validate l1. */
PageDirectoryEntry *l1 = GetPointer<PageDirectoryEntry>(m_tables[l0_index]);
/* Check if there's nothing mapped at l1. */
if (l1 == nullptr || !l1[l1_index].IsValid()) {
const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index;
const size_t map_count = std::min<size_t>(remaining_in_entry, remaining / DevicePageSize);
/* Advance. */
address += map_count * DevicePageSize;
remaining -= map_count * DevicePageSize;
} else if (l1[l1_index].IsTable()) {
/* Dealing with an l1 table. */
PageTableEntry *l2 = GetPointer<PageTableEntry>(GetPageTableVirtualAddress(l1[l1_index].GetPhysicalAddress()));
const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index;
const size_t map_count = std::min<size_t>(remaining_in_entry, remaining / DevicePageSize);
size_t num_closed = 0;
/* Invalidate the attributes of all entries. */
for (size_t i = 0; i < map_count; ++i) {
if (l2[l2_index + i].IsValid()) {
l2[l2_index + i].InvalidateAttributes();
++num_closed;
}
}
cpu::StoreDataCache(std::addressof(l2[l2_index]), map_count * sizeof(PageTableEntry));
/* Invalidate the page table cache. */
for (size_t i = util::AlignDown(l2_index, 4); i <= util::AlignDown(l2_index + map_count - 1, 4); i += 4) {
InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l2[i]))));
}
SmmuSynchronizationBarrier();
/* Close the memory manager's references to the pages. */
{
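/* Coalesce physically-contiguous pages into runs so that each mm.Close call covers */
/* as many pages as possible; closing page-by-page would be equivalent, just slower. */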
KPhysicalAddress contig_phys_addr = Null<KPhysicalAddress>;
size_t contig_count = 0;
for (size_t i = 0; i < map_count; ++i) {
/* Get the physical address. */
const KPhysicalAddress phys_addr = l2[l2_index + i].GetPhysicalAddress();
MESOSPHERE_ASSERT(IsHeapPhysicalAddress(phys_addr));
/* Fully invalidate the entry. */
l2[l2_index + i].Invalidate();
if (contig_count == 0) {
/* Start a new contiguous run; a null physical address keeps the count at zero. */
contig_phys_addr = phys_addr;
contig_count = contig_phys_addr != Null<KPhysicalAddress> ? 1 : 0;
} else if (phys_addr == Null<KPhysicalAddress> || phys_addr != (contig_phys_addr + (contig_count * DevicePageSize))) {
/* If we're no longer contiguous, close the range we've been building. */
mm.Close(contig_phys_addr, (contig_count * DevicePageSize) / PageSize);
contig_phys_addr = phys_addr;
contig_count = contig_phys_addr != Null<KPhysicalAddress> ? 1 : 0;
} else {
++contig_count;
}
}
if (contig_count > 0) {
mm.Close(contig_phys_addr, (contig_count * DevicePageSize) / PageSize);
}
}
/* Close the pages. */
if (ptm.Close(KVirtualAddress(l2), num_closed)) {
/* Invalidate the l1 entry. */
l1[l1_index].Invalidate();
cpu::StoreDataCache(std::addressof(l1[l1_index]), sizeof(PageDirectoryEntry));
/* Synchronize. */
InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index]))));
SmmuSynchronizationBarrier();
/* Free the l2 page. */
ptm.Free(KVirtualAddress(l2));
}
/* Advance. */
address += map_count * DevicePageSize;
remaining -= map_count * DevicePageSize;
} else {
/* Dealing with an l1 entry. */
MESOSPHERE_ASSERT(l2_index == 0);
/* Get the physical address. */
const KPhysicalAddress phys_addr = l1[l1_index].GetPhysicalAddress();
MESOSPHERE_ASSERT(IsHeapPhysicalAddress(phys_addr));
/* Invalidate the entry. */
l1[l1_index].Invalidate();
cpu::StoreDataCache(std::addressof(l1[l1_index]), sizeof(PageDirectoryEntry));
/* Synchronize. */
InvalidatePtc(GetPageTablePhysicalAddress(KVirtualAddress(std::addressof(l1[l1_index]))));
InvalidateTlbSection(m_table_asids[l0_index], address);
SmmuSynchronizationBarrier();
/* Close references. */
mm.Close(phys_addr, DeviceLargePageSize / PageSize);
/* Advance. */
address += DeviceLargePageSize;
remaining -= DeviceLargePageSize;
}
}
}
bool KDevicePageTable::Compare(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address) const {
MESOSPHERE_ASSERT((device_address & ~DeviceVirtualAddressMask) == 0);
MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
/* We need to traverse the ranges that make up our mapping, to make sure they're all good. Start by getting a contiguous range. */
KPageTableBase::MemoryRange contig_range;
if (R_FAILED(page_table->OpenMemoryRangeForUnmapDeviceAddressSpace(std::addressof(contig_range), process_address, size))) {
return false;
}
/* Ensure that we close the range when we're done. */
bool range_open = true;
ON_SCOPE_EXIT { if (range_open) { contig_range.Close(); } };
/* Walk the directory. */
KProcessAddress cur_process_address = process_address;
size_t remaining_size = size;
KPhysicalAddress cur_phys_address = contig_range.GetAddress();
size_t remaining_in_range = contig_range.GetSize();
bool first = true;
u32 first_attr = 0;
while (remaining_size > 0) {
/* Convert the device address to a series of indices. */
const size_t l0_index = (device_address / DeviceRegionSize);
const size_t l1_index = (device_address % DeviceRegionSize) / DeviceLargePageSize;
const size_t l2_index = (device_address % DeviceLargePageSize) / DevicePageSize;
/* Get and validate l1. */
const PageDirectoryEntry *l1 = GetPointer<PageDirectoryEntry>(m_tables[l0_index]);
if (!(l1 != nullptr && l1[l1_index].IsValid())) {
return false;
}
if (l1[l1_index].IsTable()) {
/* We're acting on an l2 entry. */
const PageTableEntry *l2 = GetPointer<PageTableEntry>(GetPageTableVirtualAddress(l1[l1_index].GetPhysicalAddress()));
/* Determine the number of pages to check. */
const size_t remaining_in_entry = (PageTableSize / sizeof(PageTableEntry)) - l2_index;
const size_t map_count = std::min<size_t>(remaining_in_entry, remaining_size / DevicePageSize);
/* Check each page. */
for (size_t i = 0; i < map_count; ++i) {
/* Ensure the l2 entry is valid. */
if (!l2[l2_index + i].IsValid()) {
return false;
}
/* Check that the attributes match the first attributes we encountered. */
const u32 cur_attr = l2[l2_index + i].GetAttributes();
if (!first && cur_attr != first_attr) {
return false;
}
/* If there's nothing remaining in the range, refresh the range. */
if (remaining_in_range == 0) {
contig_range.Close();
range_open = false;
if (R_FAILED(page_table->OpenMemoryRangeForUnmapDeviceAddressSpace(std::addressof(contig_range), cur_process_address, remaining_size))) {
return false;
}
range_open = true;
cur_phys_address = contig_range.GetAddress();
remaining_in_range = contig_range.GetSize();
}
/* Check that the physical address is expected. */
if (l2[l2_index + i].GetPhysicalAddress() != cur_phys_address) {
return false;
}
/* Advance. */
cur_phys_address += DevicePageSize;
cur_process_address += DevicePageSize;
remaining_size -= DevicePageSize;
remaining_in_range -= DevicePageSize;
first = false;
first_attr = cur_attr;
}
/* Advance the device address. */
device_address += map_count * DevicePageSize;
} else {
/* We're acting on an l1 entry. */
if (!(l2_index == 0 && remaining_size >= DeviceLargePageSize)) {
return false;
}
/* Check that the attributes match the first attributes we encountered. */
const u32 cur_attr = l1[l1_index].GetAttributes();
if (!first && cur_attr != first_attr) {
return false;
}
/* If there's nothing remaining in the range, refresh the range. */
if (remaining_in_range == 0) {
contig_range.Close();
range_open = false;
if (R_FAILED(page_table->OpenMemoryRangeForUnmapDeviceAddressSpace(std::addressof(contig_range), cur_process_address, remaining_size))) {
return false;
}
range_open = true;
cur_phys_address = contig_range.GetAddress();
remaining_in_range = contig_range.GetSize();
}
/* Check that the physical address is expected, and there's enough in the range. */
if (remaining_in_range < DeviceLargePageSize || l1[l1_index].GetPhysicalAddress() != cur_phys_address) {
return false;
}
/* Advance. */
cur_phys_address += DeviceLargePageSize;
cur_process_address += DeviceLargePageSize;
remaining_size -= DeviceLargePageSize;
remaining_in_range -= DeviceLargePageSize;
first = false;
first_attr = cur_attr;
/* Advance the device address. */
device_address += DeviceLargePageSize;
}
}
/* The range is valid! */
return true;
}
Result KDevicePageTable::Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned, bool is_io) {
/* Validate address/size. */
MESOSPHERE_ASSERT((device_address & ~DeviceVirtualAddressMask) == 0);
MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
/* IO is not supported on the NX board. */
MESOSPHERE_ASSERT(!is_io);
MESOSPHERE_UNUSED(is_io);
/* Map the pages. */
R_RETURN(this->MapImpl(page_table, process_address, size, device_address, device_perm, is_aligned));
}
Result KDevicePageTable::Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address) {
/* Validate address/size. */
MESOSPHERE_ASSERT((device_address & ~DeviceVirtualAddressMask) == 0);
MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
/* Ensure the page group is correct. */
R_UNLESS(this->Compare(page_table, process_address, size, device_address), svc::ResultInvalidCurrentMemory());
/* Unmap the pages. */
this->UnmapImpl(device_address, size, false);
R_SUCCEED();
}
}
| 67,288
|
C++
|
.cpp
| 1,215
| 41.118519
| 218
| 0.545025
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,992
|
kern_secure_monitor.cpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/board/qemu/virt/kern_secure_monitor.cpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#include "kern_secure_monitor.hpp"
namespace ams::kern::board::qemu::virt::smc {
namespace {
enum UserFunctionId : u32 {
UserFunctionId_SetConfig = 0xC3000401,
UserFunctionId_GetConfig = 0xC3000002,
UserFunctionId_GetResult = 0xC3000003,
UserFunctionId_GetResultData = 0xC3000404,
UserFunctionId_ModularExponentiate = 0xC3000E05,
UserFunctionId_GenerateRandomBytes = 0xC3000006,
UserFunctionId_GenerateAesKek = 0xC3000007,
UserFunctionId_LoadAesKey = 0xC3000008,
UserFunctionId_ComputeAes = 0xC3000009,
UserFunctionId_GenerateSpecificAesKey = 0xC300000A,
UserFunctionId_ComputeCmac = 0xC300040B,
UserFunctionId_ReencryptDeviceUniqueData = 0xC300D60C,
UserFunctionId_DecryptDeviceUniqueData = 0xC300100D,
UserFunctionId_ModularExponentiateByStorageKey = 0xC300060F,
UserFunctionId_PrepareEsDeviceUniqueKey = 0xC3000610,
UserFunctionId_LoadPreparedAesKey = 0xC3000011,
UserFunctionId_PrepareEsCommonTitleKey = 0xC3000012,
};
}
void CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
MESOSPHERE_LOG("Received SMC [%p %p %p %p %p %p %p %p] from %s\n", reinterpret_cast<void *>(args->r[0]), reinterpret_cast<void *>(args->r[1]), reinterpret_cast<void *>(args->r[2]), reinterpret_cast<void *>(args->r[3]), reinterpret_cast<void *>(args->r[4]), reinterpret_cast<void *>(args->r[5]), reinterpret_cast<void *>(args->r[6]), reinterpret_cast<void *>(args->r[7]), GetCurrentProcess().GetName());
switch (args->r[0]) {
case UserFunctionId_GetConfig:
{
switch (static_cast<ConfigItem>(args->r[1])) {
case ConfigItem::ExosphereApiVersion:
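/* Pack major/minor/micro into the top three bytes, then a byte (13 here) that */
/* presumably reports the supported master key revision, then the target firmware */
/* in the low 32 bits. */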
args->r[1] = (static_cast<u64>(ATMOSPHERE_RELEASE_VERSION_MAJOR & 0xFF) << 56) |
(static_cast<u64>(ATMOSPHERE_RELEASE_VERSION_MINOR & 0xFF) << 48) |
(static_cast<u64>(ATMOSPHERE_RELEASE_VERSION_MICRO & 0xFF) << 40) |
(static_cast<u64>(13) << 32) |
(static_cast<u64>(GetTargetFirmware()) << 0);
break;
default:
MESOSPHERE_PANIC("Unhandled GetConfig\n");
}
args->r[0] = static_cast<u64>(SmcResult::Success);
}
break;
default:
MESOSPHERE_PANIC("Unhandled SMC [%p %p %p %p %p %p %p %p]", reinterpret_cast<void *>(args->r[0]), reinterpret_cast<void *>(args->r[1]), reinterpret_cast<void *>(args->r[2]), reinterpret_cast<void *>(args->r[3]), reinterpret_cast<void *>(args->r[4]), reinterpret_cast<void *>(args->r[5]), reinterpret_cast<void *>(args->r[6]), reinterpret_cast<void *>(args->r[7]));
}
}
}
| 4,010
|
C++
|
.cpp
| 63
| 51.142857
| 410
| 0.566497
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,993
|
kern_k_system_control.cpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/board/qemu/virt/kern_k_system_control.cpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#include "kern_secure_monitor.hpp"
namespace ams::kern::board::qemu::virt {
/* User access. */
void KSystemControl::CallSecureMonitorFromUser(ams::svc::lp64::SecureMonitorArguments *args) {
/* Invoke the secure monitor. */
return smc::CallSecureMonitorFromUser(args);
}
}
| 963
|
C++
|
.cpp
| 24
| 37.291667
| 98
| 0.740662
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,994
|
kern_cxx.cpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/libc/kern_cxx.cpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
#ifdef __cplusplus
extern "C" {
#endif
/* cxx implementation details to be stubbed here, as needed. */
void __cxa_pure_virtual() { MESOSPHERE_PANIC("pure virtual function call"); }
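/* The compiler wires __cxa_pure_virtual into vtable slots for pure virtual functions; */
/* it is typically reached only by a virtual call made during construction or */
/* destruction of an abstract base, so panicking is the right response. */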
#ifdef __cplusplus
} /* extern "C" */
#endif
| 890
|
C++
|
.cpp
| 24
| 35.375
| 77
| 0.746234
|
Atmosphere-NX/Atmosphere
| 14,324
| 1,207
| 54
|
GPL-2.0
|
9/20/2024, 9:26:25 PM (Europe/Amsterdam)
| false
| false
| false
| false
| false
| false
| false
| false
|
6,995
|
kern_k_debug.cpp
|
Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/arch/arm64/kern_k_debug.cpp
|
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
/* <stratosphere/rocrt/rocrt.hpp> */
namespace ams::rocrt {
constexpr inline const u32 ModuleHeaderVersion = util::FourCC<'M','O','D','0'>::Code;
struct ModuleHeader {
u32 signature;
u32 dynamic_offset;
u32 bss_start_offset;
u32 bss_end_offset;
u32 exception_info_start_offset;
u32 exception_info_end_offset;
u32 module_offset;
};
struct ModuleHeaderLocation {
u32 pad;
u32 header_offset;
};
}
namespace ams::kern::arch::arm64 {
namespace {
constexpr inline u64 ForbiddenBreakPointFlagsMask = (((1ul << 40) - 1) << 24) | /* Reserved upper bits. */
(((1ul << 1) - 1) << 23) | /* Match VMID BreakPoint Type. */
(((1ul << 2) - 1) << 14) | /* Security State Control. */
(((1ul << 1) - 1) << 13) | /* Hyp Mode Control. */
(((1ul << 4) - 1) << 9) | /* Reserved middle bits. */
(((1ul << 2) - 1) << 3) | /* Reserved lower bits. */
(((1ul << 2) - 1) << 1); /* Privileged Mode Control. */
static_assert(ForbiddenBreakPointFlagsMask == 0xFFFFFFFFFF80FE1Eul);
constexpr inline u64 ForbiddenWatchPointFlagsMask = (((1ul << 32) - 1) << 32) | /* Reserved upper bits. */
(((1ul << 4) - 1) << 20) | /* WatchPoint Type. */
(((1ul << 2) - 1) << 14) | /* Security State Control. */
(((1ul << 1) - 1) << 13) | /* Hyp Mode Control. */
(((1ul << 2) - 1) << 1); /* Privileged Access Control. */
static_assert(ForbiddenWatchPointFlagsMask == 0xFFFFFFFF00F0E006ul);
}
uintptr_t KDebug::GetProgramCounter(const KThread &thread) {
return GetExceptionContext(std::addressof(thread))->pc;
}
void KDebug::SetPreviousProgramCounter() {
/* Get the current thread. */
KThread *thread = GetCurrentThreadPointer();
MESOSPHERE_ASSERT(thread->IsCallingSvc());
/* Get the exception context. */
KExceptionContext *e_ctx = GetExceptionContext(thread);
/* Set the previous pc. */
if (e_ctx->write == 0) {
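/* e_ctx->write records that the pc has already been rewound, so we only do it once. */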
/* Subtract from the program counter. */
if (thread->GetOwnerProcess()->Is64Bit()) {
e_ctx->pc -= sizeof(u32);
} else {
e_ctx->pc -= (e_ctx->psr & 0x20) ? sizeof(u16) : sizeof(u32);
}
/* Mark that we've set the previous pc. */
e_ctx->write = 1;
}
}
Result KDebug::GetThreadContextImpl(ams::svc::ThreadContext *out, KThread *thread, u32 context_flags) {
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
MESOSPHERE_ASSERT(thread != GetCurrentThreadPointer());
/* Get the exception context. */
const KExceptionContext *e_ctx = GetExceptionContext(thread);
/* Get whether we're 64-bit. */
const bool is_64_bit = this->Is64Bit();
/* If general registers are requested, get them. */
if ((context_flags & ams::svc::ThreadContextFlag_General) != 0) {
/* We can always get X0-X7/R0-R7. */
auto register_count = 8;
if (!thread->IsCallingSvc() || thread->GetSvcId() == svc::SvcId_ReturnFromException) {
if (is_64_bit) {
/* We're not in an SVC, so we can get X0-X29. */
register_count = 29;
} else {
/* We're 32-bit, so we should get R0-R12. */
register_count = 13;
}
}
/* Get the registers. */
for (auto i = 0; i < register_count; ++i) {
out->r[i] = is_64_bit ? e_ctx->x[i] : static_cast<u32>(e_ctx->x[i]);
}
}
/* If control flags are requested, get them. */
if ((context_flags & ams::svc::ThreadContextFlag_Control) != 0) {
if (is_64_bit) {
out->fp = e_ctx->x[29];
out->lr = e_ctx->x[30];
out->sp = e_ctx->sp;
out->pc = e_ctx->pc;
out->pstate = (e_ctx->psr & cpu::El0Aarch64PsrMask);
/* Adjust PC if we should. */
if (e_ctx->write == 0 && thread->IsCallingSvc()) {
out->pc -= sizeof(u32);
}
out->tpidr = e_ctx->tpidr;
} else {
out->r[11] = static_cast<u32>(e_ctx->x[11]);
out->r[13] = static_cast<u32>(e_ctx->x[13]);
out->r[14] = static_cast<u32>(e_ctx->x[14]);
out->lr = 0;
out->sp = 0;
out->pc = e_ctx->pc;
out->pstate = (e_ctx->psr & cpu::El0Aarch32PsrMask);
/* Adjust PC if we should. */
if (e_ctx->write == 0 && thread->IsCallingSvc()) {
out->pc -= (e_ctx->psr & 0x20) ? sizeof(u16) : sizeof(u32);
}
out->tpidr = static_cast<u32>(e_ctx->tpidr);
}
}
/* Get the FPU context. */
R_RETURN(this->GetFpuContext(out, thread, context_flags));
}
Result KDebug::SetThreadContextImpl(const ams::svc::ThreadContext &ctx, KThread *thread, u32 context_flags) {
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
MESOSPHERE_ASSERT(thread != GetCurrentThreadPointer());
/* Get the exception context. */
KExceptionContext *e_ctx = GetExceptionContext(thread);
/* If general registers are requested, set them. */
if ((context_flags & ams::svc::ThreadContextFlag_General) != 0) {
if (this->Is64Bit()) {
/* Set X0-X28. */
for (auto i = 0; i <= 28; ++i) {
e_ctx->x[i] = ctx.r[i];
}
} else {
/* Set R0-R12. */
for (auto i = 0; i <= 12; ++i) {
e_ctx->x[i] = static_cast<u32>(ctx.r[i]);
}
}
}
/* If control flags are requested, set them. */
if ((context_flags & ams::svc::ThreadContextFlag_Control) != 0) {
/* Mark ourselves as having adjusted the pc. */
e_ctx->write = 1;
if (this->Is64Bit()) {
e_ctx->x[29] = ctx.fp;
e_ctx->x[30] = ctx.lr;
e_ctx->sp = ctx.sp;
e_ctx->pc = ctx.pc;
e_ctx->psr = ((ctx.pstate & cpu::El0Aarch64PsrMask) | (e_ctx->psr & ~cpu::El0Aarch64PsrMask));
e_ctx->tpidr = ctx.tpidr;
} else {
e_ctx->x[13] = static_cast<u32>(ctx.r[13]);
e_ctx->x[14] = static_cast<u32>(ctx.r[14]);
e_ctx->x[30] = 0;
e_ctx->sp = 0;
e_ctx->pc = static_cast<u32>(ctx.pc);
e_ctx->psr = ((ctx.pstate & cpu::El0Aarch32PsrMask) | (e_ctx->psr & ~cpu::El0Aarch32PsrMask));
e_ctx->tpidr = ctx.tpidr;
}
}
/* Set the FPU context. */
R_RETURN(this->SetFpuContext(ctx, thread, context_flags));
}
Result KDebug::GetFpuContext(ams::svc::ThreadContext *out, KThread *thread, u32 context_flags) {
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
MESOSPHERE_ASSERT(thread != GetCurrentThreadPointer());
/* Succeed if there's nothing to do. */
R_SUCCEED_IF((context_flags & (ams::svc::ThreadContextFlag_Fpu | ams::svc::ThreadContextFlag_FpuControl)) == 0);
/* Get the thread context. */
KThreadContext *t_ctx = std::addressof(thread->GetContext());
/* Get the FPU control registers, if required. */
if ((context_flags & ams::svc::ThreadContextFlag_FpuControl) != 0) {
out->fpsr = t_ctx->GetFpsr();
out->fpcr = t_ctx->GetFpcr();
}
/* Get the FPU registers, if required. */
if ((context_flags & ams::svc::ThreadContextFlag_Fpu) != 0) {
static_assert(util::size(ams::svc::ThreadContext{}.v) == KThreadContext::NumFpuRegisters);
const auto &caller_save = thread->GetCallerSaveFpuRegisters();
const auto &callee_save = t_ctx->GetCalleeSaveFpuRegisters();
if (this->Is64Bit()) {
KThreadContext::GetFpuRegisters(out->v, caller_save.fpu64, callee_save.fpu64);
} else {
KThreadContext::GetFpuRegisters(out->v, caller_save.fpu32, callee_save.fpu32);
for (size_t i = KThreadContext::NumFpuRegisters / 2; i < KThreadContext::NumFpuRegisters; ++i) {
out->v[i] = 0;
}
}
}
R_SUCCEED();
}
Result KDebug::SetFpuContext(const ams::svc::ThreadContext &ctx, KThread *thread, u32 context_flags) {
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
MESOSPHERE_ASSERT(thread != GetCurrentThreadPointer());
/* Succeed if there's nothing to do. */
R_SUCCEED_IF((context_flags & (ams::svc::ThreadContextFlag_Fpu | ams::svc::ThreadContextFlag_FpuControl)) == 0);
/* Get the thread context. */
KThreadContext *t_ctx = std::addressof(thread->GetContext());
/* Set the FPU control registers, if required. */
if ((context_flags & ams::svc::ThreadContextFlag_FpuControl) != 0) {
t_ctx->SetFpsr(ctx.fpsr);
t_ctx->SetFpcr(ctx.fpcr);
}
/* Set the FPU registers, if required. */
if ((context_flags & ams::svc::ThreadContextFlag_Fpu) != 0) {
static_assert(util::size(ams::svc::ThreadContext{}.v) == KThreadContext::NumFpuRegisters);
auto &caller_save = thread->GetCallerSaveFpuRegisters();
auto &callee_save = t_ctx->GetCalleeSaveFpuRegisters();
if (this->Is64Bit()) {
KThreadContext::SetFpuRegisters(caller_save.fpu64, callee_save.fpu64, ctx.v);
} else {
KThreadContext::SetFpuRegisters(caller_save.fpu32, callee_save.fpu32, ctx.v);
}
}
R_SUCCEED();
}
Result KDebug::BreakIfAttached(ams::svc::BreakReason break_reason, uintptr_t address, size_t size) {
const uintptr_t params[5] = { ams::svc::DebugException_UserBreak, GetProgramCounter(GetCurrentThread()), break_reason, address, size };
R_RETURN(KDebugBase::OnDebugEvent(ams::svc::DebugEvent_Exception, params, util::size(params)));
}
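/* The macros below clear the control register before writing the value register, then */
/* write the final flags, synchronizing after each step; presumably this ensures the */
/* CPU never observes a partially-programmed breakpoint or watchpoint. */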
#define MESOSPHERE_SET_HW_BREAK_POINT(ID, FLAGS, VALUE) \
({ \
cpu::SetDbgBcr##ID##El1(0); \
cpu::EnsureInstructionConsistencyFullSystem(); \
cpu::SetDbgBvr##ID##El1(VALUE); \
cpu::EnsureInstructionConsistencyFullSystem(); \
cpu::SetDbgBcr##ID##El1(FLAGS); \
cpu::EnsureInstructionConsistencyFullSystem(); \
})
#define MESOSPHERE_SET_HW_WATCH_POINT(ID, FLAGS, VALUE) \
({ \
cpu::SetDbgWcr##ID##El1(0); \
cpu::EnsureInstructionConsistencyFullSystem(); \
cpu::SetDbgWvr##ID##El1(VALUE); \
cpu::EnsureInstructionConsistencyFullSystem(); \
cpu::SetDbgWcr##ID##El1(FLAGS); \
cpu::EnsureInstructionConsistencyFullSystem(); \
})
Result KDebug::SetHardwareBreakPoint(ams::svc::HardwareBreakPointRegisterName name, u64 flags, u64 value) {
/* Get the debug feature register. */
cpu::DebugFeatureRegisterAccessor dfr0;
/* Extract interesting info from the debug feature register. */
const auto num_bp = dfr0.GetNumBreakpoints();
const auto num_wp = dfr0.GetNumWatchpoints();
const auto num_ctx = dfr0.GetNumContextAwareBreakpoints();
if (ams::svc::HardwareBreakPointRegisterName_I0 <= name && name <= ams::svc::HardwareBreakPointRegisterName_I15) {
/* Check that the name is a valid instruction breakpoint. */
R_UNLESS((name - ams::svc::HardwareBreakPointRegisterName_I0) <= num_bp, svc::ResultNotSupported());
/* Configure flags/value. */
if ((flags & 1) != 0) {
/* We're enabling the breakpoint. Check that the flags are allowable. */
R_UNLESS((flags & ForbiddenBreakPointFlagsMask) == 0, svc::ResultInvalidCombination());
/* Require that the breakpoint be linked or match context id. */
R_UNLESS((flags & ((1ul << 21) | (1ul << 20))) != 0, svc::ResultInvalidCombination());
/* If the breakpoint matches context id, we need to get the context id. */
if ((flags & (1ul << 21)) != 0) {
/* Ensure that the breakpoint is context-aware. */
R_UNLESS((name - ams::svc::HardwareBreakPointRegisterName_I0) >= (num_bp - num_ctx), svc::ResultNotSupported());
/* Check that the breakpoint does not have the mismatch bit. */
R_UNLESS((flags & (1ul << 22)) == 0, svc::ResultInvalidCombination());
/* Get the debug object from the current handle table. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(static_cast<ams::svc::Handle>(value));
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
/* Get the process from the debug object. */
R_UNLESS(debug->IsAttached(), svc::ResultProcessTerminated());
R_UNLESS(debug->OpenProcess(), svc::ResultProcessTerminated());
/* Close the process when we're done. */
ON_SCOPE_EXIT { debug->CloseProcess(); };
/* Get the process. */
KProcess * const process = debug->GetProcessUnsafe();
/* Set the value to be the context id. */
value = process->GetId() & 0xFFFFFFFF;
}
/* Set the breakpoint as non-secure EL0-only. */
flags |= (1ul << 14) | (2ul << 1);
} else {
/* We're disabling the breakpoint. */
flags = 0;
value = 0;
}
/* Set the breakpoint. */
switch (name) {
case ams::svc::HardwareBreakPointRegisterName_I0: MESOSPHERE_SET_HW_BREAK_POINT( 0, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I1: MESOSPHERE_SET_HW_BREAK_POINT( 1, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I2: MESOSPHERE_SET_HW_BREAK_POINT( 2, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I3: MESOSPHERE_SET_HW_BREAK_POINT( 3, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I4: MESOSPHERE_SET_HW_BREAK_POINT( 4, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I5: MESOSPHERE_SET_HW_BREAK_POINT( 5, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I6: MESOSPHERE_SET_HW_BREAK_POINT( 6, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I7: MESOSPHERE_SET_HW_BREAK_POINT( 7, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I8: MESOSPHERE_SET_HW_BREAK_POINT( 8, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I9: MESOSPHERE_SET_HW_BREAK_POINT( 9, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I10: MESOSPHERE_SET_HW_BREAK_POINT(10, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I11: MESOSPHERE_SET_HW_BREAK_POINT(11, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I12: MESOSPHERE_SET_HW_BREAK_POINT(12, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I13: MESOSPHERE_SET_HW_BREAK_POINT(13, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I14: MESOSPHERE_SET_HW_BREAK_POINT(14, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_I15: MESOSPHERE_SET_HW_BREAK_POINT(15, flags, value); break;
default: break;
}
} else if (ams::svc::HardwareBreakPointRegisterName_D0 <= name && name <= ams::svc::HardwareBreakPointRegisterName_D15) {
/* Check that the name is a valid data breakpoint. */
R_UNLESS((name - ams::svc::HardwareBreakPointRegisterName_D0) <= num_wp, svc::ResultNotSupported());
/* Configure flags/value. */
if ((flags & 1) != 0) {
/* We're enabling the watchpoint. Check that the flags are allowable. */
R_UNLESS((flags & ForbiddenWatchPointFlagsMask) == 0, svc::ResultInvalidCombination());
/* Set the breakpoint as linked non-secure EL0-only. */
flags |= (1ul << 20) | (1ul << 14) | (2ul << 1);
} else {
/* We're disabling the watchpoint. */
flags = 0;
value = 0;
}
/* Set the watchpoint. */
switch (name) {
case ams::svc::HardwareBreakPointRegisterName_D0: MESOSPHERE_SET_HW_WATCH_POINT( 0, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D1: MESOSPHERE_SET_HW_WATCH_POINT( 1, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D2: MESOSPHERE_SET_HW_WATCH_POINT( 2, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D3: MESOSPHERE_SET_HW_WATCH_POINT( 3, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D4: MESOSPHERE_SET_HW_WATCH_POINT( 4, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D5: MESOSPHERE_SET_HW_WATCH_POINT( 5, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D6: MESOSPHERE_SET_HW_WATCH_POINT( 6, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D7: MESOSPHERE_SET_HW_WATCH_POINT( 7, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D8: MESOSPHERE_SET_HW_WATCH_POINT( 8, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D9: MESOSPHERE_SET_HW_WATCH_POINT( 9, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D10: MESOSPHERE_SET_HW_WATCH_POINT(10, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D11: MESOSPHERE_SET_HW_WATCH_POINT(11, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D12: MESOSPHERE_SET_HW_WATCH_POINT(12, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D13: MESOSPHERE_SET_HW_WATCH_POINT(13, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D14: MESOSPHERE_SET_HW_WATCH_POINT(14, flags, value); break;
case ams::svc::HardwareBreakPointRegisterName_D15: MESOSPHERE_SET_HW_WATCH_POINT(15, flags, value); break;
default: break;
}
} else {
/* Invalid name. */
R_THROW(svc::ResultInvalidEnumValue());
}
R_SUCCEED();
}
#undef MESOSPHERE_SET_HW_WATCH_POINT
#undef MESOSPHERE_SET_HW_BREAK_POINT
void KDebug::PrintRegister(KThread *thread) {
#if defined(MESOSPHERE_BUILD_FOR_DEBUGGING)
{
/* Treat no thread as current thread. */
if (thread == nullptr) {
thread = GetCurrentThreadPointer();
}
/* Get the exception context. */
KExceptionContext *e_ctx = GetExceptionContext(thread);
/* Get the owner process. */
if (auto *process = thread->GetOwnerProcess(); process != nullptr) {
/* Lock the owner process. */
KScopedLightLock state_lk(process->GetStateLock());
KScopedLightLock list_lk(process->GetListLock());
/* Suspend all the process's threads. */
{
KScopedSchedulerLock sl;
auto end = process->GetThreadList().end();
for (auto it = process->GetThreadList().begin(); it != end; ++it) {
if (std::addressof(*it) != GetCurrentThreadPointer()) {
it->RequestSuspend(KThread::SuspendType_Backtrace);
}
}
}
/* Print the registers. */
MESOSPHERE_RELEASE_LOG("Registers\n");
if ((e_ctx->psr & 0x10) == 0) {
/* 64-bit thread. */
for (auto i = 0; i < 31; ++i) {
MESOSPHERE_RELEASE_LOG(" X[%2d]: 0x%016lx\n", i, e_ctx->x[i]);
}
MESOSPHERE_RELEASE_LOG(" SP: 0x%016lx\n", e_ctx->sp);
MESOSPHERE_RELEASE_LOG(" PC: 0x%016lx\n", e_ctx->pc - sizeof(u32));
MESOSPHERE_RELEASE_LOG(" PSR: 0x%08x\n", e_ctx->psr);
MESOSPHERE_RELEASE_LOG(" TPIDR_EL0: 0x%016lx\n", e_ctx->tpidr);
} else {
/* 32-bit thread. */
for (auto i = 0; i < 13; ++i) {
MESOSPHERE_RELEASE_LOG(" R[%2d]: 0x%08x\n", i, static_cast<u32>(e_ctx->x[i]));
}
MESOSPHERE_RELEASE_LOG(" SP: 0x%08x\n", static_cast<u32>(e_ctx->x[13]));
MESOSPHERE_RELEASE_LOG(" LR: 0x%08x\n", static_cast<u32>(e_ctx->x[14]));
MESOSPHERE_RELEASE_LOG(" PC: 0x%08x\n", static_cast<u32>(e_ctx->pc) - static_cast<u32>((e_ctx->psr & 0x20) ? sizeof(u16) : sizeof(u32)));
MESOSPHERE_RELEASE_LOG(" PSR: 0x%08x\n", e_ctx->psr);
MESOSPHERE_RELEASE_LOG(" TPIDR: 0x%08x\n", static_cast<u32>(e_ctx->tpidr));
}
/* Resume the threads that we suspended. */
{
KScopedSchedulerLock sl;
auto end = process->GetThreadList().end();
for (auto it = process->GetThreadList().begin(); it != end; ++it) {
if (std::addressof(*it) != GetCurrentThreadPointer()) {
it->Resume(KThread::SuspendType_Backtrace);
}
}
}
}
}
#else
MESOSPHERE_UNUSED(thread);
#endif
}
#if defined(MESOSPHERE_BUILD_FOR_DEBUGGING)
namespace {
bool IsHeapPhysicalAddress(KPhysicalAddress phys_addr) {
const KMemoryRegion *cached = nullptr;
return KMemoryLayout::IsHeapPhysicalAddress(cached, phys_addr);
}
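/* ReadValue safely reads a single value out of the target process: it requires */
/* alignment, user-readability, and heap backing before dereferencing through the */
/* kernel's linear mapping of the heap. */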
template<typename T>
bool ReadValue(T *out, KProcess *process, uintptr_t address) {
KPhysicalAddress phys_addr;
KMemoryInfo mem_info;
ams::svc::PageInfo page_info;
if (!util::IsAligned(address, sizeof(T))) {
return false;
}
if (R_FAILED(process->GetPageTable().QueryInfo(std::addressof(mem_info), std::addressof(page_info), address))) {
return false;
}
if ((mem_info.GetPermission() & KMemoryPermission_UserRead) != KMemoryPermission_UserRead) {
return false;
}
if (!process->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), address)) {
return false;
}
if (!IsHeapPhysicalAddress(phys_addr)) {
return false;
}
*out = *GetPointer<T>(process->GetPageTable().GetHeapVirtualAddress(phys_addr));
return true;
}
bool GetModuleName(char *dst, size_t dst_size, KProcess *process, uintptr_t base_address) {
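/* The parsing below assumes the homebrew rtld layout: .rodata begins with a zero u32, */
/* then a u32 name length, then the module name bytes. */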
/* Locate .rodata. */
KMemoryInfo mem_info;
ams::svc::PageInfo page_info;
KMemoryState mem_state = KMemoryState_None;
while (true) {
if (R_FAILED(process->GetPageTable().QueryInfo(std::addressof(mem_info), std::addressof(page_info), base_address))) {
return false;
}
if (mem_state == KMemoryState_None) {
mem_state = mem_info.GetState();
if (mem_state != KMemoryState_Code && mem_state != KMemoryState_AliasCode) {
return false;
}
}
if (mem_info.GetState() != mem_state) {
return false;
}
if (mem_info.GetPermission() == KMemoryPermission_UserRead) {
break;
}
base_address = mem_info.GetEndAddress();
}
/* Check that first value is 0. */
u32 val;
if (!ReadValue(std::addressof(val), process, base_address)) {
return false;
}
if (val != 0) {
return false;
}
/* Read the name length. */
if (!ReadValue(std::addressof(val), process, base_address + sizeof(u32))) {
return false;
}
if (!(0 < val && val < dst_size)) {
return false;
}
const size_t name_len = val;
/* Read the name, one character at a time. */
for (size_t i = 0; i < name_len; ++i) {
if (!ReadValue(dst + i, process, base_address + 2 * sizeof(u32) + i)) {
return false;
}
if (!(0 < dst[i] && dst[i] <= 0x7F)) {
return false;
}
}
/* NULL-terminate. */
dst[name_len] = 0;
return true;
}
void PrintAddress(uintptr_t address) {
MESOSPHERE_RELEASE_LOG(" %p\n", reinterpret_cast<void *>(address));
}
void PrintAddressWithModuleName(uintptr_t address, bool has_module_name, const char *module_name, uintptr_t base_address) {
if (has_module_name) {
MESOSPHERE_RELEASE_LOG(" %p [%10s + %8lx]\n", reinterpret_cast<void *>(address), module_name, address - base_address);
} else {
MESOSPHERE_RELEASE_LOG(" %p [%10lx + %8lx]\n", reinterpret_cast<void *>(address), base_address, address - base_address);
}
}
void PrintAddressWithSymbol(uintptr_t address, bool has_module_name, const char *module_name, uintptr_t base_address, const char *symbol_name, uintptr_t func_address) {
if (has_module_name) {
MESOSPHERE_RELEASE_LOG(" %p [%10s + %8lx] (%s + %lx)\n", reinterpret_cast<void *>(address), module_name, address - base_address, symbol_name, address - func_address);
} else {
MESOSPHERE_RELEASE_LOG(" %p [%10lx + %8lx] (%s + %lx)\n", reinterpret_cast<void *>(address), base_address, address - base_address, symbol_name, address - func_address);
}
}
void PrintCodeAddress(KProcess *process, uintptr_t address, bool is_lr = true) {
/* Prepare to parse + print the address. */
uintptr_t test_address = is_lr ? address - sizeof(u32) : address;
uintptr_t base_address = address;
uintptr_t dyn_address = 0;
uintptr_t sym_tab = 0;
uintptr_t str_tab = 0;
size_t num_sym = 0;
u64 temp_64;
u32 temp_32;
/* Locate the start of .text. */
KMemoryInfo mem_info;
ams::svc::PageInfo page_info;
KMemoryState mem_state = KMemoryState_None;
while (true) {
if (R_FAILED(process->GetPageTable().QueryInfo(std::addressof(mem_info), std::addressof(page_info), base_address))) {
return PrintAddress(address);
}
if (mem_state == KMemoryState_None) {
mem_state = mem_info.GetState();
if (mem_state != KMemoryState_Code && mem_state != KMemoryState_AliasCode) {
return PrintAddress(address);
}
} else if (mem_info.GetState() != mem_state) {
return PrintAddress(address);
}
if (mem_info.GetPermission() != KMemoryPermission_UserReadExecute) {
return PrintAddress(address);
}
base_address = mem_info.GetAddress();
if (R_FAILED(process->GetPageTable().QueryInfo(std::addressof(mem_info), std::addressof(page_info), base_address - 1))) {
return PrintAddress(address);
}
if (mem_info.GetState() != mem_state) {
break;
}
if (mem_info.GetPermission() != KMemoryPermission_UserReadExecute) {
break;
}
}
/* Get the module name. */
char module_name[0x20];
const bool has_module_name = GetModuleName(module_name, sizeof(module_name), process, base_address);
/* If the process is 32-bit, just print the module. */
if (!process->Is64Bit()) {
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
/* Locate .dyn using rocrt::ModuleHeader. */
{
/* Determine the ModuleHeader offset. */
u32 mod_offset;
if (!ReadValue(std::addressof(mod_offset), process, base_address + sizeof(u32))) {
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
/* Read the signature. */
constexpr u32 SignatureFieldOffset = AMS_OFFSETOF(rocrt::ModuleHeader, signature);
if (!ReadValue(std::addressof(temp_32), process, base_address + mod_offset + SignatureFieldOffset)) {
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
/* Check that the module signature is expected. */
if (temp_32 != rocrt::ModuleHeaderVersion) { /* MOD0 */
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
/* Determine the dynamic offset. */
constexpr u32 DynamicFieldOffset = AMS_OFFSETOF(rocrt::ModuleHeader, dynamic_offset);
if (!ReadValue(std::addressof(temp_32), process, base_address + mod_offset + DynamicFieldOffset)) {
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
dyn_address = base_address + mod_offset + temp_32;
}
/* Locate tables inside .dyn. */
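/* Each entry is an Elf64_Dyn: a u64 d_tag followed by a u64 value, hence the 0x10 */
/* stride. Tags 4/5/6 are DT_HASH/DT_STRTAB/DT_SYMTAB and DT_NULL (0) terminates the */
/* array; nchain in the DT_HASH header equals the number of symbol table entries. */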
for (size_t ofs = 0; /* ... */; ofs += 0x10) {
/* Read the DynamicTag. */
if (!ReadValue(std::addressof(temp_64), process, dyn_address + ofs)) {
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
if (temp_64 == 0) {
/* We're done parsing .dyn. */
break;
} else if (temp_64 == 4) {
/* We found DT_HASH */
if (!ReadValue(std::addressof(temp_64), process, dyn_address + ofs + sizeof(u64))) {
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
/* Read nchain, to get the number of symbols. */
if (!ReadValue(std::addressof(temp_32), process, base_address + temp_64 + sizeof(u32))) {
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
num_sym = temp_32;
} else if (temp_64 == 5) {
/* We found DT_STRTAB */
if (!ReadValue(std::addressof(temp_64), process, dyn_address + ofs + sizeof(u64))) {
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
str_tab = base_address + temp_64;
} else if (temp_64 == 6) {
/* We found DT_SYMTAB */
if (!ReadValue(std::addressof(temp_64), process, dyn_address + ofs + sizeof(u64))) {
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
sym_tab = base_address + temp_64;
}
}
/* Check that we found all the tables. */
if (!(sym_tab != 0 && str_tab != 0 && num_sym != 0)) {
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
/* Try to locate an appropriate symbol. */
for (size_t i = 0; i < num_sym; ++i) {
/* Read the symbol from userspace. */
struct {
u32 st_name;
u8 st_info;
u8 st_other;
u16 st_shndx;
u64 st_value;
u64 st_size;
} sym;
{
u64 x[sizeof(sym) / sizeof(u64)];
for (size_t j = 0; j < util::size(x); ++j) {
if (!ReadValue(x + j, process, sym_tab + sizeof(sym) * i + sizeof(u64) * j)) {
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
}
std::memcpy(std::addressof(sym), x, sizeof(sym));
}
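/* NOTE: The anonymous struct above matches the standard Elf64_Sym layout */
/* (24 bytes), read as three u64 words since ReadValue operates on u64s.  */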
/* Check the symbol is valid/STT_FUNC. */
if (sym.st_shndx == 0 || ((sym.st_shndx & 0xFF00) == 0xFF00)) {
continue;
}
if ((sym.st_info & 0xF) != 2) {
continue;
}
/* Check the address. */
const uintptr_t func_start = base_address + sym.st_value;
if (func_start <= test_address && test_address < func_start + sym.st_size) {
/* Read the symbol name. */
const uintptr_t sym_address = str_tab + sym.st_name;
char sym_name[0x80];
sym_name[util::size(sym_name) - 1] = 0;
for (size_t j = 0; j < util::size(sym_name) - 1; ++j) {
if (!ReadValue(sym_name + j, process, sym_address + j)) {
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
if (sym_name[j] == 0) {
break;
}
}
/* Print the symbol. */
return PrintAddressWithSymbol(address, has_module_name, module_name, base_address, sym_name, func_start);
}
}
/* Fall back to printing the module. */
return PrintAddressWithModuleName(address, has_module_name, module_name, base_address);
}
}
#endif
void KDebug::PrintBacktrace(KThread *thread) {
#if defined(MESOSPHERE_BUILD_FOR_DEBUGGING)
{
/* Treat no thread as current thread. */
if (thread == nullptr) {
thread = GetCurrentThreadPointer();
}
/* Get the exception context. */
KExceptionContext *e_ctx = GetExceptionContext(thread);
/* Get the owner process. */
if (auto *process = thread->GetOwnerProcess(); process != nullptr) {
/* Lock the owner process. */
KScopedLightLock state_lk(process->GetStateLock());
KScopedLightLock list_lk(process->GetListLock());
/* Suspend all the process's threads. */
{
KScopedSchedulerLock sl;
auto end = process->GetThreadList().end();
for (auto it = process->GetThreadList().begin(); it != end; ++it) {
if (std::addressof(*it) != GetCurrentThreadPointer()) {
it->RequestSuspend(KThread::SuspendType_Backtrace);
}
}
}
/* Print the backtrace. */
MESOSPHERE_RELEASE_LOG("User Backtrace\n");
if ((e_ctx->psr & 0x10) == 0) {
/* 64-bit thread. */
PrintCodeAddress(process, e_ctx->pc, false);
PrintCodeAddress(process, e_ctx->x[30]);
/* Walk the stack frames. */
uintptr_t fp = static_cast<uintptr_t>(e_ctx->x[29]);
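/* Per AAPCS64, each frame record is a pair { previous fp, saved lr }  */
/* stored at the address in x29, so the chain can be walked by loading */
/* two u64s per frame.                                                 */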
for (auto i = 0; i < 0x20 && fp != 0 && util::IsAligned(fp, 0x10); ++i) {
/* Read the next frame. */
struct {
u64 fp;
u64 lr;
} stack_frame;
{
KMemoryInfo mem_info;
ams::svc::PageInfo page_info;
KPhysicalAddress phys_addr;
if (R_FAILED(process->GetPageTable().QueryInfo(std::addressof(mem_info), std::addressof(page_info), fp))) {
break;
}
if ((mem_info.GetState() & KMemoryState_FlagReferenceCounted) == 0) {
break;
}
if ((mem_info.GetAttribute() & KMemoryAttribute_Uncached) != 0) {
break;
}
if ((mem_info.GetPermission() & KMemoryPermission_UserRead) != KMemoryPermission_UserRead) {
break;
}
if (!process->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), fp)) {
break;
}
if (!IsHeapPhysicalAddress(phys_addr)) {
break;
}
u64 *frame_ptr = GetPointer<u64>(process->GetPageTable().GetHeapVirtualAddress(phys_addr));
stack_frame.fp = frame_ptr[0];
stack_frame.lr = frame_ptr[1];
}
/* Print and advance. */
PrintCodeAddress(process, stack_frame.lr);
fp = stack_frame.fp;
}
} else {
/* 32-bit thread. */
PrintCodeAddress(process, e_ctx->pc, false);
PrintCodeAddress(process, e_ctx->x[14]);
/* Walk the stack frames. */
uintptr_t fp = static_cast<uintptr_t>(e_ctx->x[11]);
for (auto i = 0; i < 0x20 && fp != 0 && util::IsAligned(fp, 4); ++i) {
/* Read the next frame. */
struct {
u32 fp;
u32 lr;
} stack_frame;
{
KMemoryInfo mem_info;
ams::svc::PageInfo page_info;
KPhysicalAddress phys_addr;
/* Read FP */
if (R_FAILED(process->GetPageTable().QueryInfo(std::addressof(mem_info), std::addressof(page_info), fp))) {
break;
}
if ((mem_info.GetState() & KMemoryState_FlagReferenceCounted) == 0) {
break;
}
if ((mem_info.GetAttribute() & KMemoryAttribute_Uncached) != 0) {
break;
}
if ((mem_info.GetPermission() & KMemoryPermission_UserRead) != KMemoryPermission_UserRead) {
break;
}
if (!process->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), fp)) {
break;
}
if (!IsHeapPhysicalAddress(phys_addr)) {
break;
}
stack_frame.fp = *GetPointer<u32>(process->GetPageTable().GetHeapVirtualAddress(phys_addr));
/* Read LR. */
uintptr_t lr_ptr = (e_ctx->x[13] <= stack_frame.fp && stack_frame.fp < e_ctx->x[13] + PageSize) ? fp + 4 : fp - 4;
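/* NOTE: This heuristic guesses the AArch32 frame layout: if the saved */
/* fp points into the page at the current sp (r13), the frame likely   */
/* stores { fp, lr } so lr is read at fp + 4; otherwise lr is assumed  */
/* to sit just below fp at fp - 4.                                     */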
if (R_FAILED(process->GetPageTable().QueryInfo(std::addressof(mem_info), std::addressof(page_info), lr_ptr))) {
break;
}
if ((mem_info.GetState() & KMemoryState_FlagReferenceCounted) == 0) {
break;
}
if ((mem_info.GetAttribute() & KMemoryAttribute_Uncached) != 0) {
break;
}
if ((mem_info.GetPermission() & KMemoryPermission_UserRead) != KMemoryPermission_UserRead) {
break;
}
if (!process->GetPageTable().GetPhysicalAddress(std::addressof(phys_addr), lr_ptr)) {
break;
}
if (!IsHeapPhysicalAddress(phys_addr)) {
break;
}
stack_frame.lr = *GetPointer<u32>(process->GetPageTable().GetHeapVirtualAddress(phys_addr));
}
/* Print and advance. */
PrintCodeAddress(process, stack_frame.lr);
fp = stack_frame.fp;
}
}
/* Resume the threads that we suspended. */
{
KScopedSchedulerLock sl;
auto end = process->GetThreadList().end();
for (auto it = process->GetThreadList().begin(); it != end; ++it) {
if (std::addressof(*it) != GetCurrentThreadPointer()) {
it->Resume(KThread::SuspendType_Backtrace);
}
}
}
}
}
#else
MESOSPHERE_UNUSED(thread);
#endif
}
}
| 45,188 | C++ | .cpp | 815 | 38.381595 | 186 | 0.502633 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 6,996 | kern_k_supervisor_page_table.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/arch/arm64/kern_k_supervisor_page_table.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::arch::arm64 {
void KSupervisorPageTable::Initialize(s32 core_id) {
/* Verify that sctlr_el1 has the wxn bit set. */
MESOSPHERE_ABORT_UNLESS(cpu::SystemControlRegisterAccessor().GetWxn());
/* Invalidate the entire TLB. */
cpu::InvalidateEntireTlb();
/* If core 0, initialize our base page table. */
if (core_id == 0) {
/* TODO: constexpr defines. */
const u64 ttbr1 = cpu::GetTtbr1El1() & 0xFFFFFFFFFFFFul;
const u64 kernel_vaddr_start = 0xFFFFFF8000000000ul;
const u64 kernel_vaddr_end = 0xFFFFFFFFFFE00000ul;
void *table = GetVoidPointer(KPageTableBase::GetLinearMappedVirtualAddress(ttbr1));
m_page_table.InitializeForKernel(table, kernel_vaddr_start, kernel_vaddr_end);
}
}
}
| 1,504 | C++ | .cpp | 33 | 39.878788 | 95 | 0.695089 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 6,997 | kern_k_page_table.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/arch/arm64/kern_k_page_table.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::arch::arm64 {
namespace {
class AlignedMemoryBlock {
private:
uintptr_t m_before_start;
uintptr_t m_before_end;
uintptr_t m_after_start;
uintptr_t m_after_end;
size_t m_current_alignment;
public:
constexpr AlignedMemoryBlock(uintptr_t start, size_t num_pages, size_t alignment) : m_before_start(0), m_before_end(0), m_after_start(0), m_after_end(0), m_current_alignment(0) {
MESOSPHERE_ASSERT(util::IsAligned(start, PageSize));
MESOSPHERE_ASSERT(num_pages > 0);
/* Find an alignment that allows us to divide into at least two regions. */
uintptr_t start_page = start / PageSize;
alignment /= PageSize;
while (util::AlignUp(start_page, alignment) >= util::AlignDown(start_page + num_pages, alignment)) {
alignment = KPageTable::GetSmallerAlignment(alignment * PageSize) / PageSize;
}
m_before_start = start_page;
m_before_end = util::AlignUp(start_page, alignment);
m_after_start = m_before_end;
m_after_end = start_page + num_pages;
m_current_alignment = alignment;
MESOSPHERE_ASSERT(m_current_alignment > 0);
}
constexpr void SetAlignment(size_t alignment) {
/* We can only ever decrease the granularity. */
MESOSPHERE_ASSERT(m_current_alignment >= alignment / PageSize);
m_current_alignment = alignment / PageSize;
}
constexpr size_t GetAlignment() const {
return m_current_alignment * PageSize;
}
constexpr void FindBlock(uintptr_t &out, size_t &num_pages) {
if ((m_after_end - m_after_start) >= m_current_alignment) {
/* Select aligned memory from after block. */
const size_t available_pages = util::AlignDown(m_after_end, m_current_alignment) - m_after_start;
if (num_pages == 0 || available_pages < num_pages) {
num_pages = available_pages;
}
out = m_after_start * PageSize;
m_after_start += num_pages;
} else if ((m_before_end - m_before_start) >= m_current_alignment) {
/* Select aligned memory from before block. */
const size_t available_pages = m_before_end - util::AlignUp(m_before_start, m_current_alignment);
if (num_pages == 0 || available_pages < num_pages) {
num_pages = available_pages;
}
m_before_end -= num_pages;
out = m_before_end * PageSize;
} else {
/* Neither the after block nor the before block can provide aligned memory. */
out = 0;
num_pages = 0;
}
}
};
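/* NOTE: Per the ARMv8 VMSA, TTBR0_EL1 holds the translation table base  */
/* address in bits [47:1] and the ASID in bits [63:48]; EncodeTtbr packs */
/* both fields into the value installed on address space switch.         */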
constexpr u64 EncodeTtbr(KPhysicalAddress table, u8 asid) {
return (static_cast<u64>(asid) << 48) | (static_cast<u64>(GetInteger(table)));
}
}
ALWAYS_INLINE void KPageTable::NoteUpdated() const {
cpu::DataSynchronizationBarrierInnerShareableStore();
/* Mark ourselves as in a tlb maintenance operation. */
GetCurrentThread().SetInTlbMaintenanceOperation();
ON_SCOPE_EXIT { GetCurrentThread().ClearInTlbMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };
if (this->IsKernel()) {
this->OnKernelTableUpdated();
} else {
this->OnTableUpdated();
}
}
ALWAYS_INLINE void KPageTable::NoteSingleKernelPageUpdated(KProcessAddress virt_addr) const {
MESOSPHERE_ASSERT(this->IsKernel());
cpu::DataSynchronizationBarrierInnerShareableStore();
/* Mark ourselves as in a tlb maintenance operation. */
GetCurrentThread().SetInTlbMaintenanceOperation();
ON_SCOPE_EXIT { GetCurrentThread().ClearInTlbMaintenanceOperation(); __asm__ __volatile__("" ::: "memory"); };
this->OnKernelTableSinglePageUpdated(virt_addr);
}
void KPageTable::Initialize(s32 core_id) {
/* Nothing actually needed here. */
MESOSPHERE_UNUSED(core_id);
}
Result KPageTable::InitializeForKernel(void *table, KVirtualAddress start, KVirtualAddress end) {
/* Initialize basic fields. */
m_asid = 0;
m_manager = Kernel::GetSystemSystemResource().GetPageTableManagerPointer();
/* Initialize the base page table. */
MESOSPHERE_R_ABORT_UNLESS(KPageTableBase::InitializeForKernel(true, table, start, end));
R_SUCCEED();
}
Result KPageTable::InitializeForProcess(ams::svc::CreateProcessFlag flags, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KSystemResource *system_resource, KResourceLimit *resource_limit, size_t process_index) {
/* Determine our ASID */
m_asid = process_index + 1;
MESOSPHERE_ABORT_UNLESS(0 < m_asid && m_asid < util::size(s_ttbr0_entries));
/* Set our manager. */
m_manager = system_resource->GetPageTableManagerPointer();
/* Get the virtual address of our L1 table. */
const KPhysicalAddress ttbr0_phys = KPhysicalAddress(s_ttbr0_entries[m_asid] & UINT64_C(0xFFFFFFFFFFFE));
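/* NOTE: Masking with 0xFFFFFFFFFFFE strips the ASID (bits [63:48]) and bit 0 */
/* (CnP, where implemented) from the cached TTBR0 value, leaving the physical */
/* address of the L1 table.                                                   */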
const KVirtualAddress ttbr0_virt = KMemoryLayout::GetLinearVirtualAddress(ttbr0_phys);
/* Initialize our base table. */
const size_t as_width = GetAddressSpaceWidth(flags);
const KProcessAddress as_start = 0;
const KProcessAddress as_end = (1ul << as_width);
R_TRY(KPageTableBase::InitializeForProcess(flags, from_back, pool, GetVoidPointer(ttbr0_virt), as_start, as_end, code_address, code_size, system_resource, resource_limit));
/* Note that we've updated the table (since we created it). */
this->NoteUpdated();
R_SUCCEED();
}
Result KPageTable::Finalize() {
/* Only process tables should be finalized. */
MESOSPHERE_ASSERT(!this->IsKernel());
/* NOTE: Here Nintendo calls an unknown OnFinalize function. */
/* this->OnFinalize(); */
/* Note that we've updated (to ensure we're synchronized). */
this->NoteUpdated();
/* NOTE: Here Nintendo calls a second unknown OnFinalize function. */
/* this->OnFinalize2(); */
/* Free all pages in the table. */
{
/* Get implementation objects. */
auto &impl = this->GetImpl();
auto &mm = Kernel::GetMemoryManager();
/* Traverse, freeing all pages. */
{
/* Begin the traversal. */
TraversalContext context;
TraversalEntry entry;
KPhysicalAddress cur_phys_addr = Null<KPhysicalAddress>;
size_t cur_size = 0;
u8 has_attr = 0;
bool cur_valid = impl.BeginTraversal(std::addressof(entry), std::addressof(context), this->GetAddressSpaceStart());
while (true) {
if (cur_valid) {
/* Free the actual pages, if there are any. */
if (IsHeapPhysicalAddressForFinalize(entry.phys_addr)) {
if (cur_size > 0) {
/* NOTE: Nintendo really does check next_entry.attr == (cur_entry.attr != 0)...but attr is always zero as of 18.0.0, and this is "probably" for the new console or debug-only anyway, */
/* so we'll implement the weird logic verbatim even though it doesn't match the GetContiguousRange logic. */
if (entry.phys_addr == cur_phys_addr + cur_size && entry.attr == has_attr) {
/* Just extend the block, since we can. */
cur_size += entry.block_size;
} else {
/* Close the block, and begin tracking anew. */
mm.Close(cur_phys_addr, cur_size / PageSize);
cur_phys_addr = entry.phys_addr;
cur_size = entry.block_size;
has_attr = entry.attr != 0;
}
} else {
cur_phys_addr = entry.phys_addr;
cur_size = entry.block_size;
has_attr = entry.attr != 0;
}
}
/* Clean up the page table entries. */
bool freeing_table = false;
while (true) {
/* Clear the entries. */
const size_t num_to_clear = (!freeing_table && context.is_contiguous) ? BlocksPerContiguousBlock : 1;
auto *pte = reinterpret_cast<PageTableEntry *>(context.is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)) : reinterpret_cast<uintptr_t>(context.level_entries[context.level]));
for (size_t i = 0; i < num_to_clear; ++i) {
pte[i] = InvalidPageTableEntry;
}
/* Remove the entries from the previous table. */
if (context.level != KPageTableImpl::EntryLevel_L1) {
context.level_entries[context.level + 1]->CloseTableReferences(num_to_clear);
}
/* If we cleared a table, we need to note that we updated and free the table. */
if (freeing_table) {
KVirtualAddress table = KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize));
if (table == Null<KVirtualAddress>) {
break;
}
ClearPageTable(table);
this->GetPageTableManager().Free(table);
}
/* Advance; we're no longer contiguous. */
context.is_contiguous = false;
context.level_entries[context.level] = pte + num_to_clear - 1;
/* We may have removed the last entries in a table, in which case we can free and unmap the tables. */
if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableReferenceCount() != 0) {
break;
}
/* Advance; we will not be working with blocks any more. */
context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) + 1);
freeing_table = true;
}
}
/* Continue the traversal. */
cur_valid = impl.ContinueTraversal(std::addressof(entry), std::addressof(context));
if (entry.block_size == 0) {
break;
}
}
/* Free any remaining pages. */
if (cur_size > 0) {
mm.Close(cur_phys_addr, cur_size / PageSize);
}
}
/* Clear the L1 table. */
{
const KVirtualAddress l1_table = reinterpret_cast<uintptr_t>(impl.Finalize());
ClearPageTable(l1_table);
}
/* Perform inherited finalization. */
KPageTableBase::Finalize();
}
R_SUCCEED();
}
Result KPageTable::OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) {
/* Check validity of parameters. */
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(num_pages > 0);
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_ASSERT(this->ContainsPages(virt_addr, num_pages));
if (operation == OperationType_Map) {
MESOSPHERE_ABORT_UNLESS(is_pa_valid);
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
} else {
MESOSPHERE_ABORT_UNLESS(!is_pa_valid);
}
if (operation == OperationType_Unmap) {
R_RETURN(this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll));
} else if (operation == OperationType_Separate) {
R_RETURN(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll));
} else {
auto entry_template = this->GetEntryTemplate(properties);
switch (operation) {
case OperationType_Map:
/* If mapping io or uncached pages, ensure that there is no pending reschedule. */
if (properties.io || properties.uncached) {
KScopedSchedulerLock sl;
}
R_RETURN(this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
case OperationType_ChangePermissions:
R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, false, false, page_list, reuse_ll));
case OperationType_ChangePermissionsAndRefresh:
R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, true, false, page_list, reuse_ll));
case OperationType_ChangePermissionsAndRefreshAndFlush:
R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, true, true, page_list, reuse_ll));
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
}
Result KPageTable::OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, const KPageGroup &page_group, const KPageProperties properties, OperationType operation, bool reuse_ll) {
/* Check validity of parameters. */
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_ASSERT(num_pages > 0);
MESOSPHERE_ASSERT(num_pages == page_group.GetNumPages());
/* Map the page group. */
auto entry_template = this->GetEntryTemplate(properties);
switch (operation) {
case OperationType_MapGroup:
case OperationType_MapFirstGroup:
/* If mapping io or uncached pages, ensure that there is no pending reschedule. */
if (properties.io || properties.uncached) {
KScopedSchedulerLock sl;
}
R_RETURN(this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, operation != OperationType_MapFirstGroup, page_list, reuse_ll));
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Ensure there are no pending data writes. */
cpu::DataSynchronizationBarrier();
auto &impl = this->GetImpl();
/* If we're not forcing an unmap, separate pages immediately. */
if (!force) {
R_TRY(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll));
}
/* Cache initial addresses for use on cleanup. */
const KProcessAddress orig_virt_addr = virt_addr;
size_t remaining_pages = num_pages;
/* Ensure that any pages we track close on exit. */
KPageGroup pages_to_close(this->GetBlockInfoManager());
ON_SCOPE_EXIT { pages_to_close.CloseAndReset(); };
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
bool next_valid = impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), virt_addr);
while (remaining_pages > 0) {
/* Handle the case where we're not valid. */
if (!next_valid) {
MESOSPHERE_ABORT_UNLESS(force);
const size_t cur_size = std::min(next_entry.block_size - (GetInteger(virt_addr) & (next_entry.block_size - 1)), remaining_pages * PageSize);
remaining_pages -= cur_size / PageSize;
virt_addr += cur_size;
next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
continue;
}
/* Handle the case where the block is bigger than it should be. */
if (next_entry.block_size > remaining_pages * PageSize) {
MESOSPHERE_ABORT_UNLESS(force);
MESOSPHERE_R_ABORT_UNLESS(this->SeparatePagesImpl(std::addressof(next_entry), std::addressof(context), virt_addr, remaining_pages * PageSize, page_list, reuse_ll));
}
/* Check that our state is coherent. */
MESOSPHERE_ASSERT((next_entry.block_size / PageSize) <= remaining_pages);
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size));
/* Unmap the block. */
bool freeing_table = false;
bool need_recalculate_virt_addr = false;
while (true) {
/* Clear the entries. */
const size_t num_to_clear = (!freeing_table && context.is_contiguous) ? BlocksPerContiguousBlock : 1;
auto *pte = reinterpret_cast<PageTableEntry *>(context.is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)) : reinterpret_cast<uintptr_t>(context.level_entries[context.level]));
for (size_t i = 0; i < num_to_clear; ++i) {
pte[i] = InvalidPageTableEntry;
}
/* Remove the entries from the previous table. */
if (context.level != KPageTableImpl::EntryLevel_L1) {
context.level_entries[context.level + 1]->CloseTableReferences(num_to_clear);
}
/* If we cleared a table, we need to note that we updated and free the table. */
if (freeing_table) {
/* If there's no table, we also don't need to do a free. */
const KVirtualAddress table = KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level - 1]), PageSize));
if (table == Null<KVirtualAddress>) {
break;
}
this->NoteUpdated();
this->FreePageTable(page_list, table);
need_recalculate_virt_addr = true;
}
/* Advance; we're no longer contiguous. */
context.is_contiguous = false;
context.level_entries[context.level] = pte + num_to_clear - 1;
/* We may have removed the last entries in a table, in which case we can free and unmap the tables. */
if (context.level >= KPageTableImpl::EntryLevel_L1 || context.level_entries[context.level + 1]->GetTableReferenceCount() != 0) {
break;
}
/* Advance; we will not be working with blocks any more. */
context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) + 1);
freeing_table = true;
}
/* Close the blocks. */
if (!force && IsHeapPhysicalAddress(next_entry.phys_addr)) {
const size_t block_num_pages = next_entry.block_size / PageSize;
if (R_FAILED(pages_to_close.AddBlock(next_entry.phys_addr, block_num_pages))) {
this->NoteUpdated();
Kernel::GetMemoryManager().Close(next_entry.phys_addr, block_num_pages);
pages_to_close.CloseAndReset();
}
}
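/* NOTE: Pages are batched into pages_to_close and released on scope exit; */
/* if tracking a block fails (no block infos left), the TLB update is      */
/* published immediately so the reference can be dropped right away.       */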
/* Advance. */
size_t freed_size = next_entry.block_size;
if (need_recalculate_virt_addr) {
/* We advanced by more than the block, so we need to calculate the actual advanced size. */
const size_t block_size = impl.GetBlockSize(context.level, context.is_contiguous);
const KProcessAddress new_virt_addr = util::AlignDown(GetInteger(impl.GetAddressForContext(std::addressof(context))) + block_size, block_size);
MESOSPHERE_ABORT_UNLESS(new_virt_addr >= virt_addr + next_entry.block_size);
freed_size = std::min<size_t>(new_virt_addr - virt_addr, remaining_pages * PageSize);
}
/* Advance by the freed size. */
virt_addr += freed_size;
remaining_pages -= freed_size / PageSize;
next_valid = impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context));
}
/* Ensure we remain coherent. */
if (this->IsKernel() && num_pages == 1) {
this->NoteSingleKernelPageUpdated(orig_virt_addr);
} else {
this->NoteUpdated();
}
R_SUCCEED();
}
Result KPageTable::Map(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, size_t page_size, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(virt_addr), PageSize));
MESOSPHERE_ASSERT(util::IsAligned(GetInteger(phys_addr), PageSize));
auto &impl = this->GetImpl();
u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, false, false);
/* Begin traversal. */
TraversalContext context;
TraversalEntry entry;
bool valid = impl.BeginTraversal(std::addressof(entry), std::addressof(context), virt_addr);
/* Iterate, mapping each page. */
while (num_pages > 0) {
/* If we're mapping at the address, there must be nothing there. */
MESOSPHERE_ABORT_UNLESS(!valid);
/* If we fail, clean up any empty tables we may have allocated. */
ON_RESULT_FAILURE {
/* Remove entries for and free any tables. */
while (context.level < KPageTableImpl::EntryLevel_L1) {
/* If the higher-level table has entries, we don't need to do a free. */
if (context.level_entries[context.level + 1]->GetTableReferenceCount() != 0) {
break;
}
/* If there's no table, we also don't need to do a free. */
const KVirtualAddress table = KVirtualAddress(util::AlignDown(reinterpret_cast<uintptr_t>(context.level_entries[context.level]), PageSize));
if (table == Null<KVirtualAddress>) {
break;
}
/* Clear the entry for the table we're removing. */
*context.level_entries[context.level + 1] = InvalidPageTableEntry;
/* Remove the entry for the table one level higher. */
if (context.level + 1 < KPageTableImpl::EntryLevel_L1) {
context.level_entries[context.level + 2]->CloseTableReferences(1);
}
/* Advance our level. */
context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) + 1);
/* Note that we performed an update and free the table. */
this->NoteUpdated();
this->FreePageTable(page_list, table);
}
};
/* If necessary, allocate page tables for the entry. */
size_t mapping_size = entry.block_size;
while (mapping_size > page_size) {
/* Allocate the table. */
const auto table = AllocatePageTable(page_list, reuse_ll);
R_UNLESS(table != Null<KVirtualAddress>, svc::ResultOutOfResource());
/* Wait for pending stores to complete. */
cpu::DataSynchronizationBarrierInnerShareableStore();
/* Update the block entry to be a table entry. */
*context.level_entries[context.level] = PageTableEntry(PageTableEntry::TableTag{}, KPageTable::GetPageTablePhysicalAddress(table), this->IsKernel(), true, 0);
/* Add the entry to the table containing this one. */
if (context.level != KPageTableImpl::EntryLevel_L1) {
context.level_entries[context.level + 1]->OpenTableReferences(1);
}
/* Decrease our level. */
context.level = static_cast<KPageTableImpl::EntryLevel>(util::ToUnderlying(context.level) - 1);
/* Add our new entry to the context. */
context.level_entries[context.level] = GetPointer<PageTableEntry>(table) + impl.GetLevelIndex(virt_addr, context.level);
/* Update our mapping size. */
mapping_size = impl.GetBlockSize(context.level);
}
/* Determine how many pages we can set up on this iteration. */
const size_t block_size = impl.GetBlockSize(context.level);
const size_t max_ptes = (context.level == KPageTableImpl::EntryLevel_L1 ? impl.GetNumL1Entries() : BlocksPerTable) - ((reinterpret_cast<uintptr_t>(context.level_entries[context.level]) / sizeof(PageTableEntry)) & (BlocksPerTable - 1));
const size_t max_pages = (block_size * max_ptes) / PageSize;
const size_t cur_pages = std::min(max_pages, num_pages);
/* Determine the new base attribute. */
const bool contig = page_size >= BlocksPerContiguousBlock * mapping_size;
const size_t num_ptes = cur_pages / (block_size / PageSize);
auto *pte = context.level_entries[context.level];
for (size_t i = 0; i < num_ptes; ++i) {
pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + i * block_size, entry_template, sw_reserved_bits, contig, context.level == KPageTableImpl::EntryLevel_L3);
sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
}
/* Add the entries to the table containing this one. */
if (context.level != KPageTableImpl::EntryLevel_L1) {
context.level_entries[context.level + 1]->OpenTableReferences(num_ptes);
}
/* Update our context. */
context.is_contiguous = contig;
context.level_entries[context.level] = pte + num_ptes - (contig ? BlocksPerContiguousBlock : 1);
/* Advance our addresses. */
phys_addr += cur_pages * PageSize;
virt_addr += cur_pages * PageSize;
num_pages -= cur_pages;
/* Continue traversal. */
valid = impl.ContinueTraversal(std::addressof(entry), std::addressof(context));
}
/* We mapped, so wait for our writes to take. */
cpu::DataSynchronizationBarrierInnerShareableStore();
R_SUCCEED();
}
Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Cache initial addresses for use on cleanup. */
const KProcessAddress orig_virt_addr = virt_addr;
const KPhysicalAddress orig_phys_addr = phys_addr;
size_t remaining_pages = num_pages;
/* Map the pages, using a guard to ensure we don't leak. */
{
ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
if (num_pages < ContiguousPageSize / PageSize) {
R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, L3BlockSize, page_list, reuse_ll));
remaining_pages -= num_pages;
virt_addr += num_pages * PageSize;
phys_addr += num_pages * PageSize;
} else {
/* Map the fractional part of the pages. */
size_t alignment;
for (alignment = ContiguousPageSize; (virt_addr & (alignment - 1)) == (phys_addr & (alignment - 1)); alignment = GetLargerAlignment(alignment)) {
/* Check if this would be our last map. */
const size_t pages_to_map = ((alignment - (virt_addr & (alignment - 1))) & (alignment - 1)) / PageSize;
if (pages_to_map + (alignment / PageSize) > remaining_pages) {
break;
}
/* Map pages, if we should. */
if (pages_to_map > 0) {
R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, disable_head_merge && virt_addr == orig_virt_addr, GetSmallerAlignment(alignment), page_list, reuse_ll));
remaining_pages -= pages_to_map;
virt_addr += pages_to_map * PageSize;
phys_addr += pages_to_map * PageSize;
}
/* Don't go further than L1 block. */
if (alignment == L1BlockSize) {
break;
}
}
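/* The loop above climbed through increasing alignments, mapping the   */
/* unaligned head at each step; the loop below descends again, mapping */
/* the largest aligned runs first to maximize use of large page sizes. */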
while (remaining_pages > 0) {
/* Select the next smallest alignment. */
alignment = GetSmallerAlignment(alignment);
MESOSPHERE_ASSERT((virt_addr & (alignment - 1)) == 0);
MESOSPHERE_ASSERT((phys_addr & (alignment - 1)) == 0);
/* Map pages, if we should. */
const size_t pages_to_map = util::AlignDown(remaining_pages, alignment / PageSize);
if (pages_to_map > 0) {
R_TRY(this->Map(virt_addr, phys_addr, pages_to_map, entry_template, disable_head_merge && virt_addr == orig_virt_addr, alignment, page_list, reuse_ll));
remaining_pages -= pages_to_map;
virt_addr += pages_to_map * PageSize;
phys_addr += pages_to_map * PageSize;
}
}
}
}
/* Perform what coalescing we can. */
this->MergePages(orig_virt_addr, num_pages, page_list);
/* Open references to the pages, if we should. */
if (IsHeapPhysicalAddress(orig_phys_addr)) {
Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages);
}
R_SUCCEED();
}
Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, bool not_first, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* We want to maintain a new reference to every page in the group. */
KScopedPageGroup spg(pg, not_first);
/* Cache initial address for use on cleanup. */
const KProcessAddress orig_virt_addr = virt_addr;
size_t mapped_pages = 0;
/* Map the pages, using a guard to ensure we don't leak. */
{
ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
if (num_pages < ContiguousPageSize / PageSize) {
for (const auto &block : pg) {
const KPhysicalAddress block_phys_addr = block.GetAddress();
const size_t cur_pages = block.GetNumPages();
R_TRY(this->Map(virt_addr, block_phys_addr, cur_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, L3BlockSize, page_list, reuse_ll));
virt_addr += cur_pages * PageSize;
mapped_pages += cur_pages;
}
} else {
/* Create a block representing our virtual space. */
AlignedMemoryBlock virt_block(GetInteger(virt_addr), num_pages, L1BlockSize);
for (const auto &block : pg) {
/* Create a block representing this physical group, synchronize its alignment to our virtual block. */
const KPhysicalAddress block_phys_addr = block.GetAddress();
size_t cur_pages = block.GetNumPages();
AlignedMemoryBlock phys_block(GetInteger(block_phys_addr), cur_pages, virt_block.GetAlignment());
virt_block.SetAlignment(phys_block.GetAlignment());
while (cur_pages > 0) {
/* Find a physical region for us to map at. */
uintptr_t phys_choice = 0;
size_t phys_pages = 0;
phys_block.FindBlock(phys_choice, phys_pages);
/* If we didn't find a region, try decreasing our alignment. */
if (phys_pages == 0) {
const size_t next_alignment = KPageTable::GetSmallerAlignment(phys_block.GetAlignment());
MESOSPHERE_ASSERT(next_alignment >= PageSize);
phys_block.SetAlignment(next_alignment);
virt_block.SetAlignment(next_alignment);
continue;
}
/* Begin choosing virtual blocks to map at the region we chose. */
while (phys_pages > 0) {
/* Find a virtual region for us to map at. */
uintptr_t virt_choice = 0;
size_t virt_pages = phys_pages;
virt_block.FindBlock(virt_choice, virt_pages);
/* If we didn't find a region, try decreasing our alignment. */
if (virt_pages == 0) {
const size_t next_alignment = KPageTable::GetSmallerAlignment(virt_block.GetAlignment());
MESOSPHERE_ASSERT(next_alignment >= PageSize);
phys_block.SetAlignment(next_alignment);
virt_block.SetAlignment(next_alignment);
continue;
}
/* Map! */
R_TRY(this->Map(virt_choice, phys_choice, virt_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, virt_block.GetAlignment(), page_list, reuse_ll));
/* Advance. */
phys_choice += virt_pages * PageSize;
phys_pages -= virt_pages;
cur_pages -= virt_pages;
mapped_pages += virt_pages;
}
}
}
}
}
MESOSPHERE_ASSERT(mapped_pages == num_pages);
/* Perform what coalescing we can. */
this->MergePages(orig_virt_addr, num_pages, page_list);
/* We succeeded! We want to persist the reference to the pages. */
spg.CancelClose();
R_SUCCEED();
}
bool KPageTable::MergePages(TraversalContext *context, PageLinkedList *page_list) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
auto &impl = this->GetImpl();
/* Iteratively merge, until we can't. */
bool merged = false;
while (true) {
/* Try to merge. */
KVirtualAddress freed_table = Null<KVirtualAddress>;
if (!impl.MergePages(std::addressof(freed_table), context)) {
break;
}
/* Note that we updated. */
this->NoteUpdated();
/* Free the page. */
if (freed_table != Null<KVirtualAddress>) {
ClearPageTable(freed_table);
this->FreePageTable(page_list, freed_table);
}
/* We performed at least one merge. */
merged = true;
}
return merged;
}
void KPageTable::MergePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext context;
TraversalEntry entry;
MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(context), virt_addr));
/* Merge start of the range. */
this->MergePages(std::addressof(context), page_list);
/* If we have more than one page, do the same for the end of the range. */
if (num_pages > 1) {
/* Begin traversal for end of range. */
const size_t size = num_pages * PageSize;
const auto end_page = virt_addr + size;
const auto last_page = end_page - PageSize;
MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(context), last_page));
/* Merge. */
this->MergePages(std::addressof(context), page_list);
}
}
Result KPageTable::SeparatePagesImpl(TraversalEntry *entry, TraversalContext *context, KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
auto &impl = this->GetImpl();
/* If at any point we fail, we want to merge. */
ON_RESULT_FAILURE { this->MergePages(context, page_list); };
/* Iterate, separating until our block size is small enough. */
while (entry->block_size > block_size) {
/* If necessary, allocate a table. */
KVirtualAddress table = Null<KVirtualAddress>;
if (!context->is_contiguous) {
table = this->AllocatePageTable(page_list, reuse_ll);
R_UNLESS(table != Null<KVirtualAddress>, svc::ResultOutOfResource());
}
/* Separate. */
impl.SeparatePages(entry, context, virt_addr, GetPointer<PageTableEntry>(table));
this->NoteUpdated();
}
R_SUCCEED();
}
Result KPageTable::SeparatePages(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
auto &impl = this->GetImpl();
/* Begin traversal. */
TraversalContext start_context;
TraversalEntry entry;
MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(start_context), virt_addr));
/* Separate pages at the start of the range. */
const size_t size = num_pages * PageSize;
R_TRY(this->SeparatePagesImpl(std::addressof(entry), std::addressof(start_context), virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
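/* NOTE: The target block size above is the largest power-of-two alignment */
/* of the start address, capped by the range size, so only blocks that     */
/* straddle the range boundary get split.                                  */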
/* If necessary, separate pages at the end of the range. */
if (num_pages > 1) {
const auto end_page = virt_addr + size;
const auto last_page = end_page - PageSize;
/* Begin traversal. */
TraversalContext end_context;
MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(entry), std::addressof(end_context), last_page));
ON_RESULT_FAILURE { this->MergePages(std::addressof(start_context), page_list); };
R_TRY(this->SeparatePagesImpl(std::addressof(entry), std::addressof(end_context), last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
}
R_SUCCEED();
}
Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, bool flush_mapping, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Ensure there are no pending data writes. */
cpu::DataSynchronizationBarrier();
/* Separate pages before we change permissions. */
R_TRY(this->SeparatePages(virt_addr, num_pages, page_list, reuse_ll));
/* ===================================================== */
/* Define a helper function which will apply our template to entries. */
enum ApplyOption : u32 {
ApplyOption_None = 0,
ApplyOption_FlushDataCache = (1u << 0),
ApplyOption_MergeMappings = (1u << 1),
};
auto ApplyEntryTemplate = [this, virt_addr, disable_merge_attr, num_pages, page_list](PageTableEntry entry_template, u32 apply_option) -> void {
/* Create work variables for us to use. */
const KProcessAddress orig_virt_addr = virt_addr;
const KProcessAddress end_virt_addr = orig_virt_addr + (num_pages * PageSize);
KProcessAddress cur_virt_addr = virt_addr;
size_t remaining_pages = num_pages;
auto &impl = this->GetImpl();
/* Parse the disable merge attrs. */
const bool attr_disable_head = (disable_merge_attr & DisableMergeAttribute_DisableHead) != 0;
const bool attr_disable_head_body = (disable_merge_attr & DisableMergeAttribute_DisableHeadAndBody) != 0;
const bool attr_enable_head_body = (disable_merge_attr & DisableMergeAttribute_EnableHeadAndBody) != 0;
const bool attr_disable_tail = (disable_merge_attr & DisableMergeAttribute_DisableTail) != 0;
const bool attr_enable_tail = (disable_merge_attr & DisableMergeAttribute_EnableTail) != 0;
const bool attr_enable_and_merge = (disable_merge_attr & DisableMergeAttribute_EnableAndMergeHeadBodyTail) != 0;
/* Begin traversal. */
TraversalContext context;
TraversalEntry next_entry;
MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_virt_addr));
/* Continue changing properties until we've changed them for all pages. */
bool cleared_disable_merge_bits = false;
while (remaining_pages > 0) {
MESOSPHERE_ABORT_UNLESS(util::IsAligned(GetInteger(next_entry.phys_addr), next_entry.block_size));
MESOSPHERE_ABORT_UNLESS(next_entry.block_size <= remaining_pages * PageSize);
/* Determine if we're at the start. */
const bool is_start = (cur_virt_addr == orig_virt_addr);
const bool is_end = ((cur_virt_addr + next_entry.block_size) == end_virt_addr);
/* Determine the relevant merge attributes. */
bool disable_head_merge, disable_head_body_merge, disable_tail_merge;
if (next_entry.IsHeadMergeDisabled()) {
disable_head_merge = true;
} else if (attr_disable_head) {
disable_head_merge = is_start;
} else {
disable_head_merge = false;
}
if (is_start) {
if (attr_disable_head_body) {
disable_head_body_merge = true;
} else if (attr_enable_head_body) {
disable_head_body_merge = false;
} else {
disable_head_body_merge = (!attr_enable_and_merge && next_entry.IsHeadAndBodyMergeDisabled());
}
} else {
disable_head_body_merge = (!attr_enable_and_merge && next_entry.IsHeadAndBodyMergeDisabled());
cleared_disable_merge_bits |= (attr_enable_and_merge && next_entry.IsHeadAndBodyMergeDisabled());
}
if (is_end) {
if (attr_disable_tail) {
disable_tail_merge = true;
} else if (attr_enable_tail) {
disable_tail_merge = false;
} else {
disable_tail_merge = (!attr_enable_and_merge && next_entry.IsTailMergeDisabled());
}
} else {
disable_tail_merge = (!attr_enable_and_merge && next_entry.IsTailMergeDisabled());
cleared_disable_merge_bits |= (attr_enable_and_merge && next_entry.IsTailMergeDisabled());
}
/* Encode the merge disable flags into the software reserved bits. */
u8 sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(disable_head_merge, disable_head_body_merge, disable_tail_merge);
/* If we should flush entries, do so. */
if ((apply_option & ApplyOption_FlushDataCache) != 0) {
if (IsHeapPhysicalAddress(next_entry.phys_addr)) {
cpu::FlushDataCache(GetVoidPointer(GetHeapVirtualAddress(next_entry.phys_addr)), next_entry.block_size);
}
}
/* Apply the entry template. */
{
const size_t num_entries = context.is_contiguous ? BlocksPerContiguousBlock : 1;
auto * const pte = context.level_entries[context.level];
const size_t block_size = impl.GetBlockSize(context.level);
for (size_t i = 0; i < num_entries; ++i) {
pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, next_entry.phys_addr + i * block_size, entry_template, sw_reserved_bits, context.is_contiguous, context.level == KPageTableImpl::EntryLevel_L3);
sw_reserved_bits &= ~(PageTableEntry::SoftwareReservedBit_DisableMergeHead);
}
}
/* If our option asks us to, try to merge mappings. */
bool merge = ((apply_option & ApplyOption_MergeMappings) != 0 || cleared_disable_merge_bits) && next_entry.block_size < L1BlockSize;
if (merge) {
const size_t larger_align = GetLargerAlignment(next_entry.block_size);
if (util::IsAligned(GetInteger(cur_virt_addr) + next_entry.block_size, larger_align)) {
const uintptr_t aligned_start = util::AlignDown(GetInteger(cur_virt_addr), larger_align);
if (orig_virt_addr <= aligned_start && aligned_start + larger_align - 1 < GetInteger(orig_virt_addr) + (num_pages * PageSize) - 1) {
merge = this->MergePages(std::addressof(context), page_list);
} else {
merge = false;
}
} else {
merge = false;
}
}
/* If we merged, correct the traversal to a sane state. */
if (merge) {
/* NOTE: Begin a new traversal, now that we've merged. */
MESOSPHERE_ABORT_UNLESS(impl.BeginTraversal(std::addressof(next_entry), std::addressof(context), cur_virt_addr));
/* The actual size must exclude the portion of the block that lies before our virtual address. */
const size_t actual_size = next_entry.block_size - (GetInteger(next_entry.phys_addr) & (next_entry.block_size - 1));
remaining_pages -= std::min(remaining_pages, actual_size / PageSize);
cur_virt_addr += actual_size;
} else {
/* If we didn't merge, just advance. */
remaining_pages -= next_entry.block_size / PageSize;
cur_virt_addr += next_entry.block_size;
}
/* Continue our traversal. */
if (remaining_pages == 0) {
break;
}
MESOSPHERE_ABORT_UNLESS(impl.ContinueTraversal(std::addressof(next_entry), std::addressof(context)));
}
};
/* ===================================================== */
/* If we don't need to refresh the pages, we can just apply the mappings. */
if (!refresh_mapping) {
ApplyEntryTemplate(entry_template, ApplyOption_None);
this->NoteUpdated();
} else {
/* We need to refresh the mappings. */
/* First, apply the changes without the mapped bit. This will cause all entries to page fault if accessed. */
{
PageTableEntry unmapped_template = entry_template;
unmapped_template.SetMapped(false);
ApplyEntryTemplate(unmapped_template, ApplyOption_MergeMappings);
this->NoteUpdated();
}
/* Next, take and immediately release the scheduler lock. This will force a reschedule. */
{
KScopedSchedulerLock sl;
}
/* Finally, apply the changes as directed, flushing the mappings before they're applied (if we should). */
ApplyEntryTemplate(entry_template, flush_mapping ? ApplyOption_FlushDataCache : ApplyOption_None);
}
/* We've succeeded, now perform what coalescing we can. */
this->MergePages(virt_addr, num_pages, page_list);
R_SUCCEED();
}
void KPageTable::FinalizeUpdateImpl(PageLinkedList *page_list) {
while (page_list->Peek()) {
KVirtualAddress page = KVirtualAddress(page_list->Pop());
MESOSPHERE_ASSERT(this->GetPageTableManager().IsInPageTableHeap(page));
MESOSPHERE_ASSERT(this->GetPageTableManager().GetRefCount(page) == 0);
this->GetPageTableManager().Free(page);
}
}
}
| 52,448 | C++ | .cpp | 853 | 45.3517 | 302 | 0.564442 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
| 6,998 | kern_cpu.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/arch/arm64/kern_cpu.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::arch::arm64::cpu {
/* Declare prototype to be implemented in asm. */
void SynchronizeAllCoresImpl(s32 *sync_var, s32 num_cores);
namespace {
ALWAYS_INLINE void SetEventLocally() {
__asm__ __volatile__("sevl" ::: "memory");
}
ALWAYS_INLINE void WaitForEvent() {
__asm__ __volatile__("wfe" ::: "memory");
}
class KScopedCoreMigrationDisable {
public:
ALWAYS_INLINE KScopedCoreMigrationDisable() { GetCurrentThread().DisableCoreMigration(); }
ALWAYS_INLINE ~KScopedCoreMigrationDisable() { GetCurrentThread().EnableCoreMigration(); }
};
class KScopedCacheMaintenance {
private:
bool m_active;
public:
ALWAYS_INLINE KScopedCacheMaintenance() {
__asm__ __volatile__("" ::: "memory");
if (m_active = !GetCurrentThread().IsInCacheMaintenanceOperation(); m_active) {
GetCurrentThread().SetInCacheMaintenanceOperation();
}
}
ALWAYS_INLINE ~KScopedCacheMaintenance() {
if (m_active) {
GetCurrentThread().ClearInCacheMaintenanceOperation();
}
__asm__ __volatile__("" ::: "memory");
}
};
/* Nintendo registers a handler for an SGI on thread termination, but does not handle anything. */
/* This is sufficient, because post-interrupt scheduling is all that is really intended to occur. */
class KThreadTerminationInterruptHandler : public KInterruptHandler {
public:
constexpr KThreadTerminationInterruptHandler() : KInterruptHandler() { /* ... */ }
virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override {
MESOSPHERE_UNUSED(interrupt_id);
return nullptr;
}
};
class KPerformanceCounterInterruptHandler : public KInterruptHandler {
private:
static constinit inline KLightLock s_lock;
private:
u64 m_counter;
s32 m_which;
bool m_done;
public:
constexpr KPerformanceCounterInterruptHandler() : KInterruptHandler(), m_counter(), m_which(), m_done() { /* ... */ }
static KLightLock &GetLock() { return s_lock; }
void Setup(s32 w) {
m_done = false;
m_which = w;
}
void Wait() {
while (!m_done) {
cpu::Yield();
}
}
u64 GetCounter() const { return m_counter; }
/* Nintendo misuses this per their own API, but it's functional. */
virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override {
MESOSPHERE_UNUSED(interrupt_id);
if (m_which < 0) {
m_counter = cpu::GetCycleCounter();
} else {
m_counter = cpu::GetPerformanceCounter(m_which);
}
DataMemoryBarrierInnerShareable();
m_done = true;
return nullptr;
}
};
class KCoreBarrierInterruptHandler : public KInterruptHandler {
private:
util::Atomic<u64> m_target_cores;
KSpinLock m_lock;
public:
constexpr KCoreBarrierInterruptHandler() : KInterruptHandler(), m_target_cores(0), m_lock() { /* ... */ }
virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override {
MESOSPHERE_UNUSED(interrupt_id);
m_target_cores &= ~(1ul << GetCurrentCoreId());
return nullptr;
}
void SynchronizeCores(u64 core_mask) {
/* Disable dispatch while we synchronize. */
KScopedDisableDispatch dd;
/* Acquire exclusive access to ourselves. */
KScopedSpinLock lk(m_lock);
/* If necessary, force synchronization with other cores. */
if (const u64 other_cores_mask = core_mask & ~(1ul << GetCurrentCoreId()); other_cores_mask != 0) {
/* Send an interrupt to the other cores. */
m_target_cores = other_cores_mask;
cpu::DataSynchronizationBarrierInnerShareable();
Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CoreBarrier, other_cores_mask);
/* Wait for all cores to acknowledge. */
{
u64 v;
__asm__ __volatile__("ldaxr %[v], %[p]\n"
"cbz %[v], 1f\n"
"0:\n"
"wfe\n"
"ldaxr %[v], %[p]\n"
"cbnz %[v], 0b\n"
"1:\n"
: [v]"=&r"(v)
: [p]"Q"(*reinterpret_cast<u64 *>(std::addressof(m_target_cores)))
: "memory");
}
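/* NOTE: The ldaxr/wfe loop above waits for m_target_cores to reach   */
/* zero without spinning: another core's store to the variable clears */
/* the exclusive monitor, generating the event that wakes the wfe.    */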
}
}
};
class KCacheHelperInterruptHandler : public KInterruptHandler {
private:
static constexpr s32 ThreadPriority = 8;
public:
enum class Operation {
Idle,
InstructionMemoryBarrier,
StoreDataCache,
FlushDataCache,
};
private:
KLightLock m_lock;
KLightLock m_cv_lock;
KLightConditionVariable m_cv;
util::Atomic<u64> m_target_cores;
volatile Operation m_operation;
private:
static void ThreadFunction(uintptr_t _this) {
reinterpret_cast<KCacheHelperInterruptHandler *>(_this)->ThreadFunctionImpl();
}
void ThreadFunctionImpl() {
const u64 core_mask = (1ul << GetCurrentCoreId());
while (true) {
/* Wait for a request to come in. */
{
KScopedLightLock lk(m_cv_lock);
while ((m_target_cores.Load() & core_mask) == 0) {
m_cv.Wait(std::addressof(m_cv_lock));
}
}
/* Process the request. */
this->ProcessOperation();
/* Broadcast, if there's nothing pending. */
{
KScopedLightLock lk(m_cv_lock);
m_target_cores &= ~core_mask;
if (m_target_cores.Load() == 0) {
m_cv.Broadcast();
}
}
}
}
void ProcessOperation();
public:
constexpr KCacheHelperInterruptHandler() : KInterruptHandler(), m_lock(), m_cv_lock(), m_cv(util::ConstantInitialize), m_target_cores(0), m_operation(Operation::Idle) { /* ... */ }
void Initialize(s32 core_id) {
/* Reserve a thread from the system limit. */
MESOSPHERE_ABORT_UNLESS(Kernel::GetSystemResourceLimit().Reserve(ams::svc::LimitableResource_ThreadCountMax, 1));
/* Create a new thread. */
KThread *new_thread = KThread::Create();
MESOSPHERE_ABORT_UNLESS(new_thread != nullptr);
MESOSPHERE_R_ABORT_UNLESS(KThread::InitializeKernelThread(new_thread, ThreadFunction, reinterpret_cast<uintptr_t>(this), ThreadPriority, core_id));
/* Register the new thread. */
KThread::Register(new_thread);
/* Run the thread. */
new_thread->Run();
}
virtual KInterruptTask *OnInterrupt(s32 interrupt_id) override {
MESOSPHERE_UNUSED(interrupt_id);
this->ProcessOperation();
m_target_cores &= ~(1ul << GetCurrentCoreId());
return nullptr;
}
void RequestOperation(Operation op) {
KScopedLightLock lk(m_lock);
/* Create core masks for us to use. */
constexpr u64 AllCoresMask = (1ul << cpu::NumCores) - 1ul;
const u64 other_cores_mask = AllCoresMask & ~(1ul << GetCurrentCoreId());
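/* There are two delivery paths below: instruction memory barriers (and any */
/* request made while the kernel is still initializing) are serviced in     */
/* interrupt context via IPI, while cache operations are handed to the      */
/* per-core helper threads, presumably so that long-running set/way         */
/* maintenance does not execute in interrupt context.                       */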
if ((op == Operation::InstructionMemoryBarrier) || (Kernel::GetState() == Kernel::State::Initializing)) {
/* Check that there's no on-going operation. */
MESOSPHERE_ABORT_UNLESS(m_operation == Operation::Idle);
MESOSPHERE_ABORT_UNLESS(m_target_cores.Load() == 0);
/* Set operation. */
m_operation = op;
/* For certain operations, we want to send an interrupt. */
m_target_cores = other_cores_mask;
const u64 target_mask = m_target_cores.Load();
DataSynchronizationBarrierInnerShareable();
Kernel::GetInterruptManager().SendInterProcessorInterrupt(KInterruptName_CacheOperation, target_mask);
this->ProcessOperation();
while (m_target_cores.Load() != 0) {
cpu::Yield();
}
/* Go idle again. */
m_operation = Operation::Idle;
} else {
/* Lock condvar so that we can send and wait for acknowledgement of request. */
KScopedLightLock cv_lk(m_cv_lock);
/* Check that there's no on-going operation. */
MESOSPHERE_ABORT_UNLESS(m_operation == Operation::Idle);
MESOSPHERE_ABORT_UNLESS(m_target_cores.Load() == 0);
/* Set operation. */
m_operation = op;
/* Request all cores. */
m_target_cores = AllCoresMask;
/* Use the condvar. */
m_cv.Broadcast();
while (m_target_cores.Load() != 0) {
m_cv.Wait(std::addressof(m_cv_lock));
}
/* Go idle again. */
m_operation = Operation::Idle;
}
}
};
/* Instances of the interrupt handlers. */
constinit KThreadTerminationInterruptHandler g_thread_termination_handler;
constinit KCacheHelperInterruptHandler g_cache_operation_handler;
constinit KCoreBarrierInterruptHandler g_core_barrier_handler;
#if defined(MESOSPHERE_ENABLE_PERFORMANCE_COUNTER)
constinit KPerformanceCounterInterruptHandler g_performance_counter_handler[cpu::NumCores];
#endif
/* Expose this as a global, for asm to use. */
constinit s32 g_all_core_sync_count;
template<typename F>
ALWAYS_INLINE void PerformCacheOperationBySetWayImpl(int level, F f) {
/* Used in multiple locations. */
const u64 level_sel_value = static_cast<u64>(level << 1);
/* Get the cache size id register value with interrupts disabled. */
u64 ccsidr_value;
{
/* Disable interrupts. */
KScopedInterruptDisable di;
/* Configure the cache select register for our level. */
cpu::SetCsselrEl1(level_sel_value);
/* Ensure our configuration takes before reading the cache size id register. */
cpu::InstructionMemoryBarrier();
/* Get the cache size id register. */
ccsidr_value = cpu::GetCcsidrEl1();
}
/* Ensure that no memory inconsistencies occur between cache management invocations. */
cpu::DataSynchronizationBarrier();
/* Get cache size id info. */
CacheSizeIdRegisterAccessor ccsidr_el1(ccsidr_value);
const int num_sets = ccsidr_el1.GetNumberOfSets();
const int num_ways = ccsidr_el1.GetAssociativity();
const int line_size = ccsidr_el1.GetLineSize();
const u64 way_shift = static_cast<u64>(__builtin_clz(num_ways));
const u64 set_shift = static_cast<u64>(line_size + 4);
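/* NOTE: CCSIDR_EL1 encodes (associativity - 1) and (number of sets - 1),  */
/* which is why the loops below iterate with <=. The resulting DC CSW/CISW */
/* operand packs the way in the top bits (shift = 32-bit clz of the ways   */
/* field), the set at bit (LineSize + 4), and the cache level in [3:1].    */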
for (int way = 0; way <= num_ways; way++) {
for (int set = 0; set <= num_sets; set++) {
const u64 way_value = static_cast<u64>(way) << way_shift;
const u64 set_value = static_cast<u64>(set) << set_shift;
f(way_value | set_value | level_sel_value);
}
}
}
ALWAYS_INLINE void FlushDataCacheLineBySetWayImpl(const u64 sw_value) {
__asm__ __volatile__("dc cisw, %[v]" :: [v]"r"(sw_value) : "memory");
}
ALWAYS_INLINE void StoreDataCacheLineBySetWayImpl(const u64 sw_value) {
__asm__ __volatile__("dc csw, %[v]" :: [v]"r"(sw_value) : "memory");
}
void StoreDataCacheBySetWay(int level) {
PerformCacheOperationBySetWayImpl(level, StoreDataCacheLineBySetWayImpl);
}
void FlushDataCacheBySetWay(int level) {
PerformCacheOperationBySetWayImpl(level, FlushDataCacheLineBySetWayImpl);
}
void KCacheHelperInterruptHandler::ProcessOperation() {
switch (m_operation) {
case Operation::Idle:
break;
case Operation::InstructionMemoryBarrier:
InstructionMemoryBarrier();
break;
case Operation::StoreDataCache:
StoreDataCacheBySetWay(0);
cpu::DataSynchronizationBarrier();
break;
case Operation::FlushDataCache:
FlushDataCacheBySetWay(0);
cpu::DataSynchronizationBarrier();
break;
}
/* Mark this core as done, so that a requester waiting on m_target_cores can observe completion. */
m_target_cores &= ~(1ul << GetCurrentCoreId());
}
ALWAYS_INLINE Result InvalidateDataCacheRange(uintptr_t start, uintptr_t end) {
MESOSPHERE_ASSERT(util::IsAligned(start, DataCacheLineSize));
MESOSPHERE_ASSERT(util::IsAligned(end, DataCacheLineSize));
R_UNLESS(UserspaceAccess::InvalidateDataCache(start, end), svc::ResultInvalidCurrentMemory());
DataSynchronizationBarrier();
R_SUCCEED();
}
ALWAYS_INLINE Result StoreDataCacheRange(uintptr_t start, uintptr_t end) {
MESOSPHERE_ASSERT(util::IsAligned(start, DataCacheLineSize));
MESOSPHERE_ASSERT(util::IsAligned(end, DataCacheLineSize));
R_UNLESS(UserspaceAccess::StoreDataCache(start, end), svc::ResultInvalidCurrentMemory());
DataSynchronizationBarrier();
R_SUCCEED();
}
ALWAYS_INLINE Result FlushDataCacheRange(uintptr_t start, uintptr_t end) {
MESOSPHERE_ASSERT(util::IsAligned(start, DataCacheLineSize));
MESOSPHERE_ASSERT(util::IsAligned(end, DataCacheLineSize));
R_UNLESS(UserspaceAccess::FlushDataCache(start, end), svc::ResultInvalidCurrentMemory());
DataSynchronizationBarrier();
R_SUCCEED();
}
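/* Editor's sketch (an assumption about the UserspaceAccess internals, for illustration
 * only): the helpers used above conceptually perform a by-VA loop over cache lines,
 * e.g. for flush:
 *
 *     for (uintptr_t cur = start; cur < end; cur += DataCacheLineSize) {
 *         __asm__ __volatile__("dc civac, %[cur]" :: [cur]"r"(cur) : "memory");
 *     }
 *
 * The real implementations live in assembly so that a fault on an unmapped line can be
 * caught and reported as false, which is what the R_UNLESS checks above rely on. */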
ALWAYS_INLINE void InvalidateEntireInstructionCacheLocalImpl() {
__asm__ __volatile__("ic iallu" ::: "memory");
}
ALWAYS_INLINE void InvalidateEntireInstructionCacheGlobalImpl() {
__asm__ __volatile__("ic ialluis" ::: "memory");
}
}
void SynchronizeCores(u64 core_mask) {
/* Request a core barrier interrupt. */
g_core_barrier_handler.SynchronizeCores(core_mask);
}
void StoreCacheForInit(void *addr, size_t size) {
/* Store the data cache for the specified range. */
const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
const uintptr_t end = start + size;
for (uintptr_t cur = start; cur < end; cur += DataCacheLineSize) {
__asm__ __volatile__("dc cvac, %[cur]" :: [cur]"r"(cur) : "memory");
}
/* Data synchronization barrier. */
DataSynchronizationBarrierInnerShareable();
/* Invalidate instruction cache. */
InvalidateEntireInstructionCacheLocalImpl();
/* Ensure local instruction consistency. */
EnsureInstructionConsistency();
}
void FlushEntireDataCache() {
KScopedCoreMigrationDisable dm;
CacheLineIdRegisterAccessor clidr_el1;
const int levels_of_coherency = clidr_el1.GetLevelsOfCoherency();
/* Store cache from L2 up to the level of coherence (if there's an L3 cache or greater). */
for (int level = 2; level < levels_of_coherency; ++level) {
StoreDataCacheBySetWay(level - 1);
}
/* Flush cache from the level of coherence down to L2. */
for (int level = levels_of_coherency; level > 1; --level) {
FlushDataCacheBySetWay(level - 1);
}
/* Data synchronization barrier for full system. */
DataSynchronizationBarrier();
}
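/* Editor's worked example (illustrative): with levels_of_coherency = 3 (L1/L2/L3), the
 * two loops above expand to:
 *
 *     StoreDataCacheBySetWay(1);   // clean the L2 upward first
 *     FlushDataCacheBySetWay(2);   // then clean+invalidate the L3...
 *     FlushDataCacheBySetWay(1);   // ...and the L2, working back toward the cores
 *
 * (levels are zero-indexed, so index 1 is the L2 and index 2 is the L3). */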
Result InvalidateDataCache(void *addr, size_t size) {
/* Mark ourselves as in a cache maintenance operation, and prevent re-ordering. */
KScopedCacheMaintenance cm;
const uintptr_t start = reinterpret_cast<uintptr_t>(addr);
const uintptr_t end = start + size;
uintptr_t aligned_start = util::AlignDown(start, DataCacheLineSize);
uintptr_t aligned_end = util::AlignUp(end, DataCacheLineSize);
if (aligned_start != start) {
R_TRY(FlushDataCacheRange(aligned_start, aligned_start + DataCacheLineSize));
aligned_start += DataCacheLineSize;
}
if (aligned_start < aligned_end && (aligned_end != end)) {
aligned_end -= DataCacheLineSize;
R_TRY(FlushDataCacheRange(aligned_end, aligned_end + DataCacheLineSize));
}
if (aligned_start < aligned_end) {
R_TRY(InvalidateDataCacheRange(aligned_start, aligned_end));
}
R_SUCCEED();
}
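/* Editor's worked example (illustrative): with 64-byte lines, invalidating the range
 * [0x1010, 0x1FF0) gives aligned_start = 0x1000 and aligned_end = 0x2000. Both edges
 * are partial lines, so they are flushed (clean+invalidate) rather than invalidated,
 * preserving bytes outside the requested range that share those lines:
 *
 *     flush      [0x1000, 0x1040), then aligned_start = 0x1040
 *     flush      [0x1FC0, 0x2000), then aligned_end   = 0x1FC0
 *     invalidate [0x1040, 0x1FC0)
 */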
Result StoreDataCache(const void *addr, size_t size) {
/* Mark ourselves as in a cache maintenance operation, and prevent re-ordering. */
KScopedCacheMaintenance cm;
const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
const uintptr_t end = util::AlignUp( reinterpret_cast<uintptr_t>(addr) + size, DataCacheLineSize);
R_RETURN(StoreDataCacheRange(start, end));
}
Result FlushDataCache(const void *addr, size_t size) {
/* Mark ourselves as in a cache maintenance operation, and prevent re-ordering. */
KScopedCacheMaintenance cm;
const uintptr_t start = util::AlignDown(reinterpret_cast<uintptr_t>(addr), DataCacheLineSize);
const uintptr_t end = util::AlignUp( reinterpret_cast<uintptr_t>(addr) + size, DataCacheLineSize);
R_RETURN(FlushDataCacheRange(start, end));
}
void InvalidateEntireInstructionCache() {
KScopedCoreMigrationDisable dm;
/* Invalidate the instruction cache on all cores. */
InvalidateEntireInstructionCacheGlobalImpl();
EnsureInstructionConsistency();
/* Request the interrupt helper to perform an instruction memory barrier. */
g_cache_operation_handler.RequestOperation(KCacheHelperInterruptHandler::Operation::InstructionMemoryBarrier);
}
void InitializeInterruptThreads(s32 core_id) {
/* Initialize the cache operation handler. */
g_cache_operation_handler.Initialize(core_id);
/* Bind all handlers to the relevant interrupts. */
Kernel::GetInterruptManager().BindHandler(std::addressof(g_cache_operation_handler), KInterruptName_CacheOperation, core_id, KInterruptController::PriorityLevel_High, false, false);
Kernel::GetInterruptManager().BindHandler(std::addressof(g_thread_termination_handler), KInterruptName_ThreadTerminate, core_id, KInterruptController::PriorityLevel_Scheduler, false, false);
Kernel::GetInterruptManager().BindHandler(std::addressof(g_core_barrier_handler), KInterruptName_CoreBarrier, core_id, KInterruptController::PriorityLevel_Scheduler, false, false);
/* If we should, enable user access to the performance counter registers. */
if (KTargetSystem::IsUserPmuAccessEnabled()) { SetPmUserEnrEl0(1ul); }
/* If we should, enable the kernel performance counter interrupt handler. */
#if defined(MESOSPHERE_ENABLE_PERFORMANCE_COUNTER)
Kernel::GetInterruptManager().BindHandler(std::addressof(g_performance_counter_handler[core_id]), KInterruptName_PerformanceCounter, core_id, KInterruptController::PriorityLevel_Timer, false, false);
#endif
}
void SynchronizeAllCores() {
SynchronizeAllCoresImpl(&g_all_core_sync_count, static_cast<s32>(cpu::NumCores));
}
}
| 22,822 | C++ | .cpp | 427 | 37.564403 | 211 | 0.552869 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
6,999 | kern_k_page_table_impl.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::arch::arm64 {
void KPageTableImpl::InitializeForKernel(void *tb, KVirtualAddress start, KVirtualAddress end) {
m_table = static_cast<L1PageTableEntry *>(tb);
m_is_kernel = true;
m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
}
void KPageTableImpl::InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end) {
m_table = static_cast<L1PageTableEntry *>(tb);
m_is_kernel = false;
m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
/* Page table entries created by KInitialPageTable need to be iterated and modified to ensure KPageTable invariants. */
PageTableEntry *level_entries[EntryLevel_Count] = { nullptr, nullptr, m_table };
u32 level = EntryLevel_L1;
while (level != EntryLevel_L1 || (level_entries[EntryLevel_L1] - static_cast<PageTableEntry *>(m_table)) < m_num_entries) {
/* Get the pte; it must never have the validity-extension flag set. */
auto *pte = level_entries[level];
MESOSPHERE_ASSERT((pte->GetSoftwareReservedBits() & PageTableEntry::SoftwareReservedBit_Valid) == 0);
/* While we're a table, recurse, fixing up the reference counts. */
while (level > EntryLevel_L3 && pte->IsMappedTable()) {
/* Count how many references are in the table. */
auto *table = GetPointer<PageTableEntry>(GetPageTableVirtualAddress(pte->GetTable()));
size_t ref_count = 0;
for (size_t i = 0; i < BlocksPerTable; ++i) {
if (table[i].IsMapped()) {
++ref_count;
}
}
/* Set the reference count for our new page, adding one additional uncloseable reference; kernel pages must never be unreferenced. */
pte->SetTableReferenceCount(ref_count + 1).SetValid();
/* Iterate downwards. */
level -= 1;
level_entries[level] = table;
pte = level_entries[level];
/* Check that the entry isn't unexpected. */
MESOSPHERE_ASSERT((pte->GetSoftwareReservedBits() & PageTableEntry::SoftwareReservedBit_Valid) == 0);
}
/* We're dealing with some block. If it's mapped, set it valid. */
if (pte->IsMapped()) {
pte->SetValid();
}
/* Advance. */
while (true) {
/* Advance to the next entry at the current level. */
++level_entries[level];
/* If we're not at the end of the current table, we're done advancing. */
if (!util::IsAligned(reinterpret_cast<uintptr_t>(level_entries[level]), PageSize)) {
break;
}
/* If we're at the end of a level, advance upwards. */
level_entries[level++] = nullptr;
if (level > EntryLevel_L1) {
return;
}
}
}
}
L1PageTableEntry *KPageTableImpl::Finalize() {
return m_table;
}
bool KPageTableImpl::BeginTraversal(TraversalEntry *out_entry, TraversalContext *out_context, KProcessAddress address) const {
/* Setup invalid defaults. */
*out_entry = {};
*out_context = {};
/* Validate that we can read the actual entry. */
const size_t l0_index = GetL0Index(address);
const size_t l1_index = GetL1Index(address);
if (m_is_kernel) {
/* Kernel entries must be accessed via TTBR1. */
if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - m_num_entries)) {
return false;
}
} else {
/* User entries must be accessed with TTBR0. */
if ((l0_index != 0) || l1_index >= m_num_entries) {
return false;
}
}
/* Get the L1 entry, and check if it's a table. */
out_context->level_entries[EntryLevel_L1] = this->GetL1Entry(address);
if (out_context->level_entries[EntryLevel_L1]->IsMappedTable()) {
/* Get the L2 entry, and check if it's a table. */
out_context->level_entries[EntryLevel_L2] = this->GetL2EntryFromTable(GetPageTableVirtualAddress(out_context->level_entries[EntryLevel_L1]->GetTable()), address);
if (out_context->level_entries[EntryLevel_L2]->IsMappedTable()) {
/* Get the L3 entry. */
out_context->level_entries[EntryLevel_L3] = this->GetL3EntryFromTable(GetPageTableVirtualAddress(out_context->level_entries[EntryLevel_L2]->GetTable()), address);
/* It's either a page or not. */
out_context->level = EntryLevel_L3;
} else {
/* Not a L2 table, so possibly an L2 block. */
out_context->level = EntryLevel_L2;
}
} else {
/* Not a L1 table, so possibly an L1 block. */
out_context->level = EntryLevel_L1;
}
/* Determine other fields. */
const auto *pte = out_context->level_entries[out_context->level];
out_context->is_contiguous = pte->IsContiguous();
out_entry->sw_reserved_bits = pte->GetSoftwareReservedBits();
out_entry->attr = 0;
out_entry->phys_addr = this->GetBlock(pte, out_context->level) + this->GetOffset(address, out_context->level);
out_entry->block_size = static_cast<size_t>(1) << (PageBits + LevelBits * out_context->level + 4 * out_context->is_contiguous);
return out_context->level == EntryLevel_L3 ? pte->IsPage() : pte->IsBlock();
}
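/* Editor's note (illustrative): with the 4KiB translation granule used here, PageBits = 12
 * and LevelBits = 9, so the block_size expression above evaluates to:
 *
 *     L3 page:           1 << 12           =   4KiB
 *     L3 contiguous run: 1 << (12 + 4)     =  64KiB
 *     L2 block:          1 << (12 + 9)     =   2MiB
 *     L2 contiguous run: 1 << (12 + 9 + 4) =  32MiB
 *     L1 block:          1 << (12 + 18)    =   1GiB
 */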
bool KPageTableImpl::ContinueTraversal(TraversalEntry *out_entry, TraversalContext *context) const {
/* Advance entry. */
auto *cur_pte = context->level_entries[context->level];
auto *next_pte = reinterpret_cast<PageTableEntry *>(context->is_contiguous ? util::AlignDown(reinterpret_cast<uintptr_t>(cur_pte), BlocksPerContiguousBlock * sizeof(PageTableEntry)) + BlocksPerContiguousBlock * sizeof(PageTableEntry) : reinterpret_cast<uintptr_t>(cur_pte) + sizeof(PageTableEntry));
/* Set the pte. */
context->level_entries[context->level] = next_pte;
/* Advance appropriately. */
while (context->level < EntryLevel_L1 && util::IsAligned(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), PageSize)) {
/* Advance the above table by one entry. */
context->level_entries[context->level + 1]++;
context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) + 1);
}
/* Check if we've hit the end of the L1 table. */
if (context->level == EntryLevel_L1) {
if (context->level_entries[EntryLevel_L1] - static_cast<const PageTableEntry *>(m_table) >= m_num_entries) {
*context = {};
*out_entry = {};
return false;
}
}
/* We may have advanced to a new table, and if we have we should descend. */
while (context->level > EntryLevel_L3 && context->level_entries[context->level]->IsMappedTable()) {
context->level_entries[context->level - 1] = GetPointer<PageTableEntry>(GetPageTableVirtualAddress(context->level_entries[context->level]->GetTable()));
context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) - 1);
}
const auto *pte = context->level_entries[context->level];
context->is_contiguous = pte->IsContiguous();
out_entry->sw_reserved_bits = pte->GetSoftwareReservedBits();
out_entry->attr = 0;
out_entry->phys_addr = this->GetBlock(pte, context->level);
out_entry->block_size = static_cast<size_t>(1) << (PageBits + LevelBits * context->level + 4 * context->is_contiguous);
return context->level == EntryLevel_L3 ? pte->IsPage() : pte->IsBlock();
}
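/* Editor's sketch (hypothetical caller, names assumed; not part of the original file):
 * the traversal API above is designed to be driven in a loop to visit every mapping
 * covering a range, one block/page per iteration:
 *
 *     TraversalContext ctx;
 *     TraversalEntry   entry;
 *     bool mapped = impl.BeginTraversal(std::addressof(entry), std::addressof(ctx), address);
 *     while (mapped && remaining > 0) {
 *         remaining -= std::min(remaining, entry.block_size);  // entry.phys_addr is valid here
 *         mapped     = impl.ContinueTraversal(std::addressof(entry), std::addressof(ctx));
 *     }
 */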
bool KPageTableImpl::GetPhysicalAddress(KPhysicalAddress *out, KProcessAddress address) const {
/* Validate that we can read the actual entry. */
const size_t l0_index = GetL0Index(address);
const size_t l1_index = GetL1Index(address);
if (m_is_kernel) {
/* Kernel entries must be accessed via TTBR1. */
if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - m_num_entries)) {
return false;
}
} else {
/* User entries must be accessed with TTBR0. */
if ((l0_index != 0) || l1_index >= m_num_entries) {
return false;
}
}
/* Get the L1 entry, and check if it's a table. */
const PageTableEntry *pte = this->GetL1Entry(address);
EntryLevel level = EntryLevel_L1;
if (pte->IsMappedTable()) {
/* Get the L2 entry, and check if it's a table. */
pte = this->GetL2EntryFromTable(GetPageTableVirtualAddress(pte->GetTable()), address);
level = EntryLevel_L2;
if (pte->IsMappedTable()) {
pte = this->GetL3EntryFromTable(GetPageTableVirtualAddress(pte->GetTable()), address);
level = EntryLevel_L3;
}
}
const bool is_block = level == EntryLevel_L3 ? pte->IsPage() : pte->IsBlock();
if (is_block) {
*out = this->GetBlock(pte, level) + this->GetOffset(address, level);
} else {
*out = Null<KPhysicalAddress>;
}
return is_block;
}
bool KPageTableImpl::MergePages(KVirtualAddress *out, TraversalContext *context) {
/* We want to upgrade the pages by one step. */
if (context->is_contiguous) {
/* We can't merge an L1 table. */
if (context->level == EntryLevel_L1) {
return false;
}
/* We want to upgrade a contiguous mapping in a table to a block. */
PageTableEntry *pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerTable * sizeof(PageTableEntry)));
const KPhysicalAddress phys_addr = util::AlignDown(GetBlock(pte, context->level), GetBlockSize(static_cast<EntryLevel>(context->level + 1), false));
/* First, check that all entries are valid for us to merge. */
const u64 entry_template = pte->GetEntryTemplateForMerge();
for (size_t i = 0; i < BlocksPerTable; ++i) {
if (!pte[i].IsForMerge(entry_template | GetInteger(phys_addr + (i << (PageBits + LevelBits * context->level))) | PageTableEntry::ContigType_Contiguous | pte->GetTestTableMask())) {
return false;
}
if (i > 0 && pte[i].IsHeadOrHeadAndBodyMergeDisabled()) {
return false;
}
if (i < BlocksPerTable - 1 && pte[i].IsTailMergeDisabled()) {
return false;
}
}
/* The entries are valid for us to merge, so merge them. */
const auto *head_pte = pte;
const auto *tail_pte = pte + BlocksPerTable - 1;
const auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_pte->IsHeadMergeDisabled(), head_pte->IsHeadAndBodyMergeDisabled(), tail_pte->IsTailMergeDisabled());
*context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr, PageTableEntry(entry_template), sw_reserved_bits, false, false);
/* Update our context. */
context->is_contiguous = false;
context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) + 1);
/* Set the output to the table we just freed. */
*out = KVirtualAddress(pte);
} else {
/* We want to upgrade a non-contiguous mapping to a contiguous mapping. */
PageTableEntry *pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)));
const KPhysicalAddress phys_addr = util::AlignDown(GetBlock(pte, context->level), GetBlockSize(context->level, true));
/* First, check that all entries are valid for us to merge. */
const u64 entry_template = pte->GetEntryTemplateForMerge();
for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
if (!pte[i].IsForMerge(entry_template | GetInteger(phys_addr + (i << (PageBits + LevelBits * context->level))) | pte->GetTestTableMask())) {
return false;
}
if (i > 0 && pte[i].IsHeadOrHeadAndBodyMergeDisabled()) {
return false;
}
if (i < BlocksPerContiguousBlock - 1 && pte[i].IsTailMergeDisabled()) {
return false;
}
}
/* The entries are valid for us to merge, so merge them. */
const auto *head_pte = pte;
const auto *tail_pte = pte + BlocksPerContiguousBlock - 1;
const auto sw_reserved_bits = PageTableEntry::EncodeSoftwareReservedBits(head_pte->IsHeadMergeDisabled(), head_pte->IsHeadAndBodyMergeDisabled(), tail_pte->IsTailMergeDisabled());
for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, phys_addr + (i << (PageBits + LevelBits * context->level)), PageTableEntry(entry_template), sw_reserved_bits, true, context->level == EntryLevel_L3);
}
/* Update our context. */
context->level_entries[context->level] = pte;
context->is_contiguous = true;
}
return true;
}
void KPageTableImpl::SeparatePages(TraversalEntry *entry, TraversalContext *context, KProcessAddress address, PageTableEntry *pte) const {
/* We want to downgrade the pages by one step. */
if (context->is_contiguous) {
/* We want to downgrade a contiguous mapping to a non-contiguous mapping. */
pte = reinterpret_cast<PageTableEntry *>(util::AlignDown(reinterpret_cast<uintptr_t>(context->level_entries[context->level]), BlocksPerContiguousBlock * sizeof(PageTableEntry)));
auto * const first = pte;
const KPhysicalAddress block = this->GetBlock(first, context->level);
for (size_t i = 0; i < BlocksPerContiguousBlock; ++i) {
pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * context->level)), PageTableEntry(first->GetEntryTemplateForSeparateContiguous(i)), PageTableEntry::SeparateContiguousTag{});
}
context->is_contiguous = false;
context->level_entries[context->level] = pte + (this->GetLevelIndex(address, context->level) & (BlocksPerContiguousBlock - 1));
} else {
/* We want to downgrade a block into a table. */
auto * const first = context->level_entries[context->level];
const KPhysicalAddress block = this->GetBlock(first, context->level);
for (size_t i = 0; i < BlocksPerTable; ++i) {
pte[i] = PageTableEntry(PageTableEntry::BlockTag{}, block + (i << (PageBits + LevelBits * (context->level - 1))), PageTableEntry(first->GetEntryTemplateForSeparate(i)), PageTableEntry::SoftwareReservedBit_None, true, context->level - 1 == EntryLevel_L3);
}
context->is_contiguous = true;
context->level = static_cast<EntryLevel>(util::ToUnderlying(context->level) - 1);
/* Wait for pending stores to complete. */
cpu::DataSynchronizationBarrierInnerShareableStore();
/* Update the block entry to be a table entry. */
*context->level_entries[context->level + 1] = PageTableEntry(PageTableEntry::TableTag{}, KPageTable::GetPageTablePhysicalAddress(KVirtualAddress(pte)), m_is_kernel, true, BlocksPerTable);
context->level_entries[context->level] = pte + this->GetLevelIndex(address, context->level);
}
entry->sw_reserved_bits = 0;
entry->attr = 0;
entry->phys_addr = this->GetBlock(context->level_entries[context->level], context->level) + this->GetOffset(address, context->level);
entry->block_size = static_cast<size_t>(1) << (PageBits + LevelBits * context->level + 4 * context->is_contiguous);
}
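/* Editor's note (illustrative): the "contiguous" bit is the ARMv8 TLB hint that 16
 * adjacent, size-aligned entries map one contiguous region, so hardware may cache them
 * as a single TLB entry (16 x 4KiB L3 pages behave like one 64KiB translation, and
 * 16 x 2MiB L2 blocks like one 32MiB translation). MergePages and SeparatePages move
 * a mapping up or down exactly one such step at a time. */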
void KPageTableImpl::Dump(uintptr_t start, size_t size) const {
/* If zero size, there's nothing to dump. */
if (size == 0) {
return;
}
/* Define extents. */
const uintptr_t end = start + size;
const uintptr_t last = end - 1;
/* Define tracking variables. */
bool unmapped = false;
uintptr_t unmapped_start = 0;
/* Walk the table. */
uintptr_t cur = start;
while (cur < end) {
/* Validate that we can read the actual entry. */
const size_t l0_index = GetL0Index(cur);
const size_t l1_index = GetL1Index(cur);
if (m_is_kernel) {
/* Kernel entries must be accessed via TTBR1. */
if ((l0_index != MaxPageTableEntries - 1) || (l1_index < MaxPageTableEntries - m_num_entries)) {
return;
}
} else {
/* User entries must be accessed with TTBR0. */
if ((l0_index != 0) || l1_index >= m_num_entries) {
return;
}
}
/* Try to get from l1 table. */
const L1PageTableEntry *l1_entry = this->GetL1Entry(cur);
if (l1_entry->IsBlock()) {
/* Update. */
cur = util::AlignDown(cur, L1BlockSize);
if (unmapped) {
unmapped = false;
MESOSPHERE_RELEASE_LOG("%016lx - %016lx: not mapped\n", unmapped_start, cur - 1);
}
/* Print. */
MESOSPHERE_RELEASE_LOG("%016lx: %016lx PA=%p SZ=1G Mapped=%d UXN=%d PXN=%d Cont=%d nG=%d AF=%d SH=%x RO=%d UA=%d NS=%d AttrIndx=%d NoMerge=%d,%d,%d\n", cur,
*reinterpret_cast<const u64 *>(l1_entry),
reinterpret_cast<void *>(GetInteger(l1_entry->GetBlock())),
l1_entry->IsMapped(),
l1_entry->IsUserExecuteNever(),
l1_entry->IsPrivilegedExecuteNever(),
l1_entry->IsContiguous(),
!l1_entry->IsGlobal(),
static_cast<int>(l1_entry->GetAccessFlagInteger()),
static_cast<unsigned int>(l1_entry->GetShareableInteger()),
l1_entry->IsReadOnly(),
l1_entry->IsUserAccessible(),
l1_entry->IsNonSecure(),
static_cast<int>(l1_entry->GetPageAttributeInteger()),
l1_entry->IsHeadMergeDisabled(),
l1_entry->IsHeadAndBodyMergeDisabled(),
l1_entry->IsTailMergeDisabled());
/* Advance. */
cur += L1BlockSize;
continue;
} else if (!l1_entry->IsTable()) {
/* Update. */
cur = util::AlignDown(cur, L1BlockSize);
if (!unmapped) {
unmapped_start = cur;
unmapped = true;
}
/* Advance. */
cur += L1BlockSize;
continue;
}
/* Try to get from l2 table. */
const L2PageTableEntry *l2_entry = this->GetL2Entry(l1_entry, cur);
if (l2_entry->IsBlock()) {
/* Update. */
cur = util::AlignDown(cur, L2BlockSize);
if (unmapped) {
unmapped = false;
MESOSPHERE_RELEASE_LOG("%016lx - %016lx: not mapped\n", unmapped_start, cur - 1);
}
/* Print. */
MESOSPHERE_RELEASE_LOG("%016lx: %016lx PA=%p SZ=2M Mapped=%d UXN=%d PXN=%d Cont=%d nG=%d AF=%d SH=%x RO=%d UA=%d NS=%d AttrIndx=%d NoMerge=%d,%d,%d\n", cur,
*reinterpret_cast<const u64 *>(l2_entry),
reinterpret_cast<void *>(GetInteger(l2_entry->GetBlock())),
l2_entry->IsMapped(),
l2_entry->IsUserExecuteNever(),
l2_entry->IsPrivilegedExecuteNever(),
l2_entry->IsContiguous(),
!l2_entry->IsGlobal(),
static_cast<int>(l2_entry->GetAccessFlagInteger()),
static_cast<unsigned int>(l2_entry->GetShareableInteger()),
l2_entry->IsReadOnly(),
l2_entry->IsUserAccessible(),
l2_entry->IsNonSecure(),
static_cast<int>(l2_entry->GetPageAttributeInteger()),
l2_entry->IsHeadMergeDisabled(),
l2_entry->IsHeadAndBodyMergeDisabled(),
l2_entry->IsTailMergeDisabled());
/* Advance. */
cur += L2BlockSize;
continue;
} else if (!l2_entry->IsTable()) {
/* Update. */
cur = util::AlignDown(cur, L2BlockSize);
if (!unmapped) {
unmapped_start = cur;
unmapped = true;
}
/* Advance. */
cur += L2BlockSize;
continue;
}
/* Try to get from l3 table. */
const L3PageTableEntry *l3_entry = this->GetL3Entry(l2_entry, cur);
if (l3_entry->IsBlock()) {
/* Update. */
cur = util::AlignDown(cur, L3BlockSize);
if (unmapped) {
unmapped = false;
MESOSPHERE_RELEASE_LOG("%016lx - %016lx: not mapped\n", unmapped_start, cur - 1);
}
/* Print. */
MESOSPHERE_RELEASE_LOG("%016lx: %016lx PA=%p SZ=4K Mapped=%d UXN=%d PXN=%d Cont=%d nG=%d AF=%d SH=%x RO=%d UA=%d NS=%d AttrIndx=%d NoMerge=%d,%d,%d\n", cur,
*reinterpret_cast<const u64 *>(l3_entry),
reinterpret_cast<void *>(GetInteger(l3_entry->GetBlock())),
l3_entry->IsMapped(),
l3_entry->IsUserExecuteNever(),
l3_entry->IsPrivilegedExecuteNever(),
l3_entry->IsContiguous(),
!l3_entry->IsGlobal(),
static_cast<int>(l3_entry->GetAccessFlagInteger()),
static_cast<unsigned int>(l3_entry->GetShareableInteger()),
l3_entry->IsReadOnly(),
l3_entry->IsUserAccessible(),
l3_entry->IsNonSecure(),
static_cast<int>(l3_entry->GetPageAttributeInteger()),
l3_entry->IsHeadMergeDisabled(),
l3_entry->IsHeadAndBodyMergeDisabled(),
l3_entry->IsTailMergeDisabled());
/* Advance. */
cur += L3BlockSize;
continue;
} else {
/* Update. */
cur = util::AlignDown(cur, L3BlockSize);
if (!unmapped) {
unmapped_start = cur;
unmapped = true;
}
/* Advance. */
cur += L3BlockSize;
continue;
}
}
/* Print the last unmapped range if necessary. */
if (unmapped) {
MESOSPHERE_RELEASE_LOG("%016lx - %016lx: not mapped\n", unmapped_start, last);
}
}
size_t KPageTableImpl::CountPageTables() const {
size_t num_tables = 0;
#if defined(MESOSPHERE_BUILD_FOR_DEBUGGING)
{
++num_tables;
for (size_t l1_index = 0; l1_index < m_num_entries; ++l1_index) {
auto &l1_entry = m_table[l1_index];
if (l1_entry.IsTable()) {
++num_tables;
for (size_t l2_index = 0; l2_index < MaxPageTableEntries; ++l2_index) {
auto *l2_entry = GetPointer<L2PageTableEntry>(GetTableEntry(KMemoryLayout::GetLinearVirtualAddress(l1_entry.GetTable()), l2_index));
if (l2_entry->IsTable()) {
++num_tables;
}
}
}
}
}
#endif
return num_tables;
}
}
| 32,754 | C++ | .cpp | 461 | 41.78308 | 307 | 0.437112 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
7,000 | kern_exception_handlers.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/arch/arm64/kern_exception_handlers.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
void RestoreContext(uintptr_t sp);
}
namespace ams::kern::arch::arm64 {
namespace {
enum EsrEc : u32 {
EsrEc_Unknown = 0b000000,
EsrEc_WaitForInterruptOrEvent = 0b000001,
EsrEc_Cp15McrMrc = 0b000011,
EsrEc_Cp15McrrMrrc = 0b000100,
EsrEc_Cp14McrMrc = 0b000101,
EsrEc_FpAccess = 0b000111,
EsrEc_Cp14Mrrc = 0b001100,
EsrEc_BranchTarget = 0b001101,
EsrEc_IllegalExecution = 0b001110,
EsrEc_Svc32 = 0b010001,
EsrEc_Svc64 = 0b010101,
EsrEc_SystemInstruction64 = 0b011000,
EsrEc_SveZen = 0b011001,
EsrEc_PointerAuthInstruction = 0b011100,
EsrEc_InstructionAbortEl0 = 0b100000,
EsrEc_InstructionAbortEl1 = 0b100001,
EsrEc_PcAlignmentFault = 0b100010,
EsrEc_DataAbortEl0 = 0b100100,
EsrEc_DataAbortEl1 = 0b100101,
EsrEc_SpAlignmentFault = 0b100110,
EsrEc_FpException32 = 0b101000,
EsrEc_FpException64 = 0b101100,
EsrEc_SErrorInterrupt = 0b101111,
EsrEc_BreakPointEl0 = 0b110000,
EsrEc_BreakPointEl1 = 0b110001,
EsrEc_SoftwareStepEl0 = 0b110010,
EsrEc_SoftwareStepEl1 = 0b110011,
EsrEc_WatchPointEl0 = 0b110100,
EsrEc_WatchPointEl1 = 0b110101,
EsrEc_BkptInstruction = 0b111000,
EsrEc_BrkInstruction = 0b111100,
};
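/* Editor's sketch (illustrative helpers, not part of the original file): the ESR_EL1
 * fields consumed throughout this file decode as follows:
 *
 *     constexpr u32  GetEsrEc(u64 esr)  { return static_cast<u32>((esr >> 26) & 0x3F); } // Exception Class
 *     constexpr bool GetEsrIl(u64 esr)  { return ((esr >> 25) & 1) != 0; }               // 32-bit instruction
 *     constexpr u32  GetEsrIss(u64 esr) { return static_cast<u32>(esr & 0x1FFFFFF); }    // syndrome
 *
 * For example, an AArch64 SVC reports EC = EsrEc_Svc64 with the immediate in ISS[15:0],
 * which is why the handlers below extract (esr >> 26) & 0x3F and (esr & 0xFF). */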
u32 GetInstructionDataSupervisorMode(const KExceptionContext *context, u64 esr) {
/* Check for THUMB usermode */
if ((context->psr & 0x3F) == 0x30) {
u32 insn = *reinterpret_cast<u16 *>(context->pc & ~0x1);
/* Check if the instruction was 32-bit. */
if ((esr >> 25) & 1) {
insn = (insn << 16) | *reinterpret_cast<u16 *>((context->pc & ~0x1) + sizeof(u16));
}
return insn;
} else {
/* Not thumb, so just get the instruction. */
return *reinterpret_cast<u32 *>(context->pc);
}
}
u32 GetInstructionDataUserMode(const KExceptionContext *context) {
/* Check for THUMB usermode */
u32 insn = 0;
if ((context->psr & 0x3F) == 0x30) {
u16 insn_high = 0;
if (UserspaceAccess::CopyMemoryFromUser(std::addressof(insn_high), reinterpret_cast<u16 *>(context->pc & ~0x1), sizeof(insn_high))) {
insn = insn_high;
/* Check if the instruction was a THUMB mode branch prefix. */
if (((insn >> 11) & 0b11110) == 0b11110) {
u16 insn_low = 0;
if (UserspaceAccess::CopyMemoryFromUser(std::addressof(insn_low), reinterpret_cast<u16 *>((context->pc & ~0x1) + sizeof(u16)), sizeof(insn_low))) {
insn = (static_cast<u32>(insn_high) << 16) | (static_cast<u32>(insn_low) << 0);
} else {
insn = 0;
}
}
} else {
insn = 0;
}
} else {
u32 insn_value = 0;
if (UserspaceAccess::CopyMemoryFromUser(std::addressof(insn_value), reinterpret_cast<u32 *>(context->pc), sizeof(insn_value))) {
insn = insn_value;
} else if (KTargetSystem::IsDebugMode() && (context->pc & 3) == 0 && UserspaceAccess::CopyMemoryFromUserSize32BitWithSupervisorAccess(std::addressof(insn_value), reinterpret_cast<u32 *>(context->pc))) {
insn = insn_value;
} else {
insn = 0;
}
}
return insn;
}
void HandleUserException(KExceptionContext *context, u64 esr, u64 far, u64 afsr0, u64 afsr1, u32 data) {
KProcess &cur_process = GetCurrentProcess();
bool should_process_user_exception = KTargetSystem::IsUserExceptionHandlersEnabled();
const u64 ec = (esr >> 26) & 0x3F;
/* In the event that we return from this exception, we want SPSR.SS set so that we advance an instruction if single-stepping. */
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
context->psr |= (1ul << 21);
#endif
/* If we should process the user exception (and it's not a breakpoint), try to enter. */
const bool is_software_break = (ec == EsrEc_Unknown || ec == EsrEc_IllegalExecution || ec == EsrEc_BkptInstruction || ec == EsrEc_BrkInstruction);
const bool is_breakpoint = (ec == EsrEc_BreakPointEl0 || ec == EsrEc_SoftwareStepEl0 || ec == EsrEc_WatchPointEl0);
if ((should_process_user_exception) &&
!(is_software_break && cur_process.IsAttachedToDebugger() && KDebug::IsBreakInstruction(data, context->psr)) &&
!(is_breakpoint))
{
if (cur_process.EnterUserException()) {
/* Fill out the exception info. */
const bool is_aarch64 = (context->psr & 0x10) == 0;
if (is_aarch64) {
/* 64-bit. */
ams::svc::aarch64::ExceptionInfo *info = std::addressof(static_cast<ams::svc::aarch64::ProcessLocalRegion *>(cur_process.GetProcessLocalRegionHeapAddress())->exception_info);
for (size_t i = 0; i < util::size(info->r); ++i) {
info->r[i] = context->x[i];
}
info->sp = context->sp;
info->lr = context->x[30];
info->pc = context->pc;
info->pstate = (context->psr & cpu::El0Aarch64PsrMask);
info->afsr0 = afsr0;
info->afsr1 = afsr1;
info->esr = esr;
info->far = far;
} else {
/* 32-bit. */
ams::svc::aarch32::ExceptionInfo *info = std::addressof(static_cast<ams::svc::aarch32::ProcessLocalRegion *>(cur_process.GetProcessLocalRegionHeapAddress())->exception_info);
for (size_t i = 0; i < util::size(info->r); ++i) {
info->r[i] = context->x[i];
}
info->sp = context->x[13];
info->lr = context->x[14];
info->pc = context->pc;
info->flags = 1;
info->status_64.pstate = (context->psr & cpu::El0Aarch32PsrMask);
info->status_64.afsr0 = afsr0;
info->status_64.afsr1 = afsr1;
info->status_64.esr = esr;
info->status_64.far = far;
}
/* Save the debug parameters to the current thread. */
GetCurrentThread().SaveDebugParams(far, esr, data);
/* Get the exception type. */
u32 type;
switch (ec) {
case EsrEc_Unknown:
case EsrEc_IllegalExecution:
case EsrEc_Cp15McrMrc:
case EsrEc_Cp15McrrMrrc:
case EsrEc_Cp14McrMrc:
case EsrEc_Cp14Mrrc:
case EsrEc_SystemInstruction64:
case EsrEc_BkptInstruction:
case EsrEc_BrkInstruction:
type = ams::svc::ExceptionType_InstructionAbort;
break;
case EsrEc_PcAlignmentFault:
type = ams::svc::ExceptionType_UnalignedInstruction;
break;
case EsrEc_SpAlignmentFault:
type = ams::svc::ExceptionType_UnalignedData;
break;
case EsrEc_Svc32:
case EsrEc_Svc64:
type = ams::svc::ExceptionType_InvalidSystemCall;
break;
case EsrEc_SErrorInterrupt:
type = ams::svc::ExceptionType_MemorySystemError;
break;
case EsrEc_InstructionAbortEl0:
type = ams::svc::ExceptionType_InstructionAbort;
break;
case EsrEc_DataAbortEl0:
/* If esr.IFSC is "Alignment Fault", return UnalignedData instead of DataAbort. */
if ((esr & 0x3F) == 0b100001) {
type = ams::svc::ExceptionType_UnalignedData;
} else {
type = ams::svc::ExceptionType_DataAbort;
}
break;
default:
type = ams::svc::ExceptionType_DataAbort;
break;
}
/* We want to enter at the process entrypoint, with x0 = type. */
context->pc = GetInteger(cur_process.GetEntryPoint());
context->x[0] = type;
if (is_aarch64) {
context->x[1] = GetInteger(cur_process.GetProcessLocalRegionAddress() + AMS_OFFSETOF(ams::svc::aarch64::ProcessLocalRegion, exception_info));
const auto *plr = GetPointer<ams::svc::aarch64::ProcessLocalRegion>(cur_process.GetProcessLocalRegionAddress());
context->sp = util::AlignDown(reinterpret_cast<uintptr_t>(plr->data) + sizeof(plr->data), 0x10);
context->psr = 0;
} else {
context->x[1] = GetInteger(cur_process.GetProcessLocalRegionAddress() + AMS_OFFSETOF(ams::svc::aarch32::ProcessLocalRegion, exception_info));
const auto *plr = GetPointer<ams::svc::aarch32::ProcessLocalRegion>(cur_process.GetProcessLocalRegionAddress());
context->x[13] = util::AlignDown(reinterpret_cast<uintptr_t>(plr->data) + sizeof(plr->data), 0x08);
context->psr = 0x10;
}
/* Process that we're entering a usermode exception on the current thread. */
GetCurrentThread().OnEnterUsermodeException();
return;
}
}
/* If we should, clear the thread's state as single-step. */
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
if (AMS_UNLIKELY(GetCurrentThread().IsHardwareSingleStep())) {
GetCurrentThread().ClearHardwareSingleStep();
cpu::MonitorDebugSystemControlRegisterAccessor().SetSoftwareStep(false).Store();
cpu::InstructionMemoryBarrier();
}
#endif
{
/* Collect additional information based on the ec. */
uintptr_t params[3] = {};
switch (ec) {
case EsrEc_Unknown:
case EsrEc_IllegalExecution:
case EsrEc_BkptInstruction:
case EsrEc_BrkInstruction:
{
params[0] = ams::svc::DebugException_UndefinedInstruction;
params[1] = far;
params[2] = data;
}
break;
case EsrEc_PcAlignmentFault:
case EsrEc_SpAlignmentFault:
{
params[0] = ams::svc::DebugException_AlignmentFault;
params[1] = far;
}
break;
case EsrEc_Svc32:
case EsrEc_Svc64:
{
params[0] = ams::svc::DebugException_UndefinedSystemCall;
params[1] = far;
params[2] = (esr & 0xFF);
}
break;
case EsrEc_BreakPointEl0:
case EsrEc_SoftwareStepEl0:
{
params[0] = ams::svc::DebugException_BreakPoint;
params[1] = far;
params[2] = ams::svc::BreakPointType_HardwareInstruction;
}
break;
case EsrEc_WatchPointEl0:
{
params[0] = ams::svc::DebugException_BreakPoint;
params[1] = far;
params[2] = ams::svc::BreakPointType_HardwareData;
}
break;
case EsrEc_SErrorInterrupt:
{
params[0] = ams::svc::DebugException_MemorySystemError;
params[1] = far;
}
break;
case EsrEc_InstructionAbortEl0:
{
params[0] = ams::svc::DebugException_InstructionAbort;
params[1] = far;
}
break;
case EsrEc_DataAbortEl0:
default:
{
params[0] = ams::svc::DebugException_DataAbort;
params[1] = far;
}
break;
}
/* Process the debug event. */
Result result = KDebug::OnDebugEvent(ams::svc::DebugEvent_Exception, params, util::size(params));
/* If we should stop processing the exception, do so. */
if (svc::ResultStopProcessingException::Includes(result)) {
return;
}
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
{
if (ec != EsrEc_SoftwareStepEl0) {
/* If the exception wasn't single-step, print details. */
MESOSPHERE_EXCEPTION_LOG("Exception occurred. ");
{
/* Print the current thread's registers. */
KDebug::PrintRegister();
/* Print a backtrace. */
KDebug::PrintBacktrace();
}
} else {
/* If the exception was single-step and we have no debug object, we should just return. */
if (AMS_UNLIKELY(!cur_process.IsAttachedToDebugger())) {
return;
}
}
}
#else
{
/* Print that an exception occurred. */
MESOSPHERE_EXCEPTION_LOG("Exception occurred. ");
{
/* Print the current thread's registers. */
KDebug::PrintRegister();
/* Print a backtrace. */
KDebug::PrintBacktrace();
}
}
#endif
/* If the SVC is handled, handle it. */
if (!svc::ResultNotHandled::Includes(result)) {
/* If we successfully enter jit debug, stop processing the exception. */
if (cur_process.EnterJitDebug(ams::svc::DebugEvent_Exception, static_cast<ams::svc::DebugException>(params[0]), params[1], params[2])) {
return;
}
}
}
/* Exit the current process. */
cur_process.Exit();
}
}
/* NOTE: This function is called from ASM. */
void FpuContextSwitchHandler() {
KThreadContext::FpuContextSwitchHandler(GetCurrentThreadPointer());
}
/* NOTE: This function is called from ASM. */
void ReturnFromException(Result user_result) {
/* Get the current thread. */
KThread *cur_thread = GetCurrentThreadPointer();
/* Get the current exception context. */
KExceptionContext *e_ctx = GetExceptionContext(cur_thread);
/* Get the current process. */
KProcess &cur_process = GetCurrentProcess();
/* Read the exception info that userland put in tls. */
union {
ams::svc::aarch64::ExceptionInfo info64;
ams::svc::aarch32::ExceptionInfo info32;
} info = {};
const bool is_aarch64 = (e_ctx->psr & 0x10) == 0;
if (is_aarch64) {
/* We're 64-bit. */
info.info64 = static_cast<const ams::svc::aarch64::ProcessLocalRegion *>(cur_process.GetProcessLocalRegionHeapAddress())->exception_info;
} else {
/* We're 32-bit. */
info.info32 = static_cast<const ams::svc::aarch32::ProcessLocalRegion *>(cur_process.GetProcessLocalRegionHeapAddress())->exception_info;
}
/* Try to leave the user exception. */
if (cur_process.LeaveUserException()) {
/* Process that we're leaving a usermode exception on the current thread. */
GetCurrentThread().OnLeaveUsermodeException();
/* Copy the user context to the thread context. */
if (is_aarch64) {
for (size_t i = 0; i < util::size(info.info64.r); ++i) {
e_ctx->x[i] = info.info64.r[i];
}
e_ctx->x[30] = info.info64.lr;
e_ctx->sp = info.info64.sp;
e_ctx->pc = info.info64.pc;
e_ctx->psr = (info.info64.pstate & cpu::El0Aarch64PsrMask) | (e_ctx->psr & ~cpu::El0Aarch64PsrMask);
} else {
for (size_t i = 0; i < util::size(info.info32.r); ++i) {
e_ctx->x[i] = info.info32.r[i];
}
e_ctx->x[14] = info.info32.lr;
e_ctx->x[13] = info.info32.sp;
e_ctx->pc = info.info32.pc;
e_ctx->psr = (info.info32.status_64.pstate & cpu::El0Aarch32PsrMask) | (e_ctx->psr & ~cpu::El0Aarch32PsrMask);
}
/* Note that PC was adjusted. */
e_ctx->write = 1;
if (R_SUCCEEDED(user_result)) {
/* If result handling succeeded, just restore the context. */
svc::RestoreContext(reinterpret_cast<uintptr_t>(e_ctx));
} else {
/* Restore the debug params for the exception. */
uintptr_t far, esr, data;
GetCurrentThread().RestoreDebugParams(std::addressof(far), std::addressof(esr), std::addressof(data));
/* Collect additional information based on the ec. */
uintptr_t params[3] = {};
switch ((esr >> 26) & 0x3F) {
case EsrEc_Unknown:
case EsrEc_IllegalExecution:
case EsrEc_BkptInstruction:
case EsrEc_BrkInstruction:
{
params[0] = ams::svc::DebugException_UndefinedInstruction;
params[1] = far;
params[2] = data;
}
break;
case EsrEc_PcAlignmentFault:
case EsrEc_SpAlignmentFault:
{
params[0] = ams::svc::DebugException_AlignmentFault;
params[1] = far;
}
break;
case EsrEc_Svc32:
case EsrEc_Svc64:
{
params[0] = ams::svc::DebugException_UndefinedSystemCall;
params[1] = far;
params[2] = (esr & 0xFF);
}
break;
case EsrEc_SErrorInterrupt:
{
params[0] = ams::svc::DebugException_MemorySystemError;
params[1] = far;
}
break;
case EsrEc_InstructionAbortEl0:
{
params[0] = ams::svc::DebugException_InstructionAbort;
params[1] = far;
}
break;
case EsrEc_DataAbortEl0:
default:
{
params[0] = ams::svc::DebugException_DataAbort;
params[1] = far;
}
break;
}
/* Process the debug event. */
Result result = KDebug::OnDebugEvent(ams::svc::DebugEvent_Exception, params, util::size(params));
/* If the SVC is handled, handle it. */
if (!svc::ResultNotHandled::Includes(result)) {
/* If we should stop processing the exception, restore. */
if (svc::ResultStopProcessingException::Includes(result)) {
svc::RestoreContext(reinterpret_cast<uintptr_t>(e_ctx));
}
/* If we successfully enter jit debug, restore. */
if (cur_process.EnterJitDebug(ams::svc::DebugEvent_Exception, static_cast<ams::svc::DebugException>(params[0]), params[1], params[2])) {
svc::RestoreContext(reinterpret_cast<uintptr_t>(e_ctx));
}
}
/* Otherwise, if result debug was returned, restore. */
if (svc::ResultDebug::Includes(result)) {
svc::RestoreContext(reinterpret_cast<uintptr_t>(e_ctx));
}
}
}
/* Print that an exception occurred. */
MESOSPHERE_EXCEPTION_LOG("Exception occurred. ");
/* Exit the current process. */
GetCurrentProcess().Exit();
}
/* NOTE: This function is called from ASM. */
void HandleException(KExceptionContext *context) {
MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled());
/* Retrieve information about the exception. */
const bool is_user_mode = (context->psr & 0xF) == 0;
const u64 esr = cpu::GetEsrEl1();
const u64 afsr0 = cpu::GetAfsr0El1();
const u64 afsr1 = cpu::GetAfsr1El1();
u64 far = 0;
u32 data = 0;
/* Collect far and data based on the ec. */
switch ((esr >> 26) & 0x3F) {
case EsrEc_Unknown:
case EsrEc_IllegalExecution:
case EsrEc_BkptInstruction:
case EsrEc_BrkInstruction:
far = context->pc;
/* NOTE: Nintendo always calls GetInstructionDataUserMode. */
if (is_user_mode) {
data = GetInstructionDataUserMode(context);
} else {
data = GetInstructionDataSupervisorMode(context, esr);
}
break;
case EsrEc_Svc32:
if (context->psr & 0x20) {
/* Thumb mode. */
context->pc -= 2;
} else {
/* ARM mode. */
context->pc -= 4;
}
far = context->pc;
break;
case EsrEc_Svc64:
context->pc -= 4;
far = context->pc;
break;
case EsrEc_BreakPointEl0:
far = context->pc;
break;
default:
far = cpu::GetFarEl1();
break;
}
/* Note that we're in an exception handler. */
GetCurrentThread().SetInExceptionHandler();
/* Verify that spsr's M is allowable (EL0t). */
{
if (is_user_mode) {
/* If the user disable count is set, we may need to pin the current thread. */
if (GetCurrentThread().GetUserDisableCount() != 0 && GetCurrentProcess().GetPinnedThread(GetCurrentCoreId()) == nullptr) {
KScopedSchedulerLock lk;
/* Pin the current thread. */
GetCurrentProcess().PinCurrentThread();
/* Set the interrupt flag for the thread. */
GetCurrentThread().SetInterruptFlag();
}
/* Enable interrupts while we process the usermode exception. */
{
KScopedInterruptEnable ei;
/* Terminate the thread, if we should. */
if (GetCurrentThread().IsTerminationRequested()) {
GetCurrentThread().Exit();
}
HandleUserException(context, esr, far, afsr0, afsr1, data);
}
} else {
const s32 core_id = GetCurrentCoreId();
MESOSPHERE_LOG("%d: Unhandled Exception in Supervisor Mode\n", core_id);
if (GetCurrentProcessPointer() != nullptr) {
MESOSPHERE_LOG("%d: Current Process = %s\n", core_id, GetCurrentProcess().GetName());
}
for (size_t i = 0; i < 31; i++) {
MESOSPHERE_LOG("%d: X[%02zu] = %016lx\n", core_id, i, context->x[i]);
}
MESOSPHERE_LOG("%d: PC = %016lx\n", core_id, context->pc);
MESOSPHERE_LOG("%d: SP = %016lx\n", core_id, context->sp);
MESOSPHERE_PANIC("Unhandled Exception in Supervisor Mode\n");
}
MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled());
/* Handle any DPC requests. */
while (GetCurrentThread().HasDpc()) {
KDpcManager::HandleDpc();
}
}
/* Note that we're no longer in an exception handler. */
GetCurrentThread().ClearInExceptionHandler();
}
}
| 28,074 | C++ | .cpp | 548 | 32.390511 | 218 | 0.468403 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
7,001 | kern_k_interrupt_manager.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_manager.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::arch::arm64 {
void KInterruptManager::Initialize(s32 core_id) {
m_interrupt_controller.Initialize(core_id);
}
void KInterruptManager::Finalize(s32 core_id) {
m_interrupt_controller.Finalize(core_id);
}
void KInterruptManager::Save(s32 core_id) {
/* Verify core id. */
MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
/* Ensure all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
/* If on core 0, save the global interrupts. */
if (core_id == 0) {
MESOSPHERE_ABORT_UNLESS(!m_global_state_saved);
m_interrupt_controller.SaveGlobal(std::addressof(m_global_state));
m_global_state_saved = true;
}
/* Ensure all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
/* Save all local interrupts. */
MESOSPHERE_ABORT_UNLESS(!m_local_state_saved[core_id]);
m_interrupt_controller.SaveCoreLocal(std::addressof(m_local_states[core_id]));
m_local_state_saved[core_id] = true;
/* Ensure all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
/* Finalize all cores other than core 0. */
if (core_id != 0) {
this->Finalize(core_id);
}
/* Ensure all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
/* Finalize core 0. */
if (core_id == 0) {
this->Finalize(core_id);
}
}
void KInterruptManager::Restore(s32 core_id) {
/* Verify core id. */
MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
/* Ensure all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
/* Initialize core 0. */
if (core_id == 0) {
this->Initialize(core_id);
}
/* Ensure all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
/* Initialize all cores other than core 0. */
if (core_id != 0) {
this->Initialize(core_id);
}
/* Ensure all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
/* Restore all local interrupts. */
MESOSPHERE_ASSERT(m_local_state_saved[core_id]);
m_interrupt_controller.RestoreCoreLocal(std::addressof(m_local_states[core_id]));
m_local_state_saved[core_id] = false;
/* Ensure all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
/* If on core 0, restore the global interrupts. */
if (core_id == 0) {
MESOSPHERE_ASSERT(m_global_state_saved);
m_interrupt_controller.RestoreGlobal(std::addressof(m_global_state));
m_global_state_saved = false;
}
/* Ensure all cores get to this point before continuing. */
cpu::SynchronizeAllCores();
}
bool KInterruptManager::OnHandleInterrupt() {
/* Get the interrupt id. */
const u32 raw_irq = m_interrupt_controller.GetIrq();
const s32 irq = KInterruptController::ConvertRawIrq(raw_irq);
/* Trace the interrupt. */
MESOSPHERE_KTRACE_INTERRUPT(irq);
/* If the IRQ is spurious, we don't need to reschedule. */
if (irq < 0) {
return false;
}
KInterruptTask *task = nullptr;
if (KInterruptController::IsLocal(irq)) {
/* Get local interrupt entry. */
auto &entry = GetLocalInterruptEntry(irq);
if (entry.handler != nullptr) {
/* Set manual clear needed if relevant. */
if (entry.manually_cleared) {
m_interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low);
entry.needs_clear = true;
}
/* Set the handler. */
task = entry.handler->OnInterrupt(irq);
} else {
MESOSPHERE_LOG("Core%d: Unhandled local interrupt %d\n", GetCurrentCoreId(), irq);
}
} else if (KInterruptController::IsGlobal(irq)) {
KScopedSpinLock lk(this->GetGlobalInterruptLock());
/* Get global interrupt entry. */
auto &entry = GetGlobalInterruptEntry(irq);
if (entry.handler != nullptr) {
/* Set manual clear needed if relevant. */
if (entry.manually_cleared) {
m_interrupt_controller.Disable(irq);
entry.needs_clear = true;
}
/* Set the handler. */
task = entry.handler->OnInterrupt(irq);
} else {
MESOSPHERE_LOG("Core%d: Unhandled global interrupt %d\n", GetCurrentCoreId(), irq);
}
} else {
MESOSPHERE_LOG("Invalid interrupt %d\n", irq);
}
/* Acknowledge the interrupt. */
m_interrupt_controller.EndOfInterrupt(raw_irq);
/* If we found no task, then we don't need to reschedule. */
if (task == nullptr) {
return false;
}
/* If the task isn't the dummy task, we should add it to the queue. */
if (task != GetDummyInterruptTask()) {
Kernel::GetInterruptTaskManager().EnqueueTask(task);
}
return true;
}
void KInterruptManager::HandleInterrupt(bool user_mode) {
/* On interrupt, call OnHandleInterrupt() to determine if we need rescheduling and handle. */
const bool needs_scheduling = Kernel::GetInterruptManager().OnHandleInterrupt();
/* If we need scheduling, do so, pinning the current thread first if necessary. */
if (needs_scheduling) {
if (user_mode) {
/* If the interrupt occurred in the middle of a userland cache maintenance operation, ensure memory consistency before rescheduling. */
if (GetCurrentThread().IsInUserCacheMaintenanceOperation()) {
cpu::DataSynchronizationBarrier();
}
/* If the user disable count is set, we may need to pin the current thread. */
if (GetCurrentThread().GetUserDisableCount() != 0 && GetCurrentProcess().GetPinnedThread(GetCurrentCoreId()) == nullptr) {
KScopedSchedulerLock sl;
/* Pin the current thread. */
GetCurrentProcess().PinCurrentThread();
/* Set the interrupt flag for the thread. */
GetCurrentThread().SetInterruptFlag();
/* Request interrupt scheduling. */
Kernel::GetScheduler().RequestScheduleOnInterrupt();
} else {
/* Request interrupt scheduling. */
Kernel::GetScheduler().RequestScheduleOnInterrupt();
}
} else {
/* If the interrupt occurred in the middle of a cache maintenance operation, ensure memory consistency before rescheduling. */
if (GetCurrentThread().IsInCacheMaintenanceOperation()) {
cpu::DataSynchronizationBarrier();
} else if (GetCurrentThread().IsInTlbMaintenanceOperation()) {
/* Otherwise, if we're in the middle of a tlb maintenance operation, ensure inner shareable memory consistency before rescheduling. */
cpu::DataSynchronizationBarrierInnerShareable();
}
/* Request interrupt scheduling. */
Kernel::GetScheduler().RequestScheduleOnInterrupt();
}
}
/* If user mode, check if the thread needs termination. */
/* If it does, we can take advantage of this to terminate it. */
if (user_mode) {
KThread *cur_thread = GetCurrentThreadPointer();
if (cur_thread->IsTerminationRequested()) {
EnableInterrupts();
cur_thread->Exit();
}
}
}
Result KInterruptManager::BindHandler(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level) {
MESOSPHERE_UNUSED(core_id);
R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());
if (KInterruptController::IsGlobal(irq)) {
KScopedInterruptDisable di;
KScopedSpinLock lk(this->GetGlobalInterruptLock());
R_RETURN(this->BindGlobal(handler, irq, core_id, priority, manual_clear, level));
} else {
MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
KScopedInterruptDisable di;
R_RETURN(this->BindLocal(handler, irq, priority, manual_clear));
}
}
Result KInterruptManager::UnbindHandler(s32 irq, s32 core_id) {
MESOSPHERE_UNUSED(core_id);
R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());
if (KInterruptController::IsGlobal(irq)) {
KScopedInterruptDisable di;
KScopedSpinLock lk(this->GetGlobalInterruptLock());
R_RETURN(this->UnbindGlobal(irq));
} else {
MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
KScopedInterruptDisable di;
R_RETURN(this->UnbindLocal(irq));
}
}
Result KInterruptManager::ClearInterrupt(s32 irq, s32 core_id) {
MESOSPHERE_UNUSED(core_id);
R_UNLESS(KInterruptController::IsGlobal(irq) || KInterruptController::IsLocal(irq), svc::ResultOutOfRange());
if (KInterruptController::IsGlobal(irq)) {
KScopedInterruptDisable di;
KScopedSpinLock lk(this->GetGlobalInterruptLock());
R_RETURN(this->ClearGlobal(irq));
} else {
MESOSPHERE_ASSERT(core_id == GetCurrentCoreId());
KScopedInterruptDisable di;
R_RETURN(this->ClearLocal(irq));
}
}
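/* Editor's sketch (hypothetical driver, for illustration): the manual-clear flow that
 * ClearGlobal/ClearLocal below complete looks like:
 *
 *     // Bind with manual_clear = true; the irq is masked after each delivery.
 *     Kernel::GetInterruptManager().BindHandler(handler, irq, core_id, priority, true, level);
 *     // ...OnInterrupt() runs and entry.needs_clear is set...
 *     // Acknowledge to re-enable (global) or restore the priority (local):
 *     Kernel::GetInterruptManager().ClearInterrupt(irq, core_id);
 *
 * KHardwareTimer uses exactly this pattern for the non-secure physical timer interrupt. */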
Result KInterruptManager::BindGlobal(KInterruptHandler *handler, s32 irq, s32 core_id, s32 priority, bool manual_clear, bool level) {
/* Ensure the priority level is valid. */
R_UNLESS(KInterruptController::PriorityLevel_High <= priority, svc::ResultOutOfRange());
R_UNLESS(priority <= KInterruptController::PriorityLevel_Low, svc::ResultOutOfRange());
/* Ensure we aren't already bound. */
auto &entry = GetGlobalInterruptEntry(irq);
R_UNLESS(entry.handler == nullptr, svc::ResultBusy());
/* Set entry fields. */
entry.needs_clear = false;
entry.manually_cleared = manual_clear;
entry.handler = handler;
/* Configure the interrupt as level or edge. */
if (level) {
m_interrupt_controller.SetLevel(irq);
} else {
m_interrupt_controller.SetEdge(irq);
}
/* Configure the interrupt. */
m_interrupt_controller.Clear(irq);
m_interrupt_controller.SetTarget(irq, core_id);
m_interrupt_controller.SetPriorityLevel(irq, priority);
m_interrupt_controller.Enable(irq);
R_SUCCEED();
}
Result KInterruptManager::BindLocal(KInterruptHandler *handler, s32 irq, s32 priority, bool manual_clear) {
/* Ensure the priority level is valid. */
R_UNLESS(KInterruptController::PriorityLevel_High <= priority, svc::ResultOutOfRange());
R_UNLESS(priority <= KInterruptController::PriorityLevel_Low, svc::ResultOutOfRange());
/* Ensure we aren't already bound. */
auto &entry = this->GetLocalInterruptEntry(irq);
R_UNLESS(entry.handler == nullptr, svc::ResultBusy());
/* Set entry fields. */
entry.needs_clear = false;
entry.manually_cleared = manual_clear;
entry.handler = handler;
entry.priority = static_cast<u8>(priority);
/* Configure the interrupt. */
m_interrupt_controller.Clear(irq);
m_interrupt_controller.SetPriorityLevel(irq, priority);
m_interrupt_controller.Enable(irq);
R_SUCCEED();
}
Result KInterruptManager::UnbindGlobal(s32 irq) {
for (size_t core_id = 0; core_id < cpu::NumCores; core_id++) {
m_interrupt_controller.ClearTarget(irq, static_cast<s32>(core_id));
}
m_interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low);
m_interrupt_controller.Disable(irq);
GetGlobalInterruptEntry(irq).handler = nullptr;
R_SUCCEED();
}
Result KInterruptManager::UnbindLocal(s32 irq) {
auto &entry = this->GetLocalInterruptEntry(irq);
R_UNLESS(entry.handler != nullptr, svc::ResultInvalidState());
m_interrupt_controller.SetPriorityLevel(irq, KInterruptController::PriorityLevel_Low);
m_interrupt_controller.Disable(irq);
entry.handler = nullptr;
R_SUCCEED();
}
Result KInterruptManager::ClearGlobal(s32 irq) {
/* We can't clear an entry with no handler. */
auto &entry = GetGlobalInterruptEntry(irq);
R_UNLESS(entry.handler != nullptr, svc::ResultInvalidState());
/* If auto-cleared, we can succeed immediately. */
R_SUCCEED_IF(!entry.manually_cleared);
R_SUCCEED_IF(!entry.needs_clear);
/* Clear and enable. */
entry.needs_clear = false;
m_interrupt_controller.Enable(irq);
R_SUCCEED();
}
Result KInterruptManager::ClearLocal(s32 irq) {
/* We can't clear an entry with no handler. */
auto &entry = this->GetLocalInterruptEntry(irq);
R_UNLESS(entry.handler != nullptr, svc::ResultInvalidState());
/* If auto-cleared, we can succeed immediately. */
R_SUCCEED_IF(!entry.manually_cleared);
R_SUCCEED_IF(!entry.needs_clear);
/* Clear and set priority. */
entry.needs_clear = false;
m_interrupt_controller.SetPriorityLevel(irq, entry.priority);
R_SUCCEED();
}
}
| 14,715 | C++ | .cpp | 308 | 37.285714 | 154 | 0.617828 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
7,002 | kern_k_hardware_timer.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/arch/arm64/kern_k_hardware_timer.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::arch::arm64 {
void KHardwareTimer::Initialize() {
/* Setup the global timer for the core. */
InitializeGlobalTimer();
/* Set maximum time. */
m_maximum_time = static_cast<s64>(std::min<u64>(std::numeric_limits<s64>::max(), cpu::CounterTimerPhysicalTimerCompareValueRegisterAccessor().GetCompareValue()));
/* Bind the interrupt task for this core. */
Kernel::GetInterruptManager().BindHandler(this, KInterruptName_NonSecurePhysicalTimer, GetCurrentCoreId(), KInterruptController::PriorityLevel_Timer, true, true);
}
void KHardwareTimer::Finalize() {
/* Stop the hardware timer. */
StopTimer();
}
void KHardwareTimer::DoTask() {
/* Handle the interrupt. */
{
KScopedSchedulerLock slk;
KScopedSpinLock lk(this->GetLock());
/* Disable the timer interrupt while we handle this. */
DisableInterrupt();
if (const s64 next_time = this->DoInterruptTaskImpl(GetTick()); 0 < next_time && next_time <= m_maximum_time) {
/* We have a next time, so we should set the time to interrupt and turn the interrupt on. */
SetCompareValue(next_time);
EnableInterrupt();
}
}
/* Clear the timer interrupt. */
Kernel::GetInterruptManager().ClearInterrupt(KInterruptName_NonSecurePhysicalTimer, GetCurrentCoreId());
}
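    /* Illustrative note, not part of the original source: DoTask disarms the compare          */
    /* interrupt first, runs the expired timer tasks under the scheduler and timer locks, and  */
    /* only re-arms when a future deadline exists within m_maximum_time, presumably to avoid   */
    /* an immediate spurious re-fire.                                                          */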
}
| 2,133 | C++ | .cpp | 46 | 39.478261 | 170 | 0.674687 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

| 7,003 | kern_k_interrupt_controller.board.generic.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/arch/arm64/kern_k_interrupt_controller.board.generic.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
/* Include the common implementation. */
#include "../arm/kern_generic_interrupt_controller.inc"
| 759 | C++ | .cpp | 18 | 40.333333 | 76 | 0.759459 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

| 7,004 | kern_k_thread_context.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/arch/arm64/kern_k_thread_context.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::arch::arm64 {
/* These are implemented elsewhere (asm). */
void UserModeThreadStarter();
void SupervisorModeThreadStarter();
void InvokeSupervisorModeThread(uintptr_t argument, uintptr_t entrypoint) {
/* Invoke the function. */
using SupervisorModeFunctionType = void (*)(uintptr_t);
reinterpret_cast<SupervisorModeFunctionType>(entrypoint)(argument);
/* Wait forever. */
AMS_INFINITE_LOOP();
}
void OnThreadStart() {
MESOSPHERE_ASSERT(!KInterruptManager::AreInterruptsEnabled());
/* Send KDebug event for this thread's creation. */
{
KScopedInterruptEnable ei;
const uintptr_t params[2] = { GetCurrentThread().GetId(), GetInteger(GetCurrentThread().GetThreadLocalRegionAddress()) };
KDebug::OnDebugEvent(ams::svc::DebugEvent_CreateThread, params, util::size(params));
}
/* Handle any pending dpc. */
while (GetCurrentThread().HasDpc()) {
KDpcManager::HandleDpc();
}
        /* Clear our status as in an exception handler. */
GetCurrentThread().ClearInExceptionHandler();
}
namespace {
ALWAYS_INLINE bool IsFpuEnabled() {
return cpu::ArchitecturalFeatureAccessControlRegisterAccessor().IsFpEnabled();
}
ALWAYS_INLINE void EnableFpu() {
cpu::ArchitecturalFeatureAccessControlRegisterAccessor().SetFpEnabled(true).Store();
cpu::InstructionMemoryBarrier();
}
uintptr_t SetupStackForUserModeThreadStarter(KVirtualAddress pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, const bool is_64_bit) {
            /* NOTE: Stack layout on entry looks like the following: */
/* SP */
/* | */
/* v */
            /* | KExceptionContext (size 0x120) | KThread::StackParameters (size 0x140) | */
KExceptionContext *ctx = GetPointer<KExceptionContext>(k_sp) - 1;
/* Clear context. */
std::memset(ctx, 0, sizeof(*ctx));
/* Set PC and argument. */
ctx->pc = GetInteger(pc) & ~(UINT64_C(1));
ctx->x[0] = arg;
/* Set PSR. */
if (is_64_bit) {
ctx->psr = 0;
} else {
constexpr u64 PsrArmValue = 0x00;
constexpr u64 PsrThumbValue = 0x20;
ctx->psr = ((pc & 1) == 0 ? PsrArmValue : PsrThumbValue) | (0x10);
MESOSPHERE_LOG("Creating User 32-Thread, %016lx\n", GetInteger(pc));
}
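            /* Illustrative note, not part of the original source: per the AArch32 SPSR        */
            /* layout, M[4:0] = 0x10 selects User mode and PSTATE.T (0x20) selects Thumb,      */
            /* which is why an odd entrypoint address yields PsrThumbValue above.              */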
/* Set CFI-value. */
if (is_64_bit) {
ctx->x[18] = KSystemControl::GenerateRandomU64() | 1;
}
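            /* Illustrative note, not part of the original source: x18 is the AArch64          */
            /* platform register; seeding it with a random odd value here presumably provides  */
            /* the per-thread CFI cookie named by the comment above (an assumption inferred    */
            /* from the "Set CFI-value" comment, not documented behavior).                     */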
/* Set stack pointer. */
if (is_64_bit) {
ctx->sp = GetInteger(u_sp);
} else {
ctx->x[13] = GetInteger(u_sp);
}
return reinterpret_cast<uintptr_t>(ctx);
}
uintptr_t SetupStackForSupervisorModeThreadStarter(KVirtualAddress pc, KVirtualAddress sp, uintptr_t arg) {
            /* NOTE: Stack layout on entry looks like the following: */
/* SP */
/* | */
/* v */
/* | u64 argument | u64 entrypoint | KThread::StackParameters (size 0x140) | */
static_assert(sizeof(KThread::StackParameters) == 0x140);
u64 *stack = GetPointer<u64>(sp);
*(--stack) = GetInteger(pc);
*(--stack) = arg;
return reinterpret_cast<uintptr_t>(stack);
}
}
Result KThreadContext::Initialize(KVirtualAddress u_pc, KVirtualAddress k_sp, KVirtualAddress u_sp, uintptr_t arg, bool is_user, bool is_64_bit, bool is_main) {
MESOSPHERE_ASSERT(k_sp != Null<KVirtualAddress>);
/* Ensure that the stack pointers are aligned. */
k_sp = util::AlignDown(GetInteger(k_sp), 16);
u_sp = util::AlignDown(GetInteger(u_sp), 16);
/* Determine LR and SP. */
if (is_user) {
/* Usermode thread. */
m_lr = reinterpret_cast<uintptr_t>(::ams::kern::arch::arm64::UserModeThreadStarter);
m_sp = SetupStackForUserModeThreadStarter(u_pc, k_sp, u_sp, arg, is_64_bit);
} else {
/* Kernel thread. */
MESOSPHERE_ASSERT(is_64_bit);
if (is_main) {
/* Main thread. */
m_lr = GetInteger(u_pc);
m_sp = GetInteger(k_sp);
} else {
/* Generic Kernel thread. */
m_lr = reinterpret_cast<uintptr_t>(::ams::kern::arch::arm64::SupervisorModeThreadStarter);
m_sp = SetupStackForSupervisorModeThreadStarter(u_pc, k_sp, arg);
}
}
/* Clear callee-saved registers. */
for (size_t i = 0; i < util::size(m_callee_saved.registers); i++) {
m_callee_saved.registers[i] = 0;
}
/* Clear FPU state. */
m_fpcr = 0;
m_fpsr = 0;
for (size_t i = 0; i < util::size(m_callee_saved_fpu.fpu64.v); ++i) {
m_callee_saved_fpu.fpu64.v[i] = 0;
}
/* Lock the context, if we're a main thread. */
m_locked = is_main;
R_SUCCEED();
}
void KThreadContext::SetArguments(uintptr_t arg0, uintptr_t arg1) {
u64 *stack = reinterpret_cast<u64 *>(m_sp);
stack[0] = arg0;
stack[1] = arg1;
}
void KThreadContext::CloneFpuStatus() {
u64 pcr, psr;
cpu::InstructionMemoryBarrier();
KScopedInterruptDisable di;
if (IsFpuEnabled()) {
__asm__ __volatile__("mrs %[pcr], fpcr" : [pcr]"=r"(pcr) :: "memory");
__asm__ __volatile__("mrs %[psr], fpsr" : [psr]"=r"(psr) :: "memory");
} else {
pcr = GetCurrentThread().GetContext().GetFpcr();
psr = GetCurrentThread().GetContext().GetFpsr();
}
this->SetFpcr(pcr);
this->SetFpsr(psr);
}
void GetUserContext(ams::svc::ThreadContext *out, const KThread *thread) {
MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());
MESOSPHERE_ASSERT(thread->IsSuspended());
MESOSPHERE_ASSERT(thread->GetOwnerProcess() != nullptr);
/* Get the contexts. */
const KExceptionContext *e_ctx = GetExceptionContext(thread);
const KThreadContext *t_ctx = std::addressof(thread->GetContext());
if (thread->GetOwnerProcess()->Is64Bit()) {
/* Set special registers. */
out->fp = e_ctx->x[29];
out->lr = e_ctx->x[30];
out->sp = e_ctx->sp;
out->pc = e_ctx->pc;
out->pstate = e_ctx->psr & cpu::El0Aarch64PsrMask;
/* Get the thread's general purpose registers. */
if (thread->IsCallingSvc()) {
for (size_t i = 19; i < 29; ++i) {
out->r[i] = e_ctx->x[i];
}
if (e_ctx->write == 0) {
out->pc -= sizeof(u32);
}
} else {
for (size_t i = 0; i < 29; ++i) {
out->r[i] = e_ctx->x[i];
}
}
/* Copy tpidr. */
out->tpidr = e_ctx->tpidr;
/* Copy fpu registers. */
static_assert(util::size(ams::svc::ThreadContext{}.v) == KThreadContext::NumFpuRegisters);
static_assert(KThreadContext::NumCallerSavedFpuRegisters == KThreadContext::NumCalleeSavedFpuRegisters * 3);
static_assert(KThreadContext::NumFpuRegisters == KThreadContext::NumCallerSavedFpuRegisters + KThreadContext::NumCalleeSavedFpuRegisters);
const auto &caller_save_fpu = thread->GetCallerSaveFpuRegisters().fpu64;
const auto &callee_save_fpu = t_ctx->GetCalleeSaveFpuRegisters().fpu64;
if (!thread->IsCallingSvc() || thread->IsInUsermodeExceptionHandler()) {
KThreadContext::GetFpuRegisters(out->v, caller_save_fpu, callee_save_fpu);
} else {
for (size_t i = 0; i < KThreadContext::NumCalleeSavedFpuRegisters; ++i) {
out->v[(KThreadContext::NumCallerSavedFpuRegisters / 3) + i] = caller_save_fpu.v[i];
}
}
} else {
/* Set special registers. */
out->pc = static_cast<u32>(e_ctx->pc);
out->pstate = e_ctx->psr & cpu::El0Aarch32PsrMask;
/* Get the thread's general purpose registers. */
for (size_t i = 0; i < 15; ++i) {
out->r[i] = static_cast<u32>(e_ctx->x[i]);
}
/* Adjust PC, if the thread is calling svc. */
if (thread->IsCallingSvc()) {
if (e_ctx->write == 0) {
/* Adjust by 2 if thumb mode, 4 if arm mode. */
out->pc -= ((e_ctx->psr & 0x20) == 0) ? sizeof(u32) : sizeof(u16);
}
}
/* Copy tpidr. */
out->tpidr = static_cast<u32>(e_ctx->tpidr);
/* Copy fpu registers. */
static_assert(util::size(ams::svc::ThreadContext{}.v) == KThreadContext::NumFpuRegisters);
static_assert(KThreadContext::NumCallerSavedFpuRegisters == KThreadContext::NumCalleeSavedFpuRegisters * 3);
static_assert(KThreadContext::NumFpuRegisters == KThreadContext::NumCallerSavedFpuRegisters + KThreadContext::NumCalleeSavedFpuRegisters);
const auto &caller_save_fpu = thread->GetCallerSaveFpuRegisters().fpu32;
const auto &callee_save_fpu = t_ctx->GetCalleeSaveFpuRegisters().fpu32;
if (!thread->IsCallingSvc() || thread->IsInUsermodeExceptionHandler()) {
KThreadContext::GetFpuRegisters(out->v, caller_save_fpu, callee_save_fpu);
} else {
for (size_t i = 0; i < KThreadContext::NumCalleeSavedFpuRegisters / 2; ++i) {
out->v[((KThreadContext::NumCallerSavedFpuRegisters / 3) / 2) + i] = caller_save_fpu.v[i];
}
}
}
/* Copy fpcr/fpsr. */
out->fpcr = t_ctx->GetFpcr();
out->fpsr = t_ctx->GetFpsr();
}
void KThreadContext::OnThreadTerminating(const KThread *thread) {
MESOSPHERE_UNUSED(thread);
/* ... */
}
}
| 11,522 | C++ | .cpp | 234 | 37.935897 | 164 | 0.540706 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

| 7,005 | kern_svc_tables.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_tables.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifdef MESOSPHERE_USE_STUBBED_SVC_TABLES
#include <mesosphere/kern_debug_log.hpp>
#endif
#include <mesosphere/svc/kern_svc_tables.hpp>
#include <vapours/svc/svc_codegen.hpp>
namespace ams::kern::svc {
/* Declare special prototypes for the light ipc handlers. */
void CallSendSyncRequestLight64();
void CallSendSyncRequestLight64From32();
void CallReplyAndReceiveLight64();
void CallReplyAndReceiveLight64From32();
/* Declare special prototypes for ReturnFromException. */
void CallReturnFromException64();
void CallReturnFromException64From32();
/* Declare special prototype for (unsupported) CallCallSecureMonitor64From32. */
void CallCallSecureMonitor64From32();
/* Declare special prototypes for WaitForAddress. */
void CallWaitForAddress64();
void CallWaitForAddress64From32();
namespace {
#ifndef MESOSPHERE_USE_STUBBED_SVC_TABLES
#define DECLARE_SVC_STRUCT(ID, RETURN_TYPE, NAME, ...) \
class NAME { \
private: \
using Impl = ::ams::svc::codegen::KernelSvcWrapper<::ams::kern::svc::NAME##64, ::ams::kern::svc::NAME##64From32>; \
public: \
static NOINLINE void Call64() { return Impl::Call64(); } \
static NOINLINE void Call64From32() { return Impl::Call64From32(); } \
};
#else
#define DECLARE_SVC_STRUCT(ID, RETURN_TYPE, NAME, ...) \
class NAME { \
public: \
static NOINLINE void Call64() { MESOSPHERE_PANIC("Stubbed Svc"#NAME"64 was called"); } \
static NOINLINE void Call64From32() { MESOSPHERE_PANIC("Stubbed Svc"#NAME"64From32 was called"); } \
};
#endif
/* Set omit-frame-pointer to prevent GCC from emitting MOV X29, SP instructions. */
#pragma GCC push_options
#pragma GCC optimize ("-O3")
#pragma GCC optimize ("omit-frame-pointer")
AMS_SVC_FOREACH_KERN_DEFINITION(DECLARE_SVC_STRUCT, _)
#pragma GCC pop_options
constexpr const std::array<SvcTableEntry, NumSupervisorCalls> SvcTable64From32Impl = [] {
std::array<SvcTableEntry, NumSupervisorCalls> table = {};
#define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \
if (table[ID] == nullptr) { table[ID] = NAME::Call64From32; }
AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _)
#undef AMS_KERN_SVC_SET_TABLE_ENTRY
table[svc::SvcId_SendSyncRequestLight] = CallSendSyncRequestLight64From32;
table[svc::SvcId_ReplyAndReceiveLight] = CallReplyAndReceiveLight64From32;
table[svc::SvcId_ReturnFromException] = CallReturnFromException64From32;
table[svc::SvcId_CallSecureMonitor] = CallCallSecureMonitor64From32;
table[svc::SvcId_WaitForAddress] = CallWaitForAddress64From32;
return table;
}();
constexpr const std::array<SvcTableEntry, NumSupervisorCalls> SvcTable64Impl = [] {
std::array<SvcTableEntry, NumSupervisorCalls> table = {};
#define AMS_KERN_SVC_SET_TABLE_ENTRY(ID, RETURN_TYPE, NAME, ...) \
if (table[ID] == nullptr) { table[ID] = NAME::Call64; }
AMS_SVC_FOREACH_KERN_DEFINITION(AMS_KERN_SVC_SET_TABLE_ENTRY, _)
#undef AMS_KERN_SVC_SET_TABLE_ENTRY
table[svc::SvcId_SendSyncRequestLight] = CallSendSyncRequestLight64;
table[svc::SvcId_ReplyAndReceiveLight] = CallReplyAndReceiveLight64;
table[svc::SvcId_ReturnFromException] = CallReturnFromException64;
table[svc::SvcId_WaitForAddress] = CallWaitForAddress64;
return table;
}();
constexpr bool IsValidSvcTable(const std::array<SvcTableEntry, NumSupervisorCalls> &table) {
for (size_t i = 0; i < NumSupervisorCalls; i++) {
if (table[i] != nullptr) {
return true;
}
}
return false;
}
static_assert(IsValidSvcTable(SvcTable64Impl));
static_assert(IsValidSvcTable(SvcTable64From32Impl));
}
constinit const std::array<SvcTableEntry, NumSupervisorCalls> SvcTable64 = SvcTable64Impl;
constinit const std::array<SvcTableEntry, NumSupervisorCalls> SvcTable64From32 = SvcTable64From32Impl;
void PatchSvcTableEntry(const SvcTableEntry *table, u32 id, SvcTableEntry entry);
namespace {
/* NOTE: Although the SVC tables are constants, our global constructor will run before .rodata is protected R--. */
class SvcTablePatcher {
private:
using SvcTable = std::array<SvcTableEntry, NumSupervisorCalls>;
private:
static SvcTablePatcher s_instance;
private:
ALWAYS_INLINE const SvcTableEntry *GetTableData(const SvcTable *table) {
if (table != nullptr) {
return table->data();
} else {
return nullptr;
}
}
NOINLINE void PatchTables(const SvcTableEntry *table_64, const SvcTableEntry *table_64_from_32) {
/* Get the target firmware. */
const auto target_fw = kern::GetTargetFirmware();
/* 10.0.0 broke the ABI for QueryIoMapping, and renamed it to QueryMemoryMapping. */
if (target_fw < TargetFirmware_10_0_0) {
if (table_64) { ::ams::kern::svc::PatchSvcTableEntry(table_64, svc::SvcId_QueryMemoryMapping, LegacyQueryIoMapping::Call64); }
if (table_64_from_32) { ::ams::kern::svc::PatchSvcTableEntry(table_64_from_32, svc::SvcId_QueryMemoryMapping, LegacyQueryIoMapping::Call64From32); }
}
/* 6.0.0 broke the ABI for GetFutureThreadInfo, and renamed it to GetDebugFutureThreadInfo. */
if (target_fw < TargetFirmware_6_0_0) {
static_assert(svc::SvcId_GetDebugFutureThreadInfo == svc::SvcId_LegacyGetFutureThreadInfo);
if (table_64) { ::ams::kern::svc::PatchSvcTableEntry(table_64, svc::SvcId_GetDebugFutureThreadInfo, LegacyGetFutureThreadInfo::Call64); }
if (table_64_from_32) { ::ams::kern::svc::PatchSvcTableEntry(table_64_from_32, svc::SvcId_GetDebugFutureThreadInfo, LegacyGetFutureThreadInfo::Call64From32); }
}
/* 3.0.0 broke the ABI for ContinueDebugEvent. */
if (target_fw < TargetFirmware_3_0_0) {
if (table_64) { ::ams::kern::svc::PatchSvcTableEntry(table_64, svc::SvcId_ContinueDebugEvent, LegacyContinueDebugEvent::Call64); }
if (table_64_from_32) { ::ams::kern::svc::PatchSvcTableEntry(table_64_from_32, svc::SvcId_ContinueDebugEvent, LegacyContinueDebugEvent::Call64From32); }
}
}
public:
SvcTablePatcher(const SvcTable *table_64, const SvcTable *table_64_from_32) {
PatchTables(GetTableData(table_64), GetTableData(table_64_from_32));
}
};
SvcTablePatcher SvcTablePatcher::s_instance(std::addressof(SvcTable64), std::addressof(SvcTable64From32));
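        /* Illustrative note, not part of the original source: because s_instance is          */
        /* constructed during global initialization, the patching happens in the window        */
        /* described above, before .rodata is remapped R-- and the const tables become truly   */
        /* immutable.                                                                          */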
}
}
| 9,035 | C++ | .cpp | 140 | 51.992857 | 183 | 0.565119 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

| 7,006 | kern_svc_handlers.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/arch/arm64/svc/kern_svc_handlers.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
void TraceSvcEntry(const u64 *data) {
MESOSPHERE_KTRACE_SVC_ENTRY(GetCurrentThread().GetSvcId(), data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]);
}
void TraceSvcExit(const u64 *data) {
MESOSPHERE_KTRACE_SVC_EXIT(GetCurrentThread().GetSvcId(), data[0], data[1], data[2], data[3], data[4], data[5], data[6], data[7]);
}
}
| 1,068 | C++ | .cpp | 24 | 41.416667 | 139 | 0.711538 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

| 7,007 | kern_init_elf.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/init/kern_init_elf.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::init::Elf {
/* API to apply relocations or call init array. */
void ApplyRelocations(uintptr_t base_address, const Dyn *dynamic) {
uintptr_t dyn_rel = 0;
uintptr_t dyn_rela = 0;
uintptr_t dyn_relr = 0;
uintptr_t rel_count = 0;
uintptr_t rela_count = 0;
uintptr_t relr_sz = 0;
uintptr_t rel_ent = 0;
uintptr_t rela_ent = 0;
uintptr_t relr_ent = 0;
/* Iterate over all tags, identifying important extents. */
for (const Dyn *cur_entry = dynamic; cur_entry->GetTag() != DT_NULL; cur_entry++) {
switch (cur_entry->GetTag()) {
case DT_REL:
dyn_rel = base_address + cur_entry->GetPtr();
break;
case DT_RELA:
dyn_rela = base_address + cur_entry->GetPtr();
break;
case DT_RELR:
dyn_relr = base_address + cur_entry->GetPtr();
break;
case DT_RELENT:
rel_ent = cur_entry->GetValue();
break;
case DT_RELAENT:
rela_ent = cur_entry->GetValue();
break;
case DT_RELRENT:
relr_ent = cur_entry->GetValue();
break;
case DT_RELCOUNT:
rel_count = cur_entry->GetValue();
break;
case DT_RELACOUNT:
rela_count = cur_entry->GetValue();
break;
case DT_RELRSZ:
relr_sz = cur_entry->GetValue();
break;
}
}
/* Apply all Rel relocations */
if (rel_count > 0) {
            /* Check that the rel relocations are applicable. */
MESOSPHERE_INIT_ABORT_UNLESS(dyn_rel != 0);
MESOSPHERE_INIT_ABORT_UNLESS(rel_ent == sizeof(Elf::Rel));
for (size_t i = 0; i < rel_count; ++i) {
const auto &rel = reinterpret_cast<const Elf::Rel *>(dyn_rel)[i];
/* Only allow architecture-specific relocations. */
while (rel.GetType() != R_ARCHITECTURE_RELATIVE) { /* ... */ }
/* Apply the relocation. */
Elf::Addr *target_address = reinterpret_cast<Elf::Addr *>(base_address + rel.GetOffset());
*target_address += base_address;
}
}
/* Apply all Rela relocations. */
if (rela_count > 0) {
            /* Check that the rela relocations are applicable. */
MESOSPHERE_INIT_ABORT_UNLESS(dyn_rela != 0);
MESOSPHERE_INIT_ABORT_UNLESS(rela_ent == sizeof(Elf::Rela));
for (size_t i = 0; i < rela_count; ++i) {
const auto &rela = reinterpret_cast<const Elf::Rela *>(dyn_rela)[i];
/* Only allow architecture-specific relocations. */
while (rela.GetType() != R_ARCHITECTURE_RELATIVE) { /* ... */ }
/* Apply the relocation. */
Elf::Addr *target_address = reinterpret_cast<Elf::Addr *>(base_address + rela.GetOffset());
*target_address = base_address + rela.GetAddend();
}
}
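        /* Illustrative note, not part of the original source: the two loops above reflect    */
        /* the standard ELF distinction that Rel entries use the value already stored at the   */
        /* target as an implicit addend (hence +=), while Rela entries carry an explicit       */
        /* addend (hence the assignment of base + addend).                                     */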
/* Apply all Relr relocations. */
if (relr_sz >= sizeof(Elf::Relr)) {
            /* Check that the relr relocations are applicable. */
MESOSPHERE_INIT_ABORT_UNLESS(dyn_relr != 0);
MESOSPHERE_INIT_ABORT_UNLESS(relr_ent == sizeof(Elf::Relr));
const size_t relr_count = relr_sz / sizeof(Elf::Relr);
Elf::Addr *where = nullptr;
for (size_t i = 0; i < relr_count; ++i) {
const auto &relr = reinterpret_cast<const Elf::Relr *>(dyn_relr)[i];
if (relr.IsLocation()) {
/* Update location. */
where = reinterpret_cast<Elf::Addr *>(base_address + relr.GetLocation());
/* Apply the relocation. */
*(where++) += base_address;
} else {
/* Get the bitmap. */
u64 bitmap = relr.GetBitmap();
/* Apply all relocations. */
while (bitmap != 0) {
const u64 next = util::CountTrailingZeros(bitmap);
bitmap &= ~(static_cast<u64>(1) << next);
where[next] += base_address;
}
/* Advance. */
where += BITSIZEOF(bitmap) - 1;
}
}
}
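        /* Illustrative note, not part of the original source: in the Relr loop above,        */
        /* assuming GetBitmap() strips the low tag bit, a bitmap of 0b101 would relocate       */
        /* where[0] and where[2], and the cursor always advances by 63 words per bitmap entry  */
        /* regardless of how many bits were set.                                               */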
}
void CallInitArrayFuncs(uintptr_t init_array_start, uintptr_t init_array_end) {
for (uintptr_t cur_entry = init_array_start; cur_entry < init_array_end; cur_entry += sizeof(void *)) {
(*(void (**)())(cur_entry))();
}
}
}
| 5,628 | C++ | .cpp | 123 | 32.325203 | 111 | 0.509843 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

| 7,008 | kern_init_slab_setup.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/init/kern_init_slab_setup.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::init {
/* For macro convenience. */
using KSessionRequestMappings = KSessionRequest::SessionMappings::DynamicMappings;
using KThreadLockInfo = KThread::LockWithPriorityInheritanceInfo;
#define SLAB_COUNT(CLASS) g_slab_resource_counts.num_##CLASS
#define FOREACH_SLAB_TYPE(HANDLER, ...) \
HANDLER(KProcess, (SLAB_COUNT(KProcess)), ## __VA_ARGS__) \
HANDLER(KThread, (SLAB_COUNT(KThread)), ## __VA_ARGS__) \
HANDLER(KEvent, (SLAB_COUNT(KEvent)), ## __VA_ARGS__) \
HANDLER(KInterruptEvent, (SLAB_COUNT(KInterruptEvent)), ## __VA_ARGS__) \
HANDLER(KPort, (SLAB_COUNT(KPort)), ## __VA_ARGS__) \
HANDLER(KSharedMemory, (SLAB_COUNT(KSharedMemory)), ## __VA_ARGS__) \
HANDLER(KSharedMemoryInfo, (SLAB_COUNT(KSharedMemory) * 8), ## __VA_ARGS__) \
HANDLER(KTransferMemory, (SLAB_COUNT(KTransferMemory)), ## __VA_ARGS__) \
HANDLER(KCodeMemory, (SLAB_COUNT(KCodeMemory)), ## __VA_ARGS__) \
HANDLER(KDeviceAddressSpace, (SLAB_COUNT(KDeviceAddressSpace)), ## __VA_ARGS__) \
HANDLER(KSession, (SLAB_COUNT(KSession)), ## __VA_ARGS__) \
HANDLER(KSessionRequest, (SLAB_COUNT(KSession) * 2), ## __VA_ARGS__) \
HANDLER(KLightSession, (SLAB_COUNT(KLightSession)), ## __VA_ARGS__) \
HANDLER(KThreadLocalPage, (SLAB_COUNT(KProcess) + (SLAB_COUNT(KProcess) + SLAB_COUNT(KThread)) / 8), ## __VA_ARGS__) \
HANDLER(KObjectName, (SLAB_COUNT(KObjectName)), ## __VA_ARGS__) \
HANDLER(KResourceLimit, (SLAB_COUNT(KResourceLimit)), ## __VA_ARGS__) \
HANDLER(KEventInfo, (SLAB_COUNT(KThread) + SLAB_COUNT(KDebug)), ## __VA_ARGS__) \
HANDLER(KDebug, (SLAB_COUNT(KDebug)), ## __VA_ARGS__) \
HANDLER(KIoPool, (SLAB_COUNT(KIoPool)), ## __VA_ARGS__) \
HANDLER(KIoRegion, (SLAB_COUNT(KIoRegion)), ## __VA_ARGS__) \
HANDLER(KSessionRequestMappings, (SLAB_COUNT(KSessionRequestMappings)), ## __VA_ARGS__) \
HANDLER(KSecureSystemResource, (SLAB_COUNT(KProcess)), ## __VA_ARGS__) \
HANDLER(KThreadLockInfo, (SLAB_COUNT(KThread)), ## __VA_ARGS__)
namespace {
#define DEFINE_SLAB_TYPE_ENUM_MEMBER(NAME, COUNT, ...) KSlabType_##NAME,
enum KSlabType : u32 {
FOREACH_SLAB_TYPE(DEFINE_SLAB_TYPE_ENUM_MEMBER)
KSlabType_Count,
};
#undef DEFINE_SLAB_TYPE_ENUM_MEMBER
/* Constexpr counts. */
constexpr size_t SlabCountKProcess = 80;
constexpr size_t SlabCountKThread = 800;
constexpr size_t SlabCountKEvent = 900;
constexpr size_t SlabCountKInterruptEvent = 100;
constexpr size_t SlabCountKPort = 384;
constexpr size_t SlabCountKSharedMemory = 80;
constexpr size_t SlabCountKTransferMemory = 200;
constexpr size_t SlabCountKCodeMemory = 10;
constexpr size_t SlabCountKDeviceAddressSpace = 300;
constexpr size_t SlabCountKSession = 1133;
constexpr size_t SlabCountKLightSession = 100;
constexpr size_t SlabCountKObjectName = 7;
constexpr size_t SlabCountKResourceLimit = 5;
constexpr size_t SlabCountKDebug = cpu::NumCores;
constexpr size_t SlabCountKIoPool = 1;
constexpr size_t SlabCountKIoRegion = 6;
        constexpr size_t SlabCountKSessionRequestMappings = 40;
constexpr size_t SlabCountExtraKThread = (1024 + 256 + 256) - SlabCountKThread;
namespace test {
constexpr size_t RequiredSizeForExtraThreadCount = SlabCountExtraKThread * (sizeof(KThread) + (sizeof(KThreadLocalPage) / 8) + sizeof(KEventInfo));
static_assert(RequiredSizeForExtraThreadCount <= KernelSlabHeapAdditionalSize);
static_assert(KernelPageBufferHeapSize == 2 * PageSize + (SlabCountKProcess + SlabCountKThread + (SlabCountKProcess + SlabCountKThread) / 8) * PageSize);
static_assert(KernelPageBufferAdditionalSize == (SlabCountExtraKThread + (SlabCountExtraKThread / 8)) * PageSize);
}
/* Global to hold our resource counts. */
constinit KSlabResourceCounts g_slab_resource_counts = {
.num_KProcess = SlabCountKProcess,
.num_KThread = SlabCountKThread,
.num_KEvent = SlabCountKEvent,
.num_KInterruptEvent = SlabCountKInterruptEvent,
.num_KPort = SlabCountKPort,
.num_KSharedMemory = SlabCountKSharedMemory,
.num_KTransferMemory = SlabCountKTransferMemory,
.num_KCodeMemory = SlabCountKCodeMemory,
.num_KDeviceAddressSpace = SlabCountKDeviceAddressSpace,
.num_KSession = SlabCountKSession,
.num_KLightSession = SlabCountKLightSession,
.num_KObjectName = SlabCountKObjectName,
.num_KResourceLimit = SlabCountKResourceLimit,
.num_KDebug = SlabCountKDebug,
.num_KIoPool = SlabCountKIoPool,
.num_KIoRegion = SlabCountKIoRegion,
            .num_KSessionRequestMappings = SlabCountKSessionRequestMappings,
};
template<typename T>
NOINLINE KVirtualAddress InitializeSlabHeap(KVirtualAddress address, size_t num_objects) {
const size_t size = util::AlignUp(sizeof(T) * num_objects, alignof(void *));
KVirtualAddress start = util::AlignUp(GetInteger(address), alignof(T));
if (size > 0) {
const KMemoryRegion *region = KMemoryLayout::Find(start + size - 1);
MESOSPHERE_ABORT_UNLESS(region != nullptr);
MESOSPHERE_ABORT_UNLESS(region->IsDerivedFrom(KMemoryRegionType_KernelSlab));
T::InitializeSlabHeap(GetVoidPointer(start), size);
}
return start + size;
}
}
const KSlabResourceCounts &GetSlabResourceCounts() {
return g_slab_resource_counts;
}
void InitializeSlabResourceCounts() {
/* Note: Nintendo initializes all fields here, but we initialize all constants at compile-time. */
if (KSystemControl::Init::ShouldIncreaseThreadResourceLimit()) {
g_slab_resource_counts.num_KThread += SlabCountExtraKThread;
}
}
size_t CalculateSlabHeapGapSize() {
constexpr size_t KernelSlabHeapGapSize = 2_MB - 356_KB;
static_assert(KernelSlabHeapGapSize <= KernelSlabHeapGapsSizeMax);
return KernelSlabHeapGapSize;
}
size_t CalculateTotalSlabHeapSize() {
size_t size = 0;
#define ADD_SLAB_SIZE(NAME, COUNT, ...) ({ \
size += alignof(NAME); \
size += util::AlignUp(sizeof(NAME) * (COUNT), alignof(void *)); \
});
/* Add the size required for each slab. */
FOREACH_SLAB_TYPE(ADD_SLAB_SIZE)
#undef ADD_SLAB_SIZE
/* Add the reserved size. */
size += CalculateSlabHeapGapSize();
return size;
}
void InitializeSlabHeaps() {
/* Get the slab region, since that's where we'll be working. */
const KMemoryRegion &slab_region = KMemoryLayout::GetSlabRegion();
KVirtualAddress address = slab_region.GetAddress();
/* Clear the slab region. */
std::memset(GetVoidPointer(address), 0, slab_region.GetSize());
/* Initialize slab type array to be in sorted order. */
KSlabType slab_types[KSlabType_Count];
for (size_t i = 0; i < util::size(slab_types); i++) { slab_types[i] = static_cast<KSlabType>(i); }
/* N shuffles the slab type array with the following simple algorithm. */
for (size_t i = 0; i < util::size(slab_types); i++) {
const size_t rnd = KSystemControl::GenerateRandomRange(0, util::size(slab_types) - 1);
std::swap(slab_types[i], slab_types[rnd]);
}
/* Create an array to represent the gaps between the slabs. */
const size_t total_gap_size = CalculateSlabHeapGapSize();
size_t slab_gaps[util::size(slab_types)];
for (size_t i = 0; i < util::size(slab_gaps); i++) {
/* Note: This is an off-by-one error from Nintendo's intention, because GenerateRandomRange is inclusive. */
/* However, Nintendo also has the off-by-one error, and it's "harmless", so we will include it ourselves. */
slab_gaps[i] = KSystemControl::GenerateRandomRange(0, total_gap_size);
}
/* Sort the array, so that we can treat differences between values as offsets to the starts of slabs. */
for (size_t i = 1; i < util::size(slab_gaps); i++) {
for (size_t j = i; j > 0 && slab_gaps[j-1] > slab_gaps[j]; j--) {
std::swap(slab_gaps[j], slab_gaps[j-1]);
}
}
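        /* Illustrative note, not part of the original source: sorting turns the uniform      */
        /* draws into ordered offsets, so the differences between consecutive values randomly  */
        /* partition the total gap size. For example, draws {5, 1, 3} sort to {1, 3, 5},       */
        /* yielding gaps of 1, 2, and 2 between the shuffled slabs.                            */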
/* Track the gaps, so that we can free them to the unused slab tree. */
KVirtualAddress gap_start = address;
size_t gap_size = 0;
for (size_t i = 0; i < util::size(slab_types); i++) {
/* Add the random gap to the address. */
const auto cur_gap = (i == 0) ? slab_gaps[0] : slab_gaps[i] - slab_gaps[i - 1];
address += cur_gap;
gap_size += cur_gap;
#define INITIALIZE_SLAB_HEAP(NAME, COUNT, ...) \
case KSlabType_##NAME: \
if (COUNT > 0) { \
address = InitializeSlabHeap<NAME>(address, COUNT); \
} \
break;
/* Initialize the slabheap. */
switch (slab_types[i]) {
/* For each of the slab types, we want to initialize that heap. */
FOREACH_SLAB_TYPE(INITIALIZE_SLAB_HEAP)
/* If we somehow get an invalid type, abort. */
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
/* If we've hit the end of a gap, free it. */
if (gap_start + gap_size != address) {
FreeUnusedSlabMemory(gap_start, gap_size);
gap_start = address;
gap_size = 0;
}
}
/* Free the end of the slab region. */
FreeUnusedSlabMemory(gap_start, gap_size + (slab_region.GetEndAddress() - GetInteger(address)));
}
}
namespace ams::kern {
void KPageBufferSlabHeap::Initialize(KDynamicPageManager &allocator) {
/* Get slab resource counts. */
const auto &counts = init::GetSlabResourceCounts();
/* If size is correct, account for thread local pages. */
if (BufferSize == PageSize) {
s_buffer_count += counts.num_KProcess + counts.num_KThread + (counts.num_KProcess + counts.num_KThread) / 8;
}
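        /* Illustrative note, not part of the original source: the /8 term matches the        */
        /* KThreadLocalPage sizing used in the slab setup above, consistent with one page      */
        /* holding eight thread-local regions (an inference from the counts, not documented).  */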
/* Set our object size. */
m_obj_size = BufferSize;
/* Initialize the base allocator. */
KSlabHeapImpl::Initialize();
/* Allocate the desired page count. */
for (size_t i = 0; i < s_buffer_count; ++i) {
/* Allocate an appropriate buffer. */
auto * const pb = (BufferSize <= PageSize) ? allocator.Allocate() : allocator.Allocate(BufferSize / PageSize);
MESOSPHERE_ABORT_UNLESS(pb != nullptr);
/* Free to our slab. */
KSlabHeapImpl::Free(pb);
}
}
}
| 13,817 | C++ | .cpp | 219 | 53.155251 | 165 | 0.533697 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

| 7,009 | kern_svc_power_management.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_power_management.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
void SleepSystem() {
return KSystemControl::SleepSystem();
}
}
/* ============================= 64 ABI ============================= */
void SleepSystem64() {
return SleepSystem();
}
/* ============================= 64From32 ABI ============================= */
void SleepSystem64From32() {
return SleepSystem();
}
}
| 1,192 | C++ | .cpp | 32 | 33 | 82 | 0.573043 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

| 7,010 | kern_svc_thread.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_thread.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
constexpr bool IsValidVirtualCoreId(int32_t core_id) {
return (0 <= core_id && core_id < static_cast<int32_t>(cpu::NumVirtualCores));
}
Result CreateThread(ams::svc::Handle *out, ams::svc::ThreadFunc f, uintptr_t arg, uintptr_t stack_bottom, int32_t priority, int32_t core_id) {
/* Adjust core id, if it's the default magic. */
KProcess &process = GetCurrentProcess();
if (core_id == ams::svc::IdealCoreUseProcessValue) {
core_id = process.GetIdealCoreId();
}
/* Validate arguments. */
R_UNLESS(IsValidVirtualCoreId(core_id), svc::ResultInvalidCoreId());
R_UNLESS(((1ul << core_id) & process.GetCoreMask()) != 0, svc::ResultInvalidCoreId());
R_UNLESS(ams::svc::HighestThreadPriority <= priority && priority <= ams::svc::LowestThreadPriority, svc::ResultInvalidPriority());
R_UNLESS(process.CheckThreadPriority(priority), svc::ResultInvalidPriority());
/* Reserve a new thread from the process resource limit (waiting up to 100ms). */
KScopedResourceReservation thread_reservation(std::addressof(process), ams::svc::LimitableResource_ThreadCountMax, 1, KHardwareTimer::GetTick() + ams::svc::Tick(TimeSpan::FromMilliSeconds(100)));
R_UNLESS(thread_reservation.Succeeded(), svc::ResultLimitReached());
/* Create the thread. */
KThread *thread = KThread::Create();
R_UNLESS(thread != nullptr, svc::ResultOutOfResource());
ON_SCOPE_EXIT { thread->Close(); };
/* Initialize the thread. */
{
KScopedLightLock lk(process.GetStateLock());
R_TRY(KThread::InitializeUserThread(thread, reinterpret_cast<KThreadFunction>(static_cast<uintptr_t>(f)), arg, stack_bottom, priority, core_id, std::addressof(process)));
}
/* Commit the thread reservation. */
thread_reservation.Commit();
/* Clone the current fpu status to the new thread. */
thread->GetContext().CloneFpuStatus();
/* Register the new thread. */
KThread::Register(thread);
/* Add the thread to the handle table. */
R_TRY(process.GetHandleTable().Add(out, thread));
R_SUCCEED();
}
Result StartThread(ams::svc::Handle thread_handle) {
/* Get the thread from its handle. */
KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject<KThread>(thread_handle);
R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle());
/* Try to start the thread. */
R_RETURN(thread->Run());
}
void ExitThread() {
GetCurrentThread().Exit();
MESOSPHERE_PANIC("Thread survived call to exit");
}
void SleepThread(int64_t ns) {
/* When the input tick is positive, sleep. */
if (AMS_LIKELY(ns > 0)) {
/* Convert the timeout from nanoseconds to ticks. */
/* NOTE: Nintendo does not use this conversion logic in WaitSynchronization... */
s64 timeout;
const ams::svc::Tick offset_tick(TimeSpan::FromNanoSeconds(ns));
if (AMS_LIKELY(offset_tick > 0)) {
timeout = KHardwareTimer::GetTick() + offset_tick + 2;
if (AMS_UNLIKELY(timeout <= 0)) {
timeout = std::numeric_limits<s64>::max();
}
} else {
timeout = std::numeric_limits<s64>::max();
}
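                    /* Illustrative note, not part of the original source: the +2 above pads  */
                    /* the deadline so that, after the floor conversion from nanoseconds to    */
                    /* ticks, the sleep can only complete late rather than early; on overflow  */
                    /* the timeout saturates to "sleep forever".                               */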
/* Sleep. */
/* NOTE: Nintendo does not check the result of this sleep. */
GetCurrentThread().Sleep(timeout);
} else if (ns == ams::svc::YieldType_WithoutCoreMigration) {
KScheduler::YieldWithoutCoreMigration();
} else if (ns == ams::svc::YieldType_WithCoreMigration) {
KScheduler::YieldWithCoreMigration();
} else if (ns == ams::svc::YieldType_ToAnyThread) {
KScheduler::YieldToAnyThread();
} else {
/* Nintendo does nothing at all if an otherwise invalid value is passed. */
}
}
Result GetThreadPriority(int32_t *out_priority, ams::svc::Handle thread_handle) {
/* Get the thread from its handle. */
KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject<KThread>(thread_handle);
R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle());
/* Get the thread's priority. */
*out_priority = thread->GetPriority();
R_SUCCEED();
}
Result SetThreadPriority(ams::svc::Handle thread_handle, int32_t priority) {
/* Get the current process. */
KProcess &process = GetCurrentProcess();
/* Get the thread from its handle. */
KScopedAutoObject thread = process.GetHandleTable().GetObject<KThread>(thread_handle);
R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle());
/* Validate the thread is owned by the current process. */
R_UNLESS(thread->GetOwnerProcess() == GetCurrentProcessPointer(), svc::ResultInvalidHandle());
/* Validate the priority. */
R_UNLESS(ams::svc::HighestThreadPriority <= priority && priority <= ams::svc::LowestThreadPriority, svc::ResultInvalidPriority());
R_UNLESS(process.CheckThreadPriority(priority), svc::ResultInvalidPriority());
/* Set the thread priority. */
thread->SetBasePriority(priority);
R_SUCCEED();
}
Result GetThreadCoreMask(int32_t *out_core_id, uint64_t *out_affinity_mask, ams::svc::Handle thread_handle) {
/* Get the thread from its handle. */
KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject<KThread>(thread_handle);
R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle());
/* Get the core mask. */
R_TRY(thread->GetCoreMask(out_core_id, out_affinity_mask));
R_SUCCEED();
}
Result SetThreadCoreMask(ams::svc::Handle thread_handle, int32_t core_id, uint64_t affinity_mask) {
/* Get the thread from its handle. */
KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject<KThread>(thread_handle);
R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle());
/* Validate the thread is owned by the current process. */
R_UNLESS(thread->GetOwnerProcess() == GetCurrentProcessPointer(), svc::ResultInvalidHandle());
/* Determine the core id/affinity mask. */
if (core_id == ams::svc::IdealCoreUseProcessValue) {
core_id = GetCurrentProcess().GetIdealCoreId();
affinity_mask = (1ul << core_id);
} else {
/* Validate the affinity mask. */
const u64 process_core_mask = GetCurrentProcess().GetCoreMask();
R_UNLESS((affinity_mask | process_core_mask) == process_core_mask, svc::ResultInvalidCoreId());
R_UNLESS(affinity_mask != 0, svc::ResultInvalidCombination());
/* Validate the core id. */
if (IsValidVirtualCoreId(core_id)) {
R_UNLESS(((1ul << core_id) & affinity_mask) != 0, svc::ResultInvalidCombination());
} else {
R_UNLESS(core_id == ams::svc::IdealCoreNoUpdate || core_id == ams::svc::IdealCoreDontCare, svc::ResultInvalidCoreId());
}
}
/* Set the core mask. */
R_TRY(thread->SetCoreMask(core_id, affinity_mask));
R_SUCCEED();
}
Result GetThreadId(uint64_t *out_thread_id, ams::svc::Handle thread_handle) {
/* Get the thread from its handle. */
KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject<KThread>(thread_handle);
R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle());
/* Get the thread's id. */
*out_thread_id = thread->GetId();
R_SUCCEED();
}
Result GetThreadContext3(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle thread_handle) {
/* Get the thread from its handle. */
KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject<KThread>(thread_handle);
R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle());
/* Require the handle be to a non-current thread in the current process. */
R_UNLESS(thread->GetOwnerProcess() == GetCurrentProcessPointer(), svc::ResultInvalidHandle());
R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(), svc::ResultBusy());
/* Get the thread context. */
ams::svc::ThreadContext context = {};
R_TRY(thread->GetThreadContext3(std::addressof(context)));
/* Copy the thread context to user space. */
R_TRY(out_context.CopyFrom(std::addressof(context)));
R_SUCCEED();
}
Result GetThreadList(int32_t *out_num_threads, KUserPointer<uint64_t *> out_thread_ids, int32_t max_out_count, ams::svc::Handle debug_handle) {
/* Only allow invoking the svc on development hardware. */
R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented());
/* Validate that the out count is valid. */
R_UNLESS((0 <= max_out_count && max_out_count <= static_cast<int32_t>(std::numeric_limits<int32_t>::max() / sizeof(u64))), svc::ResultOutOfRange());
/* Validate that the pointer is in range. */
if (max_out_count > 0) {
R_UNLESS(GetCurrentProcess().GetPageTable().Contains(KProcessAddress(out_thread_ids.GetUnsafePointer()), max_out_count * sizeof(u64)), svc::ResultInvalidCurrentMemory());
}
/* Get the handle table. */
auto &handle_table = GetCurrentProcess().GetHandleTable();
/* Try to get as a debug object. */
KScopedAutoObject debug = handle_table.GetObject<KDebug>(debug_handle);
if (debug.IsNotNull()) {
/* Check that the debug object has a process. */
R_UNLESS(debug->IsAttached(), svc::ResultProcessTerminated());
R_UNLESS(debug->OpenProcess(), svc::ResultProcessTerminated());
ON_SCOPE_EXIT { debug->CloseProcess(); };
/* Get the thread list. */
R_TRY(debug->GetProcessUnsafe()->GetThreadList(out_num_threads, out_thread_ids, max_out_count));
} else {
/* Only allow getting as a process (or global) if the caller does not have ForceDebugProd. */
R_UNLESS(!GetCurrentProcess().CanForceDebugProd(), svc::ResultInvalidHandle());
/* Try to get as a process. */
KScopedAutoObject process = handle_table.GetObjectWithoutPseudoHandle<KProcess>(debug_handle);
if (process.IsNotNull()) {
/* Get the thread list. */
R_TRY(process->GetThreadList(out_num_threads, out_thread_ids, max_out_count));
} else {
/* If the object is not a process, the caller may want the global thread list. */
R_UNLESS(debug_handle == ams::svc::InvalidHandle, svc::ResultInvalidHandle());
/* If passed invalid handle, we should return the global thread list. */
R_TRY(KThread::GetThreadList(out_num_threads, out_thread_ids, max_out_count));
}
}
R_SUCCEED();
}
}
/* ============================= 64 ABI ============================= */
Result CreateThread64(ams::svc::Handle *out_handle, ams::svc::ThreadFunc func, ams::svc::Address arg, ams::svc::Address stack_bottom, int32_t priority, int32_t core_id) {
R_RETURN(CreateThread(out_handle, func, arg, stack_bottom, priority, core_id));
}
Result StartThread64(ams::svc::Handle thread_handle) {
R_RETURN(StartThread(thread_handle));
}
void ExitThread64() {
return ExitThread();
}
void SleepThread64(int64_t ns) {
return SleepThread(ns);
}
Result GetThreadPriority64(int32_t *out_priority, ams::svc::Handle thread_handle) {
R_RETURN(GetThreadPriority(out_priority, thread_handle));
}
Result SetThreadPriority64(ams::svc::Handle thread_handle, int32_t priority) {
R_RETURN(SetThreadPriority(thread_handle, priority));
}
Result GetThreadCoreMask64(int32_t *out_core_id, uint64_t *out_affinity_mask, ams::svc::Handle thread_handle) {
R_RETURN(GetThreadCoreMask(out_core_id, out_affinity_mask, thread_handle));
}
Result SetThreadCoreMask64(ams::svc::Handle thread_handle, int32_t core_id, uint64_t affinity_mask) {
R_RETURN(SetThreadCoreMask(thread_handle, core_id, affinity_mask));
}
Result GetThreadId64(uint64_t *out_thread_id, ams::svc::Handle thread_handle) {
R_RETURN(GetThreadId(out_thread_id, thread_handle));
}
Result GetThreadContext364(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle thread_handle) {
R_RETURN(GetThreadContext3(out_context, thread_handle));
}
Result GetThreadList64(int32_t *out_num_threads, KUserPointer<uint64_t *> out_thread_ids, int32_t max_out_count, ams::svc::Handle debug_handle) {
R_RETURN(GetThreadList(out_num_threads, out_thread_ids, max_out_count, debug_handle));
}
/* ============================= 64From32 ABI ============================= */
Result CreateThread64From32(ams::svc::Handle *out_handle, ams::svc::ThreadFunc func, ams::svc::Address arg, ams::svc::Address stack_bottom, int32_t priority, int32_t core_id) {
R_RETURN(CreateThread(out_handle, func, arg, stack_bottom, priority, core_id));
}
Result StartThread64From32(ams::svc::Handle thread_handle) {
R_RETURN(StartThread(thread_handle));
}
void ExitThread64From32() {
return ExitThread();
}
void SleepThread64From32(int64_t ns) {
return SleepThread(ns);
}
Result GetThreadPriority64From32(int32_t *out_priority, ams::svc::Handle thread_handle) {
R_RETURN(GetThreadPriority(out_priority, thread_handle));
}
Result SetThreadPriority64From32(ams::svc::Handle thread_handle, int32_t priority) {
R_RETURN(SetThreadPriority(thread_handle, priority));
}
Result GetThreadCoreMask64From32(int32_t *out_core_id, uint64_t *out_affinity_mask, ams::svc::Handle thread_handle) {
R_RETURN(GetThreadCoreMask(out_core_id, out_affinity_mask, thread_handle));
}
Result SetThreadCoreMask64From32(ams::svc::Handle thread_handle, int32_t core_id, uint64_t affinity_mask) {
R_RETURN(SetThreadCoreMask(thread_handle, core_id, affinity_mask));
}
Result GetThreadId64From32(uint64_t *out_thread_id, ams::svc::Handle thread_handle) {
R_RETURN(GetThreadId(out_thread_id, thread_handle));
}
Result GetThreadContext364From32(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle thread_handle) {
R_RETURN(GetThreadContext3(out_context, thread_handle));
}
Result GetThreadList64From32(int32_t *out_num_threads, KUserPointer<uint64_t *> out_thread_ids, int32_t max_out_count, ams::svc::Handle debug_handle) {
R_RETURN(GetThreadList(out_num_threads, out_thread_ids, max_out_count, debug_handle));
}
}
| 16,767 | C++ | .cpp | 280 | 48.407143 | 207 | 0.6108 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

| 7,011 | kern_svc_event.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_event.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
Result SignalEvent(ams::svc::Handle event_handle) {
/* Get the current handle table. */
auto &handle_table = GetCurrentProcess().GetHandleTable();
/* Get the writable event. */
KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
R_UNLESS(event.IsNotNull(), svc::ResultInvalidHandle());
R_RETURN(event->Signal());
}
Result ClearEvent(ams::svc::Handle event_handle) {
/* Get the current handle table. */
auto &handle_table = GetCurrentProcess().GetHandleTable();
/* Try to clear the writable event. */
{
KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
if (event.IsNotNull()) {
R_RETURN(event->Clear());
}
}
/* Try to clear the readable event. */
{
KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(event_handle);
if (readable_event.IsNotNull()) {
if (auto * const interrupt_event = readable_event->DynamicCast<KInterruptEvent *>(); interrupt_event != nullptr) {
R_RETURN(interrupt_event->Clear());
} else {
R_RETURN(readable_event->Clear());
}
}
}
R_THROW(svc::ResultInvalidHandle());
}
Result CreateEvent(ams::svc::Handle *out_write, ams::svc::Handle *out_read) {
/* Get the current process and handle table. */
auto &process = GetCurrentProcess();
auto &handle_table = process.GetHandleTable();
/* Declare the event we're going to allocate. */
KEvent *event;
/* Reserve a new event from the process resource limit. */
KScopedResourceReservation event_reservation(std::addressof(process), ams::svc::LimitableResource_EventCountMax);
if (event_reservation.Succeeded()) {
/* Allocate an event normally. */
event = KEvent::Create();
} else {
/* We couldn't reserve an event. Check that we support dynamically expanding the resource limit. */
R_UNLESS(process.GetResourceLimit() == std::addressof(Kernel::GetSystemResourceLimit()), svc::ResultLimitReached());
R_UNLESS(KTargetSystem::IsDynamicResourceLimitsEnabled(), svc::ResultLimitReached());
/* Try to allocate an event from unused slab memory. */
event = KEvent::CreateFromUnusedSlabMemory();
R_UNLESS(event != nullptr, svc::ResultLimitReached());
/* We successfully allocated an event, so add the object we allocated to the resource limit. */
Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_EventCountMax, 1);
}
/* Check that we successfully created an event. */
R_UNLESS(event != nullptr, svc::ResultOutOfResource());
/* Initialize the event. */
event->Initialize();
/* Commit the event reservation. */
event_reservation.Commit();
/* Ensure that we clean up the event (and its only references are handle table) on function end. */
ON_SCOPE_EXIT {
event->GetReadableEvent().Close();
event->Close();
};
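            /* Illustrative note, not part of the original source: the Close() calls above    */
            /* drop only the creation references; once the Add() calls below succeed, the      */
            /* handle table holds its own references, so the event lives exactly as long as    */
            /* its handles do.                                                                 */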
/* Register the event. */
KEvent::Register(event);
/* Add the event to the handle table. */
R_TRY(handle_table.Add(out_write, event));
            /* Ensure that we maintain a clean handle state on exit. */
ON_RESULT_FAILURE { handle_table.Remove(*out_write); };
/* Add the readable event to the handle table. */
R_RETURN(handle_table.Add(out_read, std::addressof(event->GetReadableEvent())));
}
}
/* ============================= 64 ABI ============================= */
Result SignalEvent64(ams::svc::Handle event_handle) {
R_RETURN(SignalEvent(event_handle));
}
Result ClearEvent64(ams::svc::Handle event_handle) {
R_RETURN(ClearEvent(event_handle));
}
Result CreateEvent64(ams::svc::Handle *out_write_handle, ams::svc::Handle *out_read_handle) {
R_RETURN(CreateEvent(out_write_handle, out_read_handle));
}
/* ============================= 64From32 ABI ============================= */
Result SignalEvent64From32(ams::svc::Handle event_handle) {
R_RETURN(SignalEvent(event_handle));
}
Result ClearEvent64From32(ams::svc::Handle event_handle) {
R_RETURN(ClearEvent(event_handle));
}
Result CreateEvent64From32(ams::svc::Handle *out_write_handle, ams::svc::Handle *out_read_handle) {
R_RETURN(CreateEvent(out_write_handle, out_read_handle));
}
}
| 5,811 | C++ | .cpp | 113 | 40.415929 | 134 | 0.581642 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |

| 7,012 | kern_svc_kernel_debug.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_kernel_debug.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
void KernelDebug(ams::svc::KernelDebugType kern_debug_type, uint64_t arg0, uint64_t arg1, uint64_t arg2) {
MESOSPHERE_UNUSED(kern_debug_type, arg0, arg1, arg2);
#ifdef MESOSPHERE_BUILD_FOR_DEBUGGING
{
switch (kern_debug_type) {
case ams::svc::KernelDebugType_Thread:
if (arg0 == static_cast<u64>(-1)) {
KDumpObject::DumpThread();
} else {
KDumpObject::DumpThread(arg0);
}
break;
case ams::svc::KernelDebugType_ThreadCallStack:
if (arg0 == static_cast<u64>(-1)) {
KDumpObject::DumpThreadCallStack();
} else {
KDumpObject::DumpThreadCallStack(arg0);
}
break;
case ams::svc::KernelDebugType_KernelObject:
KDumpObject::DumpKernelObject();
break;
case ams::svc::KernelDebugType_Handle:
if (arg0 == static_cast<u64>(-1)) {
KDumpObject::DumpHandle();
} else {
KDumpObject::DumpHandle(arg0);
}
break;
case ams::svc::KernelDebugType_Memory:
if (arg0 == static_cast<u64>(-2)) {
KDumpObject::DumpKernelMemory();
} else if (arg0 == static_cast<u64>(-1)) {
KDumpObject::DumpMemory();
} else {
KDumpObject::DumpMemory(arg0);
}
break;
case ams::svc::KernelDebugType_PageTable:
if (arg0 == static_cast<u64>(-2)) {
KDumpObject::DumpKernelPageTable();
} else if (arg0 == static_cast<u64>(-1)) {
KDumpObject::DumpPageTable();
} else {
KDumpObject::DumpPageTable(arg0);
}
break;
case ams::svc::KernelDebugType_CpuUtilization:
{
const auto old_prio = GetCurrentThread().GetBasePriority();
GetCurrentThread().SetBasePriority(3);
if (arg0 == static_cast<u64>(-2)) {
KDumpObject::DumpKernelCpuUtilization();
} else if (arg0 == static_cast<u64>(-1)) {
KDumpObject::DumpCpuUtilization();
} else {
KDumpObject::DumpCpuUtilization(arg0);
}
GetCurrentThread().SetBasePriority(old_prio);
}
break;
case ams::svc::KernelDebugType_Process:
if (arg0 == static_cast<u64>(-1)) {
KDumpObject::DumpProcess();
} else {
KDumpObject::DumpProcess(arg0);
}
break;
case ams::svc::KernelDebugType_SuspendProcess:
if (KProcess *process = KProcess::GetProcessFromId(arg0); process != nullptr) {
ON_SCOPE_EXIT { process->Close(); };
if (R_SUCCEEDED(process->SetActivity(ams::svc::ProcessActivity_Paused))) {
MESOSPHERE_RELEASE_LOG("Suspend Process ID=%3lu\n", process->GetId());
}
}
break;
case ams::svc::KernelDebugType_ResumeProcess:
if (KProcess *process = KProcess::GetProcessFromId(arg0); process != nullptr) {
ON_SCOPE_EXIT { process->Close(); };
if (R_SUCCEEDED(process->SetActivity(ams::svc::ProcessActivity_Runnable))) {
MESOSPHERE_RELEASE_LOG("Resume Process ID=%3lu\n", process->GetId());
}
}
break;
case ams::svc::KernelDebugType_Port:
if (arg0 == static_cast<u64>(-1)) {
KDumpObject::DumpPort();
} else {
KDumpObject::DumpPort(arg0);
}
break;
default:
break;
}
}
#endif
}
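        /* Note: on non-debug builds the entire switch above is compiled out by the */
        /* MESOSPHERE_BUILD_FOR_DEBUGGING guard, and MESOSPHERE_UNUSED keeps the then */
        /* unreferenced arguments from triggering compiler warnings. */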
void ChangeKernelTraceState(ams::svc::KernelTraceState kern_trace_state) {
#ifdef MESOSPHERE_BUILD_FOR_DEBUGGING
{
switch (kern_trace_state) {
case ams::svc::KernelTraceState_Enabled:
{
MESOSPHERE_KTRACE_RESUME();
}
break;
case ams::svc::KernelTraceState_Disabled:
{
MESOSPHERE_KTRACE_PAUSE();
}
break;
default:
break;
}
}
#else
{
MESOSPHERE_UNUSED(kern_trace_state);
}
#endif
}
}
/* ============================= 64 ABI ============================= */
void KernelDebug64(ams::svc::KernelDebugType kern_debug_type, uint64_t arg0, uint64_t arg1, uint64_t arg2) {
return KernelDebug(kern_debug_type, arg0, arg1, arg2);
}
void ChangeKernelTraceState64(ams::svc::KernelTraceState kern_trace_state) {
return ChangeKernelTraceState(kern_trace_state);
}
/* ============================= 64From32 ABI ============================= */
void KernelDebug64From32(ams::svc::KernelDebugType kern_debug_type, uint64_t arg0, uint64_t arg1, uint64_t arg2) {
return KernelDebug(kern_debug_type, arg0, arg1, arg2);
}
void ChangeKernelTraceState64From32(ams::svc::KernelTraceState kern_trace_state) {
return ChangeKernelTraceState(kern_trace_state);
}
}
| 7,419 | C++ | .cpp | 156 | 28.121795 | 118 | 0.443892 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
7,013 | kern_svc_debug.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_debug.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
constexpr inline int32_t MaximumDebuggableThreadCount = 0x60;
Result DebugActiveProcess(ams::svc::Handle *out_handle, uint64_t process_id) {
/* Check that the SVC can be used. */
R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented());
/* Get the process from its id. */
KProcess *process = KProcess::GetProcessFromId(process_id);
R_UNLESS(process != nullptr, svc::ResultInvalidProcessId());
/* Close the reference we opened to the process on scope exit. */
ON_SCOPE_EXIT { process->Close(); };
/* Check that the debugging is allowed. */
const bool allowable = process->IsPermittedDebug() || GetCurrentProcess().CanForceDebug() || GetCurrentProcess().CanForceDebugProd();
R_UNLESS(allowable, svc::ResultInvalidState());
/* Disallow debugging one's own process, to prevent softlocks. */
R_UNLESS(process != GetCurrentProcessPointer(), svc::ResultInvalidState());
/* Get the current handle table. */
auto &handle_table = GetCurrentProcess().GetHandleTable();
/* Create a new debug object. */
KDebug *debug = KDebug::Create();
R_UNLESS(debug != nullptr, svc::ResultOutOfResource());
ON_SCOPE_EXIT { debug->Close(); };
/* Initialize the debug object. */
debug->Initialize();
/* Register the debug object. */
KDebug::Register(debug);
/* Try to attach to the target process. */
R_TRY(debug->Attach(process));
/* Add the new debug object to the handle table. */
R_TRY(handle_table.Add(out_handle, debug));
R_SUCCEED();
}
Result BreakDebugProcess(ams::svc::Handle debug_handle) {
/* Only allow invoking the svc on development hardware. */
R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented());
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
/* Break the process. */
R_TRY(debug->BreakProcess());
R_SUCCEED();
}
Result TerminateDebugProcess(ams::svc::Handle debug_handle) {
/* Only allow invoking the svc on development hardware. */
R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented());
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
/* Terminate the process. */
R_TRY(debug->TerminateProcess());
R_SUCCEED();
}
template<typename EventInfoType>
Result GetDebugEvent(KUserPointer<EventInfoType *> out_info, ams::svc::Handle debug_handle) {
/* Only allow invoking the svc on development hardware or if force debug prod. */
R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented());
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
/* Create and clear a new event info. */
EventInfoType info;
std::memset(std::addressof(info), 0, sizeof(info));
/* Get the next info from the debug object. */
R_TRY(debug->GetDebugEventInfo(std::addressof(info)));
/* Copy the info out to the user. */
R_TRY(out_info.CopyFrom(std::addressof(info)));
R_SUCCEED();
}
Result ContinueDebugEventImpl(ams::svc::Handle debug_handle, uint32_t flags, const uint64_t *thread_ids, int32_t num_thread_ids) {
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
/* Continue the event. */
R_TRY(debug->ContinueDebug(flags, thread_ids, num_thread_ids));
R_SUCCEED();
}
Result ContinueDebugEvent(ams::svc::Handle debug_handle, uint32_t flags, KUserPointer<const uint64_t *> user_thread_ids, int32_t num_thread_ids) {
/* Only allow invoking the svc on development hardware. */
R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented());
/* Verify that the flags are valid. */
R_UNLESS((flags | ams::svc::ContinueFlag_AllMask) == ams::svc::ContinueFlag_AllMask, svc::ResultInvalidEnumValue());
/* Verify that continue all and continue others flags are exclusive. */
constexpr u32 AllAndOthersMask = ams::svc::ContinueFlag_ContinueAll | ams::svc::ContinueFlag_ContinueOthers;
R_UNLESS((flags & AllAndOthersMask) != AllAndOthersMask, svc::ResultInvalidEnumValue());
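            /* Note on the two checks above: (flags | Mask) == Mask is a branch-free subset */
            /* test that passes exactly when flags sets no bit outside Mask, while */
            /* (flags & AllAndOthersMask) != AllAndOthersMask rejects the one combination */
            /* where both mutually exclusive bits are set. For example, with Mask = 0b0111, */
            /* flags = 0b0101 passes (0b0111 == Mask) but flags = 0b1001 fails (0b1111 != Mask). */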
/* Verify that the number of thread ids is valid. */
R_UNLESS((0 <= num_thread_ids && num_thread_ids <= MaximumDebuggableThreadCount), svc::ResultOutOfRange());
/* Copy the threads from userspace. */
uint64_t thread_ids[MaximumDebuggableThreadCount];
if (num_thread_ids > 0) {
R_TRY(user_thread_ids.CopyArrayTo(thread_ids, num_thread_ids));
}
/* Continue the event. */
R_TRY(ContinueDebugEventImpl(debug_handle, flags, thread_ids, num_thread_ids));
R_SUCCEED();
}
Result LegacyContinueDebugEvent(ams::svc::Handle debug_handle, uint32_t flags, uint64_t thread_id) {
/* Only allow invoking the svc on development hardware. */
R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented());
/* Verify that the flags are valid. */
R_UNLESS((flags | ams::svc::ContinueFlag_AllMask) == ams::svc::ContinueFlag_AllMask, svc::ResultInvalidEnumValue());
/* Verify that continue all and continue others flags are exclusive. */
constexpr u32 AllAndOthersMask = ams::svc::ContinueFlag_ContinueAll | ams::svc::ContinueFlag_ContinueOthers;
R_UNLESS((flags & AllAndOthersMask) != AllAndOthersMask, svc::ResultInvalidEnumValue());
/* Continue the event. */
R_TRY(ContinueDebugEventImpl(debug_handle, flags, std::addressof(thread_id), 1));
R_SUCCEED();
}
Result GetDebugThreadContext(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle debug_handle, uint64_t thread_id, uint32_t context_flags) {
/* Only allow invoking the svc on development hardware or if force debug prod. */
R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented());
/* Validate the context flags. */
R_UNLESS((context_flags | ams::svc::ThreadContextFlag_All) == ams::svc::ThreadContextFlag_All, svc::ResultInvalidEnumValue());
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
/* Get the thread context. */
ams::svc::ThreadContext context = {};
R_TRY(debug->GetThreadContext(std::addressof(context), thread_id, context_flags));
/* Copy the context to userspace. */
R_TRY(out_context.CopyFrom(std::addressof(context)));
R_SUCCEED();
}
Result SetDebugThreadContext(ams::svc::Handle debug_handle, uint64_t thread_id, KUserPointer<const ams::svc::ThreadContext *> user_context, uint32_t context_flags) {
/* Only allow invoking the svc on development hardware. */
R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented());
/* Validate the context flags. */
#if defined(MESOSPHERE_ENABLE_HARDWARE_SINGLE_STEP)
{
/* Check that the flags are a subset of the allowable. */
constexpr u32 AllFlagsMask = ams::svc::ThreadContextFlag_All | ams::svc::ThreadContextFlag_SetSingleStep | ams::svc::ThreadContextFlag_ClearSingleStep;
R_UNLESS((context_flags | AllFlagsMask) == AllFlagsMask, svc::ResultInvalidEnumValue());
/* Check that thread isn't both setting and clearing single step. */
const bool set_ss = (context_flags & ams::svc::ThreadContextFlag_SetSingleStep) != 0;
const bool clear_ss = (context_flags & ams::svc::ThreadContextFlag_ClearSingleStep) != 0;
R_UNLESS(!(set_ss && clear_ss), svc::ResultInvalidEnumValue());
}
#else
{
/* Check that the flags are a subset of the allowable. */
R_UNLESS((context_flags | ams::svc::ThreadContextFlag_All) == ams::svc::ThreadContextFlag_All, svc::ResultInvalidEnumValue());
}
#endif
/* Copy the thread context from userspace. */
ams::svc::ThreadContext context;
R_TRY(user_context.CopyTo(std::addressof(context)));
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
/* Set the thread context. */
R_TRY(debug->SetThreadContext(context, thread_id, context_flags));
R_SUCCEED();
}
Result QueryDebugProcessMemory(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle debug_handle, uintptr_t address) {
/* Only allow invoking the svc on development hardware or if force debug prod. */
R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented());
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
/* Query the mapping's info. */
R_TRY(debug->QueryMemoryInfo(out_memory_info, out_page_info, address));
R_SUCCEED();
}
template<typename T>
Result QueryDebugProcessMemory(KUserPointer<T *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle debug_handle, uint64_t address) {
/* Get an ams::svc::MemoryInfo for the region. */
ams::svc::MemoryInfo info = {};
R_TRY(QueryDebugProcessMemory(std::addressof(info), out_page_info, debug_handle, address));
/* Copy the info to userspace. */
if constexpr (std::same_as<T, ams::svc::MemoryInfo>) {
R_TRY(out_memory_info.CopyFrom(std::addressof(info)));
} else {
/* Convert the info. */
T converted_info = {};
static_assert(std::same_as<decltype(T{}.base_address), decltype(ams::svc::MemoryInfo{}.base_address)>);
static_assert(std::same_as<decltype(T{}.size), decltype(ams::svc::MemoryInfo{}.size)>);
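                /* Note: only base_address and size are statically asserted to match, since the */
                /* remaining fields are copied member-by-member below and any width or padding */
                /* differences between the ilp32 and lp64 structs are absorbed by ordinary */
                /* integral conversion. */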
converted_info.base_address = info.base_address;
converted_info.size = info.size;
converted_info.state = info.state;
converted_info.attribute = info.attribute;
converted_info.permission = info.permission;
converted_info.ipc_count = info.ipc_count;
converted_info.device_count = info.device_count;
/* Copy it. */
R_TRY(out_memory_info.CopyFrom(std::addressof(converted_info)));
}
R_SUCCEED();
}
Result ReadDebugProcessMemory(uintptr_t buffer, ams::svc::Handle debug_handle, uintptr_t address, size_t size) {
/* Only allow invoking the svc on development hardware or if force debug prod. */
R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented());
/* Validate address / size. */
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS((buffer < buffer + size), svc::ResultInvalidCurrentMemory());
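            /* Note: with unsigned arithmetic and size > 0 already checked, the two checks */
            /* above fail exactly when address + size (or buffer + size) wraps around the */
            /* top of the address space, e.g. address = ~0ul - 0xFFF with size = 0x2000. */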
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
/* Read the memory. */
R_TRY(debug->ReadMemory(buffer, address, size));
R_SUCCEED();
}
Result WriteDebugProcessMemory(ams::svc::Handle debug_handle, uintptr_t buffer, uintptr_t address, size_t size) {
/* Only allow invoking the svc on development hardware. */
R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented());
/* Validate address / size. */
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS((buffer < buffer + size), svc::ResultInvalidCurrentMemory());
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
/* Write the memory. */
R_TRY(debug->WriteMemory(buffer, address, size));
R_SUCCEED();
}
Result SetHardwareBreakPoint(ams::svc::HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value) {
/* Only allow invoking the svc on development hardware. */
R_UNLESS(KTargetSystem::IsDebugMode(), svc::ResultNotImplemented());
/* Set the breakpoint. */
R_TRY(KDebug::SetHardwareBreakPoint(name, flags, value));
R_SUCCEED();
}
Result GetDebugThreadParam(uint64_t *out_64, uint32_t *out_32, ams::svc::Handle debug_handle, uint64_t thread_id, ams::svc::DebugThreadParam param) {
/* Only allow invoking the svc on development hardware or if force debug prod. */
R_UNLESS(KTargetSystem::IsDebugMode() || GetCurrentProcess().CanForceDebugProd(), svc::ResultNotImplemented());
/* Get the debug object. */
KScopedAutoObject debug = GetCurrentProcess().GetHandleTable().GetObject<KDebug>(debug_handle);
R_UNLESS(debug.IsNotNull(), svc::ResultInvalidHandle());
/* Get the thread from its id. */
KThread *thread = KThread::GetThreadFromId(thread_id);
R_UNLESS(thread != nullptr, svc::ResultInvalidThreadId());
ON_SCOPE_EXIT { thread->Close(); };
/* Get the process from the debug object. */
R_UNLESS(debug->IsAttached(), svc::ResultProcessTerminated());
R_UNLESS(debug->OpenProcess(), svc::ResultProcessTerminated());
/* Close the process when we're done. */
ON_SCOPE_EXIT { debug->CloseProcess(); };
/* Get the process. */
KProcess * const process = debug->GetProcessUnsafe();
/* Verify that the process is the thread's parent. */
R_UNLESS(process == thread->GetOwnerProcess(), svc::ResultInvalidThreadId());
/* Get the parameter. */
switch (param) {
case ams::svc::DebugThreadParam_Priority:
{
/* Get the priority. */
*out_32 = thread->GetPriority();
}
break;
case ams::svc::DebugThreadParam_State:
{
/* Get the thread state and suspend status. */
KThread::ThreadState state;
bool suspended_user;
bool suspended_debug;
{
KScopedSchedulerLock sl;
state = thread->GetState();
suspended_user = thread->IsSuspendRequested(KThread::SuspendType_Thread);
suspended_debug = thread->IsSuspendRequested(KThread::SuspendType_Debug);
}
/* Set the suspend flags. */
*out_32 = 0;
if (suspended_user) {
*out_32 |= ams::svc::ThreadSuspend_User;
}
if (suspended_debug) {
*out_32 |= ams::svc::ThreadSuspend_Debug;
}
/* Set the state. */
switch (state) {
case KThread::ThreadState_Initialized:
{
*out_64 = ams::svc::ThreadState_Initializing;
}
break;
case KThread::ThreadState_Waiting:
{
*out_64 = ams::svc::ThreadState_Waiting;
}
break;
case KThread::ThreadState_Runnable:
{
*out_64 = ams::svc::ThreadState_Running;
}
break;
case KThread::ThreadState_Terminated:
{
*out_64 = ams::svc::ThreadState_Terminated;
}
break;
default:
R_THROW(svc::ResultInvalidState());
}
}
break;
case ams::svc::DebugThreadParam_IdealCore:
{
/* Get the ideal core. */
s32 core_id;
u64 affinity_mask;
thread->GetPhysicalCoreMask(std::addressof(core_id), std::addressof(affinity_mask));
*out_32 = core_id;
}
break;
case ams::svc::DebugThreadParam_CurrentCore:
{
/* Get the current core. */
*out_32 = thread->GetActiveCore();
}
break;
case ams::svc::DebugThreadParam_AffinityMask:
{
/* Get the affinity mask. */
s32 core_id;
u64 affinity_mask;
thread->GetPhysicalCoreMask(std::addressof(core_id), std::addressof(affinity_mask));
*out_32 = affinity_mask;
}
break;
default:
R_THROW(ams::svc::ResultInvalidEnumValue());
}
R_SUCCEED();
}
}
/* ============================= 64 ABI ============================= */
Result DebugActiveProcess64(ams::svc::Handle *out_handle, uint64_t process_id) {
R_RETURN(DebugActiveProcess(out_handle, process_id));
}
Result BreakDebugProcess64(ams::svc::Handle debug_handle) {
R_RETURN(BreakDebugProcess(debug_handle));
}
Result TerminateDebugProcess64(ams::svc::Handle debug_handle) {
R_RETURN(TerminateDebugProcess(debug_handle));
}
Result GetDebugEvent64(KUserPointer<ams::svc::lp64::DebugEventInfo *> out_info, ams::svc::Handle debug_handle) {
R_RETURN(GetDebugEvent(out_info, debug_handle));
}
Result ContinueDebugEvent64(ams::svc::Handle debug_handle, uint32_t flags, KUserPointer<const uint64_t *> thread_ids, int32_t num_thread_ids) {
R_RETURN(ContinueDebugEvent(debug_handle, flags, thread_ids, num_thread_ids));
}
Result LegacyContinueDebugEvent64(ams::svc::Handle debug_handle, uint32_t flags, uint64_t thread_id) {
R_RETURN(LegacyContinueDebugEvent(debug_handle, flags, thread_id));
}
Result GetDebugThreadContext64(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle debug_handle, uint64_t thread_id, uint32_t context_flags) {
R_RETURN(GetDebugThreadContext(out_context, debug_handle, thread_id, context_flags));
}
Result SetDebugThreadContext64(ams::svc::Handle debug_handle, uint64_t thread_id, KUserPointer<const ams::svc::ThreadContext *> context, uint32_t context_flags) {
R_RETURN(SetDebugThreadContext(debug_handle, thread_id, context, context_flags));
}
Result QueryDebugProcessMemory64(KUserPointer<ams::svc::lp64::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle debug_handle, ams::svc::Address address) {
R_RETURN(QueryDebugProcessMemory(out_memory_info, out_page_info, debug_handle, address));
}
Result ReadDebugProcessMemory64(ams::svc::Address buffer, ams::svc::Handle debug_handle, ams::svc::Address address, ams::svc::Size size) {
R_RETURN(ReadDebugProcessMemory(buffer, debug_handle, address, size));
}
Result WriteDebugProcessMemory64(ams::svc::Handle debug_handle, ams::svc::Address buffer, ams::svc::Address address, ams::svc::Size size) {
R_RETURN(WriteDebugProcessMemory(debug_handle, buffer, address, size));
}
Result SetHardwareBreakPoint64(ams::svc::HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value) {
R_RETURN(SetHardwareBreakPoint(name, flags, value));
}
Result GetDebugThreadParam64(uint64_t *out_64, uint32_t *out_32, ams::svc::Handle debug_handle, uint64_t thread_id, ams::svc::DebugThreadParam param) {
R_RETURN(GetDebugThreadParam(out_64, out_32, debug_handle, thread_id, param));
}
/* ============================= 64From32 ABI ============================= */
Result DebugActiveProcess64From32(ams::svc::Handle *out_handle, uint64_t process_id) {
R_RETURN(DebugActiveProcess(out_handle, process_id));
}
Result BreakDebugProcess64From32(ams::svc::Handle debug_handle) {
R_RETURN(BreakDebugProcess(debug_handle));
}
Result TerminateDebugProcess64From32(ams::svc::Handle debug_handle) {
R_RETURN(TerminateDebugProcess(debug_handle));
}
Result GetDebugEvent64From32(KUserPointer<ams::svc::ilp32::DebugEventInfo *> out_info, ams::svc::Handle debug_handle) {
R_RETURN(GetDebugEvent(out_info, debug_handle));
}
Result ContinueDebugEvent64From32(ams::svc::Handle debug_handle, uint32_t flags, KUserPointer<const uint64_t *> thread_ids, int32_t num_thread_ids) {
R_RETURN(ContinueDebugEvent(debug_handle, flags, thread_ids, num_thread_ids));
}
Result LegacyContinueDebugEvent64From32(ams::svc::Handle debug_handle, uint32_t flags, uint64_t thread_id) {
R_RETURN(LegacyContinueDebugEvent(debug_handle, flags, thread_id));
}
Result GetDebugThreadContext64From32(KUserPointer<ams::svc::ThreadContext *> out_context, ams::svc::Handle debug_handle, uint64_t thread_id, uint32_t context_flags) {
R_RETURN(GetDebugThreadContext(out_context, debug_handle, thread_id, context_flags));
}
Result SetDebugThreadContext64From32(ams::svc::Handle debug_handle, uint64_t thread_id, KUserPointer<const ams::svc::ThreadContext *> context, uint32_t context_flags) {
R_RETURN(SetDebugThreadContext(debug_handle, thread_id, context, context_flags));
}
Result QueryDebugProcessMemory64From32(KUserPointer<ams::svc::ilp32::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle debug_handle, ams::svc::Address address) {
R_RETURN(QueryDebugProcessMemory(out_memory_info, out_page_info, debug_handle, address));
}
Result ReadDebugProcessMemory64From32(ams::svc::Address buffer, ams::svc::Handle debug_handle, ams::svc::Address address, ams::svc::Size size) {
R_RETURN(ReadDebugProcessMemory(buffer, debug_handle, address, size));
}
Result WriteDebugProcessMemory64From32(ams::svc::Handle debug_handle, ams::svc::Address buffer, ams::svc::Address address, ams::svc::Size size) {
R_RETURN(WriteDebugProcessMemory(debug_handle, buffer, address, size));
}
Result SetHardwareBreakPoint64From32(ams::svc::HardwareBreakPointRegisterName name, uint64_t flags, uint64_t value) {
R_RETURN(SetHardwareBreakPoint(name, flags, value));
}
Result GetDebugThreadParam64From32(uint64_t *out_64, uint32_t *out_32, ams::svc::Handle debug_handle, uint64_t thread_id, ams::svc::DebugThreadParam param) {
R_RETURN(GetDebugThreadParam(out_64, out_32, debug_handle, thread_id, param));
}
}
| 26,277 | C++ | .cpp | 424 | 48.127358 | 198 | 0.601485 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
7,014 | kern_svc_process_memory.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_process_memory.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
constexpr bool IsValidProcessMemoryPermission(ams::svc::MemoryPermission perm) {
switch (perm) {
case ams::svc::MemoryPermission_None:
case ams::svc::MemoryPermission_Read:
case ams::svc::MemoryPermission_ReadWrite:
case ams::svc::MemoryPermission_ReadExecute:
case ams::svc::MemoryPermission_Execute:
return true;
default:
return false;
}
}
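        /* Note: no writable-and-executable combination appears in the switch above, so a */
        /* process image can never be mapped writable and executable at once through this svc. */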
Result SetProcessMemoryPermission(ams::svc::Handle process_handle, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
/* Validate the address/size. */
R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS(address == static_cast<uintptr_t>(address), svc::ResultInvalidCurrentMemory());
R_UNLESS(size == static_cast<size_t>(size), svc::ResultInvalidCurrentMemory());
/* Validate the memory permission. */
R_UNLESS(IsValidProcessMemoryPermission(perm), svc::ResultInvalidNewMemoryPermission());
/* Get the process from its handle. */
KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject<KProcess>(process_handle);
R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle());
/* Validate that the address is in range. */
auto &page_table = process->GetPageTable();
R_UNLESS(page_table.Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Set the memory permission. */
R_RETURN(page_table.SetProcessMemoryPermission(address, size, perm));
}
Result MapProcessMemory(uintptr_t dst_address, ams::svc::Handle process_handle, uint64_t src_address, size_t size) {
/* Validate the address/size. */
R_UNLESS(util::IsAligned(dst_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(src_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((dst_address < dst_address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS((src_address < src_address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS(src_address == static_cast<uintptr_t>(src_address), svc::ResultInvalidCurrentMemory());
/* Get the processes. */
KProcess *dst_process = GetCurrentProcessPointer();
KScopedAutoObject src_process = dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
R_UNLESS(src_process.IsNotNull(), svc::ResultInvalidHandle());
/* Get the page tables. */
auto &dst_pt = dst_process->GetPageTable();
auto &src_pt = src_process->GetPageTable();
/* Validate that the mapping is in range. */
R_UNLESS(src_pt.Contains(src_address, size), svc::ResultInvalidCurrentMemory());
R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState_SharedCode), svc::ResultInvalidMemoryRegion());
/* Create a new page group. */
KPageGroup pg(dst_pt.GetBlockInfoManager());
/* Make the page group. */
R_TRY(src_pt.MakeAndOpenPageGroup(std::addressof(pg),
src_address, size / PageSize,
KMemoryState_FlagCanMapProcess, KMemoryState_FlagCanMapProcess,
KMemoryPermission_None, KMemoryPermission_None,
KMemoryAttribute_All, KMemoryAttribute_None));
/* Close the page group when we're done. */
ON_SCOPE_EXIT { pg.Close(); };
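            /* Note: as its name suggests, MakeAndOpenPageGroup opens references on the */
            /* underlying physical pages, so they remain live while being mapped into the */
            /* destination page table below; pg.Close() drops those references afterwards. */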
/* Map the group. */
R_TRY(dst_pt.MapPageGroup(dst_address, pg, KMemoryState_SharedCode, KMemoryPermission_UserReadWrite));
R_SUCCEED();
}
Result UnmapProcessMemory(uintptr_t dst_address, ams::svc::Handle process_handle, uint64_t src_address, size_t size) {
/* Validate the address/size. */
R_UNLESS(util::IsAligned(dst_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(src_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((dst_address < dst_address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS((src_address < src_address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS(src_address == static_cast<uintptr_t>(src_address), svc::ResultInvalidCurrentMemory());
/* Get the processes. */
KProcess *dst_process = GetCurrentProcessPointer();
KScopedAutoObject src_process = dst_process->GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
R_UNLESS(src_process.IsNotNull(), svc::ResultInvalidHandle());
/* Get the page tables. */
auto &dst_pt = dst_process->GetPageTable();
auto &src_pt = src_process->GetPageTable();
/* Validate that the mapping is in range. */
R_UNLESS(src_pt.Contains(src_address, size), svc::ResultInvalidCurrentMemory());
R_UNLESS(dst_pt.CanContain(dst_address, size, KMemoryState_SharedCode), svc::ResultInvalidMemoryRegion());
/* Unmap the memory. */
R_TRY(dst_pt.UnmapProcessMemory(dst_address, size, src_pt, src_address));
R_SUCCEED();
}
Result MapProcessCodeMemory(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
/* Validate the address/size. */
R_UNLESS(util::IsAligned(dst_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(src_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((dst_address < dst_address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS((src_address < src_address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS(src_address == static_cast<uintptr_t>(src_address), svc::ResultInvalidCurrentMemory());
R_UNLESS(dst_address == static_cast<uintptr_t>(dst_address), svc::ResultInvalidCurrentMemory());
R_UNLESS(size == static_cast<size_t>(size), svc::ResultInvalidCurrentMemory());
/* Get the process from its handle. */
KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle());
/* Validate that the mapping is in range. */
auto &page_table = process->GetPageTable();
R_UNLESS(page_table.Contains(src_address, size), svc::ResultInvalidCurrentMemory());
R_UNLESS(page_table.CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidCurrentMemory());
/* Map the memory. */
R_TRY(page_table.MapCodeMemory(dst_address, src_address, size));
R_SUCCEED();
}
Result UnmapProcessCodeMemory(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
/* Validate the address/size. */
R_UNLESS(util::IsAligned(dst_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(src_address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((dst_address < dst_address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS((src_address < src_address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS(src_address == static_cast<uintptr_t>(src_address), svc::ResultInvalidCurrentMemory());
R_UNLESS(dst_address == static_cast<uintptr_t>(dst_address), svc::ResultInvalidCurrentMemory());
R_UNLESS(size == static_cast<size_t>(size), svc::ResultInvalidCurrentMemory());
/* Get the process from its handle. */
KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObjectWithoutPseudoHandle<KProcess>(process_handle);
R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle());
/* Validate that the mapping is in range. */
auto &page_table = process->GetPageTable();
R_UNLESS(page_table.Contains(src_address, size), svc::ResultInvalidCurrentMemory());
R_UNLESS(page_table.CanContain(dst_address, size, KMemoryState_AliasCode), svc::ResultInvalidCurrentMemory());
/* Unmap the memory. */
R_TRY(page_table.UnmapCodeMemory(dst_address, src_address, size));
R_SUCCEED();
}
}
/* ============================= 64 ABI ============================= */
Result SetProcessMemoryPermission64(ams::svc::Handle process_handle, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
R_RETURN(SetProcessMemoryPermission(process_handle, address, size, perm));
}
Result MapProcessMemory64(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) {
R_RETURN(MapProcessMemory(dst_address, process_handle, src_address, size));
}
Result UnmapProcessMemory64(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) {
R_RETURN(UnmapProcessMemory(dst_address, process_handle, src_address, size));
}
Result MapProcessCodeMemory64(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
R_RETURN(MapProcessCodeMemory(process_handle, dst_address, src_address, size));
}
Result UnmapProcessCodeMemory64(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
R_RETURN(UnmapProcessCodeMemory(process_handle, dst_address, src_address, size));
}
/* ============================= 64From32 ABI ============================= */
Result SetProcessMemoryPermission64From32(ams::svc::Handle process_handle, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
R_RETURN(SetProcessMemoryPermission(process_handle, address, size, perm));
}
Result MapProcessMemory64From32(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) {
R_RETURN(MapProcessMemory(dst_address, process_handle, src_address, size));
}
Result UnmapProcessMemory64From32(ams::svc::Address dst_address, ams::svc::Handle process_handle, uint64_t src_address, ams::svc::Size size) {
R_RETURN(UnmapProcessMemory(dst_address, process_handle, src_address, size));
}
Result MapProcessCodeMemory64From32(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
R_RETURN(MapProcessCodeMemory(process_handle, dst_address, src_address, size));
}
Result UnmapProcessCodeMemory64From32(ams::svc::Handle process_handle, uint64_t dst_address, uint64_t src_address, uint64_t size) {
R_RETURN(UnmapProcessCodeMemory(process_handle, dst_address, src_address, size));
}
}
| 13,446 | C++ | .cpp | 184 | 61.521739 | 146 | 0.61397 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
7,015 | kern_svc_address_arbiter.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_address_arbiter.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
constexpr bool IsKernelAddress(uintptr_t address) {
return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
}
constexpr bool IsValidSignalType(ams::svc::SignalType type) {
switch (type) {
case ams::svc::SignalType_Signal:
case ams::svc::SignalType_SignalAndIncrementIfEqual:
case ams::svc::SignalType_SignalAndModifyByWaitingCountIfEqual:
return true;
default:
return false;
}
}
constexpr bool IsValidArbitrationType(ams::svc::ArbitrationType type) {
switch (type) {
case ams::svc::ArbitrationType_WaitIfLessThan:
case ams::svc::ArbitrationType_DecrementAndWaitIfLessThan:
case ams::svc::ArbitrationType_WaitIfEqual:
case ams::svc::ArbitrationType_WaitIfEqual64:
return true;
default:
return false;
}
}
Result WaitForAddress(uintptr_t address, ams::svc::ArbitrationType arb_type, int64_t value, int64_t timeout_ns) {
/* Validate input. */
R_UNLESS(AMS_LIKELY(!IsKernelAddress(address)), svc::ResultInvalidCurrentMemory());
if (arb_type == ams::svc::ArbitrationType_WaitIfEqual64) {
R_UNLESS(util::IsAligned(address, sizeof(int64_t)), svc::ResultInvalidAddress());
} else {
R_UNLESS(util::IsAligned(address, sizeof(int32_t)), svc::ResultInvalidAddress());
}
R_UNLESS(IsValidArbitrationType(arb_type), svc::ResultInvalidEnumValue());
/* Convert timeout from nanoseconds to ticks. */
s64 timeout;
if (timeout_ns > 0) {
const ams::svc::Tick offset_tick(TimeSpan::FromNanoSeconds(timeout_ns));
if (AMS_LIKELY(offset_tick > 0)) {
timeout = KHardwareTimer::GetTick() + offset_tick + 2;
if (AMS_UNLIKELY(timeout <= 0)) {
timeout = std::numeric_limits<s64>::max();
}
} else {
timeout = std::numeric_limits<s64>::max();
}
} else {
timeout = timeout_ns;
}
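            /* Note: a positive timeout is converted to an absolute tick deadline; the +2 */
            /* ticks compensate for conversion truncation, and any overflow saturates to */
            /* s64 max, i.e. "wait forever". Zero and negative timeouts pass through */
            /* unchanged, preserving the conventional poll/infinite semantics. */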
R_RETURN(GetCurrentProcess().WaitAddressArbiter(address, arb_type, value, timeout));
}
Result SignalToAddress(uintptr_t address, ams::svc::SignalType signal_type, int32_t value, int32_t count) {
/* Validate input. */
R_UNLESS(AMS_LIKELY(!IsKernelAddress(address)), svc::ResultInvalidCurrentMemory());
R_UNLESS(util::IsAligned(address, sizeof(int32_t)), svc::ResultInvalidAddress());
R_UNLESS(IsValidSignalType(signal_type), svc::ResultInvalidEnumValue());
R_RETURN(GetCurrentProcess().SignalAddressArbiter(address, signal_type, value, count));
}
}
/* ============================= 64 ABI ============================= */
Result WaitForAddress64(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int64_t value, int64_t timeout_ns) {
R_RETURN(WaitForAddress(address, arb_type, value, timeout_ns));
}
Result SignalToAddress64(ams::svc::Address address, ams::svc::SignalType signal_type, int32_t value, int32_t count) {
R_RETURN(SignalToAddress(address, signal_type, value, count));
}
/* ============================= 64From32 ABI ============================= */
Result WaitForAddress64From32(ams::svc::Address address, ams::svc::ArbitrationType arb_type, int64_t value, int64_t timeout_ns) {
R_RETURN(WaitForAddress(address, arb_type, value, timeout_ns));
}
Result SignalToAddress64From32(ams::svc::Address address, ams::svc::SignalType signal_type, int32_t value, int32_t count) {
R_RETURN(SignalToAddress(address, signal_type, value, count));
}
}
| 4,848 | C++ | .cpp | 92 | 41.902174 | 133 | 0.595736 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
7,016 | kern_svc_query_memory.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_query_memory.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
Result QueryProcessMemory(ams::svc::MemoryInfo *out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, uintptr_t address) {
/* Get the process. */
KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject<KProcess>(process_handle);
R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle());
/* Query the mapping's info. */
KMemoryInfo info;
R_TRY(process->GetPageTable().QueryInfo(std::addressof(info), out_page_info, address));
/* Write output. */
*out_memory_info = info.GetSvcMemoryInfo();
R_SUCCEED();
}
template<typename T>
Result QueryProcessMemory(KUserPointer<T *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, uint64_t address) {
/* Get an ams::svc::MemoryInfo for the region. */
ams::svc::MemoryInfo info = {};
R_TRY(QueryProcessMemory(std::addressof(info), out_page_info, process_handle, address));
/* Copy the info to userspace. */
if constexpr (std::same_as<T, ams::svc::MemoryInfo>) {
R_TRY(out_memory_info.CopyFrom(std::addressof(info)));
} else {
/* Convert the info. */
T converted_info = {};
static_assert(std::same_as<decltype(T{}.base_address), decltype(ams::svc::MemoryInfo{}.base_address)>);
static_assert(std::same_as<decltype(T{}.size), decltype(ams::svc::MemoryInfo{}.size)>);
converted_info.base_address = info.base_address;
converted_info.size = info.size;
converted_info.state = info.state;
converted_info.attribute = info.attribute;
converted_info.permission = info.permission;
converted_info.ipc_count = info.ipc_count;
converted_info.device_count = info.device_count;
/* Copy it. */
R_TRY(out_memory_info.CopyFrom(std::addressof(converted_info)));
}
R_SUCCEED();
}
template<typename T>
Result QueryMemory(KUserPointer<T *> out_memory_info, ams::svc::PageInfo *out_page_info, uintptr_t address) {
/* Query memory is just QueryProcessMemory on the current process. */
R_RETURN(QueryProcessMemory(out_memory_info, out_page_info, ams::svc::PseudoHandle::CurrentProcess, address));
}
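        /* Note: ams::svc::PseudoHandle::CurrentProcess resolves to the calling process in */
        /* the handle table lookup, so QueryMemory needs no real process handle. */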
}
/* ============================= 64 ABI ============================= */
Result QueryMemory64(KUserPointer<ams::svc::lp64::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Address address) {
R_RETURN(QueryMemory(out_memory_info, out_page_info, address));
}
Result QueryProcessMemory64(KUserPointer<ams::svc::lp64::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, uint64_t address) {
R_RETURN(QueryProcessMemory(out_memory_info, out_page_info, process_handle, address));
}
/* ============================= 64From32 ABI ============================= */
Result QueryMemory64From32(KUserPointer<ams::svc::ilp32::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Address address) {
R_RETURN(QueryMemory(out_memory_info, out_page_info, address));
}
Result QueryProcessMemory64From32(KUserPointer<ams::svc::ilp32::MemoryInfo *> out_memory_info, ams::svc::PageInfo *out_page_info, ams::svc::Handle process_handle, uint64_t address) {
R_RETURN(QueryProcessMemory(out_memory_info, out_page_info, process_handle, address));
}
}
| 4,547 | C++ | .cpp | 76 | 50.473684 | 186 | 0.619101 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
7,017 | kern_svc_code_memory.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_code_memory.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
constexpr bool IsValidMapCodeMemoryPermission(ams::svc::MemoryPermission perm) {
return perm == ams::svc::MemoryPermission_ReadWrite;
}
constexpr bool IsValidMapToOwnerCodeMemoryPermission(ams::svc::MemoryPermission perm) {
return perm == ams::svc::MemoryPermission_Read || perm == ams::svc::MemoryPermission_ReadExecute;
}
constexpr bool IsValidUnmapCodeMemoryPermission(ams::svc::MemoryPermission perm) {
return perm == ams::svc::MemoryPermission_None;
}
constexpr bool IsValidUnmapFromOwnerCodeMemoryPermission(ams::svc::MemoryPermission perm) {
return perm == ams::svc::MemoryPermission_None;
}
Result CreateCodeMemory(ams::svc::Handle *out, uintptr_t address, size_t size) {
/* Validate address / size. */
R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
/* Create the code memory. */
KCodeMemory *code_mem = KCodeMemory::Create();
R_UNLESS(code_mem != nullptr, svc::ResultOutOfResource());
ON_SCOPE_EXIT { code_mem->Close(); };
/* Verify that the region is in range. */
R_UNLESS(GetCurrentProcess().GetPageTable().Contains(address, size), svc::ResultInvalidCurrentMemory());
/* Initialize the code memory. */
R_TRY(code_mem->Initialize(address, size));
/* Register the code memory. */
KCodeMemory::Register(code_mem);
/* Add the code memory to the handle table. */
R_TRY(GetCurrentProcess().GetHandleTable().Add(out, code_mem));
R_SUCCEED();
}
Result ControlCodeMemory(ams::svc::Handle code_memory_handle, ams::svc::CodeMemoryOperation operation, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
/* Validate the address / size. */
R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
R_UNLESS(address == static_cast<uintptr_t>(address), svc::ResultInvalidCurrentMemory());
R_UNLESS(size == static_cast<size_t>(size), svc::ResultInvalidCurrentMemory());
/* Get the code memory from its handle. */
KScopedAutoObject code_mem = GetCurrentProcess().GetHandleTable().GetObject<KCodeMemory>(code_memory_handle);
R_UNLESS(code_mem.IsNotNull(), svc::ResultInvalidHandle());
/* NOTE: Here, Atmosphere extends the SVC to allow code memory operations on one's own process. */
/* This enables homebrew usage of these SVCs for JIT. */
/* R_UNLESS(code_mem->GetOwner() != GetCurrentProcessPointer(), svc::ResultInvalidHandle()); */
/* Perform the operation. */
switch (operation) {
case ams::svc::CodeMemoryOperation_Map:
{
/* Check that the region is in range. */
R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_CodeOut), svc::ResultInvalidMemoryRegion());
/* Check the memory permission. */
R_UNLESS(IsValidMapCodeMemoryPermission(perm), svc::ResultInvalidNewMemoryPermission());
/* Map the memory. */
R_TRY(code_mem->Map(address, size));
}
break;
case ams::svc::CodeMemoryOperation_Unmap:
{
/* Check that the region is in range. */
R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, KMemoryState_CodeOut), svc::ResultInvalidMemoryRegion());
/* Check the memory permission. */
R_UNLESS(IsValidUnmapCodeMemoryPermission(perm), svc::ResultInvalidNewMemoryPermission());
/* Unmap the memory. */
R_TRY(code_mem->Unmap(address, size));
}
break;
case ams::svc::CodeMemoryOperation_MapToOwner:
{
/* Check that the region is in range. */
R_UNLESS(code_mem->GetOwner()->GetPageTable().CanContain(address, size, KMemoryState_GeneratedCode), svc::ResultInvalidMemoryRegion());
/* Check the memory permission. */
R_UNLESS(IsValidMapToOwnerCodeMemoryPermission(perm), svc::ResultInvalidNewMemoryPermission());
/* Map the memory to its owner. */
R_TRY(code_mem->MapToOwner(address, size, perm));
}
break;
case ams::svc::CodeMemoryOperation_UnmapFromOwner:
{
/* Check that the region is in range. */
R_UNLESS(code_mem->GetOwner()->GetPageTable().CanContain(address, size, KMemoryState_GeneratedCode), svc::ResultInvalidMemoryRegion());
/* Check the memory permission. */
R_UNLESS(IsValidUnmapFromOwnerCodeMemoryPermission(perm), svc::ResultInvalidNewMemoryPermission());
/* Unmap the memory from its owner. */
R_TRY(code_mem->UnmapFromOwner(address, size));
}
break;
default:
R_THROW(svc::ResultInvalidEnumValue());
}
R_SUCCEED();
}
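        /* Illustrative userland sketch (not part of the kernel sources) of the JIT flow */
        /* that the Atmosphere extension noted above enables. Wrapper names and signatures */
        /* are assumed to mirror the ABI entry points declared below; src_address, */
        /* rw_address, and rx_address are hypothetical page-aligned regions. */
        /* */
        /*     ams::svc::Handle code_mem; */
        /*     R_TRY(ams::svc::CreateCodeMemory(std::addressof(code_mem), src_address, size)); */
        /*     R_TRY(ams::svc::ControlCodeMemory(code_mem, ams::svc::CodeMemoryOperation_Map, rw_address, size, ams::svc::MemoryPermission_ReadWrite)); */
        /*     -- ...emit generated code through rw_address... */
        /*     R_TRY(ams::svc::ControlCodeMemory(code_mem, ams::svc::CodeMemoryOperation_MapToOwner, rx_address, size, ams::svc::MemoryPermission_ReadExecute)); */
        /*     -- ...branch to rx_address to execute it... */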
}
/* ============================= 64 ABI ============================= */
Result CreateCodeMemory64(ams::svc::Handle *out_handle, ams::svc::Address address, ams::svc::Size size) {
R_RETURN(CreateCodeMemory(out_handle, address, size));
}
Result ControlCodeMemory64(ams::svc::Handle code_memory_handle, ams::svc::CodeMemoryOperation operation, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
R_RETURN(ControlCodeMemory(code_memory_handle, operation, address, size, perm));
}
/* ============================= 64From32 ABI ============================= */
Result CreateCodeMemory64From32(ams::svc::Handle *out_handle, ams::svc::Address address, ams::svc::Size size) {
R_RETURN(CreateCodeMemory(out_handle, address, size));
}
Result ControlCodeMemory64From32(ams::svc::Handle code_memory_handle, ams::svc::CodeMemoryOperation operation, uint64_t address, uint64_t size, ams::svc::MemoryPermission perm) {
R_RETURN(ControlCodeMemory(code_memory_handle, operation, address, size, perm));
}
}
| 8,014 | C++ | .cpp | 128 | 49.007813 | 182 | 0.586677 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
7,018 | kern_svc_condition_variable.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_condition_variable.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
constexpr bool IsKernelAddress(uintptr_t address) {
return KernelVirtualAddressSpaceBase <= address && address < KernelVirtualAddressSpaceEnd;
}
Result WaitProcessWideKeyAtomic(uintptr_t address, uintptr_t cv_key, uint32_t tag, int64_t timeout_ns) {
/* Validate input. */
R_UNLESS(AMS_LIKELY(!IsKernelAddress(address)), svc::ResultInvalidCurrentMemory());
R_UNLESS(util::IsAligned(address, sizeof(int32_t)), svc::ResultInvalidAddress());
/* Convert timeout from nanoseconds to ticks. */
s64 timeout;
if (timeout_ns > 0) {
const ams::svc::Tick offset_tick(TimeSpan::FromNanoSeconds(timeout_ns));
if (AMS_LIKELY(offset_tick > 0)) {
timeout = KHardwareTimer::GetTick() + offset_tick + 2;
if (AMS_UNLIKELY(timeout <= 0)) {
timeout = std::numeric_limits<s64>::max();
}
} else {
timeout = std::numeric_limits<s64>::max();
}
} else {
timeout = timeout_ns;
}
/* Wait on the condition variable. */
R_RETURN(GetCurrentProcess().WaitConditionVariable(address, util::AlignDown(cv_key, sizeof(u32)), tag, timeout));
}
void SignalProcessWideKey(uintptr_t cv_key, int32_t count) {
/* Signal the condition variable. */
return GetCurrentProcess().SignalConditionVariable(util::AlignDown(cv_key, sizeof(u32)), count);
}
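        /* Note: both paths align cv_key down to a u32 boundary, so every address within */
        /* the same 4-byte word names the same condition variable key. */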
}
/* ============================= 64 ABI ============================= */
Result WaitProcessWideKeyAtomic64(ams::svc::Address address, ams::svc::Address cv_key, uint32_t tag, int64_t timeout_ns) {
R_RETURN(WaitProcessWideKeyAtomic(address, cv_key, tag, timeout_ns));
}
void SignalProcessWideKey64(ams::svc::Address cv_key, int32_t count) {
return SignalProcessWideKey(cv_key, count);
}
/* ============================= 64From32 ABI ============================= */
Result WaitProcessWideKeyAtomic64From32(ams::svc::Address address, ams::svc::Address cv_key, uint32_t tag, int64_t timeout_ns) {
R_RETURN(WaitProcessWideKeyAtomic(address, cv_key, tag, timeout_ns));
}
void SignalProcessWideKey64From32(ams::svc::Address cv_key, int32_t count) {
return SignalProcessWideKey(cv_key, count);
}
}
| 3,271 | C++ | .cpp | 64 | 42.265625 | 132 | 0.602946 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
7,019 | kern_svc_activity.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_activity.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
constexpr bool IsValidThreadActivity(ams::svc::ThreadActivity thread_activity) {
switch (thread_activity) {
case ams::svc::ThreadActivity_Runnable:
case ams::svc::ThreadActivity_Paused:
return true;
default:
return false;
}
}
constexpr bool IsValidProcessActivity(ams::svc::ProcessActivity process_activity) {
switch (process_activity) {
case ams::svc::ProcessActivity_Runnable:
case ams::svc::ProcessActivity_Paused:
return true;
default:
return false;
}
}
Result SetThreadActivity(ams::svc::Handle thread_handle, ams::svc::ThreadActivity thread_activity) {
/* Validate the activity. */
R_UNLESS(IsValidThreadActivity(thread_activity), svc::ResultInvalidEnumValue());
/* Get the thread from its handle. */
KScopedAutoObject thread = GetCurrentProcess().GetHandleTable().GetObject<KThread>(thread_handle);
R_UNLESS(thread.IsNotNull(), svc::ResultInvalidHandle());
/* Check that the activity is being set on a non-current thread for the current process. */
R_UNLESS(thread->GetOwnerProcess() == GetCurrentProcessPointer(), svc::ResultInvalidHandle());
R_UNLESS(thread.GetPointerUnsafe() != GetCurrentThreadPointer(), svc::ResultBusy());
/* Set the activity. */
R_TRY(thread->SetActivity(thread_activity));
R_SUCCEED();
}
Result SetProcessActivity(ams::svc::Handle process_handle, ams::svc::ProcessActivity process_activity) {
/* Validate the activity. */
R_UNLESS(IsValidProcessActivity(process_activity), svc::ResultInvalidEnumValue());
/* Get the process from its handle. */
KScopedAutoObject process = GetCurrentProcess().GetHandleTable().GetObject<KProcess>(process_handle);
R_UNLESS(process.IsNotNull(), svc::ResultInvalidHandle());
/* Check that the activity isn't being set on the current process. */
R_UNLESS(process.GetPointerUnsafe() != GetCurrentProcessPointer(), svc::ResultBusy());
/* Set the activity. */
R_TRY(process->SetActivity(process_activity));
R_SUCCEED();
}
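        /* Note: both handlers reject the current thread/process with svc::ResultBusy, */
        /* presumably so a caller cannot pause itself from inside the svc. */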
}
/* ============================= 64 ABI ============================= */
Result SetThreadActivity64(ams::svc::Handle thread_handle, ams::svc::ThreadActivity thread_activity) {
R_RETURN(SetThreadActivity(thread_handle, thread_activity));
}
Result SetProcessActivity64(ams::svc::Handle process_handle, ams::svc::ProcessActivity process_activity) {
R_RETURN(SetProcessActivity(process_handle, process_activity));
}
/* ============================= 64From32 ABI ============================= */
Result SetThreadActivity64From32(ams::svc::Handle thread_handle, ams::svc::ThreadActivity thread_activity) {
R_RETURN(SetThreadActivity(thread_handle, thread_activity));
}
Result SetProcessActivity64From32(ams::svc::Handle process_handle, ams::svc::ProcessActivity process_activity) {
R_RETURN(SetProcessActivity(process_handle, process_activity));
}
}
| 4,159 | C++ | .cpp | 78 | 43.846154 | 116 | 0.627896 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
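As the handlers above show, SetThreadActivity refuses to act on the calling thread (svc::ResultBusy) and on threads owned by another process (svc::ResultInvalidHandle). A hedged sketch of the corresponding userland call sequence follows; the svcSetThreadActivity stub name and the enum values are assumptions mirroring the kernel-side signature:
#include <cstdint>
/* Assumed userland stub mirroring SetThreadActivity64 above. */
extern "C" uint32_t svcSetThreadActivity(uint32_t thread_handle, int32_t activity);
/* Assumed values matching ams::svc::ThreadActivity_Runnable/_Paused. */
constexpr int32_t ThreadActivity_Runnable = 0;
constexpr int32_t ThreadActivity_Paused   = 1;
/* Pauses a sibling thread, runs a callback, then resumes it. Returns the first
   failing result code, or 0 on success. */
template<typename F>
uint32_t WithThreadPaused(uint32_t thread_handle, F &&func) {
    if (const uint32_t rc = svcSetThreadActivity(thread_handle, ThreadActivity_Paused); rc != 0) {
        return rc;
    }
    func();
    return svcSetThreadActivity(thread_handle, ThreadActivity_Runnable);
}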
7,020 | kern_svc_resource_limit.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_resource_limit.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
constexpr bool IsValidLimitableResource(ams::svc::LimitableResource which) {
return which < ams::svc::LimitableResource_Count;
}
Result GetResourceLimitLimitValue(int64_t *out_limit_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
/* Validate the resource. */
R_UNLESS(IsValidLimitableResource(which), svc::ResultInvalidEnumValue());
/* Get the resource limit. */
KScopedAutoObject resource_limit = GetCurrentProcess().GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
R_UNLESS(resource_limit.IsNotNull(), svc::ResultInvalidHandle());
/* Get the limit value. */
*out_limit_value = resource_limit->GetLimitValue(which);
R_SUCCEED();
}
Result GetResourceLimitCurrentValue(int64_t *out_current_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
/* Validate the resource. */
R_UNLESS(IsValidLimitableResource(which), svc::ResultInvalidEnumValue());
/* Get the resource limit. */
KScopedAutoObject resource_limit = GetCurrentProcess().GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
R_UNLESS(resource_limit.IsNotNull(), svc::ResultInvalidHandle());
/* Get the current value. */
*out_current_value = resource_limit->GetCurrentValue(which);
R_SUCCEED();
}
Result GetResourceLimitPeakValue(int64_t *out_peak_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
/* Validate the resource. */
R_UNLESS(IsValidLimitableResource(which), svc::ResultInvalidEnumValue());
/* Get the resource limit. */
KScopedAutoObject resource_limit = GetCurrentProcess().GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
R_UNLESS(resource_limit.IsNotNull(), svc::ResultInvalidHandle());
/* Get the peak value. */
*out_peak_value = resource_limit->GetPeakValue(which);
R_SUCCEED();
}
Result CreateResourceLimit(ams::svc::Handle *out_handle) {
/* Create a new resource limit. */
KResourceLimit *resource_limit = KResourceLimit::Create();
R_UNLESS(resource_limit != nullptr, svc::ResultOutOfResource());
/* Ensure we don't leak a reference to the limit. */
ON_SCOPE_EXIT { resource_limit->Close(); };
/* Initialize the resource limit. */
resource_limit->Initialize();
/* Register the limit. */
KResourceLimit::Register(resource_limit);
/* Add the limit to the handle table. */
R_TRY(GetCurrentProcess().GetHandleTable().Add(out_handle, resource_limit));
R_SUCCEED();
}
Result SetResourceLimitLimitValue(ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which, int64_t limit_value) {
/* Validate the resource. */
R_UNLESS(IsValidLimitableResource(which), svc::ResultInvalidEnumValue());
/* Get the resource limit. */
KScopedAutoObject resource_limit = GetCurrentProcess().GetHandleTable().GetObject<KResourceLimit>(resource_limit_handle);
R_UNLESS(resource_limit.IsNotNull(), svc::ResultInvalidHandle());
/* Set the limit value. */
R_TRY(resource_limit->SetLimitValue(which, limit_value));
R_SUCCEED();
}
}
/* ============================= 64 ABI ============================= */
Result GetResourceLimitLimitValue64(int64_t *out_limit_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
R_RETURN(GetResourceLimitLimitValue(out_limit_value, resource_limit_handle, which));
}
Result GetResourceLimitCurrentValue64(int64_t *out_current_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
R_RETURN(GetResourceLimitCurrentValue(out_current_value, resource_limit_handle, which));
}
Result GetResourceLimitPeakValue64(int64_t *out_peak_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
R_RETURN(GetResourceLimitPeakValue(out_peak_value, resource_limit_handle, which));
}
Result CreateResourceLimit64(ams::svc::Handle *out_handle) {
R_RETURN(CreateResourceLimit(out_handle));
}
Result SetResourceLimitLimitValue64(ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which, int64_t limit_value) {
R_RETURN(SetResourceLimitLimitValue(resource_limit_handle, which, limit_value));
}
/* ============================= 64From32 ABI ============================= */
Result GetResourceLimitLimitValue64From32(int64_t *out_limit_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
R_RETURN(GetResourceLimitLimitValue(out_limit_value, resource_limit_handle, which));
}
Result GetResourceLimitCurrentValue64From32(int64_t *out_current_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
R_RETURN(GetResourceLimitCurrentValue(out_current_value, resource_limit_handle, which));
}
Result GetResourceLimitPeakValue64From32(int64_t *out_peak_value, ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which) {
R_RETURN(GetResourceLimitPeakValue(out_peak_value, resource_limit_handle, which));
}
Result CreateResourceLimit64From32(ams::svc::Handle *out_handle) {
R_RETURN(CreateResourceLimit(out_handle));
}
Result SetResourceLimitLimitValue64From32(ams::svc::Handle resource_limit_handle, ams::svc::LimitableResource which, int64_t limit_value) {
R_RETURN(SetResourceLimitLimitValue(resource_limit_handle, which, limit_value));
}
}
| 6,778 | C++ | .cpp | 110 | 52.709091 | 152 | 0.670338 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
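A common use of the query SVCs above is computing headroom, i.e. limit value minus current value, for a given resource. A minimal sketch, with the svcGetResourceLimit* stub names assumed to mirror the 64-bit ABI wrappers:
#include <cstdint>
/* Assumed userland stubs mirroring GetResourceLimitLimitValue64/GetResourceLimitCurrentValue64 above. */
extern "C" uint32_t svcGetResourceLimitLimitValue(int64_t *out, uint32_t reslimit_handle, int32_t which);
extern "C" uint32_t svcGetResourceLimitCurrentValue(int64_t *out, uint32_t reslimit_handle, int32_t which);
/* Returns the remaining budget for a resource, or -1 if either query fails. */
int64_t GetResourceHeadroom(uint32_t reslimit_handle, int32_t which) {
    int64_t limit = 0, current = 0;
    if (svcGetResourceLimitLimitValue(&limit, reslimit_handle, which) != 0) {
        return -1;
    }
    if (svcGetResourceLimitCurrentValue(&current, reslimit_handle, which) != 0) {
        return -1;
    }
    return limit - current;
}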
7,021 | kern_svc_light_ipc.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_light_ipc.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
ALWAYS_INLINE Result SendSyncRequestLight(ams::svc::Handle session_handle, u32 *args) {
/* Get the light client session from its handle. */
KScopedAutoObject session = GetCurrentProcess().GetHandleTable().GetObject<KLightClientSession>(session_handle);
R_UNLESS(session.IsNotNull(), svc::ResultInvalidHandle());
/* Send the request. */
R_TRY(session->SendSyncRequest(args));
R_SUCCEED();
}
ALWAYS_INLINE Result ReplyAndReceiveLight(ams::svc::Handle session_handle, u32 *args) {
/* Get the light server session from its handle. */
KScopedAutoObject session = GetCurrentProcess().GetHandleTable().GetObject<KLightServerSession>(session_handle);
R_UNLESS(session.IsNotNull(), svc::ResultInvalidHandle());
/* Handle the request. */
R_TRY(session->ReplyAndReceive(args));
R_SUCCEED();
}
}
/* ============================= 64 ABI ============================= */
Result SendSyncRequestLight64(ams::svc::Handle session_handle, u32 *args) {
R_RETURN(SendSyncRequestLight(session_handle, args));
}
Result ReplyAndReceiveLight64(ams::svc::Handle session_handle, u32 *args) {
R_RETURN(ReplyAndReceiveLight(session_handle, args));
}
/* ============================= 64From32 ABI ============================= */
Result SendSyncRequestLight64From32(ams::svc::Handle session_handle, u32 *args) {
R_RETURN(SendSyncRequestLight(session_handle, args));
}
Result ReplyAndReceiveLight64From32(ams::svc::Handle session_handle, u32 *args) {
R_RETURN(ReplyAndReceiveLight(session_handle, args));
}
}
| 2,540 | C++ | .cpp | 51 | 43.098039 | 124 | 0.634709 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
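Light IPC, as the u32* parameter in the handlers above suggests, moves a small fixed set of 32-bit arguments rather than a message buffer. A server loop built on ReplyAndReceiveLight might look roughly like this; the stub name and the seven-word argument area are assumptions, not details confirmed by this file:
#include <cstdint>
/* Assumed userland stub mirroring ReplyAndReceiveLight64 above; the size of the
   register-passed argument area is an assumption. */
extern "C" uint32_t svcReplyAndReceiveLight(uint32_t server_session_handle, uint32_t *args);
/* Minimal light-IPC server loop: echo the first argument back, incremented.
   The first call replies to nothing and simply waits for the first request. */
void ServeLightSession(uint32_t server_session_handle) {
    uint32_t args[7] = {};
    for (;;) {
        if (svcReplyAndReceiveLight(server_session_handle, args) != 0) {
            break; /* e.g. the client closed the session */
        }
        args[0] += 1; /* reply payload sent by the next iteration */
    }
}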
7,022 | kern_svc_processor.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_processor.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
int32_t GetCurrentProcessorNumber() {
/* Setup variables to track affinity information. */
s32 current_phys_core;
u64 v_affinity_mask = 0;
/* Forever try to get the affinity. */
while (true) {
/* Update affinity information if we've run out. */
while (v_affinity_mask == 0) {
current_phys_core = GetCurrentCoreId();
v_affinity_mask = GetCurrentThread().GetVirtualAffinityMask();
if ((v_affinity_mask & (1ul << current_phys_core)) != 0) {
return current_phys_core;
}
}
/* Check the next virtual bit. */
do {
const s32 next_virt_core = static_cast<s32>(__builtin_ctzll(v_affinity_mask));
if (current_phys_core == cpu::VirtualToPhysicalCoreMap[next_virt_core]) {
return next_virt_core;
}
v_affinity_mask &= ~(1ul << next_virt_core);
} while (v_affinity_mask != 0);
}
}
}
/* ============================= 64 ABI ============================= */
int32_t GetCurrentProcessorNumber64() {
return GetCurrentProcessorNumber();
}
/* ============================= 64From32 ABI ============================= */
int32_t GetCurrentProcessorNumber64From32() {
return GetCurrentProcessorNumber();
}
}
| 2,330 | C++ | .cpp | 53 | 34.075472 | 98 | 0.534452 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
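The loop above answers "which virtual core is this thread on?" by walking the thread's virtual affinity mask with a count-trailing-zeros scan and comparing each candidate against the virtual-to-physical core map. A standalone, compilable sketch of that inner scan; the identity core map here is made up for illustration:
#include <cstdint>
#include <cassert>
/* Illustrative identity map; the real map lives in cpu::VirtualToPhysicalCoreMap. */
constexpr int8_t VirtualToPhysicalCoreMap[] = { 0, 1, 2, 3 };
/* Given the current physical core and a virtual affinity mask, find the virtual
   core the thread is running on, or -1 if the mask excludes this physical core. */
int32_t FindVirtualCore(int32_t current_phys_core, uint64_t v_affinity_mask) {
    while (v_affinity_mask != 0) {
        const int32_t next_virt_core = __builtin_ctzll(v_affinity_mask);
        if (current_phys_core == VirtualToPhysicalCoreMap[next_virt_core]) {
            return next_virt_core;
        }
        v_affinity_mask &= ~(UINT64_C(1) << next_virt_core);
    }
    return -1;
}
int main() {
    assert(FindVirtualCore(2, 0b1100) == 2); /* mask allows virtual cores 2 and 3 */
    return 0;
}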
7,023 | kern_svc_io_pool.cpp | Atmosphere-NX_Atmosphere/libraries/libmesosphere/source/svc/kern_svc_io_pool.cpp |
/*
* Copyright (c) Atmosphère-NX
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <mesosphere.hpp>
namespace ams::kern::svc {
/* ============================= Common ============================= */
namespace {
#if defined(AMS_SVC_IO_POOL_NOT_SUPPORTED)
constexpr bool IsIoPoolApiSupported = false;
#else
constexpr bool IsIoPoolApiSupported = true;
#endif
[[maybe_unused]] constexpr bool IsValidIoRegionMapping(ams::svc::MemoryMapping mapping) {
switch (mapping) {
case ams::svc::MemoryMapping_IoRegister:
case ams::svc::MemoryMapping_Uncached:
case ams::svc::MemoryMapping_Memory:
return true;
default:
return false;
}
}
[[maybe_unused]] constexpr bool IsValidIoRegionPermission(ams::svc::MemoryPermission perm) {
switch (perm) {
case ams::svc::MemoryPermission_Read:
case ams::svc::MemoryPermission_ReadWrite:
return true;
default:
return false;
}
}
Result CreateIoPool(ams::svc::Handle *out, ams::svc::IoPoolType pool_type) {
if constexpr (IsIoPoolApiSupported) {
/* Validate that we're allowed to create a pool for the given type. */
R_UNLESS(KIoPool::IsValidIoPoolType(pool_type), svc::ResultNotFound());
/* Create the io pool. */
KIoPool *io_pool = KIoPool::Create();
R_UNLESS(io_pool != nullptr, svc::ResultOutOfResource());
/* Ensure the only reference is in the handle table when we're done. */
ON_SCOPE_EXIT { io_pool->Close(); };
/* Initialize the io pool. */
R_TRY(io_pool->Initialize(pool_type));
/* Register the io pool. */
KIoPool::Register(io_pool);
/* Add the io pool to the handle table. */
R_TRY(GetCurrentProcess().GetHandleTable().Add(out, io_pool));
R_SUCCEED();
} else {
MESOSPHERE_UNUSED(out, pool_type);
R_THROW(svc::ResultNotImplemented());
}
}
Result CreateIoRegion(ams::svc::Handle *out, ams::svc::Handle io_pool_handle, uint64_t phys_addr, size_t size, ams::svc::MemoryMapping mapping, ams::svc::MemoryPermission perm) {
if constexpr (IsIoPoolApiSupported) {
/* Validate the address/size. */
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(util::IsAligned(phys_addr, PageSize), svc::ResultInvalidAddress());
R_UNLESS((phys_addr < phys_addr + size), svc::ResultInvalidMemoryRegion());
/* Validate the mapping/permissions. */
R_UNLESS(IsValidIoRegionMapping(mapping), svc::ResultInvalidEnumValue());
R_UNLESS(IsValidIoRegionPermission(perm), svc::ResultInvalidEnumValue());
/* Get the current handle table. */
auto &handle_table = GetCurrentProcess().GetHandleTable();
/* Get the io pool. */
KScopedAutoObject io_pool = handle_table.GetObject<KIoPool>(io_pool_handle);
R_UNLESS(io_pool.IsNotNull(), svc::ResultInvalidHandle());
/* Create the io region. */
KIoRegion *io_region = KIoRegion::Create();
R_UNLESS(io_region != nullptr, svc::ResultOutOfResource());
/* Ensure the only reference is in the handle table when we're done. */
ON_SCOPE_EXIT { io_region->Close(); };
/* Initialize the io region. */
R_TRY(io_region->Initialize(io_pool.GetPointerUnsafe(), phys_addr, size, mapping, perm));
/* Register the io region. */
KIoRegion::Register(io_region);
/* Add the io region to the handle table. */
R_TRY(handle_table.Add(out, io_region));
R_SUCCEED();
} else {
MESOSPHERE_UNUSED(out, io_pool_handle, phys_addr, size, mapping, perm);
R_THROW(svc::ResultNotImplemented());
}
}
Result MapIoRegion(ams::svc::Handle io_region_handle, uintptr_t address, size_t size, ams::svc::MemoryPermission map_perm) {
if constexpr (IsIoPoolApiSupported) {
/* Validate the address/size. */
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress());
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
/* Verify that the mapping is in range. */
R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, ams::svc::MemoryState_Io), svc::ResultInvalidMemoryRegion());
/* Validate the map permission. */
R_UNLESS(IsValidIoRegionPermission(map_perm), svc::ResultInvalidNewMemoryPermission());
/* Get the io region. */
KScopedAutoObject io_region = GetCurrentProcess().GetHandleTable().GetObject<KIoRegion>(io_region_handle);
R_UNLESS(io_region.IsNotNull(), svc::ResultInvalidHandle());
/* Map the io region. */
R_TRY(io_region->Map(address, size, map_perm));
/* We succeeded. */
R_SUCCEED();
} else {
MESOSPHERE_UNUSED(io_region_handle, address, size, map_perm);
R_THROW(svc::ResultNotImplemented());
}
}
Result UnmapIoRegion(ams::svc::Handle io_region_handle, uintptr_t address, size_t size) {
if constexpr (IsIoPoolApiSupported) {
/* Validate the address/size. */
R_UNLESS(size > 0, svc::ResultInvalidSize());
R_UNLESS(util::IsAligned(size, PageSize), svc::ResultInvalidSize());
R_UNLESS(util::IsAligned(address, PageSize), svc::ResultInvalidAddress());
R_UNLESS((address < address + size), svc::ResultInvalidCurrentMemory());
/* Verify that the mapping is in range. */
R_UNLESS(GetCurrentProcess().GetPageTable().CanContain(address, size, ams::svc::MemoryState_Io), svc::ResultInvalidMemoryRegion());
/* Get the io region. */
KScopedAutoObject io_region = GetCurrentProcess().GetHandleTable().GetObject<KIoRegion>(io_region_handle);
R_UNLESS(io_region.IsNotNull(), svc::ResultInvalidHandle());
/* Unmap the io region. */
R_TRY(io_region->Unmap(address, size));
/* We succeeded. */
R_SUCCEED();
} else {
MESOSPHERE_UNUSED(io_region_handle, address, size);
R_THROW(svc::ResultNotImplemented());
}
}
}
/* ============================= 64 ABI ============================= */
Result CreateIoPool64(ams::svc::Handle *out_handle, ams::svc::IoPoolType pool_type) {
R_RETURN(CreateIoPool(out_handle, pool_type));
}
Result CreateIoRegion64(ams::svc::Handle *out_handle, ams::svc::Handle io_pool, ams::svc::PhysicalAddress physical_address, ams::svc::Size size, ams::svc::MemoryMapping mapping, ams::svc::MemoryPermission perm) {
R_RETURN(CreateIoRegion(out_handle, io_pool, physical_address, size, mapping, perm));
}
Result MapIoRegion64(ams::svc::Handle io_region, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission perm) {
R_RETURN(MapIoRegion(io_region, address, size, perm));
}
Result UnmapIoRegion64(ams::svc::Handle io_region, ams::svc::Address address, ams::svc::Size size) {
R_RETURN(UnmapIoRegion(io_region, address, size));
}
/* ============================= 64From32 ABI ============================= */
Result CreateIoPool64From32(ams::svc::Handle *out_handle, ams::svc::IoPoolType pool_type) {
R_RETURN(CreateIoPool(out_handle, pool_type));
}
Result CreateIoRegion64From32(ams::svc::Handle *out_handle, ams::svc::Handle io_pool, ams::svc::PhysicalAddress physical_address, ams::svc::Size size, ams::svc::MemoryMapping mapping, ams::svc::MemoryPermission perm) {
R_RETURN(CreateIoRegion(out_handle, io_pool, physical_address, size, mapping, perm));
}
Result MapIoRegion64From32(ams::svc::Handle io_region, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission perm) {
R_RETURN(MapIoRegion(io_region, address, size, perm));
}
Result UnmapIoRegion64From32(ams::svc::Handle io_region, ams::svc::Address address, ams::svc::Size size) {
R_RETURN(UnmapIoRegion(io_region, address, size));
}
}
| 9,823 | C++ | .cpp | 168 | 45.755952 | 222 | 0.582683 | Atmosphere-NX/Atmosphere | 14,324 | 1,207 | 54 | GPL-2.0 | 9/20/2024, 9:26:25 PM (Europe/Amsterdam) | false | false | false | false | false | false | false | false |
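Taken together, the four handlers above imply a create-pool, create-region, map, use, unmap lifecycle, with every address and size page-aligned. A hedged sketch of that sequence; the svc* stub names and the constant values are assumptions mirroring the kernel-side signatures, and error paths leak the intermediate handles for brevity:
#include <cstdint>
/* Assumed userland stubs mirroring the 64-bit ABI wrappers above. */
extern "C" uint32_t svcCreateIoPool(uint32_t *out_handle, int32_t pool_type);
extern "C" uint32_t svcCreateIoRegion(uint32_t *out_handle, uint32_t io_pool_handle, uint64_t phys_addr, uint64_t size, int32_t mapping, int32_t perm);
extern "C" uint32_t svcMapIoRegion(uint32_t io_region_handle, uintptr_t address, uint64_t size, int32_t perm);
extern "C" uint32_t svcUnmapIoRegion(uint32_t io_region_handle, uintptr_t address, uint64_t size);
/* Illustrative constants; the values are assumptions, not taken from this file. */
constexpr int32_t IoPoolType_PcieA2          = 0;
constexpr int32_t MemoryMapping_Uncached     = 1;
constexpr int32_t MemoryPermission_ReadWrite = 3;
/* Maps size bytes of device memory at phys_addr into the caller at va.
   Both va and size must be page-aligned, per the validation above. */
uint32_t MapDeviceWindow(uintptr_t va, uint64_t phys_addr, uint64_t size) {
    uint32_t pool = 0, region = 0;
    if (const uint32_t rc = svcCreateIoPool(&pool, IoPoolType_PcieA2); rc != 0) {
        return rc;
    }
    if (const uint32_t rc = svcCreateIoRegion(&region, pool, phys_addr, size, MemoryMapping_Uncached, MemoryPermission_ReadWrite); rc != 0) {
        return rc;
    }
    return svcMapIoRegion(region, va, size, MemoryPermission_ReadWrite);
}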