Diffstat (limited to 'src/core/hle/kernel')
-rw-r--r--  src/core/hle/kernel/event.cpp        |   3
-rw-r--r--  src/core/hle/kernel/kernel.cpp       |  22
-rw-r--r--  src/core/hle/kernel/kernel.h         |   6
-rw-r--r--  src/core/hle/kernel/mutex.cpp        |   9
-rw-r--r--  src/core/hle/kernel/resource_limit.h |   8
-rw-r--r--  src/core/hle/kernel/semaphore.cpp    |  10
-rw-r--r--  src/core/hle/kernel/thread.cpp       |  84
-rw-r--r--  src/core/hle/kernel/thread.h         |  14
-rw-r--r--  src/core/hle/kernel/timer.cpp        |   2
-rw-r--r--  src/core/hle/kernel/vm_manager.cpp   | 245
-rw-r--r--  src/core/hle/kernel/vm_manager.h     | 200
11 files changed, 492 insertions(+), 111 deletions(-)
diff --git a/src/core/hle/kernel/event.cpp b/src/core/hle/kernel/event.cpp
index e45deb1c6..f338f3266 100644
--- a/src/core/hle/kernel/event.cpp
+++ b/src/core/hle/kernel/event.cpp
@@ -41,10 +41,7 @@ void Event::Acquire() {
void Event::Signal() {
signaled = true;
-
WakeupAllWaitingThreads();
-
- HLE::Reschedule(__func__);
}
void Event::Clear() {
diff --git a/src/core/hle/kernel/kernel.cpp b/src/core/hle/kernel/kernel.cpp
index 726e4d2ff..20e11da16 100644
--- a/src/core/hle/kernel/kernel.cpp
+++ b/src/core/hle/kernel/kernel.cpp
@@ -32,27 +32,13 @@ void WaitObject::RemoveWaitingThread(Thread* thread) {
waiting_threads.erase(itr);
}
-SharedPtr<Thread> WaitObject::WakeupNextThread() {
- if (waiting_threads.empty())
- return nullptr;
-
- auto next_thread = std::move(waiting_threads.front());
- waiting_threads.erase(waiting_threads.begin());
-
- next_thread->ReleaseWaitObject(this);
-
- return next_thread;
-}
-
void WaitObject::WakeupAllWaitingThreads() {
- auto waiting_threads_copy = waiting_threads;
+ for (auto thread : waiting_threads)
+ thread->ResumeFromWait();
- // We use a copy because ReleaseWaitObject will remove the thread from this object's
- // waiting_threads list
- for (auto thread : waiting_threads_copy)
- thread->ReleaseWaitObject(this);
+ waiting_threads.clear();
- ASSERT_MSG(waiting_threads.empty(), "failed to awaken all waiting threads!");
+ HLE::Reschedule(__func__);
}
HandleTable::HandleTable() {
diff --git a/src/core/hle/kernel/kernel.h b/src/core/hle/kernel/kernel.h
index a5a0f4800..64595f758 100644
--- a/src/core/hle/kernel/kernel.h
+++ b/src/core/hle/kernel/kernel.h
@@ -140,12 +140,6 @@ public:
*/
void RemoveWaitingThread(Thread* thread);
- /**
- * Wake up the next thread waiting on this object
- * @return Pointer to the thread that was resumed, nullptr if no threads are waiting
- */
- SharedPtr<Thread> WakeupNextThread();
-
/// Wake up all threads waiting on this object
void WakeupAllWaitingThreads();
diff --git a/src/core/hle/kernel/mutex.cpp b/src/core/hle/kernel/mutex.cpp
index 6aa73df86..edb97d324 100644
--- a/src/core/hle/kernel/mutex.cpp
+++ b/src/core/hle/kernel/mutex.cpp
@@ -23,12 +23,7 @@ static void ResumeWaitingThread(Mutex* mutex) {
// Reset mutex lock thread handle, nothing is waiting
mutex->lock_count = 0;
mutex->holding_thread = nullptr;
-
- // Find the next waiting thread for the mutex...
- auto next_thread = mutex->WakeupNextThread();
- if (next_thread != nullptr) {
- mutex->Acquire(next_thread);
- }
+ mutex->WakeupAllWaitingThreads();
}
void ReleaseThreadMutexes(Thread* thread) {
@@ -94,8 +89,6 @@ void Mutex::Release() {
ResumeWaitingThread(this);
}
}
-
- HLE::Reschedule(__func__);
}
} // namespace
diff --git a/src/core/hle/kernel/resource_limit.h b/src/core/hle/kernel/resource_limit.h
index 201ec0db9..1b8249c74 100644
--- a/src/core/hle/kernel/resource_limit.h
+++ b/src/core/hle/kernel/resource_limit.h
@@ -81,13 +81,13 @@ public:
s32 max_timers = 0;
s32 max_shared_mems = 0;
s32 max_address_arbiters = 0;
-
+
/// Max CPU time that the processes in this category can utilize
s32 max_cpu_time = 0;
- // TODO(Subv): Increment these in their respective Kernel::T::Create functions, keeping in mind that
- // APPLICATION resource limits should not be affected by the objects created by service modules.
- // Currently we have no way of distinguishing if a Create was called by the running application,
+ // TODO(Subv): Increment these in their respective Kernel::T::Create functions, keeping in mind that
+ // APPLICATION resource limits should not be affected by the objects created by service modules.
+ // Currently we have no way of distinguishing if a Create was called by the running application,
// or by a service module. Approach this once we have separated the service modules into their own processes
/// Current memory that the processes in this category are using
diff --git a/src/core/hle/kernel/semaphore.cpp b/src/core/hle/kernel/semaphore.cpp
index dbb4c9b7f..4b359ed07 100644
--- a/src/core/hle/kernel/semaphore.cpp
+++ b/src/core/hle/kernel/semaphore.cpp
@@ -42,19 +42,13 @@ void Semaphore::Acquire() {
ResultVal<s32> Semaphore::Release(s32 release_count) {
if (max_count - available_count < release_count)
- return ResultCode(ErrorDescription::OutOfRange, ErrorModule::Kernel,
+ return ResultCode(ErrorDescription::OutOfRange, ErrorModule::Kernel,
ErrorSummary::InvalidArgument, ErrorLevel::Permanent);
s32 previous_count = available_count;
available_count += release_count;
- // Notify some of the threads that the semaphore has been released
- // stop once the semaphore is full again or there are no more waiting threads
- while (!ShouldWait() && WakeupNextThread() != nullptr) {
- Acquire();
- }
-
- HLE::Reschedule(__func__);
+ WakeupAllWaitingThreads();
return MakeResult<s32>(previous_count);
}
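
The semaphore no longer picks which waiters to admit: Release() wakes every waiter, and because the scheduler rewinds each woken thread's PC (see the thread.cpp changes below), each one re-executes svcWaitSynchronization and re-tests ShouldWait(). A minimal standalone model of that retry protocol, using toy types rather than Citra's kernel classes:

    #include <cstdio>
    #include <deque>

    // Toy model of the wake-all semantics (not Citra's classes). On Release(),
    // every waiter is woken; each one then re-runs its wait test, mirroring the
    // rewound svcWaitSynchronization, and either acquires or re-blocks.
    struct ToySemaphore {
        int available_count = 0;
        std::deque<int> waiting_threads; // thread ids, for illustration

        bool ShouldWait() const { return available_count <= 0; }
        void Acquire() { --available_count; }

        void Release(int release_count) {
            available_count += release_count;

            std::deque<int> woken;
            woken.swap(waiting_threads); // wake everyone
            for (int thread : woken) {
                if (!ShouldWait()) {
                    Acquire();
                    std::printf("thread %d acquired\n", thread);
                } else {
                    waiting_threads.push_back(thread); // re-blocks
                    std::printf("thread %d waits again\n", thread);
                }
            }
        }
    };

    int main() {
        ToySemaphore sem;
        sem.waiting_threads = {1, 2, 3};
        sem.Release(2); // threads 1 and 2 acquire, thread 3 re-blocks
    }

This reproduces the old wake-at-most-release_count outcome without the explicit WakeupNextThread() loop.
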
diff --git a/src/core/hle/kernel/thread.cpp b/src/core/hle/kernel/thread.cpp
index 690d33b55..4729a7fe0 100644
--- a/src/core/hle/kernel/thread.cpp
+++ b/src/core/hle/kernel/thread.cpp
@@ -13,6 +13,7 @@
#include "common/thread_queue_list.h"
#include "core/arm/arm_interface.h"
+#include "core/arm/skyeye_common/armdefs.h"
#include "core/core.h"
#include "core/core_timing.h"
#include "core/hle/hle.h"
@@ -100,7 +101,7 @@ void Thread::Stop() {
}
status = THREADSTATUS_DEAD;
-
+
WakeupAllWaitingThreads();
// Clean up any dangling references in objects that this thread was waiting for
@@ -169,7 +170,7 @@ static void PriorityBoostStarvedThreads() {
}
}
-/**
+/**
* Switches the CPU's active thread context to that of the specified thread
* @param new_thread The thread to switch to
*/
@@ -193,8 +194,22 @@ static void SwitchContext(Thread* new_thread) {
if (new_thread) {
DEBUG_ASSERT_MSG(new_thread->status == THREADSTATUS_READY, "Thread must be ready to become running.");
+ // Cancel any outstanding wakeup events for this thread
+ CoreTiming::UnscheduleEvent(ThreadWakeupEventType, new_thread->callback_handle);
+
current_thread = new_thread;
+ // If the thread was waiting on a svcWaitSynch call, step the PC back by one instruction so
+ // the SVC is re-executed when the thread wakes up. This is necessary to ensure that the
+ // thread can acquire the requested wait object(s) before continuing.
+ if (new_thread->waitsynch_waited) {
+ // The CPSR T bit indicates whether the CPU is in Thumb mode
+ bool thumb_mode = (new_thread->context.cpsr & TBIT) != 0;
+
+ // SVC instruction is 2 bytes for THUMB, 4 bytes for ARM
+ new_thread->context.pc -= thumb_mode ? 2 : 4;
+ }
+
ready_queue.remove(new_thread->current_priority, new_thread);
new_thread->status = THREADSTATUS_RUNNING;
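
The rewind relies on SVC having a fixed width in each instruction set state: 2 bytes in Thumb (CPSR T bit set), 4 bytes in ARM. Stepping the PC back by one instruction therefore re-enters the kernel through the same syscall the next time the thread runs. A self-contained sketch of the arithmetic, with TBIT defined locally as a stand-in for the SkyEye constant pulled in by the new armdefs.h include:

    #include <cassert>
    #include <cstdint>

    // Bit 5 of the CPSR is the Thumb bit; local stand-in for SkyEye's TBIT.
    constexpr std::uint32_t TBIT = 1u << 5;

    std::uint32_t RewindPastSvc(std::uint32_t pc, std::uint32_t cpsr) {
        bool thumb_mode = (cpsr & TBIT) != 0;
        // SVC is 2 bytes in Thumb state, 4 bytes in ARM state.
        return pc - (thumb_mode ? 2 : 4);
    }

    int main() {
        assert(RewindPastSvc(0x100004, 0) == 0x100000);    // ARM
        assert(RewindPastSvc(0x100002, TBIT) == 0x100000); // Thumb
    }
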
@@ -243,6 +258,7 @@ void WaitCurrentThread_WaitSynchronization(std::vector<SharedPtr<WaitObject>> wa
thread->wait_set_output = wait_set_output;
thread->wait_all = wait_all;
thread->wait_objects = std::move(wait_objects);
+ thread->waitsynch_waited = true;
thread->status = THREADSTATUS_WAIT_SYNCH;
}
@@ -268,6 +284,8 @@ static void ThreadWakeupCallback(u64 thread_handle, int cycles_late) {
return;
}
+ thread->waitsynch_waited = false;
+
if (thread->status == THREADSTATUS_WAIT_SYNCH) {
thread->SetWaitSynchronizationResult(ResultCode(ErrorDescription::Timeout, ErrorModule::OS,
ErrorSummary::StatusChanged, ErrorLevel::Info));
@@ -288,63 +306,20 @@ void Thread::WakeAfterDelay(s64 nanoseconds) {
CoreTiming::ScheduleEvent(usToCycles(microseconds), ThreadWakeupEventType, callback_handle);
}
-void Thread::ReleaseWaitObject(WaitObject* wait_object) {
- if (status != THREADSTATUS_WAIT_SYNCH || wait_objects.empty()) {
- LOG_CRITICAL(Kernel, "thread is not waiting on any objects!");
- return;
- }
-
- // Remove this thread from the waiting object's thread list
- wait_object->RemoveWaitingThread(this);
-
- unsigned index = 0;
- bool wait_all_failed = false; // Will be set to true if any object is unavailable
-
- // Iterate through all waiting objects to check availability...
- for (auto itr = wait_objects.begin(); itr != wait_objects.end(); ++itr) {
- if ((*itr)->ShouldWait())
- wait_all_failed = true;
-
- // The output should be the last index of wait_object
- if (*itr == wait_object)
- index = itr - wait_objects.begin();
- }
-
- // If we are waiting on all objects...
- if (wait_all) {
- // Resume the thread only if all are available...
- if (!wait_all_failed) {
- SetWaitSynchronizationResult(RESULT_SUCCESS);
- SetWaitSynchronizationOutput(-1);
-
- ResumeFromWait();
- }
- } else {
- // Otherwise, resume
- SetWaitSynchronizationResult(RESULT_SUCCESS);
-
- if (wait_set_output)
- SetWaitSynchronizationOutput(index);
-
- ResumeFromWait();
- }
-}
-
void Thread::ResumeFromWait() {
- // Cancel any outstanding wakeup events for this thread
- CoreTiming::UnscheduleEvent(ThreadWakeupEventType, callback_handle);
-
switch (status) {
case THREADSTATUS_WAIT_SYNCH:
- // Remove this thread from all other WaitObjects
- for (auto wait_object : wait_objects)
- wait_object->RemoveWaitingThread(this);
- break;
case THREADSTATUS_WAIT_ARB:
case THREADSTATUS_WAIT_SLEEP:
break;
- case THREADSTATUS_RUNNING:
+
case THREADSTATUS_READY:
+ // If the thread is waiting on multiple wait objects, it might be awoken more than once
+ // before actually resuming. We can ignore subsequent wakeups if the thread status has
+ // already been set to THREADSTATUS_READY.
+ return;
+
+ case THREADSTATUS_RUNNING:
DEBUG_ASSERT_MSG(false, "Thread with object id %u has already resumed.", GetObjectId());
return;
case THREADSTATUS_DEAD:
@@ -353,7 +328,7 @@ void Thread::ResumeFromWait() {
GetObjectId());
return;
}
-
+
ready_queue.push_back(current_priority, this);
status = THREADSTATUS_READY;
}
@@ -415,6 +390,7 @@ ResultVal<SharedPtr<Thread>> Thread::Create(std::string name, VAddr entry_point,
thread->callback_handle = wakeup_callback_handle_table.Create(thread).MoveFrom();
thread->owner_process = g_current_process;
thread->tls_index = -1;
+ thread->waitsynch_waited = false;
// Find the next available TLS index, and mark it as used
auto& used_tls_slots = Kernel::g_current_process->used_tls_slots;
@@ -504,7 +480,7 @@ void Reschedule() {
} else if (next) {
LOG_TRACE(Kernel, "context switch idle -> %u", next->GetObjectId());
}
-
+
SwitchContext(next);
}
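
With wake-all in place, a thread blocked on several objects in a wait-any can receive one wakeup per signaled object, which is why ResumeFromWait now treats THREADSTATUS_READY as an ignorable duplicate rather than an error. A toy state machine of just that rule (the enum mirrors the names above, not Citra's actual type):

    #include <cassert>

    enum class Status { WaitSynch, Ready, Running, Dead };

    struct ToyThread {
        Status status = Status::WaitSynch;

        // Mirrors ResumeFromWait: later wakeups of an already-READY thread
        // are ignored instead of double-queueing it on the ready queue.
        void ResumeFromWait() {
            switch (status) {
            case Status::WaitSynch:
                status = Status::Ready; // pushed onto the ready queue once
                break;
            case Status::Ready:
                break; // duplicate wakeup from another signaled object: no-op
            case Status::Running:
            case Status::Dead:
                break; // asserted/logged in the real code
            }
        }
    };

    int main() {
        ToyThread t;
        t.ResumeFromWait(); // first signaled object wakes it
        t.ResumeFromWait(); // second signaled object: ignored
        assert(t.status == Status::Ready);
    }
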
diff --git a/src/core/hle/kernel/thread.h b/src/core/hle/kernel/thread.h
index 389928178..b8160bb2c 100644
--- a/src/core/hle/kernel/thread.h
+++ b/src/core/hle/kernel/thread.h
@@ -94,12 +94,6 @@ public:
* @return The thread's ID
*/
u32 GetThreadId() const { return thread_id; }
-
- /**
- * Release an acquired wait object
- * @param wait_object WaitObject to release
- */
- void ReleaseWaitObject(WaitObject* wait_object);
/**
* Resumes a thread from waiting
@@ -152,6 +146,8 @@ public:
s32 tls_index; ///< Index of the Thread Local Storage of the thread
+ bool waitsynch_waited; ///< Set to true if the last svcWaitSynch call caused the thread to wait
+
/// Mutexes currently held by this thread, which will be released when it exits.
boost::container::flat_set<SharedPtr<Mutex>> held_mutexes;
@@ -163,12 +159,12 @@ public:
std::string name;
+ /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
+ Handle callback_handle;
+
private:
Thread();
~Thread() override;
-
- /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
- Handle callback_handle;
};
/**
diff --git a/src/core/hle/kernel/timer.cpp b/src/core/hle/kernel/timer.cpp
index 25d066bf1..8aa4110a6 100644
--- a/src/core/hle/kernel/timer.cpp
+++ b/src/core/hle/kernel/timer.cpp
@@ -88,7 +88,7 @@ static void TimerCallback(u64 timer_handle, int cycles_late) {
if (timer->interval_delay != 0) {
// Reschedule the timer with the interval delay
u64 interval_microseconds = timer->interval_delay / 1000;
- CoreTiming::ScheduleEvent(usToCycles(interval_microseconds) - cycles_late,
+ CoreTiming::ScheduleEvent(usToCycles(interval_microseconds) - cycles_late,
timer_callback_event_type, timer_handle);
}
}
diff --git a/src/core/hle/kernel/vm_manager.cpp b/src/core/hle/kernel/vm_manager.cpp
new file mode 100644
index 000000000..b2dd21542
--- /dev/null
+++ b/src/core/hle/kernel/vm_manager.cpp
@@ -0,0 +1,245 @@
+// Copyright 2015 Citra Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#include "common/assert.h"
+
+#include "core/hle/kernel/vm_manager.h"
+#include "core/memory_setup.h"
+
+namespace Kernel {
+
+bool VirtualMemoryArea::CanBeMergedWith(const VirtualMemoryArea& next) const {
+ ASSERT(base + size == next.base);
+ if (permissions != next.permissions ||
+ meminfo_state != next.meminfo_state ||
+ type != next.type) {
+ return false;
+ }
+ if (type == VMAType::AllocatedMemoryBlock &&
+ (backing_block != next.backing_block || offset + size != next.offset)) {
+ return false;
+ }
+ if (type == VMAType::BackingMemory && backing_memory + size != next.backing_memory) {
+ return false;
+ }
+ if (type == VMAType::MMIO && paddr + size != next.paddr) {
+ return false;
+ }
+ return true;
+}
+
+VMManager::VMManager() {
+ Reset();
+}
+
+void VMManager::Reset() {
+ vma_map.clear();
+
+ // Initialize the map with a single free region covering the entire managed space.
+ VirtualMemoryArea initial_vma;
+ initial_vma.size = MAX_ADDRESS;
+ vma_map.emplace(initial_vma.base, initial_vma);
+
+ UpdatePageTableForVMA(initial_vma);
+}
+
+VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {
+ return std::prev(vma_map.upper_bound(target));
+}
+
+ResultVal<VMManager::VMAHandle> VMManager::MapMemoryBlock(VAddr target,
+ std::shared_ptr<std::vector<u8>> block, u32 offset, u32 size, MemoryState state) {
+ ASSERT(block != nullptr);
+ ASSERT(offset + size <= block->size());
+
+ // This is the appropriately sized VMA that will turn into our allocation.
+ CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
+ VirtualMemoryArea& final_vma = vma_handle->second;
+ ASSERT(final_vma.size == size);
+
+ final_vma.type = VMAType::AllocatedMemoryBlock;
+ final_vma.permissions = VMAPermission::ReadWrite;
+ final_vma.meminfo_state = state;
+ final_vma.backing_block = block;
+ final_vma.offset = offset;
+ UpdatePageTableForVMA(final_vma);
+
+ return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
+}
+
+ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, u8* memory, u32 size, MemoryState state) {
+ ASSERT(memory != nullptr);
+
+ // This is the appropriately sized VMA that will turn into our allocation.
+ CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
+ VirtualMemoryArea& final_vma = vma_handle->second;
+ ASSERT(final_vma.size == size);
+
+ final_vma.type = VMAType::BackingMemory;
+ final_vma.permissions = VMAPermission::ReadWrite;
+ final_vma.meminfo_state = state;
+ final_vma.backing_memory = memory;
+ UpdatePageTableForVMA(final_vma);
+
+ return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
+}
+
+ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state) {
+ // This is the appropriately sized VMA that will turn into our allocation.
+ CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
+ VirtualMemoryArea& final_vma = vma_handle->second;
+ ASSERT(final_vma.size == size);
+
+ final_vma.type = VMAType::MMIO;
+ final_vma.permissions = VMAPermission::ReadWrite;
+ final_vma.meminfo_state = state;
+ final_vma.paddr = paddr;
+ UpdatePageTableForVMA(final_vma);
+
+ return MakeResult<VMAHandle>(MergeAdjacent(vma_handle));
+}
+
+void VMManager::Unmap(VMAHandle vma_handle) {
+ VMAIter iter = StripIterConstness(vma_handle);
+
+ VirtualMemoryArea& vma = iter->second;
+ vma.type = VMAType::Free;
+ vma.permissions = VMAPermission::None;
+ vma.meminfo_state = MemoryState::Free;
+
+ vma.backing_block = nullptr;
+ vma.offset = 0;
+ vma.backing_memory = nullptr;
+ vma.paddr = 0;
+
+ UpdatePageTableForVMA(vma);
+
+ MergeAdjacent(iter);
+}
+
+void VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
+ VMAIter iter = StripIterConstness(vma_handle);
+
+ VirtualMemoryArea& vma = iter->second;
+ vma.permissions = new_perms;
+ UpdatePageTableForVMA(vma);
+
+ MergeAdjacent(iter);
+}
+
+VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
+ // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
+ // non-const access to its container.
+ return vma_map.erase(iter, iter); // Erases an empty range of elements
+}
+
+ResultVal<VMManager::VMAIter> VMManager::CarveVMA(VAddr base, u32 size) {
+ ASSERT_MSG((size & Memory::PAGE_MASK) == 0, "non-page aligned size: %08X", size);
+ ASSERT_MSG((base & Memory::PAGE_MASK) == 0, "non-page aligned base: %08X", base);
+
+ VMAIter vma_handle = StripIterConstness(FindVMA(base));
+ if (vma_handle == vma_map.end()) {
+ // Target address is outside the range managed by the kernel
+ return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
+ ErrorSummary::InvalidArgument, ErrorLevel::Usage); // 0xE0E01BF5
+ }
+
+ VirtualMemoryArea& vma = vma_handle->second;
+ if (vma.type != VMAType::Free) {
+ // Region is already allocated
+ return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
+ ErrorSummary::InvalidState, ErrorLevel::Usage); // 0xE0A01BF5
+ }
+
+ u32 start_in_vma = base - vma.base;
+ u32 end_in_vma = start_in_vma + size;
+
+ if (end_in_vma > vma.size) {
+ // Requested allocation doesn't fit inside VMA
+ return ResultCode(ErrorDescription::InvalidAddress, ErrorModule::OS,
+ ErrorSummary::InvalidState, ErrorLevel::Usage); // 0xE0A01BF5
+ }
+
+ if (end_in_vma != vma.size) {
+ // Split VMA at the end of the allocated region
+ SplitVMA(vma_handle, end_in_vma);
+ }
+ if (start_in_vma != 0) {
+ // Split VMA at the start of the allocated region
+ vma_handle = SplitVMA(vma_handle, start_in_vma);
+ }
+
+ return MakeResult<VMAIter>(vma_handle);
+}
+
+VMManager::VMAIter VMManager::SplitVMA(VMAIter vma_handle, u32 offset_in_vma) {
+ VirtualMemoryArea& old_vma = vma_handle->second;
+ VirtualMemoryArea new_vma = old_vma; // Make a copy of the VMA
+
+ // For now, don't allow no-op VMA splits (trying to split at a boundary) because it's probably
+ // a bug. This restriction might be removed later.
+ ASSERT(offset_in_vma < old_vma.size);
+ ASSERT(offset_in_vma > 0);
+
+ old_vma.size = offset_in_vma;
+ new_vma.base += offset_in_vma;
+ new_vma.size -= offset_in_vma;
+
+ switch (new_vma.type) {
+ case VMAType::Free:
+ break;
+ case VMAType::AllocatedMemoryBlock:
+ new_vma.offset += offset_in_vma;
+ break;
+ case VMAType::BackingMemory:
+ new_vma.backing_memory += offset_in_vma;
+ break;
+ case VMAType::MMIO:
+ new_vma.paddr += offset_in_vma;
+ break;
+ }
+
+ ASSERT(old_vma.CanBeMergedWith(new_vma));
+
+ return vma_map.emplace_hint(std::next(vma_handle), new_vma.base, new_vma);
+}
+
+VMManager::VMAIter VMManager::MergeAdjacent(VMAIter iter) {
+ VMAIter next_vma = std::next(iter);
+ if (next_vma != vma_map.end() && iter->second.CanBeMergedWith(next_vma->second)) {
+ iter->second.size += next_vma->second.size;
+ vma_map.erase(next_vma);
+ }
+
+ if (iter != vma_map.begin()) {
+ VMAIter prev_vma = std::prev(iter);
+ if (prev_vma->second.CanBeMergedWith(iter->second)) {
+ prev_vma->second.size += iter->second.size;
+ vma_map.erase(iter);
+ iter = prev_vma;
+ }
+ }
+
+ return iter;
+}
+
+void VMManager::UpdatePageTableForVMA(const VirtualMemoryArea& vma) {
+ switch (vma.type) {
+ case VMAType::Free:
+ Memory::UnmapRegion(vma.base, vma.size);
+ break;
+ case VMAType::AllocatedMemoryBlock:
+ Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_block->data() + vma.offset);
+ break;
+ case VMAType::BackingMemory:
+ Memory::MapMemoryRegion(vma.base, vma.size, vma.backing_memory);
+ break;
+ case VMAType::MMIO:
+ // TODO(yuriks): Add support for MMIO handlers.
+ Memory::MapIoRegion(vma.base, vma.size);
+ break;
+ }
+}
+
+} // namespace Kernel
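
StripIterConstness leans on a C++11 library guarantee: std::map::erase accepts const_iterators and returns a mutable iterator, so erasing the empty range [it, it) converts constness without touching the container. A standalone demonstration of the same trick on a plain std::map:

    #include <cassert>
    #include <map>

    int main() {
        std::map<int, int> m{{1, 10}, {2, 20}};

        std::map<int, int>::const_iterator cit = m.find(2);
        // Erasing an empty range removes nothing but yields a mutable iterator.
        std::map<int, int>::iterator it = m.erase(cit, cit);

        assert(m.size() == 2); // nothing was erased
        it->second = 42;       // mutable access through the converted iterator
        assert(m[2] == 42);
    }
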
diff --git a/src/core/hle/kernel/vm_manager.h b/src/core/hle/kernel/vm_manager.h
new file mode 100644
index 000000000..22b724603
--- /dev/null
+++ b/src/core/hle/kernel/vm_manager.h
@@ -0,0 +1,200 @@
+// Copyright 2015 Citra Emulator Project
+// Licensed under GPLv2 or any later version
+// Refer to the license.txt file included.
+
+#pragma once
+
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "common/common_types.h"
+
+#include "core/hle/result.h"
+
+namespace Kernel {
+
+enum class VMAType : u8 {
+ /// VMA represents an unmapped region of the address space.
+ Free,
+ /// VMA is backed by a ref-counted, allocated memory block.
+ AllocatedMemoryBlock,
+ /// VMA is backed by a raw, unmanaged pointer.
+ BackingMemory,
+ /// VMA is mapped to MMIO registers at a fixed PAddr.
+ MMIO,
+ // TODO(yuriks): Implement MemoryAlias to support MAP/UNMAP
+};
+
+/// Permissions for mapped memory blocks
+enum class VMAPermission : u8 {
+ None = 0,
+ Read = 1,
+ Write = 2,
+ Execute = 4,
+
+ ReadWrite = Read | Write,
+ ReadExecute = Read | Execute,
+ WriteExecute = Write | Execute,
+ ReadWriteExecute = Read | Write | Execute,
+};
+
+/// Set of values returned in MemoryInfo.state by svcQueryMemory.
+enum class MemoryState : u8 {
+ Free = 0,
+ Reserved = 1,
+ IO = 2,
+ Static = 3,
+ Code = 4,
+ Private = 5,
+ Shared = 6,
+ Continuous = 7,
+ Aliased = 8,
+ Alias = 9,
+ AliasCode = 10,
+ Locked = 11,
+};
+
+/**
+ * Represents a VMA in an address space. A VMA is a contiguous region of virtual addressing space
+ * with homogeneous attributes across its extents. In this particular implementation each VMA is
+ * also backed by a single host memory allocation.
+ */
+struct VirtualMemoryArea {
+ /// Virtual base address of the region.
+ VAddr base = 0;
+ /// Size of the region.
+ u32 size = 0;
+
+ VMAType type = VMAType::Free;
+ VMAPermission permissions = VMAPermission::None;
+ /// Tag returned by svcQueryMemory. Not otherwise used.
+ MemoryState meminfo_state = MemoryState::Free;
+
+ // Settings for type = AllocatedMemoryBlock
+ /// Memory block backing this VMA.
+ std::shared_ptr<std::vector<u8>> backing_block = nullptr;
+ /// Offset into backing_block that the mapping starts from.
+ u32 offset = 0;
+
+ // Settings for type = BackingMemory
+ /// Pointer backing this VMA. It will not be destroyed or freed when the VMA is removed.
+ u8* backing_memory = nullptr;
+
+ // Settings for type = MMIO
+ /// Physical address of the register area this VMA maps to.
+ PAddr paddr = 0;
+
+ /// Tests if this area can be merged to the right with `next`.
+ bool CanBeMergedWith(const VirtualMemoryArea& next) const;
+};
+
+/**
+ * Manages a process' virtual addressing space. This class maintains a list of allocated and free
+ * regions in the address space, along with their attributes, and allows kernel clients to
+ * manipulate it, adjusting the page table to match.
+ *
+ * This is similar in idea and purpose to the VM manager present in operating system kernels, with
+ * the main difference being that it doesn't have to support swapping or memory mapping of files.
+ * The implementation is also simplified by not having to allocate page frames. See these articles
+ * about the Linux kernel for an explanation of the concept and implementation:
+ * - http://duartes.org/gustavo/blog/post/how-the-kernel-manages-your-memory/
+ * - http://duartes.org/gustavo/blog/post/page-cache-the-affair-between-memory-and-files/
+ */
+class VMManager {
+ // TODO(yuriks): Make page tables switchable to support multiple VMManagers
+public:
+ /**
+ * The maximum amount of address space managed by the kernel. Addresses above this are never used.
+ * @note This is the limit used by the New 3DS kernel. Old 3DS used 0x20000000.
+ */
+ static const u32 MAX_ADDRESS = 0x40000000;
+
+ /**
+ * A map covering the entirety of the managed address space, keyed by the `base` field of each
+ * VMA. It must always be modified by splitting or merging VMAs, so that the invariant
+ * `elem.base + elem.size == next.base` is preserved, and mergeable regions must always be
+ * merged when possible so that no two similar and adjacent regions exist that have not been
+ * merged.
+ */
+ std::map<VAddr, VirtualMemoryArea> vma_map;
+ using VMAHandle = decltype(vma_map)::const_iterator;
+
+ VMManager();
+
+ /// Clears the address space map, re-initializing with a single free area.
+ void Reset();
+
+ /// Finds the VMA that contains the given address, or `vma_map.end()` if there is none.
+ VMAHandle FindVMA(VAddr target) const;
+
+ // TODO(yuriks): Should these functions actually return the handle?
+
+ /**
+ * Maps part of a ref-counted block of memory at a given address.
+ *
+ * @param target The guest address to start the mapping at.
+ * @param block The block to be mapped.
+ * @param offset Offset into `block` to map from.
+ * @param size Size of the mapping.
+ * @param state MemoryState tag to attach to the VMA.
+ */
+ ResultVal<VMAHandle> MapMemoryBlock(VAddr target, std::shared_ptr<std::vector<u8>> block,
+ u32 offset, u32 size, MemoryState state);
+
+ /**
+ * Maps an unmanaged host memory pointer at a given address.
+ *
+ * @param target The guest address to start the mapping at.
+ * @param memory The memory to be mapped.
+ * @param size Size of the mapping.
+ * @param state MemoryState tag to attach to the VMA.
+ */
+ ResultVal<VMAHandle> MapBackingMemory(VAddr target, u8* memory, u32 size, MemoryState state);
+
+ /**
+ * Maps a memory-mapped IO region at a given address.
+ *
+ * @param target The guest address to start the mapping at.
+ * @param paddr The physical address where the registers are present.
+ * @param size Size of the mapping.
+ * @param state MemoryState tag to attach to the VMA.
+ */
+ ResultVal<VMAHandle> MapMMIO(VAddr target, PAddr paddr, u32 size, MemoryState state);
+
+ /// Unmaps the given VMA.
+ void Unmap(VMAHandle vma);
+
+ /// Changes the permissions of the given VMA.
+ void Reprotect(VMAHandle vma, VMAPermission new_perms);
+
+private:
+ using VMAIter = decltype(vma_map)::iterator;
+
+ /// Converts a VMAHandle to a mutable VMAIter.
+ VMAIter StripIterConstness(const VMAHandle& iter);
+
+ /**
+ * Carves a VMA of a specific size at the specified address by splitting Free VMAs while doing
+ * the appropriate error checking.
+ */
+ ResultVal<VMAIter> CarveVMA(VAddr base, u32 size);
+
+ /**
+ * Splits a VMA in two, at the specified offset.
+ * @returns the right side of the split, with the original iterator becoming the left side.
+ */
+ VMAIter SplitVMA(VMAIter vma, u32 offset_in_vma);
+
+ /**
+ * Checks for and merges the specified VMA with adjacent ones if possible.
+ * @returns the merged VMA or the original if no merging was possible.
+ */
+ VMAIter MergeAdjacent(VMAIter vma);
+
+ /// Updates the pages corresponding to this VMA so they match the VMA's attributes.
+ void UpdatePageTableForVMA(const VirtualMemoryArea& vma);
+};
+
+} // namespace Kernel
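
The class comment above pins down the key invariant: vma_map always tiles the managed space, so elem.base + elem.size == next.base holds for every adjacent pair, and CarveVMA/SplitVMA/MergeAdjacent only ever split or coalesce tiles. A standalone toy (a simplified Area struct, not the real VirtualMemoryArea) that performs one carve by hand and then checks the invariant:

    #include <cassert>
    #include <cstdint>
    #include <iterator>
    #include <map>

    using VAddr = std::uint32_t;
    struct Area { VAddr base; std::uint32_t size; bool free; };

    int main() {
        const std::uint32_t MAX_ADDRESS = 0x40000000; // as in VMManager
        std::map<VAddr, Area> areas;
        areas[0] = {0, MAX_ADDRESS, true}; // single free area, like Reset()

        // Carve [0x1000, 0x3000) out of the free area, as CarveVMA would:
        // split at the end offset, then at the start offset.
        Area whole = areas[0];
        areas[0]      = {0, 0x1000, true};
        areas[0x1000] = {0x1000, 0x2000, false};
        areas[0x3000] = {0x3000, whole.size - 0x3000, true};

        // The tiling invariant holds across every neighbouring pair.
        for (auto it = areas.begin(); std::next(it) != areas.end(); ++it)
            assert(it->second.base + it->second.size ==
                   std::next(it)->second.base);
    }
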