Diffstat (limited to 'src/core')
-rw-r--r--  src/core/hle/kernel/k_page_table.cpp         | 560
-rw-r--r--  src/core/hle/service/am/am.cpp               |   6
-rw-r--r--  src/core/hle/service/am/am.h                 |  36
-rw-r--r--  src/core/hle/service/apm/apm_controller.cpp  |  10
-rw-r--r--  src/core/hle/service/apm/apm_controller.h    |  15
5 files changed, 483 insertions(+), 144 deletions(-)
diff --git a/src/core/hle/kernel/k_page_table.cpp b/src/core/hle/kernel/k_page_table.cpp
index 912853e5c..88aa2a152 100644
--- a/src/core/hle/kernel/k_page_table.cpp
+++ b/src/core/hle/kernel/k_page_table.cpp
@@ -41,24 +41,6 @@ constexpr std::size_t GetAddressSpaceWidthFromType(FileSys::ProgramAddressSpaceT
}
}
-constexpr u64 GetAddressInRange(const KMemoryInfo& info, VAddr addr) {
- if (info.GetAddress() < addr) {
- return addr;
- }
- return info.GetAddress();
-}
-
-constexpr std::size_t GetSizeInRange(const KMemoryInfo& info, VAddr start, VAddr end) {
- std::size_t size{info.GetSize()};
- if (info.GetAddress() < start) {
- size -= start - info.GetAddress();
- }
- if (info.GetEndAddress() > end) {
- size -= info.GetEndAddress() - end;
- }
- return size;
-}
-
} // namespace
KPageTable::KPageTable(Core::System& system_)
@@ -400,148 +382,471 @@ ResultCode KPageTable::UnmapProcessMemory(VAddr dst_addr, std::size_t size,
return ResultSuccess;
}
-ResultCode KPageTable::MapPhysicalMemory(VAddr addr, std::size_t size) {
+ResultCode KPageTable::MapPhysicalMemory(VAddr address, std::size_t size) {
// Lock the physical memory lock.
KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
- // Lock the table.
- KScopedLightLock lk(general_lock);
-
- std::size_t mapped_size{};
- const VAddr end_addr{addr + size};
+ // Calculate the last address for convenience.
+ const VAddr last_address = address + size - 1;
- block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
- if (info.state != KMemoryState::Free) {
- mapped_size += GetSizeInRange(info, addr, end_addr);
- }
- });
+ // Define iteration variables.
+ VAddr cur_address;
+ std::size_t mapped_size;
- if (mapped_size == size) {
- return ResultSuccess;
- }
+ // The entire mapping process can be retried.
+ while (true) {
+ // Check if the memory is already mapped.
+ {
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ // Iterate over the memory.
+ cur_address = address;
+ mapped_size = 0;
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (info.GetState() != KMemoryState::Free) {
+ mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
+ }
+
+ // Track the memory if it's mapped.
+ if (info.GetState() != KMemoryState::Free) {
+ mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
- const std::size_t remaining_size{size - mapped_size};
- const std::size_t remaining_pages{remaining_size / PageSize};
+ // If the size mapped is the size requested, we've nothing to do.
+ R_SUCCEED_IF(size == mapped_size);
+ }
- // Reserve the memory from the process resource limit.
- KScopedResourceReservation memory_reservation(
- system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
- remaining_size);
- if (!memory_reservation.Succeeded()) {
- LOG_ERROR(Kernel, "Could not reserve remaining {:X} bytes", remaining_size);
- return ResultLimitReached;
+ // Allocate and map the memory.
+ {
+ // Reserve the memory from the process resource limit.
+ KScopedResourceReservation memory_reservation(
+ system.Kernel().CurrentProcess()->GetResourceLimit(),
+ LimitableResource::PhysicalMemory, size - mapped_size);
+ R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
+
+ // Allocate pages for the new memory.
+ KPageLinkedList page_linked_list;
+ R_TRY(system.Kernel().MemoryManager().Allocate(
+ page_linked_list, (size - mapped_size) / PageSize, memory_pool, allocation_option));
+
+ // Map the memory.
+ {
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
+
+ size_t num_allocator_blocks = 0;
+
+ // Verify that nobody has mapped memory since we first checked.
+ {
+ // Iterate over the memory.
+ size_t checked_mapped_size = 0;
+ cur_address = address;
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ const bool is_free = info.GetState() == KMemoryState::Free;
+ if (is_free) {
+ if (info.GetAddress() < address) {
+ ++num_allocator_blocks;
+ }
+ if (last_address < info.GetLastAddress()) {
+ ++num_allocator_blocks;
+ }
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (!is_free) {
+ checked_mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
+ }
+
+ // Track the memory if it's mapped.
+ if (!is_free) {
+ checked_mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // If the size now isn't what it was before, somebody mapped or unmapped
+ // concurrently. If this happened, retry.
+ if (mapped_size != checked_mapped_size) {
+ continue;
+ }
+ }
+
+ // Reset the current tracking address, and make sure we clean up on failure.
+ cur_address = address;
+ auto unmap_guard = detail::ScopeExit([&] {
+ if (cur_address > address) {
+ const VAddr last_unmap_address = cur_address - 1;
+
+ // Iterate, unmapping the pages.
+ cur_address = address;
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the memory state is free, we mapped it and need to unmap it.
+ if (info.GetState() == KMemoryState::Free) {
+ // Determine the range to unmap.
+ const size_t cur_pages =
+ std::min(VAddr(info.GetEndAddress()) - cur_address,
+ last_unmap_address + 1 - cur_address) /
+ PageSize;
+
+ // Unmap.
+ ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
+ OperationType::Unmap)
+ .IsSuccess());
+ }
+
+ // Check if we're done.
+ if (last_unmap_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+ }
+ });
+
+ // Iterate over the memory.
+ auto pg_it = page_linked_list.Nodes().begin();
+ PAddr pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If it's unmapped, we need to map it.
+ if (info.GetState() == KMemoryState::Free) {
+ // Determine the range to map.
+ size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+ last_address + 1 - cur_address) /
+ PageSize;
+
+ // While we have pages to map, map them.
+ while (map_pages > 0) {
+ // Check if we're at the end of the physical block.
+ if (pg_pages == 0) {
+ // Ensure there are more pages to map.
+ ASSERT(pg_it != page_linked_list.Nodes().end());
+
+ // Advance our physical block.
+ ++pg_it;
+ pg_phys_addr = pg_it->GetAddress();
+ pg_pages = pg_it->GetNumPages();
+ }
+
+ // Map whatever we can.
+ const size_t cur_pages = std::min(pg_pages, map_pages);
+ R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
+ OperationType::Map, pg_phys_addr));
+
+ // Advance.
+ cur_address += cur_pages * PageSize;
+ map_pages -= cur_pages;
+
+ pg_phys_addr += cur_pages * PageSize;
+ pg_pages -= cur_pages;
+ }
+ }
+
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+
+ // We succeeded, so commit the memory reservation.
+ memory_reservation.Commit();
+
+ // Increase our tracked mapped size.
+ mapped_physical_memory_size += (size - mapped_size);
+
+ // Update the relevant memory blocks.
+ block_manager->Update(address, size / PageSize, KMemoryState::Free,
+ KMemoryPermission::None, KMemoryAttribute::None,
+ KMemoryState::Normal, KMemoryPermission::UserReadWrite,
+ KMemoryAttribute::None);
+
+ // Cancel our guard.
+ unmap_guard.Cancel();
+
+ return ResultSuccess;
+ }
+ }
}
+}
- KPageLinkedList page_linked_list;
+ResultCode KPageTable::UnmapPhysicalMemory(VAddr address, std::size_t size) {
+ // Lock the physical memory lock.
+ KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
- CASCADE_CODE(system.Kernel().MemoryManager().Allocate(page_linked_list, remaining_pages,
- memory_pool, allocation_option));
+ // Lock the table.
+ KScopedLightLock lk(general_lock);
- // We succeeded, so commit the memory reservation.
- memory_reservation.Commit();
+ // Calculate the last address for convenience.
+ const VAddr last_address = address + size - 1;
- // Map the memory.
- auto node{page_linked_list.Nodes().begin()};
- PAddr map_addr{node->GetAddress()};
- std::size_t src_num_pages{node->GetNumPages()};
- block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
- if (info.state != KMemoryState::Free) {
- return;
- }
+ // Define iteration variables.
+ VAddr cur_address = 0;
+ std::size_t mapped_size = 0;
+ std::size_t num_allocator_blocks = 0;
- std::size_t dst_num_pages{GetSizeInRange(info, addr, end_addr) / PageSize};
- VAddr dst_addr{GetAddressInRange(info, addr)};
+ // Check if the memory is mapped.
+ {
+ // Iterate over the memory.
+ cur_address = address;
+ mapped_size = 0;
+
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
+
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // Verify the memory's state.
+ const bool is_normal = info.GetState() == KMemoryState::Normal &&
+ info.GetAttribute() == KMemoryAttribute::None;
+ const bool is_free = info.GetState() == KMemoryState::Free;
+ R_UNLESS(is_normal || is_free, ResultInvalidCurrentMemory);
+
+ if (is_normal) {
+ R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
+
+ if (info.GetAddress() < address) {
+ ++num_allocator_blocks;
+ }
+ if (last_address < info.GetLastAddress()) {
+ ++num_allocator_blocks;
+ }
+ }
- while (dst_num_pages) {
- if (!src_num_pages) {
- node = std::next(node);
- map_addr = node->GetAddress();
- src_num_pages = node->GetNumPages();
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ if (is_normal) {
+ mapped_size += (last_address + 1 - cur_address);
+ }
+ break;
}
- const std::size_t num_pages{std::min(src_num_pages, dst_num_pages)};
- Operate(dst_addr, num_pages, KMemoryPermission::UserReadWrite, OperationType::Map,
- map_addr);
+ // Track the memory if it's mapped.
+ if (is_normal) {
+ mapped_size += VAddr(info.GetEndAddress()) - cur_address;
+ }
- dst_addr += num_pages * PageSize;
- map_addr += num_pages * PageSize;
- src_num_pages -= num_pages;
- dst_num_pages -= num_pages;
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
}
- });
-
- mapped_physical_memory_size += remaining_size;
-
- const std::size_t num_pages{size / PageSize};
- block_manager->Update(addr, num_pages, KMemoryState::Free, KMemoryPermission::None,
- KMemoryAttribute::None, KMemoryState::Normal,
- KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
- return ResultSuccess;
-}
+ // If there's nothing mapped, we've nothing to do.
+ R_SUCCEED_IF(mapped_size == 0);
+ }
-ResultCode KPageTable::UnmapPhysicalMemory(VAddr addr, std::size_t size) {
- // Lock the physical memory lock.
- KScopedLightLock map_phys_mem_lk(map_physical_memory_lock);
+ // Make a page group for the unmap region.
+ KPageLinkedList pg;
+ {
+ auto& impl = this->PageTableImpl();
+
+ // Begin traversal.
+ Common::PageTable::TraversalContext context;
+ Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
+ bool cur_valid = false;
+ Common::PageTable::TraversalEntry next_entry;
+ bool next_valid = false;
+ size_t tot_size = 0;
+
+ cur_address = address;
+ next_valid = impl.BeginTraversal(next_entry, context, cur_address);
+ next_entry.block_size =
+ (next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1)));
+
+ // Iterate, building the group.
+ while (true) {
+ if ((!next_valid && !cur_valid) ||
+ (next_valid && cur_valid &&
+ next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
+ cur_entry.block_size += next_entry.block_size;
+ } else {
+ if (cur_valid) {
+ // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
+ R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize));
+ }
+
+ // Update tracking variables.
+ tot_size += cur_entry.block_size;
+ cur_entry = next_entry;
+ cur_valid = next_valid;
+ }
- // Lock the table.
- KScopedLightLock lk(general_lock);
+ if (cur_entry.block_size + tot_size >= size) {
+ break;
+ }
- const VAddr end_addr{addr + size};
- ResultCode result{ResultSuccess};
- std::size_t mapped_size{};
+ next_valid = impl.ContinueTraversal(next_entry, context);
+ }
- // Verify that the region can be unmapped
- block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
- if (info.state == KMemoryState::Normal) {
- if (info.attribute != KMemoryAttribute::None) {
- result = ResultInvalidCurrentMemory;
- return;
+ // Add the last block.
+ if (cur_valid) {
+ // ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
+ R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize));
+ }
+ }
+ ASSERT(pg.GetNumPages() == mapped_size / PageSize);
+
+ // Reset the current tracking address, and make sure we clean up on failure.
+ cur_address = address;
+ auto remap_guard = detail::ScopeExit([&] {
+ if (cur_address > address) {
+ const VAddr last_map_address = cur_address - 1;
+ cur_address = address;
+
+ // Iterate over the memory we unmapped.
+ auto it = block_manager->FindIterator(cur_address);
+ auto pg_it = pg.Nodes().begin();
+ PAddr pg_phys_addr = pg_it->GetAddress();
+ size_t pg_pages = pg_it->GetNumPages();
+
+ while (true) {
+ // Get the memory info for the pages we unmapped, convert to property.
+ const KMemoryInfo info = it->GetMemoryInfo();
+
+ // If the memory is normal, we unmapped it and need to re-map it.
+ if (info.GetState() == KMemoryState::Normal) {
+ // Determine the range to map.
+ size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+ last_map_address + 1 - cur_address) /
+ PageSize;
+
+ // While we have pages to map, map them.
+ while (map_pages > 0) {
+ // Check if we're at the end of the physical block.
+ if (pg_pages == 0) {
+ // Ensure there are more pages to map.
+ ASSERT(pg_it != pg.Nodes().end());
+
+ // Advance our physical block.
+ ++pg_it;
+ pg_phys_addr = pg_it->GetAddress();
+ pg_pages = pg_it->GetNumPages();
+ }
+
+ // Map whatever we can.
+ const size_t cur_pages = std::min(pg_pages, map_pages);
+ ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(),
+ OperationType::Map, pg_phys_addr) == ResultSuccess);
+
+ // Advance.
+ cur_address += cur_pages * PageSize;
+ map_pages -= cur_pages;
+
+ pg_phys_addr += cur_pages * PageSize;
+ pg_pages -= cur_pages;
+ }
+ }
+
+ // Check if we're done.
+ if (last_map_address <= info.GetLastAddress()) {
+ break;
+ }
+
+ // Advance.
+ ++it;
}
- mapped_size += GetSizeInRange(info, addr, end_addr);
- } else if (info.state != KMemoryState::Free) {
- result = ResultInvalidCurrentMemory;
}
});
- if (result.IsError()) {
- return result;
- }
+ // Iterate over the memory, unmapping as we go.
+ auto it = block_manager->FindIterator(cur_address);
+ while (true) {
+ // Check that the iterator is valid.
+ ASSERT(it != block_manager->end());
- if (!mapped_size) {
- return ResultSuccess;
- }
+ // Get the memory info.
+ const KMemoryInfo info = it->GetMemoryInfo();
- // Unmap each region within the range
- KPageLinkedList page_linked_list;
- block_manager->IterateForRange(addr, end_addr, [&](const KMemoryInfo& info) {
- if (info.state == KMemoryState::Normal) {
- const std::size_t block_size{GetSizeInRange(info, addr, end_addr)};
- const std::size_t block_num_pages{block_size / PageSize};
- const VAddr block_addr{GetAddressInRange(info, addr)};
-
- AddRegionToPages(block_addr, block_size / PageSize, page_linked_list);
-
- if (result = Operate(block_addr, block_num_pages, KMemoryPermission::None,
- OperationType::Unmap);
- result.IsError()) {
- return;
- }
+ // If the memory state is normal, we need to unmap it.
+ if (info.GetState() == KMemoryState::Normal) {
+ // Determine the range to unmap.
+ const size_t cur_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
+ last_address + 1 - cur_address) /
+ PageSize;
+
+ // Unmap.
+ R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap));
}
- });
- if (result.IsError()) {
- return result;
- }
- const std::size_t num_pages{size / PageSize};
- system.Kernel().MemoryManager().Free(page_linked_list, num_pages, memory_pool,
- allocation_option);
+ // Check if we're done.
+ if (last_address <= info.GetLastAddress()) {
+ break;
+ }
- block_manager->Update(addr, num_pages, KMemoryState::Free);
+ // Advance.
+ cur_address = info.GetEndAddress();
+ ++it;
+ }
+ // Release the memory resource.
+ mapped_physical_memory_size -= mapped_size;
auto process{system.Kernel().CurrentProcess()};
process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);
- mapped_physical_memory_size -= mapped_size;
+
+ // Update memory blocks.
+ system.Kernel().MemoryManager().Free(pg, size / PageSize, memory_pool, allocation_option);
+ block_manager->Update(address, size / PageSize, KMemoryState::Free, KMemoryPermission::None,
+ KMemoryAttribute::None);
+
+ // We succeeded.
+ remap_guard.Cancel();
return ResultSuccess;
}
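The unmap_guard and remap_guard rollbacks in the hunk above rely on yuzu's detail::ScopeExit helper. The following is a minimal sketch of that RAII pattern under a hypothetical ScopeExitGuard name (not the project's actual implementation): the callable runs when the guard leaves scope, unless the guarded operation succeeded and the guard was cancelled.

```cpp
// Minimal sketch of a scope-exit rollback guard (hypothetical ScopeExitGuard,
// not yuzu's detail::ScopeExit). The callable runs in the destructor unless
// Cancel() was called after the guarded operation succeeded.
#include <utility>

template <typename F>
class ScopeExitGuard {
public:
    explicit ScopeExitGuard(F&& func_) : func{std::forward<F>(func_)} {}
    ~ScopeExitGuard() {
        if (active) {
            func();
        }
    }
    ScopeExitGuard(const ScopeExitGuard&) = delete;
    ScopeExitGuard& operator=(const ScopeExitGuard&) = delete;

    // Call after success so the rollback does not run.
    void Cancel() {
        active = false;
    }

private:
    F func;
    bool active = true;
};
```

Usage mirrors MapPhysicalMemory above: construct the guard with the unmap rollback before mapping, let any early failure return run it, and call Cancel() only once every page is mapped and the memory blocks are updated.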
@@ -681,9 +986,8 @@ ResultCode KPageTable::UnmapPages(VAddr addr, const KPageLinkedList& page_linked
VAddr cur_addr{addr};
for (const auto& node : page_linked_list.Nodes()) {
- const std::size_t num_pages{(addr - cur_addr) / PageSize};
- if (const auto result{
- Operate(addr, num_pages, KMemoryPermission::None, OperationType::Unmap)};
+ if (const auto result{Operate(cur_addr, node.GetNumPages(), KMemoryPermission::None,
+ OperationType::Unmap)};
result.IsError()) {
return result;
}
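UnmapPhysicalMemory above builds its KPageLinkedList by walking the page-table traversal and merging physically contiguous entries into single blocks. The sketch below shows that coalescing idea in isolation, using assumed types (an Extent struct and a plain per-page physical-address list) instead of the kernel's traversal API.

```cpp
// Minimal sketch of extent coalescing: walk per-page physical translations and
// merge runs whose physical addresses are contiguous into single blocks.
// The Extent type and the flat translation list are illustrative assumptions.
#include <cstddef>
#include <cstdint>
#include <vector>

struct Extent {
    std::uint64_t phys_addr;
    std::size_t num_pages;
};

constexpr std::size_t kPageSize = 0x1000;

std::vector<Extent> CoalescePages(const std::vector<std::uint64_t>& translations) {
    std::vector<Extent> extents;
    for (const std::uint64_t phys : translations) {
        if (!extents.empty() &&
            extents.back().phys_addr + extents.back().num_pages * kPageSize == phys) {
            // Physically contiguous with the previous block: extend it.
            ++extents.back().num_pages;
        } else {
            // Start a new block.
            extents.push_back({phys, 1});
        }
    }
    return extents;
}
```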
diff --git a/src/core/hle/service/am/am.cpp b/src/core/hle/service/am/am.cpp
index 773dc9f29..2f8e21568 100644
--- a/src/core/hle/service/am/am.cpp
+++ b/src/core/hle/service/am/am.cpp
@@ -618,7 +618,7 @@ void AppletMessageQueue::PushMessage(AppletMessage msg) {
AppletMessageQueue::AppletMessage AppletMessageQueue::PopMessage() {
if (messages.empty()) {
on_new_message->GetWritableEvent().Clear();
- return AppletMessage::NoMessage;
+ return AppletMessage::None;
}
auto msg = messages.front();
messages.pop();
@@ -633,7 +633,7 @@ std::size_t AppletMessageQueue::GetMessageCount() const {
}
void AppletMessageQueue::RequestExit() {
- PushMessage(AppletMessage::ExitRequested);
+ PushMessage(AppletMessage::Exit);
}
void AppletMessageQueue::FocusStateChanged() {
@@ -732,7 +732,7 @@ void ICommonStateGetter::ReceiveMessage(Kernel::HLERequestContext& ctx) {
const auto message = msg_queue->PopMessage();
IPC::ResponseBuilder rb{ctx, 3};
- if (message == AppletMessageQueue::AppletMessage::NoMessage) {
+ if (message == AppletMessageQueue::AppletMessage::None) {
LOG_ERROR(Service_AM, "Message queue is empty");
rb.Push(ERR_NO_MESSAGES);
rb.PushEnum<AppletMessageQueue::AppletMessage>(message);
diff --git a/src/core/hle/service/am/am.h b/src/core/hle/service/am/am.h
index 2a578aea5..fdd937b82 100644
--- a/src/core/hle/service/am/am.h
+++ b/src/core/hle/service/am/am.h
@@ -22,6 +22,7 @@ class NVFlinger;
namespace Service::AM {
+// This is nn::settings::Language
enum SystemLanguage {
Japanese = 0,
English = 1, // en-US
@@ -41,16 +42,44 @@ enum SystemLanguage {
// 4.0.0+
SimplifiedChinese = 15,
TraditionalChinese = 16,
+ // 10.1.0+
+ BrazilianPortuguese = 17,
};
class AppletMessageQueue {
public:
+ // This is nn::am::AppletMessage
enum class AppletMessage : u32 {
- NoMessage = 0,
- ExitRequested = 4,
+ None = 0,
+ ChangeIntoForeground = 1,
+ ChangeIntoBackground = 2,
+ Exit = 4,
+ ApplicationExited = 6,
FocusStateChanged = 15,
+ Resume = 16,
+ DetectShortPressingHomeButton = 20,
+ DetectLongPressingHomeButton = 21,
+ DetectShortPressingPowerButton = 22,
+ DetectMiddlePressingPowerButton = 23,
+ DetectLongPressingPowerButton = 24,
+ RequestToPrepareSleep = 25,
+ FinishedSleepSequence = 26,
+ SleepRequiredByHighTemperature = 27,
+ SleepRequiredByLowBattery = 28,
+ AutoPowerDown = 29,
OperationModeChanged = 30,
PerformanceModeChanged = 31,
+ DetectReceivingCecSystemStandby = 32,
+ SdCardRemoved = 33,
+ LaunchApplicationRequested = 50,
+ RequestToDisplay = 51,
+ ShowApplicationLogo = 55,
+ HideApplicationLogo = 56,
+ ForceHideApplicationLogo = 57,
+ FloatingApplicationDetected = 60,
+ DetectShortPressingCaptureButton = 90,
+ AlbumScreenShotTaken = 92,
+ AlbumRecordingSaved = 93,
};
explicit AppletMessageQueue(Core::System& system);
@@ -179,11 +208,14 @@ public:
~ICommonStateGetter() override;
private:
+ // This is nn::oe::FocusState
enum class FocusState : u8 {
InFocus = 1,
NotInFocus = 2,
+ Background = 3,
};
+ // This is nn::oe::OperationMode
enum class OperationMode : u8 {
Handheld = 0,
Docked = 1,
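For reference, the renamed AppletMessage values are consumed the same way as before. A simplified sketch of the queue behaviour (not the service code, which also signals a writable KEvent): PopMessage() returns AppletMessage::None when the queue is empty, and RequestExit() pushes AppletMessage::Exit.

```cpp
// Simplified sketch of the applet message queue semantics after the rename.
// Event signalling and the full message set are omitted.
#include <cstdint>
#include <queue>

enum class AppletMessage : std::uint32_t {
    None = 0, // was NoMessage
    Exit = 4, // was ExitRequested
    FocusStateChanged = 15,
};

class SimpleAppletMessageQueue {
public:
    void PushMessage(AppletMessage msg) {
        messages.push(msg);
    }

    AppletMessage PopMessage() {
        if (messages.empty()) {
            return AppletMessage::None;
        }
        const AppletMessage msg = messages.front();
        messages.pop();
        return msg;
    }

    void RequestExit() {
        PushMessage(AppletMessage::Exit);
    }

private:
    std::queue<AppletMessage> messages;
};
```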
diff --git a/src/core/hle/service/apm/apm_controller.cpp b/src/core/hle/service/apm/apm_controller.cpp
index 98839fe97..187fef2ad 100644
--- a/src/core/hle/service/apm/apm_controller.cpp
+++ b/src/core/hle/service/apm/apm_controller.cpp
@@ -17,8 +17,8 @@ constexpr auto DEFAULT_PERFORMANCE_CONFIGURATION = PerformanceConfiguration::Con
Controller::Controller(Core::Timing::CoreTiming& core_timing_)
: core_timing{core_timing_}, configs{
- {PerformanceMode::Handheld, DEFAULT_PERFORMANCE_CONFIGURATION},
- {PerformanceMode::Docked, DEFAULT_PERFORMANCE_CONFIGURATION},
+ {PerformanceMode::Normal, DEFAULT_PERFORMANCE_CONFIGURATION},
+ {PerformanceMode::Boost, DEFAULT_PERFORMANCE_CONFIGURATION},
} {}
Controller::~Controller() = default;
@@ -63,13 +63,13 @@ void Controller::SetFromCpuBoostMode(CpuBoostMode mode) {
PerformanceConfiguration::Config15,
}};
- SetPerformanceConfiguration(PerformanceMode::Docked,
+ SetPerformanceConfiguration(PerformanceMode::Boost,
BOOST_MODE_TO_CONFIG_MAP.at(static_cast<u32>(mode)));
}
PerformanceMode Controller::GetCurrentPerformanceMode() const {
- return Settings::values.use_docked_mode.GetValue() ? PerformanceMode::Docked
- : PerformanceMode::Handheld;
+ return Settings::values.use_docked_mode.GetValue() ? PerformanceMode::Boost
+ : PerformanceMode::Normal;
}
PerformanceConfiguration Controller::GetCurrentPerformanceConfiguration(PerformanceMode mode) {
diff --git a/src/core/hle/service/apm/apm_controller.h b/src/core/hle/service/apm/apm_controller.h
index 8d48e0104..d6fbd2c0c 100644
--- a/src/core/hle/service/apm/apm_controller.h
+++ b/src/core/hle/service/apm/apm_controller.h
@@ -32,15 +32,18 @@ enum class PerformanceConfiguration : u32 {
Config16 = 0x9222000C,
};
+// This is nn::oe::CpuBoostMode
enum class CpuBoostMode : u32 {
- Disabled = 0,
- Full = 1, // CPU + GPU -> Config 13, 14, 15, or 16
- Partial = 2, // GPU Only -> Config 15 or 16
+ Normal = 0, // Boost mode disabled
+ FastLoad = 1, // CPU + GPU -> Config 13, 14, 15, or 16
+ Partial = 2, // GPU Only -> Config 15 or 16
};
-enum class PerformanceMode : u8 {
- Handheld = 0,
- Docked = 1,
+// This is nn::oe::PerformanceMode
+enum class PerformanceMode : s32 {
+ Invalid = -1,
+ Normal = 0,
+ Boost = 1,
};
// Class to manage the state and change of the emulated system performance.
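The renamed APM types fit together as sketched below. Only the Normal/Boost/FastLoad naming comes from the change above; the MiniController type, the placeholder configuration value, and the default entries are illustrative assumptions.

```cpp
// Minimal sketch of how the renamed modes are used: a per-mode configuration
// map, with docked mode selecting PerformanceMode::Boost and handheld mode
// selecting PerformanceMode::Normal. Types and defaults are simplified.
#include <cstdint>
#include <map>

enum class PerformanceMode : std::int32_t { Invalid = -1, Normal = 0, Boost = 1 };

// Stand-in for PerformanceConfiguration; the real values are the 0x9222xxxx constants.
using PerformanceConfiguration = std::uint32_t;

struct MiniController {
    std::map<PerformanceMode, PerformanceConfiguration> configs{
        {PerformanceMode::Normal, 0}, // stand-in for DEFAULT_PERFORMANCE_CONFIGURATION
        {PerformanceMode::Boost, 0},
    };

    PerformanceMode GetCurrentPerformanceMode(bool use_docked_mode) const {
        return use_docked_mode ? PerformanceMode::Boost : PerformanceMode::Normal;
    }

    void SetPerformanceConfiguration(PerformanceMode mode, PerformanceConfiguration config) {
        configs[mode] = config;
    }
};
```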