core: hle: kernel: k_memory_manager: Refresh.

Commit: ba21ba0c5c
Parent: 32d7faafa8
4 changed files with 461 additions and 370 deletions
@@ -29,43 +29,44 @@ constexpr KMemoryManager::Pool GetPoolFromMemoryRegionType(u32 type) {
     } else if ((type | KMemoryRegionType_DramSystemNonSecurePool) == type) {
         return KMemoryManager::Pool::SystemNonSecure;
     } else {
-        ASSERT_MSG(false, "InvalidMemoryRegionType for conversion to Pool");
-        return {};
+        UNREACHABLE_MSG("InvalidMemoryRegionType for conversion to Pool");
     }
 }
 
 } // namespace
 
-KMemoryManager::KMemoryManager(Core::System& system_)
-    : system{system_}, pool_locks{
-                           KLightLock{system_.Kernel()},
-                           KLightLock{system_.Kernel()},
-                           KLightLock{system_.Kernel()},
-                           KLightLock{system_.Kernel()},
-                       } {}
+KMemoryManager::KMemoryManager(Core::System& system)
+    : m_system{system}, m_memory_layout{system.Kernel().MemoryLayout()},
+      m_pool_locks{
+          KLightLock{system.Kernel()},
+          KLightLock{system.Kernel()},
+          KLightLock{system.Kernel()},
+          KLightLock{system.Kernel()},
+      } {}
 
 void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
 
     // Clear the management region to zero.
     const VAddr management_region_end = management_region + management_region_size;
+    // std::memset(GetVoidPointer(management_region), 0, management_region_size);
 
     // Reset our manager count.
-    num_managers = 0;
+    m_num_managers = 0;
 
     // Traverse the virtual memory layout tree, initializing each manager as appropriate.
-    while (num_managers != MaxManagerCount) {
+    while (m_num_managers != MaxManagerCount) {
         // Locate the region that should initialize the current manager.
         PAddr region_address = 0;
         size_t region_size = 0;
         Pool region_pool = Pool::Count;
-        for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
+        for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
             // We only care about regions that we need to create managers for.
             if (!it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
                 continue;
            }
 
            // We want to initialize the managers in order.
-           if (it.GetAttributes() != num_managers) {
+           if (it.GetAttributes() != m_num_managers) {
                continue;
            }
 
@@ -97,8 +98,8 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
         }
 
         // Initialize a new manager for the region.
-        Impl* manager = std::addressof(managers[num_managers++]);
-        ASSERT(num_managers <= managers.size());
+        Impl* manager = std::addressof(m_managers[m_num_managers++]);
+        ASSERT(m_num_managers <= m_managers.size());
 
         const size_t cur_size = manager->Initialize(region_address, region_size, management_region,
                                                     management_region_end, region_pool);
@@ -107,13 +108,13 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
 
         // Insert the manager into the pool list.
         const auto region_pool_index = static_cast<u32>(region_pool);
-        if (pool_managers_tail[region_pool_index] == nullptr) {
-            pool_managers_head[region_pool_index] = manager;
+        if (m_pool_managers_tail[region_pool_index] == nullptr) {
+            m_pool_managers_head[region_pool_index] = manager;
         } else {
-            pool_managers_tail[region_pool_index]->SetNext(manager);
-            manager->SetPrev(pool_managers_tail[region_pool_index]);
+            m_pool_managers_tail[region_pool_index]->SetNext(manager);
+            manager->SetPrev(m_pool_managers_tail[region_pool_index]);
         }
-        pool_managers_tail[region_pool_index] = manager;
+        m_pool_managers_tail[region_pool_index] = manager;
     }
 
     // Free each region to its corresponding heap.
@@ -121,11 +122,10 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
     const PAddr ini_start = GetInitialProcessBinaryPhysicalAddress();
     const PAddr ini_end = ini_start + InitialProcessBinarySizeMax;
     const PAddr ini_last = ini_end - 1;
-    for (const auto& it : system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
+    for (const auto& it : m_system.Kernel().MemoryLayout().GetPhysicalMemoryRegionTree()) {
         if (it.IsDerivedFrom(KMemoryRegionType_DramUserPool)) {
             // Get the manager for the region.
-            auto index = it.GetAttributes();
-            auto& manager = managers[index];
+            auto& manager = m_managers[it.GetAttributes()];
 
             const PAddr cur_start = it.GetAddress();
             const PAddr cur_last = it.GetLastAddress();
@@ -162,11 +162,19 @@ void KMemoryManager::Initialize(VAddr management_region, size_t management_region_size) {
     }
 
     // Update the used size for all managers.
-    for (size_t i = 0; i < num_managers; ++i) {
-        managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
+    for (size_t i = 0; i < m_num_managers; ++i) {
+        m_managers[i].SetInitialUsedHeapSize(reserved_sizes[i]);
     }
 }
 
+Result KMemoryManager::InitializeOptimizedMemory(u64 process_id, Pool pool) {
+    UNREACHABLE();
+}
+
+void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
+    UNREACHABLE();
+}
+
 PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
     // Early return if we're allocating no pages.
     if (num_pages == 0) {
@@ -175,7 +183,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
 
     // Lock the pool that we're allocating from.
     const auto [pool, dir] = DecodeOption(option);
-    KScopedLightLock lk(pool_locks[static_cast<std::size_t>(pool)]);
+    KScopedLightLock lk(m_pool_locks[static_cast<std::size_t>(pool)]);
 
     // Choose a heap based on our page size request.
     const s32 heap_index = KPageHeap::GetAlignedBlockIndex(num_pages, align_pages);
@@ -185,7 +193,7 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
     PAddr allocated_block = 0;
     for (chosen_manager = this->GetFirstManager(pool, dir); chosen_manager != nullptr;
          chosen_manager = this->GetNextManager(chosen_manager, dir)) {
-        allocated_block = chosen_manager->AllocateBlock(heap_index, true);
+        allocated_block = chosen_manager->AllocateAligned(heap_index, num_pages, align_pages);
         if (allocated_block != 0) {
             break;
         }
@@ -196,10 +204,9 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
         return 0;
     }
 
-    // If we allocated more than we need, free some.
-    const size_t allocated_pages = KPageHeap::GetBlockNumPages(heap_index);
-    if (allocated_pages > num_pages) {
-        chosen_manager->Free(allocated_block + num_pages * PageSize, allocated_pages - num_pages);
+    // Maintain the optimized memory bitmap, if we should.
+    if (m_has_optimized_process[static_cast<size_t>(pool)]) {
+        UNIMPLEMENTED();
     }
 
     // Open the first reference to the pages.
@@ -209,20 +216,21 @@ PAddr KMemoryManager::AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option) {
 }
 
 Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool,
-                                             Direction dir, bool random) {
+                                             Direction dir, bool unoptimized, bool random) {
     // Choose a heap based on our page size request.
     const s32 heap_index = KPageHeap::GetBlockIndex(num_pages);
    R_UNLESS(0 <= heap_index, ResultOutOfMemory);
 
     // Ensure that we don't leave anything un-freed.
-    auto group_guard = SCOPE_GUARD({
+    ON_RESULT_FAILURE {
         for (const auto& it : out->Nodes()) {
-            auto& manager = this->GetManager(system.Kernel().MemoryLayout(), it.GetAddress());
-            const size_t num_pages_to_free =
+            auto& manager = this->GetManager(it.GetAddress());
+            const size_t node_num_pages =
                 std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
-            manager.Free(it.GetAddress(), num_pages_to_free);
+            manager.Free(it.GetAddress(), node_num_pages);
         }
-    });
+        out->Finalize();
+    };
 
     // Keep allocating until we've allocated all our pages.
     for (s32 index = heap_index; index >= 0 && num_pages > 0; index--) {
@@ -236,12 +244,17 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool,
             break;
         }
 
-        // Safely add it to our group.
-        {
-            auto block_guard =
-                SCOPE_GUARD({ cur_manager->Free(allocated_block, pages_per_alloc); });
-            R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
-            block_guard.Cancel();
+        // Ensure we don't leak the block if we fail.
+        ON_RESULT_FAILURE_2 {
+            cur_manager->Free(allocated_block, pages_per_alloc);
+        };
+
+        // Add the block to our group.
+        R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
+
+        // Maintain the optimized memory bitmap, if we should.
+        if (unoptimized) {
+            UNIMPLEMENTED();
         }
 
         num_pages -= pages_per_alloc;
@@ -253,8 +266,7 @@ Result KMemoryManager::AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool,
     R_UNLESS(num_pages == 0, ResultOutOfMemory);
 
     // We succeeded!
-    group_guard.Cancel();
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
 Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {
@@ -266,10 +278,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {
 
     // Lock the pool that we're allocating from.
     const auto [pool, dir] = DecodeOption(option);
-    KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
+    KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
 
     // Allocate the page group.
-    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
+    R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir,
+                                      m_has_optimized_process[static_cast<size_t>(pool)], true));
 
     // Open the first reference to the pages.
     for (const auto& block : out->Nodes()) {
@@ -277,7 +290,7 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {
         size_t remaining_pages = block.GetNumPages();
         while (remaining_pages > 0) {
             // Get the manager for the current address.
-            auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
+            auto& manager = this->GetManager(cur_address);
 
             // Process part or all of the block.
             const size_t cur_pages =
@@ -290,11 +303,11 @@ Result KMemoryManager::AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option) {
         }
     }
 
-    return ResultSuccess;
+    R_SUCCEED();
 }
 
-Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option,
-                                                 u64 process_id, u8 fill_pattern) {
+Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option,
+                                          u64 process_id, u8 fill_pattern) {
     ASSERT(out != nullptr);
     ASSERT(out->GetNumPages() == 0);
 
@@ -302,83 +315,89 @@ Result KMemoryManager::AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option,
     const auto [pool, dir] = DecodeOption(option);
 
     // Allocate the memory.
+    bool optimized;
     {
         // Lock the pool that we're allocating from.
-        KScopedLightLock lk(pool_locks[static_cast<size_t>(pool)]);
+        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
+
+        // Check if we have an optimized process.
+        const bool has_optimized = m_has_optimized_process[static_cast<size_t>(pool)];
+        const bool is_optimized = m_optimized_process_ids[static_cast<size_t>(pool)] == process_id;
 
         // Allocate the page group.
-        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, false));
+        R_TRY(this->AllocatePageGroupImpl(out, num_pages, pool, dir, has_optimized && !is_optimized,
+                                          false));
 
-        // Open the first reference to the pages.
+        // Set whether we should optimize.
+        optimized = has_optimized && is_optimized;
+    }
+
+    // Perform optimized memory tracking, if we should.
+    if (optimized) {
+        // Iterate over the allocated blocks.
         for (const auto& block : out->Nodes()) {
-            PAddr cur_address = block.GetAddress();
-            size_t remaining_pages = block.GetNumPages();
-            while (remaining_pages > 0) {
-                // Get the manager for the current address.
-                auto& manager = this->GetManager(system.Kernel().MemoryLayout(), cur_address);
+            // Get the block extents.
+            const PAddr block_address = block.GetAddress();
+            const size_t block_pages = block.GetNumPages();
 
-                // Process part or all of the block.
-                const size_t cur_pages =
-                    std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
-                manager.OpenFirst(cur_address, cur_pages);
+            // If it has no pages, we don't need to do anything.
+            if (block_pages == 0) {
+                continue;
+            }
 
-                // Advance.
-                cur_address += cur_pages * PageSize;
-                remaining_pages -= cur_pages;
+            // Fill all the pages that we need to fill.
+            bool any_new = false;
+            {
+                PAddr cur_address = block_address;
+                size_t remaining_pages = block_pages;
+                while (remaining_pages > 0) {
+                    // Get the manager for the current address.
+                    auto& manager = this->GetManager(cur_address);
+
+                    // Process part or all of the block.
+                    const size_t cur_pages =
+                        std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
+                    any_new =
+                        manager.ProcessOptimizedAllocation(cur_address, cur_pages, fill_pattern);
+
+                    // Advance.
+                    cur_address += cur_pages * PageSize;
+                    remaining_pages -= cur_pages;
+                }
+            }
+
+            // If there are new pages, update tracking for the allocation.
+            if (any_new) {
+                // Update tracking for the allocation.
+                PAddr cur_address = block_address;
+                size_t remaining_pages = block_pages;
+                while (remaining_pages > 0) {
+                    // Get the manager for the current address.
+                    auto& manager = this->GetManager(cur_address);
+
+                    // Lock the pool for the manager.
+                    KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+
+                    // Track some or all of the current pages.
+                    const size_t cur_pages =
+                        std::min(remaining_pages, manager.GetPageOffsetToEnd(cur_address));
+                    manager.TrackOptimizedAllocation(cur_address, cur_pages);
+
+                    // Advance.
+                    cur_address += cur_pages * PageSize;
+                    remaining_pages -= cur_pages;
+                }
             }
         }
-    }
-
-    // Set all the allocated memory.
-    for (const auto& block : out->Nodes()) {
-        std::memset(system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
-                    block.GetSize());
-    }
-
-    return ResultSuccess;
-}
-
-void KMemoryManager::Open(PAddr address, size_t num_pages) {
-    // Repeatedly open references until we've done so for all pages.
-    while (num_pages) {
-        auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
-        const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
-
-        {
-            KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
-            manager.Open(address, cur_pages);
-        }
-
-        num_pages -= cur_pages;
-        address += cur_pages * PageSize;
-    }
-}
-
-void KMemoryManager::Close(PAddr address, size_t num_pages) {
-    // Repeatedly close references until we've done so for all pages.
-    while (num_pages) {
-        auto& manager = this->GetManager(system.Kernel().MemoryLayout(), address);
-        const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
-
-        {
-            KScopedLightLock lk(pool_locks[static_cast<size_t>(manager.GetPool())]);
-            manager.Close(address, cur_pages);
-        }
-
-        num_pages -= cur_pages;
-        address += cur_pages * PageSize;
-    }
-}
-
-void KMemoryManager::Close(const KPageGroup& pg) {
-    for (const auto& node : pg.Nodes()) {
-        Close(node.GetAddress(), node.GetNumPages());
-    }
-}
-void KMemoryManager::Open(const KPageGroup& pg) {
-    for (const auto& node : pg.Nodes()) {
-        Open(node.GetAddress(), node.GetNumPages());
+    } else {
+        // Set all the allocated memory.
+        for (const auto& block : out->Nodes()) {
+            std::memset(m_system.DeviceMemory().GetPointer<void>(block.GetAddress()), fill_pattern,
+                        block.GetSize());
+        }
     }
+
+    R_SUCCEED();
 }
 
 size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,
@@ -394,18 +413,31 @@ size_t KMemoryManager::Impl::Initialize(PAddr address, size_t size, VAddr management,
     ASSERT(Common::IsAligned(total_management_size, PageSize));
 
     // Setup region.
-    pool = p;
-    management_region = management;
-    page_reference_counts.resize(
+    m_pool = p;
+    m_management_region = management;
+    m_page_reference_counts.resize(
         Kernel::Board::Nintendo::Nx::KSystemControl::Init::GetIntendedMemorySize() / PageSize);
-    ASSERT(Common::IsAligned(management_region, PageSize));
+    ASSERT(Common::IsAligned(m_management_region, PageSize));
 
     // Initialize the manager's KPageHeap.
-    heap.Initialize(address, size, management + manager_size, page_heap_size);
+    m_heap.Initialize(address, size, management + manager_size, page_heap_size);
 
     return total_management_size;
 }
 
+void KMemoryManager::Impl::TrackUnoptimizedAllocation(PAddr block, size_t num_pages) {
+    UNREACHABLE();
+}
+
+void KMemoryManager::Impl::TrackOptimizedAllocation(PAddr block, size_t num_pages) {
+    UNREACHABLE();
+}
+
+bool KMemoryManager::Impl::ProcessOptimizedAllocation(PAddr block, size_t num_pages,
+                                                      u8 fill_pattern) {
+    UNREACHABLE();
+}
+
 size_t KMemoryManager::Impl::CalculateManagementOverheadSize(size_t region_size) {
     const size_t ref_count_size = (region_size / PageSize) * sizeof(u16);
     const size_t optimize_map_size =
@@ -21,11 +21,8 @@ namespace Kernel {
 
 class KPageGroup;
 
-class KMemoryManager final {
+class KMemoryManager {
 public:
-    YUZU_NON_COPYABLE(KMemoryManager);
-    YUZU_NON_MOVEABLE(KMemoryManager);
-
     enum class Pool : u32 {
         Application = 0,
         Applet = 1,
@@ -45,16 +42,85 @@ public:
     enum class Direction : u32 {
         FromFront = 0,
         FromBack = 1,
 
         Shift = 0,
         Mask = (0xF << Shift),
     };
 
-    explicit KMemoryManager(Core::System& system_);
+    static constexpr size_t MaxManagerCount = 10;
+
+    explicit KMemoryManager(Core::System& system);
 
     void Initialize(VAddr management_region, size_t management_region_size);
 
-    constexpr size_t GetSize(Pool pool) const {
+    Result InitializeOptimizedMemory(u64 process_id, Pool pool);
+    void FinalizeOptimizedMemory(u64 process_id, Pool pool);
+
+    PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
+    Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
+    Result AllocateForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
+                              u8 fill_pattern);
+
+    Pool GetPool(PAddr address) const {
+        return this->GetManager(address).GetPool();
+    }
+
+    void Open(PAddr address, size_t num_pages) {
+        // Repeatedly open references until we've done so for all pages.
+        while (num_pages) {
+            auto& manager = this->GetManager(address);
+            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+            {
+                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+                manager.Open(address, cur_pages);
+            }
+
+            num_pages -= cur_pages;
+            address += cur_pages * PageSize;
+        }
+    }
+
+    void OpenFirst(PAddr address, size_t num_pages) {
+        // Repeatedly open references until we've done so for all pages.
+        while (num_pages) {
+            auto& manager = this->GetManager(address);
+            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+            {
+                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+                manager.OpenFirst(address, cur_pages);
+            }
+
+            num_pages -= cur_pages;
+            address += cur_pages * PageSize;
+        }
+    }
+
+    void Close(PAddr address, size_t num_pages) {
+        // Repeatedly close references until we've done so for all pages.
+        while (num_pages) {
+            auto& manager = this->GetManager(address);
+            const size_t cur_pages = std::min(num_pages, manager.GetPageOffsetToEnd(address));
+
+            {
+                KScopedLightLock lk(m_pool_locks[static_cast<size_t>(manager.GetPool())]);
+                manager.Close(address, cur_pages);
+            }
+
+            num_pages -= cur_pages;
+            address += cur_pages * PageSize;
+        }
+    }
+
+    size_t GetSize() {
+        size_t total = 0;
+        for (size_t i = 0; i < m_num_managers; i++) {
+            total += m_managers[i].GetSize();
+        }
+        return total;
+    }
+
+    size_t GetSize(Pool pool) {
         constexpr Direction GetSizeDirection = Direction::FromFront;
         size_t total = 0;
         for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
@@ -64,18 +130,36 @@ public:
         return total;
     }
 
-    PAddr AllocateAndOpenContinuous(size_t num_pages, size_t align_pages, u32 option);
-    Result AllocateAndOpen(KPageGroup* out, size_t num_pages, u32 option);
-    Result AllocateAndOpenForProcess(KPageGroup* out, size_t num_pages, u32 option, u64 process_id,
-                                     u8 fill_pattern);
+    size_t GetFreeSize() {
+        size_t total = 0;
+        for (size_t i = 0; i < m_num_managers; i++) {
+            KScopedLightLock lk(m_pool_locks[static_cast<size_t>(m_managers[i].GetPool())]);
+            total += m_managers[i].GetFreeSize();
+        }
+        return total;
+    }
 
-    static constexpr size_t MaxManagerCount = 10;
+    size_t GetFreeSize(Pool pool) {
+        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
 
-    void Close(PAddr address, size_t num_pages);
-    void Close(const KPageGroup& pg);
+        constexpr Direction GetSizeDirection = Direction::FromFront;
+        size_t total = 0;
+        for (auto* manager = this->GetFirstManager(pool, GetSizeDirection); manager != nullptr;
+             manager = this->GetNextManager(manager, GetSizeDirection)) {
+            total += manager->GetFreeSize();
+        }
+        return total;
+    }
 
-    void Open(PAddr address, size_t num_pages);
-    void Open(const KPageGroup& pg);
+    void DumpFreeList(Pool pool) {
+        KScopedLightLock lk(m_pool_locks[static_cast<size_t>(pool)]);
+
+        constexpr Direction DumpDirection = Direction::FromFront;
+        for (auto* manager = this->GetFirstManager(pool, DumpDirection); manager != nullptr;
+             manager = this->GetNextManager(manager, DumpDirection)) {
+            manager->DumpFreeList();
+        }
+    }
 
 public:
     static size_t CalculateManagementOverheadSize(size_t region_size) {
@@ -88,14 +172,13 @@ public:
     }
 
     static constexpr Pool GetPool(u32 option) {
-        return static_cast<Pool>((static_cast<u32>(option) & static_cast<u32>(Pool::Mask)) >>
+        return static_cast<Pool>((option & static_cast<u32>(Pool::Mask)) >>
                                  static_cast<u32>(Pool::Shift));
     }
 
     static constexpr Direction GetDirection(u32 option) {
-        return static_cast<Direction>(
-            (static_cast<u32>(option) & static_cast<u32>(Direction::Mask)) >>
-            static_cast<u32>(Direction::Shift));
+        return static_cast<Direction>((option & static_cast<u32>(Direction::Mask)) >>
+                                      static_cast<u32>(Direction::Shift));
    }
 
     static constexpr std::tuple<Pool, Direction> DecodeOption(u32 option) {
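Editor's illustration, not part of the commit: a standalone sketch of the Pool/Direction bit-packing that GetPool/GetDirection/DecodeOption above rely on. Only Direction's Shift = 0 / Mask = 0xF are visible in this diff; the Pool field occupying the next four bits (Shift = 4) and the enumerator list are assumptions here.

// Hypothetical, self-contained sketch of the option encoding (assumed field layout).
#include <cstdint>

enum class Pool : std::uint32_t { Application = 0, Applet = 1 };
enum class Direction : std::uint32_t { FromFront = 0, FromBack = 1 };

constexpr std::uint32_t DirectionShift = 0, DirectionMask = 0xF << DirectionShift; // from the diff
constexpr std::uint32_t PoolShift = 4, PoolMask = 0xF << PoolShift;                // assumed

constexpr std::uint32_t EncodeOption(Pool pool, Direction dir) {
    // Pack the pool and the allocation direction into one 32-bit option word.
    return (static_cast<std::uint32_t>(pool) << PoolShift) |
           (static_cast<std::uint32_t>(dir) << DirectionShift);
}

constexpr Pool GetPool(std::uint32_t option) {
    return static_cast<Pool>((option & PoolMask) >> PoolShift);
}

constexpr Direction GetDirection(std::uint32_t option) {
    return static_cast<Direction>((option & DirectionMask) >> DirectionShift);
}

// Round-trip check: packing and unpacking recovers both fields.
static_assert(GetPool(EncodeOption(Pool::Applet, Direction::FromBack)) == Pool::Applet);
static_assert(GetDirection(EncodeOption(Pool::Applet, Direction::FromBack)) == Direction::FromBack);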
@@ -103,74 +186,88 @@ public:
     }
 
 private:
-    class Impl final {
+    class Impl {
     public:
-        YUZU_NON_COPYABLE(Impl);
-        YUZU_NON_MOVEABLE(Impl);
+        static size_t CalculateManagementOverheadSize(size_t region_size);
 
+        static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
+            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
+                    Common::BitSize<u64>()) *
+                   sizeof(u64);
+        }
+
+    public:
         Impl() = default;
-        ~Impl() = default;
 
         size_t Initialize(PAddr address, size_t size, VAddr management, VAddr management_end,
                           Pool p);
 
-        VAddr AllocateBlock(s32 index, bool random) {
-            return heap.AllocateBlock(index, random);
+        PAddr AllocateBlock(s32 index, bool random) {
+            return m_heap.AllocateBlock(index, random);
         }
+
+        PAddr AllocateAligned(s32 index, size_t num_pages, size_t align_pages) {
+            return m_heap.AllocateAligned(index, num_pages, align_pages);
+        }
 
-        void Free(VAddr addr, size_t num_pages) {
-            heap.Free(addr, num_pages);
+        void Free(PAddr addr, size_t num_pages) {
+            m_heap.Free(addr, num_pages);
         }
 
         void SetInitialUsedHeapSize(size_t reserved_size) {
-            heap.SetInitialUsedSize(reserved_size);
+            m_heap.SetInitialUsedSize(reserved_size);
         }
 
+        void InitializeOptimizedMemory() {
+            UNIMPLEMENTED();
+        }
+
+        void TrackUnoptimizedAllocation(PAddr block, size_t num_pages);
+        void TrackOptimizedAllocation(PAddr block, size_t num_pages);
+
+        bool ProcessOptimizedAllocation(PAddr block, size_t num_pages, u8 fill_pattern);
+
         constexpr Pool GetPool() const {
-            return pool;
+            return m_pool;
         }
 
         constexpr size_t GetSize() const {
-            return heap.GetSize();
+            return m_heap.GetSize();
+        }
+
+        constexpr PAddr GetEndAddress() const {
+            return m_heap.GetEndAddress();
         }
 
-        constexpr VAddr GetAddress() const {
-            return heap.GetAddress();
+        size_t GetFreeSize() const {
+            return m_heap.GetFreeSize();
         }
 
-        constexpr VAddr GetEndAddress() const {
-            return heap.GetEndAddress();
+        void DumpFreeList() const {
+            UNIMPLEMENTED();
         }
 
         constexpr size_t GetPageOffset(PAddr address) const {
-            return heap.GetPageOffset(address);
+            return m_heap.GetPageOffset(address);
         }
 
         constexpr size_t GetPageOffsetToEnd(PAddr address) const {
-            return heap.GetPageOffsetToEnd(address);
+            return m_heap.GetPageOffsetToEnd(address);
        }
 
         constexpr void SetNext(Impl* n) {
-            next = n;
+            m_next = n;
         }
 
         constexpr void SetPrev(Impl* n) {
-            prev = n;
+            m_prev = n;
         }
 
         constexpr Impl* GetNext() const {
-            return next;
+            return m_next;
         }
 
         constexpr Impl* GetPrev() const {
-            return prev;
+            return m_prev;
         }
 
         void OpenFirst(PAddr address, size_t num_pages) {
             size_t index = this->GetPageOffset(address);
             const size_t end = index + num_pages;
             while (index < end) {
-                const RefCount ref_count = (++page_reference_counts[index]);
+                const RefCount ref_count = (++m_page_reference_counts[index]);
                 ASSERT(ref_count == 1);
 
                 index++;
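Editor's illustration, not part of the commit: the optimized-process overhead formula introduced above (Impl::CalculateOptimizedProcessOverheadSize) reserves one bit per page, rounded up to whole u64 words. PageSize = 4 KiB is an assumption here; the kernel's actual constant lives elsewhere in the tree.

// Minimal standalone sketch of the bitmap-overhead calculation (assumed 4 KiB pages).
#include <cstddef>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000; // assumed
constexpr std::size_t BitsPerWord = 64;  // Common::BitSize<u64>()

constexpr std::size_t CalculateOptimizedProcessOverheadSize(std::size_t region_size) {
    const std::size_t pages = region_size / PageSize;
    const std::size_t words = (pages + BitsPerWord - 1) / BitsPerWord; // AlignUp(pages, 64) / 64
    return words * sizeof(std::uint64_t);
}

// Example: a 4 MiB region has 1024 pages -> 16 u64 words -> 128 bytes of bitmap.
static_assert(CalculateOptimizedProcessOverheadSize(4 * 1024 * 1024) == 128);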
@@ -181,7 +278,7 @@ private:
             size_t index = this->GetPageOffset(address);
             const size_t end = index + num_pages;
             while (index < end) {
-                const RefCount ref_count = (++page_reference_counts[index]);
+                const RefCount ref_count = (++m_page_reference_counts[index]);
                 ASSERT(ref_count > 1);
 
                 index++;
@@ -195,8 +292,8 @@ private:
             size_t free_start = 0;
             size_t free_count = 0;
             while (index < end) {
-                ASSERT(page_reference_counts[index] > 0);
-                const RefCount ref_count = (--page_reference_counts[index]);
+                ASSERT(m_page_reference_counts[index] > 0);
+                const RefCount ref_count = (--m_page_reference_counts[index]);
 
                 // Keep track of how many zero refcounts we see in a row, to minimize calls to free.
                 if (ref_count == 0) {
@@ -208,7 +305,7 @@ private:
                     }
                 } else {
                     if (free_count > 0) {
-                        this->Free(heap.GetAddress() + free_start * PageSize, free_count);
+                        this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
                         free_count = 0;
                     }
                 }
@@ -217,44 +314,36 @@ private:
             }
 
             if (free_count > 0) {
-                this->Free(heap.GetAddress() + free_start * PageSize, free_count);
+                this->Free(m_heap.GetAddress() + free_start * PageSize, free_count);
             }
         }
 
-        static size_t CalculateManagementOverheadSize(size_t region_size);
-
-        static constexpr size_t CalculateOptimizedProcessOverheadSize(size_t region_size) {
-            return (Common::AlignUp((region_size / PageSize), Common::BitSize<u64>()) /
-                    Common::BitSize<u64>()) *
-                   sizeof(u64);
-        }
-
     private:
         using RefCount = u16;
 
-        KPageHeap heap;
-        std::vector<RefCount> page_reference_counts;
-        VAddr management_region{};
-        Pool pool{};
-        Impl* next{};
-        Impl* prev{};
+        KPageHeap m_heap;
+        std::vector<RefCount> m_page_reference_counts;
+        VAddr m_management_region{};
+        Pool m_pool{};
+        Impl* m_next{};
+        Impl* m_prev{};
     };
 
 private:
-    Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) {
-        return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
+    Impl& GetManager(PAddr address) {
+        return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
     }
 
-    const Impl& GetManager(const KMemoryLayout& memory_layout, PAddr address) const {
-        return managers[memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
+    const Impl& GetManager(PAddr address) const {
+        return m_managers[m_memory_layout.GetPhysicalLinearRegion(address).GetAttributes()];
     }
 
-    constexpr Impl* GetFirstManager(Pool pool, Direction dir) const {
-        return dir == Direction::FromBack ? pool_managers_tail[static_cast<size_t>(pool)]
-                                          : pool_managers_head[static_cast<size_t>(pool)];
+    constexpr Impl* GetFirstManager(Pool pool, Direction dir) {
+        return dir == Direction::FromBack ? m_pool_managers_tail[static_cast<size_t>(pool)]
+                                          : m_pool_managers_head[static_cast<size_t>(pool)];
     }
 
-    constexpr Impl* GetNextManager(Impl* cur, Direction dir) const {
+    constexpr Impl* GetNextManager(Impl* cur, Direction dir) {
         if (dir == Direction::FromBack) {
             return cur->GetPrev();
         } else {
@@ -263,15 +352,21 @@ private:
     }
 
     Result AllocatePageGroupImpl(KPageGroup* out, size_t num_pages, Pool pool, Direction dir,
-                                 bool random);
+                                 bool unoptimized, bool random);
 
 private:
-    Core::System& system;
-    std::array<KLightLock, static_cast<size_t>(Pool::Count)> pool_locks;
-    std::array<Impl*, MaxManagerCount> pool_managers_head{};
-    std::array<Impl*, MaxManagerCount> pool_managers_tail{};
-    std::array<Impl, MaxManagerCount> managers;
-    size_t num_managers{};
+    template <typename T>
+    using PoolArray = std::array<T, static_cast<size_t>(Pool::Count)>;
+
+    Core::System& m_system;
+    const KMemoryLayout& m_memory_layout;
+    PoolArray<KLightLock> m_pool_locks;
+    std::array<Impl*, MaxManagerCount> m_pool_managers_head{};
+    std::array<Impl*, MaxManagerCount> m_pool_managers_tail{};
+    std::array<Impl, MaxManagerCount> m_managers;
+    size_t m_num_managers{};
+    PoolArray<u64> m_optimized_process_ids{};
+    PoolArray<bool> m_has_optimized_process{};
 };
 
 } // namespace Kernel
@ -114,7 +114,7 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
|
||||||
|
|
||||||
// Set other basic fields
|
// Set other basic fields
|
||||||
m_enable_aslr = enable_aslr;
|
m_enable_aslr = enable_aslr;
|
||||||
m_enable_device_address_space_merge = false;
|
m_enable_device_address_space_merge = enable_das_merge;
|
||||||
m_address_space_start = start;
|
m_address_space_start = start;
|
||||||
m_address_space_end = end;
|
m_address_space_end = end;
|
||||||
m_is_kernel = false;
|
m_is_kernel = false;
|
||||||
|
@ -219,10 +219,22 @@ Result KPageTable::InitializeForProcess(FileSys::ProgramAddressSpaceType as_type
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Set heap members
|
// Set heap and fill members.
|
||||||
m_current_heap_end = m_heap_region_start;
|
m_current_heap_end = m_heap_region_start;
|
||||||
m_max_heap_size = 0;
|
m_max_heap_size = 0;
|
||||||
m_max_physical_memory_size = 0;
|
m_mapped_physical_memory_size = 0;
|
||||||
|
m_mapped_unsafe_physical_memory = 0;
|
||||||
|
m_mapped_insecure_memory = 0;
|
||||||
|
m_mapped_ipc_server_memory = 0;
|
||||||
|
|
||||||
|
m_heap_fill_value = 0;
|
||||||
|
m_ipc_fill_value = 0;
|
||||||
|
m_stack_fill_value = 0;
|
||||||
|
|
||||||
|
// Set allocation option.
|
||||||
|
m_allocate_option =
|
||||||
|
KMemoryManager::EncodeOption(pool, from_back ? KMemoryManager::Direction::FromBack
|
||||||
|
: KMemoryManager::Direction::FromFront);
|
||||||
|
|
||||||
// Ensure that we regions inside our address space
|
// Ensure that we regions inside our address space
|
||||||
auto IsInAddressSpace = [&](VAddr addr) {
|
auto IsInAddressSpace = [&](VAddr addr) {
|
||||||
|
@ -271,6 +283,16 @@ void KPageTable::Finalize() {
|
||||||
m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size);
|
m_system.Memory().UnmapRegion(*m_page_table_impl, addr, size);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
// Release any insecure mapped memory.
|
||||||
|
if (m_mapped_insecure_memory) {
|
||||||
|
UNIMPLEMENTED();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Release any ipc server memory.
|
||||||
|
if (m_mapped_ipc_server_memory) {
|
||||||
|
UNIMPLEMENTED();
|
||||||
|
}
|
||||||
|
|
||||||
// Close the backing page table, as the destructor is not called for guest objects.
|
// Close the backing page table, as the destructor is not called for guest objects.
|
||||||
m_page_table_impl.reset();
|
m_page_table_impl.reset();
|
||||||
}
|
}
|
||||||
|
@ -690,9 +712,20 @@ Result KPageTable::UnmapProcessMemory(VAddr dst_addr, size_t size, KPageTable& s
|
||||||
R_SUCCEED();
|
R_SUCCEED();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void KPageTable::HACK_OpenPages(PAddr phys_addr, size_t num_pages) {
|
||||||
|
m_system.Kernel().MemoryManager().OpenFirst(phys_addr, num_pages);
|
||||||
|
}
|
||||||
|
|
||||||
|
void KPageTable::HACK_ClosePages(VAddr virt_addr, size_t num_pages) {
|
||||||
|
for (size_t index = 0; index < num_pages; ++index) {
|
||||||
|
const auto paddr = GetPhysicalAddr(virt_addr + (index * PageSize));
|
||||||
|
m_system.Kernel().MemoryManager().Close(paddr, 1);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
|
Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
|
||||||
// Lock the physical memory lock.
|
// Lock the physical memory lock.
|
||||||
KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
|
KScopedLightLock phys_lk(m_map_physical_memory_lock);
|
||||||
|
|
||||||
// Calculate the last address for convenience.
|
// Calculate the last address for convenience.
|
||||||
const VAddr last_address = address + size - 1;
|
const VAddr last_address = address + size - 1;
|
||||||
|
@ -746,15 +779,19 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
|
||||||
{
|
{
|
||||||
// Reserve the memory from the process resource limit.
|
// Reserve the memory from the process resource limit.
|
||||||
KScopedResourceReservation memory_reservation(
|
KScopedResourceReservation memory_reservation(
|
||||||
m_system.Kernel().CurrentProcess()->GetResourceLimit(),
|
m_resource_limit, LimitableResource::PhysicalMemory, size - mapped_size);
|
||||||
LimitableResource::PhysicalMemory, size - mapped_size);
|
|
||||||
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
|
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
|
||||||
|
|
||||||
// Allocate pages for the new memory.
|
// Allocate pages for the new memory.
|
||||||
KPageGroup pg;
|
KPageGroup pg;
|
||||||
R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
|
R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
|
||||||
&pg, (size - mapped_size) / PageSize,
|
&pg, (size - mapped_size) / PageSize, m_allocate_option, 0, 0));
|
||||||
KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
|
|
||||||
|
// If we fail in the next bit (or retry), we need to cleanup the pages.
|
||||||
|
// auto pg_guard = SCOPE_GUARD {
|
||||||
|
// pg.OpenFirst();
|
||||||
|
// pg.Close();
|
||||||
|
//};
|
||||||
|
|
||||||
// Map the memory.
|
// Map the memory.
|
||||||
{
|
{
|
||||||
|
@ -814,15 +851,24 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
|
||||||
|
|
||||||
// Create an update allocator.
|
// Create an update allocator.
|
||||||
ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
|
ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
|
||||||
Result allocator_result{ResultSuccess};
|
Result allocator_result;
|
||||||
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
|
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
|
||||||
m_memory_block_slab_manager,
|
m_memory_block_slab_manager,
|
||||||
num_allocator_blocks);
|
num_allocator_blocks);
|
||||||
R_TRY(allocator_result);
|
R_TRY(allocator_result);
|
||||||
|
|
||||||
|
// We're going to perform an update, so create a helper.
|
||||||
|
// KScopedPageTableUpdater updater(this);
|
||||||
|
|
||||||
|
// Prepare to iterate over the memory.
|
||||||
|
auto pg_it = pg.Nodes().begin();
|
||||||
|
PAddr pg_phys_addr = pg_it->GetAddress();
|
||||||
|
size_t pg_pages = pg_it->GetNumPages();
|
||||||
|
|
||||||
// Reset the current tracking address, and make sure we clean up on failure.
|
// Reset the current tracking address, and make sure we clean up on failure.
|
||||||
|
// pg_guard.Cancel();
|
||||||
cur_address = address;
|
cur_address = address;
|
||||||
auto unmap_guard = detail::ScopeExit([&] {
|
ON_RESULT_FAILURE {
|
||||||
if (cur_address > address) {
|
if (cur_address > address) {
|
||||||
const VAddr last_unmap_address = cur_address - 1;
|
const VAddr last_unmap_address = cur_address - 1;
|
||||||
|
|
||||||
|
@ -845,6 +891,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
|
||||||
last_unmap_address + 1 - cur_address) /
|
last_unmap_address + 1 - cur_address) /
|
||||||
PageSize;
|
PageSize;
|
||||||
|
|
||||||
|
// HACK: Manually close the pages.
|
||||||
|
HACK_ClosePages(cur_address, cur_pages);
|
||||||
|
|
||||||
// Unmap.
|
// Unmap.
|
||||||
ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
|
ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None,
|
||||||
OperationType::Unmap)
|
OperationType::Unmap)
|
||||||
|
@ -861,12 +910,17 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
|
||||||
++it;
|
++it;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
|
||||||
|
|
||||||
// Iterate over the memory.
|
// Release any remaining unmapped memory.
|
||||||
auto pg_it = pg.Nodes().begin();
|
m_system.Kernel().MemoryManager().OpenFirst(pg_phys_addr, pg_pages);
|
||||||
PAddr pg_phys_addr = pg_it->GetAddress();
|
m_system.Kernel().MemoryManager().Close(pg_phys_addr, pg_pages);
|
||||||
size_t pg_pages = pg_it->GetNumPages();
|
for (++pg_it; pg_it != pg.Nodes().end(); ++pg_it) {
|
||||||
|
m_system.Kernel().MemoryManager().OpenFirst(pg_it->GetAddress(),
|
||||||
|
pg_it->GetNumPages());
|
||||||
|
m_system.Kernel().MemoryManager().Close(pg_it->GetAddress(),
|
||||||
|
pg_it->GetNumPages());
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
auto it = m_memory_block_manager.FindIterator(cur_address);
|
auto it = m_memory_block_manager.FindIterator(cur_address);
|
||||||
while (true) {
|
while (true) {
|
||||||
|
@ -901,6 +955,9 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
|
||||||
R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
|
R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::UserReadWrite,
|
||||||
OperationType::Map, pg_phys_addr));
|
OperationType::Map, pg_phys_addr));
|
||||||
|
|
||||||
|
// HACK: Manually open the pages.
|
||||||
|
HACK_OpenPages(pg_phys_addr, cur_pages);
|
||||||
|
|
||||||
// Advance.
|
// Advance.
|
||||||
cur_address += cur_pages * PageSize;
|
cur_address += cur_pages * PageSize;
|
||||||
map_pages -= cur_pages;
|
map_pages -= cur_pages;
|
||||||
|
@ -932,9 +989,6 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
|
||||||
KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
|
KMemoryPermission::None, KMemoryAttribute::None, KMemoryState::Normal,
|
||||||
KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
|
KMemoryPermission::UserReadWrite, KMemoryAttribute::None);
|
||||||
|
|
||||||
// Cancel our guard.
|
|
||||||
unmap_guard.Cancel();
|
|
||||||
|
|
||||||
R_SUCCEED();
|
R_SUCCEED();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -943,7 +997,7 @@ Result KPageTable::MapPhysicalMemory(VAddr address, size_t size) {
|
||||||
|
|
||||||
Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
|
Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
|
||||||
// Lock the physical memory lock.
|
// Lock the physical memory lock.
|
||||||
KScopedLightLock map_phys_mem_lk(m_map_physical_memory_lock);
|
KScopedLightLock phys_lk(m_map_physical_memory_lock);
|
||||||
|
|
||||||
// Lock the table.
|
// Lock the table.
|
||||||
KScopedLightLock lk(m_general_lock);
|
KScopedLightLock lk(m_general_lock);
|
||||||
|
@ -952,8 +1006,11 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
|
||||||
const VAddr last_address = address + size - 1;
|
const VAddr last_address = address + size - 1;
|
||||||
|
|
||||||
// Define iteration variables.
|
// Define iteration variables.
|
||||||
VAddr cur_address = 0;
|
VAddr map_start_address = 0;
|
||||||
size_t mapped_size = 0;
|
VAddr map_last_address = 0;
|
||||||
|
|
||||||
|
VAddr cur_address;
|
||||||
|
size_t mapped_size;
|
||||||
size_t num_allocator_blocks = 0;
|
size_t num_allocator_blocks = 0;
|
||||||
|
|
||||||
// Check if the memory is mapped.
|
// Check if the memory is mapped.
|
||||||
|
@ -979,27 +1036,27 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
|
||||||
if (is_normal) {
|
if (is_normal) {
|
||||||
R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
|
R_UNLESS(info.GetAttribute() == KMemoryAttribute::None, ResultInvalidCurrentMemory);
|
||||||
|
|
||||||
|
if (map_start_address == 0) {
|
||||||
|
map_start_address = cur_address;
|
||||||
|
}
|
||||||
|
map_last_address =
|
||||||
|
(last_address >= info.GetLastAddress()) ? info.GetLastAddress() : last_address;
|
||||||
|
|
||||||
if (info.GetAddress() < address) {
|
if (info.GetAddress() < address) {
|
||||||
++num_allocator_blocks;
|
++num_allocator_blocks;
|
||||||
}
|
}
|
||||||
if (last_address < info.GetLastAddress()) {
|
if (last_address < info.GetLastAddress()) {
|
||||||
++num_allocator_blocks;
|
++num_allocator_blocks;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
mapped_size += (map_last_address + 1 - cur_address);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Check if we're done.
|
// Check if we're done.
|
||||||
if (last_address <= info.GetLastAddress()) {
|
if (last_address <= info.GetLastAddress()) {
|
||||||
if (is_normal) {
|
|
||||||
mapped_size += (last_address + 1 - cur_address);
|
|
||||||
}
|
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Track the memory if it's mapped.
|
|
||||||
if (is_normal) {
|
|
||||||
mapped_size += VAddr(info.GetEndAddress()) - cur_address;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Advance.
|
// Advance.
|
||||||
cur_address = info.GetEndAddress();
|
cur_address = info.GetEndAddress();
|
||||||
++it;
|
++it;
|
||||||
|
@ -1009,125 +1066,22 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
|
||||||
R_SUCCEED_IF(mapped_size == 0);
|
R_SUCCEED_IF(mapped_size == 0);
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make a page group for the unmap region.
|
|
||||||
KPageGroup pg;
|
|
||||||
{
|
|
||||||
auto& impl = this->PageTableImpl();
|
|
||||||
|
|
||||||
// Begin traversal.
|
|
||||||
Common::PageTable::TraversalContext context;
|
|
||||||
Common::PageTable::TraversalEntry cur_entry = {.phys_addr = 0, .block_size = 0};
|
|
||||||
bool cur_valid = false;
|
|
||||||
Common::PageTable::TraversalEntry next_entry;
|
|
||||||
bool next_valid = false;
|
|
||||||
size_t tot_size = 0;
|
|
||||||
|
|
||||||
cur_address = address;
|
|
||||||
next_valid = impl.BeginTraversal(next_entry, context, cur_address);
|
|
||||||
next_entry.block_size =
|
|
||||||
(next_entry.block_size - (next_entry.phys_addr & (next_entry.block_size - 1)));
|
|
||||||
|
|
||||||
// Iterate, building the group.
|
|
||||||
while (true) {
|
|
||||||
if ((!next_valid && !cur_valid) ||
|
|
||||||
(next_valid && cur_valid &&
|
|
||||||
next_entry.phys_addr == cur_entry.phys_addr + cur_entry.block_size)) {
|
|
||||||
cur_entry.block_size += next_entry.block_size;
|
|
||||||
} else {
|
|
||||||
if (cur_valid) {
|
|
||||||
// ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
|
|
||||||
R_TRY(pg.AddBlock(cur_entry.phys_addr, cur_entry.block_size / PageSize));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Update tracking variables.
|
|
||||||
tot_size += cur_entry.block_size;
|
|
||||||
cur_entry = next_entry;
|
|
||||||
cur_valid = next_valid;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (cur_entry.block_size + tot_size >= size) {
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
|
|
||||||
next_valid = impl.ContinueTraversal(next_entry, context);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add the last block.
|
|
||||||
if (cur_valid) {
|
|
||||||
// ASSERT(IsHeapPhysicalAddress(cur_entry.phys_addr));
|
|
||||||
R_TRY(pg.AddBlock(cur_entry.phys_addr, (size - tot_size) / PageSize));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ASSERT(pg.GetNumPages() == mapped_size / PageSize);
|
|
||||||
|
|
||||||
// Create an update allocator.
// Create an update allocator.
ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
ASSERT(num_allocator_blocks <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
Result allocator_result{ResultSuccess};
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
R_TRY(allocator_result);

// We're going to perform an update, so create a helper.
// KScopedPageTableUpdater updater(this);

// Separate the mapping.
R_TRY(Operate(map_start_address, (map_last_address + 1 - map_start_address) / PageSize,
KMemoryPermission::None, OperationType::Separate));

// Reset the current tracking address, and make sure we clean up on failure.
// Reset the current tracking address, and make sure we clean up on failure.
cur_address = address;
cur_address = address;
auto remap_guard = detail::ScopeExit([&] {
if (cur_address > address) {
const VAddr last_map_address = cur_address - 1;
cur_address = address;

// Iterate over the memory we unmapped.
auto it = m_memory_block_manager.FindIterator(cur_address);
auto pg_it = pg.Nodes().begin();
PAddr pg_phys_addr = pg_it->GetAddress();
size_t pg_pages = pg_it->GetNumPages();

while (true) {
// Get the memory info for the pages we unmapped, convert to property.
const KMemoryInfo info = it->GetMemoryInfo();

// If the memory is normal, we unmapped it and need to re-map it.
if (info.GetState() == KMemoryState::Normal) {
// Determine the range to map.
size_t map_pages = std::min(VAddr(info.GetEndAddress()) - cur_address,
last_map_address + 1 - cur_address) /
PageSize;

// While we have pages to map, map them.
while (map_pages > 0) {
// Check if we're at the end of the physical block.
if (pg_pages == 0) {
// Ensure there are more pages to map.
ASSERT(pg_it != pg.Nodes().end());

// Advance our physical block.
++pg_it;
pg_phys_addr = pg_it->GetAddress();
pg_pages = pg_it->GetNumPages();
}

// Map whatever we can.
const size_t cur_pages = std::min(pg_pages, map_pages);
ASSERT(this->Operate(cur_address, cur_pages, info.GetPermission(),
OperationType::Map, pg_phys_addr) == ResultSuccess);

// Advance.
cur_address += cur_pages * PageSize;
map_pages -= cur_pages;

pg_phys_addr += cur_pages * PageSize;
pg_pages -= cur_pages;
}
}

// Check if we're done.
if (last_map_address <= info.GetLastAddress()) {
break;
}

// Advance.
++it;
}
}
});

// Iterate over the memory, unmapping as we go.
// Iterate over the memory, unmapping as we go.
auto it = m_memory_block_manager.FindIterator(cur_address);
auto it = m_memory_block_manager.FindIterator(cur_address);
@@ -1145,8 +1099,12 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
last_address + 1 - cur_address) /
last_address + 1 - cur_address) /
PageSize;
PageSize;

// HACK: Manually close the pages.
HACK_ClosePages(cur_address, cur_pages);

// Unmap.
// Unmap.
R_TRY(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap));
ASSERT(Operate(cur_address, cur_pages, KMemoryPermission::None, OperationType::Unmap)
.IsSuccess());
}
}

// Check if we're done.
// Check if we're done.
@@ -1161,8 +1119,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {

// Release the memory resource.
// Release the memory resource.
m_mapped_physical_memory_size -= mapped_size;
m_mapped_physical_memory_size -= mapped_size;
auto process{m_system.Kernel().CurrentProcess()};
m_resource_limit->Release(LimitableResource::PhysicalMemory, mapped_size);
process->GetResourceLimit()->Release(LimitableResource::PhysicalMemory, mapped_size);

// Update memory blocks.
// Update memory blocks.
m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize,
@@ -1170,14 +1127,7 @@ Result KPageTable::UnmapPhysicalMemory(VAddr address, size_t size) {
KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
KMemoryAttribute::None, KMemoryBlockDisableMergeAttribute::None,
KMemoryBlockDisableMergeAttribute::None);
KMemoryBlockDisableMergeAttribute::None);

// TODO(bunnei): This is a workaround until the next set of changes, where we add reference
// counting for mapped pages. Until then, we must manually close the reference to the page
// group.
m_system.Kernel().MemoryManager().Close(pg);

// We succeeded.
// We succeeded.
remap_guard.Cancel();

R_SUCCEED();
R_SUCCEED();
}
}

@@ -1753,8 +1703,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {
OperationType::Unmap));
OperationType::Unmap));

// Release the memory from the resource limit.
// Release the memory from the resource limit.
m_system.Kernel().CurrentProcess()->GetResourceLimit()->Release(
m_resource_limit->Release(LimitableResource::PhysicalMemory, num_pages * PageSize);
LimitableResource::PhysicalMemory, num_pages * PageSize);

// Apply the memory block update.
// Apply the memory block update.
m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
m_memory_block_manager.Update(std::addressof(allocator), m_heap_region_start + size,
@@ -1784,8 +1733,7 @@ Result KPageTable::SetHeapSize(VAddr* out, size_t size) {

// Reserve memory for the heap extension.
// Reserve memory for the heap extension.
KScopedResourceReservation memory_reservation(
KScopedResourceReservation memory_reservation(
m_system.Kernel().CurrentProcess()->GetResourceLimit(), LimitableResource::PhysicalMemory,
m_resource_limit, LimitableResource::PhysicalMemory, allocation_size);
allocation_size);
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);
R_UNLESS(memory_reservation.Succeeded(), ResultLimitReached);

// Allocate pages for the heap extension.
// Allocate pages for the heap extension.
@@ -1873,7 +1821,7 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
R_TRY(Operate(addr, needed_num_pages, perm, OperationType::Map, map_addr));
} else {
} else {
KPageGroup page_group;
KPageGroup page_group;
R_TRY(m_system.Kernel().MemoryManager().AllocateAndOpenForProcess(
R_TRY(m_system.Kernel().MemoryManager().AllocateForProcess(
&page_group, needed_num_pages,
&page_group, needed_num_pages,
KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
KMemoryManager::EncodeOption(m_memory_pool, m_allocation_option), 0, 0));
R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
R_TRY(Operate(addr, needed_num_pages, page_group, OperationType::MapGroup));
@@ -1887,8 +1835,9 @@ ResultVal<VAddr> KPageTable::AllocateAndMapMemory(size_t needed_num_pages, size_
return addr;
return addr;
}
}

Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMemoryPermission perm,
Result KPageTable::LockForMapDeviceAddressSpace(bool* out_is_io, VAddr address, size_t size,
bool is_aligned) {
KMemoryPermission perm, bool is_aligned,
bool check_heap) {
// Lightly validate the range before doing anything else.
// Lightly validate the range before doing anything else.
const size_t num_pages = size / PageSize;
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -1898,15 +1847,18 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem

// Check the memory state.
// Check the memory state.
const auto test_state =
const auto test_state =
(is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap);
(is_aligned ? KMemoryState::FlagCanAlignedDeviceMap : KMemoryState::FlagCanDeviceMap) |
(check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
size_t num_allocator_blocks;
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryState(std::addressof(num_allocator_blocks), address, size, test_state,
KMemoryState old_state;
R_TRY(this->CheckMemoryState(std::addressof(old_state), nullptr, nullptr,
std::addressof(num_allocator_blocks), address, size, test_state,
test_state, perm, perm,
test_state, perm, perm,
KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
KMemoryAttribute::IpcLocked | KMemoryAttribute::Locked,
KMemoryAttribute::None, KMemoryAttribute::DeviceShared));
KMemoryAttribute::None, KMemoryAttribute::DeviceShared));

// Create an update allocator.
// Create an update allocator.
Result allocator_result{ResultSuccess};
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
R_TRY(allocator_result);
@@ -1915,10 +1867,13 @@ Result KPageTable::LockForMapDeviceAddressSpace(VAddr address, size_t size, KMem
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages,
&KMemoryBlock::ShareToDevice, KMemoryPermission::None);
&KMemoryBlock::ShareToDevice, KMemoryPermission::None);

// Set whether the locked memory was io.
*out_is_io = old_state == KMemoryState::Io;

R_SUCCEED();
R_SUCCEED();
}
}

Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) {
Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size, bool check_heap) {
// Lightly validate the range before doing anything else.
// Lightly validate the range before doing anything else.
const size_t num_pages = size / PageSize;
const size_t num_pages = size / PageSize;
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
R_UNLESS(this->Contains(address, size), ResultInvalidCurrentMemory);
@@ -1927,16 +1882,16 @@ Result KPageTable::LockForUnmapDeviceAddressSpace(VAddr address, size_t size) {
KScopedLightLock lk(m_general_lock);
KScopedLightLock lk(m_general_lock);

// Check the memory state.
// Check the memory state.
const auto test_state = KMemoryState::FlagCanDeviceMap |
(check_heap ? KMemoryState::FlagReferenceCounted : KMemoryState::None);
size_t num_allocator_blocks;
size_t num_allocator_blocks;
R_TRY(this->CheckMemoryStateContiguous(
R_TRY(this->CheckMemoryStateContiguous(
std::addressof(num_allocator_blocks), address, size,
std::addressof(num_allocator_blocks), address, size, test_state, test_state,
KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
KMemoryState::FlagReferenceCounted | KMemoryState::FlagCanDeviceMap,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryPermission::None, KMemoryPermission::None,
KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));
KMemoryAttribute::DeviceShared | KMemoryAttribute::Locked, KMemoryAttribute::DeviceShared));

// Create an update allocator.
// Create an update allocator.
Result allocator_result{ResultSuccess};
Result allocator_result;
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
KMemoryBlockManagerUpdateAllocator allocator(std::addressof(allocator_result),
m_memory_block_slab_manager, num_allocator_blocks);
m_memory_block_slab_manager, num_allocator_blocks);
R_TRY(allocator_result);
R_TRY(allocator_result);
@@ -2070,6 +2025,10 @@ Result KPageTable::Operate(VAddr addr, size_t num_pages, KMemoryPermission perm,
m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
m_system.Memory().MapMemoryRegion(*m_page_table_impl, addr, num_pages * PageSize, map_addr);
break;
break;
}
}
case OperationType::Separate: {
// HACK: Unimplemented.
break;
}
case OperationType::ChangePermissions:
case OperationType::ChangePermissions:
case OperationType::ChangePermissionsAndRefresh:
case OperationType::ChangePermissionsAndRefresh:
break;
break;
@@ -2105,6 +2064,7 @@ VAddr KPageTable::GetRegionAddress(KMemoryState state) const {
case KMemoryState::GeneratedCode:
case KMemoryState::GeneratedCode:
case KMemoryState::CodeOut:
case KMemoryState::CodeOut:
case KMemoryState::Coverage:
case KMemoryState::Coverage:
case KMemoryState::Insecure:
return m_alias_code_region_start;
return m_alias_code_region_start;
case KMemoryState::Code:
case KMemoryState::Code:
case KMemoryState::CodeData:
case KMemoryState::CodeData:
@@ -2140,6 +2100,7 @@ size_t KPageTable::GetRegionSize(KMemoryState state) const {
case KMemoryState::GeneratedCode:
case KMemoryState::GeneratedCode:
case KMemoryState::CodeOut:
case KMemoryState::CodeOut:
case KMemoryState::Coverage:
case KMemoryState::Coverage:
case KMemoryState::Insecure:
return m_alias_code_region_end - m_alias_code_region_start;
return m_alias_code_region_end - m_alias_code_region_start;
case KMemoryState::Code:
case KMemoryState::Code:
case KMemoryState::CodeData:
case KMemoryState::CodeData:
@@ -2181,6 +2142,7 @@ bool KPageTable::CanContain(VAddr addr, size_t size, KMemoryState state) const {
case KMemoryState::GeneratedCode:
case KMemoryState::GeneratedCode:
case KMemoryState::CodeOut:
case KMemoryState::CodeOut:
case KMemoryState::Coverage:
case KMemoryState::Coverage:
case KMemoryState::Insecure:
return is_in_region && !is_in_heap && !is_in_alias;
return is_in_region && !is_in_heap && !is_in_alias;
case KMemoryState::Normal:
case KMemoryState::Normal:
ASSERT(is_in_heap);
ASSERT(is_in_heap);

@@ -126,10 +126,12 @@ NvResult nvmap::IocAlloc(const std::vector<u8>& input, std::vector<u8>& output)
LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
LOG_CRITICAL(Service_NVDRV, "Object failed to allocate, handle={:08X}", params.handle);
return result;
return result;
}
}
bool is_out_io{};
ASSERT(system.CurrentProcess()
ASSERT(system.CurrentProcess()
->PageTable()
->PageTable()
.LockForMapDeviceAddressSpace(handle_description->address, handle_description->size,
.LockForMapDeviceAddressSpace(&is_out_io, handle_description->address,
Kernel::KMemoryPermission::None, true)
handle_description->size,
Kernel::KMemoryPermission::None, true, false)
.IsSuccess());
.IsSuccess());
std::memcpy(output.data(), &params, sizeof(params));
std::memcpy(output.data(), &params, sizeof(params));
return result;
return result;