Mirror of https://github.com/Lime3DS/Lime3DS (synced 2024-12-27 09:32:30 -06:00)
kernel/thread: Change owner_process to std::weak_ptr (#5325)
* kernel/thread: Change owner_process to std::weak_ptr

  Previously this leaked almost all kernel objects. In short, Threads own Processes, which own HandleTables, which own maps of Objects, which include Threads. Changing this to weak_ptr at least got the camera interfaces to destruct properly. Did not really check the other objects though, and I think there are probably more leaks.

* hle/kernel: Lock certain objects while deserializing

  When deserializing other kernel objects, these objects (`MemoryRegion`s and `VMManager`s) can possibly get modified. To avoid inconsistent state caused by destructor side-effects, we may as well simply lock them until loading is fully completed.

* Fix silly typo

  Somehow this didn't break?!
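To make the first bullet concrete, here is a minimal, self-contained sketch of the reference cycle being broken. The class names mirror the commit message, but the bodies are illustrative, not the emulator's real declarations:

#include <map>
#include <memory>

struct Object {
    virtual ~Object() = default;
};

struct Process;

struct Thread : Object {
    // Old: std::shared_ptr<Process> kept the cycle below alive forever.
    // New: a weak back-reference, so the Process can actually be destroyed.
    std::weak_ptr<Process> owner_process;
};

struct HandleTable {
    // Owns every kernel object, including the process's own Threads.
    std::map<int, std::shared_ptr<Object>> objects;
};

struct Process {
    HandleTable handle_table;
};

With shared_ptr, Thread -> Process -> HandleTable -> Thread formed a strong cycle that reference counting can never collect; downgrading the back edge to weak_ptr breaks it. The cost is visible throughout the diff: every reader of owner_process now calls .lock() and must handle a null result, with an ASSERT where the process must exist and an if-guard where it may legitimately be gone.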
Parent: 80c9f9abbb
Commit: de3d7cf49f
13 changed files with 121 additions and 31 deletions
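Before the hunks, a condensed sketch of the second bullet's lock-until-loaded pattern. This folds the two real types (MemoryRegionInfo and VMManager) into one hypothetical LockableRegion and uses plain assert in place of the codebase's ASSERT macro; see the actual hunks below for the real members:

#include <cassert>

struct LockableRegion {
    bool is_locked{};

    // Mutations triggered by destructor side-effects mid-load are ignored...
    void Free() {
        if (is_locked) {
            return;
        }
        // ... real bookkeeping would happen here ...
    }

    // ...while calls that should never happen mid-load assert instead.
    void Reset() {
        assert(!is_locked);
        // ... real bookkeeping would happen here ...
    }

    // Lock immediately on load; per this commit, KernelSystem::serialize
    // calls Unlock() once every object has been fully deserialized.
    template <class Archive>
    void serialize(Archive& ar, const unsigned int) {
        // ar& ...members...;
        if (Archive::is_loading::value) {
            is_locked = true;
        }
    }

    void Unlock() {
        is_locked = false;
    }
};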
@@ -243,7 +243,6 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
     std::vector<std::unique_ptr<WaitTreeItem>> list(WaitTreeWaitObject::GetChildren());

     const auto& thread = static_cast<const Kernel::Thread&>(object);
-    const auto& process = thread.owner_process;

     QString processor;
     switch (thread.processor_id) {

@@ -267,10 +266,12 @@ std::vector<std::unique_ptr<WaitTreeItem>> WaitTreeThread::GetChildren() const {
     list.push_back(std::make_unique<WaitTreeText>(tr("object id = %1").arg(thread.GetObjectId())));
     list.push_back(std::make_unique<WaitTreeText>(tr("processor = %1").arg(processor)));
     list.push_back(std::make_unique<WaitTreeText>(tr("thread id = %1").arg(thread.GetThreadId())));
-    list.push_back(
-        std::make_unique<WaitTreeText>(tr("process = %1 (%2)")
-                                           .arg(QString::fromStdString(process->GetName()))
-                                           .arg(process->process_id)));
+    if (auto process = thread.owner_process.lock()) {
+        list.push_back(
+            std::make_unique<WaitTreeText>(tr("process = %1 (%2)")
+                                               .arg(QString::fromStdString(process->GetName()))
+                                               .arg(process->process_id)));
+    }
     list.push_back(std::make_unique<WaitTreeText>(tr("priority = %1(current) / %2(normal)")
                                                       .arg(thread.current_priority)
                                                       .arg(thread.nominal_priority)));

@@ -29,7 +29,9 @@ public:
         callback->WakeUp(thread, *context, reason);
     }

-    auto& process = thread->owner_process;
+    auto process = thread->owner_process.lock();
+    ASSERT(process);

     // We must copy the entire command buffer *plus* the entire static buffers area, since
     // the translation might need to read from it in order to retrieve the StaticBuffer
     // target addresses.

@@ -24,8 +24,9 @@ ResultCode TranslateCommandBuffer(Kernel::KernelSystem& kernel, Memory::MemorySy
                                   VAddr dst_address,
                                   std::vector<MappedBufferContext>& mapped_buffer_context,
                                   bool reply) {
-    auto& src_process = src_thread->owner_process;
-    auto& dst_process = dst_thread->owner_process;
+    auto src_process = src_thread->owner_process.lock();
+    auto dst_process = dst_thread->owner_process.lock();
+    ASSERT(src_process && dst_process);

     IPC::Header header;
     // TODO(Subv): Replace by Memory::Read32 when possible.

@@ -50,19 +50,21 @@ void Recorder::RegisterRequest(const std::shared_ptr<Kernel::ClientSession>& cli
                                const std::shared_ptr<Kernel::Thread>& client_thread) {
     const u32 thread_id = client_thread->GetThreadId();

-    RequestRecord record = {/* id */ ++record_count,
-                            /* status */ RequestStatus::Sent,
-                            /* client_process */ GetObjectInfo(client_thread->owner_process.get()),
-                            /* client_thread */ GetObjectInfo(client_thread.get()),
-                            /* client_session */ GetObjectInfo(client_session.get()),
-                            /* client_port */ GetObjectInfo(client_session->parent->port.get()),
-                            /* server_process */ {},
-                            /* server_thread */ {},
-                            /* server_session */ GetObjectInfo(client_session->parent->server)};
-    record_map.insert_or_assign(thread_id, std::make_unique<RequestRecord>(record));
-    client_session_map.insert_or_assign(thread_id, client_session);
+    if (auto owner_process = client_thread->owner_process.lock()) {
+        RequestRecord record = {/* id */ ++record_count,
+                                /* status */ RequestStatus::Sent,
+                                /* client_process */ GetObjectInfo(owner_process.get()),
+                                /* client_thread */ GetObjectInfo(client_thread.get()),
+                                /* client_session */ GetObjectInfo(client_session.get()),
+                                /* client_port */ GetObjectInfo(client_session->parent->port.get()),
+                                /* server_process */ {},
+                                /* server_thread */ {},
+                                /* server_session */ GetObjectInfo(client_session->parent->server)};
+        record_map.insert_or_assign(thread_id, std::make_unique<RequestRecord>(record));
+        client_session_map.insert_or_assign(thread_id, client_session);

-    InvokeCallbacks(record);
+        InvokeCallbacks(record);
+    }
 }

 void Recorder::SetRequestInfo(const std::shared_ptr<Kernel::Thread>& client_thread,

@@ -82,7 +84,9 @@ void Recorder::SetRequestInfo(const std::shared_ptr<Kernel::Thread>& client_thre
     record.translated_request_cmdbuf = std::move(translated_cmdbuf);

     if (server_thread) {
-        record.server_process = GetObjectInfo(server_thread->owner_process.get());
+        if (auto owner_process = server_thread->owner_process.lock()) {
+            record.server_process = GetObjectInfo(owner_process.get());
+        }
         record.server_thread = GetObjectInfo(server_thread.get());
     } else {
         record.is_hle = true;

@@ -177,6 +177,15 @@ void KernelSystem::serialize(Archive& ar, const unsigned int file_version) {
     ar& stored_processes;
     ar& next_thread_id;
     // Deliberately don't include debugger info to allow debugging through loads
+
+    if (Archive::is_loading::value) {
+        for (auto& memory_region : memory_regions) {
+            memory_region->Unlock();
+        }
+        for (auto& process : process_list) {
+            process->vm_manager.Unlock();
+        }
+    }
 }

 SERIALIZE_IMPL(KernelSystem)

@@ -174,6 +174,8 @@ void KernelSystem::MapSharedPages(VMManager& address_space) {
 }

 void MemoryRegionInfo::Reset(u32 base, u32 size) {
+    ASSERT(!is_locked);
+
     this->base = base;
     this->size = size;
     used = 0;

@@ -184,6 +186,8 @@ void MemoryRegionInfo::Reset(u32 base, u32 size) {
 }

 MemoryRegionInfo::IntervalSet MemoryRegionInfo::HeapAllocate(u32 size) {
+    ASSERT(!is_locked);
+
     IntervalSet result;
     u32 rest = size;

@@ -211,6 +215,8 @@ MemoryRegionInfo::IntervalSet MemoryRegionInfo::HeapAllocate(u32 size) {
 }

 bool MemoryRegionInfo::LinearAllocate(u32 offset, u32 size) {
+    ASSERT(!is_locked);
+
     Interval interval(offset, offset + size);
     if (!boost::icl::contains(free_blocks, interval)) {
         // The requested range is already allocated

@@ -222,6 +228,8 @@ bool MemoryRegionInfo::LinearAllocate(u32 offset, u32 size) {
 }

 std::optional<u32> MemoryRegionInfo::LinearAllocate(u32 size) {
+    ASSERT(!is_locked);
+
     // Find the first sufficient continuous block from the lower address
     for (const auto& interval : free_blocks) {
         ASSERT(interval.bounds() == boost::icl::interval_bounds::right_open());

@@ -238,10 +246,18 @@ std::optional<u32> MemoryRegionInfo::LinearAllocate(u32 size) {
 }

 void MemoryRegionInfo::Free(u32 offset, u32 size) {
+    if (is_locked) {
+        return;
+    }
+
     Interval interval(offset, offset + size);
     ASSERT(!boost::icl::intersects(free_blocks, interval)); // must be allocated blocks
     free_blocks += interval;
     used -= size;
 }

+void MemoryRegionInfo::Unlock() {
+    is_locked = false;
+}
+
 } // namespace Kernel

@@ -26,6 +26,10 @@ struct MemoryRegionInfo {

     IntervalSet free_blocks;

+    // When locked, Free calls will be ignored, while Allocate calls will hit an assert. A memory
+    // region locks itself after deserialization.
+    bool is_locked{};
+
     /**
      * Reset the allocator state
      * @param base The base offset the beginning of FCRAM.

@@ -63,6 +67,11 @@ struct MemoryRegionInfo {
      */
     void Free(u32 offset, u32 size);

+    /**
+     * Unlock the MemoryRegion. Used after loading is completed.
+     */
+    void Unlock();
+
 private:
     friend class boost::serialization::access;
     template <class Archive>

|
@ -71,6 +80,9 @@ private:
|
||||||
ar& size;
|
ar& size;
|
||||||
ar& used;
|
ar& used;
|
||||||
ar& free_blocks;
|
ar& free_blocks;
|
||||||
|
if (Archive::is_loading::value) {
|
||||||
|
is_locked = true;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
|
|
@@ -85,7 +85,8 @@ ResultCode ServerSession::HandleSyncRequest(std::shared_ptr<Thread> thread) {
     // If this ServerSession has an associated HLE handler, forward the request to it.
     if (hle_handler != nullptr) {
         std::array<u32_le, IPC::COMMAND_BUFFER_LENGTH + 2 * IPC::MAX_STATIC_BUFFERS> cmd_buf;
-        auto current_process = thread->owner_process;
+        auto current_process = thread->owner_process.lock();
+        ASSERT(current_process);
         kernel.memory.ReadBlock(*current_process, thread->GetCommandBufferAddress(), cmd_buf.data(),
                                 cmd_buf.size() * sizeof(u32));

@@ -283,7 +283,7 @@ void SVC::ExitProcess() {
     // Stop all the process threads that are currently waiting for objects.
     auto& thread_list = kernel.GetCurrentThreadManager().GetThreadList();
     for (auto& thread : thread_list) {
-        if (thread->owner_process != current_process)
+        if (thread->owner_process.lock() != current_process)
             continue;

         if (thread.get() == kernel.GetCurrentThreadManager().GetCurrentThread())

@@ -1041,8 +1041,7 @@ ResultCode SVC::GetProcessIdOfThread(u32* process_id, Handle thread_handle) {
     if (thread == nullptr)
         return ERR_INVALID_HANDLE;

-    const std::shared_ptr<Process> process = thread->owner_process;
-
+    const std::shared_ptr<Process> process = thread->owner_process.lock();
     ASSERT_MSG(process != nullptr, "Invalid parent process for thread={:#010X}", thread_handle);

     *process_id = process->process_id;

@@ -45,7 +45,16 @@ void Thread::serialize(Archive& ar, const unsigned int file_version) {
     ar& tls_address;
     ar& held_mutexes;
     ar& pending_mutexes;
-    ar& owner_process;
+
+    // Note: this is equivalent of what is done in boost/serialization/weak_ptr.hpp, but it's
+    // compatible with previous versions of savestates.
+    // TODO(SaveStates): When the savestate version is bumped, simplify this again.
+    std::shared_ptr<Process> shared_owner_process = owner_process.lock();
+    ar& shared_owner_process;
+    if (Archive::is_loading::value) {
+        owner_process = shared_owner_process;
+    }
+
     ar& wait_objects;
     ar& wait_address;
     ar& name;

@@ -99,7 +108,8 @@ void Thread::Stop() {
     u32 tls_page = (tls_address - Memory::TLS_AREA_VADDR) / Memory::PAGE_SIZE;
     u32 tls_slot =
         ((tls_address - Memory::TLS_AREA_VADDR) % Memory::PAGE_SIZE) / Memory::TLS_ENTRY_SIZE;
-    owner_process->tls_slots[tls_page].reset(tls_slot);
+    ASSERT(owner_process.lock());
+    owner_process.lock()->tls_slots[tls_page].reset(tls_slot);
 }

 void ThreadManager::SwitchContext(Thread* new_thread) {

@@ -110,7 +120,7 @@ void ThreadManager::SwitchContext(Thread* new_thread) {

     // Save context for previous thread
     if (previous_thread) {
-        previous_process = previous_thread->owner_process;
+        previous_process = previous_thread->owner_process.lock();
         previous_thread->last_running_ticks = cpu->GetTimer().GetTicks();
         cpu->SaveContext(previous_thread->context);

@@ -135,8 +145,9 @@ void ThreadManager::SwitchContext(Thread* new_thread) {
         ready_queue.remove(new_thread->current_priority, new_thread);
         new_thread->status = ThreadStatus::Running;

-        if (previous_process != current_thread->owner_process) {
-            kernel.SetCurrentProcessForCPU(current_thread->owner_process, cpu->GetID());
+        ASSERT(current_thread->owner_process.lock());
+        if (previous_process != current_thread->owner_process.lock()) {
+            kernel.SetCurrentProcessForCPU(current_thread->owner_process.lock(), cpu->GetID());
         }

         cpu->LoadContext(new_thread->context);

@@ -308,7 +308,7 @@ public:
     /// Mutexes that this thread is currently waiting for.
     boost::container::flat_set<std::shared_ptr<Mutex>> pending_mutexes{};

-    std::shared_ptr<Process> owner_process{}; ///< Process that owns this thread
+    std::weak_ptr<Process> owner_process{}; ///< Process that owns this thread

     /// Objects that the thread is waiting on, in the same order as they were
     // passed to WaitSynchronization1/N.

@@ -45,6 +45,8 @@ VMManager::VMManager(Memory::MemorySystem& memory)
 VMManager::~VMManager() = default;

 void VMManager::Reset() {
+    ASSERT(!is_locked);
+
     vma_map.clear();

     // Initialize the map with a single free region covering the entire managed space.

@@ -67,6 +69,7 @@ VMManager::VMAHandle VMManager::FindVMA(VAddr target) const {

 ResultVal<VAddr> VMManager::MapBackingMemoryToBase(VAddr base, u32 region_size, MemoryRef memory,
                                                    u32 size, MemoryState state) {
+    ASSERT(!is_locked);

     // Find the first Free VMA.
     VMAHandle vma_handle = std::find_if(vma_map.begin(), vma_map.end(), [&](const auto& vma) {

@@ -96,6 +99,7 @@ ResultVal<VAddr> VMManager::MapBackingMemoryToBase(VAddr base, u32 region_size,

 ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, MemoryRef memory,
                                                             u32 size, MemoryState state) {
+    ASSERT(!is_locked);
     ASSERT(memory.GetPtr() != nullptr);

     // This is the appropriately sized VMA that will turn into our allocation.

@@ -115,6 +119,8 @@ ResultVal<VMManager::VMAHandle> VMManager::MapBackingMemory(VAddr target, Memory
 ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u32 size,
                                                    MemoryState state,
                                                    Memory::MMIORegionPointer mmio_handler) {
+    ASSERT(!is_locked);
+
     // This is the appropriately sized VMA that will turn into our allocation.
     CASCADE_RESULT(VMAIter vma_handle, CarveVMA(target, size));
     VirtualMemoryArea& final_vma = vma_handle->second;

@@ -133,6 +139,10 @@ ResultVal<VMManager::VMAHandle> VMManager::MapMMIO(VAddr target, PAddr paddr, u3
 ResultCode VMManager::ChangeMemoryState(VAddr target, u32 size, MemoryState expected_state,
                                         VMAPermission expected_perms, MemoryState new_state,
                                         VMAPermission new_perms) {
+    if (is_locked) {
+        return RESULT_SUCCESS;
+    }
+
     VAddr target_end = target + size;
     VMAIter begin_vma = StripIterConstness(FindVMA(target));
     VMAIter i_end = vma_map.lower_bound(target_end);

@@ -167,6 +177,8 @@ ResultCode VMManager::ChangeMemoryState(VAddr target, u32 size, MemoryState expe
 }

 VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
+    ASSERT(!is_locked);
+
     VirtualMemoryArea& vma = vma_handle->second;
     vma.type = VMAType::Free;
     vma.permissions = VMAPermission::None;

@@ -181,6 +193,8 @@ VMManager::VMAIter VMManager::Unmap(VMAIter vma_handle) {
 }

 ResultCode VMManager::UnmapRange(VAddr target, u32 size) {
+    ASSERT(!is_locked);
+
     CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
     const VAddr target_end = target + size;

@@ -196,6 +210,8 @@ ResultCode VMManager::UnmapRange(VAddr target, u32 size) {
 }

 VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission new_perms) {
+    ASSERT(!is_locked);
+
     VMAIter iter = StripIterConstness(vma_handle);

     VirtualMemoryArea& vma = iter->second;

@@ -206,6 +222,8 @@ VMManager::VMAHandle VMManager::Reprotect(VMAHandle vma_handle, VMAPermission ne
 }

 ResultCode VMManager::ReprotectRange(VAddr target, u32 size, VMAPermission new_perms) {
+    ASSERT(!is_locked);
+
     CASCADE_RESULT(VMAIter vma, CarveVMARange(target, size));
     const VAddr target_end = target + size;

@@ -231,6 +249,10 @@ void VMManager::LogLayout(Log::Level log_level) const {
     }
 }

+void VMManager::Unlock() {
+    is_locked = false;
+}
+
 VMManager::VMAIter VMManager::StripIterConstness(const VMAHandle& iter) {
     // This uses a neat C++ trick to convert a const_iterator to a regular iterator, given
     // non-const access to its container.

@@ -212,6 +212,11 @@ public:
     /// is scheduled.
     std::shared_ptr<Memory::PageTable> page_table;

+    /**
+     * Unlock the VMManager. Used after loading is completed.
+     */
+    void Unlock();
+
 private:
     using VMAIter = decltype(vma_map)::iterator;

@@ -250,10 +255,17 @@ private:

     Memory::MemorySystem& memory;

+    // When locked, ChangeMemoryState calls will be ignored, other modification calls will hit an
+    // assert. VMManager locks itself after deserialization.
+    bool is_locked{};
+
     template <class Archive>
     void serialize(Archive& ar, const unsigned int) {
         ar& vma_map;
         ar& page_table;
+        if (Archive::is_loading::value) {
+            is_locked = true;
+        }
     }
     friend class boost::serialization::access;
 };