Mutex: Replace g_mutex_held_locks with a set inside Thread
parent 0f69668fc6
commit 4e84df8be3
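In short, the commit removes the file-scope multimap that mapped each owning thread to the mutexes it holds and stores that information directly on the Thread object instead. Below is a minimal sketch of the two layouts, using standard-library types and simplified names rather than the actual kernel classes (Kernel::Thread, Kernel::Mutex, SharedPtr):

    #include <map>
    #include <memory>
    #include <set>

    // Simplified stand-ins for the kernel types; not the real Citra/yuzu classes.
    struct Mutex {};

    // New layout: each thread carries the set of mutexes it currently holds.
    struct Thread {
        std::set<std::shared_ptr<Mutex>> held_mutexes;
    };

    // Old layout: a single global multimap from owning thread to each held mutex.
    static std::multimap<std::shared_ptr<Thread>, std::shared_ptr<Mutex>> g_mutex_held_locks;

    int main() {
        auto thread = std::make_shared<Thread>();
        auto mutex = std::make_shared<Mutex>();

        // Old bookkeeping: acquire inserts into the global map, thread exit erases by key.
        g_mutex_held_locks.emplace(thread, mutex);
        g_mutex_held_locks.erase(thread);

        // New bookkeeping: acquire and thread exit touch only the thread's own set.
        thread->held_mutexes.insert(mutex);
        thread->held_mutexes.clear();
        return 0;
    }

The diff below makes that swap in mutex.cpp and adds the held_mutexes member (a boost::container::flat_set) to Thread.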
src/core/hle/kernel/mutex.cpp

@@ -5,6 +5,8 @@
 #include <map>
 #include <vector>
 
+#include <boost/range/algorithm_ext/erase.hpp>
+
 #include "common/common.h"
 
 #include "core/hle/kernel/kernel.h"
@@ -13,9 +15,6 @@
 
 namespace Kernel {
 
-typedef std::multimap<SharedPtr<Thread>, SharedPtr<Mutex>> MutexMap;
-static MutexMap g_mutex_held_locks;
-
 /**
  * Resumes a thread waiting for the specified mutex
  * @param mutex The mutex that some thread is waiting on
@@ -33,15 +32,10 @@ static void ResumeWaitingThread(Mutex* mutex) {
 }
 
 void ReleaseThreadMutexes(Thread* thread) {
-    auto locked_range = g_mutex_held_locks.equal_range(thread);
-
     // Release every mutex that the thread holds, and resume execution on the waiting threads
-    for (auto iter = locked_range.first; iter != locked_range.second; ++iter) {
-        ResumeWaitingThread(iter->second.get());
+    for (auto& mtx : thread->held_mutexes) {
+        ResumeWaitingThread(mtx.get());
     }
-
-    // Erase all the locks that this thread holds
-    g_mutex_held_locks.erase(thread);
+    thread->held_mutexes.clear();
 }
 
 ResultVal<SharedPtr<Mutex>> Mutex::Create(bool initial_locked, std::string name) {
@@ -76,7 +70,7 @@ void Mutex::Acquire(Thread* thread) {
 
     locked = true;
 
-    g_mutex_held_locks.insert(std::make_pair(thread, this));
+    thread->held_mutexes.insert(this);
     holding_thread = thread;
 }
@@ -84,15 +78,7 @@ void Mutex::Release() {
     if (!locked)
         return;
 
-    auto locked_range = g_mutex_held_locks.equal_range(holding_thread);
-
-    for (MutexMap::iterator iter = locked_range.first; iter != locked_range.second; ++iter) {
-        if (iter->second == this) {
-            g_mutex_held_locks.erase(iter);
-            break;
-        }
-    }
-
+    holding_thread->held_mutexes.erase(this);
     ResumeWaitingThread(this);
 }
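One detail worth noting about the Release() change above: a std::multimap can only erase every entry for a key in one call, so removing a single (thread, mutex) pair required the equal_range scan the old code used, whereas the per-thread set needs just one erase by value. A small standalone illustration of the two idioms (generic int keys, not the kernel types):

    #include <cassert>
    #include <map>
    #include <set>

    int main() {
        // Old idiom: erase exactly one (key, value) pair from a multimap.
        std::multimap<int, int> held_by_thread = {{1, 10}, {1, 20}, {2, 30}};
        auto range = held_by_thread.equal_range(1);
        for (auto it = range.first; it != range.second; ++it) {
            if (it->second == 20) {
                held_by_thread.erase(it);
                break;
            }
        }
        assert(held_by_thread.count(1) == 1);

        // New idiom: a per-thread set erases one element directly by value.
        std::set<int> held_mutexes = {10, 20};
        held_mutexes.erase(20);
        assert(held_mutexes.count(20) == 0);
        return 0;
    }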
src/core/hle/kernel/thread.cpp

@@ -40,6 +40,9 @@ static Thread* current_thread;
 static const u32 INITIAL_THREAD_ID = 1; ///< The first available thread id at startup
 static u32 next_thread_id; ///< The next available thread id
 
+Thread::Thread() {
+}
+
 Thread* GetCurrentThread() {
     return current_thread;
 }
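The empty out-of-line constructor added here pairs with the header change further down, where "Thread() = default;" becomes a plain declaration. The commit itself does not say why, but a common reason for this pattern is that the header now holds a container of smart pointers to a type that is only forward-declared there (class Mutex;), so the compiler-generated special members have to be emitted in a translation unit that sees the complete type. A generic sketch of that pattern, using hypothetical Owner/Widget names and std::unique_ptr purely for illustration:

    #include <memory>
    #include <vector>

    // --- header (sketch) ---
    class Widget;  // forward declaration only; the full definition is not visible here

    class Owner {
    public:
        Owner();   // declared here, defined below where Widget is a complete type
        ~Owner();
    private:
        std::vector<std::unique_ptr<Widget>> widgets;  // destroying these requires a complete Widget
    };

    // --- source file (sketch) ---
    class Widget {};  // full definition available in this translation unit

    Owner::Owner() = default;
    Owner::~Owner() = default;

    int main() {
        Owner owner;  // fine: the special members were generated next to Widget's definition
        return 0;
    }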
src/core/hle/kernel/thread.h

@@ -7,6 +7,8 @@
 #include <string>
 #include <vector>
 
+#include <boost/container/flat_set.hpp>
+
 #include "common/common_types.h"
 
 #include "core/core.h"
@@ -40,6 +42,8 @@ enum ThreadStatus {
 
 namespace Kernel {
 
+class Mutex;
+
 class Thread final : public WaitObject {
 public:
     static ResultVal<SharedPtr<Thread>> Create(std::string name, VAddr entry_point, s32 priority,
@@ -109,8 +113,10 @@
     s32 processor_id;
 
-    std::vector<SharedPtr<WaitObject>> wait_objects; ///< Objects that the thread is waiting on
+    /// Mutexes currently held by this thread, which will be released when it exits.
+    boost::container::flat_set<SharedPtr<Mutex>> held_mutexes;
+
+    std::vector<SharedPtr<WaitObject>> wait_objects; ///< Objects that the thread is waiting on
     VAddr wait_address;   ///< If waiting on an AddressArbiter, this is the arbitration address
     bool wait_all;        ///< True if the thread is waiting on all objects before resuming
     bool wait_set_output; ///< True if the output parameter should be set on thread wakeup
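The new held_mutexes member uses boost::container::flat_set, which keeps its elements in a sorted contiguous array rather than a node-based tree; presumably a good fit for the handful of mutexes a thread holds, though that rationale is an inference rather than something stated in the commit. A tiny standalone example of the container's basic operations (plain ints instead of SharedPtr<Mutex>):

    #include <boost/container/flat_set.hpp>
    #include <iostream>

    int main() {
        // flat_set behaves like std::set but stores elements in a sorted vector,
        // so iteration is cache-friendly and lookups are binary searches.
        boost::container::flat_set<int> held;
        held.insert(3);
        held.insert(1);
        held.insert(2);
        held.erase(2);              // erase by value, as Mutex::Release() does with `this`
        for (int v : held)
            std::cout << v << ' ';  // prints: 1 3
        std::cout << '\n';
        return 0;
    }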
@@ -121,7 +127,7 @@
     bool idle = false;
 
 private:
-    Thread() = default;
+    Thread();
 
     /// Handle used as userdata to reference this object when inserting into the CoreTiming queue.
     Handle callback_handle;