cleanup more synchronization code

Nguyen Anh Quynh 2017-01-09 14:05:39 +08:00
parent d7ead1135d
commit 52cb0ba78e
13 changed files with 0 additions and 279 deletions


@@ -144,9 +144,6 @@ static inline bool _hook_exists_bounded(struct list_item *cur, uint64_t addr)
struct uc_struct {
uc_arch arch;
uc_mode mode;
QemuMutex qemu_global_mutex; // qemu/cpus.c
QemuCond qemu_cpu_cond; // qemu/cpus.c
QemuCond *tcg_halt_cond; // qemu/cpus.c
uc_err errnum; // qemu/cpu-exec.c
AddressSpace as;
query_t query;
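
Note: the three fields removed above (the global mutex plus the two condition variables) were the engine-wide handshake state borrowed from QEMU's multi-threaded vCPU startup. A minimal sketch of how this region of the private engine struct presumably reads after the change, showing only the fields visible in the surrounding context lines:

struct uc_struct {
    uc_arch arch;
    uc_mode mode;
    /* qemu_global_mutex, qemu_cpu_cond and tcg_halt_cond are gone:
       Unicorn drives TCG from a single thread per engine instance,
       so the vCPU startup handshake they served never happens. */
    uc_err errnum;      // qemu/cpu-exec.c
    AddressSpace as;
    query_t query;
    /* ... remaining fields unchanged ... */
};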


@@ -90,16 +90,13 @@ int qemu_init_vcpu(CPUState *cpu)
return 0;
}
static void *qemu_tcg_cpu_loop(struct uc_struct *uc)
{
CPUState *cpu = uc->cpu;
//qemu_tcg_init_cpu_signals();
qemu_mutex_lock(&uc->qemu_global_mutex);
cpu->created = true;
qemu_cond_signal(&uc->qemu_cpu_cond);
while (1) {
if (tcg_exec_all(uc))
@@ -107,29 +104,14 @@ static void *qemu_tcg_cpu_loop(struct uc_struct *uc)
}
cpu->created = false;
qemu_cond_destroy(cpu->halt_cond);
g_free(cpu->halt_cond);
cpu->halt_cond = NULL;
qemu_mutex_unlock(&uc->qemu_global_mutex);
return NULL;
}
static int qemu_tcg_init_vcpu(CPUState *cpu)
{
struct uc_struct *uc = cpu->uc;
tcg_cpu_address_space_init(cpu, cpu->as);
/* share a single thread for all cpus with TCG */
if (!cpu->halt_cond) {
cpu->halt_cond = g_malloc0(sizeof(QemuCond));
qemu_cond_init(cpu->halt_cond);
}
uc->tcg_halt_cond = cpu->halt_cond;
return 0;
}
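
With the handshake gone, the TCG loop and the vCPU init shrink to almost nothing. A hedged sketch of the likely result, reconstructed from the context lines above; the exact loop body and whether the now-unused local uc in qemu_tcg_init_vcpu also disappears are assumptions:

static void *qemu_tcg_cpu_loop(struct uc_struct *uc)
{
    CPUState *cpu = uc->cpu;

    //qemu_tcg_init_cpu_signals();
    cpu->created = true;    /* no qemu_cpu_cond to signal: nothing waits
                               on vCPU creation any more */
    while (1) {
        if (tcg_exec_all(uc))
            break;          /* assumed loop exit, not shown in the hunk */
    }
    cpu->created = false;   /* halt_cond no longer exists, so nothing to
                               destroy or free on the way out */
    return NULL;
}

static int qemu_tcg_init_vcpu(CPUState *cpu)
{
    /* only the address-space wiring is left */
    tcg_cpu_address_space_init(cpu, cpu->as);
    return 0;
}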


@@ -7,28 +7,6 @@ struct QemuMutex {
pthread_mutex_t lock;
};
struct QemuCond {
pthread_cond_t cond;
};
struct QemuSemaphore {
#if defined(__APPLE__) || defined(__NetBSD__)
pthread_mutex_t lock;
pthread_cond_t cond;
unsigned int count;
#else
sem_t sem;
#endif
};
struct QemuEvent {
#ifndef __linux__
pthread_mutex_t lock;
pthread_cond_t cond;
#endif
unsigned value;
};
struct QemuThread {
pthread_t thread;
};
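
After the trim, the POSIX wrapper header presumably keeps only the two types the engine still uses; a sketch assuming nothing else in the header changed:

struct QemuMutex {
    pthread_mutex_t lock;   /* the mutex wrapper survives; only the cond,
                               semaphore and event emulation is dropped */
};

struct QemuThread {
    pthread_t thread;       /* still used by qemu_thread_create() */
};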


@@ -7,20 +7,6 @@ struct QemuMutex {
LONG owner;
};
struct QemuCond {
LONG waiters, target;
HANDLE sema;
HANDLE continue_event;
};
struct QemuSemaphore {
HANDLE sema;
};
struct QemuEvent {
HANDLE event;
};
typedef struct QemuThreadData QemuThreadData;
struct QemuThread {
QemuThreadData *data;


@@ -5,9 +5,6 @@
#include <stdbool.h>
typedef struct QemuMutex QemuMutex;
typedef struct QemuCond QemuCond;
typedef struct QemuSemaphore QemuSemaphore;
typedef struct QemuEvent QemuEvent;
typedef struct QemuThread QemuThread;
#ifdef _WIN32
@@ -22,24 +19,8 @@ typedef struct QemuThread QemuThread;
void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
void qemu_mutex_lock(QemuMutex *mutex);
int qemu_mutex_trylock(QemuMutex *mutex);
void qemu_mutex_unlock(QemuMutex *mutex);
#define rcu_read_lock() do { } while (0)
#define rcu_read_unlock() do { } while (0)
void qemu_cond_init(QemuCond *cond);
void qemu_cond_destroy(QemuCond *cond);
/*
* IMPORTANT: The implementation does not guarantee that pthread_cond_signal
* and pthread_cond_broadcast can be called except while the same mutex is
* held as in the corresponding pthread_cond_wait calls!
*/
void qemu_cond_signal(QemuCond *cond);
void qemu_cond_broadcast(QemuCond *cond);
void qemu_cond_wait(QemuCond *cond, QemuMutex *mutex);
struct uc_struct;
// return -1 on error, 0 on success
int qemu_thread_create(struct uc_struct *uc, QemuThread *thread, const char *name,
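
What survives of the public threading API is the mutex surface plus thread creation. A sketch of the remaining declarations assembled from the context lines above; the tail of the qemu_thread_create signature is taken from the POSIX implementation hunk further down:

void qemu_mutex_init(QemuMutex *mutex);
void qemu_mutex_destroy(QemuMutex *mutex);
void qemu_mutex_lock(QemuMutex *mutex);
void qemu_mutex_unlock(QemuMutex *mutex);

#define rcu_read_lock() do { } while (0)
#define rcu_read_unlock() do { } while (0)

struct uc_struct;
// return -1 on error, 0 on success
int qemu_thread_create(struct uc_struct *uc, QemuThread *thread, const char *name,
                       void *(*start_routine)(void *), void *arg, int mode);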


@@ -1,52 +0,0 @@
/*
* Abstraction layer for defining and using TLS variables
*
* Copyright (c) 2011 Red Hat, Inc
* Copyright (c) 2011 Linaro Limited
*
* Authors:
* Paolo Bonzini <pbonzini@redhat.com>
* Peter Maydell <peter.maydell@linaro.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#ifndef QEMU_TLS_H
#define QEMU_TLS_H
/* Per-thread variables. Note that we only have implementations
* which are really thread-local on Linux; the dummy implementations
* define plain global variables.
*
* This means that for the moment use should be restricted to
* per-VCPU variables, which are OK because:
* - the only -user mode supporting multiple VCPU threads is linux-user
* - TCG system mode is single-threaded regarding VCPUs
* - KVM system mode is multi-threaded but limited to Linux
*
* TODO: proper implementations via Win32 .tls sections and
* POSIX pthread_getspecific.
*/
#ifdef __linux__
#define DECLARE_TLS(type, x) extern DEFINE_TLS(type, x)
#define DEFINE_TLS(type, x) __thread __typeof__(type) tls__##x
#define tls_var(x) tls__##x
#else
/* Dummy implementations which define plain global variables */
#define DECLARE_TLS(type, x) extern DEFINE_TLS(type, x)
#define DEFINE_TLS(type, x) __typeof__(type) tls__##x
#define tls_var(x) tls__##x
#endif
#endif


@@ -26,7 +26,6 @@
#include "exec/hwaddr.h"
#include "qemu/queue.h"
#include "qemu/thread.h"
#include "qemu/tls.h"
#include "qemu/typedefs.h"
typedef int (*WriteCoreDumpFunction)(const void *buf, size_t size,
@@ -70,9 +69,6 @@ typedef void (*CPUUnassignedAccess)(CPUState *cpu, hwaddr addr,
struct TranslationBlock;
//DECLARE_TLS(CPUState *, current_cpu);
//#define current_cpu tls_var(current_cpu)
/**
* CPUClass:
* @class_by_name: Callback to map -cpu command line model name to an
@@ -222,7 +218,6 @@ struct CPUState {
int thread_id;
uint32_t host_tid;
bool running;
struct QemuCond *halt_cond;
struct qemu_work_item *queued_work_first, *queued_work_last;
bool thread_kicked;
bool created;


@@ -4,7 +4,6 @@
struct uc_struct;
/* cpus.c */
void qemu_init_cpu_loop(struct uc_struct*);
int resume_all_vcpus(struct uc_struct*);
void cpu_stop_current(struct uc_struct*);


@@ -70,9 +70,6 @@ struct QEMUTimerList {
QLIST_ENTRY(QEMUTimerList) list;
QEMUTimerListNotifyCB *notify_cb;
void *notify_opaque;
/* lightweight method to mark the end of timerlist's running */
QemuEvent timers_done_ev;
};
/**


@@ -74,42 +74,6 @@ void qemu_mutex_unlock(QemuMutex *mutex)
error_exit(err, __func__);
}
void qemu_cond_init(QemuCond *cond)
{
int err;
err = pthread_cond_init(&cond->cond, NULL);
if (err)
error_exit(err, __func__);
}
void qemu_cond_destroy(QemuCond *cond)
{
int err;
err = pthread_cond_destroy(&cond->cond);
if (err)
error_exit(err, __func__);
}
void qemu_cond_signal(QemuCond *cond)
{
int err;
err = pthread_cond_signal(&cond->cond);
if (err)
error_exit(err, __func__);
}
void qemu_cond_broadcast(QemuCond *cond)
{
int err;
err = pthread_cond_broadcast(&cond->cond);
if (err)
error_exit(err, __func__);
}
int qemu_thread_create(struct uc_struct *uc, QemuThread *thread, const char *name,
void *(*start_routine)(void*),
void *arg, int mode)


@@ -60,93 +60,6 @@ void qemu_mutex_unlock(QemuMutex *mutex)
LeaveCriticalSection(&mutex->lock);
}
void qemu_cond_init(QemuCond *cond)
{
memset(cond, 0, sizeof(*cond));
cond->sema = CreateSemaphore(NULL, 0, LONG_MAX, NULL);
if (!cond->sema) {
error_exit(GetLastError(), __func__);
}
cond->continue_event = CreateEvent(NULL, /* security */
FALSE, /* auto-reset */
FALSE, /* not signaled */
NULL); /* name */
if (!cond->continue_event) {
error_exit(GetLastError(), __func__);
}
}
void qemu_cond_destroy(QemuCond *cond)
{
BOOL result;
result = CloseHandle(cond->continue_event);
if (!result) {
error_exit(GetLastError(), __func__);
}
cond->continue_event = 0;
result = CloseHandle(cond->sema);
if (!result) {
error_exit(GetLastError(), __func__);
}
cond->sema = 0;
}
void qemu_cond_signal(QemuCond *cond)
{
DWORD result;
/*
* Signal only when there are waiters. cond->waiters is
* incremented by pthread_cond_wait under the external lock,
* so we are safe about that.
*/
if (cond->waiters == 0) {
return;
}
/*
* Waiting threads decrement it outside the external lock, but
* only if another thread is executing pthread_cond_broadcast and
* has the mutex. So, it also cannot be decremented concurrently
* with this particular access.
*/
cond->target = cond->waiters - 1;
result = SignalObjectAndWait(cond->sema, cond->continue_event,
INFINITE, FALSE);
if (result == WAIT_ABANDONED || result == WAIT_FAILED) {
error_exit(GetLastError(), __func__);
}
}
void qemu_cond_broadcast(QemuCond *cond)
{
BOOLEAN result;
/*
* As in pthread_cond_signal, access to cond->waiters and
* cond->target is locked via the external mutex.
*/
if (cond->waiters == 0) {
return;
}
cond->target = 0;
result = ReleaseSemaphore(cond->sema, cond->waiters, NULL);
if (!result) {
error_exit(GetLastError(), __func__);
}
/*
* At this point all waiters continue. Each one takes its
* slice of the semaphore. Now it's our turn to wait: Since
* the external mutex is held, no thread can leave cond_wait,
* yet. For this reason, we can be sure that no thread gets
* a chance to eat *more* than one slice. OTOH, it means
* that the last waiter must send us a wake-up.
*/
WaitForSingleObject(cond->continue_event, INFINITE);
}
struct QemuThreadData {
/* Passed to win32_start_routine. */
void *(*start_routine)(void *);


@@ -42,19 +42,6 @@ void cpu_resume(CPUState *cpu)
{
cpu->stop = false;
cpu->stopped = false;
qemu_cpu_kick(cpu);
}
void qemu_cpu_kick(CPUState *cpu)
{
qemu_cond_broadcast(cpu->halt_cond);
}
void qemu_init_cpu_loop(struct uc_struct* uc)
{
qemu_cond_init(&uc->qemu_cpu_cond);
qemu_mutex_init(&uc->qemu_global_mutex);
}
void cpu_stop_current(struct uc_struct *uc)
@@ -122,8 +109,6 @@ int machine_initialize(struct uc_struct *uc)
machine_class->max_cpus = 1;
configure_accelerator(current_machine);
qemu_init_cpu_loop(uc);
current_machine->cpu_model = NULL;
return machine_class->init(uc, current_machine);
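
The same pattern repeats in the bring-up code: with no halt condition variable there is no thread to wake and no loop state to initialize. A hedged sketch of the likely end state of the two touched functions:

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    /* qemu_cpu_kick() is gone: there is no sleeping vCPU thread to
       broadcast to, so resuming is just clearing the flags */
}

int machine_initialize(struct uc_struct *uc)
{
    /* ... */
    machine_class->max_cpus = 1;
    configure_accelerator(current_machine);
    /* qemu_init_cpu_loop(uc) dropped: the global mutex and CPU cond it
       used to initialize no longer exist in uc_struct */
    current_machine->cpu_model = NULL;

    return machine_class->init(uc, current_machine);
}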

uc.c

@@ -292,7 +292,6 @@ uc_err uc_close(uc_engine *uc)
// Cleanup CPU.
g_free(uc->cpu->tcg_as_listener);
g_free(uc->cpu->thread);
g_free(uc->cpu->halt_cond);
// Cleanup all objects.
OBJECT(uc->machine_state->accelerator)->ref = 1;
@@ -315,9 +314,6 @@ uc_err uc_close(uc_engine *uc)
if (uc->qemu_thread_data)
free(uc->qemu_thread_data);
qemu_mutex_destroy(&uc->qemu_global_mutex);
qemu_cond_destroy(&uc->qemu_cpu_cond);
// Other auxilaries.
free(uc->l1_map);