tcg: comment on which functions have to be called with tb_lock held

softmmu requires more functions to be thread-safe, because translation
blocks can be invalidated from e.g. notdirty callbacks. The same probably
holds for user-mode emulation; it's just that no one has ever tried to
produce a coherent locking scheme there.

This patch will guide the introduction of more tb_lock and tb_unlock
calls for system emulation.

Note that after this patch some (most) of the mentioned functions are
still called outside tb_lock/tb_unlock. The next patch will rectify this.

Backports commit 7d7500d99895f888f97397ef32bb536bb0df3b74 from qemu
Paolo Bonzini 2018-02-28 10:26:17 -05:00 committed by Lioncash
parent 7aab0bd9a6
commit 9d64a89acf
4 changed files with 32 additions and 5 deletions
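
The comments this patch adds all encode one discipline: take tb_lock at the
boundary where translation structures are mutated, so that every function
documented below may simply assume the lock. As a minimal sketch of a
conforming call site (the wrapper function below is hypothetical and not part
of the patch; tb_lock()/tb_unlock() are the entry points the commit message
refers to):

    /* Hypothetical caller: tb_phys_invalidate() is documented below as
     * "Called with tb_lock held", so the lock is taken here, at the
     * outermost boundary, rather than inside the callee. */
    static void invalidate_tb_locked(struct uc_struct *uc,
                                     TranslationBlock *tb,
                                     tb_page_addr_t page_addr)
    {
        tb_lock();
        tb_phys_invalidate(uc, tb, page_addr);
        tb_unlock();
    }

Documenting the precondition instead of locking inside each callee avoids any
need for recursive locking and lets one critical section cover a whole batch
of invalidations.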


@@ -334,6 +334,7 @@ static inline void tb_set_jmp_target(TranslationBlock *tb,
 #endif
 
+/* Called with tb_lock held. */
 static inline void tb_add_jump(TranslationBlock *tb, int n,
                                TranslationBlock *tb_next)
 {


@@ -277,7 +277,10 @@ struct CPUState {
     MemoryRegion *memory;
 
     void *env_ptr; /* CPUArchState */
+
+    /* Writes protected by tb_lock, reads not thread-safe */
     struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
+
     QTAILQ_ENTRY(CPUState) node;
 
     /* ice debug support */
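
The new comment pins down an asymmetric contract for tb_jmp_cache: writers
must hold tb_lock, while lookups on the hot execution path read the array
with no protection at all; a stale or NULL entry is tolerated and merely
forces a slower hash-table lookup. A sketch of a conforming writer, with a
hypothetical helper name:

    /* Hypothetical write side: any update of tb_jmp_cache happens under
     * tb_lock; the read side in the execution loop takes no lock. */
    static void cpu_jmp_cache_clear_entry(CPUState *cpu, unsigned hash)
    {
        tb_lock();
        cpu->tb_jmp_cache[hash] = NULL;
        tb_unlock();
    }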


@@ -620,6 +620,7 @@ QEMU_BUILD_BUG_ON(sizeof(TCGOp) > 8);
 
 /* pool based memory allocation */
 
+/* tb_lock must be held for tcg_malloc_internal. */
 void *tcg_malloc_internal(TCGContext *s, int size);
 void tcg_pool_reset(TCGContext *s);
@@ -1031,6 +1032,7 @@ TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
 
 void tcg_optimize(TCGContext *s);
 
+/* Called with tb_lock held. */
 static inline void *tcg_malloc(TCGContext *s, int size)
 {
     uint8_t *ptr, *ptr_end;
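
tcg_malloc() is the fast path of a pool allocator: it bumps a pointer within
the current pool buffer and falls back to tcg_malloc_internal() when the
buffer is exhausted, so the lock requirement on the slow path only holds up
if the inlined fast path is covered too, which is what the pair of comments
spells out. In practice both are called from the translator, which already
runs under tb_lock; a sketch of an assumed call site (the helper below is
hypothetical):

    /* Hypothetical call site: scratch memory during code generation.
     * Precondition (per the comments above): tb_lock is already held
     * by the translation loop that called us. */
    static TCGTemp *alloc_temp_scratch(TCGContext *s)
    {
        return tcg_malloc(s, sizeof(TCGTemp));
    }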


@@ -287,7 +287,9 @@ static int encode_search(TCGContext *tcg_ctx, TranslationBlock *tb, uint8_t *block)
     return p - block;
 }
 
-/* The cpu state corresponding to 'searched_pc' is restored. */
+/* The cpu state corresponding to 'searched_pc' is restored.
+ * Called with tb_lock held.
+ */
 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                      uintptr_t searched_pc)
 {
@@ -442,6 +444,10 @@ static void page_init(struct uc_struct *uc)
 #endif
 }
 
+/* If alloc=1:
+ * Called with tb_lock held for system emulation.
+ * Called with mmap_lock held for user-mode emulation.
+ */
 static PageDesc *page_find_alloc(struct uc_struct *uc, tb_page_addr_t index, int alloc)
 {
     PageDesc *pd;
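
page_find_alloc() can mutate the page-descriptor tree when alloc=1, hence the
dual precondition above: the tree is protected by tb_lock under system
emulation and by mmap_lock under user-mode emulation. A sketch of a caller
satisfying both (the wrapper is hypothetical; mmap_lock()/mmap_unlock() are
the user-mode primitives the comment refers to):

    /* Hypothetical caller: take whichever lock protects the page
     * descriptors in this build configuration before allocating. */
    static PageDesc *page_find_alloc_locked(struct uc_struct *uc,
                                            tb_page_addr_t index)
    {
        PageDesc *pd;
    #ifdef CONFIG_USER_ONLY
        mmap_lock();
        pd = page_find_alloc(uc, index, 1);
        mmap_unlock();
    #else
        tb_lock();
        pd = page_find_alloc(uc, index, 1);
        tb_unlock();
    #endif
        return pd;
    }
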
@@ -861,8 +867,12 @@ bool tcg_enabled(struct uc_struct *uc)
     return tcg_ctx->code_gen_buffer != NULL;
 }
 
-/* Allocate a new translation block. Flush the translation buffer if
-   too many translation blocks or too much generated code. */
+/*
+ * Allocate a new translation block. Flush the translation buffer if
+ * too many translation blocks or too much generated code.
+ *
+ * Called with tb_lock held.
+ */
 static TranslationBlock *tb_alloc(struct uc_struct *uc, target_ulong pc)
 {
     TranslationBlock *tb;
@@ -878,6 +888,7 @@ static TranslationBlock *tb_alloc(struct uc_struct *uc, target_ulong pc)
     return tb;
 }
 
+/* Called with tb_lock held. */
 void tb_free(struct uc_struct *uc, TranslationBlock *tb)
 {
     TCGContext *tcg_ctx = uc->tcg_ctx;
@@ -975,6 +986,10 @@ void tb_flush(CPUState *cpu)
 
 #ifdef DEBUG_TB_CHECK
 
+/* verify that all the pages have correct rights for code
+ *
+ * Called with tb_lock held.
+ */
 static void tb_invalidate_check(target_ulong address)
 {
     TranslationBlock *tb;
@@ -1106,7 +1121,10 @@ static inline void tb_jmp_unlink(TranslationBlock *tb)
     }
 }
 
-/* invalidate one TB */
+/* invalidate one TB
+ *
+ * Called with tb_lock held.
+ */
 void tb_phys_invalidate(struct uc_struct *uc,
                         TranslationBlock *tb, tb_page_addr_t page_addr)
 {
@@ -1576,7 +1594,9 @@ void tb_invalidate_phys_page_fast(struct uc_struct* uc, tb_page_addr_t start, int len)
     }
 
     if (!p->code_bitmap &&
         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
-        /* build code bitmap */
+        /* build code bitmap. FIXME: writes should be protected by
+         * tb_lock, reads by tb_lock or RCU.
+         */
         build_page_bitmap(p);
     }
     if (p->code_bitmap) {
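
Unlike the other comments in this patch, this one is a FIXME: it records the
intended scheme, not current behavior. Rebuilding the bitmap is a write and
would go under tb_lock, while readers such as the test just above could run
under either tb_lock or an RCU read-side critical section, which in turn
means the write side would have to publish a fresh bitmap rather than modify
one in place. A sketch of such a read side, assuming QEMU's
rcu_read_lock()/rcu_read_unlock() and atomic_rcu_read() helpers (the
surrounding code is hypothetical):

    /* Hypothetical RCU read side: pin the bitmap so a concurrent
     * rebuild under tb_lock cannot free it while we test bits. */
    unsigned long *bitmap;

    rcu_read_lock();
    bitmap = atomic_rcu_read(&p->code_bitmap);
    if (bitmap) {
        /* ... test the bits covering [start, start + len) ... */
    }
    rcu_read_unlock();
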
@@ -1715,6 +1735,7 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
 }
 #endif /* !defined(CONFIG_USER_ONLY) */
 
+/* Called with tb_lock held. */
 void tb_check_watchpoint(CPUState *cpu)
 {
     TranslationBlock *tb;