diff --git a/qemu/include/exec/exec-all.h b/qemu/include/exec/exec-all.h
index 09f341df..741eb102 100644
--- a/qemu/include/exec/exec-all.h
+++ b/qemu/include/exec/exec-all.h
@@ -334,6 +334,7 @@ static inline void tb_set_jmp_target(TranslationBlock *tb,
 
 #endif
 
+/* Called with tb_lock held. */
 static inline void tb_add_jump(TranslationBlock *tb, int n,
                                TranslationBlock *tb_next)
 {
diff --git a/qemu/include/qom/cpu.h b/qemu/include/qom/cpu.h
index 124409e0..d3af9f49 100644
--- a/qemu/include/qom/cpu.h
+++ b/qemu/include/qom/cpu.h
@@ -277,7 +277,10 @@ struct CPUState {
     MemoryRegion *memory;
 
     void *env_ptr; /* CPUArchState */
+
+    /* Writes protected by tb_lock, reads not thread-safe */
     struct TranslationBlock *tb_jmp_cache[TB_JMP_CACHE_SIZE];
+
     QTAILQ_ENTRY(CPUState) node;
 
     /* ice debug support */
diff --git a/qemu/tcg/tcg.h b/qemu/tcg/tcg.h
index 13a7fdc4..525f5eee 100644
--- a/qemu/tcg/tcg.h
+++ b/qemu/tcg/tcg.h
@@ -620,6 +620,7 @@ QEMU_BUILD_BUG_ON(sizeof(TCGOp) > 8);
 
 /* pool based memory allocation */
 
+/* tb_lock must be held for tcg_malloc_internal. */
 void *tcg_malloc_internal(TCGContext *s, int size);
 void tcg_pool_reset(TCGContext *s);
 
@@ -1031,6 +1032,7 @@ TCGOp *tcg_op_insert_after(TCGContext *s, TCGOp *op, TCGOpcode opc, int narg);
 
 void tcg_optimize(TCGContext *s);
 
+/* Called with tb_lock held. */
 static inline void *tcg_malloc(TCGContext *s, int size)
 {
     uint8_t *ptr, *ptr_end;
diff --git a/qemu/translate-all.c b/qemu/translate-all.c
index dcc8c995..9e3c9db6 100644
--- a/qemu/translate-all.c
+++ b/qemu/translate-all.c
@@ -287,7 +287,9 @@ static int encode_search(TCGContext *tcg_ctx, TranslationBlock *tb, uint8_t *blo
     return p - block;
 }
 
-/* The cpu state corresponding to 'searched_pc' is restored. */
+/* The cpu state corresponding to 'searched_pc' is restored.
+ * Called with tb_lock held.
+ */
 static int cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                      uintptr_t searched_pc)
 {
@@ -442,6 +444,10 @@ static void page_init(struct uc_struct *uc)
 #endif
 }
 
+/* If alloc=1:
+ * Called with tb_lock held for system emulation.
+ * Called with mmap_lock held for user-mode emulation.
+ */
 static PageDesc *page_find_alloc(struct uc_struct *uc, tb_page_addr_t index, int alloc)
 {
     PageDesc *pd;
@@ -861,8 +867,12 @@ bool tcg_enabled(struct uc_struct *uc)
     return tcg_ctx->code_gen_buffer != NULL;
 }
 
-/* Allocate a new translation block. Flush the translation buffer if
-   too many translation blocks or too much generated code. */
+/*
+ * Allocate a new translation block. Flush the translation buffer if
+ * too many translation blocks or too much generated code.
+ *
+ * Called with tb_lock held.
+ */
 static TranslationBlock *tb_alloc(struct uc_struct *uc, target_ulong pc)
 {
     TranslationBlock *tb;
@@ -878,6 +888,7 @@ static TranslationBlock *tb_alloc(struct uc_struct *uc, target_ulong pc)
     return tb;
 }
 
+/* Called with tb_lock held. */
 void tb_free(struct uc_struct *uc, TranslationBlock *tb)
 {
     TCGContext *tcg_ctx = uc->tcg_ctx;
@@ -975,6 +986,10 @@ void tb_flush(CPUState *cpu)
 
 #ifdef DEBUG_TB_CHECK
 
+/* verify that all the pages have correct rights for code
+ *
+ * Called with tb_lock held.
+ */
static void tb_invalidate_check(target_ulong address)
 {
     TranslationBlock *tb;
@@ -1106,7 +1121,10 @@ static inline void tb_jmp_unlink(TranslationBlock *tb)
     }
 }
 
-/* invalidate one TB */
+/* invalidate one TB
+ *
+ * Called with tb_lock held.
+ */
 void tb_phys_invalidate(struct uc_struct *uc, TranslationBlock *tb,
                         tb_page_addr_t page_addr)
 {
@@ -1576,7 +1594,9 @@ void tb_invalidate_phys_page_fast(struct uc_struct* uc, tb_page_addr_t start, in
     }
     if (!p->code_bitmap &&
         ++p->code_write_count >= SMC_BITMAP_USE_THRESHOLD) {
-        /* build code bitmap */
+        /* build code bitmap. FIXME: writes should be protected by
+         * tb_lock, reads by tb_lock or RCU.
+         */
         build_page_bitmap(p);
     }
     if (p->code_bitmap) {
@@ -1715,6 +1735,7 @@ void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr)
 }
 #endif /* !defined(CONFIG_USER_ONLY) */
 
+/* Called with tb_lock held. */
 void tb_check_watchpoint(CPUState *cpu)
 {
     TranslationBlock *tb;
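
The convention these annotations document is caller-side locking: the TB jump lists, the code-generation pool and the page descriptors are only mutated with tb_lock held, while hot readers such as tb_jmp_cache lookups take no lock at all, which is why the cpu.h comment marks reads as not thread-safe. A minimal sketch of the caller side of that rule, assuming the tb_lock()/tb_unlock() helpers that upstream QEMU pairs with these comments (a single-threaded Unicorn build may reduce them to no-ops), placed in a translation unit that already includes exec/exec-all.h:

/* Hypothetical helper, not part of the patch: it only illustrates the
 * "hold tb_lock while mutating the TB graph" rule around tb_add_jump().
 */
static void chain_tbs(TranslationBlock *tb, int n, TranslationBlock *tb_next)
{
    tb_lock();                    /* serialize writers of the TB jump lists */
    tb_add_jump(tb, n, tb_next);  /* annotated above as "Called with tb_lock held" */
    tb_unlock();                  /* readers of tb_jmp_cache never take this lock */
}

The same pairing applies to tcg_malloc(), tb_alloc(), tb_free() and tb_phys_invalidate(): the caller rather than the callee owns the lock, so one acquisition can cover an entire translation or invalidation pass.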