targets: Initial RISC-V port

Functionally, everything is here; however, it crashes for some bizarre
reason during initialization. Yay for QEMU having an overcomplicated
initialization process that's difficult to keep a mental model of.
Lioncash 2018-10-06 05:14:45 -04:00
parent cc3d618e61
commit 94dbf9eb96
47 changed files with 13068 additions and 47 deletions

2
.gitignore vendored

@ -23,6 +23,8 @@ qemu/mips64el-softmmu/
qemu/mips64-softmmu/
qemu/mipsel-softmmu/
qemu/mips-softmmu/
qemu/riscv32-softmmu/
qemu/riscv64-softmmu/
qemu/sparc64-softmmu/
qemu/sparc-softmmu/
qemu/i386-softmmu/


@ -65,6 +65,15 @@ ifneq (,$(findstring sparc,$(UNICORN_ARCHS)))
UNICORN_CFLAGS += -DUNICORN_HAS_SPARC
UNICORN_TARGETS += sparc-softmmu,sparc64-softmmu,
endif
ifneq (,$(findstring riscv,$(UNICORN_ARCHS)))
UC_TARGET_OBJ += $(call GENOBJ,riscv32-softmmu)
UC_TARGET_OBJ += $(call GENOBJ,riscv64-softmmu)
UNICORN_CFLAGS += -DUNICORN_HAS_RISCV
UNICORN_CFLAGS += -DUNICORN_HAS_RISCV32
UNICORN_CFLAGS += -DUNICORN_HAS_RISCV64
UNICORN_TARGETS += riscv32-softmmu
UNICORN_TARGETS += riscv64-softmmu
endif
UNICORN_CFLAGS += -fPIC
@ -318,7 +327,7 @@ dist:
# run "make header" whenever qemu/header_gen.py is modified
header:
$(eval TARGETS := m68k arm armeb aarch64 aarch64eb mips mipsel mips64 mips64el\
powerpc sparc sparc64 x86_64)
powerpc riscv32 riscv64 sparc sparc64 x86_64)
$(foreach var,$(TARGETS),\
$(shell python qemu/header_gen.py $(var) > qemu/$(var).h;))
@echo "Generated headers for $(TARGETS)."


@ -15,11 +15,12 @@
// These are masks of supported modes for each cpu/arch.
// They should be updated when changes are made to the uc_mode enum typedef.
#define UC_MODE_ARM_MASK (UC_MODE_ARM|UC_MODE_THUMB|UC_MODE_LITTLE_ENDIAN|UC_MODE_MCLASS|UC_MODE_BIG_ENDIAN)
#define UC_MODE_MIPS_MASK (UC_MODE_MIPS32|UC_MODE_MIPS64|UC_MODE_LITTLE_ENDIAN|UC_MODE_BIG_ENDIAN)
#define UC_MODE_X86_MASK (UC_MODE_16|UC_MODE_32|UC_MODE_64|UC_MODE_LITTLE_ENDIAN)
#define UC_MODE_PPC_MASK (UC_MODE_PPC64|UC_MODE_BIG_ENDIAN)
#define UC_MODE_SPARC_MASK (UC_MODE_SPARC32|UC_MODE_SPARC64|UC_MODE_BIG_ENDIAN)
#define UC_MODE_M68K_MASK (UC_MODE_BIG_ENDIAN)
#define UC_MODE_MIPS_MASK (UC_MODE_MIPS32|UC_MODE_MIPS64|UC_MODE_LITTLE_ENDIAN|UC_MODE_BIG_ENDIAN)
#define UC_MODE_PPC_MASK (UC_MODE_PPC64|UC_MODE_BIG_ENDIAN)
#define UC_MODE_RISCV_MASK (UC_MODE_RISCV32|UC_MODE_RISCV64)
#define UC_MODE_SPARC_MASK (UC_MODE_SPARC32|UC_MODE_SPARC64|UC_MODE_BIG_ENDIAN)
#define UC_MODE_X86_MASK (UC_MODE_16|UC_MODE_32|UC_MODE_64|UC_MODE_LITTLE_ENDIAN)
#define ARR_SIZE(a) (sizeof(a)/sizeof(a[0]))
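UC_MODE_RISCV_MASK follows the same pattern as the other per-architecture masks: only UC_MODE_RISCV32 and UC_MODE_RISCV64 are accepted, so core code can reject anything else up front. An illustrative sketch of such a check (the exact call site is not shown in this hunk):

if (arch == UC_ARCH_RISCV && (mode & ~UC_MODE_RISCV_MASK)) {
    return UC_ERR_MODE;  /* e.g. a caller passing UC_MODE_BIG_ENDIAN */
}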

142
include/unicorn/riscv.h Normal file

@ -0,0 +1,142 @@
/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015-2017 */
/* This file is released under LGPL2.
See COPYING.LGPL2 in root directory for more details
*/
#ifndef UNICORN_RISCV_H
#define UNICORN_RISCV_H
#ifdef __cplusplus
extern "C" {
#endif
typedef enum uc_riscv_reg {
UC_RISCV_REG_INVALID = 0,
/* General-purpose registers */
UC_RISCV_REG_X0,
UC_RISCV_REG_X1,
UC_RISCV_REG_X2,
UC_RISCV_REG_X3,
UC_RISCV_REG_X4,
UC_RISCV_REG_X5,
UC_RISCV_REG_X6,
UC_RISCV_REG_X7,
UC_RISCV_REG_X8,
UC_RISCV_REG_X9,
UC_RISCV_REG_X10,
UC_RISCV_REG_X11,
UC_RISCV_REG_X12,
UC_RISCV_REG_X13,
UC_RISCV_REG_X14,
UC_RISCV_REG_X15,
UC_RISCV_REG_X16,
UC_RISCV_REG_X17,
UC_RISCV_REG_X18,
UC_RISCV_REG_X19,
UC_RISCV_REG_X20,
UC_RISCV_REG_X21,
UC_RISCV_REG_X22,
UC_RISCV_REG_X23,
UC_RISCV_REG_X24,
UC_RISCV_REG_X25,
UC_RISCV_REG_X26,
UC_RISCV_REG_X27,
UC_RISCV_REG_X28,
UC_RISCV_REG_X29,
UC_RISCV_REG_X30,
UC_RISCV_REG_X31,
/* General-purpose register aliases */
/* Zero register */
UC_RISCV_REG_ZERO = UC_RISCV_REG_X0,
/* Return address */
UC_RISCV_REG_RA = UC_RISCV_REG_X1,
/* Stack pointer */
UC_RISCV_REG_SP = UC_RISCV_REG_X2,
/* Global pointer */
UC_RISCV_REG_GP = UC_RISCV_REG_X3,
/* Thread pointer */
UC_RISCV_REG_TP = UC_RISCV_REG_X4,
/* Temporary registers */
UC_RISCV_REG_T0 = UC_RISCV_REG_X5,
UC_RISCV_REG_T1 = UC_RISCV_REG_X6,
UC_RISCV_REG_T2 = UC_RISCV_REG_X7,
UC_RISCV_REG_T3 = UC_RISCV_REG_X28,
UC_RISCV_REG_T4 = UC_RISCV_REG_X29,
UC_RISCV_REG_T5 = UC_RISCV_REG_X30,
UC_RISCV_REG_T6 = UC_RISCV_REG_X31,
/* Frame pointer */
UC_RISCV_REG_FP = UC_RISCV_REG_X8,
/* Saved registers */
UC_RISCV_REG_S0 = UC_RISCV_REG_X8,
UC_RISCV_REG_S1 = UC_RISCV_REG_X9,
UC_RISCV_REG_S2 = UC_RISCV_REG_X18,
UC_RISCV_REG_S3 = UC_RISCV_REG_X19,
UC_RISCV_REG_S4 = UC_RISCV_REG_X20,
UC_RISCV_REG_S5 = UC_RISCV_REG_X21,
UC_RISCV_REG_S6 = UC_RISCV_REG_X22,
UC_RISCV_REG_S7 = UC_RISCV_REG_X23,
UC_RISCV_REG_S8 = UC_RISCV_REG_X24,
UC_RISCV_REG_S9 = UC_RISCV_REG_X25,
UC_RISCV_REG_S10 = UC_RISCV_REG_X26,
UC_RISCV_REG_S11 = UC_RISCV_REG_X27,
/* Function argument registers */
UC_RISCV_REG_A0 = UC_RISCV_REG_X10,
UC_RISCV_REG_A1 = UC_RISCV_REG_X11,
UC_RISCV_REG_A2 = UC_RISCV_REG_X12,
UC_RISCV_REG_A3 = UC_RISCV_REG_X13,
UC_RISCV_REG_A4 = UC_RISCV_REG_X14,
UC_RISCV_REG_A5 = UC_RISCV_REG_X15,
UC_RISCV_REG_A6 = UC_RISCV_REG_X16,
UC_RISCV_REG_A7 = UC_RISCV_REG_X17,
/* Program counter */
UC_RISCV_REG_PC,
/* Floating-point registers */
UC_RISCV_REG_F0,
UC_RISCV_REG_F1,
UC_RISCV_REG_F2,
UC_RISCV_REG_F3,
UC_RISCV_REG_F4,
UC_RISCV_REG_F5,
UC_RISCV_REG_F6,
UC_RISCV_REG_F7,
UC_RISCV_REG_F8,
UC_RISCV_REG_F9,
UC_RISCV_REG_F10,
UC_RISCV_REG_F11,
UC_RISCV_REG_F12,
UC_RISCV_REG_F13,
UC_RISCV_REG_F14,
UC_RISCV_REG_F15,
UC_RISCV_REG_F16,
UC_RISCV_REG_F17,
UC_RISCV_REG_F18,
UC_RISCV_REG_F19,
UC_RISCV_REG_F20,
UC_RISCV_REG_F21,
UC_RISCV_REG_F22,
UC_RISCV_REG_F23,
UC_RISCV_REG_F24,
UC_RISCV_REG_F25,
UC_RISCV_REG_F26,
UC_RISCV_REG_F27,
UC_RISCV_REG_F28,
UC_RISCV_REG_F29,
UC_RISCV_REG_F30,
UC_RISCV_REG_F31,
} uc_riscv_reg;
#ifdef __cplusplus
}
#endif
#endif /* UNICORN_RISCV_H */
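Because the ABI names above are plain aliases of the X-register enumerators, either spelling can be passed to uc_reg_read()/uc_reg_write(). A short illustration, assuming an already-opened RISC-V uc_engine handle named uc:

uint64_t sp = 0x20000;
uc_reg_write(uc, UC_RISCV_REG_SP, &sp);   /* same enumerator as UC_RISCV_REG_X2 */
uc_reg_read(uc, UC_RISCV_REG_X2, &sp);    /* reads back 0x20000 */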


@ -31,6 +31,7 @@ typedef size_t uc_hook;
#include "arm.h"
#include "arm64.h"
#include "mips.h"
#include "riscv.h"
#include "sparc.h"
#ifdef __GNUC__
@ -95,6 +96,7 @@ typedef enum uc_arch {
UC_ARCH_PPC, // PowerPC architecture (currently unsupported)
UC_ARCH_SPARC, // Sparc architecture
UC_ARCH_M68K, // M68K architecture
UC_ARCH_RISCV, // RISC-V architecture
UC_ARCH_MAX,
} uc_arch;
@ -126,6 +128,9 @@ typedef enum uc_mode {
UC_MODE_SPARC64 = 1 << 3, // 64-bit mode
UC_MODE_V9 = 1 << 4, // SparcV9 mode (currently unsupported)
// m68k
// risc-v
UC_MODE_RISCV32 = 1 << 2, // 32-bit mode
UC_MODE_RISCV64 = 1 << 3, // 64-bit mode
} uc_mode;
// All type of errors encountered by Unicorn API.
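With UC_ARCH_RISCV and the two RISC-V mode bits in place, driving the port through the public API would look roughly like the sketch below once initialization works (per the commit message, it still crashes there). The four code bytes encode addi a0, zero, 5; the register constant comes from the new include/unicorn/riscv.h:

#include <stdint.h>
#include <unicorn/unicorn.h>

int main(void)
{
    const uint8_t code[] = { 0x13, 0x05, 0x50, 0x00 };  /* addi a0, zero, 5 */
    uint64_t a0 = 0;
    uc_engine *uc;

    if (uc_open(UC_ARCH_RISCV, UC_MODE_RISCV64, &uc) != UC_ERR_OK)
        return 1;

    uc_mem_map(uc, 0x10000, 0x1000, UC_PROT_ALL);
    uc_mem_write(uc, 0x10000, code, sizeof(code));
    uc_emu_start(uc, 0x10000, 0x10000 + sizeof(code), 0, 0);

    uc_reg_read(uc, UC_RISCV_REG_A0, &a0);  /* expect 5 */
    uc_close(uc);
    return 0;
}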


@ -217,7 +217,6 @@
#define clz32 clz32_aarch64
#define clz64 clz64_aarch64
#define cmp_flatrange_addr cmp_flatrange_addr_aarch64
#define code_gen_alloc code_gen_alloc_aarch64
#define commonNaNToFloat128 commonNaNToFloat128_aarch64
#define commonNaNToFloat16 commonNaNToFloat16_aarch64
#define commonNaNToFloat32 commonNaNToFloat32_aarch64
@ -281,7 +280,6 @@
#define cpu_exec_init_all cpu_exec_init_all_aarch64
#define cpu_exec_step_atomic cpu_exec_step_atomic_aarch64
#define cpu_flush_icache_range cpu_flush_icache_range_aarch64
#define cpu_gen_init cpu_gen_init_aarch64
#define cpu_get_address_space cpu_get_address_space_aarch64
#define cpu_get_clock cpu_get_clock_aarch64
#define cpu_get_real_ticks cpu_get_real_ticks_aarch64


@ -217,7 +217,6 @@
#define clz32 clz32_aarch64eb
#define clz64 clz64_aarch64eb
#define cmp_flatrange_addr cmp_flatrange_addr_aarch64eb
#define code_gen_alloc code_gen_alloc_aarch64eb
#define commonNaNToFloat128 commonNaNToFloat128_aarch64eb
#define commonNaNToFloat16 commonNaNToFloat16_aarch64eb
#define commonNaNToFloat32 commonNaNToFloat32_aarch64eb
@ -281,7 +280,6 @@
#define cpu_exec_init_all cpu_exec_init_all_aarch64eb
#define cpu_exec_step_atomic cpu_exec_step_atomic_aarch64eb
#define cpu_flush_icache_range cpu_flush_icache_range_aarch64eb
#define cpu_gen_init cpu_gen_init_aarch64eb
#define cpu_get_address_space cpu_get_address_space_aarch64eb
#define cpu_get_clock cpu_get_clock_aarch64eb
#define cpu_get_real_ticks cpu_get_real_ticks_aarch64eb


@ -36,7 +36,6 @@
#define TCG_TB_SIZE 0
static bool tcg_allowed = true;
static int tcg_init(MachineState *ms);
static AccelClass *accel_find(struct uc_struct *uc, const char *opt_name);
static int accel_init_machine(AccelClass *acc, MachineState *ms);
static void tcg_accel_class_init(struct uc_struct *uc, ObjectClass *oc, void *data);


@ -217,7 +217,6 @@
#define clz32 clz32_arm
#define clz64 clz64_arm
#define cmp_flatrange_addr cmp_flatrange_addr_arm
#define code_gen_alloc code_gen_alloc_arm
#define commonNaNToFloat128 commonNaNToFloat128_arm
#define commonNaNToFloat16 commonNaNToFloat16_arm
#define commonNaNToFloat32 commonNaNToFloat32_arm
@ -281,7 +280,6 @@
#define cpu_exec_init_all cpu_exec_init_all_arm
#define cpu_exec_step_atomic cpu_exec_step_atomic_arm
#define cpu_flush_icache_range cpu_flush_icache_range_arm
#define cpu_gen_init cpu_gen_init_arm
#define cpu_get_address_space cpu_get_address_space_arm
#define cpu_get_clock cpu_get_clock_arm
#define cpu_get_real_ticks cpu_get_real_ticks_arm


@ -217,7 +217,6 @@
#define clz32 clz32_armeb
#define clz64 clz64_armeb
#define cmp_flatrange_addr cmp_flatrange_addr_armeb
#define code_gen_alloc code_gen_alloc_armeb
#define commonNaNToFloat128 commonNaNToFloat128_armeb
#define commonNaNToFloat16 commonNaNToFloat16_armeb
#define commonNaNToFloat32 commonNaNToFloat32_armeb
@ -281,7 +280,6 @@
#define cpu_exec_init_all cpu_exec_init_all_armeb
#define cpu_exec_step_atomic cpu_exec_step_atomic_armeb
#define cpu_flush_icache_range cpu_flush_icache_range_armeb
#define cpu_gen_init cpu_gen_init_armeb
#define cpu_get_address_space cpu_get_address_space_armeb
#define cpu_get_clock cpu_get_clock_armeb
#define cpu_get_real_ticks cpu_get_real_ticks_armeb

9
qemu/configure vendored

@ -1659,6 +1659,15 @@ case "$target_name" in
TARGET_BASE_ARCH=ppc
echo "TARGET_ABI32=y" >> $config_target_mak
;;
riscv32)
TARGET_BASE_ARCH=riscv
TARGET_ABI_DIR=riscv
echo "TARGET_ABI32=y" >> $config_target_mak
;;
riscv64)
TARGET_BASE_ARCH=riscv
TARGET_ABI_DIR=riscv
;;
sh4|sh4eb)
TARGET_ARCH=sh4
bflt="yes"


@ -0,0 +1,5 @@
# Default configuration for riscv-softmmu
CONFIG_SERIAL=y
CONFIG_VIRTIO_MMIO=y
CONFIG_CADENCE=y


@ -0,0 +1,5 @@
# Default configuration for riscv-softmmu
CONFIG_SERIAL=y
CONFIG_VIRTIO_MMIO=y
CONFIG_CADENCE=y


@ -223,7 +223,6 @@ symbols = (
'clz32',
'clz64',
'cmp_flatrange_addr',
'code_gen_alloc',
'commonNaNToFloat128',
'commonNaNToFloat16',
'commonNaNToFloat32',
@ -287,7 +286,6 @@ symbols = (
'cpu_exec_init_all',
'cpu_exec_step_atomic',
'cpu_flush_icache_range',
'cpu_gen_init',
'cpu_get_address_space',
'cpu_get_clock',
'cpu_get_real_ticks',
@ -5134,6 +5132,85 @@ mips_symbols = (
'sync_c0_status',
)
riscv_symbols = (
'RISCV32_REGS_STORAGE_SIZE',
'RISCV64_REGS_STORAGE_SIZE',
'cpu_riscv_get_fflags',
'cpu_riscv_set_fflags',
'csr_read_helper',
'csr_write_helper',
'do_raise_exception_err',
'helper_csrrc',
'helper_csrrs',
'helper_csrrw',
'helper_fadd_d',
'helper_fadd_s',
'helper_fclass_d',
'helper_fclass_s',
'helper_fcvt_d_s',
'helper_fcvt_d_w',
'helper_fcvt_d_wu',
'helper_fcvt_s_d',
'helper_fcvt_s_w',
'helper_fcvt_s_wu',
'helper_fcvt_w_d',
'helper_fcvt_w_s',
'helper_fcvt_wu_d',
'helper_fcvt_wu_s',
'helper_fdiv_d',
'helper_fdiv_s',
'helper_feq_d',
'helper_feq_s',
'helper_fle_d',
'helper_fle_s',
'helper_flt_d',
'helper_flt_s',
'helper_fmadd_d',
'helper_fmadd_s',
'helper_fmsub_d',
'helper_fmsub_s',
'helper_fmax_d',
'helper_fmax_s',
'helper_fmin_d',
'helper_fmin_s',
'helper_fmul_d',
'helper_fmul_s',
'helper_fnmadd_d',
'helper_fnmadd_s',
'helper_fnmsub_d',
'helper_fnmsub_s',
'helper_fsqrt_d',
'helper_fsqrt_s',
'helper_fsub_d',
'helper_fsub_s',
'helper_mret',
'helper_riscv_tlb_flush',
'helper_set_rounding_mode',
'helper_sret',
'pmp_hart_has_privs',
'pmpaddr_csr_read',
'pmpaddr_csr_write',
'pmpcfg_csr_read',
'pmpcfg_csr_write',
'riscv_cpu_do_interrupt',
'riscv_cpu_do_unaligned_access',
'riscv_cpu_exec_interrupt',
'riscv_cpu_get_phys_page_debug',
'riscv_cpu_handle_mmu_fault',
'riscv_cpu_list',
'riscv_cpu_mmu_index',
'riscv_cpu_register_types',
'riscv_excp_names',
'riscv_fpr_regnames',
'riscv_int_regnames',
'riscv_intr_names',
'riscv_isa_string',
'riscv_set_local_interrupt',
'riscv_set_mode',
'riscv_translate_init',
'spike_v1_10_0_machine_init_register_types',
)
sparc_symbols = (
'cpu_cwp_dec',
'cpu_cwp_inc',
@ -5232,6 +5309,10 @@ if __name__ == '__main__':
for s in mips_symbols:
print("#define %s %s_%s" %(s, s, arch))
if 'riscv' in arch:
for s in riscv_symbols:
print("#define %s %s_%s" %(s, s, arch))
if 'sparc' in arch:
for s in sparc_symbols:
print("#define %s %s_%s" %(s, s, arch))


@ -0,0 +1 @@
obj-y += spike.o

55
qemu/hw/riscv/spike.c Normal file

@ -0,0 +1,55 @@
/*
* QEMU RISC-V Spike Board
*
* Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
* Copyright (c) 2017-2018 SiFive, Inc.
*
* This provides a RISC-V Board with the following devices:
*
* 0) HTIF Console and Poweroff
* 1) CLINT (Timer and IPI)
* 2) PLIC (Platform Level Interrupt Controller)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/riscv/spike.h"
#include "hw/boards.h"
#include "exec/address-spaces.h"
static int spike_v1_10_0_board_init(struct uc_struct *uc, MachineState *machine)
{
uc->cpu = cpu_create(uc, machine->cpu_type);
if (uc->cpu == NULL) {
fprintf(stderr, "Unable to make CPU definition\n");
return -1;
}
return 0;
}
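/* Note: in this Unicorn port only the CPU is created here; the HTIF, CLINT and
 * PLIC devices listed in the header comment above are not instantiated. */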
static void spike_v1_10_0_machine_init(struct uc_struct *uc, MachineClass *mc)
{
mc->init = spike_v1_10_0_board_init;
mc->max_cpus = 1;
mc->is_default = 1;
mc->default_cpu_type = SPIKE_V1_10_0_CPU;
mc->arch = UC_ARCH_RISCV;
}
DEFINE_MACHINE("spike_v1.10", spike_v1_10_0_machine_init)


@ -0,0 +1,16 @@
#ifndef HW_RISCV_H
#define HW_RISCV_H
#if defined(TARGET_RISCV32)
#define SPIKE_V1_09_1_CPU TYPE_RISCV_CPU_RV32GCSU_V1_09_1
#define SPIKE_V1_10_0_CPU TYPE_RISCV_CPU_RV32GCSU_V1_10_0
#elif defined(TARGET_RISCV64)
#define SPIKE_V1_09_1_CPU TYPE_RISCV_CPU_RV64GCSU_V1_09_1
#define SPIKE_V1_10_0_CPU TYPE_RISCV_CPU_RV64GCSU_V1_10_0
#endif
void spike_v1_10_0_machine_init_register_types(struct uc_struct *uc);
void riscv_cpu_register_types(void *opaque);
#endif /* HW_RISCV_H */


@ -217,7 +217,6 @@
#define clz32 clz32_m68k
#define clz64 clz64_m68k
#define cmp_flatrange_addr cmp_flatrange_addr_m68k
#define code_gen_alloc code_gen_alloc_m68k
#define commonNaNToFloat128 commonNaNToFloat128_m68k
#define commonNaNToFloat16 commonNaNToFloat16_m68k
#define commonNaNToFloat32 commonNaNToFloat32_m68k
@ -281,7 +280,6 @@
#define cpu_exec_init_all cpu_exec_init_all_m68k
#define cpu_exec_step_atomic cpu_exec_step_atomic_m68k
#define cpu_flush_icache_range cpu_flush_icache_range_m68k
#define cpu_gen_init cpu_gen_init_m68k
#define cpu_get_address_space cpu_get_address_space_m68k
#define cpu_get_clock cpu_get_clock_m68k
#define cpu_get_real_ticks cpu_get_real_ticks_m68k


@ -217,7 +217,6 @@
#define clz32 clz32_mips
#define clz64 clz64_mips
#define cmp_flatrange_addr cmp_flatrange_addr_mips
#define code_gen_alloc code_gen_alloc_mips
#define commonNaNToFloat128 commonNaNToFloat128_mips
#define commonNaNToFloat16 commonNaNToFloat16_mips
#define commonNaNToFloat32 commonNaNToFloat32_mips
@ -281,7 +280,6 @@
#define cpu_exec_init_all cpu_exec_init_all_mips
#define cpu_exec_step_atomic cpu_exec_step_atomic_mips
#define cpu_flush_icache_range cpu_flush_icache_range_mips
#define cpu_gen_init cpu_gen_init_mips
#define cpu_get_address_space cpu_get_address_space_mips
#define cpu_get_clock cpu_get_clock_mips
#define cpu_get_real_ticks cpu_get_real_ticks_mips


@ -217,7 +217,6 @@
#define clz32 clz32_mips64
#define clz64 clz64_mips64
#define cmp_flatrange_addr cmp_flatrange_addr_mips64
#define code_gen_alloc code_gen_alloc_mips64
#define commonNaNToFloat128 commonNaNToFloat128_mips64
#define commonNaNToFloat16 commonNaNToFloat16_mips64
#define commonNaNToFloat32 commonNaNToFloat32_mips64
@ -281,7 +280,6 @@
#define cpu_exec_init_all cpu_exec_init_all_mips64
#define cpu_exec_step_atomic cpu_exec_step_atomic_mips64
#define cpu_flush_icache_range cpu_flush_icache_range_mips64
#define cpu_gen_init cpu_gen_init_mips64
#define cpu_get_address_space cpu_get_address_space_mips64
#define cpu_get_clock cpu_get_clock_mips64
#define cpu_get_real_ticks cpu_get_real_ticks_mips64


@ -217,7 +217,6 @@
#define clz32 clz32_mips64el
#define clz64 clz64_mips64el
#define cmp_flatrange_addr cmp_flatrange_addr_mips64el
#define code_gen_alloc code_gen_alloc_mips64el
#define commonNaNToFloat128 commonNaNToFloat128_mips64el
#define commonNaNToFloat16 commonNaNToFloat16_mips64el
#define commonNaNToFloat32 commonNaNToFloat32_mips64el
@ -281,7 +280,6 @@
#define cpu_exec_init_all cpu_exec_init_all_mips64el
#define cpu_exec_step_atomic cpu_exec_step_atomic_mips64el
#define cpu_flush_icache_range cpu_flush_icache_range_mips64el
#define cpu_gen_init cpu_gen_init_mips64el
#define cpu_get_address_space cpu_get_address_space_mips64el
#define cpu_get_clock cpu_get_clock_mips64el
#define cpu_get_real_ticks cpu_get_real_ticks_mips64el


@ -217,7 +217,6 @@
#define clz32 clz32_mipsel
#define clz64 clz64_mipsel
#define cmp_flatrange_addr cmp_flatrange_addr_mipsel
#define code_gen_alloc code_gen_alloc_mipsel
#define commonNaNToFloat128 commonNaNToFloat128_mipsel
#define commonNaNToFloat16 commonNaNToFloat16_mipsel
#define commonNaNToFloat32 commonNaNToFloat32_mipsel
@ -281,7 +280,6 @@
#define cpu_exec_init_all cpu_exec_init_all_mipsel
#define cpu_exec_step_atomic cpu_exec_step_atomic_mipsel
#define cpu_flush_icache_range cpu_flush_icache_range_mipsel
#define cpu_gen_init cpu_gen_init_mipsel
#define cpu_get_address_space cpu_get_address_space_mipsel
#define cpu_get_clock cpu_get_clock_mipsel
#define cpu_get_real_ticks cpu_get_real_ticks_mipsel


@ -217,7 +217,6 @@
#define clz32 clz32_powerpc
#define clz64 clz64_powerpc
#define cmp_flatrange_addr cmp_flatrange_addr_powerpc
#define code_gen_alloc code_gen_alloc_powerpc
#define commonNaNToFloat128 commonNaNToFloat128_powerpc
#define commonNaNToFloat16 commonNaNToFloat16_powerpc
#define commonNaNToFloat32 commonNaNToFloat32_powerpc
@ -281,7 +280,6 @@
#define cpu_exec_init_all cpu_exec_init_all_powerpc
#define cpu_exec_step_atomic cpu_exec_step_atomic_powerpc
#define cpu_flush_icache_range cpu_flush_icache_range_powerpc
#define cpu_gen_init cpu_gen_init_powerpc
#define cpu_get_address_space cpu_get_address_space_powerpc
#define cpu_get_clock cpu_get_clock_powerpc
#define cpu_get_real_ticks cpu_get_real_ticks_powerpc

3343
qemu/riscv32.h Normal file

File diff suppressed because it is too large.

3343
qemu/riscv64.h Normal file

File diff suppressed because it is too large.


@ -217,7 +217,6 @@
#define clz32 clz32_sparc
#define clz64 clz64_sparc
#define cmp_flatrange_addr cmp_flatrange_addr_sparc
#define code_gen_alloc code_gen_alloc_sparc
#define commonNaNToFloat128 commonNaNToFloat128_sparc
#define commonNaNToFloat16 commonNaNToFloat16_sparc
#define commonNaNToFloat32 commonNaNToFloat32_sparc
@ -281,7 +280,6 @@
#define cpu_exec_init_all cpu_exec_init_all_sparc
#define cpu_exec_step_atomic cpu_exec_step_atomic_sparc
#define cpu_flush_icache_range cpu_flush_icache_range_sparc
#define cpu_gen_init cpu_gen_init_sparc
#define cpu_get_address_space cpu_get_address_space_sparc
#define cpu_get_clock cpu_get_clock_sparc
#define cpu_get_real_ticks cpu_get_real_ticks_sparc


@ -217,7 +217,6 @@
#define clz32 clz32_sparc64
#define clz64 clz64_sparc64
#define cmp_flatrange_addr cmp_flatrange_addr_sparc64
#define code_gen_alloc code_gen_alloc_sparc64
#define commonNaNToFloat128 commonNaNToFloat128_sparc64
#define commonNaNToFloat16 commonNaNToFloat16_sparc64
#define commonNaNToFloat32 commonNaNToFloat32_sparc64
@ -281,7 +280,6 @@
#define cpu_exec_init_all cpu_exec_init_all_sparc64
#define cpu_exec_step_atomic cpu_exec_step_atomic_sparc64
#define cpu_flush_icache_range cpu_flush_icache_range_sparc64
#define cpu_gen_init cpu_gen_init_sparc64
#define cpu_get_address_space cpu_get_address_space_sparc64
#define cpu_get_clock cpu_get_clock_sparc64
#define cpu_get_real_ticks cpu_get_real_ticks_sparc64


@ -0,0 +1,2 @@
obj-y += translate.o op_helper.o helper.o cpu.o fpu_helper.o pmp.o
obj-y += unicorn.o

441
qemu/target/riscv/cpu.c Normal file

@ -0,0 +1,441 @@
/*
* QEMU RISC-V CPU
*
* Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
* Copyright (c) 2017-2018 SiFive, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qapi/error.h"
#include "hw/riscv/spike.h"
#include "uc_priv.h"
/* RISC-V CPU definitions */
static const char riscv_exts[26] = "IEMAFDQCLBJTPVNSUHKORWXYZG";
const char * const riscv_int_regnames[] = {
"zero", "ra ", "sp ", "gp ", "tp ", "t0 ", "t1 ", "t2 ",
"s0 ", "s1 ", "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ",
"a6 ", "a7 ", "s2 ", "s3 ", "s4 ", "s5 ", "s6 ", "s7 ",
"s8 ", "s9 ", "s10 ", "s11 ", "t3 ", "t4 ", "t5 ", "t6 "
};
const char * const riscv_fpr_regnames[] = {
"ft0 ", "ft1 ", "ft2 ", "ft3 ", "ft4 ", "ft5 ", "ft6 ", "ft7 ",
"fs0 ", "fs1 ", "fa0 ", "fa1 ", "fa2 ", "fa3 ", "fa4 ", "fa5 ",
"fa6 ", "fa7 ", "fs2 ", "fs3 ", "fs4 ", "fs5 ", "fs6 ", "fs7 ",
"fs8 ", "fs9 ", "fs10", "fs11", "ft8 ", "ft9 ", "ft10", "ft11"
};
const char * const riscv_excp_names[] = {
"misaligned_fetch",
"fault_fetch",
"illegal_instruction",
"breakpoint",
"misaligned_load",
"fault_load",
"misaligned_store",
"fault_store",
"user_ecall",
"supervisor_ecall",
"hypervisor_ecall",
"machine_ecall",
"exec_page_fault",
"load_page_fault",
"reserved",
"store_page_fault"
};
const char * const riscv_intr_names[] = {
"u_software",
"s_software",
"h_software",
"m_software",
"u_timer",
"s_timer",
"h_timer",
"m_timer",
"u_external",
"s_external",
"h_external",
"m_external",
"coprocessor",
"host"
};
typedef struct RISCVCPUInfo {
const int bit_widths;
const char *name;
void (*initfn)(Object *obj);
} RISCVCPUInfo;
static void set_misa(CPURISCVState *env, target_ulong misa)
{
env->misa = misa;
}
static void set_versions(CPURISCVState *env, int user_ver, int priv_ver)
{
env->user_ver = user_ver;
env->priv_ver = priv_ver;
}
static void set_feature(CPURISCVState *env, int feature)
{
env->features |= (1ULL << feature);
}
static void set_resetvec(CPURISCVState *env, int resetvec)
{
#ifndef CONFIG_USER_ONLY
env->resetvec = resetvec;
#endif
}
static void riscv_any_cpu_init(struct uc_struct *uc, Object *obj, void *data)
{
CPURISCVState *env = &RISCV_CPU(obj)->env;
set_misa(env, RVXLEN | RVI | RVM | RVA | RVF | RVD | RVC | RVU);
set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0);
set_resetvec(env, DEFAULT_RSTVEC);
}
#if defined(TARGET_RISCV32)
static void rv32gcsu_priv1_09_1_cpu_init(struct uc_struct *uc, Object *obj, void *data)
{
CPURISCVState *env = &RISCV_CPU(obj)->env;
set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_09_1);
set_resetvec(env, DEFAULT_RSTVEC);
set_feature(env, RISCV_FEATURE_MMU);
}
static void rv32gcsu_priv1_10_0_cpu_init(struct uc_struct *uc, Object *obj, void *data)
{
CPURISCVState *env = &RISCV_CPU(obj)->env;
set_misa(env, RV32 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0);
set_resetvec(env, DEFAULT_RSTVEC);
set_feature(env, RISCV_FEATURE_MMU);
}
static void rv32imacu_nommu_cpu_init(struct uc_struct *uc, Object *obj, void *data)
{
CPURISCVState *env = &RISCV_CPU(obj)->env;
set_misa(env, RV32 | RVI | RVM | RVA | RVC | RVU);
set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0);
set_resetvec(env, DEFAULT_RSTVEC);
}
#elif defined(TARGET_RISCV64)
static void rv64gcsu_priv1_09_1_cpu_init(struct uc_struct *uc, Object *obj, void *data)
{
CPURISCVState *env = &RISCV_CPU(obj)->env;
set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_09_1);
set_resetvec(env, DEFAULT_RSTVEC);
set_feature(env, RISCV_FEATURE_MMU);
}
static void rv64gcsu_priv1_10_0_cpu_init(struct uc_struct *uc, Object *obj, void *data)
{
CPURISCVState *env = &RISCV_CPU(obj)->env;
set_misa(env, RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU);
set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0);
set_resetvec(env, DEFAULT_RSTVEC);
set_feature(env, RISCV_FEATURE_MMU);
}
static void rv64imacu_nommu_cpu_init(struct uc_struct *uc, Object *obj, void *data)
{
CPURISCVState *env = &RISCV_CPU(obj)->env;
set_misa(env, RV64 | RVI | RVM | RVA | RVC | RVU);
set_versions(env, USER_VERSION_2_02_0, PRIV_VERSION_1_10_0);
set_resetvec(env, DEFAULT_RSTVEC);
}
#endif
static ObjectClass *riscv_cpu_class_by_name(struct uc_struct *uc, const char *cpu_model)
{
ObjectClass *oc;
char *typename;
char **cpuname;
cpuname = g_strsplit(cpu_model, ",", 1);
typename = g_strdup_printf(RISCV_CPU_TYPE_NAME("%s"), cpuname[0]);
oc = object_class_by_name(uc, typename);
g_strfreev(cpuname);
g_free(typename);
if (!oc || !object_class_dynamic_cast(uc, oc, TYPE_RISCV_CPU) ||
object_class_is_abstract(oc)) {
return NULL;
}
return oc;
}
static void riscv_cpu_dump_state(CPUState *cs, FILE *f,
fprintf_function cpu_fprintf, int flags)
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
int i;
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "pc ", env->pc);
#ifndef CONFIG_USER_ONLY
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mhartid ", env->mhartid);
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mstatus ", env->mstatus);
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mip ",
(target_ulong)atomic_read(&env->mip));
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mie ", env->mie);
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mideleg ", env->mideleg);
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "medeleg ", env->medeleg);
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mtvec ", env->mtvec);
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mepc ", env->mepc);
cpu_fprintf(f, " %s " TARGET_FMT_lx "\n", "mcause ", env->mcause);
#endif
for (i = 0; i < 32; i++) {
cpu_fprintf(f, " %s " TARGET_FMT_lx,
riscv_int_regnames[i], env->gpr[i]);
if ((i & 3) == 3) {
cpu_fprintf(f, "\n");
}
}
if (flags & CPU_DUMP_FPU) {
for (i = 0; i < 32; i++) {
cpu_fprintf(f, " %s %016" PRIx64,
riscv_fpr_regnames[i], env->fpr[i]);
if ((i & 3) == 3) {
cpu_fprintf(f, "\n");
}
}
}
}
static void riscv_cpu_set_pc(CPUState *cs, vaddr value)
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
env->pc = value;
}
static void riscv_cpu_synchronize_from_tb(CPUState *cs, TranslationBlock *tb)
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
env->pc = tb->pc;
}
static bool riscv_cpu_has_work(CPUState *cs)
{
#ifndef CONFIG_USER_ONLY
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
/*
* Definition of the WFI instruction requires it to ignore the privilege
* mode and delegation registers, but respect individual enables
*/
return (atomic_read(&env->mip) & env->mie) != 0;
#else
return true;
#endif
}
void restore_state_to_opc(CPURISCVState *env, TranslationBlock *tb,
target_ulong *data)
{
env->pc = data[0];
}
static void riscv_cpu_reset(CPUState *cs)
{
RISCVCPU *cpu = RISCV_CPU(cs);
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(cpu->env.uc, cpu);
CPURISCVState *env = &cpu->env;
mcc->parent_reset(cs);
#ifndef CONFIG_USER_ONLY
env->priv = PRV_M;
env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
env->mcause = 0;
env->pc = env->resetvec;
#endif
cs->exception_index = EXCP_NONE;
set_default_nan_mode(1, &env->fp_status);
}
static int riscv_cpu_realize(struct uc_struct *uc, DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
RISCVCPUClass *mcc = RISCV_CPU_GET_CLASS(uc, dev);
qemu_init_vcpu(cs);
cpu_reset(cs);
mcc->parent_realize(uc, dev, errp);
return 0;
}
static void riscv_cpu_init(struct uc_struct *uc, Object *obj, void *opaque)
{
CPUState *cs = CPU(obj);
RISCVCPU *cpu = RISCV_CPU(obj);
cs->env_ptr = &cpu->env;
cpu_exec_init(cs, &error_abort, opaque);
}
static void riscv_cpu_class_init(struct uc_struct *uc, ObjectClass *c, void *data)
{
RISCVCPUClass *mcc = RISCV_CPU_CLASS(uc, c);
CPUClass *cc = CPU_CLASS(uc, c);
DeviceClass *dc = DEVICE_CLASS(uc, c);
mcc->parent_realize = dc->realize;
dc->realize = riscv_cpu_realize;
mcc->parent_reset = cc->reset;
cc->reset = riscv_cpu_reset;
cc->class_by_name = riscv_cpu_class_by_name;
cc->has_work = riscv_cpu_has_work;
cc->do_interrupt = riscv_cpu_do_interrupt;
cc->cpu_exec_interrupt = riscv_cpu_exec_interrupt;
cc->dump_state = riscv_cpu_dump_state;
cc->set_pc = riscv_cpu_set_pc;
cc->synchronize_from_tb = riscv_cpu_synchronize_from_tb;
#ifdef CONFIG_USER_ONLY
cc->handle_mmu_fault = riscv_cpu_handle_mmu_fault;
#else
cc->do_unaligned_access = riscv_cpu_do_unaligned_access;
cc->get_phys_page_debug = riscv_cpu_get_phys_page_debug;
#endif
#ifdef CONFIG_TCG
cc->tcg_initialize = riscv_translate_init;
#endif
}
char *riscv_isa_string(RISCVCPU *cpu)
{
int i;
const size_t maxlen = sizeof("rv128") + sizeof(riscv_exts) + 1;
char *isa_str = g_new(char, maxlen);
char *p = isa_str + snprintf(isa_str, maxlen, "rv%d", TARGET_LONG_BITS);
for (i = 0; i < sizeof(riscv_exts); i++) {
if (cpu->env.misa & RV(riscv_exts[i])) {
*p++ = qemu_tolower(riscv_exts[i]);
}
}
*p = '\0';
return isa_str;
}
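/* Worked example: for the rv64gcsu-v1.10.0 model above, misa is
 * RV64 | RVI | RVM | RVA | RVF | RVD | RVC | RVS | RVU, and the loop walks
 * riscv_exts in order, so the resulting string is "rv64imafdcsu". */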
/* Unicorn: commented out
typedef struct RISCVCPUListState {
fprintf_function cpu_fprintf;
FILE *file;
} RISCVCPUListState;
static gint riscv_cpu_list_compare(gconstpointer a, gconstpointer b)
{
ObjectClass *class_a = (ObjectClass *)a;
ObjectClass *class_b = (ObjectClass *)b;
const char *name_a, *name_b;
name_a = object_class_get_name(class_a);
name_b = object_class_get_name(class_b);
return strcmp(name_a, name_b);
}
static void riscv_cpu_list_entry(gpointer data, gpointer user_data)
{
RISCVCPUListState *s = user_data;
const char *typename = object_class_get_name(OBJECT_CLASS(data));
int len = strlen(typename) - strlen(RISCV_CPU_TYPE_SUFFIX);
(*s->cpu_fprintf)(s->file, "%.*s\n", len, typename);
}
*/
void riscv_cpu_list(FILE *f, fprintf_function cpu_fprintf)
{
/* Unicorn: Commented out
RISCVCPUListState s = {
.cpu_fprintf = cpu_fprintf,
.file = f,
};
GSList *list;
list = object_class_get_list(TYPE_RISCV_CPU, false);
list = g_slist_sort(list, riscv_cpu_list_compare);
g_slist_foreach(list, riscv_cpu_list_entry, &s);
g_slist_free(list);*/
}
#define DEFINE_CPU(type_name, initfn) \
{ \
.name = type_name, \
.parent = TYPE_RISCV_CPU, \
.instance_init = initfn \
}
static const TypeInfo riscv_cpu_type_infos[] = {
{
.name = TYPE_RISCV_CPU,
.parent = TYPE_CPU,
.instance_size = sizeof(RISCVCPU),
.instance_init = riscv_cpu_init,
.abstract = true,
.class_size = sizeof(RISCVCPUClass),
.class_init = riscv_cpu_class_init,
},
DEFINE_CPU(TYPE_RISCV_CPU_ANY, riscv_any_cpu_init),
#if defined(TARGET_RISCV32)
DEFINE_CPU(TYPE_RISCV_CPU_RV32GCSU_V1_09_1, rv32gcsu_priv1_09_1_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_RV32GCSU_V1_10_0, rv32gcsu_priv1_10_0_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_RV32IMACU_NOMMU, rv32imacu_nommu_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E31, rv32imacu_nommu_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U34, rv32gcsu_priv1_10_0_cpu_init)
#elif defined(TARGET_RISCV64)
DEFINE_CPU(TYPE_RISCV_CPU_RV64GCSU_V1_09_1, rv64gcsu_priv1_09_1_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_RV64GCSU_V1_10_0, rv64gcsu_priv1_10_0_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_RV64IMACU_NOMMU, rv64imacu_nommu_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_E51, rv64imacu_nommu_cpu_init),
DEFINE_CPU(TYPE_RISCV_CPU_SIFIVE_U54, rv64gcsu_priv1_10_0_cpu_init)
#endif
};
// Unicorn: Commented out to manually initialize types with the function below it.
//DEFINE_TYPES(riscv_cpu_type_infos)
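/* The abstract base entry (index 0) is copied so the Unicorn opaque pointer can
 * be attached as instance_userdata before registration; the concrete CPU models
 * that follow are registered unmodified. */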
void riscv_cpu_register_types(void *opaque)
{
TypeInfo riscv_cpu_type_info = riscv_cpu_type_infos[0];
riscv_cpu_type_info.instance_userdata = opaque;
type_register(opaque, &riscv_cpu_type_info);
for (int i = 1; i < ARRAY_SIZE(riscv_cpu_type_infos); i++) {
type_register(opaque, &riscv_cpu_type_infos[i]);
}
}

300
qemu/target/riscv/cpu.h Normal file

@ -0,0 +1,300 @@
/*
* QEMU RISC-V CPU
*
* Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
* Copyright (c) 2017-2018 SiFive, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef RISCV_CPU_H
#define RISCV_CPU_H
/* QEMU addressing/paging config */
#define TARGET_PAGE_BITS 12 /* 4 KiB Pages */
#if defined(TARGET_RISCV64)
#define TARGET_LONG_BITS 64
#define TARGET_PHYS_ADDR_SPACE_BITS 56 /* 44-bit PPN */
#define TARGET_VIRT_ADDR_SPACE_BITS 48 /* sv48 */
#elif defined(TARGET_RISCV32)
#define TARGET_LONG_BITS 32
#define TARGET_PHYS_ADDR_SPACE_BITS 34 /* 22-bit PPN */
#define TARGET_VIRT_ADDR_SPACE_BITS 32 /* sv32 */
#endif
#define TCG_GUEST_DEFAULT_MO 0
#define CPUArchState struct CPURISCVState
#include "config.h"
#include "qemu-common.h"
#include "qom/cpu.h"
#include "exec/cpu-defs.h"
#include "fpu/softfloat.h"
#define TYPE_RISCV_CPU "riscv-cpu"
#define RISCV_CPU_TYPE_SUFFIX "-" TYPE_RISCV_CPU
#define RISCV_CPU_TYPE_NAME(name) (name RISCV_CPU_TYPE_SUFFIX)
#define CPU_RESOLVING_TYPE TYPE_RISCV_CPU
#define TYPE_RISCV_CPU_ANY RISCV_CPU_TYPE_NAME("any")
#define TYPE_RISCV_CPU_RV32GCSU_V1_09_1 RISCV_CPU_TYPE_NAME("rv32gcsu-v1.9.1")
#define TYPE_RISCV_CPU_RV32GCSU_V1_10_0 RISCV_CPU_TYPE_NAME("rv32gcsu-v1.10.0")
#define TYPE_RISCV_CPU_RV32IMACU_NOMMU RISCV_CPU_TYPE_NAME("rv32imacu-nommu")
#define TYPE_RISCV_CPU_RV64GCSU_V1_09_1 RISCV_CPU_TYPE_NAME("rv64gcsu-v1.9.1")
#define TYPE_RISCV_CPU_RV64GCSU_V1_10_0 RISCV_CPU_TYPE_NAME("rv64gcsu-v1.10.0")
#define TYPE_RISCV_CPU_RV64IMACU_NOMMU RISCV_CPU_TYPE_NAME("rv64imacu-nommu")
#define TYPE_RISCV_CPU_SIFIVE_E31 RISCV_CPU_TYPE_NAME("sifive-e31")
#define TYPE_RISCV_CPU_SIFIVE_E51 RISCV_CPU_TYPE_NAME("sifive-e51")
#define TYPE_RISCV_CPU_SIFIVE_U34 RISCV_CPU_TYPE_NAME("sifive-u34")
#define TYPE_RISCV_CPU_SIFIVE_U54 RISCV_CPU_TYPE_NAME("sifive-u54")
#define RV32 ((target_ulong)1 << (TARGET_LONG_BITS - 2))
#define RV64 ((target_ulong)2 << (TARGET_LONG_BITS - 2))
#if defined(TARGET_RISCV32)
#define RVXLEN RV32
#elif defined(TARGET_RISCV64)
#define RVXLEN RV64
#endif
#define RV(x) ((target_ulong)1 << (x - 'A'))
#define RVI RV('I')
#define RVE RV('E') /* E and I are mutually exclusive */
#define RVM RV('M')
#define RVA RV('A')
#define RVF RV('F')
#define RVD RV('D')
#define RVC RV('C')
#define RVS RV('S')
#define RVU RV('U')
/* The S extension denotes that Supervisor mode exists; however, it is possible
to have a core that supports S mode but does not have an MMU, and there
is currently no bit in misa to indicate whether an MMU exists or not,
so a CPU features bitfield is required */
enum {
RISCV_FEATURE_MMU
};
#define USER_VERSION_2_02_0 0x00020200
#define PRIV_VERSION_1_09_1 0x00010901
#define PRIV_VERSION_1_10_0 0x00011000
#define TRANSLATE_FAIL 1
#define TRANSLATE_SUCCESS 0
#define NB_MMU_MODES 4
#define MMU_USER_IDX 3
#define MAX_RISCV_PMPS (16)
typedef struct CPURISCVState CPURISCVState;
#include "pmp.h"
struct CPURISCVState {
target_ulong gpr[32];
uint64_t fpr[32]; /* assume both F and D extensions */
target_ulong pc;
target_ulong load_res;
target_ulong load_val;
target_ulong frm;
target_ulong badaddr;
target_ulong user_ver;
target_ulong priv_ver;
target_ulong misa;
uint32_t features;
#ifndef CONFIG_USER_ONLY
target_ulong priv;
target_ulong resetvec;
target_ulong mhartid;
target_ulong mstatus;
/*
* CAUTION! Unlike the rest of this struct, mip is accessed asynchronously
* by I/O threads and other vCPUs, so hold the iothread mutex before
* operating on it. CPU_INTERRUPT_HARD should be in effect iff this is
* non-zero. Use riscv_cpu_set_local_interrupt.
*/
uint32_t mip; /* allow atomic_read for >= 32-bit hosts */
target_ulong mie;
target_ulong mideleg;
target_ulong sptbr; /* until: priv-1.9.1 */
target_ulong satp; /* since: priv-1.10.0 */
target_ulong sbadaddr;
target_ulong mbadaddr;
target_ulong medeleg;
target_ulong stvec;
target_ulong sepc;
target_ulong scause;
target_ulong mtvec;
target_ulong mepc;
target_ulong mcause;
target_ulong mtval; /* since: priv-1.10.0 */
target_ulong scounteren;
target_ulong mcounteren;
target_ulong sscratch;
target_ulong mscratch;
/* temporary htif regs */
uint64_t mfromhost;
uint64_t mtohost;
uint64_t timecmp;
/* physical memory protection */
pmp_table_t pmp_state;
#endif
float_status fp_status;
/* QEMU */
CPU_COMMON
/* Fields from here on are preserved across CPU reset. */
QEMUTimer *timer; /* Internal timer */
// Unicorn engine
struct uc_struct *uc;
};
#define RISCV_CPU_CLASS(uc, klass) \
OBJECT_CLASS_CHECK(uc, RISCVCPUClass, (klass), TYPE_RISCV_CPU)
#define RISCV_CPU(obj) ((RISCVCPU *)obj)
#define RISCV_CPU_GET_CLASS(uc, obj) \
OBJECT_GET_CLASS(uc, RISCVCPUClass, (obj), TYPE_RISCV_CPU)
/**
* RISCVCPUClass:
* @parent_realize: The parent class' realize handler.
* @parent_reset: The parent class' reset handler.
*
* A RISCV CPU model.
*/
typedef struct RISCVCPUClass {
/*< private >*/
CPUClass parent_class;
/*< public >*/
DeviceRealize parent_realize;
void (*parent_reset)(CPUState *cpu);
} RISCVCPUClass;
/**
* RISCVCPU:
* @env: #CPURISCVState
*
* A RISCV CPU.
*/
typedef struct RISCVCPU {
/*< private >*/
CPUState parent_obj;
/*< public >*/
CPURISCVState env;
} RISCVCPU;
static inline RISCVCPU *riscv_env_get_cpu(CPURISCVState *env)
{
return container_of(env, RISCVCPU, env);
}
static inline int riscv_has_ext(CPURISCVState *env, target_ulong ext)
{
return (env->misa & ext) != 0;
}
static inline bool riscv_feature(CPURISCVState *env, int feature)
{
return env->features & (1ULL << feature);
}
#include "cpu_user.h"
#include "cpu_bits.h"
extern const char * const riscv_int_regnames[];
extern const char * const riscv_fpr_regnames[];
extern const char * const riscv_excp_names[];
extern const char * const riscv_intr_names[];
#define ENV_GET_CPU(e) CPU(riscv_env_get_cpu(e))
#define ENV_OFFSET offsetof(RISCVCPU, env)
void riscv_cpu_do_interrupt(CPUState *cpu);
int riscv_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
int riscv_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request);
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch);
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type, int mmu_idx,
uintptr_t retaddr);
int riscv_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int size,
int rw, int mmu_idx);
char *riscv_isa_string(RISCVCPU *cpu);
void riscv_cpu_list(FILE *f, fprintf_function cpu_fprintf);
#define cpu_signal_handler cpu_riscv_signal_handler
#define cpu_list riscv_cpu_list
// Unicorn: Converted into a function to avoid preprocessor redefinition warnings.
static inline int cpu_mmu_index(CPURISCVState *env, bool ifetch) {
return riscv_cpu_mmu_index(env, ifetch);
}
void riscv_set_mode(CPURISCVState *env, target_ulong newpriv);
void riscv_translate_init(struct uc_struct *uc);
RISCVCPU *cpu_riscv_init(const char *cpu_model);
int cpu_riscv_signal_handler(int host_signum, void *pinfo, void *puc);
void QEMU_NORETURN do_raise_exception_err(CPURISCVState *env,
uint32_t exception, uintptr_t pc);
target_ulong cpu_riscv_get_fflags(CPURISCVState *env);
void cpu_riscv_set_fflags(CPURISCVState *env, target_ulong);
#define TB_FLAGS_MMU_MASK 3
#define TB_FLAGS_FP_ENABLE MSTATUS_FS
static inline void cpu_get_tb_cpu_state(CPURISCVState *env, target_ulong *pc,
target_ulong *cs_base, uint32_t *flags)
{
*pc = env->pc;
*cs_base = 0;
#ifdef CONFIG_USER_ONLY
*flags = TB_FLAGS_FP_ENABLE;
#else
*flags = cpu_mmu_index(env, 0) | (env->mstatus & MSTATUS_FS);
#endif
}
void csr_write_helper(CPURISCVState *env, target_ulong val_to_write,
target_ulong csrno);
target_ulong csr_read_helper(CPURISCVState *env, target_ulong csrno);
#ifndef CONFIG_USER_ONLY
void riscv_set_local_interrupt(RISCVCPU *cpu, target_ulong mask, int value);
#endif
#include "exec/cpu-all.h"
#endif /* RISCV_CPU_H */


@ -0,0 +1,409 @@
/* RISC-V ISA constants */
#define get_field(reg, mask) (((reg) & \
(target_ulong)(mask)) / ((mask) & ~((mask) << 1)))
#define set_field(reg, mask, val) (((reg) & ~(target_ulong)(mask)) | \
(((target_ulong)(val) * ((mask) & ~((mask) << 1))) & \
(target_ulong)(mask)))
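/* Example: MSTATUS_MPP below is 0x1800 (bits 11..12), so
 * get_field(env->mstatus, MSTATUS_MPP) yields the 2-bit previous-privilege value
 * and set_field(env->mstatus, MSTATUS_MPP, PRV_M) writes PRV_M back into it. */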
#define PGSHIFT 12
#define FSR_RD_SHIFT 5
#define FSR_RD (0x7 << FSR_RD_SHIFT)
#define FPEXC_NX 0x01
#define FPEXC_UF 0x02
#define FPEXC_OF 0x04
#define FPEXC_DZ 0x08
#define FPEXC_NV 0x10
#define FSR_AEXC_SHIFT 0
#define FSR_NVA (FPEXC_NV << FSR_AEXC_SHIFT)
#define FSR_OFA (FPEXC_OF << FSR_AEXC_SHIFT)
#define FSR_UFA (FPEXC_UF << FSR_AEXC_SHIFT)
#define FSR_DZA (FPEXC_DZ << FSR_AEXC_SHIFT)
#define FSR_NXA (FPEXC_NX << FSR_AEXC_SHIFT)
#define FSR_AEXC (FSR_NVA | FSR_OFA | FSR_UFA | FSR_DZA | FSR_NXA)
/* CSR numbers */
#define CSR_FFLAGS 0x1
#define CSR_FRM 0x2
#define CSR_FCSR 0x3
#define CSR_CYCLE 0xc00
#define CSR_TIME 0xc01
#define CSR_INSTRET 0xc02
#define CSR_HPMCOUNTER3 0xc03
#define CSR_HPMCOUNTER4 0xc04
#define CSR_HPMCOUNTER5 0xc05
#define CSR_HPMCOUNTER6 0xc06
#define CSR_HPMCOUNTER7 0xc07
#define CSR_HPMCOUNTER8 0xc08
#define CSR_HPMCOUNTER9 0xc09
#define CSR_HPMCOUNTER10 0xc0a
#define CSR_HPMCOUNTER11 0xc0b
#define CSR_HPMCOUNTER12 0xc0c
#define CSR_HPMCOUNTER13 0xc0d
#define CSR_HPMCOUNTER14 0xc0e
#define CSR_HPMCOUNTER15 0xc0f
#define CSR_HPMCOUNTER16 0xc10
#define CSR_HPMCOUNTER17 0xc11
#define CSR_HPMCOUNTER18 0xc12
#define CSR_HPMCOUNTER19 0xc13
#define CSR_HPMCOUNTER20 0xc14
#define CSR_HPMCOUNTER21 0xc15
#define CSR_HPMCOUNTER22 0xc16
#define CSR_HPMCOUNTER23 0xc17
#define CSR_HPMCOUNTER24 0xc18
#define CSR_HPMCOUNTER25 0xc19
#define CSR_HPMCOUNTER26 0xc1a
#define CSR_HPMCOUNTER27 0xc1b
#define CSR_HPMCOUNTER28 0xc1c
#define CSR_HPMCOUNTER29 0xc1d
#define CSR_HPMCOUNTER30 0xc1e
#define CSR_HPMCOUNTER31 0xc1f
#define CSR_SSTATUS 0x100
#define CSR_SIE 0x104
#define CSR_STVEC 0x105
#define CSR_SCOUNTEREN 0x106
#define CSR_SSCRATCH 0x140
#define CSR_SEPC 0x141
#define CSR_SCAUSE 0x142
#define CSR_SBADADDR 0x143
#define CSR_SIP 0x144
#define CSR_SPTBR 0x180
#define CSR_SATP 0x180
#define CSR_MSTATUS 0x300
#define CSR_MISA 0x301
#define CSR_MEDELEG 0x302
#define CSR_MIDELEG 0x303
#define CSR_MIE 0x304
#define CSR_MTVEC 0x305
#define CSR_MCOUNTEREN 0x306
#define CSR_MSCRATCH 0x340
#define CSR_MEPC 0x341
#define CSR_MCAUSE 0x342
#define CSR_MBADADDR 0x343
#define CSR_MIP 0x344
#define CSR_PMPCFG0 0x3a0
#define CSR_PMPCFG1 0x3a1
#define CSR_PMPCFG2 0x3a2
#define CSR_PMPCFG3 0x3a3
#define CSR_PMPADDR0 0x3b0
#define CSR_PMPADDR1 0x3b1
#define CSR_PMPADDR2 0x3b2
#define CSR_PMPADDR3 0x3b3
#define CSR_PMPADDR4 0x3b4
#define CSR_PMPADDR5 0x3b5
#define CSR_PMPADDR6 0x3b6
#define CSR_PMPADDR7 0x3b7
#define CSR_PMPADDR8 0x3b8
#define CSR_PMPADDR9 0x3b9
#define CSR_PMPADDR10 0x3ba
#define CSR_PMPADDR11 0x3bb
#define CSR_PMPADDR12 0x3bc
#define CSR_PMPADDR13 0x3bd
#define CSR_PMPADDR14 0x3be
#define CSR_PMPADDR15 0x3bf
#define CSR_TSELECT 0x7a0
#define CSR_TDATA1 0x7a1
#define CSR_TDATA2 0x7a2
#define CSR_TDATA3 0x7a3
#define CSR_DCSR 0x7b0
#define CSR_DPC 0x7b1
#define CSR_DSCRATCH 0x7b2
#define CSR_MCYCLE 0xb00
#define CSR_MINSTRET 0xb02
#define CSR_MHPMCOUNTER3 0xb03
#define CSR_MHPMCOUNTER4 0xb04
#define CSR_MHPMCOUNTER5 0xb05
#define CSR_MHPMCOUNTER6 0xb06
#define CSR_MHPMCOUNTER7 0xb07
#define CSR_MHPMCOUNTER8 0xb08
#define CSR_MHPMCOUNTER9 0xb09
#define CSR_MHPMCOUNTER10 0xb0a
#define CSR_MHPMCOUNTER11 0xb0b
#define CSR_MHPMCOUNTER12 0xb0c
#define CSR_MHPMCOUNTER13 0xb0d
#define CSR_MHPMCOUNTER14 0xb0e
#define CSR_MHPMCOUNTER15 0xb0f
#define CSR_MHPMCOUNTER16 0xb10
#define CSR_MHPMCOUNTER17 0xb11
#define CSR_MHPMCOUNTER18 0xb12
#define CSR_MHPMCOUNTER19 0xb13
#define CSR_MHPMCOUNTER20 0xb14
#define CSR_MHPMCOUNTER21 0xb15
#define CSR_MHPMCOUNTER22 0xb16
#define CSR_MHPMCOUNTER23 0xb17
#define CSR_MHPMCOUNTER24 0xb18
#define CSR_MHPMCOUNTER25 0xb19
#define CSR_MHPMCOUNTER26 0xb1a
#define CSR_MHPMCOUNTER27 0xb1b
#define CSR_MHPMCOUNTER28 0xb1c
#define CSR_MHPMCOUNTER29 0xb1d
#define CSR_MHPMCOUNTER30 0xb1e
#define CSR_MHPMCOUNTER31 0xb1f
#define CSR_MUCOUNTEREN 0x320
#define CSR_MSCOUNTEREN 0x321
#define CSR_MHPMEVENT3 0x323
#define CSR_MHPMEVENT4 0x324
#define CSR_MHPMEVENT5 0x325
#define CSR_MHPMEVENT6 0x326
#define CSR_MHPMEVENT7 0x327
#define CSR_MHPMEVENT8 0x328
#define CSR_MHPMEVENT9 0x329
#define CSR_MHPMEVENT10 0x32a
#define CSR_MHPMEVENT11 0x32b
#define CSR_MHPMEVENT12 0x32c
#define CSR_MHPMEVENT13 0x32d
#define CSR_MHPMEVENT14 0x32e
#define CSR_MHPMEVENT15 0x32f
#define CSR_MHPMEVENT16 0x330
#define CSR_MHPMEVENT17 0x331
#define CSR_MHPMEVENT18 0x332
#define CSR_MHPMEVENT19 0x333
#define CSR_MHPMEVENT20 0x334
#define CSR_MHPMEVENT21 0x335
#define CSR_MHPMEVENT22 0x336
#define CSR_MHPMEVENT23 0x337
#define CSR_MHPMEVENT24 0x338
#define CSR_MHPMEVENT25 0x339
#define CSR_MHPMEVENT26 0x33a
#define CSR_MHPMEVENT27 0x33b
#define CSR_MHPMEVENT28 0x33c
#define CSR_MHPMEVENT29 0x33d
#define CSR_MHPMEVENT30 0x33e
#define CSR_MHPMEVENT31 0x33f
#define CSR_MVENDORID 0xf11
#define CSR_MARCHID 0xf12
#define CSR_MIMPID 0xf13
#define CSR_MHARTID 0xf14
#define CSR_CYCLEH 0xc80
#define CSR_TIMEH 0xc81
#define CSR_INSTRETH 0xc82
#define CSR_HPMCOUNTER3H 0xc83
#define CSR_HPMCOUNTER4H 0xc84
#define CSR_HPMCOUNTER5H 0xc85
#define CSR_HPMCOUNTER6H 0xc86
#define CSR_HPMCOUNTER7H 0xc87
#define CSR_HPMCOUNTER8H 0xc88
#define CSR_HPMCOUNTER9H 0xc89
#define CSR_HPMCOUNTER10H 0xc8a
#define CSR_HPMCOUNTER11H 0xc8b
#define CSR_HPMCOUNTER12H 0xc8c
#define CSR_HPMCOUNTER13H 0xc8d
#define CSR_HPMCOUNTER14H 0xc8e
#define CSR_HPMCOUNTER15H 0xc8f
#define CSR_HPMCOUNTER16H 0xc90
#define CSR_HPMCOUNTER17H 0xc91
#define CSR_HPMCOUNTER18H 0xc92
#define CSR_HPMCOUNTER19H 0xc93
#define CSR_HPMCOUNTER20H 0xc94
#define CSR_HPMCOUNTER21H 0xc95
#define CSR_HPMCOUNTER22H 0xc96
#define CSR_HPMCOUNTER23H 0xc97
#define CSR_HPMCOUNTER24H 0xc98
#define CSR_HPMCOUNTER25H 0xc99
#define CSR_HPMCOUNTER26H 0xc9a
#define CSR_HPMCOUNTER27H 0xc9b
#define CSR_HPMCOUNTER28H 0xc9c
#define CSR_HPMCOUNTER29H 0xc9d
#define CSR_HPMCOUNTER30H 0xc9e
#define CSR_HPMCOUNTER31H 0xc9f
#define CSR_MCYCLEH 0xb80
#define CSR_MINSTRETH 0xb82
#define CSR_MHPMCOUNTER3H 0xb83
#define CSR_MHPMCOUNTER4H 0xb84
#define CSR_MHPMCOUNTER5H 0xb85
#define CSR_MHPMCOUNTER6H 0xb86
#define CSR_MHPMCOUNTER7H 0xb87
#define CSR_MHPMCOUNTER8H 0xb88
#define CSR_MHPMCOUNTER9H 0xb89
#define CSR_MHPMCOUNTER10H 0xb8a
#define CSR_MHPMCOUNTER11H 0xb8b
#define CSR_MHPMCOUNTER12H 0xb8c
#define CSR_MHPMCOUNTER13H 0xb8d
#define CSR_MHPMCOUNTER14H 0xb8e
#define CSR_MHPMCOUNTER15H 0xb8f
#define CSR_MHPMCOUNTER16H 0xb90
#define CSR_MHPMCOUNTER17H 0xb91
#define CSR_MHPMCOUNTER18H 0xb92
#define CSR_MHPMCOUNTER19H 0xb93
#define CSR_MHPMCOUNTER20H 0xb94
#define CSR_MHPMCOUNTER21H 0xb95
#define CSR_MHPMCOUNTER22H 0xb96
#define CSR_MHPMCOUNTER23H 0xb97
#define CSR_MHPMCOUNTER24H 0xb98
#define CSR_MHPMCOUNTER25H 0xb99
#define CSR_MHPMCOUNTER26H 0xb9a
#define CSR_MHPMCOUNTER27H 0xb9b
#define CSR_MHPMCOUNTER28H 0xb9c
#define CSR_MHPMCOUNTER29H 0xb9d
#define CSR_MHPMCOUNTER30H 0xb9e
#define CSR_MHPMCOUNTER31H 0xb9f
/* mstatus bits */
#define MSTATUS_UIE 0x00000001
#define MSTATUS_SIE 0x00000002
#define MSTATUS_HIE 0x00000004
#define MSTATUS_MIE 0x00000008
#define MSTATUS_UPIE 0x00000010
#define MSTATUS_SPIE 0x00000020
#define MSTATUS_HPIE 0x00000040
#define MSTATUS_MPIE 0x00000080
#define MSTATUS_SPP 0x00000100
#define MSTATUS_HPP 0x00000600
#define MSTATUS_MPP 0x00001800
#define MSTATUS_FS 0x00006000
#define MSTATUS_XS 0x00018000
#define MSTATUS_MPRV 0x00020000
#define MSTATUS_PUM 0x00040000 /* until: priv-1.9.1 */
#define MSTATUS_SUM 0x00040000 /* since: priv-1.10 */
#define MSTATUS_MXR 0x00080000
#define MSTATUS_VM 0x1F000000 /* until: priv-1.9.1 */
#define MSTATUS_TVM 0x00100000 /* since: priv-1.10 */
#define MSTATUS_TW 0x20000000 /* since: priv-1.10 */
#define MSTATUS_TSR 0x40000000 /* since: priv-1.10 */
#define MSTATUS64_UXL 0x0000000300000000ULL
#define MSTATUS64_SXL 0x0000000C00000000ULL
#define MSTATUS32_SD 0x80000000
#define MSTATUS64_SD 0x8000000000000000ULL
#if defined(TARGET_RISCV32)
#define MSTATUS_SD MSTATUS32_SD
#elif defined(TARGET_RISCV64)
#define MSTATUS_SD MSTATUS64_SD
#endif
/* sstatus bits */
#define SSTATUS_UIE 0x00000001
#define SSTATUS_SIE 0x00000002
#define SSTATUS_UPIE 0x00000010
#define SSTATUS_SPIE 0x00000020
#define SSTATUS_SPP 0x00000100
#define SSTATUS_FS 0x00006000
#define SSTATUS_XS 0x00018000
#define SSTATUS_PUM 0x00040000 /* until: priv-1.9.1 */
#define SSTATUS_SUM 0x00040000 /* since: priv-1.10 */
#define SSTATUS_MXR 0x00080000
#define SSTATUS32_SD 0x80000000
#define SSTATUS64_SD 0x8000000000000000ULL
#if defined(TARGET_RISCV32)
#define SSTATUS_SD SSTATUS32_SD
#elif defined(TARGET_RISCV64)
#define SSTATUS_SD SSTATUS64_SD
#endif
/* irqs */
#define MIP_SSIP (1 << IRQ_S_SOFT)
#define MIP_HSIP (1 << IRQ_H_SOFT)
#define MIP_MSIP (1 << IRQ_M_SOFT)
#define MIP_STIP (1 << IRQ_S_TIMER)
#define MIP_HTIP (1 << IRQ_H_TIMER)
#define MIP_MTIP (1 << IRQ_M_TIMER)
#define MIP_SEIP (1 << IRQ_S_EXT)
#define MIP_HEIP (1 << IRQ_H_EXT)
#define MIP_MEIP (1 << IRQ_M_EXT)
#define SIP_SSIP MIP_SSIP
#define SIP_STIP MIP_STIP
#define SIP_SEIP MIP_SEIP
#define PRV_U 0
#define PRV_S 1
#define PRV_H 2
#define PRV_M 3
/* privileged ISA 1.9.1 VM modes (mstatus.vm) */
#define VM_1_09_MBARE 0
#define VM_1_09_MBB 1
#define VM_1_09_MBBID 2
#define VM_1_09_SV32 8
#define VM_1_09_SV39 9
#define VM_1_09_SV48 10
/* privileged ISA 1.10.0 VM modes (satp.mode) */
#define VM_1_10_MBARE 0
#define VM_1_10_SV32 1
#define VM_1_10_SV39 8
#define VM_1_10_SV48 9
#define VM_1_10_SV57 10
#define VM_1_10_SV64 11
/* privileged ISA interrupt causes */
#define IRQ_U_SOFT 0 /* since: priv-1.10 */
#define IRQ_S_SOFT 1
#define IRQ_H_SOFT 2 /* until: priv-1.9.1 */
#define IRQ_M_SOFT 3 /* until: priv-1.9.1 */
#define IRQ_U_TIMER 4 /* since: priv-1.10 */
#define IRQ_S_TIMER 5
#define IRQ_H_TIMER 6 /* until: priv-1.9.1 */
#define IRQ_M_TIMER 7 /* until: priv-1.9.1 */
#define IRQ_U_EXT 8 /* since: priv-1.10 */
#define IRQ_S_EXT 9
#define IRQ_H_EXT 10 /* until: priv-1.9.1 */
#define IRQ_M_EXT 11 /* until: priv-1.9.1 */
#define IRQ_X_COP 12 /* non-standard */
/* Default addresses */
#define DEFAULT_RSTVEC 0x00001000
/* RV32 satp field masks */
#define SATP32_MODE 0x80000000
#define SATP32_ASID 0x7fc00000
#define SATP32_PPN 0x003fffff
/* RV64 satp field masks */
#define SATP64_MODE 0xF000000000000000ULL
#define SATP64_ASID 0x0FFFF00000000000ULL
#define SATP64_PPN 0x00000FFFFFFFFFFFULL
#if defined(TARGET_RISCV32)
#define SATP_MODE SATP32_MODE
#define SATP_ASID SATP32_ASID
#define SATP_PPN SATP32_PPN
#endif
#if defined(TARGET_RISCV64)
#define SATP_MODE SATP64_MODE
#define SATP_ASID SATP64_ASID
#define SATP_PPN SATP64_PPN
#endif
/* RISCV Exception Codes */
#define EXCP_NONE -1 /* not a real RISCV exception code */
#define RISCV_EXCP_INST_ADDR_MIS 0x0
#define RISCV_EXCP_INST_ACCESS_FAULT 0x1
#define RISCV_EXCP_ILLEGAL_INST 0x2
#define RISCV_EXCP_BREAKPOINT 0x3
#define RISCV_EXCP_LOAD_ADDR_MIS 0x4
#define RISCV_EXCP_LOAD_ACCESS_FAULT 0x5
#define RISCV_EXCP_STORE_AMO_ADDR_MIS 0x6
#define RISCV_EXCP_STORE_AMO_ACCESS_FAULT 0x7
#define RISCV_EXCP_U_ECALL 0x8 /* for convenience, report all
ECALLs as this, handler
fixes */
#define RISCV_EXCP_S_ECALL 0x9
#define RISCV_EXCP_H_ECALL 0xa
#define RISCV_EXCP_M_ECALL 0xb
#define RISCV_EXCP_INST_PAGE_FAULT 0xc /* since: priv-1.10.0 */
#define RISCV_EXCP_LOAD_PAGE_FAULT 0xd /* since: priv-1.10.0 */
#define RISCV_EXCP_STORE_PAGE_FAULT 0xf /* since: priv-1.10.0 */
#define RISCV_EXCP_INT_FLAG 0x80000000
#define RISCV_EXCP_INT_MASK 0x7fffffff
/* page table entry (PTE) fields */
#define PTE_V 0x001 /* Valid */
#define PTE_R 0x002 /* Read */
#define PTE_W 0x004 /* Write */
#define PTE_X 0x008 /* Execute */
#define PTE_U 0x010 /* User */
#define PTE_G 0x020 /* Global */
#define PTE_A 0x040 /* Accessed */
#define PTE_D 0x080 /* Dirty */
#define PTE_SOFT 0x300 /* Reserved for Software */
#define PTE_PPN_SHIFT 10


@ -0,0 +1,13 @@
#define xRA 1 /* return address (aka link register) */
#define xSP 2 /* stack pointer */
#define xGP 3 /* global pointer */
#define xTP 4 /* thread pointer */
#define xA0 10 /* gpr[10-17] are syscall arguments */
#define xA1 11
#define xA2 12
#define xA3 13
#define xA4 14
#define xA5 15
#define xA6 16
#define xA7 17 /* syscall number goes here */


@ -0,0 +1,371 @@
/*
* RISC-V FPU Emulation Helpers for QEMU.
*
* Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include <stdlib.h>
#include "cpu.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
target_ulong cpu_riscv_get_fflags(CPURISCVState *env)
{
int soft = get_float_exception_flags(&env->fp_status);
target_ulong hard = 0;
hard |= (soft & float_flag_inexact) ? FPEXC_NX : 0;
hard |= (soft & float_flag_underflow) ? FPEXC_UF : 0;
hard |= (soft & float_flag_overflow) ? FPEXC_OF : 0;
hard |= (soft & float_flag_divbyzero) ? FPEXC_DZ : 0;
hard |= (soft & float_flag_invalid) ? FPEXC_NV : 0;
return hard;
}
void cpu_riscv_set_fflags(CPURISCVState *env, target_ulong hard)
{
int soft = 0;
soft |= (hard & FPEXC_NX) ? float_flag_inexact : 0;
soft |= (hard & FPEXC_UF) ? float_flag_underflow : 0;
soft |= (hard & FPEXC_OF) ? float_flag_overflow : 0;
soft |= (hard & FPEXC_DZ) ? float_flag_divbyzero : 0;
soft |= (hard & FPEXC_NV) ? float_flag_invalid : 0;
set_float_exception_flags(soft, &env->fp_status);
}
void helper_set_rounding_mode(CPURISCVState *env, uint32_t rm)
{
int softrm;
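/* RISC-V rm field encodings: 0 = RNE, 1 = RTZ, 2 = RDN, 3 = RUP, 4 = RMM;
 * 7 (DYN) selects the dynamic rounding mode held in the frm CSR (handled below). */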
if (rm == 7) {
rm = env->frm;
}
switch (rm) {
case 0:
softrm = float_round_nearest_even;
break;
case 1:
softrm = float_round_to_zero;
break;
case 2:
softrm = float_round_down;
break;
case 3:
softrm = float_round_up;
break;
case 4:
softrm = float_round_ties_away;
break;
default:
do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
set_float_rounding_mode(softrm, &env->fp_status);
}
uint64_t helper_fmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
return float32_muladd(frs1, frs2, frs3, 0, &env->fp_status);
}
uint64_t helper_fmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
return float64_muladd(frs1, frs2, frs3, 0, &env->fp_status);
}
uint64_t helper_fmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
return float32_muladd(frs1, frs2, frs3, float_muladd_negate_c,
&env->fp_status);
}
uint64_t helper_fmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
return float64_muladd(frs1, frs2, frs3, float_muladd_negate_c,
&env->fp_status);
}
uint64_t helper_fnmsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
return float32_muladd(frs1, frs2, frs3, float_muladd_negate_product,
&env->fp_status);
}
uint64_t helper_fnmsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
return float64_muladd(frs1, frs2, frs3, float_muladd_negate_product,
&env->fp_status);
}
uint64_t helper_fnmadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
return float32_muladd(frs1, frs2, frs3, float_muladd_negate_c |
float_muladd_negate_product, &env->fp_status);
}
uint64_t helper_fnmadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2,
uint64_t frs3)
{
return float64_muladd(frs1, frs2, frs3, float_muladd_negate_c |
float_muladd_negate_product, &env->fp_status);
}
uint64_t helper_fadd_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float32_add(frs1, frs2, &env->fp_status);
}
uint64_t helper_fsub_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float32_sub(frs1, frs2, &env->fp_status);
}
uint64_t helper_fmul_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float32_mul(frs1, frs2, &env->fp_status);
}
uint64_t helper_fdiv_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float32_div(frs1, frs2, &env->fp_status);
}
uint64_t helper_fmin_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float32_minnum(frs1, frs2, &env->fp_status);
}
uint64_t helper_fmax_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float32_maxnum(frs1, frs2, &env->fp_status);
}
uint64_t helper_fsqrt_s(CPURISCVState *env, uint64_t frs1)
{
return float32_sqrt(frs1, &env->fp_status);
}
target_ulong helper_fle_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float32_le(frs1, frs2, &env->fp_status);
}
target_ulong helper_flt_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float32_lt(frs1, frs2, &env->fp_status);
}
target_ulong helper_feq_s(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float32_eq_quiet(frs1, frs2, &env->fp_status);
}
target_ulong helper_fcvt_w_s(CPURISCVState *env, uint64_t frs1)
{
return float32_to_int32(frs1, &env->fp_status);
}
target_ulong helper_fcvt_wu_s(CPURISCVState *env, uint64_t frs1)
{
return (int32_t)float32_to_uint32(frs1, &env->fp_status);
}
#if defined(TARGET_RISCV64)
uint64_t helper_fcvt_l_s(CPURISCVState *env, uint64_t frs1)
{
return float32_to_int64(frs1, &env->fp_status);
}
uint64_t helper_fcvt_lu_s(CPURISCVState *env, uint64_t frs1)
{
return float32_to_uint64(frs1, &env->fp_status);
}
#endif
uint64_t helper_fcvt_s_w(CPURISCVState *env, target_ulong rs1)
{
return int32_to_float32((int32_t)rs1, &env->fp_status);
}
uint64_t helper_fcvt_s_wu(CPURISCVState *env, target_ulong rs1)
{
return uint32_to_float32((uint32_t)rs1, &env->fp_status);
}
#if defined(TARGET_RISCV64)
uint64_t helper_fcvt_s_l(CPURISCVState *env, uint64_t rs1)
{
return int64_to_float32(rs1, &env->fp_status);
}
uint64_t helper_fcvt_s_lu(CPURISCVState *env, uint64_t rs1)
{
return uint64_to_float32(rs1, &env->fp_status);
}
#endif
target_ulong helper_fclass_s(uint64_t frs1)
{
float32 f = frs1;
bool sign = float32_is_neg(f);
if (float32_is_infinity(f)) {
return sign ? 1 << 0 : 1 << 7;
} else if (float32_is_zero(f)) {
return sign ? 1 << 3 : 1 << 4;
} else if (float32_is_zero_or_denormal(f)) {
return sign ? 1 << 2 : 1 << 5;
} else if (float32_is_any_nan(f)) {
float_status s = { }; /* for snan_bit_is_one */
return float32_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8;
} else {
return sign ? 1 << 1 : 1 << 6;
}
}
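/* The fclass result is a one-hot 10-bit mask defined by the RISC-V spec:
 * bit 0 = -inf, 1 = negative normal, 2 = negative subnormal, 3 = -0,
 * 4 = +0, 5 = positive subnormal, 6 = positive normal, 7 = +inf,
 * 8 = signaling NaN, 9 = quiet NaN. helper_fclass_d uses the same layout. */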
uint64_t helper_fadd_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_add(frs1, frs2, &env->fp_status);
}
uint64_t helper_fsub_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_sub(frs1, frs2, &env->fp_status);
}
uint64_t helper_fmul_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_mul(frs1, frs2, &env->fp_status);
}
uint64_t helper_fdiv_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_div(frs1, frs2, &env->fp_status);
}
uint64_t helper_fmin_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_minnum(frs1, frs2, &env->fp_status);
}
uint64_t helper_fmax_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_maxnum(frs1, frs2, &env->fp_status);
}
uint64_t helper_fcvt_s_d(CPURISCVState *env, uint64_t rs1)
{
return float64_to_float32(rs1, &env->fp_status);
}
uint64_t helper_fcvt_d_s(CPURISCVState *env, uint64_t rs1)
{
return float32_to_float64(rs1, &env->fp_status);
}
uint64_t helper_fsqrt_d(CPURISCVState *env, uint64_t frs1)
{
return float64_sqrt(frs1, &env->fp_status);
}
target_ulong helper_fle_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_le(frs1, frs2, &env->fp_status);
}
target_ulong helper_flt_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_lt(frs1, frs2, &env->fp_status);
}
target_ulong helper_feq_d(CPURISCVState *env, uint64_t frs1, uint64_t frs2)
{
return float64_eq_quiet(frs1, frs2, &env->fp_status);
}
target_ulong helper_fcvt_w_d(CPURISCVState *env, uint64_t frs1)
{
return float64_to_int32(frs1, &env->fp_status);
}
target_ulong helper_fcvt_wu_d(CPURISCVState *env, uint64_t frs1)
{
return (int32_t)float64_to_uint32(frs1, &env->fp_status);
}
#if defined(TARGET_RISCV64)
uint64_t helper_fcvt_l_d(CPURISCVState *env, uint64_t frs1)
{
return float64_to_int64(frs1, &env->fp_status);
}
uint64_t helper_fcvt_lu_d(CPURISCVState *env, uint64_t frs1)
{
return float64_to_uint64(frs1, &env->fp_status);
}
#endif
uint64_t helper_fcvt_d_w(CPURISCVState *env, target_ulong rs1)
{
return int32_to_float64((int32_t)rs1, &env->fp_status);
}
uint64_t helper_fcvt_d_wu(CPURISCVState *env, target_ulong rs1)
{
return uint32_to_float64((uint32_t)rs1, &env->fp_status);
}
#if defined(TARGET_RISCV64)
uint64_t helper_fcvt_d_l(CPURISCVState *env, uint64_t rs1)
{
return int64_to_float64(rs1, &env->fp_status);
}
uint64_t helper_fcvt_d_lu(CPURISCVState *env, uint64_t rs1)
{
return uint64_to_float64(rs1, &env->fp_status);
}
#endif
target_ulong helper_fclass_d(uint64_t frs1)
{
float64 f = frs1;
bool sign = float64_is_neg(f);
if (float64_is_infinity(f)) {
return sign ? 1 << 0 : 1 << 7;
} else if (float64_is_zero(f)) {
return sign ? 1 << 3 : 1 << 4;
} else if (float64_is_zero_or_denormal(f)) {
return sign ? 1 << 2 : 1 << 5;
} else if (float64_is_any_nan(f)) {
float_status s = { }; /* for snan_bit_is_one */
return float64_is_quiet_nan(f, &s) ? 1 << 9 : 1 << 8;
} else {
return sign ? 1 << 1 : 1 << 6;
}
}

526
qemu/target/riscv/helper.c Normal file

@ -0,0 +1,526 @@
/*
* RISC-V emulation helpers for qemu.
*
* Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
* Copyright (c) 2017-2018 SiFive, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "tcg-op.h"
#define RISCV_DEBUG_INTERRUPT 0
int riscv_cpu_mmu_index(CPURISCVState *env, bool ifetch)
{
#ifdef CONFIG_USER_ONLY
return 0;
#else
return env->priv;
#endif
}
#ifndef CONFIG_USER_ONLY
static int riscv_cpu_local_irq_pending(CPURISCVState *env)
{
target_ulong mstatus_mie = get_field(env->mstatus, MSTATUS_MIE);
target_ulong mstatus_sie = get_field(env->mstatus, MSTATUS_SIE);
target_ulong pending = atomic_read(&env->mip) & env->mie;
target_ulong mie = env->priv < PRV_M || (env->priv == PRV_M && mstatus_mie);
target_ulong sie = env->priv < PRV_S || (env->priv == PRV_S && mstatus_sie);
target_ulong irqs = (pending & ~env->mideleg & -mie) |
(pending & env->mideleg & -sie);
if (irqs) {
return ctz64(irqs); /* since non-zero */
} else {
return EXCP_NONE; /* indicates no pending interrupt */
}
}
#endif
bool riscv_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
#if !defined(CONFIG_USER_ONLY)
if (interrupt_request & CPU_INTERRUPT_HARD) {
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
int interruptno = riscv_cpu_local_irq_pending(env);
if (interruptno >= 0) {
cs->exception_index = RISCV_EXCP_INT_FLAG | interruptno;
riscv_cpu_do_interrupt(cs);
return true;
}
}
#endif
return false;
}
#if !defined(CONFIG_USER_ONLY)
/* get_physical_address - get the physical address for this virtual address
*
* Do a page table walk to obtain the physical address corresponding to a
* virtual address. Returns 0 (TRANSLATE_SUCCESS) if the translation was successful.
*
* Adapted from Spike's mmu_t::translate and mmu_t::walk
*
*/
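/* Walk parameters per translation mode (RISC-V privileged spec): Sv32 uses
 * 2 levels of 10 index bits with 4-byte PTEs, while Sv39/Sv48/Sv57 use
 * 3/4/5 levels of 9 index bits with 8-byte PTEs; MBARE bypasses translation.
 * The page offset is always the low PGSHIFT (12) bits. */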
static int get_physical_address(CPURISCVState *env, hwaddr *physical,
int *prot, target_ulong addr,
int access_type, int mmu_idx)
{
/* NOTE: the env->pc value visible here will not be
* correct, but the value visible to the exception handler
* (riscv_cpu_do_interrupt) is correct */
int mode = mmu_idx;
if (mode == PRV_M && access_type != MMU_INST_FETCH) {
if (get_field(env->mstatus, MSTATUS_MPRV)) {
mode = get_field(env->mstatus, MSTATUS_MPP);
}
}
if (mode == PRV_M || !riscv_feature(env, RISCV_FEATURE_MMU)) {
*physical = addr;
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
return TRANSLATE_SUCCESS;
}
*prot = 0;
target_ulong base;
int levels, ptidxbits, ptesize, vm, sum;
int mxr = get_field(env->mstatus, MSTATUS_MXR);
if (env->priv_ver >= PRIV_VERSION_1_10_0) {
base = get_field(env->satp, SATP_PPN) << PGSHIFT;
sum = get_field(env->mstatus, MSTATUS_SUM);
vm = get_field(env->satp, SATP_MODE);
switch (vm) {
case VM_1_10_SV32:
levels = 2; ptidxbits = 10; ptesize = 4; break;
case VM_1_10_SV39:
levels = 3; ptidxbits = 9; ptesize = 8; break;
case VM_1_10_SV48:
levels = 4; ptidxbits = 9; ptesize = 8; break;
case VM_1_10_SV57:
levels = 5; ptidxbits = 9; ptesize = 8; break;
case VM_1_10_MBARE:
*physical = addr;
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
return TRANSLATE_SUCCESS;
default:
g_assert_not_reached();
}
} else {
base = env->sptbr << PGSHIFT;
sum = !get_field(env->mstatus, MSTATUS_PUM);
vm = get_field(env->mstatus, MSTATUS_VM);
switch (vm) {
case VM_1_09_SV32:
levels = 2; ptidxbits = 10; ptesize = 4; break;
case VM_1_09_SV39:
levels = 3; ptidxbits = 9; ptesize = 8; break;
case VM_1_09_SV48:
levels = 4; ptidxbits = 9; ptesize = 8; break;
case VM_1_09_MBARE:
*physical = addr;
*prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
return TRANSLATE_SUCCESS;
default:
g_assert_not_reached();
}
}
CPUState *cs = CPU(riscv_env_get_cpu(env));
int va_bits = PGSHIFT + levels * ptidxbits;
target_ulong mask = (1L << (TARGET_LONG_BITS - (va_bits - 1))) - 1;
target_ulong masked_msbs = (addr >> (va_bits - 1)) & mask;
if (masked_msbs != 0 && masked_msbs != mask) {
return TRANSLATE_FAIL;
}
int ptshift = (levels - 1) * ptidxbits;
int i;
#if !TCG_OVERSIZED_GUEST
restart:
#endif
for (i = 0; i < levels; i++, ptshift -= ptidxbits) {
target_ulong idx = (addr >> (PGSHIFT + ptshift)) &
((1 << ptidxbits) - 1);
/* check that physical address of PTE is legal */
target_ulong pte_addr = base + idx * ptesize;
#if defined(TARGET_RISCV32)
target_ulong pte = ldl_phys(cs->as, pte_addr);
#elif defined(TARGET_RISCV64)
target_ulong pte = ldq_phys(cs->as, pte_addr);
#endif
target_ulong ppn = pte >> PTE_PPN_SHIFT;
if (!(pte & PTE_V)) {
/* Invalid PTE */
return TRANSLATE_FAIL;
} else if (!(pte & (PTE_R | PTE_W | PTE_X))) {
/* Inner PTE, continue walking */
base = ppn << PGSHIFT;
} else if ((pte & (PTE_R | PTE_W | PTE_X)) == PTE_W) {
/* Reserved leaf PTE flags: PTE_W */
return TRANSLATE_FAIL;
} else if ((pte & (PTE_R | PTE_W | PTE_X)) == (PTE_W | PTE_X)) {
/* Reserved leaf PTE flags: PTE_W + PTE_X */
return TRANSLATE_FAIL;
} else if ((pte & PTE_U) && ((mode != PRV_U) &&
(!sum || access_type == MMU_INST_FETCH))) {
/* User PTE flags when not U mode and mstatus.SUM is not set,
or the access type is an instruction fetch */
return TRANSLATE_FAIL;
} else if (!(pte & PTE_U) && (mode != PRV_S)) {
/* Supervisor PTE flags when not S mode */
return TRANSLATE_FAIL;
} else if (ppn & ((1ULL << ptshift) - 1)) {
/* Misaligned PPN */
return TRANSLATE_FAIL;
} else if (access_type == MMU_DATA_LOAD && !((pte & PTE_R) ||
((pte & PTE_X) && mxr))) {
/* Read access check failed */
return TRANSLATE_FAIL;
} else if (access_type == MMU_DATA_STORE && !(pte & PTE_W)) {
/* Write access check failed */
return TRANSLATE_FAIL;
} else if (access_type == MMU_INST_FETCH && !(pte & PTE_X)) {
/* Fetch access check failed */
return TRANSLATE_FAIL;
} else {
/* if necessary, set accessed and dirty bits. */
target_ulong updated_pte = pte | PTE_A |
(access_type == MMU_DATA_STORE ? PTE_D : 0);
/* Page table updates need to be atomic with MTTCG enabled */
if (updated_pte != pte) {
/*
* - if accessed or dirty bits need updating, and the PTE is
* in RAM, then we do so atomically with a compare and swap.
* - if the PTE is in IO space or ROM, then it can't be updated
* and we return TRANSLATE_FAIL.
* - if the PTE changed by the time we went to update it, then
* it is no longer valid and we must re-walk the page table.
*/
MemoryRegion *mr;
hwaddr l = sizeof(target_ulong), addr1;
mr = address_space_translate(cs->as, pte_addr, &addr1, &l, false);
if (memory_region_is_ram(mr)) {
target_ulong *pte_pa =
qemu_map_ram_ptr(env->uc, mr->ram_block, addr1);
#if TCG_OVERSIZED_GUEST
/* MTTCG is not enabled on oversized TCG guests so
* page table updates do not need to be atomic */
*pte_pa = pte = updated_pte;
#else
target_ulong old_pte =
atomic_cmpxchg(pte_pa, pte, updated_pte);
if (old_pte != pte) {
goto restart;
} else {
pte = updated_pte;
}
#endif
} else {
/* misconfigured PTE in ROM (AD bits are not preset) or
* PTE is in IO space and can't be updated atomically */
return TRANSLATE_FAIL;
}
}
/* for superpage mappings, make a fake leaf PTE for the TLB's
benefit. */
target_ulong vpn = addr >> PGSHIFT;
*physical = (ppn | (vpn & ((1L << ptshift) - 1))) << PGSHIFT;
/* set permissions on the TLB entry */
if ((pte & PTE_R) || ((pte & PTE_X) && mxr)) {
*prot |= PAGE_READ;
}
if ((pte & PTE_X)) {
*prot |= PAGE_EXEC;
}
/* add write permission on stores or if the page is already dirty,
so that we TLB miss on later writes to update the dirty bit */
if ((pte & PTE_W) &&
(access_type == MMU_DATA_STORE || (pte & PTE_D))) {
*prot |= PAGE_WRITE;
}
return TRANSLATE_SUCCESS;
}
}
return TRANSLATE_FAIL;
}
static void raise_mmu_exception(CPURISCVState *env, target_ulong address,
MMUAccessType access_type)
{
CPUState *cs = CPU(riscv_env_get_cpu(env));
int page_fault_exceptions =
(env->priv_ver >= PRIV_VERSION_1_10_0) &&
get_field(env->satp, SATP_MODE) != VM_1_10_MBARE;
switch (access_type) {
case MMU_INST_FETCH:
cs->exception_index = page_fault_exceptions ?
RISCV_EXCP_INST_PAGE_FAULT : RISCV_EXCP_INST_ACCESS_FAULT;
break;
case MMU_DATA_LOAD:
cs->exception_index = page_fault_exceptions ?
RISCV_EXCP_LOAD_PAGE_FAULT : RISCV_EXCP_LOAD_ACCESS_FAULT;
break;
case MMU_DATA_STORE:
cs->exception_index = page_fault_exceptions ?
RISCV_EXCP_STORE_PAGE_FAULT : RISCV_EXCP_STORE_AMO_ACCESS_FAULT;
break;
default:
g_assert_not_reached();
}
env->badaddr = address;
}
hwaddr riscv_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
RISCVCPU *cpu = RISCV_CPU(cs);
hwaddr phys_addr;
int prot;
int mmu_idx = cpu_mmu_index(&cpu->env, false);
if (get_physical_address(&cpu->env, &phys_addr, &prot, addr, 0, mmu_idx)) {
return -1;
}
return phys_addr;
}
void riscv_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
MMUAccessType access_type, int mmu_idx,
uintptr_t retaddr)
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
switch (access_type) {
case MMU_INST_FETCH:
cs->exception_index = RISCV_EXCP_INST_ADDR_MIS;
break;
case MMU_DATA_LOAD:
cs->exception_index = RISCV_EXCP_LOAD_ADDR_MIS;
break;
case MMU_DATA_STORE:
cs->exception_index = RISCV_EXCP_STORE_AMO_ADDR_MIS;
break;
default:
g_assert_not_reached();
}
env->badaddr = addr;
do_raise_exception_err(env, cs->exception_index, retaddr);
}
/* called by qemu's softmmu to fill the qemu tlb */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
int ret;
ret = riscv_cpu_handle_mmu_fault(cs, addr, size, access_type, mmu_idx);
if (ret == TRANSLATE_FAIL) {
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
do_raise_exception_err(env, cs->exception_index, retaddr);
}
}
#endif
int riscv_cpu_handle_mmu_fault(CPUState *cs, vaddr address, int size,
int rw, int mmu_idx)
{
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
#if !defined(CONFIG_USER_ONLY)
hwaddr pa = 0;
int prot;
#endif
int ret = TRANSLATE_FAIL;
qemu_log_mask(CPU_LOG_MMU,
"%s pc " TARGET_FMT_lx " ad %" VADDR_PRIx " rw %d mmu_idx \
%d\n", __func__, env->pc, address, rw, mmu_idx);
#if !defined(CONFIG_USER_ONLY)
ret = get_physical_address(env, &pa, &prot, address, rw, mmu_idx);
qemu_log_mask(CPU_LOG_MMU,
"%s address=%" VADDR_PRIx " ret %d physical " TARGET_FMT_plx
" prot %d\n", __func__, address, ret, pa, prot);
if (!pmp_hart_has_privs(env, pa, TARGET_PAGE_SIZE, 1 << rw)) {
ret = TRANSLATE_FAIL;
}
if (ret == TRANSLATE_SUCCESS) {
tlb_set_page(cs, address & TARGET_PAGE_MASK, pa & TARGET_PAGE_MASK,
prot, mmu_idx, TARGET_PAGE_SIZE);
} else if (ret == TRANSLATE_FAIL) {
raise_mmu_exception(env, address, rw);
}
#else
switch (rw) {
case MMU_INST_FETCH:
cs->exception_index = RISCV_EXCP_INST_PAGE_FAULT;
break;
case MMU_DATA_LOAD:
cs->exception_index = RISCV_EXCP_LOAD_PAGE_FAULT;
break;
case MMU_DATA_STORE:
cs->exception_index = RISCV_EXCP_STORE_PAGE_FAULT;
break;
}
#endif
return ret;
}
/*
* Handle Traps
*
* Adapted from Spike's processor_t::take_trap.
*
*/
void riscv_cpu_do_interrupt(CPUState *cs)
{
#if !defined(CONFIG_USER_ONLY)
RISCVCPU *cpu = RISCV_CPU(cs);
CPURISCVState *env = &cpu->env;
if (RISCV_DEBUG_INTERRUPT) {
int log_cause = cs->exception_index & RISCV_EXCP_INT_MASK;
if (cs->exception_index & RISCV_EXCP_INT_FLAG) {
qemu_log_mask(LOG_TRACE, "core 0: intr %s, epc 0x" TARGET_FMT_lx,
riscv_intr_names[log_cause], env->pc);
} else {
qemu_log_mask(LOG_TRACE, "core 0: trap %s, epc 0x" TARGET_FMT_lx,
riscv_excp_names[log_cause], env->pc);
}
}
target_ulong fixed_cause = 0;
if (cs->exception_index & (RISCV_EXCP_INT_FLAG)) {
/* hacky for now: the MSB indicates an interrupt, but cs->exception_index
is only 32 bits wide */
fixed_cause = cs->exception_index & RISCV_EXCP_INT_MASK;
fixed_cause |= ((target_ulong)1) << (TARGET_LONG_BITS - 1);
} else {
/* fixup User ECALL -> correct priv ECALL */
if (cs->exception_index == RISCV_EXCP_U_ECALL) {
switch (env->priv) {
case PRV_U:
fixed_cause = RISCV_EXCP_U_ECALL;
break;
case PRV_S:
fixed_cause = RISCV_EXCP_S_ECALL;
break;
case PRV_H:
fixed_cause = RISCV_EXCP_H_ECALL;
break;
case PRV_M:
fixed_cause = RISCV_EXCP_M_ECALL;
break;
}
} else {
fixed_cause = cs->exception_index;
}
}
target_ulong backup_epc = env->pc;
target_ulong bit = fixed_cause;
target_ulong deleg = env->medeleg;
int hasbadaddr =
(fixed_cause == RISCV_EXCP_INST_ADDR_MIS) ||
(fixed_cause == RISCV_EXCP_INST_ACCESS_FAULT) ||
(fixed_cause == RISCV_EXCP_LOAD_ADDR_MIS) ||
(fixed_cause == RISCV_EXCP_STORE_AMO_ADDR_MIS) ||
(fixed_cause == RISCV_EXCP_LOAD_ACCESS_FAULT) ||
(fixed_cause == RISCV_EXCP_STORE_AMO_ACCESS_FAULT) ||
(fixed_cause == RISCV_EXCP_INST_PAGE_FAULT) ||
(fixed_cause == RISCV_EXCP_LOAD_PAGE_FAULT) ||
(fixed_cause == RISCV_EXCP_STORE_PAGE_FAULT);
if (bit & ((target_ulong)1 << (TARGET_LONG_BITS - 1))) {
deleg = env->mideleg;
bit &= ~((target_ulong)1 << (TARGET_LONG_BITS - 1));
}
if (env->priv <= PRV_S && bit < 64 && ((deleg >> bit) & 1)) {
/* handle the trap in S-mode */
/* No need to check STVEC for misaligned - lower 2 bits cannot be set */
env->pc = env->stvec;
env->scause = fixed_cause;
env->sepc = backup_epc;
if (hasbadaddr) {
if (RISCV_DEBUG_INTERRUPT) {
qemu_log_mask(LOG_TRACE, "core " TARGET_FMT_ld
": badaddr 0x" TARGET_FMT_lx, env->mhartid, env->badaddr);
}
env->sbadaddr = env->badaddr;
} else {
/* otherwise we must clear sbadaddr/stval
* todo: support populating stval on illegal instructions */
env->sbadaddr = 0;
}
target_ulong s = env->mstatus;
s = set_field(s, MSTATUS_SPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
get_field(s, MSTATUS_SIE) : get_field(s, MSTATUS_UIE << env->priv));
s = set_field(s, MSTATUS_SPP, env->priv);
s = set_field(s, MSTATUS_SIE, 0);
csr_write_helper(env, s, CSR_MSTATUS);
riscv_set_mode(env, PRV_S);
} else {
/* No need to check MTVEC for misaligned - lower 2 bits cannot be set */
env->pc = env->mtvec;
env->mepc = backup_epc;
env->mcause = fixed_cause;
if (hasbadaddr) {
if (RISCV_DEBUG_INTERRUPT) {
qemu_log_mask(LOG_TRACE, "core " TARGET_FMT_ld
": badaddr 0x" TARGET_FMT_lx, env->mhartid, env->badaddr);
}
env->mbadaddr = env->badaddr;
} else {
/* otherwise we must clear mbadaddr/mtval
* todo: support populating mtval on illegal instructions */
env->mbadaddr = 0;
}
target_ulong s = env->mstatus;
s = set_field(s, MSTATUS_MPIE, env->priv_ver >= PRIV_VERSION_1_10_0 ?
get_field(s, MSTATUS_MIE) : get_field(s, MSTATUS_UIE << env->priv));
s = set_field(s, MSTATUS_MPP, env->priv);
s = set_field(s, MSTATUS_MIE, 0);
csr_write_helper(env, s, CSR_MSTATUS);
riscv_set_mode(env, PRV_M);
}
/* TODO yield load reservation */
#endif
cs->exception_index = EXCP_NONE; /* mark handled to qemu */
}


@ -0,0 +1,82 @@
// Unicorn
DEF_HELPER_4(uc_tracecode, void, i32, i32, ptr, i64)
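/* DEF_HELPER_N / DEF_HELPER_FLAGS_N(name, [flags,] ret, args...) declare
 * helper_<name> taking N arguments; "tl" is target_ulong, "env" is the CPU
 * state pointer, and the TCG_CALL_NO_* flags describe which side effects the
 * helper is guaranteed not to have, so TCG can optimize around the call. */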
/* Exceptions */
DEF_HELPER_2(raise_exception, noreturn, env, i32)
/* Floating Point - rounding mode */
DEF_HELPER_FLAGS_2(set_rounding_mode, TCG_CALL_NO_WG, void, env, i32)
/* Floating Point - fused */
DEF_HELPER_FLAGS_4(fmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fnmsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fnmsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fnmadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
DEF_HELPER_FLAGS_4(fnmadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64, i64)
/* Floating Point - Single Precision */
DEF_HELPER_FLAGS_3(fadd_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fsub_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmul_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fdiv_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmin_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmax_s, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_2(fsqrt_s, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_3(fle_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(flt_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(feq_s, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_2(fcvt_w_s, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_wu_s, TCG_CALL_NO_RWG, tl, env, i64)
#if defined(TARGET_RISCV64)
DEF_HELPER_FLAGS_2(fcvt_l_s, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_lu_s, TCG_CALL_NO_RWG, tl, env, i64)
#endif
DEF_HELPER_FLAGS_2(fcvt_s_w, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_s_wu, TCG_CALL_NO_RWG, i64, env, tl)
#if defined(TARGET_RISCV64)
DEF_HELPER_FLAGS_2(fcvt_s_l, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_s_lu, TCG_CALL_NO_RWG, i64, env, tl)
#endif
DEF_HELPER_FLAGS_1(fclass_s, TCG_CALL_NO_RWG_SE, tl, i64)
/* Floating Point - Double Precision */
DEF_HELPER_FLAGS_3(fadd_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fsub_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmul_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fdiv_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmin_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_3(fmax_d, TCG_CALL_NO_RWG, i64, env, i64, i64)
DEF_HELPER_FLAGS_2(fcvt_s_d, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fcvt_d_s, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_2(fsqrt_d, TCG_CALL_NO_RWG, i64, env, i64)
DEF_HELPER_FLAGS_3(fle_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(flt_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_3(feq_d, TCG_CALL_NO_RWG, tl, env, i64, i64)
DEF_HELPER_FLAGS_2(fcvt_w_d, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_wu_d, TCG_CALL_NO_RWG, tl, env, i64)
#if defined(TARGET_RISCV64)
DEF_HELPER_FLAGS_2(fcvt_l_d, TCG_CALL_NO_RWG, tl, env, i64)
DEF_HELPER_FLAGS_2(fcvt_lu_d, TCG_CALL_NO_RWG, tl, env, i64)
#endif
DEF_HELPER_FLAGS_2(fcvt_d_w, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_d_wu, TCG_CALL_NO_RWG, i64, env, tl)
#if defined(TARGET_RISCV64)
DEF_HELPER_FLAGS_2(fcvt_d_l, TCG_CALL_NO_RWG, i64, env, tl)
DEF_HELPER_FLAGS_2(fcvt_d_lu, TCG_CALL_NO_RWG, i64, env, tl)
#endif
DEF_HELPER_FLAGS_1(fclass_d, TCG_CALL_NO_RWG_SE, tl, i64)
/* Special functions */
DEF_HELPER_3(csrrw, tl, env, tl, tl)
DEF_HELPER_4(csrrs, tl, env, tl, tl, tl)
DEF_HELPER_4(csrrc, tl, env, tl, tl, tl)
#ifndef CONFIG_USER_ONLY
DEF_HELPER_2(sret, tl, env, tl)
DEF_HELPER_2(mret, tl, env, tl)
DEF_HELPER_1(wfi, void, env)
// Unicorn: Renamed from tlb_flush to avoid clashing with the preprocessor redefinitions Unicorn uses.
DEF_HELPER_1(riscv_tlb_flush, void, env)
#endif

364
qemu/target/riscv/instmap.h Normal file

@ -0,0 +1,364 @@
/*
* RISC-V emulation for qemu: Instruction decode helpers
*
* Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define MASK_OP_MAJOR(op) (op & 0x7F)
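/* The major opcode is the low 7 bits of the instruction word; for the 32-bit
 * encodings decoded here its two least significant bits are always 0b11
 * (shorter values belong to the compressed "C" encodings). */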
enum {
/* rv32i, rv64i, rv32m */
OPC_RISC_LUI = (0x37),
OPC_RISC_AUIPC = (0x17),
OPC_RISC_JAL = (0x6F),
OPC_RISC_JALR = (0x67),
OPC_RISC_BRANCH = (0x63),
OPC_RISC_LOAD = (0x03),
OPC_RISC_STORE = (0x23),
OPC_RISC_ARITH_IMM = (0x13),
OPC_RISC_ARITH = (0x33),
OPC_RISC_FENCE = (0x0F),
OPC_RISC_SYSTEM = (0x73),
/* rv64i, rv64m */
OPC_RISC_ARITH_IMM_W = (0x1B),
OPC_RISC_ARITH_W = (0x3B),
/* rv32a, rv64a */
OPC_RISC_ATOMIC = (0x2F),
/* floating point */
OPC_RISC_FP_LOAD = (0x7),
OPC_RISC_FP_STORE = (0x27),
OPC_RISC_FMADD = (0x43),
OPC_RISC_FMSUB = (0x47),
OPC_RISC_FNMSUB = (0x4B),
OPC_RISC_FNMADD = (0x4F),
OPC_RISC_FP_ARITH = (0x53),
};
#define MASK_OP_ARITH(op) (MASK_OP_MAJOR(op) | (op & ((0x7 << 12) | \
(0x7F << 25))))
enum {
OPC_RISC_ADD = OPC_RISC_ARITH | (0x0 << 12) | (0x00 << 25),
OPC_RISC_SUB = OPC_RISC_ARITH | (0x0 << 12) | (0x20 << 25),
OPC_RISC_SLL = OPC_RISC_ARITH | (0x1 << 12) | (0x00 << 25),
OPC_RISC_SLT = OPC_RISC_ARITH | (0x2 << 12) | (0x00 << 25),
OPC_RISC_SLTU = OPC_RISC_ARITH | (0x3 << 12) | (0x00 << 25),
OPC_RISC_XOR = OPC_RISC_ARITH | (0x4 << 12) | (0x00 << 25),
OPC_RISC_SRL = OPC_RISC_ARITH | (0x5 << 12) | (0x00 << 25),
OPC_RISC_SRA = OPC_RISC_ARITH | (0x5 << 12) | (0x20 << 25),
OPC_RISC_OR = OPC_RISC_ARITH | (0x6 << 12) | (0x00 << 25),
OPC_RISC_AND = OPC_RISC_ARITH | (0x7 << 12) | (0x00 << 25),
/* RV64M */
OPC_RISC_MUL = OPC_RISC_ARITH | (0x0 << 12) | (0x01 << 25),
OPC_RISC_MULH = OPC_RISC_ARITH | (0x1 << 12) | (0x01 << 25),
OPC_RISC_MULHSU = OPC_RISC_ARITH | (0x2 << 12) | (0x01 << 25),
OPC_RISC_MULHU = OPC_RISC_ARITH | (0x3 << 12) | (0x01 << 25),
OPC_RISC_DIV = OPC_RISC_ARITH | (0x4 << 12) | (0x01 << 25),
OPC_RISC_DIVU = OPC_RISC_ARITH | (0x5 << 12) | (0x01 << 25),
OPC_RISC_REM = OPC_RISC_ARITH | (0x6 << 12) | (0x01 << 25),
OPC_RISC_REMU = OPC_RISC_ARITH | (0x7 << 12) | (0x01 << 25),
};
#define MASK_OP_ARITH_IMM(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
enum {
OPC_RISC_ADDI = OPC_RISC_ARITH_IMM | (0x0 << 12),
OPC_RISC_SLTI = OPC_RISC_ARITH_IMM | (0x2 << 12),
OPC_RISC_SLTIU = OPC_RISC_ARITH_IMM | (0x3 << 12),
OPC_RISC_XORI = OPC_RISC_ARITH_IMM | (0x4 << 12),
OPC_RISC_ORI = OPC_RISC_ARITH_IMM | (0x6 << 12),
OPC_RISC_ANDI = OPC_RISC_ARITH_IMM | (0x7 << 12),
OPC_RISC_SLLI = OPC_RISC_ARITH_IMM | (0x1 << 12), /* additional part of
IMM */
OPC_RISC_SHIFT_RIGHT_I = OPC_RISC_ARITH_IMM | (0x5 << 12) /* SRAI, SRLI */
};
#define MASK_OP_BRANCH(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
enum {
OPC_RISC_BEQ = OPC_RISC_BRANCH | (0x0 << 12),
OPC_RISC_BNE = OPC_RISC_BRANCH | (0x1 << 12),
OPC_RISC_BLT = OPC_RISC_BRANCH | (0x4 << 12),
OPC_RISC_BGE = OPC_RISC_BRANCH | (0x5 << 12),
OPC_RISC_BLTU = OPC_RISC_BRANCH | (0x6 << 12),
OPC_RISC_BGEU = OPC_RISC_BRANCH | (0x7 << 12)
};
enum {
OPC_RISC_ADDIW = OPC_RISC_ARITH_IMM_W | (0x0 << 12),
OPC_RISC_SLLIW = OPC_RISC_ARITH_IMM_W | (0x1 << 12), /* additional part of
IMM */
OPC_RISC_SHIFT_RIGHT_IW = OPC_RISC_ARITH_IMM_W | (0x5 << 12) /* SRAI, SRLI
*/
};
enum {
OPC_RISC_ADDW = OPC_RISC_ARITH_W | (0x0 << 12) | (0x00 << 25),
OPC_RISC_SUBW = OPC_RISC_ARITH_W | (0x0 << 12) | (0x20 << 25),
OPC_RISC_SLLW = OPC_RISC_ARITH_W | (0x1 << 12) | (0x00 << 25),
OPC_RISC_SRLW = OPC_RISC_ARITH_W | (0x5 << 12) | (0x00 << 25),
OPC_RISC_SRAW = OPC_RISC_ARITH_W | (0x5 << 12) | (0x20 << 25),
/* RV64M */
OPC_RISC_MULW = OPC_RISC_ARITH_W | (0x0 << 12) | (0x01 << 25),
OPC_RISC_DIVW = OPC_RISC_ARITH_W | (0x4 << 12) | (0x01 << 25),
OPC_RISC_DIVUW = OPC_RISC_ARITH_W | (0x5 << 12) | (0x01 << 25),
OPC_RISC_REMW = OPC_RISC_ARITH_W | (0x6 << 12) | (0x01 << 25),
OPC_RISC_REMUW = OPC_RISC_ARITH_W | (0x7 << 12) | (0x01 << 25),
};
#define MASK_OP_LOAD(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
enum {
OPC_RISC_LB = OPC_RISC_LOAD | (0x0 << 12),
OPC_RISC_LH = OPC_RISC_LOAD | (0x1 << 12),
OPC_RISC_LW = OPC_RISC_LOAD | (0x2 << 12),
OPC_RISC_LD = OPC_RISC_LOAD | (0x3 << 12),
OPC_RISC_LBU = OPC_RISC_LOAD | (0x4 << 12),
OPC_RISC_LHU = OPC_RISC_LOAD | (0x5 << 12),
OPC_RISC_LWU = OPC_RISC_LOAD | (0x6 << 12),
};
#define MASK_OP_STORE(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
enum {
OPC_RISC_SB = OPC_RISC_STORE | (0x0 << 12),
OPC_RISC_SH = OPC_RISC_STORE | (0x1 << 12),
OPC_RISC_SW = OPC_RISC_STORE | (0x2 << 12),
OPC_RISC_SD = OPC_RISC_STORE | (0x3 << 12),
};
#define MASK_OP_JALR(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
/* no enum since OPC_RISC_JALR is the actual value */
#define MASK_OP_ATOMIC(op) \
(MASK_OP_MAJOR(op) | (op & ((0x7 << 12) | (0x7F << 25))))
#define MASK_OP_ATOMIC_NO_AQ_RL_SZ(op) \
(MASK_OP_MAJOR(op) | (op & (0x1F << 27)))
enum {
OPC_RISC_LR = OPC_RISC_ATOMIC | (0x02 << 27),
OPC_RISC_SC = OPC_RISC_ATOMIC | (0x03 << 27),
OPC_RISC_AMOSWAP = OPC_RISC_ATOMIC | (0x01 << 27),
OPC_RISC_AMOADD = OPC_RISC_ATOMIC | (0x00 << 27),
OPC_RISC_AMOXOR = OPC_RISC_ATOMIC | (0x04 << 27),
OPC_RISC_AMOAND = OPC_RISC_ATOMIC | (0x0C << 27),
OPC_RISC_AMOOR = OPC_RISC_ATOMIC | (0x08 << 27),
OPC_RISC_AMOMIN = OPC_RISC_ATOMIC | (0x10 << 27),
OPC_RISC_AMOMAX = OPC_RISC_ATOMIC | (0x14 << 27),
OPC_RISC_AMOMINU = OPC_RISC_ATOMIC | (0x18 << 27),
OPC_RISC_AMOMAXU = OPC_RISC_ATOMIC | (0x1C << 27),
};
#define MASK_OP_SYSTEM(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
enum {
OPC_RISC_ECALL = OPC_RISC_SYSTEM | (0x0 << 12),
OPC_RISC_EBREAK = OPC_RISC_SYSTEM | (0x0 << 12),
OPC_RISC_ERET = OPC_RISC_SYSTEM | (0x0 << 12),
OPC_RISC_MRTS = OPC_RISC_SYSTEM | (0x0 << 12),
OPC_RISC_MRTH = OPC_RISC_SYSTEM | (0x0 << 12),
OPC_RISC_HRTS = OPC_RISC_SYSTEM | (0x0 << 12),
OPC_RISC_WFI = OPC_RISC_SYSTEM | (0x0 << 12),
OPC_RISC_SFENCEVM = OPC_RISC_SYSTEM | (0x0 << 12),
OPC_RISC_CSRRW = OPC_RISC_SYSTEM | (0x1 << 12),
OPC_RISC_CSRRS = OPC_RISC_SYSTEM | (0x2 << 12),
OPC_RISC_CSRRC = OPC_RISC_SYSTEM | (0x3 << 12),
OPC_RISC_CSRRWI = OPC_RISC_SYSTEM | (0x5 << 12),
OPC_RISC_CSRRSI = OPC_RISC_SYSTEM | (0x6 << 12),
OPC_RISC_CSRRCI = OPC_RISC_SYSTEM | (0x7 << 12),
};
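/* ECALL, EBREAK, ERET, WFI, etc. all share funct3 == 0 under the SYSTEM
 * opcode, so they alias to the same value here; the decoder separates them
 * using the remaining immediate/funct bits of the instruction. */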
#define MASK_OP_FP_LOAD(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
enum {
OPC_RISC_FLW = OPC_RISC_FP_LOAD | (0x2 << 12),
OPC_RISC_FLD = OPC_RISC_FP_LOAD | (0x3 << 12),
};
#define MASK_OP_FP_STORE(op) (MASK_OP_MAJOR(op) | (op & (0x7 << 12)))
enum {
OPC_RISC_FSW = OPC_RISC_FP_STORE | (0x2 << 12),
OPC_RISC_FSD = OPC_RISC_FP_STORE | (0x3 << 12),
};
#define MASK_OP_FP_FMADD(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
enum {
OPC_RISC_FMADD_S = OPC_RISC_FMADD | (0x0 << 25),
OPC_RISC_FMADD_D = OPC_RISC_FMADD | (0x1 << 25),
};
#define MASK_OP_FP_FMSUB(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
enum {
OPC_RISC_FMSUB_S = OPC_RISC_FMSUB | (0x0 << 25),
OPC_RISC_FMSUB_D = OPC_RISC_FMSUB | (0x1 << 25),
};
#define MASK_OP_FP_FNMADD(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
enum {
OPC_RISC_FNMADD_S = OPC_RISC_FNMADD | (0x0 << 25),
OPC_RISC_FNMADD_D = OPC_RISC_FNMADD | (0x1 << 25),
};
#define MASK_OP_FP_FNMSUB(op) (MASK_OP_MAJOR(op) | (op & (0x3 << 25)))
enum {
OPC_RISC_FNMSUB_S = OPC_RISC_FNMSUB | (0x0 << 25),
OPC_RISC_FNMSUB_D = OPC_RISC_FNMSUB | (0x1 << 25),
};
#define MASK_OP_FP_ARITH(op) (MASK_OP_MAJOR(op) | (op & (0x7F << 25)))
enum {
/* float */
OPC_RISC_FADD_S = OPC_RISC_FP_ARITH | (0x0 << 25),
OPC_RISC_FSUB_S = OPC_RISC_FP_ARITH | (0x4 << 25),
OPC_RISC_FMUL_S = OPC_RISC_FP_ARITH | (0x8 << 25),
OPC_RISC_FDIV_S = OPC_RISC_FP_ARITH | (0xC << 25),
OPC_RISC_FSGNJ_S = OPC_RISC_FP_ARITH | (0x10 << 25),
OPC_RISC_FSGNJN_S = OPC_RISC_FP_ARITH | (0x10 << 25),
OPC_RISC_FSGNJX_S = OPC_RISC_FP_ARITH | (0x10 << 25),
OPC_RISC_FMIN_S = OPC_RISC_FP_ARITH | (0x14 << 25),
OPC_RISC_FMAX_S = OPC_RISC_FP_ARITH | (0x14 << 25),
OPC_RISC_FSQRT_S = OPC_RISC_FP_ARITH | (0x2C << 25),
OPC_RISC_FEQ_S = OPC_RISC_FP_ARITH | (0x50 << 25),
OPC_RISC_FLT_S = OPC_RISC_FP_ARITH | (0x50 << 25),
OPC_RISC_FLE_S = OPC_RISC_FP_ARITH | (0x50 << 25),
OPC_RISC_FCVT_W_S = OPC_RISC_FP_ARITH | (0x60 << 25),
OPC_RISC_FCVT_WU_S = OPC_RISC_FP_ARITH | (0x60 << 25),
OPC_RISC_FCVT_L_S = OPC_RISC_FP_ARITH | (0x60 << 25),
OPC_RISC_FCVT_LU_S = OPC_RISC_FP_ARITH | (0x60 << 25),
OPC_RISC_FCVT_S_W = OPC_RISC_FP_ARITH | (0x68 << 25),
OPC_RISC_FCVT_S_WU = OPC_RISC_FP_ARITH | (0x68 << 25),
OPC_RISC_FCVT_S_L = OPC_RISC_FP_ARITH | (0x68 << 25),
OPC_RISC_FCVT_S_LU = OPC_RISC_FP_ARITH | (0x68 << 25),
OPC_RISC_FMV_X_S = OPC_RISC_FP_ARITH | (0x70 << 25),
OPC_RISC_FCLASS_S = OPC_RISC_FP_ARITH | (0x70 << 25),
OPC_RISC_FMV_S_X = OPC_RISC_FP_ARITH | (0x78 << 25),
/* double */
OPC_RISC_FADD_D = OPC_RISC_FP_ARITH | (0x1 << 25),
OPC_RISC_FSUB_D = OPC_RISC_FP_ARITH | (0x5 << 25),
OPC_RISC_FMUL_D = OPC_RISC_FP_ARITH | (0x9 << 25),
OPC_RISC_FDIV_D = OPC_RISC_FP_ARITH | (0xD << 25),
OPC_RISC_FSGNJ_D = OPC_RISC_FP_ARITH | (0x11 << 25),
OPC_RISC_FSGNJN_D = OPC_RISC_FP_ARITH | (0x11 << 25),
OPC_RISC_FSGNJX_D = OPC_RISC_FP_ARITH | (0x11 << 25),
OPC_RISC_FMIN_D = OPC_RISC_FP_ARITH | (0x15 << 25),
OPC_RISC_FMAX_D = OPC_RISC_FP_ARITH | (0x15 << 25),
OPC_RISC_FCVT_S_D = OPC_RISC_FP_ARITH | (0x20 << 25),
OPC_RISC_FCVT_D_S = OPC_RISC_FP_ARITH | (0x21 << 25),
OPC_RISC_FSQRT_D = OPC_RISC_FP_ARITH | (0x2D << 25),
OPC_RISC_FEQ_D = OPC_RISC_FP_ARITH | (0x51 << 25),
OPC_RISC_FLT_D = OPC_RISC_FP_ARITH | (0x51 << 25),
OPC_RISC_FLE_D = OPC_RISC_FP_ARITH | (0x51 << 25),
OPC_RISC_FCVT_W_D = OPC_RISC_FP_ARITH | (0x61 << 25),
OPC_RISC_FCVT_WU_D = OPC_RISC_FP_ARITH | (0x61 << 25),
OPC_RISC_FCVT_L_D = OPC_RISC_FP_ARITH | (0x61 << 25),
OPC_RISC_FCVT_LU_D = OPC_RISC_FP_ARITH | (0x61 << 25),
OPC_RISC_FCVT_D_W = OPC_RISC_FP_ARITH | (0x69 << 25),
OPC_RISC_FCVT_D_WU = OPC_RISC_FP_ARITH | (0x69 << 25),
OPC_RISC_FCVT_D_L = OPC_RISC_FP_ARITH | (0x69 << 25),
OPC_RISC_FCVT_D_LU = OPC_RISC_FP_ARITH | (0x69 << 25),
OPC_RISC_FMV_X_D = OPC_RISC_FP_ARITH | (0x71 << 25),
OPC_RISC_FCLASS_D = OPC_RISC_FP_ARITH | (0x71 << 25),
OPC_RISC_FMV_D_X = OPC_RISC_FP_ARITH | (0x79 << 25),
};
#define GET_B_IMM(inst) ((extract32(inst, 8, 4) << 1) \
| (extract32(inst, 25, 6) << 5) \
| (extract32(inst, 7, 1) << 11) \
| (sextract64(inst, 31, 1) << 12))
#define GET_STORE_IMM(inst) ((extract32(inst, 7, 5)) \
| (sextract64(inst, 25, 7) << 5))
#define GET_JAL_IMM(inst) ((extract32(inst, 21, 10) << 1) \
| (extract32(inst, 20, 1) << 11) \
| (extract32(inst, 12, 8) << 12) \
| (sextract64(inst, 31, 1) << 20))
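/* These macros reassemble the immediate fields that the base ISA scatters
 * across the instruction word: B-type stores imm[12|10:5] in inst[31:25] and
 * imm[4:1|11] in inst[11:7]; J-type stores imm[20|10:1|11|19:12] in
 * inst[31:12]. Bit 0 of both immediates is implicitly zero. */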
#define GET_RM(inst) extract32(inst, 12, 3)
#define GET_RS3(inst) extract32(inst, 27, 5)
#define GET_RS1(inst) extract32(inst, 15, 5)
#define GET_RS2(inst) extract32(inst, 20, 5)
#define GET_RD(inst) extract32(inst, 7, 5)
#define GET_IMM(inst) sextract64(inst, 20, 12)
/* RVC decoding macros */
#define GET_C_IMM(inst) (extract32(inst, 2, 5) \
| (sextract64(inst, 12, 1) << 5))
#define GET_C_ZIMM(inst) (extract32(inst, 2, 5) \
| (extract32(inst, 12, 1) << 5))
#define GET_C_ADDI4SPN_IMM(inst) ((extract32(inst, 6, 1) << 2) \
| (extract32(inst, 5, 1) << 3) \
| (extract32(inst, 11, 2) << 4) \
| (extract32(inst, 7, 4) << 6))
#define GET_C_ADDI16SP_IMM(inst) ((extract32(inst, 6, 1) << 4) \
| (extract32(inst, 2, 1) << 5) \
| (extract32(inst, 5, 1) << 6) \
| (extract32(inst, 3, 2) << 7) \
| (sextract64(inst, 12, 1) << 9))
#define GET_C_LWSP_IMM(inst) ((extract32(inst, 4, 3) << 2) \
| (extract32(inst, 12, 1) << 5) \
| (extract32(inst, 2, 2) << 6))
#define GET_C_LDSP_IMM(inst) ((extract32(inst, 5, 2) << 3) \
| (extract32(inst, 12, 1) << 5) \
| (extract32(inst, 2, 3) << 6))
#define GET_C_SWSP_IMM(inst) ((extract32(inst, 9, 4) << 2) \
| (extract32(inst, 7, 2) << 6))
#define GET_C_SDSP_IMM(inst) ((extract32(inst, 10, 3) << 3) \
| (extract32(inst, 7, 3) << 6))
#define GET_C_LW_IMM(inst) ((extract32(inst, 6, 1) << 2) \
| (extract32(inst, 10, 3) << 3) \
| (extract32(inst, 5, 1) << 6))
#define GET_C_LD_IMM(inst) ((extract32(inst, 10, 3) << 3) \
| (extract32(inst, 5, 2) << 6))
#define GET_C_J_IMM(inst) ((extract32(inst, 3, 3) << 1) \
| (extract32(inst, 11, 1) << 4) \
| (extract32(inst, 2, 1) << 5) \
| (extract32(inst, 7, 1) << 6) \
| (extract32(inst, 6, 1) << 7) \
| (extract32(inst, 9, 2) << 8) \
| (extract32(inst, 8, 1) << 10) \
| (sextract64(inst, 12, 1) << 11))
#define GET_C_B_IMM(inst) ((extract32(inst, 3, 2) << 1) \
| (extract32(inst, 10, 2) << 3) \
| (extract32(inst, 2, 1) << 5) \
| (extract32(inst, 5, 2) << 6) \
| (sextract64(inst, 12, 1) << 8))
#define GET_C_SIMM3(inst) extract32(inst, 10, 3)
#define GET_C_RD(inst) GET_RD(inst)
#define GET_C_RS1(inst) GET_RD(inst)
#define GET_C_RS2(inst) extract32(inst, 2, 5)
#define GET_C_RS1S(inst) (8 + extract32(inst, 7, 3))
#define GET_C_RS2S(inst) (8 + extract32(inst, 2, 3))


@ -0,0 +1,782 @@
/*
* RISC-V Emulation Helpers for QEMU.
*
* Copyright (c) 2016-2017 Sagar Karandikar, sagark@eecs.berkeley.edu
* Copyright (c) 2017-2018 SiFive, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#ifndef CONFIG_USER_ONLY
#if defined(TARGET_RISCV32)
static const char valid_vm_1_09[16] = {
[VM_1_09_MBARE] = 1,
[VM_1_09_SV32] = 1,
};
static const char valid_vm_1_10[16] = {
[VM_1_10_MBARE] = 1,
[VM_1_10_SV32] = 1
};
#elif defined(TARGET_RISCV64)
static const char valid_vm_1_09[16] = {
[VM_1_09_MBARE] = 1,
[VM_1_09_SV39] = 1,
[VM_1_09_SV48] = 1,
};
static const char valid_vm_1_10[16] = {
[VM_1_10_MBARE] = 1,
[VM_1_10_SV39] = 1,
[VM_1_10_SV48] = 1,
[VM_1_10_SV57] = 1
};
#endif
static int validate_vm(CPURISCVState *env, target_ulong vm)
{
return (env->priv_ver >= PRIV_VERSION_1_10_0) ?
valid_vm_1_10[vm & 0xf] : valid_vm_1_09[vm & 0xf];
}
#endif
/* Exceptions processing helpers */
void QEMU_NORETURN do_raise_exception_err(CPURISCVState *env,
uint32_t exception, uintptr_t pc)
{
CPUState *cs = CPU(riscv_env_get_cpu(env));
qemu_log_mask(CPU_LOG_INT, "%s: %d\n", __func__, exception);
cs->exception_index = exception;
cpu_loop_exit_restore(cs, pc);
}
void helper_raise_exception(CPURISCVState *env, uint32_t exception)
{
do_raise_exception_err(env, exception, 0);
}
static void validate_mstatus_fs(CPURISCVState *env, uintptr_t ra)
{
#ifndef CONFIG_USER_ONLY
if (!(env->mstatus & MSTATUS_FS)) {
do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, ra);
}
#endif
}
/*
* Handle writes to CSRs and any resulting special behavior
*
* Adapted from Spike's processor_t::set_csr
*/
void csr_write_helper(CPURISCVState *env, target_ulong val_to_write,
target_ulong csrno)
{
#ifndef CONFIG_USER_ONLY
uint64_t delegable_ints = MIP_SSIP | MIP_STIP | MIP_SEIP | (1 << IRQ_X_COP);
uint64_t all_ints = delegable_ints | MIP_MSIP | MIP_MTIP;
#endif
switch (csrno) {
case CSR_FFLAGS:
validate_mstatus_fs(env, GETPC());
cpu_riscv_set_fflags(env, val_to_write & (FSR_AEXC >> FSR_AEXC_SHIFT));
break;
case CSR_FRM:
validate_mstatus_fs(env, GETPC());
env->frm = val_to_write & (FSR_RD >> FSR_RD_SHIFT);
break;
case CSR_FCSR:
validate_mstatus_fs(env, GETPC());
env->frm = (val_to_write & FSR_RD) >> FSR_RD_SHIFT;
cpu_riscv_set_fflags(env, (val_to_write & FSR_AEXC) >> FSR_AEXC_SHIFT);
break;
#ifndef CONFIG_USER_ONLY
case CSR_MSTATUS: {
target_ulong mstatus = env->mstatus;
target_ulong mask = 0;
target_ulong mpp = get_field(val_to_write, MSTATUS_MPP);
/* flush tlb on mstatus fields that affect VM */
if (env->priv_ver <= PRIV_VERSION_1_09_1) {
if ((val_to_write ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP |
MSTATUS_MPRV | MSTATUS_SUM | MSTATUS_VM)) {
helper_riscv_tlb_flush(env);
}
mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
MSTATUS_MPP | MSTATUS_MXR |
(validate_vm(env, get_field(val_to_write, MSTATUS_VM)) ?
MSTATUS_VM : 0);
}
if (env->priv_ver >= PRIV_VERSION_1_10_0) {
if ((val_to_write ^ mstatus) & (MSTATUS_MXR | MSTATUS_MPP |
MSTATUS_MPRV | MSTATUS_SUM)) {
helper_riscv_tlb_flush(env);
}
mask = MSTATUS_SIE | MSTATUS_SPIE | MSTATUS_MIE | MSTATUS_MPIE |
MSTATUS_SPP | MSTATUS_FS | MSTATUS_MPRV | MSTATUS_SUM |
MSTATUS_MPP | MSTATUS_MXR;
}
/* silently discard mstatus.mpp writes for unsupported modes */
if (mpp == PRV_H ||
(!riscv_has_ext(env, RVS) && mpp == PRV_S) ||
(!riscv_has_ext(env, RVU) && mpp == PRV_U)) {
mask &= ~MSTATUS_MPP;
}
mstatus = (mstatus & ~mask) | (val_to_write & mask);
/* Note: this is a workaround for an issue where mstatus.FS
does not report dirty after floating point operations
that modify floating point state. This workaround is
technically compliant with the RISC-V Privileged
specification, as it is legal to report only off or dirty,
at the expense of extra floating point save/restore. */
/* FP is always dirty or off */
if (mstatus & MSTATUS_FS) {
mstatus |= MSTATUS_FS;
}
int dirty = ((mstatus & MSTATUS_FS) == MSTATUS_FS) |
((mstatus & MSTATUS_XS) == MSTATUS_XS);
mstatus = set_field(mstatus, MSTATUS_SD, dirty);
env->mstatus = mstatus;
break;
}
case CSR_MIP: {
/*
* Since the writable bits in MIP are not set asynchronously by the
* CLINT, no additional locking is needed for read-modify-write
* CSR operations
*/
// Unicorn: commented out
//qemu_mutex_lock_iothread();
RISCVCPU *cpu = riscv_env_get_cpu(env);
riscv_set_local_interrupt(cpu, MIP_SSIP,
(val_to_write & MIP_SSIP) != 0);
riscv_set_local_interrupt(cpu, MIP_STIP,
(val_to_write & MIP_STIP) != 0);
/*
* csrs, csrc on mip.SEIP is not decomposable into separate read and
* write steps, so a different implementation is needed
*/
// Unicorn: commented out
//qemu_mutex_unlock_iothread();
break;
}
case CSR_MIE: {
env->mie = (env->mie & ~all_ints) |
(val_to_write & all_ints);
break;
}
case CSR_MIDELEG:
env->mideleg = (env->mideleg & ~delegable_ints)
| (val_to_write & delegable_ints);
break;
case CSR_MEDELEG: {
target_ulong mask = 0;
mask |= 1ULL << (RISCV_EXCP_INST_ADDR_MIS);
mask |= 1ULL << (RISCV_EXCP_INST_ACCESS_FAULT);
mask |= 1ULL << (RISCV_EXCP_ILLEGAL_INST);
mask |= 1ULL << (RISCV_EXCP_BREAKPOINT);
mask |= 1ULL << (RISCV_EXCP_LOAD_ADDR_MIS);
mask |= 1ULL << (RISCV_EXCP_LOAD_ACCESS_FAULT);
mask |= 1ULL << (RISCV_EXCP_STORE_AMO_ADDR_MIS);
mask |= 1ULL << (RISCV_EXCP_STORE_AMO_ACCESS_FAULT);
mask |= 1ULL << (RISCV_EXCP_U_ECALL);
mask |= 1ULL << (RISCV_EXCP_S_ECALL);
mask |= 1ULL << (RISCV_EXCP_H_ECALL);
mask |= 1ULL << (RISCV_EXCP_M_ECALL);
mask |= 1ULL << (RISCV_EXCP_INST_PAGE_FAULT);
mask |= 1ULL << (RISCV_EXCP_LOAD_PAGE_FAULT);
mask |= 1ULL << (RISCV_EXCP_STORE_PAGE_FAULT);
env->medeleg = (env->medeleg & ~mask)
| (val_to_write & mask);
break;
}
case CSR_MINSTRET:
/* minstret is WARL so unsupported writes are ignored */
break;
case CSR_MCYCLE:
/* mcycle is WARL so unsupported writes are ignored */
break;
#if defined(TARGET_RISCV32)
case CSR_MINSTRETH:
/* minstreth is WARL so unsupported writes are ignored */
break;
case CSR_MCYCLEH:
/* mcycleh is WARL so unsupported writes are ignored */
break;
#endif
case CSR_MUCOUNTEREN:
if (env->priv_ver <= PRIV_VERSION_1_09_1) {
env->scounteren = val_to_write;
break;
} else {
goto do_illegal;
}
case CSR_MSCOUNTEREN:
if (env->priv_ver <= PRIV_VERSION_1_09_1) {
env->mcounteren = val_to_write;
break;
} else {
goto do_illegal;
}
case CSR_SSTATUS: {
target_ulong ms = env->mstatus;
target_ulong mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UIE
| SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS
| SSTATUS_SUM | SSTATUS_SD;
if (env->priv_ver >= PRIV_VERSION_1_10_0) {
mask |= SSTATUS_MXR;
}
ms = (ms & ~mask) | (val_to_write & mask);
csr_write_helper(env, ms, CSR_MSTATUS);
break;
}
case CSR_SIP: {
// Unicorn: commented out
//qemu_mutex_lock_iothread();
target_ulong next_mip = (env->mip & ~env->mideleg)
| (val_to_write & env->mideleg);
// Unicorn: commented out
//qemu_mutex_unlock_iothread();
csr_write_helper(env, next_mip, CSR_MIP);
break;
}
case CSR_SIE: {
target_ulong next_mie = (env->mie & ~env->mideleg)
| (val_to_write & env->mideleg);
csr_write_helper(env, next_mie, CSR_MIE);
break;
}
case CSR_SATP: /* CSR_SPTBR */ {
if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
break;
}
if (env->priv_ver <= PRIV_VERSION_1_09_1 && (val_to_write ^ env->sptbr))
{
helper_riscv_tlb_flush(env);
env->sptbr = val_to_write & (((target_ulong)
1 << (TARGET_PHYS_ADDR_SPACE_BITS - PGSHIFT)) - 1);
}
if (env->priv_ver >= PRIV_VERSION_1_10_0 &&
validate_vm(env, get_field(val_to_write, SATP_MODE)) &&
((val_to_write ^ env->satp) & (SATP_MODE | SATP_ASID | SATP_PPN)))
{
helper_riscv_tlb_flush(env);
env->satp = val_to_write;
}
break;
}
case CSR_SEPC:
env->sepc = val_to_write;
break;
case CSR_STVEC:
/* bits [1:0] encode mode; 0 = direct, 1 = vectored, values >= 2 reserved */
if ((val_to_write & 3) == 0) {
env->stvec = val_to_write >> 2 << 2;
} else {
qemu_log_mask(LOG_UNIMP,
"CSR_STVEC: vectored traps not supported\n");
}
break;
case CSR_SCOUNTEREN:
if (env->priv_ver >= PRIV_VERSION_1_10_0) {
env->scounteren = val_to_write;
break;
} else {
goto do_illegal;
}
case CSR_SSCRATCH:
env->sscratch = val_to_write;
break;
case CSR_SCAUSE:
env->scause = val_to_write;
break;
case CSR_SBADADDR:
env->sbadaddr = val_to_write;
break;
case CSR_MEPC:
env->mepc = val_to_write;
break;
case CSR_MTVEC:
/* bits [1:0] indicate mode; 0 = direct, 1 = vectored, values >= 2 reserved */
if ((val_to_write & 3) == 0) {
env->mtvec = val_to_write >> 2 << 2;
} else {
qemu_log_mask(LOG_UNIMP,
"CSR_MTVEC: vectored traps not supported\n");
}
break;
case CSR_MCOUNTEREN:
if (env->priv_ver >= PRIV_VERSION_1_10_0) {
env->mcounteren = val_to_write;
break;
} else {
goto do_illegal;
}
case CSR_MSCRATCH:
env->mscratch = val_to_write;
break;
case CSR_MCAUSE:
env->mcause = val_to_write;
break;
case CSR_MBADADDR:
env->mbadaddr = val_to_write;
break;
case CSR_MISA:
/* misa is WARL so unsupported writes are ignored */
break;
case CSR_PMPCFG0:
case CSR_PMPCFG1:
case CSR_PMPCFG2:
case CSR_PMPCFG3:
pmpcfg_csr_write(env, csrno - CSR_PMPCFG0, val_to_write);
break;
case CSR_PMPADDR0:
case CSR_PMPADDR1:
case CSR_PMPADDR2:
case CSR_PMPADDR3:
case CSR_PMPADDR4:
case CSR_PMPADDR5:
case CSR_PMPADDR6:
case CSR_PMPADDR7:
case CSR_PMPADDR8:
case CSR_PMPADDR9:
case CSR_PMPADDR10:
case CSR_PMPADDR11:
case CSR_PMPADDR12:
case CSR_PMPADDR13:
case CSR_PMPADDR14:
case CSR_PMPADDR15:
pmpaddr_csr_write(env, csrno - CSR_PMPADDR0, val_to_write);
break;
#endif
#if !defined(CONFIG_USER_ONLY)
do_illegal:
#endif
default:
do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
}
/*
* Handle reads to CSRs and any resulting special behavior
*
* Adapted from Spike's processor_t::get_csr
*/
target_ulong csr_read_helper(CPURISCVState *env, target_ulong csrno)
{
#ifndef CONFIG_USER_ONLY
target_ulong ctr_en = env->priv == PRV_U ? env->scounteren :
env->priv == PRV_S ? env->mcounteren : -1U;
#else
target_ulong ctr_en = -1;
#endif
target_ulong ctr_ok = (ctr_en >> (csrno & 31)) & 1;
if (csrno >= CSR_HPMCOUNTER3 && csrno <= CSR_HPMCOUNTER31) {
if (ctr_ok) {
return 0;
}
}
#if defined(TARGET_RISCV32)
if (csrno >= CSR_HPMCOUNTER3H && csrno <= CSR_HPMCOUNTER31H) {
if (ctr_ok) {
return 0;
}
}
#endif
if (csrno >= CSR_MHPMCOUNTER3 && csrno <= CSR_MHPMCOUNTER31) {
return 0;
}
#if defined(TARGET_RISCV32)
if (csrno >= CSR_MHPMCOUNTER3H && csrno <= CSR_MHPMCOUNTER31H) {
return 0;
}
#endif
if (csrno >= CSR_MHPMEVENT3 && csrno <= CSR_MHPMEVENT31) {
return 0;
}
switch (csrno) {
case CSR_FFLAGS:
validate_mstatus_fs(env, GETPC());
return cpu_riscv_get_fflags(env);
case CSR_FRM:
validate_mstatus_fs(env, GETPC());
return env->frm;
case CSR_FCSR:
validate_mstatus_fs(env, GETPC());
return (cpu_riscv_get_fflags(env) << FSR_AEXC_SHIFT)
| (env->frm << FSR_RD_SHIFT);
/* rdtime/rdtimeh is trapped and emulated by bbl in system mode */
#ifdef CONFIG_USER_ONLY
case CSR_TIME:
// Unicorn: Commented out
//return cpu_get_host_ticks();
// Unicorn: Default value:
return 0;
#if defined(TARGET_RISCV32)
case CSR_TIMEH:
// Unicorn: Commented out
//return cpu_get_host_ticks() >> 32;
// Unicorn: Default value
return 0;
#endif
#endif
case CSR_INSTRET:
case CSR_CYCLE:
if (ctr_ok) {
#if !defined(CONFIG_USER_ONLY)
// Unicorn: commented out
/*if (use_icount) {
return cpu_get_icount();
} else {
return cpu_get_host_ticks();
}*/
// Unicorn: Default value:
return 0;
#else
// Unicorn: Default value
//return cpu_get_host_ticks();
return 0;
#endif
}
break;
#if defined(TARGET_RISCV32)
case CSR_INSTRETH:
case CSR_CYCLEH:
if (ctr_ok) {
#if !defined(CONFIG_USER_ONLY)
// Unicorn commented out
/*if (use_icount) {
return cpu_get_icount() >> 32;
} else {
return cpu_get_host_ticks() >> 32;
}
*/
// Unicorn: default value
return 0;
#else
// Unicorn: default value
// return cpu_get_host_ticks() >> 32;
return 0;
#endif
}
break;
#endif
#ifndef CONFIG_USER_ONLY
case CSR_MINSTRET:
case CSR_MCYCLE:
// Unicorn: Commented out
/*if (use_icount) {
return cpu_get_icount();
} else {
return cpu_get_host_ticks();
}
*/
// Unicorn: default value
return 0;
case CSR_MINSTRETH:
case CSR_MCYCLEH:
#if defined(TARGET_RISCV32)
// Unicorn commented out
/*if (use_icount) {
return cpu_get_icount() >> 32;
} else {
return cpu_get_host_ticks() >> 32;
}*/
// Unicorn: default value
return 0;
#endif
break;
case CSR_MUCOUNTEREN:
if (env->priv_ver <= PRIV_VERSION_1_09_1) {
return env->scounteren;
} else {
break; /* illegal instruction */
}
case CSR_MSCOUNTEREN:
if (env->priv_ver <= PRIV_VERSION_1_09_1) {
return env->mcounteren;
} else {
break; /* illegal instruction */
}
case CSR_SSTATUS: {
target_ulong mask = SSTATUS_SIE | SSTATUS_SPIE | SSTATUS_UIE
| SSTATUS_UPIE | SSTATUS_SPP | SSTATUS_FS | SSTATUS_XS
| SSTATUS_SUM | SSTATUS_SD;
if (env->priv_ver >= PRIV_VERSION_1_10_0) {
mask |= SSTATUS_MXR;
}
return env->mstatus & mask;
}
case CSR_SIP: {
// Unicorn: commented out
//qemu_mutex_lock_iothread();
target_ulong tmp = env->mip & env->mideleg;
// Unicorn: commented out
//qemu_mutex_unlock_iothread();
return tmp;
}
case CSR_SIE:
return env->mie & env->mideleg;
case CSR_SEPC:
return env->sepc;
case CSR_SBADADDR:
return env->sbadaddr;
case CSR_STVEC:
return env->stvec;
case CSR_SCOUNTEREN:
if (env->priv_ver >= PRIV_VERSION_1_10_0) {
return env->scounteren;
} else {
break; /* illegal instruction */
}
case CSR_SCAUSE:
return env->scause;
case CSR_SATP: /* CSR_SPTBR */
if (!riscv_feature(env, RISCV_FEATURE_MMU)) {
return 0;
}
if (env->priv_ver >= PRIV_VERSION_1_10_0) {
return env->satp;
} else {
return env->sptbr;
}
case CSR_SSCRATCH:
return env->sscratch;
case CSR_MSTATUS:
return env->mstatus;
case CSR_MIP: {
// Unicorn: commented out
//qemu_mutex_lock_iothread();
target_ulong tmp = env->mip;
// Unicorn: commented out
//qemu_mutex_unlock_iothread();
return tmp;
}
case CSR_MIE:
return env->mie;
case CSR_MEPC:
return env->mepc;
case CSR_MSCRATCH:
return env->mscratch;
case CSR_MCAUSE:
return env->mcause;
case CSR_MBADADDR:
return env->mbadaddr;
case CSR_MISA:
return env->misa;
case CSR_MARCHID:
return 0; /* as spike does */
case CSR_MIMPID:
return 0; /* as spike does */
case CSR_MVENDORID:
return 0; /* as spike does */
case CSR_MHARTID:
return env->mhartid;
case CSR_MTVEC:
return env->mtvec;
case CSR_MCOUNTEREN:
if (env->priv_ver >= PRIV_VERSION_1_10_0) {
return env->mcounteren;
} else {
break; /* illegal instruction */
}
case CSR_MEDELEG:
return env->medeleg;
case CSR_MIDELEG:
return env->mideleg;
case CSR_PMPCFG0:
case CSR_PMPCFG1:
case CSR_PMPCFG2:
case CSR_PMPCFG3:
return pmpcfg_csr_read(env, csrno - CSR_PMPCFG0);
case CSR_PMPADDR0:
case CSR_PMPADDR1:
case CSR_PMPADDR2:
case CSR_PMPADDR3:
case CSR_PMPADDR4:
case CSR_PMPADDR5:
case CSR_PMPADDR6:
case CSR_PMPADDR7:
case CSR_PMPADDR8:
case CSR_PMPADDR9:
case CSR_PMPADDR10:
case CSR_PMPADDR11:
case CSR_PMPADDR12:
case CSR_PMPADDR13:
case CSR_PMPADDR14:
case CSR_PMPADDR15:
return pmpaddr_csr_read(env, csrno - CSR_PMPADDR0);
#endif
}
/* used by e.g. MTIME read */
do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
/*
* Check that CSR access is allowed.
*
* Adapted from Spike's decode.h:validate_csr
*/
static void validate_csr(CPURISCVState *env, uint64_t which,
uint64_t write, uintptr_t ra)
{
#ifndef CONFIG_USER_ONLY
unsigned csr_priv = get_field((which), 0x300);
unsigned csr_read_only = get_field((which), 0xC00) == 3;
if (((write) && csr_read_only) || (env->priv < csr_priv)) {
do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, ra);
}
#endif
}
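/* CSR addresses encode their own access rules: bits [11:10] equal to 0b11
 * mark a read-only CSR and bits [9:8] give the lowest privilege level that
 * may access it, which is what the 0xC00 and 0x300 masks above extract. */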
target_ulong helper_csrrw(CPURISCVState *env, target_ulong src,
target_ulong csr)
{
validate_csr(env, csr, 1, GETPC());
uint64_t csr_backup = csr_read_helper(env, csr);
csr_write_helper(env, src, csr);
return csr_backup;
}
target_ulong helper_csrrs(CPURISCVState *env, target_ulong src,
target_ulong csr, target_ulong rs1_pass)
{
validate_csr(env, csr, rs1_pass != 0, GETPC());
uint64_t csr_backup = csr_read_helper(env, csr);
if (rs1_pass != 0) {
csr_write_helper(env, src | csr_backup, csr);
}
return csr_backup;
}
target_ulong helper_csrrc(CPURISCVState *env, target_ulong src,
target_ulong csr, target_ulong rs1_pass)
{
validate_csr(env, csr, rs1_pass != 0, GETPC());
uint64_t csr_backup = csr_read_helper(env, csr);
if (rs1_pass != 0) {
csr_write_helper(env, (~src) & csr_backup, csr);
}
return csr_backup;
}
#ifndef CONFIG_USER_ONLY
/* iothread_mutex must be held */
void riscv_set_local_interrupt(RISCVCPU *cpu, target_ulong mask, int value)
{
target_ulong old_mip = cpu->env.mip;
cpu->env.mip = (old_mip & ~mask) | (value ? mask : 0);
if (cpu->env.mip && !old_mip) {
cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
} else if (!cpu->env.mip && old_mip) {
cpu_reset_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
}
void riscv_set_mode(CPURISCVState *env, target_ulong newpriv)
{
if (newpriv > PRV_M) {
g_assert_not_reached();
}
if (newpriv == PRV_H) {
newpriv = PRV_U;
}
/* tlb_flush is unnecessary as mode is contained in mmu_idx */
env->priv = newpriv;
}
target_ulong helper_sret(CPURISCVState *env, target_ulong cpu_pc_deb)
{
if (!(env->priv >= PRV_S)) {
do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
target_ulong retpc = env->sepc;
if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
do_raise_exception_err(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
}
target_ulong mstatus = env->mstatus;
target_ulong prev_priv = get_field(mstatus, MSTATUS_SPP);
mstatus = set_field(mstatus,
env->priv_ver >= PRIV_VERSION_1_10_0 ?
MSTATUS_SIE : MSTATUS_UIE << prev_priv,
get_field(mstatus, MSTATUS_SPIE));
mstatus = set_field(mstatus, MSTATUS_SPIE, 0);
mstatus = set_field(mstatus, MSTATUS_SPP, PRV_U);
riscv_set_mode(env, prev_priv);
csr_write_helper(env, mstatus, CSR_MSTATUS);
return retpc;
}
target_ulong helper_mret(CPURISCVState *env, target_ulong cpu_pc_deb)
{
if (!(env->priv >= PRV_M)) {
do_raise_exception_err(env, RISCV_EXCP_ILLEGAL_INST, GETPC());
}
target_ulong retpc = env->mepc;
if (!riscv_has_ext(env, RVC) && (retpc & 0x3)) {
do_raise_exception_err(env, RISCV_EXCP_INST_ADDR_MIS, GETPC());
}
target_ulong mstatus = env->mstatus;
target_ulong prev_priv = get_field(mstatus, MSTATUS_MPP);
mstatus = set_field(mstatus,
env->priv_ver >= PRIV_VERSION_1_10_0 ?
MSTATUS_MIE : MSTATUS_UIE << prev_priv,
get_field(mstatus, MSTATUS_MPIE));
mstatus = set_field(mstatus, MSTATUS_MPIE, 0);
mstatus = set_field(mstatus, MSTATUS_MPP, PRV_U);
riscv_set_mode(env, prev_priv);
csr_write_helper(env, mstatus, CSR_MSTATUS);
return retpc;
}
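/* WFI: halt the hart until an interrupt becomes pending */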
void helper_wfi(CPURISCVState *env)
{
CPUState *cs = CPU(riscv_env_get_cpu(env));
cs->halted = 1;
cs->exception_index = EXCP_HLT;
cpu_loop_exit(cs);
}
void helper_riscv_tlb_flush(CPURISCVState *env)
{
RISCVCPU *cpu = riscv_env_get_cpu(env);
CPUState *cs = CPU(cpu);
tlb_flush(cs);
}
#endif /* !CONFIG_USER_ONLY */

380
qemu/target/riscv/pmp.c Normal file
View file

@@ -0,0 +1,380 @@
/*
* QEMU RISC-V PMP (Physical Memory Protection)
*
* Author: Daire McNamara, daire.mcnamara@emdalo.com
* Ivan Griffin, ivan.griffin@emdalo.com
*
* This provides a RISC-V Physical Memory Protection implementation
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* PMP (Physical Memory Protection) is as yet unused and needs testing.
*/
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qapi/error.h"
#include "cpu.h"
#include "qemu-common.h"
#ifndef CONFIG_USER_ONLY
#define RISCV_DEBUG_PMP 0
#define PMP_DEBUG(fmt, ...) \
do { \
if (RISCV_DEBUG_PMP) { \
qemu_log_mask(LOG_TRACE, "%s: " fmt "\n", __func__, ##__VA_ARGS__);\
} \
} while (0)
static void pmp_write_cfg(CPURISCVState *env, uint32_t addr_index,
uint8_t val);
static uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t addr_index);
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index);
/*
* Accessor method to extract address matching type 'a field' from cfg reg
*/
static inline uint8_t pmp_get_a_field(uint8_t cfg)
{
uint8_t a = cfg >> 3;
return a & 0x3;
}
/*
* Check whether a PMP is locked or not.
*/
static inline int pmp_is_locked(CPURISCVState *env, uint32_t pmp_index)
{
if (env->pmp_state.pmp[pmp_index].cfg_reg & PMP_LOCK) {
return 1;
}
/* Top PMP has no 'next' to check */
if ((pmp_index + 1u) >= MAX_RISCV_PMPS) {
return 0;
}
/* In TOR mode, need to check the lock bit of the next pmp
* (if there is a next)
*/
const uint8_t a_field =
pmp_get_a_field(env->pmp_state.pmp[pmp_index + 1].cfg_reg);
if ((env->pmp_state.pmp[pmp_index + 1u].cfg_reg & PMP_LOCK) &&
(PMP_AMATCH_TOR == a_field)) {
return 1;
}
return 0;
}
/*
* Count the number of active rules.
*/
static inline uint32_t pmp_get_num_rules(CPURISCVState *env)
{
return env->pmp_state.num_rules;
}
/*
* Accessor to get the cfg reg for a specific PMP/HART
*/
static inline uint8_t pmp_read_cfg(CPURISCVState *env, uint32_t pmp_index)
{
if (pmp_index < MAX_RISCV_PMPS) {
return env->pmp_state.pmp[pmp_index].cfg_reg;
}
return 0;
}
/*
* Accessor to set the cfg reg for a specific PMP/HART
* Bounds checks and relevant lock bit.
*/
static void pmp_write_cfg(CPURISCVState *env, uint32_t pmp_index, uint8_t val)
{
if (pmp_index < MAX_RISCV_PMPS) {
if (!pmp_is_locked(env, pmp_index)) {
env->pmp_state.pmp[pmp_index].cfg_reg = val;
pmp_update_rule(env, pmp_index);
} else {
PMP_DEBUG("ignoring write - locked");
}
} else {
PMP_DEBUG("ignoring write - out of bounds");
}
}
static void pmp_decode_napot(target_ulong a, target_ulong *sa, target_ulong *ea)
{
/*
aaaa...aaa0 8-byte NAPOT range
aaaa...aa01 16-byte NAPOT range
aaaa...a011 32-byte NAPOT range
...
aa01...1111 2^XLEN-byte NAPOT range
a011...1111 2^(XLEN+1)-byte NAPOT range
0111...1111 2^(XLEN+2)-byte NAPOT range
1111...1111 Reserved
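
For example, pmpaddr = 0b10011 has two trailing ones, so it selects a
2^(2+3) = 32-byte NAPOT region based at (0b10000 << 2) = 0x40, i.e. the
byte range [0x40, 0x5f].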
*/
if (a == -1) {
*sa = 0u;
*ea = -1;
return;
} else {
target_ulong t1 = ctz64(~a);
target_ulong base = (a & ~(((target_ulong)1 << t1) - 1)) << 2; /* pmpaddr holds address bits [..:2] */
target_ulong range = ((target_ulong)1 << (t1 + 3)) - 1;
*sa = base;
*ea = base + range;
}
}
/* Convert cfg/addr reg values here into simple 'sa' --> start address and 'ea'
* end address values.
* This function is called relatively infrequently whereas the check that
* an address is within a pmp rule is called often, so optimise that one
*/
static void pmp_update_rule(CPURISCVState *env, uint32_t pmp_index)
{
int i;
env->pmp_state.num_rules = 0;
uint8_t this_cfg = env->pmp_state.pmp[pmp_index].cfg_reg;
target_ulong this_addr = env->pmp_state.pmp[pmp_index].addr_reg;
target_ulong prev_addr = 0u;
target_ulong sa = 0u;
target_ulong ea = 0u;
if (pmp_index >= 1u) {
prev_addr = env->pmp_state.pmp[pmp_index - 1].addr_reg;
}
switch (pmp_get_a_field(this_cfg)) {
case PMP_AMATCH_OFF:
sa = 0u;
ea = -1;
break;
case PMP_AMATCH_TOR:
sa = prev_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
ea = (this_addr << 2) - 1u;
break;
case PMP_AMATCH_NA4:
sa = this_addr << 2; /* shift up from [xx:0] to [xx+2:2] */
ea = (this_addr + 4u) - 1u;
break;
case PMP_AMATCH_NAPOT:
pmp_decode_napot(this_addr, &sa, &ea);
break;
default:
sa = 0u;
ea = 0u;
break;
}
env->pmp_state.addr[pmp_index].sa = sa;
env->pmp_state.addr[pmp_index].ea = ea;
for (i = 0; i < MAX_RISCV_PMPS; i++) {
const uint8_t a_field =
pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
if (PMP_AMATCH_OFF != a_field) {
env->pmp_state.num_rules++;
}
}
}
static int pmp_is_in_range(CPURISCVState *env, int pmp_index, target_ulong addr)
{
int result = 0;
if ((addr >= env->pmp_state.addr[pmp_index].sa)
&& (addr <= env->pmp_state.addr[pmp_index].ea)) {
result = 1;
} else {
result = 0;
}
return result;
}
/*
* Public Interface
*/
/*
* Check if the address has required RWX privs to complete desired operation
*/
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
target_ulong size, pmp_priv_t privs)
{
int i = 0;
int ret = -1;
target_ulong s = 0;
target_ulong e = 0;
pmp_priv_t allowed_privs = 0;
/* Short cut if no rules */
if (0 == pmp_get_num_rules(env)) {
return true;
}
/* 1.10 draft priv spec states there is an implicit order
from low to high */
for (i = 0; i < MAX_RISCV_PMPS; i++) {
s = pmp_is_in_range(env, i, addr);
e = pmp_is_in_range(env, i, addr + size);
/* partially inside */
if ((s + e) == 1) {
PMP_DEBUG("pmp violation - access is partially inside");
ret = 0;
break;
}
/* fully inside */
const uint8_t a_field =
pmp_get_a_field(env->pmp_state.pmp[i].cfg_reg);
if ((s + e) == 2) {
if (PMP_AMATCH_OFF == a_field) {
return 1;
}
allowed_privs = PMP_READ | PMP_WRITE | PMP_EXEC;
if ((env->priv != PRV_M) || pmp_is_locked(env, i)) {
allowed_privs &= env->pmp_state.pmp[i].cfg_reg;
}
if ((privs & allowed_privs) == privs) {
ret = 1;
break;
} else {
ret = 0;
break;
}
}
}
/* No rule matched */
if (ret == -1) {
if (env->priv == PRV_M) {
ret = 1; /* Privileged spec v1.10 states if no PMP entry matches an
* M-Mode access, the access succeeds */
} else {
ret = 0; /* Other modes are not allowed to succeed if they don't
* match a rule, but there are rules. We've checked for
* no rule earlier in this function. */
}
}
return ret == 1 ? true : false;
}
/*
* Handle a write to a pmpcfg CSR
*/
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
target_ulong val)
{
int i;
uint8_t cfg_val;
PMP_DEBUG("hart " TARGET_FMT_ld ": reg%d, val: 0x" TARGET_FMT_lx,
env->mhartid, reg_index, val);
if ((reg_index & 1) && (sizeof(target_ulong) == 8)) {
PMP_DEBUG("ignoring write - incorrect address");
return;
}
for (i = 0; i < sizeof(target_ulong); i++) {
cfg_val = (val >> 8 * i) & 0xff;
pmp_write_cfg(env, (reg_index * sizeof(target_ulong)) + i,
cfg_val);
}
}
/*
* Handle a read from a pmpcfg CSR
*/
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index)
{
int i;
target_ulong cfg_val = 0;
uint8_t val = 0;
for (i = 0; i < sizeof(target_ulong); i++) {
val = pmp_read_cfg(env, (reg_index * sizeof(target_ulong)) + i);
cfg_val |= (val << (i * 8));
}
PMP_DEBUG("hart " TARGET_FMT_ld ": reg%d, val: 0x" TARGET_FMT_lx,
env->mhartid, reg_index, cfg_val);
return cfg_val;
}
/*
* Handle a write to a pmpaddr CSR
*/
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
target_ulong val)
{
PMP_DEBUG("hart " TARGET_FMT_ld ": addr%d, val: 0x" TARGET_FMT_lx,
env->mhartid, addr_index, val);
if (addr_index < MAX_RISCV_PMPS) {
if (!pmp_is_locked(env, addr_index)) {
env->pmp_state.pmp[addr_index].addr_reg = val;
pmp_update_rule(env, addr_index);
} else {
PMP_DEBUG("ignoring write - locked");
}
} else {
PMP_DEBUG("ignoring write - out of bounds");
}
}
/*
* Handle a read from a pmpaddr CSR
*/
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index)
{
PMP_DEBUG("hart " TARGET_FMT_ld ": addr%d, val: 0x" TARGET_FMT_lx,
env->mhartid, addr_index,
env->pmp_state.pmp[addr_index].addr_reg);
if (addr_index < MAX_RISCV_PMPS) {
return env->pmp_state.pmp[addr_index].addr_reg;
} else {
PMP_DEBUG("ignoring read - out of bounds");
return 0;
}
}
#endif

64
qemu/target/riscv/pmp.h Normal file
View file

@@ -0,0 +1,64 @@
/*
* QEMU RISC-V PMP (Physical Memory Protection)
*
* Author: Daire McNamara, daire.mcnamara@emdalo.com
* Ivan Griffin, ivan.griffin@emdalo.com
*
* This provides a RISC-V Physical Memory Protection interface
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2 or later, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _RISCV_PMP_H_
#define _RISCV_PMP_H_
typedef enum {
PMP_READ = 1 << 0,
PMP_WRITE = 1 << 1,
PMP_EXEC = 1 << 2,
PMP_LOCK = 1 << 7
} pmp_priv_t;
typedef enum {
PMP_AMATCH_OFF, /* Null (off) */
PMP_AMATCH_TOR, /* Top of Range */
PMP_AMATCH_NA4, /* Naturally aligned four-byte region */
PMP_AMATCH_NAPOT /* Naturally aligned power-of-two region */
} pmp_am_t;
typedef struct {
target_ulong addr_reg;
uint8_t cfg_reg;
} pmp_entry_t;
typedef struct {
target_ulong sa;
target_ulong ea;
} pmp_addr_t;
typedef struct {
pmp_entry_t pmp[MAX_RISCV_PMPS];
pmp_addr_t addr[MAX_RISCV_PMPS];
uint32_t num_rules;
} pmp_table_t;
void pmpcfg_csr_write(CPURISCVState *env, uint32_t reg_index,
target_ulong val);
target_ulong pmpcfg_csr_read(CPURISCVState *env, uint32_t reg_index);
void pmpaddr_csr_write(CPURISCVState *env, uint32_t addr_index,
target_ulong val);
target_ulong pmpaddr_csr_read(CPURISCVState *env, uint32_t addr_index);
bool pmp_hart_has_privs(CPURISCVState *env, target_ulong addr,
target_ulong size, pmp_priv_t priv);
#endif

File diff suppressed because it is too large

114
qemu/target/riscv/unicorn.c Normal file
View file

@@ -0,0 +1,114 @@
/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2015 */
#include <string.h>
#include "qemu/osdep.h"
#include "cpu.h"
#include "hw/boards.h"
#include "hw/riscv/spike.h"
#include "sysemu/cpus.h"
#include "unicorn.h"
#include "unicorn_common.h"
#include "uc_priv.h"
#ifdef TARGET_RISCV32
const int RISCV32_REGS_STORAGE_SIZE = offsetof(CPURISCVState, tlb_table);
#else
const int RISCV64_REGS_STORAGE_SIZE = offsetof(CPURISCVState, tlb_table);
#endif
static void riscv_release(void *ctx) {
TCGContext *tcg_ctx = (TCGContext *) ctx;
release_common(ctx);
g_free(tcg_ctx->tb_ctx.tbs);
}
static void riscv_reg_reset(struct uc_struct *uc) {
CPUArchState *env = uc->cpu->env_ptr;
memset(env->gpr, 0, sizeof(env->gpr));
memset(env->fpr, 0, sizeof(env->fpr));
env->priv = PRV_M;
env->mstatus &= ~(MSTATUS_MIE | MSTATUS_MPRV);
env->mcause = 0;
env->pc = env->resetvec;
set_default_nan_mode(1, &env->fp_status);
}
static int riscv_reg_read(struct uc_struct *uc, unsigned int *regs, void **vals, int count) {
CPUState *const cs = uc->cpu;
CPURISCVState *const state = &RISCV_CPU(cs)->env;
for (int i = 0; i < count; i++) {
const unsigned int reg_id = regs[i];
void *const value = vals[i];
if (reg_id >= UC_RISCV_REG_X0 && reg_id <= UC_RISCV_REG_X31) {
memcpy(value, &state->gpr[reg_id - UC_RISCV_REG_X0], sizeof(state->gpr[0]));
} else if (reg_id >= UC_RISCV_REG_F0 && reg_id <= UC_RISCV_REG_F31) {
memcpy(value, &state->fpr[reg_id - UC_RISCV_REG_F0], sizeof(state->fpr[0]));
} else if (reg_id == UC_RISCV_REG_PC) {
memcpy(value, &state->pc, sizeof(state->pc));
}
}
return 0;
}
static int riscv_reg_write(struct uc_struct *uc, unsigned int *regs, void *const *vals, int count) {
CPUState *const cs = uc->cpu;
CPURISCVState *const state = &RISCV_CPU(cs)->env;
for (int i = 0; i < count; i++) {
const unsigned int reg_id = regs[i];
const void *value = vals[i];
// Intentionally exclude the zero register (X0) in the lower-bound
if (reg_id > UC_RISCV_REG_X0 && reg_id <= UC_RISCV_REG_X31) {
memcpy(&state->gpr[reg_id - UC_RISCV_REG_X0], value, sizeof(state->gpr[0]));
} else if (reg_id >= UC_RISCV_REG_F0 && reg_id <= UC_RISCV_REG_F31) {
memcpy(&state->fpr[reg_id - UC_RISCV_REG_F0], value, sizeof(state->fpr[0]));
} else if (reg_id == UC_RISCV_REG_PC) {
memcpy(&state->pc, value, sizeof(state->pc));
}
}
return 0;
}
static void riscv_set_pc(struct uc_struct *uc, uint64_t address) {
CPURISCVState *state = uc->cpu->env_ptr;
state->pc = address;
}
static bool riscv_stop_interrupt(int int_no) {
switch(int_no) {
default:
return false;
}
}
DEFAULT_VISIBILITY
#ifdef TARGET_RISCV32
void riscv32_uc_init(struct uc_struct *uc) {
#else
void riscv64_uc_init(struct uc_struct *uc) {
#endif
register_accel_types(uc);
riscv_cpu_register_types(uc);
spike_v1_10_0_machine_init_register_types(uc);
uc->release = riscv_release;
uc->reg_read = riscv_reg_read;
uc->reg_write = riscv_reg_write;
uc->reg_reset = riscv_reg_reset;
uc->set_pc = riscv_set_pc;
uc->stop_interrupt = riscv_stop_interrupt;
uc_common_init(uc);
}

View file

@@ -0,0 +1,13 @@
/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh <aquynh@gmail.com>, 2018 */
#ifndef UC_QEMU_TARGET_RISCV_H
#define UC_QEMU_TARGET_RISCV_H
void riscv32_uc_init(struct uc_struct *uc);
void riscv64_uc_init(struct uc_struct *uc);
extern const int RISCV32_REGS_STORAGE_SIZE_riscv32;
extern const int RISCV64_REGS_STORAGE_SIZE_riscv64;
#endif /* UC_QEMU_TARGET_RISCV_H */

View file

@@ -844,7 +844,7 @@ struct TCGContext {
struct tcg_temp_info temps2[TCG_MAX_TEMPS];
TCGTempSet temps2_used;
/* qemu/target-m68k/translate.c */
/* qemu/target/m68k/translate.c */
TCGv_i32 cpu_halted;
char cpu_reg_names[2 * 8 * 3 + 5 * 4];
TCGv cpu_dregs[8];
@@ -865,20 +865,20 @@
/* Used to distinguish stores from bad addressing modes. */
TCGv store_dummy;
/* qemu/target-arm/translate.c */
/* qemu/target/arm/translate.c */
/* We reuse the same 64-bit temporaries for efficiency. */
TCGv_i32 cpu_R[16];
TCGv_i32 cpu_CF, cpu_NF, cpu_VF, cpu_ZF;
TCGv_i64 cpu_exclusive_addr;
TCGv_i64 cpu_exclusive_val;
/* qemu/target-arm/translate-a64.c */
/* qemu/target/arm/translate-a64.c */
TCGv_i64 cpu_pc;
/* Load/store exclusive handling */
TCGv_i64 cpu_exclusive_high;
TCGv_i64 cpu_X[32];
/* qemu/target-mips/translate.c */
/* qemu/target/mips/translate.c */
/* global register indices */
TCGv cpu_gpr[32];
TCGv cpu_PC;
@@ -891,7 +891,14 @@
TCGv_i64 fpu_f64[32];
TCGv_i64 msa_wr_d[64];
/* qemu/target-sparc/translate.c */
/* qemu/target/riscv/translate.c */
TCGv riscv_cpu_gpr[32];
TCGv riscv_cpu_pc;
TCGv_i64 riscv_cpu_fpr[32]; /* assume F and D extensions */
TCGv load_res;
TCGv load_val;
/* qemu/target/sparc/translate.c */
/* global register indexes */
TCGv_ptr cpu_regwptr;
TCGv_i32 cpu_psr;

View file

@@ -217,7 +217,6 @@
#define clz32 clz32_x86_64
#define clz64 clz64_x86_64
#define cmp_flatrange_addr cmp_flatrange_addr_x86_64
#define code_gen_alloc code_gen_alloc_x86_64
#define commonNaNToFloat128 commonNaNToFloat128_x86_64
#define commonNaNToFloat16 commonNaNToFloat16_x86_64
#define commonNaNToFloat32 commonNaNToFloat32_x86_64
@@ -281,7 +280,6 @@
#define cpu_exec_init_all cpu_exec_init_all_x86_64
#define cpu_exec_step_atomic cpu_exec_step_atomic_x86_64
#define cpu_flush_icache_range cpu_flush_icache_range_x86_64
#define cpu_gen_init cpu_gen_init_x86_64
#define cpu_get_address_space cpu_get_address_space_x86_64
#define cpu_get_clock cpu_get_clock_x86_64
#define cpu_get_real_ticks cpu_get_real_ticks_x86_64

View file

@@ -76,6 +76,9 @@ endif
#ifneq (,$(findstring ppc,$(UNICORN_ARCHS)))
#SOURCES += sample_ppc.c
#endif
ifneq (,$(findstring riscv,$(UNICORN_ARCHS)))
SOURCES += sample_riscv.c
endif
ifneq (,$(findstring sparc,$(UNICORN_ARCHS)))
SOURCES += sample_sparc.c
endif

146
samples/sample_riscv.c Normal file
View file

@@ -0,0 +1,146 @@
/* Unicorn Emulator Engine */
/* By Nguyen Anh Quynh, 2015 */
/* Sample code to demonstrate how to emulate RISC-V code */
#include <unicorn/unicorn.h>
#include <string.h>
// code to be emulated
#define RISCV_CODE "\x93\xE0\xF0\x7F" // ori x1, x1, 0x7ff (0x7ff0e093, little-endian)
// memory address where emulation starts
#define START_ADDRESS 0x10000
static void hook_block(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
{
printf(">>> Tracing basic block at 0x%" PRIx64 ", block size = 0x%x\n", address, size);
}
static void hook_code(uc_engine *uc, uint64_t address, uint32_t size, void *user_data)
{
printf(">>> Tracing instruction at 0x%" PRIx64 ", instruction size = 0x%x\n", address, size);
}
static void test_riscv32(void)
{
uc_engine *uc;
uc_err err;
uc_hook trace1, trace2;
int x1 = 0x6789; // X1 register
printf("Emulate 32-bit RISC-V code\n");
// Initialize emulator in RISC-V 32-bit mode
err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV32, &uc);
if (err) {
printf("Failed on uc_open() with error returned: %u (%s)\n",
err, uc_strerror(err));
return;
}
// map 2MB memory for this emulation
uc_mem_map(uc, START_ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);
// write machine code to be emulated to memory
uc_mem_write(uc, START_ADDRESS, RISCV_CODE, sizeof(RISCV_CODE) - 1);
// initialize machine registers
uc_reg_write(uc, UC_RISCV_REG_X1, &x1);
// tracing all basic blocks with customized callback
uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0);
// tracing one instruction at START_ADDRESS with customized callback
uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, START_ADDRESS, START_ADDRESS);
// emulate machine code in infinite time (last param = 0), or when
// finishing all the code.
err = uc_emu_start(uc, START_ADDRESS, START_ADDRESS + sizeof(RISCV_CODE) - 1, 0, 0);
if (err) {
printf("Failed on uc_emu_start() with error returned: %u (%s)\n", err, uc_strerror(err));
}
// now print out some registers
printf(">>> Emulation done. Below is the CPU context\n");
uc_reg_read(uc, UC_RISCV_REG_X1, &x1);
printf(">>> X1 = 0x%08X\n", x1);
uc_close(uc);
}
static void test_riscv64(void)
{
uc_engine *uc;
uc_err err;
uc_hook trace1, trace2;
int x1 = 0x6789; // X1 register
printf("===========================\n");
printf("Emulate 64-bit RISC-V code\n");
// Initialize emulator in RISC-V 64-bit mode
err = uc_open(UC_ARCH_RISCV, UC_MODE_RISCV64, &uc);
if (err) {
printf("Failed on uc_open() with error returned: %u (%s)\n",
err, uc_strerror(err));
return;
}
// map 2MB memory for this emulation
uc_mem_map(uc, START_ADDRESS, 2 * 1024 * 1024, UC_PROT_ALL);
// write machine code to be emulated to memory
uc_mem_write(uc, START_ADDRESS, RISCV_CODE, sizeof(RISCV_CODE) - 1);
// initialize machine registers
uc_reg_write(uc, UC_RISCV_REG_X1, &x1);
// tracing all basic blocks with customized callback
uc_hook_add(uc, &trace1, UC_HOOK_BLOCK, hook_block, NULL, 1, 0);
// tracing one instruction at START_ADDRESS with customized callback
uc_hook_add(uc, &trace2, UC_HOOK_CODE, hook_code, NULL, START_ADDRESS, START_ADDRESS);
// emulate machine code in infinite time (last param = 0), or when
// finishing all the code.
err = uc_emu_start(uc, START_ADDRESS, START_ADDRESS + sizeof(RISCV_CODE) - 1, 0, 0);
if (err) {
printf("Failed on uc_emu_start() with error returned: %u (%s)\n", err, uc_strerror(err));
}
// now print out some registers
printf(">>> Emulation done. Below is the CPU context\n");
uc_reg_read(uc, UC_RISCV_REG_X1, &x1);
printf(">>> X1 = 0x%08X\n", x1);
uc_close(uc);
}
int main(int argc, char **argv, char **envp)
{
// dynamically load shared library
#ifdef DYNLOAD
if (!uc_dyn_load(NULL, 0)) {
printf("Error dynamically loading shared library.\n");
printf("Please check that unicorn.dll/unicorn.so is available as well as\n");
printf("any other dependent dll/so files.\n");
printf("The easiest way is to place them in the same directory as this app.\n");
return 1;
}
#endif
test_riscv32();
test_riscv64();
// dynamically free shared library
#ifdef DYNLOAD
uc_dyn_free();
#endif
return 0;
}

54
uc.c
View file

@@ -20,6 +20,7 @@
#include "qemu/target/i386/unicorn.h"
#include "qemu/target/m68k/unicorn.h"
#include "qemu/target/mips/unicorn.h"
#include "qemu/target/riscv/unicorn.h"
#include "qemu/target/sparc/unicorn.h"
#include "qemu/include/hw/boards.h"
@@ -125,6 +126,9 @@ bool uc_arch_supported(uc_arch arch)
#ifdef UNICORN_HAS_PPC
case UC_ARCH_PPC: return true;
#endif
#ifdef UNICORN_HAS_RISCV
case UC_ARCH_RISCV: return true;
#endif
#ifdef UNICORN_HAS_SPARC
case UC_ARCH_SPARC: return true;
#endif
@@ -247,6 +251,24 @@ uc_err uc_open(uc_arch arch, uc_mode mode, uc_engine **result)
break;
#endif
#ifdef UNICORN_HAS_RISCV
case UC_ARCH_RISCV:
if (mode & ~UC_MODE_RISCV_MASK) {
free(uc);
return UC_ERR_MODE;
}
if (mode & UC_MODE_RISCV64) {
#ifdef UNICORN_HAS_RISCV64
uc->init_arch = riscv64_uc_init;
#endif
} else {
#ifdef UNICORN_HAS_RISCV32
uc->init_arch = riscv32_uc_init;
#endif
}
break;
#endif /* UNICORN_HAS_RISCV */
#ifdef UNICORN_HAS_SPARC
case UC_ARCH_SPARC:
if ((mode & ~UC_MODE_SPARC_MASK) ||
@@ -585,6 +607,11 @@ uc_err uc_emu_start(uc_engine* uc, uint64_t begin, uint64_t until, uint64_t time
uc_reg_write(uc, UC_MIPS_REG_PC, &begin);
break;
#endif
#ifdef UNICORN_HAS_RISCV
case UC_ARCH_RISCV:
uc_reg_write(uc, UC_RISCV_REG_PC, &begin);
break;
#endif
#ifdef UNICORN_HAS_SPARC
case UC_ARCH_SPARC:
// TODO: Sparc/Sparc64
@@ -1211,16 +1238,20 @@ static size_t cpu_context_size(uc_arch arch, uc_mode mode)
// of the interesting CPU registers
switch (arch) {
#ifdef UNICORN_HAS_M68K
case UC_ARCH_M68K: return M68K_REGS_STORAGE_SIZE;
case UC_ARCH_M68K:
return M68K_REGS_STORAGE_SIZE;
#endif
#ifdef UNICORN_HAS_X86
case UC_ARCH_X86: return X86_REGS_STORAGE_SIZE;
case UC_ARCH_X86:
return X86_REGS_STORAGE_SIZE;
#endif
#ifdef UNICORN_HAS_ARM
case UC_ARCH_ARM: return mode & UC_MODE_BIG_ENDIAN ? ARM_REGS_STORAGE_SIZE_armeb : ARM_REGS_STORAGE_SIZE_arm;
case UC_ARCH_ARM:
return mode & UC_MODE_BIG_ENDIAN ? ARM_REGS_STORAGE_SIZE_armeb : ARM_REGS_STORAGE_SIZE_arm;
#endif
#ifdef UNICORN_HAS_ARM64
case UC_ARCH_ARM64: return mode & UC_MODE_BIG_ENDIAN ? ARM64_REGS_STORAGE_SIZE_aarch64eb : ARM64_REGS_STORAGE_SIZE_aarch64;
case UC_ARCH_ARM64:
return mode & UC_MODE_BIG_ENDIAN ? ARM64_REGS_STORAGE_SIZE_aarch64eb : ARM64_REGS_STORAGE_SIZE_aarch64;
#endif
#ifdef UNICORN_HAS_MIPS
case UC_ARCH_MIPS:
@@ -1238,10 +1269,19 @@ static size_t cpu_context_size(uc_arch arch, uc_mode mode)
}
}
#endif
#ifdef UNICORN_HAS_SPARC
case UC_ARCH_SPARC: return mode & UC_MODE_SPARC64 ? SPARC64_REGS_STORAGE_SIZE : SPARC_REGS_STORAGE_SIZE;
#ifdef UNICORN_HAS_RISCV
case UC_ARCH_RISCV:
if (mode & UC_MODE_RISCV64) {
return RISCV64_REGS_STORAGE_SIZE_riscv64;
}
return RISCV32_REGS_STORAGE_SIZE_riscv32;
#endif
default: return 0;
#ifdef UNICORN_HAS_SPARC
case UC_ARCH_SPARC:
return mode & UC_MODE_SPARC64 ? SPARC64_REGS_STORAGE_SIZE : SPARC_REGS_STORAGE_SIZE;
#endif
default:
return 0;
}
}