diff --git a/qemu/target/arm/cpu.h b/qemu/target/arm/cpu.h
index c633e406..7769770b 100644
--- a/qemu/target/arm/cpu.h
+++ b/qemu/target/arm/cpu.h
@@ -684,7 +684,7 @@ typedef struct CPUARMState {
      * should first be updated to something sparse instead of the current
      * supported_event_map[] array.
      */
-#define MAX_EVENT_ID 0x11
+#define MAX_EVENT_ID 0x24
     uint16_t supported_event_map[MAX_EVENT_ID + 1];
 
     // Unicorn engine
diff --git a/qemu/target/arm/helper.c b/qemu/target/arm/helper.c
index 227ff468..8c80a68f 100644
--- a/qemu/target/arm/helper.c
+++ b/qemu/target/arm/helper.c
@@ -914,6 +914,13 @@ typedef struct pm_event {
      * counters hold a difference from the return value from this function
      */
     uint64_t (*get_count)(CPUARMState *);
+    /*
+     * Return how many nanoseconds it will take (at a minimum) for count events
+     * to occur. A negative value indicates the counter will never overflow, or
+     * that the counter has otherwise arranged for the overflow bit to be set
+     * and the PMU interrupt to be raised on overflow.
+     */
+    int64_t (*ns_per_count)(uint64_t);
 } pm_event;
 
 static bool event_always_supported(CPUARMState *env)
@@ -930,6 +937,11 @@ static uint64_t swinc_get_count(CPUARMState *env)
     return 0;
 }
+static int64_t swinc_ns_per(uint64_t ignored)
+{
+    return -1;
+}
+
 /*
  * Return the underlying cycle count for the PMU cycle counters. If we're in
  * usermode, simply return 0.
  */
@@ -945,6 +957,11 @@ static uint64_t cycles_get_count(CPUARMState *env)
 }
 
 #ifndef CONFIG_USER_ONLY
+static int64_t cycles_ns_per(uint64_t cycles)
+{
+    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
+}
+
 static bool instructions_supported(CPUARMState *env)
 {
     return 0; /* Precise instruction counting */
@@ -956,25 +973,52 @@ static uint64_t instructions_get_count(CPUARMState *env)
 }
 #endif
 
+static bool pmu_8_1_events_supported(CPUARMState *env)
+{
+    /* For events which are supported in any v8.1 PMU */
+    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
+}
+
+static uint64_t zero_event_get_count(CPUARMState *env)
+{
+    /* For events which on QEMU never fire, so their count is always zero */
+    return 0;
+}
+
+static int64_t zero_event_ns_per(uint64_t cycles)
+{
+    /* An event which never fires can never overflow */
+    return -1;
+}
 
 static const pm_event pm_events[] = {
-    {
-        0x000, /* SW_INCR */
-        event_always_supported,
-        swinc_get_count,
+    { .number = 0x000, /* SW_INCR */
+      .supported = event_always_supported,
+      .get_count = swinc_get_count,
+      .ns_per_count = swinc_ns_per,
     },
 #ifndef CONFIG_USER_ONLY
-    {
-        0x008, /* INST_RETIRED, Instruction architecturally executed */
-        instructions_supported,
-        instructions_get_count,
+    { .number = 0x008, /* INST_RETIRED, Instruction architecturally executed */
+      .supported = instructions_supported,
+      .get_count = instructions_get_count,
+      .ns_per_count = swinc_ns_per, /* UNICORN: Stubbed */
+    },
+    { .number = 0x011, /* CPU_CYCLES, Cycle */
+      .supported = event_always_supported,
+      .get_count = cycles_get_count,
+      .ns_per_count = cycles_ns_per,
     },
-    {
-        0x011, /* CPU_CYCLES, Cycle */
-        event_always_supported,
-        cycles_get_count,
-    }
 #endif
+    { .number = 0x023, /* STALL_FRONTEND */
+      .supported = pmu_8_1_events_supported,
+      .get_count = zero_event_get_count,
+      .ns_per_count = zero_event_ns_per,
+    },
+    { .number = 0x024, /* STALL_BACKEND */
+      .supported = pmu_8_1_events_supported,
+      .get_count = zero_event_get_count,
+      .ns_per_count = zero_event_ns_per,
+    },
 };
 
 #define UNSUPPORTED_EVENT UINT16_MAX
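
Note (not part of the patch): the new ns_per_count hook answers "how many nanoseconds until this counter overflows?", which lets the PMU code schedule the overflow bit and PMU interrupt in advance, or skip that work when the callback returns a negative value. The sketch below is a minimal, self-contained illustration of that arithmetic only; the overflow_deadline_ns() helper and the 64-bit wrap point are assumptions made for the example, and ARM_CPU_FREQ / NANOSECONDS_PER_SECOND are given illustrative values rather than taken from the real headers.

/* Illustration only -- not part of the patch. */
#include <stdint.h>
#include <stdio.h>

/* Illustrative values; the real constants come from helper.c and timer.h. */
#define ARM_CPU_FREQ           1000000000ULL
#define NANOSECONDS_PER_SECOND 1000000000ULL

/* Same shape as the ns_per_count callbacks added by the patch. */
static int64_t cycles_ns_per(uint64_t cycles)
{
    return (ARM_CPU_FREQ / NANOSECONDS_PER_SECOND) * cycles;
}

static int64_t swinc_ns_per(uint64_t ignored)
{
    return -1; /* a software-increment counter never overflows on its own */
}

/*
 * Hypothetical helper: nanoseconds until a counter currently holding
 * `current` wraps past its maximum, or a negative value if it never will.
 */
static int64_t overflow_deadline_ns(int64_t (*ns_per_count)(uint64_t),
                                    uint64_t current)
{
    return ns_per_count(UINT64_MAX - current);
}

int main(void)
{
    uint64_t cycles = UINT64_MAX - 1000;   /* 1000 cycles from wrapping */

    printf("cycle counter overflows in %lld ns\n",
           (long long)overflow_deadline_ns(cycles_ns_per, cycles));
    printf("SW_INCR deadline: %lld (negative => never)\n",
           (long long)overflow_deadline_ns(swinc_ns_per, 0));
    return 0;
}

In the real code this deadline is what allows the emulator to arrange for the overflow bit and PMU interrupt ahead of time, as described in the comment added to struct pm_event; here it is only computed and printed.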