mirror of
https://github.com/yuzu-emu/unicorn.git
synced 2025-11-19 19:44:48 +00:00
memory: Fix access_with_adjusted_size(small size) on big-endian memory regions
Memory regions configured as DEVICE_BIG_ENDIAN (or DEVICE_NATIVE_ENDIAN on
big-endian guest) behave incorrectly when the memory access 'size' is smaller
than the implementation 'access_size'.
In the following code segment from access_with_adjusted_size():
if (memory_region_big_endian(mr)) {
for (i = 0; i < size; i += access_size) {
r |= access_fn(mr, addr + i, value, access_size,
(size - access_size - i) * 8, access_mask, attrs);
}
(size - access_size - i) * 8 is the number of bits by which the current
value will be arithmetically shifted.
Currently we can only 'left' shift a read() access, and 'right' shift a write().
When the access 'size' is smaller than the implementation 'access_size', we
get a negative number of bits to shift.
For the read() case, a negative 'left' shift is a 'right' shift :)
However since the 'shift' type is unsigned, there is currently no way to
right shift.
Fix this by changing the access_fn() prototype to handle signed shift values,
and modify the memory_region_shift_read|write_access() helpers to correctly
arithmetic shift the opposite direction when the 'shift' value is negative.
This commit is contained in:
parent
de28c93299
commit
a3938167d4
|
|
@ -444,24 +444,36 @@ static void adjust_endianness(MemoryRegion *mr, uint64_t *data, unsigned size)
|
|||
}
|
||||
|
||||
/*
 * Fold one partial read result into the accumulated access value.
 *
 * @value: accumulator for the full access being assembled
 * @shift: signed bit offset of this sub-access within *value; a negative
 *         value means the sub-access lands below bit 0 of the accumulator
 *         (happens when the requested size is smaller than the region's
 *         implementation access size), so we must shift right instead
 * @mask:  mask selecting the valid bits of @tmp
 * @tmp:   raw value returned by the underlying read callback
 */
static inline void memory_region_shift_read_access(uint64_t *value,
                                                   signed shift,
                                                   uint64_t mask,
                                                   uint64_t tmp)
{
    if (shift >= 0) {
        *value |= (tmp & mask) << shift;
    } else {
        /* negative 'left' shift is a right shift of the same magnitude */
        *value |= (tmp & mask) >> -shift;
    }
}
|
||||
/*
 * Extract the sub-access portion of a value being written.
 *
 * @value: full value supplied by the caller of the write
 * @shift: signed bit offset of this sub-access within *value; negative
 *         when the requested size is smaller than the region's
 *         implementation access size, in which case the bits must be
 *         shifted left into position instead of right
 * @mask:  mask selecting the bits belonging to this sub-access
 *
 * Returns the masked, shifted value to hand to the write callback.
 */
static inline uint64_t memory_region_shift_write_access(uint64_t *value,
                                                        signed shift,
                                                        uint64_t mask)
{
    uint64_t tmp;

    if (shift >= 0) {
        tmp = (*value >> shift) & mask;
    } else {
        /* negative 'right' shift is a left shift of the same magnitude */
        tmp = (*value << -shift) & mask;
    }

    return tmp;
}
|
||||
|
||||
static MemTxResult memory_region_oldmmio_read_accessor(MemoryRegion *mr,
|
||||
hwaddr addr,
|
||||
uint64_t *value,
|
||||
unsigned size,
|
||||
unsigned shift,
|
||||
signed shift,
|
||||
uint64_t mask,
|
||||
MemTxAttrs attrs)
|
||||
{
|
||||
|
|
@ -478,7 +490,7 @@ static MemTxResult memory_region_read_accessor(MemoryRegion *mr,
|
|||
hwaddr addr,
|
||||
uint64_t *value,
|
||||
unsigned size,
|
||||
unsigned shift,
|
||||
signed shift,
|
||||
uint64_t mask,
|
||||
MemTxAttrs attrs)
|
||||
{
|
||||
|
|
@ -497,7 +509,7 @@ static MemTxResult memory_region_read_with_attrs_accessor(MemoryRegion *mr,
|
|||
hwaddr addr,
|
||||
uint64_t *value,
|
||||
unsigned size,
|
||||
unsigned shift,
|
||||
signed shift,
|
||||
uint64_t mask,
|
||||
MemTxAttrs attrs)
|
||||
{
|
||||
|
|
@ -519,7 +531,7 @@ static MemTxResult memory_region_oldmmio_write_accessor(MemoryRegion *mr,
|
|||
hwaddr addr,
|
||||
uint64_t *value,
|
||||
unsigned size,
|
||||
unsigned shift,
|
||||
signed shift,
|
||||
uint64_t mask,
|
||||
MemTxAttrs attrs)
|
||||
{
|
||||
|
|
@ -533,7 +545,7 @@ static MemTxResult memory_region_write_accessor(MemoryRegion *mr,
|
|||
hwaddr addr,
|
||||
uint64_t *value,
|
||||
unsigned size,
|
||||
unsigned shift,
|
||||
signed shift,
|
||||
uint64_t mask,
|
||||
MemTxAttrs attrs)
|
||||
{
|
||||
|
|
@ -547,7 +559,7 @@ static MemTxResult memory_region_write_with_attrs_accessor(MemoryRegion *mr,
|
|||
hwaddr addr,
|
||||
uint64_t *value,
|
||||
unsigned size,
|
||||
unsigned shift,
|
||||
signed shift,
|
||||
uint64_t mask,
|
||||
MemTxAttrs attrs)
|
||||
{
|
||||
|
|
@ -571,7 +583,7 @@ static MemTxResult access_with_adjusted_size(hwaddr addr,
|
|||
hwaddr addr,
|
||||
uint64_t *value,
|
||||
unsigned size,
|
||||
unsigned shift,
|
||||
signed shift,
|
||||
uint64_t mask,
|
||||
MemTxAttrs attrs),
|
||||
MemoryRegion *mr,
|
||||
|
|
|
|||
Loading…
Reference in a new issue